label (int64, 0-1) | func1 (string, 23-97k chars) | id (int64, 0-27.3k)
---|---|---|
1 | static void pci_config(void) { QVirtioPCIDevice *dev; QOSState *qs; int n_size = TEST_IMAGE_SIZE / 2; uint64_t capacity; qs = pci_test_start(); dev = virtio_blk_pci_init(qs->pcibus, PCI_SLOT); capacity = qvirtio_config_readq(&dev->vdev, 0); g_assert_cmpint(capacity, ==, TEST_IMAGE_SIZE / 512); qvirtio_set_driver_ok(&dev->vdev); qmp_discard_response("{ 'execute': 'block_resize', " " 'arguments': { 'device': 'drive0', " " 'size': %d } }", n_size); qvirtio_wait_config_isr(&dev->vdev, QVIRTIO_BLK_TIMEOUT_US); capacity = qvirtio_config_readq(&dev->vdev, 0); g_assert_cmpint(capacity, ==, n_size / 512); qvirtio_pci_device_disable(dev); g_free(dev); qtest_shutdown(qs); } | 22,613 |
1 | static int draw_slice(AVFilterLink *inlink, int y0, int h, int slice_dir) { AlphaExtractContext *extract = inlink->dst->priv; AVFilterBufferRef *cur_buf = inlink->cur_buf; AVFilterBufferRef *out_buf = inlink->dst->outputs[0]->out_buf; if (extract->is_packed_rgb) { int x, y; uint8_t *pin, *pout; for (y = y0; y < (y0 + h); y++) { pin = cur_buf->data[0] + y * cur_buf->linesize[0] + extract->rgba_map[A]; pout = out_buf->data[0] + y * out_buf->linesize[0]; for (x = 0; x < out_buf->video->w; x++) { *pout = *pin; pout += 1; pin += 4; } } } else if (cur_buf->linesize[A] == out_buf->linesize[Y]) { const int linesize = cur_buf->linesize[A]; memcpy(out_buf->data[Y] + y0 * linesize, cur_buf->data[A] + y0 * linesize, linesize * h); } else { const int linesize = FFMIN(out_buf->linesize[Y], cur_buf->linesize[A]); int y; for (y = y0; y < (y0 + h); y++) { memcpy(out_buf->data[Y] + y * out_buf->linesize[Y], cur_buf->data[A] + y * cur_buf->linesize[A], linesize); } } return ff_draw_slice(inlink->dst->outputs[0], y0, h, slice_dir); } | 22,614 |
1 | static int sad16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h) { int i; int s; const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0); vector unsigned char perm1, perm2, *pix1v, *pix2v; vector unsigned char t1, t2, t3,t4, t5; vector unsigned int sad; vector signed int sumdiffs; sad = (vector unsigned int)vec_splat_u32(0); for (i = 0; i < h; i++) { /* Read potentially unaligned pixels into t1 and t2 */ perm1 = vec_lvsl(0, pix1); pix1v = (vector unsigned char *) pix1; perm2 = vec_lvsl(0, pix2); pix2v = (vector unsigned char *) pix2; t1 = vec_perm(pix1v[0], pix1v[1], perm1); t2 = vec_perm(pix2v[0], pix2v[1], perm2); /* Calculate a sum of abs differences vector */ t3 = vec_max(t1, t2); t4 = vec_min(t1, t2); t5 = vec_sub(t3, t4); /* Add each 4 pixel group together and put 4 results into sad */ sad = vec_sum4s(t5, sad); pix1 += line_size; pix2 += line_size; } /* Sum up the four partial sums, and put the result into s */ sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero); sumdiffs = vec_splat(sumdiffs, 3); vec_ste(sumdiffs, 0, &s); return s; } | 22,616 |
1 | void rgb8tobgr8(const uint8_t *src, uint8_t *dst, unsigned int src_size) { unsigned i; unsigned num_pixels = src_size; for(i=0; i<num_pixels; i++) { unsigned b,g,r; register uint8_t rgb; rgb = src[i]; r = (rgb&0x07); g = (rgb&0x38)>>3; b = (rgb&0xC0)>>6; dst[i] = ((b<<1)&0x07) | ((g&0x07)<<3) | ((r&0x03)<<6); } } | 22,617 |
1 | static int net_vde_init(VLANState *vlan, const char *model, const char *name, const char *sock, int port, const char *group, int mode) { VDEState *s; char *init_group = strlen(group) ? (char *)group : NULL; char *init_sock = strlen(sock) ? (char *)sock : NULL; struct vde_open_args args = { .port = port, .group = init_group, .mode = mode, }; s = qemu_mallocz(sizeof(VDEState)); s->vde = vde_open(init_sock, "QEMU", &args); if (!s->vde){ free(s); return -1; } s->vc = qemu_new_vlan_client(vlan, model, name, vde_from_qemu, NULL, s); qemu_set_fd_handler(vde_datafd(s->vde), vde_to_qemu, NULL, s); snprintf(s->vc->info_str, sizeof(s->vc->info_str), "sock=%s,fd=%d", sock, vde_datafd(s->vde)); return 0; } | 22,618 |
1 | static int vorbis_parse_setup_hdr_residues(vorbis_context *vc){ GetBitContext *gb=&vc->gb; uint_fast8_t i, j, k; vc->residue_count=get_bits(gb, 6)+1; vc->residues=av_mallocz(vc->residue_count * sizeof(vorbis_residue)); AV_DEBUG(" There are %d residues. \n", vc->residue_count); for(i=0;i<vc->residue_count;++i) { vorbis_residue *res_setup=&vc->residues[i]; uint_fast8_t cascade[64]; uint_fast8_t high_bits; uint_fast8_t low_bits; res_setup->type=get_bits(gb, 16); AV_DEBUG(" %d. residue type %d \n", i, res_setup->type); res_setup->begin=get_bits(gb, 24); res_setup->end=get_bits(gb, 24); res_setup->partition_size=get_bits(gb, 24)+1; res_setup->classifications=get_bits(gb, 6)+1; res_setup->classbook=get_bits(gb, 8); if (res_setup->classbook>=vc->codebook_count) { av_log(vc->avccontext, AV_LOG_ERROR, "classbook value %d out of range. \n", res_setup->classbook); AV_DEBUG(" begin %d end %d part.size %d classif.s %d classbook %d \n", res_setup->begin, res_setup->end, res_setup->partition_size, res_setup->classifications, res_setup->classbook); for(j=0;j<res_setup->classifications;++j) { high_bits=0; low_bits=get_bits(gb, 3); if (get_bits1(gb)) { high_bits=get_bits(gb, 5); cascade[j]=(high_bits<<3)+low_bits; AV_DEBUG(" %d class casscade depth: %d \n", j, ilog(cascade[j])); res_setup->maxpass=0; for(j=0;j<res_setup->classifications;++j) { for(k=0;k<8;++k) { if (cascade[j]&(1<<k)) { int bits=get_bits(gb, 8); if (bits>=vc->codebook_count) { av_log(vc->avccontext, AV_LOG_ERROR, "book value %d out of range. \n", bits); res_setup->books[j][k]=bits; AV_DEBUG(" %d class casscade depth %d book: %d \n", j, k, res_setup->books[j][k]); if (k>res_setup->maxpass) { res_setup->maxpass=k; } else { res_setup->books[j][k]=-1; return 0; | 22,620 |
1 | static int flashsv_decode_block(AVCodecContext *avctx, AVPacket *avpkt, GetBitContext *gb, int block_size, int width, int height, int x_pos, int y_pos, int blk_idx) { struct FlashSVContext *s = avctx->priv_data; uint8_t *line = s->tmpblock; int k; int ret = inflateReset(&s->zstream); if (ret != Z_OK) { av_log(avctx, AV_LOG_ERROR, "Inflate reset error: %d\n", ret); return AVERROR_UNKNOWN; } if (s->zlibprime_curr || s->zlibprime_prev) { ret = flashsv2_prime(s, s->blocks[blk_idx].pos, s->blocks[blk_idx].size); if (ret < 0) return ret; } s->zstream.next_in = avpkt->data + get_bits_count(gb) / 8; s->zstream.avail_in = block_size; s->zstream.next_out = s->tmpblock; s->zstream.avail_out = s->block_size * 3; ret = inflate(&s->zstream, Z_FINISH); if (ret == Z_DATA_ERROR) { av_log(avctx, AV_LOG_ERROR, "Zlib resync occurred\n"); inflateSync(&s->zstream); ret = inflate(&s->zstream, Z_FINISH); } if (ret != Z_OK && ret != Z_STREAM_END) { //return -1; } if (s->is_keyframe) { s->blocks[blk_idx].pos = s->keyframedata + (get_bits_count(gb) / 8); s->blocks[blk_idx].size = block_size; } y_pos += s->diff_start; if (!s->color_depth) { /* Flash Screen Video stores the image upside down, so copy * lines to destination in reverse order. */ for (k = 1; k <= s->diff_height; k++) { memcpy(s->frame->data[0] + x_pos * 3 + (s->image_height - y_pos - k) * s->frame->linesize[0], line, width * 3); /* advance source pointer to next line */ line += width * 3; } } else { /* hybrid 15-bit/palette mode */ decode_hybrid(s->tmpblock, s->frame->data[0], s->image_height - (y_pos + 1 + s->diff_height), x_pos, s->diff_height, width, s->frame->linesize[0], s->pal); } skip_bits_long(gb, 8 * block_size); /* skip the consumed bits */ return 0; } | 22,621 |
1 | void breakpoint_handler(CPUX86State *env) { CPUBreakpoint *bp; if (env->watchpoint_hit) { if (env->watchpoint_hit->flags & BP_CPU) { env->watchpoint_hit = NULL; if (check_hw_breakpoints(env, 0)) raise_exception(env, EXCP01_DB); else cpu_resume_from_signal(env, NULL); } } else { QTAILQ_FOREACH(bp, &env->breakpoints, entry) if (bp->pc == env->eip) { if (bp->flags & BP_CPU) { check_hw_breakpoints(env, 1); raise_exception(env, EXCP01_DB); } break; } } } | 22,622 |
1 | static uint32_t m5206_mbar_readl(void *opaque, target_phys_addr_t offset) { m5206_mbar_state *s = (m5206_mbar_state *)opaque; int width; offset &= 0x3ff; if (offset > 0x200) { hw_error("Bad MBAR read offset 0x%x", (int)offset); } width = m5206_mbar_width[offset >> 2]; if (width < 4) { uint32_t val; val = m5206_mbar_readw(opaque, offset) << 16; val |= m5206_mbar_readw(opaque, offset + 2); return val; } return m5206_mbar_read(s, offset, 4); } | 22,623 |
1 | static void test_qemu_strtoul_empty(void) { const char *str = ""; char f = 'X'; const char *endptr = &f; unsigned long res = 999; int err; err = qemu_strtoul(str, &endptr, 0, &res); g_assert_cmpint(err, ==, 0); g_assert_cmpint(res, ==, 0); g_assert(endptr == str); } | 22,624 |
1 | static void lm32_cpu_class_init(ObjectClass *oc, void *data) { LM32CPUClass *lcc = LM32_CPU_CLASS(oc); CPUClass *cc = CPU_CLASS(oc); DeviceClass *dc = DEVICE_CLASS(oc); lcc->parent_realize = dc->realize; dc->realize = lm32_cpu_realizefn; lcc->parent_reset = cc->reset; cc->reset = lm32_cpu_reset; cc->class_by_name = lm32_cpu_class_by_name; cc->has_work = lm32_cpu_has_work; cc->do_interrupt = lm32_cpu_do_interrupt; cc->cpu_exec_interrupt = lm32_cpu_exec_interrupt; cc->dump_state = lm32_cpu_dump_state; cc->set_pc = lm32_cpu_set_pc; cc->gdb_read_register = lm32_cpu_gdb_read_register; cc->gdb_write_register = lm32_cpu_gdb_write_register; #ifdef CONFIG_USER_ONLY cc->handle_mmu_fault = lm32_cpu_handle_mmu_fault; #else cc->get_phys_page_debug = lm32_cpu_get_phys_page_debug; cc->vmsd = &vmstate_lm32_cpu; #endif cc->gdb_num_core_regs = 32 + 7; cc->gdb_stop_before_watchpoint = true; cc->debug_excp_handler = lm32_debug_excp_handler; } | 22,625 |
1 | static void vnc_copy(VncState *vs, int src_x, int src_y, int dst_x, int dst_y, int w, int h) { /* send bitblit op to the vnc client */ vnc_lock_output(vs); vnc_write_u8(vs, VNC_MSG_SERVER_FRAMEBUFFER_UPDATE); vnc_write_u8(vs, 0); vnc_write_u16(vs, 1); /* number of rects */ vnc_framebuffer_update(vs, dst_x, dst_y, w, h, VNC_ENCODING_COPYRECT); vnc_write_u16(vs, src_x); vnc_write_u16(vs, src_y); vnc_unlock_output(vs); vnc_flush(vs); } | 22,627 |
0 | static av_always_inline void filter_mb_dir(H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize, int mb_xy, int mb_type, int mvy_limit, int first_vertical_edge_done, int chroma, int chroma444, int dir) { MpegEncContext * const s = &h->s; int edge; int chroma_qp_avg[2]; const int mbm_xy = dir == 0 ? mb_xy -1 : h->top_mb_xy; const int mbm_type = dir == 0 ? h->left_type[LTOP] : h->top_type; // how often to recheck mv-based bS when iterating between edges static const uint8_t mask_edge_tab[2][8]={{0,3,3,3,1,1,1,1}, {0,3,1,1,3,3,3,3}}; const int mask_edge = mask_edge_tab[dir][(mb_type>>3)&7]; const int edges = mask_edge== 3 && !(h->cbp&15) ? 1 : 4; // how often to recheck mv-based bS when iterating along each edge const int mask_par0 = mb_type & (MB_TYPE_16x16 | (MB_TYPE_8x16 >> dir)); if(mbm_type && !first_vertical_edge_done){ if (FRAME_MBAFF && (dir == 1) && ((mb_y&1) == 0) && IS_INTERLACED(mbm_type&~mb_type) ) { // This is a special case in the norm where the filtering must // be done twice (one each of the field) even if we are in a // frame macroblock. // unsigned int tmp_linesize = 2 * linesize; unsigned int tmp_uvlinesize = 2 * uvlinesize; int mbn_xy = mb_xy - 2 * s->mb_stride; int j; for(j=0; j<2; j++, mbn_xy += s->mb_stride){ DECLARE_ALIGNED(8, int16_t, bS)[4]; int qp; if (IS_INTRA(mb_type | s->current_picture.f.mb_type[mbn_xy])) { AV_WN64A(bS, 0x0003000300030003ULL); } else { if (!CABAC && IS_8x8DCT(s->current_picture.f.mb_type[mbn_xy])) { bS[0]= 1+((h->cbp_table[mbn_xy] & 0x4000)||h->non_zero_count_cache[scan8[0]+0]); bS[1]= 1+((h->cbp_table[mbn_xy] & 0x4000)||h->non_zero_count_cache[scan8[0]+1]); bS[2]= 1+((h->cbp_table[mbn_xy] & 0x8000)||h->non_zero_count_cache[scan8[0]+2]); bS[3]= 1+((h->cbp_table[mbn_xy] & 0x8000)||h->non_zero_count_cache[scan8[0]+3]); }else{ const uint8_t *mbn_nnz = h->non_zero_count[mbn_xy] + 3*4; int i; for( i = 0; i < 4; i++ ) { bS[i] = 1 + !!(h->non_zero_count_cache[scan8[0]+i] | mbn_nnz[i]); } } } // Do not use s->qscale as luma quantizer because it has not the same // value in IPCM macroblocks. 
qp = (s->current_picture.f.qscale_table[mb_xy] + s->current_picture.f.qscale_table[mbn_xy] + 1) >> 1; tprintf(s->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, tmp_linesize, tmp_uvlinesize); { int i; for (i = 0; i < 4; i++) tprintf(s->avctx, " bS[%d]:%d", i, bS[i]); tprintf(s->avctx, "\n"); } filter_mb_edgeh( &img_y[j*linesize], tmp_linesize, bS, qp, h ); chroma_qp_avg[0] = (h->chroma_qp[0] + get_chroma_qp(h, 0, s->current_picture.f.qscale_table[mbn_xy]) + 1) >> 1; chroma_qp_avg[1] = (h->chroma_qp[1] + get_chroma_qp(h, 1, s->current_picture.f.qscale_table[mbn_xy]) + 1) >> 1; if (chroma) { if (chroma444) { filter_mb_edgeh (&img_cb[j*uvlinesize], tmp_uvlinesize, bS, chroma_qp_avg[0], h); filter_mb_edgeh (&img_cr[j*uvlinesize], tmp_uvlinesize, bS, chroma_qp_avg[1], h); } else { filter_mb_edgech(&img_cb[j*uvlinesize], tmp_uvlinesize, bS, chroma_qp_avg[0], h); filter_mb_edgech(&img_cr[j*uvlinesize], tmp_uvlinesize, bS, chroma_qp_avg[1], h); } } } }else{ DECLARE_ALIGNED(8, int16_t, bS)[4]; int qp; if( IS_INTRA(mb_type|mbm_type)) { AV_WN64A(bS, 0x0003000300030003ULL); if ( (!IS_INTERLACED(mb_type|mbm_type)) || ((FRAME_MBAFF || (s->picture_structure != PICT_FRAME)) && (dir == 0)) ) AV_WN64A(bS, 0x0004000400040004ULL); } else { int i; int mv_done; if( dir && FRAME_MBAFF && IS_INTERLACED(mb_type ^ mbm_type)) { AV_WN64A(bS, 0x0001000100010001ULL); mv_done = 1; } else if( mask_par0 && ((mbm_type & (MB_TYPE_16x16 | (MB_TYPE_8x16 >> dir)))) ) { int b_idx= 8 + 4; int bn_idx= b_idx - (dir ? 8:1); bS[0] = bS[1] = bS[2] = bS[3] = check_mv(h, 8 + 4, bn_idx, mvy_limit); mv_done = 1; } else mv_done = 0; for( i = 0; i < 4; i++ ) { int x = dir == 0 ? 0 : i; int y = dir == 0 ? i : 0; int b_idx= 8 + 4 + x + 8*y; int bn_idx= b_idx - (dir ? 8:1); if( h->non_zero_count_cache[b_idx] | h->non_zero_count_cache[bn_idx] ) { bS[i] = 2; } else if(!mv_done) { bS[i] = check_mv(h, b_idx, bn_idx, mvy_limit); } } } /* Filter edge */ // Do not use s->qscale as luma quantizer because it has not the same // value in IPCM macroblocks. 
if(bS[0]+bS[1]+bS[2]+bS[3]){ qp = (s->current_picture.f.qscale_table[mb_xy] + s->current_picture.f.qscale_table[mbm_xy] + 1) >> 1; //tprintf(s->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d, QPc:%d, QPcn:%d\n", mb_x, mb_y, dir, edge, qp, h->chroma_qp[0], s->current_picture.qscale_table[mbn_xy]); tprintf(s->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, linesize, uvlinesize); //{ int i; for (i = 0; i < 4; i++) tprintf(s->avctx, " bS[%d]:%d", i, bS[i]); tprintf(s->avctx, "\n"); } chroma_qp_avg[0] = (h->chroma_qp[0] + get_chroma_qp(h, 0, s->current_picture.f.qscale_table[mbm_xy]) + 1) >> 1; chroma_qp_avg[1] = (h->chroma_qp[1] + get_chroma_qp(h, 1, s->current_picture.f.qscale_table[mbm_xy]) + 1) >> 1; if( dir == 0 ) { filter_mb_edgev( &img_y[0], linesize, bS, qp, h ); if (chroma) { if (chroma444) { filter_mb_edgev ( &img_cb[0], uvlinesize, bS, chroma_qp_avg[0], h); filter_mb_edgev ( &img_cr[0], uvlinesize, bS, chroma_qp_avg[1], h); } else { filter_mb_edgecv( &img_cb[0], uvlinesize, bS, chroma_qp_avg[0], h); filter_mb_edgecv( &img_cr[0], uvlinesize, bS, chroma_qp_avg[1], h); } } } else { filter_mb_edgeh( &img_y[0], linesize, bS, qp, h ); if (chroma) { if (chroma444) { filter_mb_edgeh ( &img_cb[0], uvlinesize, bS, chroma_qp_avg[0], h); filter_mb_edgeh ( &img_cr[0], uvlinesize, bS, chroma_qp_avg[1], h); } else { filter_mb_edgech( &img_cb[0], uvlinesize, bS, chroma_qp_avg[0], h); filter_mb_edgech( &img_cr[0], uvlinesize, bS, chroma_qp_avg[1], h); } } } } } } /* Calculate bS */ for( edge = 1; edge < edges; edge++ ) { DECLARE_ALIGNED(8, int16_t, bS)[4]; int qp; if( IS_8x8DCT(mb_type & (edge<<24)) ) // (edge&1) && IS_8x8DCT(mb_type) continue; if( IS_INTRA(mb_type)) { AV_WN64A(bS, 0x0003000300030003ULL); } else { int i; int mv_done; if( edge & mask_edge ) { AV_ZERO64(bS); mv_done = 1; } else if( mask_par0 ) { int b_idx= 8 + 4 + edge * (dir ? 8:1); int bn_idx= b_idx - (dir ? 8:1); bS[0] = bS[1] = bS[2] = bS[3] = check_mv(h, b_idx, bn_idx, mvy_limit); mv_done = 1; } else mv_done = 0; for( i = 0; i < 4; i++ ) { int x = dir == 0 ? edge : i; int y = dir == 0 ? i : edge; int b_idx= 8 + 4 + x + 8*y; int bn_idx= b_idx - (dir ? 8:1); if( h->non_zero_count_cache[b_idx] | h->non_zero_count_cache[bn_idx] ) { bS[i] = 2; } else if(!mv_done) { bS[i] = check_mv(h, b_idx, bn_idx, mvy_limit); } } if(bS[0]+bS[1]+bS[2]+bS[3] == 0) continue; } /* Filter edge */ // Do not use s->qscale as luma quantizer because it has not the same // value in IPCM macroblocks. 
qp = s->current_picture.f.qscale_table[mb_xy]; //tprintf(s->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d, QPc:%d, QPcn:%d\n", mb_x, mb_y, dir, edge, qp, h->chroma_qp[0], s->current_picture.qscale_table[mbn_xy]); tprintf(s->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, linesize, uvlinesize); //{ int i; for (i = 0; i < 4; i++) tprintf(s->avctx, " bS[%d]:%d", i, bS[i]); tprintf(s->avctx, "\n"); } if( dir == 0 ) { filter_mb_edgev( &img_y[4*edge << h->pixel_shift], linesize, bS, qp, h ); if (chroma) { if (chroma444) { filter_mb_edgev ( &img_cb[4*edge << h->pixel_shift], uvlinesize, bS, h->chroma_qp[0], h); filter_mb_edgev ( &img_cr[4*edge << h->pixel_shift], uvlinesize, bS, h->chroma_qp[1], h); } else if( (edge&1) == 0 ) { filter_mb_edgecv( &img_cb[2*edge << h->pixel_shift], uvlinesize, bS, h->chroma_qp[0], h); filter_mb_edgecv( &img_cr[2*edge << h->pixel_shift], uvlinesize, bS, h->chroma_qp[1], h); } } } else { filter_mb_edgeh( &img_y[4*edge*linesize], linesize, bS, qp, h ); if (chroma) { if (chroma444) { filter_mb_edgeh ( &img_cb[4*edge*uvlinesize], uvlinesize, bS, h->chroma_qp[0], h); filter_mb_edgeh ( &img_cr[4*edge*uvlinesize], uvlinesize, bS, h->chroma_qp[1], h); } else if( (edge&1) == 0 ) { filter_mb_edgech( &img_cb[2*edge*uvlinesize], uvlinesize, bS, h->chroma_qp[0], h); filter_mb_edgech( &img_cr[2*edge*uvlinesize], uvlinesize, bS, h->chroma_qp[1], h); } } } } } | 22,628 |
0 | static int mov_read_close(AVFormatContext *s) { MOVContext *mov = s->priv_data; int i, j; for (i = 0; i < s->nb_streams; i++) { AVStream *st = s->streams[i]; MOVStreamContext *sc = st->priv_data; av_freep(&sc->ctts_data); for (j = 0; j < sc->drefs_count; j++) { av_freep(&sc->drefs[j].path); av_freep(&sc->drefs[j].dir); } av_freep(&sc->drefs); if (sc->pb && sc->pb != s->pb) avio_close(sc->pb); av_freep(&sc->chunk_offsets); av_freep(&sc->stsc_data); av_freep(&sc->sample_sizes); av_freep(&sc->keyframes); av_freep(&sc->stts_data); av_freep(&sc->stps_data); av_freep(&sc->rap_group); av_freep(&sc->display_matrix); } if (mov->dv_demux) { avformat_free_context(mov->dv_fctx); mov->dv_fctx = NULL; } av_freep(&mov->trex_data); return 0; } | 22,629 |
0 | static int color_request_frame(AVFilterLink *link) { ColorContext *color = link->src->priv; AVFilterBufferRef *picref = ff_get_video_buffer(link, AV_PERM_WRITE, color->w, color->h); int ret; picref->video->pixel_aspect = (AVRational) {1, 1}; picref->pts = color->pts++; picref->pos = -1; ret = ff_start_frame(link, avfilter_ref_buffer(picref, ~0)); if (ret < 0) goto fail; ff_draw_rectangle(picref->data, picref->linesize, color->line, color->line_step, color->hsub, color->vsub, 0, 0, color->w, color->h); ret = ff_draw_slice(link, 0, color->h, 1); if (ret < 0) goto fail; ret = ff_end_frame(link); fail: avfilter_unref_buffer(picref); return ret; } | 22,630 |
0 | static int mov_read_stsd(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom) { AVStream *st = c->fc->streams[c->fc->nb_streams-1]; MOVStreamContext *sc = st->priv_data; int j, entries, pseudo_stream_id; get_byte(pb); /* version */ get_be24(pb); /* flags */ entries = get_be32(pb); for(pseudo_stream_id=0; pseudo_stream_id<entries; pseudo_stream_id++) { //Parsing Sample description table enum CodecID id; int dref_id; MOV_atom_t a = { 0, 0, 0 }; offset_t start_pos = url_ftell(pb); int size = get_be32(pb); /* size */ uint32_t format = get_le32(pb); /* data format */ get_be32(pb); /* reserved */ get_be16(pb); /* reserved */ dref_id = get_be16(pb); if (st->codec->codec_tag && st->codec->codec_tag != format && (c->fc->video_codec_id ? codec_get_id(codec_movvideo_tags, format) != c->fc->video_codec_id : st->codec->codec_tag != MKTAG('j','p','e','g')) ){ /* Multiple fourcc, we skip JPEG. This is not correct, we should * export it as a separate AVStream but this needs a few changes * in the MOV demuxer, patch welcome. */ av_log(c->fc, AV_LOG_WARNING, "multiple fourcc not supported\n"); url_fskip(pb, size - (url_ftell(pb) - start_pos)); continue; } sc->pseudo_stream_id = st->codec->codec_tag ? -1 : pseudo_stream_id; sc->dref_id= dref_id; st->codec->codec_tag = format; id = codec_get_id(codec_movaudio_tags, format); if (id<=0 && (format&0xFFFF) == 'm'+('s'<<8)) id = codec_get_id(codec_wav_tags, bswap_32(format)&0xFFFF); if (st->codec->codec_type != CODEC_TYPE_VIDEO && id > 0) { st->codec->codec_type = CODEC_TYPE_AUDIO; } else if (st->codec->codec_type != CODEC_TYPE_AUDIO && /* do not overwrite codec type */ format && format != MKTAG('m','p','4','s')) { /* skip old asf mpeg4 tag */ id = codec_get_id(codec_movvideo_tags, format); if (id <= 0) id = codec_get_id(codec_bmp_tags, format); if (id > 0) st->codec->codec_type = CODEC_TYPE_VIDEO; else if(st->codec->codec_type == CODEC_TYPE_DATA){ id = codec_get_id(ff_codec_movsubtitle_tags, format); if(id > 0) st->codec->codec_type = CODEC_TYPE_SUBTITLE; } } dprintf(c->fc, "size=%d 4CC= %c%c%c%c codec_type=%d\n", size, (format >> 0) & 0xff, (format >> 8) & 0xff, (format >> 16) & 0xff, (format >> 24) & 0xff, st->codec->codec_type); if(st->codec->codec_type==CODEC_TYPE_VIDEO) { uint8_t codec_name[32]; unsigned int color_depth; int color_greyscale; st->codec->codec_id = id; get_be16(pb); /* version */ get_be16(pb); /* revision level */ get_be32(pb); /* vendor */ get_be32(pb); /* temporal quality */ get_be32(pb); /* spatial quality */ st->codec->width = get_be16(pb); /* width */ st->codec->height = get_be16(pb); /* height */ get_be32(pb); /* horiz resolution */ get_be32(pb); /* vert resolution */ get_be32(pb); /* data size, always 0 */ get_be16(pb); /* frames per samples */ get_buffer(pb, codec_name, 32); /* codec name, pascal string */ if (codec_name[0] <= 31) { memcpy(st->codec->codec_name, &codec_name[1],codec_name[0]); st->codec->codec_name[codec_name[0]] = 0; } st->codec->bits_per_coded_sample = get_be16(pb); /* depth */ st->codec->color_table_id = get_be16(pb); /* colortable id */ dprintf(c->fc, "depth %d, ctab id %d\n", st->codec->bits_per_coded_sample, st->codec->color_table_id); /* figure out the palette situation */ color_depth = st->codec->bits_per_coded_sample & 0x1F; color_greyscale = st->codec->bits_per_coded_sample & 0x20; /* if the depth is 2, 4, or 8 bpp, file is palettized */ if ((color_depth == 2) || (color_depth == 4) || (color_depth == 8)) { /* for palette traversal */ unsigned int color_start, color_count, color_end; unsigned char r, g, b; if 
(color_greyscale) { int color_index, color_dec; /* compute the greyscale palette */ st->codec->bits_per_coded_sample = color_depth; color_count = 1 << color_depth; color_index = 255; color_dec = 256 / (color_count - 1); for (j = 0; j < color_count; j++) { r = g = b = color_index; c->palette_control.palette[j] = (r << 16) | (g << 8) | (b); color_index -= color_dec; if (color_index < 0) color_index = 0; } } else if (st->codec->color_table_id) { const uint8_t *color_table; /* if flag bit 3 is set, use the default palette */ color_count = 1 << color_depth; if (color_depth == 2) color_table = ff_qt_default_palette_4; else if (color_depth == 4) color_table = ff_qt_default_palette_16; else color_table = ff_qt_default_palette_256; for (j = 0; j < color_count; j++) { r = color_table[j * 4 + 0]; g = color_table[j * 4 + 1]; b = color_table[j * 4 + 2]; c->palette_control.palette[j] = (r << 16) | (g << 8) | (b); } } else { /* load the palette from the file */ color_start = get_be32(pb); color_count = get_be16(pb); color_end = get_be16(pb); if ((color_start <= 255) && (color_end <= 255)) { for (j = color_start; j <= color_end; j++) { /* each R, G, or B component is 16 bits; * only use the top 8 bits; skip alpha bytes * up front */ get_byte(pb); get_byte(pb); r = get_byte(pb); get_byte(pb); g = get_byte(pb); get_byte(pb); b = get_byte(pb); get_byte(pb); c->palette_control.palette[j] = (r << 16) | (g << 8) | (b); } } } st->codec->palctrl = &c->palette_control; st->codec->palctrl->palette_changed = 1; } else st->codec->palctrl = NULL; } else if(st->codec->codec_type==CODEC_TYPE_AUDIO) { int bits_per_sample, flags; uint16_t version = get_be16(pb); st->codec->codec_id = id; get_be16(pb); /* revision level */ get_be32(pb); /* vendor */ st->codec->channels = get_be16(pb); /* channel count */ dprintf(c->fc, "audio channels %d\n", st->codec->channels); st->codec->bits_per_coded_sample = get_be16(pb); /* sample size */ sc->audio_cid = get_be16(pb); get_be16(pb); /* packet size = 0 */ st->codec->sample_rate = ((get_be32(pb) >> 16)); //Read QT version 1 fields. In version 0 these do not exist. dprintf(c->fc, "version =%d, isom =%d\n",version,c->isom); if(!c->isom) { if(version==1) { sc->samples_per_frame = get_be32(pb); get_be32(pb); /* bytes per packet */ sc->bytes_per_frame = get_be32(pb); get_be32(pb); /* bytes per sample */ } else if(version==2) { get_be32(pb); /* sizeof struct only */ st->codec->sample_rate = av_int2dbl(get_be64(pb)); /* float 64 */ st->codec->channels = get_be32(pb); get_be32(pb); /* always 0x7F000000 */ st->codec->bits_per_coded_sample = get_be32(pb); /* bits per channel if sound is uncompressed */ flags = get_be32(pb); /* lcpm format specific flag */ sc->bytes_per_frame = get_be32(pb); /* bytes per audio packet if constant */ sc->samples_per_frame = get_be32(pb); /* lpcm frames per audio packet if constant */ if (format == MKTAG('l','p','c','m')) st->codec->codec_id = mov_get_lpcm_codec_id(st->codec->bits_per_coded_sample, flags); } } switch (st->codec->codec_id) { case CODEC_ID_PCM_S8: case CODEC_ID_PCM_U8: if (st->codec->bits_per_coded_sample == 16) st->codec->codec_id = CODEC_ID_PCM_S16BE; break; case CODEC_ID_PCM_S16LE: case CODEC_ID_PCM_S16BE: if (st->codec->bits_per_coded_sample == 8) st->codec->codec_id = CODEC_ID_PCM_S8; else if (st->codec->bits_per_coded_sample == 24) st->codec->codec_id = st->codec->codec_id == CODEC_ID_PCM_S16BE ? 
CODEC_ID_PCM_S24BE : CODEC_ID_PCM_S24LE; break; /* set values for old format before stsd version 1 appeared */ case CODEC_ID_MACE3: sc->samples_per_frame = 6; sc->bytes_per_frame = 2*st->codec->channels; break; case CODEC_ID_MACE6: sc->samples_per_frame = 6; sc->bytes_per_frame = 1*st->codec->channels; break; case CODEC_ID_ADPCM_IMA_QT: sc->samples_per_frame = 64; sc->bytes_per_frame = 34*st->codec->channels; break; case CODEC_ID_GSM: sc->samples_per_frame = 160; sc->bytes_per_frame = 33; break; default: break; } bits_per_sample = av_get_bits_per_sample(st->codec->codec_id); if (bits_per_sample) { st->codec->bits_per_coded_sample = bits_per_sample; sc->sample_size = (bits_per_sample >> 3) * st->codec->channels; } } else if(st->codec->codec_type==CODEC_TYPE_SUBTITLE){ st->codec->codec_id= id; } else { /* other codec type, just skip (rtp, mp4s, tmcd ...) */ url_fskip(pb, size - (url_ftell(pb) - start_pos)); } /* this will read extra atoms at the end (wave, alac, damr, avcC, SMI ...) */ a.size = size - (url_ftell(pb) - start_pos); if (a.size > 8) { if (mov_read_default(c, pb, a) < 0) return -1; } else if (a.size > 0) url_fskip(pb, a.size); } if(st->codec->codec_type==CODEC_TYPE_AUDIO && st->codec->sample_rate==0 && sc->time_scale>1) st->codec->sample_rate= sc->time_scale; /* special codec parameters handling */ switch (st->codec->codec_id) { #ifdef CONFIG_DV_DEMUXER case CODEC_ID_DVAUDIO: c->dv_fctx = av_alloc_format_context(); c->dv_demux = dv_init_demux(c->dv_fctx); if (!c->dv_demux) { av_log(c->fc, AV_LOG_ERROR, "dv demux context init error\n"); return -1; } sc->dv_audio_container = 1; st->codec->codec_id = CODEC_ID_PCM_S16LE; break; #endif /* no ifdef since parameters are always those */ case CODEC_ID_AMR_WB: st->codec->sample_rate= 16000; st->codec->channels= 1; /* really needed */ break; case CODEC_ID_QCELP: case CODEC_ID_AMR_NB: st->codec->frame_size= sc->samples_per_frame; st->codec->sample_rate= 8000; st->codec->channels= 1; /* really needed */ break; case CODEC_ID_MP2: case CODEC_ID_MP3: st->codec->codec_type = CODEC_TYPE_AUDIO; /* force type after stsd for m1a hdlr */ st->need_parsing = AVSTREAM_PARSE_FULL; break; case CODEC_ID_GSM: case CODEC_ID_ADPCM_MS: case CODEC_ID_ADPCM_IMA_WAV: st->codec->block_align = sc->bytes_per_frame; break; case CODEC_ID_ALAC: if (st->codec->extradata_size == 36) st->codec->frame_size = AV_RB32((st->codec->extradata+12)); break; default: break; } return 0; } | 22,631 |
0 | void ff_hevc_deblocking_boundary_strengths(HEVCContext *s, int x0, int y0, int log2_trafo_size) { HEVCLocalContext *lc = &s->HEVClc; MvField *tab_mvf = s->ref->tab_mvf; int log2_min_pu_size = s->sps->log2_min_pu_size; int log2_min_tu_size = s->sps->log2_min_tb_size; int min_pu_width = s->sps->min_pu_width; int min_tu_width = s->sps->min_tb_width; int is_intra = tab_mvf[(y0 >> log2_min_pu_size) * min_pu_width + (x0 >> log2_min_pu_size)].is_intra; int i, j, bs; if (y0 > 0 && (y0 & 7) == 0) { int yp_pu = (y0 - 1) >> log2_min_pu_size; int yq_pu = y0 >> log2_min_pu_size; int yp_tu = (y0 - 1) >> log2_min_tu_size; int yq_tu = y0 >> log2_min_tu_size; for (i = 0; i < (1 << log2_trafo_size); i += 4) { int x_pu = (x0 + i) >> log2_min_pu_size; int x_tu = (x0 + i) >> log2_min_tu_size; MvField *top = &tab_mvf[yp_pu * min_pu_width + x_pu]; MvField *curr = &tab_mvf[yq_pu * min_pu_width + x_pu]; uint8_t top_cbf_luma = s->cbf_luma[yp_tu * min_tu_width + x_tu]; uint8_t curr_cbf_luma = s->cbf_luma[yq_tu * min_tu_width + x_tu]; RefPicList *top_refPicList = ff_hevc_get_ref_list(s, s->ref, x0 + i, y0 - 1); bs = boundary_strength(s, curr, curr_cbf_luma, top, top_cbf_luma, top_refPicList, 1); if (!s->sh.slice_loop_filter_across_slices_enabled_flag && lc->boundary_flags & BOUNDARY_UPPER_SLICE && (y0 % (1 << s->sps->log2_ctb_size)) == 0) bs = 0; else if (!s->pps->loop_filter_across_tiles_enabled_flag && lc->boundary_flags & BOUNDARY_UPPER_TILE && (y0 % (1 << s->sps->log2_ctb_size)) == 0) bs = 0; if (bs) s->horizontal_bs[((x0 + i) + y0 * s->bs_width) >> 2] = bs; } } // bs for TU internal horizontal PU boundaries if (log2_trafo_size > s->sps->log2_min_pu_size && !is_intra) for (j = 8; j < (1 << log2_trafo_size); j += 8) { int yp_pu = (y0 + j - 1) >> log2_min_pu_size; int yq_pu = (y0 + j) >> log2_min_pu_size; int yp_tu = (y0 + j - 1) >> log2_min_tu_size; int yq_tu = (y0 + j) >> log2_min_tu_size; for (i = 0; i < (1 << log2_trafo_size); i += 4) { int x_pu = (x0 + i) >> log2_min_pu_size; int x_tu = (x0 + i) >> log2_min_tu_size; MvField *top = &tab_mvf[yp_pu * min_pu_width + x_pu]; MvField *curr = &tab_mvf[yq_pu * min_pu_width + x_pu]; uint8_t top_cbf_luma = s->cbf_luma[yp_tu * min_tu_width + x_tu]; uint8_t curr_cbf_luma = s->cbf_luma[yq_tu * min_tu_width + x_tu]; RefPicList *top_refPicList = ff_hevc_get_ref_list(s, s->ref, x0 + i, y0 + j - 1); bs = boundary_strength(s, curr, curr_cbf_luma, top, top_cbf_luma, top_refPicList, 0); if (bs) s->horizontal_bs[((x0 + i) + (y0 + j) * s->bs_width) >> 2] = bs; } } // bs for vertical TU boundaries if (x0 > 0 && (x0 & 7) == 0) { int xp_pu = (x0 - 1) >> log2_min_pu_size; int xq_pu = x0 >> log2_min_pu_size; int xp_tu = (x0 - 1) >> log2_min_tu_size; int xq_tu = x0 >> log2_min_tu_size; for (i = 0; i < (1 << log2_trafo_size); i += 4) { int y_pu = (y0 + i) >> log2_min_pu_size; int y_tu = (y0 + i) >> log2_min_tu_size; MvField *left = &tab_mvf[y_pu * min_pu_width + xp_pu]; MvField *curr = &tab_mvf[y_pu * min_pu_width + xq_pu]; uint8_t left_cbf_luma = s->cbf_luma[y_tu * min_tu_width + xp_tu]; uint8_t curr_cbf_luma = s->cbf_luma[y_tu * min_tu_width + xq_tu]; RefPicList *left_refPicList = ff_hevc_get_ref_list(s, s->ref, x0 - 1, y0 + i); bs = boundary_strength(s, curr, curr_cbf_luma, left, left_cbf_luma, left_refPicList, 1); if (!s->sh.slice_loop_filter_across_slices_enabled_flag && lc->boundary_flags & BOUNDARY_LEFT_SLICE && (x0 % (1 << s->sps->log2_ctb_size)) == 0) bs = 0; else if (!s->pps->loop_filter_across_tiles_enabled_flag && lc->boundary_flags & BOUNDARY_LEFT_TILE && (x0 % (1 << 
s->sps->log2_ctb_size)) == 0) bs = 0; if (bs) s->vertical_bs[(x0 >> 3) + ((y0 + i) >> 2) * s->bs_width] = bs; } } // bs for TU internal vertical PU boundaries if (log2_trafo_size > log2_min_pu_size && !is_intra) for (j = 0; j < (1 << log2_trafo_size); j += 4) { int y_pu = (y0 + j) >> log2_min_pu_size; int y_tu = (y0 + j) >> log2_min_tu_size; for (i = 8; i < (1 << log2_trafo_size); i += 8) { int xp_pu = (x0 + i - 1) >> log2_min_pu_size; int xq_pu = (x0 + i) >> log2_min_pu_size; int xp_tu = (x0 + i - 1) >> log2_min_tu_size; int xq_tu = (x0 + i) >> log2_min_tu_size; MvField *left = &tab_mvf[y_pu * min_pu_width + xp_pu]; MvField *curr = &tab_mvf[y_pu * min_pu_width + xq_pu]; uint8_t left_cbf_luma = s->cbf_luma[y_tu * min_tu_width + xp_tu]; uint8_t curr_cbf_luma = s->cbf_luma[y_tu * min_tu_width + xq_tu]; RefPicList *left_refPicList = ff_hevc_get_ref_list(s, s->ref, x0 + i - 1, y0 + j); bs = boundary_strength(s, curr, curr_cbf_luma, left, left_cbf_luma, left_refPicList, 0); if (bs) s->vertical_bs[((x0 + i) >> 3) + ((y0 + j) >> 2) * s->bs_width] = bs; } } } | 22,632 |
0 | int ffv1_init_slice_state(FFV1Context *f, FFV1Context *fs) { int j; fs->plane_count = f->plane_count; fs->transparency = f->transparency; for (j = 0; j < f->plane_count; j++) { PlaneContext *const p = &fs->plane[j]; if (fs->ac) { if (!p->state) p->state = av_malloc(CONTEXT_SIZE * p->context_count * sizeof(uint8_t)); if (!p->state) return AVERROR(ENOMEM); } else { if (!p->vlc_state) p->vlc_state = av_malloc(p->context_count * sizeof(VlcState)); if (!p->vlc_state) return AVERROR(ENOMEM); } } if (fs->ac > 1) { //FIXME only redo if state_transition changed for (j = 1; j < 256; j++) { fs->c.one_state[j] = f->state_transition[j]; fs->c.zero_state[256 - j] = 256 - fs->c.one_state[j]; } } return 0; } | 22,633 |
1 | static int vdpau_vc1_start_frame(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size) { VC1Context * const v = avctx->priv_data; MpegEncContext * const s = &v->s; Picture *pic = s->current_picture_ptr; struct vdpau_picture_context *pic_ctx = pic->hwaccel_picture_private; VdpPictureInfoVC1 *info = &pic_ctx->info.vc1; VdpVideoSurface ref; /* fill LvPictureInfoVC1 struct */ info->forward_reference = VDP_INVALID_HANDLE; info->backward_reference = VDP_INVALID_HANDLE; switch (s->pict_type) { case AV_PICTURE_TYPE_B: ref = ff_vdpau_get_surface_id(&s->next_picture.f); assert(ref != VDP_INVALID_HANDLE); info->backward_reference = ref; /* fall-through */ case AV_PICTURE_TYPE_P: ref = ff_vdpau_get_surface_id(&s->last_picture.f); assert(ref != VDP_INVALID_HANDLE); info->forward_reference = ref; } info->slice_count = 0; if (v->bi_type) info->picture_type = 4; else info->picture_type = s->pict_type - 1 + s->pict_type / 3; info->frame_coding_mode = v->fcm ? (v->fcm + 1) : 0; info->postprocflag = v->postprocflag; info->pulldown = v->broadcast; info->interlace = v->interlace; info->tfcntrflag = v->tfcntrflag; info->finterpflag = v->finterpflag; info->psf = v->psf; info->dquant = v->dquant; info->panscan_flag = v->panscanflag; info->refdist_flag = v->refdist_flag; info->quantizer = v->quantizer_mode; info->extended_mv = v->extended_mv; info->extended_dmv = v->extended_dmv; info->overlap = v->overlap; info->vstransform = v->vstransform; info->loopfilter = v->s.loop_filter; info->fastuvmc = v->fastuvmc; info->range_mapy_flag = v->range_mapy_flag; info->range_mapy = v->range_mapy; info->range_mapuv_flag = v->range_mapuv_flag; info->range_mapuv = v->range_mapuv; /* Specific to simple/main profile only */ info->multires = v->multires; info->syncmarker = v->resync_marker; info->rangered = v->rangered | (v->rangeredfrm << 1); info->maxbframes = v->s.max_b_frames; info->deblockEnable = v->postprocflag & 1; info->pquant = v->pq; return ff_vdpau_common_start_frame(pic_ctx, buffer, size); } | 22,634 |
1 | static int cinvideo_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; CinVideoContext *cin = avctx->priv_data; int i, y, palette_type, palette_colors_count, bitmap_frame_type, bitmap_frame_size; cin->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE; if (avctx->reget_buffer(avctx, &cin->frame)) { av_log(cin->avctx, AV_LOG_ERROR, "delphinecinvideo: reget_buffer() failed to allocate a frame\n"); return -1; } palette_type = buf[0]; palette_colors_count = AV_RL16(buf+1); bitmap_frame_type = buf[3]; buf += 4; bitmap_frame_size = buf_size - 4; /* handle palette */ if (palette_type == 0) { for (i = 0; i < palette_colors_count; ++i) { cin->palette[i] = bytestream_get_le24(&buf); bitmap_frame_size -= 3; } } else { for (i = 0; i < palette_colors_count; ++i) { cin->palette[buf[0]] = AV_RL24(buf+1); buf += 4; bitmap_frame_size -= 4; } } memcpy(cin->frame.data[1], cin->palette, sizeof(cin->palette)); cin->frame.palette_has_changed = 1; /* note: the decoding routines below assumes that surface.width = surface.pitch */ switch (bitmap_frame_type) { case 9: cin_decode_rle(buf, bitmap_frame_size, cin->bitmap_table[CIN_CUR_BMP], cin->bitmap_size); break; case 34: cin_decode_rle(buf, bitmap_frame_size, cin->bitmap_table[CIN_CUR_BMP], cin->bitmap_size); cin_apply_delta_data(cin->bitmap_table[CIN_PRE_BMP], cin->bitmap_table[CIN_CUR_BMP], cin->bitmap_size); break; case 35: cin_decode_huffman(buf, bitmap_frame_size, cin->bitmap_table[CIN_INT_BMP], cin->bitmap_size); cin_decode_rle(cin->bitmap_table[CIN_INT_BMP], bitmap_frame_size, cin->bitmap_table[CIN_CUR_BMP], cin->bitmap_size); break; case 36: bitmap_frame_size = cin_decode_huffman(buf, bitmap_frame_size, cin->bitmap_table[CIN_INT_BMP], cin->bitmap_size); cin_decode_rle(cin->bitmap_table[CIN_INT_BMP], bitmap_frame_size, cin->bitmap_table[CIN_CUR_BMP], cin->bitmap_size); cin_apply_delta_data(cin->bitmap_table[CIN_PRE_BMP], cin->bitmap_table[CIN_CUR_BMP], cin->bitmap_size); break; case 37: cin_decode_huffman(buf, bitmap_frame_size, cin->bitmap_table[CIN_CUR_BMP], cin->bitmap_size); break; case 38: cin_decode_lzss(buf, bitmap_frame_size, cin->bitmap_table[CIN_CUR_BMP], cin->bitmap_size); break; case 39: cin_decode_lzss(buf, bitmap_frame_size, cin->bitmap_table[CIN_CUR_BMP], cin->bitmap_size); cin_apply_delta_data(cin->bitmap_table[CIN_PRE_BMP], cin->bitmap_table[CIN_CUR_BMP], cin->bitmap_size); break; } for (y = 0; y < cin->avctx->height; ++y) memcpy(cin->frame.data[0] + (cin->avctx->height - 1 - y) * cin->frame.linesize[0], cin->bitmap_table[CIN_CUR_BMP] + y * cin->avctx->width, cin->avctx->width); FFSWAP(uint8_t *, cin->bitmap_table[CIN_CUR_BMP], cin->bitmap_table[CIN_PRE_BMP]); *data_size = sizeof(AVFrame); *(AVFrame *)data = cin->frame; return buf_size; } | 22,635 |
1 | static void RENAME(decode_rgb_frame)(FFV1Context *s, uint8_t *src[3], int w, int h, int stride[3]) { int x, y, p; TYPE *sample[4][2]; int lbd = s->avctx->bits_per_raw_sample <= 8; int bits = s->avctx->bits_per_raw_sample > 0 ? s->avctx->bits_per_raw_sample : 8; int offset = 1 << bits; for (x = 0; x < 4; x++) { sample[x][0] = RENAME(s->sample_buffer) + x * 2 * (w + 6) + 3; sample[x][1] = RENAME(s->sample_buffer) + (x * 2 + 1) * (w + 6) + 3; } s->run_index = 0; memset(RENAME(s->sample_buffer), 0, 8 * (w + 6) * sizeof(*RENAME(s->sample_buffer))); for (y = 0; y < h; y++) { for (p = 0; p < 3 + s->transparency; p++) { TYPE *temp = sample[p][0]; // FIXME: try a normal buffer sample[p][0] = sample[p][1]; sample[p][1] = temp; sample[p][1][-1]= sample[p][0][0 ]; sample[p][0][ w]= sample[p][0][w-1]; if (lbd && s->slice_coding_mode == 0) RENAME(decode_line)(s, w, sample[p], (p + 1)/2, 9); else RENAME(decode_line)(s, w, sample[p], (p + 1)/2, bits + (s->slice_coding_mode != 1)); } for (x = 0; x < w; x++) { int g = sample[0][1][x]; int b = sample[1][1][x]; int r = sample[2][1][x]; int a = sample[3][1][x]; if (s->slice_coding_mode != 1) { b -= offset; r -= offset; g -= (b * s->slice_rct_by_coef + r * s->slice_rct_ry_coef) >> 2; b += g; r += g; } if (lbd) *((uint32_t*)(src[0] + x*4 + stride[0]*y)) = b + (g<<8) + (r<<16) + (a<<24); else if (sizeof(TYPE) == 4) { *((uint16_t*)(src[0] + x*2 + stride[0]*y)) = g; *((uint16_t*)(src[1] + x*2 + stride[1]*y)) = b; *((uint16_t*)(src[2] + x*2 + stride[2]*y)) = r; } else { *((uint16_t*)(src[0] + x*2 + stride[0]*y)) = b; *((uint16_t*)(src[1] + x*2 + stride[1]*y)) = g; *((uint16_t*)(src[2] + x*2 + stride[2]*y)) = r; } } } } | 22,636 |
0 | QPEL_H264(put_, PUT_OP, mmxext) QPEL_H264(avg_, AVG_MMXEXT_OP, mmxext) QPEL_H264_V_XMM(put_, PUT_OP, sse2) QPEL_H264_V_XMM(avg_,AVG_MMXEXT_OP, sse2) QPEL_H264_HV_XMM(put_, PUT_OP, sse2) QPEL_H264_HV_XMM(avg_,AVG_MMXEXT_OP, sse2) QPEL_H264_H_XMM(put_, PUT_OP, ssse3) QPEL_H264_H_XMM(avg_,AVG_MMXEXT_OP, ssse3) QPEL_H264_HV_XMM(put_, PUT_OP, ssse3) QPEL_H264_HV_XMM(avg_,AVG_MMXEXT_OP, ssse3) #undef PAVGB H264_MC_4816(mmxext) H264_MC_816(H264_MC_V, sse2) H264_MC_816(H264_MC_HV, sse2) H264_MC_816(H264_MC_H, ssse3) H264_MC_816(H264_MC_HV, ssse3) //10bit #define LUMA_MC_OP(OP, NUM, DEPTH, TYPE, OPT) \ void ff_ ## OP ## _h264_qpel ## NUM ## _ ## TYPE ## _ ## DEPTH ## _ ## OPT \ (uint8_t *dst, uint8_t *src, int stride); #define LUMA_MC_ALL(DEPTH, TYPE, OPT) \ LUMA_MC_OP(put, 4, DEPTH, TYPE, OPT) \ LUMA_MC_OP(avg, 4, DEPTH, TYPE, OPT) \ LUMA_MC_OP(put, 8, DEPTH, TYPE, OPT) \ LUMA_MC_OP(avg, 8, DEPTH, TYPE, OPT) \ LUMA_MC_OP(put, 16, DEPTH, TYPE, OPT) \ LUMA_MC_OP(avg, 16, DEPTH, TYPE, OPT) #define LUMA_MC_816(DEPTH, TYPE, OPT) \ LUMA_MC_OP(put, 8, DEPTH, TYPE, OPT) \ LUMA_MC_OP(avg, 8, DEPTH, TYPE, OPT) \ LUMA_MC_OP(put, 16, DEPTH, TYPE, OPT) \ LUMA_MC_OP(avg, 16, DEPTH, TYPE, OPT) LUMA_MC_ALL(10, mc00, mmxext) LUMA_MC_ALL(10, mc10, mmxext) LUMA_MC_ALL(10, mc20, mmxext) LUMA_MC_ALL(10, mc30, mmxext) LUMA_MC_ALL(10, mc01, mmxext) LUMA_MC_ALL(10, mc11, mmxext) LUMA_MC_ALL(10, mc21, mmxext) LUMA_MC_ALL(10, mc31, mmxext) LUMA_MC_ALL(10, mc02, mmxext) LUMA_MC_ALL(10, mc12, mmxext) LUMA_MC_ALL(10, mc22, mmxext) LUMA_MC_ALL(10, mc32, mmxext) LUMA_MC_ALL(10, mc03, mmxext) LUMA_MC_ALL(10, mc13, mmxext) LUMA_MC_ALL(10, mc23, mmxext) LUMA_MC_ALL(10, mc33, mmxext) LUMA_MC_816(10, mc00, sse2) LUMA_MC_816(10, mc10, sse2) LUMA_MC_816(10, mc10, sse2_cache64) LUMA_MC_816(10, mc10, ssse3_cache64) LUMA_MC_816(10, mc20, sse2) LUMA_MC_816(10, mc20, sse2_cache64) LUMA_MC_816(10, mc20, ssse3_cache64) LUMA_MC_816(10, mc30, sse2) LUMA_MC_816(10, mc30, sse2_cache64) LUMA_MC_816(10, mc30, ssse3_cache64) LUMA_MC_816(10, mc01, sse2) LUMA_MC_816(10, mc11, sse2) LUMA_MC_816(10, mc21, sse2) LUMA_MC_816(10, mc31, sse2) LUMA_MC_816(10, mc02, sse2) LUMA_MC_816(10, mc12, sse2) LUMA_MC_816(10, mc22, sse2) LUMA_MC_816(10, mc32, sse2) LUMA_MC_816(10, mc03, sse2) LUMA_MC_816(10, mc13, sse2) LUMA_MC_816(10, mc23, sse2) LUMA_MC_816(10, mc33, sse2) #define QPEL16_OPMC(OP, MC, MMX)\ void ff_ ## OP ## _h264_qpel16_ ## MC ## _10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ ff_ ## OP ## _h264_qpel8_ ## MC ## _10_ ## MMX(dst , src , stride);\ ff_ ## OP ## _h264_qpel8_ ## MC ## _10_ ## MMX(dst+16, src+16, stride);\ src += 8*stride;\ dst += 8*stride;\ ff_ ## OP ## _h264_qpel8_ ## MC ## _10_ ## MMX(dst , src , stride);\ ff_ ## OP ## _h264_qpel8_ ## MC ## _10_ ## MMX(dst+16, src+16, stride);\ } #define QPEL16_OP(MC, MMX)\ QPEL16_OPMC(put, MC, MMX)\ QPEL16_OPMC(avg, MC, MMX) #define QPEL16(MMX)\ QPEL16_OP(mc00, MMX)\ QPEL16_OP(mc01, MMX)\ QPEL16_OP(mc02, MMX)\ QPEL16_OP(mc03, MMX)\ QPEL16_OP(mc10, MMX)\ QPEL16_OP(mc11, MMX)\ QPEL16_OP(mc12, MMX)\ QPEL16_OP(mc13, MMX)\ QPEL16_OP(mc20, MMX)\ QPEL16_OP(mc21, MMX)\ QPEL16_OP(mc22, MMX)\ QPEL16_OP(mc23, MMX)\ QPEL16_OP(mc30, MMX)\ QPEL16_OP(mc31, MMX)\ QPEL16_OP(mc32, MMX)\ QPEL16_OP(mc33, MMX) #if ARCH_X86_32 && HAVE_YASM && CONFIG_H264QPEL // ARCH_X86_64 implies SSE2+ QPEL16(mmxext) #endif #endif /* HAVE_YASM */ #define SET_QPEL_FUNCS(PFX, IDX, SIZE, CPU, PREFIX) \ do { \ c->PFX ## _pixels_tab[IDX][ 0] = PREFIX ## PFX ## SIZE ## _mc00_ ## CPU; \ c->PFX ## _pixels_tab[IDX][ 1] = PREFIX ## PFX ## 
SIZE ## _mc10_ ## CPU; \ c->PFX ## _pixels_tab[IDX][ 2] = PREFIX ## PFX ## SIZE ## _mc20_ ## CPU; \ c->PFX ## _pixels_tab[IDX][ 3] = PREFIX ## PFX ## SIZE ## _mc30_ ## CPU; \ c->PFX ## _pixels_tab[IDX][ 4] = PREFIX ## PFX ## SIZE ## _mc01_ ## CPU; \ c->PFX ## _pixels_tab[IDX][ 5] = PREFIX ## PFX ## SIZE ## _mc11_ ## CPU; \ c->PFX ## _pixels_tab[IDX][ 6] = PREFIX ## PFX ## SIZE ## _mc21_ ## CPU; \ c->PFX ## _pixels_tab[IDX][ 7] = PREFIX ## PFX ## SIZE ## _mc31_ ## CPU; \ c->PFX ## _pixels_tab[IDX][ 8] = PREFIX ## PFX ## SIZE ## _mc02_ ## CPU; \ c->PFX ## _pixels_tab[IDX][ 9] = PREFIX ## PFX ## SIZE ## _mc12_ ## CPU; \ c->PFX ## _pixels_tab[IDX][10] = PREFIX ## PFX ## SIZE ## _mc22_ ## CPU; \ c->PFX ## _pixels_tab[IDX][11] = PREFIX ## PFX ## SIZE ## _mc32_ ## CPU; \ c->PFX ## _pixels_tab[IDX][12] = PREFIX ## PFX ## SIZE ## _mc03_ ## CPU; \ c->PFX ## _pixels_tab[IDX][13] = PREFIX ## PFX ## SIZE ## _mc13_ ## CPU; \ c->PFX ## _pixels_tab[IDX][14] = PREFIX ## PFX ## SIZE ## _mc23_ ## CPU; \ c->PFX ## _pixels_tab[IDX][15] = PREFIX ## PFX ## SIZE ## _mc33_ ## CPU; \ } while (0) #define H264_QPEL_FUNCS(x, y, CPU) \ do { \ c->put_h264_qpel_pixels_tab[0][x + y * 4] = put_h264_qpel16_mc ## x ## y ## _ ## CPU; \ c->put_h264_qpel_pixels_tab[1][x + y * 4] = put_h264_qpel8_mc ## x ## y ## _ ## CPU; \ c->avg_h264_qpel_pixels_tab[0][x + y * 4] = avg_h264_qpel16_mc ## x ## y ## _ ## CPU; \ c->avg_h264_qpel_pixels_tab[1][x + y * 4] = avg_h264_qpel8_mc ## x ## y ## _ ## CPU; \ } while (0) #define H264_QPEL_FUNCS_10(x, y, CPU) \ do { \ c->put_h264_qpel_pixels_tab[0][x + y * 4] = ff_put_h264_qpel16_mc ## x ## y ## _10_ ## CPU; \ c->put_h264_qpel_pixels_tab[1][x + y * 4] = ff_put_h264_qpel8_mc ## x ## y ## _10_ ## CPU; \ c->avg_h264_qpel_pixels_tab[0][x + y * 4] = ff_avg_h264_qpel16_mc ## x ## y ## _10_ ## CPU; \ c->avg_h264_qpel_pixels_tab[1][x + y * 4] = ff_avg_h264_qpel8_mc ## x ## y ## _10_ ## CPU; \ } while (0) void ff_h264qpel_init_x86(H264QpelContext *c, int bit_depth) { int high_bit_depth = bit_depth > 8; int mm_flags = av_get_cpu_flags(); #if HAVE_MMXEXT_EXTERNAL if (!high_bit_depth) { SET_QPEL_FUNCS(put_h264_qpel, 0, 16, mmxext, ); SET_QPEL_FUNCS(put_h264_qpel, 1, 8, mmxext, ); SET_QPEL_FUNCS(put_h264_qpel, 2, 4, mmxext, ); SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, mmxext, ); SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, mmxext, ); SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, mmxext, ); } else if (bit_depth == 10) { #if !ARCH_X86_64 SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 10_mmxext, ff_); SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 10_mmxext, ff_); SET_QPEL_FUNCS(put_h264_qpel, 1, 8, 10_mmxext, ff_); SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, 10_mmxext, ff_); #endif SET_QPEL_FUNCS(put_h264_qpel, 2, 4, 10_mmxext, ff_); SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, 10_mmxext, ff_); } #endif #if HAVE_SSE2_EXTERNAL if (!(mm_flags & AV_CPU_FLAG_SSE2SLOW) && !high_bit_depth) { // these functions are slower than mmx on AMD, but faster on Intel H264_QPEL_FUNCS(0, 0, sse2); } if (!high_bit_depth) { H264_QPEL_FUNCS(0, 1, sse2); H264_QPEL_FUNCS(0, 2, sse2); H264_QPEL_FUNCS(0, 3, sse2); H264_QPEL_FUNCS(1, 1, sse2); H264_QPEL_FUNCS(1, 2, sse2); H264_QPEL_FUNCS(1, 3, sse2); H264_QPEL_FUNCS(2, 1, sse2); H264_QPEL_FUNCS(2, 2, sse2); H264_QPEL_FUNCS(2, 3, sse2); H264_QPEL_FUNCS(3, 1, sse2); H264_QPEL_FUNCS(3, 2, sse2); H264_QPEL_FUNCS(3, 3, sse2); } if (bit_depth == 10) { SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 10_sse2, ff_); SET_QPEL_FUNCS(put_h264_qpel, 1, 8, 10_sse2, ff_); SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 10_sse2, ff_); SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, 10_sse2, 
ff_); H264_QPEL_FUNCS_10(1, 0, sse2_cache64); H264_QPEL_FUNCS_10(2, 0, sse2_cache64); H264_QPEL_FUNCS_10(3, 0, sse2_cache64); } #endif #if HAVE_SSSE3_EXTERNAL if (!high_bit_depth) { H264_QPEL_FUNCS(1, 0, ssse3); H264_QPEL_FUNCS(1, 1, ssse3); H264_QPEL_FUNCS(1, 2, ssse3); H264_QPEL_FUNCS(1, 3, ssse3); H264_QPEL_FUNCS(2, 0, ssse3); H264_QPEL_FUNCS(2, 1, ssse3); H264_QPEL_FUNCS(2, 2, ssse3); H264_QPEL_FUNCS(2, 3, ssse3); H264_QPEL_FUNCS(3, 0, ssse3); H264_QPEL_FUNCS(3, 1, ssse3); H264_QPEL_FUNCS(3, 2, ssse3); H264_QPEL_FUNCS(3, 3, ssse3); } if (bit_depth == 10) { H264_QPEL_FUNCS_10(1, 0, ssse3_cache64); H264_QPEL_FUNCS_10(2, 0, ssse3_cache64); H264_QPEL_FUNCS_10(3, 0, ssse3_cache64); } #endif #if HAVE_AVX_EXTERNAL if (bit_depth == 10) { H264_QPEL_FUNCS_10(1, 0, sse2); H264_QPEL_FUNCS_10(2, 0, sse2); H264_QPEL_FUNCS_10(3, 0, sse2); } #endif } | 22,638 |
1 | static coroutine_fn int qcow2_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int count, BdrvRequestFlags flags) { int ret; BDRVQcow2State *s = bs->opaque; uint32_t head = offset % s->cluster_size; uint32_t tail = (offset + count) % s->cluster_size; trace_qcow2_pwrite_zeroes_start_req(qemu_coroutine_self(), offset, count); if (head || tail) { int64_t cl_start = (offset - head) >> BDRV_SECTOR_BITS; uint64_t off; unsigned int nr; assert(head + count <= s->cluster_size); /* check whether remainder of cluster already reads as zero */ if (!(is_zero_sectors(bs, cl_start, DIV_ROUND_UP(head, BDRV_SECTOR_SIZE)) && is_zero_sectors(bs, (offset + count) >> BDRV_SECTOR_BITS, DIV_ROUND_UP(-tail & (s->cluster_size - 1), BDRV_SECTOR_SIZE)))) { return -ENOTSUP; qemu_co_mutex_lock(&s->lock); /* We can have new write after previous check */ offset = cl_start << BDRV_SECTOR_BITS; count = s->cluster_size; nr = s->cluster_size; ret = qcow2_get_cluster_offset(bs, offset, &nr, &off); if (ret != QCOW2_CLUSTER_UNALLOCATED && ret != QCOW2_CLUSTER_ZERO_PLAIN && ret != QCOW2_CLUSTER_ZERO_ALLOC) { qemu_co_mutex_unlock(&s->lock); return -ENOTSUP; } else { qemu_co_mutex_lock(&s->lock); trace_qcow2_pwrite_zeroes(qemu_coroutine_self(), offset, count); /* Whatever is left can use real zero clusters */ ret = qcow2_zero_clusters(bs, offset, count >> BDRV_SECTOR_BITS, flags); qemu_co_mutex_unlock(&s->lock); return ret; | 22,639 |
1 | static void virtio_blk_complete_request(VirtIOBlockReq *req, unsigned char status) { VirtIOBlock *s = req->dev; VirtIODevice *vdev = VIRTIO_DEVICE(s); trace_virtio_blk_req_complete(req, status); stb_p(&req->in->status, status); virtqueue_push(s->vq, req->elem, req->qiov.size + sizeof(*req->in)); virtio_notify(vdev, s->vq); } | 22,641 |
1 | static void vfio_enable_msi(VFIODevice *vdev) { int ret, i; vfio_disable_interrupts(vdev); vdev->nr_vectors = msi_nr_vectors_allocated(&vdev->pdev); retry: vdev->msi_vectors = g_malloc0(vdev->nr_vectors * sizeof(VFIOMSIVector)); for (i = 0; i < vdev->nr_vectors; i++) { VFIOMSIVector *vector = &vdev->msi_vectors[i]; vector->vdev = vdev; vector->use = true; if (event_notifier_init(&vector->interrupt, 0)) { error_report("vfio: Error: event_notifier_init failed"); } vector->msg = msi_get_message(&vdev->pdev, i); /* * Attempt to enable route through KVM irqchip, * default to userspace handling if unavailable. */ vector->virq = kvm_irqchip_add_msi_route(kvm_state, vector->msg); if (vector->virq < 0 || kvm_irqchip_add_irqfd_notifier(kvm_state, &vector->interrupt, NULL, vector->virq) < 0) { qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt), vfio_msi_interrupt, NULL, vector); } } ret = vfio_enable_vectors(vdev, false); if (ret) { if (ret < 0) { error_report("vfio: Error: Failed to setup MSI fds: %m"); } else if (ret != vdev->nr_vectors) { error_report("vfio: Error: Failed to enable %d " "MSI vectors, retry with %d", vdev->nr_vectors, ret); } for (i = 0; i < vdev->nr_vectors; i++) { VFIOMSIVector *vector = &vdev->msi_vectors[i]; if (vector->virq >= 0) { kvm_irqchip_remove_irqfd_notifier(kvm_state, &vector->interrupt, vector->virq); kvm_irqchip_release_virq(kvm_state, vector->virq); vector->virq = -1; } else { qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt), NULL, NULL, NULL); } event_notifier_cleanup(&vector->interrupt); } g_free(vdev->msi_vectors); if (ret > 0 && ret != vdev->nr_vectors) { vdev->nr_vectors = ret; goto retry; } vdev->nr_vectors = 0; return; } vdev->interrupt = VFIO_INT_MSI; DPRINTF("%s(%04x:%02x:%02x.%x) Enabled %d MSI vectors\n", __func__, vdev->host.domain, vdev->host.bus, vdev->host.slot, vdev->host.function, vdev->nr_vectors); } | 22,642 |
1 | static int decode_lowdelay(DiracContext *s) { AVCodecContext *avctx = s->avctx; int slice_x, slice_y, bufsize; int64_t coef_buf_size, bytes = 0; const uint8_t *buf; DiracSlice *slices; SliceCoeffs tmp[MAX_DWT_LEVELS]; int slice_num = 0; if (s->slice_params_num_buf != (s->num_x * s->num_y)) { s->slice_params_buf = av_realloc_f(s->slice_params_buf, s->num_x * s->num_y, sizeof(DiracSlice)); if (!s->slice_params_buf) { av_log(s->avctx, AV_LOG_ERROR, "slice params buffer allocation failure\n"); return AVERROR(ENOMEM); } s->slice_params_num_buf = s->num_x * s->num_y; } slices = s->slice_params_buf; /* 8 becacuse that's how much the golomb reader could overread junk data * from another plane/slice at most, and 512 because SIMD */ coef_buf_size = subband_coeffs(s, s->num_x - 1, s->num_y - 1, 0, tmp) + 8; coef_buf_size = (coef_buf_size << (1 + s->pshift)) + 512; if (s->threads_num_buf != avctx->thread_count || s->thread_buf_size != coef_buf_size) { s->threads_num_buf = avctx->thread_count; s->thread_buf_size = coef_buf_size; s->thread_buf = av_realloc_f(s->thread_buf, avctx->thread_count, s->thread_buf_size); if (!s->thread_buf) { av_log(s->avctx, AV_LOG_ERROR, "thread buffer allocation failure\n"); return AVERROR(ENOMEM); } } align_get_bits(&s->gb); /*[DIRAC_STD] 13.5.2 Slices. slice(sx,sy) */ buf = s->gb.buffer + get_bits_count(&s->gb)/8; bufsize = get_bits_left(&s->gb); if (s->hq_picture) { int i; for (slice_y = 0; bufsize > 0 && slice_y < s->num_y; slice_y++) { for (slice_x = 0; bufsize > 0 && slice_x < s->num_x; slice_x++) { bytes = s->highquality.prefix_bytes + 1; for (i = 0; i < 3; i++) { if (bytes <= bufsize/8) bytes += buf[bytes] * s->highquality.size_scaler + 1; } if (bytes >= INT_MAX || bytes*8 > bufsize) { av_log(s->avctx, AV_LOG_ERROR, "too many bytes\n"); return AVERROR_INVALIDDATA; } slices[slice_num].bytes = bytes; slices[slice_num].slice_x = slice_x; slices[slice_num].slice_y = slice_y; init_get_bits(&slices[slice_num].gb, buf, bufsize); slice_num++; buf += bytes; if (bufsize/8 >= bytes) bufsize -= bytes*8; else bufsize = 0; } } if (s->num_x*s->num_y != slice_num) { av_log(s->avctx, AV_LOG_ERROR, "too few slices\n"); return AVERROR_INVALIDDATA; } avctx->execute2(avctx, decode_hq_slice_row, slices, NULL, s->num_y); } else { for (slice_y = 0; bufsize > 0 && slice_y < s->num_y; slice_y++) { for (slice_x = 0; bufsize > 0 && slice_x < s->num_x; slice_x++) { bytes = (slice_num+1) * (int64_t)s->lowdelay.bytes.num / s->lowdelay.bytes.den - slice_num * (int64_t)s->lowdelay.bytes.num / s->lowdelay.bytes.den; slices[slice_num].bytes = bytes; slices[slice_num].slice_x = slice_x; slices[slice_num].slice_y = slice_y; init_get_bits(&slices[slice_num].gb, buf, bufsize); slice_num++; buf += bytes; if (bufsize/8 >= bytes) bufsize -= bytes*8; else bufsize = 0; } } avctx->execute(avctx, decode_lowdelay_slice, slices, NULL, slice_num, sizeof(DiracSlice)); /* [DIRAC_STD] 13.5.2 Slices */ } if (s->dc_prediction) { if (s->pshift) { intra_dc_prediction_10(&s->plane[0].band[0][0]); /* [DIRAC_STD] 13.3 intra_dc_prediction() */ intra_dc_prediction_10(&s->plane[1].band[0][0]); /* [DIRAC_STD] 13.3 intra_dc_prediction() */ intra_dc_prediction_10(&s->plane[2].band[0][0]); /* [DIRAC_STD] 13.3 intra_dc_prediction() */ } else { intra_dc_prediction_8(&s->plane[0].band[0][0]); intra_dc_prediction_8(&s->plane[1].band[0][0]); intra_dc_prediction_8(&s->plane[2].band[0][0]); } } return 0; } | 22,643 |
1 | static int xface_encode_frame(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *frame, int *got_packet) { XFaceContext *xface = avctx->priv_data; ProbRangesQueue pq = {{ 0 }, 0}; uint8_t bitmap_copy[XFACE_PIXELS]; BigInt b = {0}; int i, j, k, ret = 0; const uint8_t *buf; uint8_t *p; char intbuf[XFACE_MAX_DIGITS]; if (avctx->width || avctx->height) { if (avctx->width != XFACE_WIDTH || avctx->height != XFACE_HEIGHT) { av_log(avctx, AV_LOG_ERROR, "Size value %dx%d not supported, only accepts a size of %dx%d\n", avctx->width, avctx->height, XFACE_WIDTH, XFACE_HEIGHT); return AVERROR(EINVAL); } } avctx->width = XFACE_WIDTH; avctx->height = XFACE_HEIGHT; /* convert image from MONOWHITE to 1=black 0=white bitmap */ buf = frame->data[0]; i = j = 0; do { for (k = 0; k < 8; k++) xface->bitmap[i++] = (buf[j]>>(7-k))&1; if (++j == XFACE_WIDTH/8) { buf += frame->linesize[0]; j = 0; } } while (i < XFACE_PIXELS); /* create a copy of bitmap */ memcpy(bitmap_copy, xface->bitmap, XFACE_PIXELS); ff_xface_generate_face(xface->bitmap, bitmap_copy); encode_block(xface->bitmap, 16, 16, 0, &pq); encode_block(xface->bitmap + 16, 16, 16, 0, &pq); encode_block(xface->bitmap + 32, 16, 16, 0, &pq); encode_block(xface->bitmap + XFACE_WIDTH * 16, 16, 16, 0, &pq); encode_block(xface->bitmap + XFACE_WIDTH * 16 + 16, 16, 16, 0, &pq); encode_block(xface->bitmap + XFACE_WIDTH * 16 + 32, 16, 16, 0, &pq); encode_block(xface->bitmap + XFACE_WIDTH * 32, 16, 16, 0, &pq); encode_block(xface->bitmap + XFACE_WIDTH * 32 + 16, 16, 16, 0, &pq); encode_block(xface->bitmap + XFACE_WIDTH * 32 + 32, 16, 16, 0, &pq); while (pq.prob_ranges_idx > 0) push_integer(&b, pq.prob_ranges[--pq.prob_ranges_idx]); /* write the inverted big integer in b to intbuf */ i = 0; while (b.nb_words) { uint8_t r; ff_big_div(&b, XFACE_PRINTS, &r); intbuf[i++] = r + XFACE_FIRST_PRINT; } if ((ret = ff_alloc_packet2(avctx, pkt, i+2)) < 0) return ret; /* revert the number, and close the buffer */ p = pkt->data; while (--i >= 0) *(p++) = intbuf[i]; *(p++) = '\n'; *(p++) = 0; pkt->flags |= AV_PKT_FLAG_KEY; *got_packet = 1; return 0; } | 22,644 |
1 | static int mov_text_decode_frame(AVCodecContext *avctx, void *data, int *got_sub_ptr, AVPacket *avpkt) { AVSubtitle *sub = data; int ret, ts_start, ts_end; AVBPrint buf; char *ptr = avpkt->data; char *end; //char *ptr_temp; int text_length, tsmb_type, style_entries, tsmb_size; int **style_start = {0,}; int **style_end = {0,}; int **style_flags = {0,}; const uint8_t *tsmb; int index, i; int *flag; int *style_pos; if (!ptr || avpkt->size < 2) return AVERROR_INVALIDDATA; /* * A packet of size two with value zero is an empty subtitle * used to mark the end of the previous non-empty subtitle. * We can just drop them here as we have duration information * already. If the value is non-zero, then it's technically a * bad packet. */ if (avpkt->size == 2) return AV_RB16(ptr) == 0 ? 0 : AVERROR_INVALIDDATA; /* * The first two bytes of the packet are the length of the text string * In complex cases, there are style descriptors appended to the string * so we can't just assume the packet size is the string size. */ text_length = AV_RB16(ptr); end = ptr + FFMIN(2 + text_length, avpkt->size); ptr += 2; ts_start = av_rescale_q(avpkt->pts, avctx->time_base, (AVRational){1,100}); ts_end = av_rescale_q(avpkt->pts + avpkt->duration, avctx->time_base, (AVRational){1,100}); tsmb_size = 0; // Note that the spec recommends lines be no longer than 2048 characters. av_bprint_init(&buf, 0, AV_BPRINT_SIZE_UNLIMITED); if (text_length + 2 != avpkt->size) { while (text_length + 2 + tsmb_size < avpkt->size) { tsmb = ptr + text_length + tsmb_size; tsmb_size = AV_RB32(tsmb); tsmb += 4; tsmb_type = AV_RB32(tsmb); tsmb += 4; if (tsmb_type == MKBETAG('s','t','y','l')) { style_entries = AV_RB16(tsmb); tsmb += 2; for(i = 0; i < style_entries; i++) { style_pos = av_malloc(4); *style_pos = AV_RB16(tsmb); index = i; av_dynarray_add(&style_start, &index, style_pos); tsmb += 2; style_pos = av_malloc(4); *style_pos = AV_RB16(tsmb); index = i; av_dynarray_add(&style_end, &index, style_pos); tsmb += 2; // fontID = AV_RB16(tsmb); tsmb += 2; flag = av_malloc(4); *flag = AV_RB8(tsmb); index = i; av_dynarray_add(&style_flags, &index, flag); //fontsize=AV_RB8(tsmb); tsmb += 2; // text-color-rgba tsmb += 4; } text_to_ass(&buf, ptr, end, style_start, style_end, style_flags, style_entries); av_freep(&style_start); av_freep(&style_end); av_freep(&style_flags); } } } else text_to_ass(&buf, ptr, end, NULL, NULL, 0, 0); ret = ff_ass_add_rect_bprint(sub, &buf, ts_start, ts_end - ts_start); av_bprint_finalize(&buf, NULL); if (ret < 0) return ret; *got_sub_ptr = sub->num_rects > 0; return avpkt->size; } | 22,645 |
1 | av_cold void ff_snow_common_end(SnowContext *s) { int plane_index, level, orientation, i; av_freep(&s->spatial_dwt_buffer); av_freep(&s->temp_dwt_buffer); av_freep(&s->spatial_idwt_buffer); av_freep(&s->temp_idwt_buffer); av_freep(&s->run_buffer); s->m.me.temp= NULL; av_freep(&s->m.me.scratchpad); av_freep(&s->m.me.map); av_freep(&s->m.me.score_map); av_freep(&s->m.obmc_scratchpad); av_freep(&s->block); av_freep(&s->scratchbuf); av_freep(&s->emu_edge_buffer); for(i=0; i<MAX_REF_FRAMES; i++){ av_freep(&s->ref_mvs[i]); av_freep(&s->ref_scores[i]); if(s->last_picture[i]->data[0]) { av_assert0(s->last_picture[i]->data[0] != s->current_picture->data[0]); } av_frame_free(&s->last_picture[i]); } for(plane_index=0; plane_index < s->nb_planes; plane_index++){ for(level=s->spatial_decomposition_count-1; level>=0; level--){ for(orientation=level ? 1 : 0; orientation<4; orientation++){ SubBand *b= &s->plane[plane_index].band[level][orientation]; av_freep(&b->x_coeff); } } } av_frame_free(&s->mconly_picture); av_frame_free(&s->current_picture); } | 22,646 |
1 | HELPER_LD_ATOMIC(ll, lw) #ifdef TARGET_MIPS64 HELPER_LD_ATOMIC(lld, ld) #endif #undef HELPER_LD_ATOMIC #define HELPER_ST_ATOMIC(name, ld_insn, st_insn, almask) \ target_ulong helper_##name(CPUMIPSState *env, target_ulong arg1, \ target_ulong arg2, int mem_idx) \ { \ target_long tmp; \ \ if (arg2 & almask) { \ env->CP0_BadVAddr = arg2; \ helper_raise_exception(env, EXCP_AdES); \ } \ if (do_translate_address(env, arg2, 1) == env->lladdr) { \ tmp = do_##ld_insn(env, arg2, mem_idx); \ if (tmp == env->llval) { \ do_##st_insn(env, arg2, arg1, mem_idx); \ return 1; \ } \ } \ return 0; \ } HELPER_ST_ATOMIC(sc, lw, sw, 0x3) #ifdef TARGET_MIPS64 HELPER_ST_ATOMIC(scd, ld, sd, 0x7) #endif #undef HELPER_ST_ATOMIC #endif #ifdef TARGET_WORDS_BIGENDIAN #define GET_LMASK(v) ((v) & 3) #define GET_OFFSET(addr, offset) (addr + (offset)) #else #define GET_LMASK(v) (((v) & 3) ^ 3) #define GET_OFFSET(addr, offset) (addr - (offset)) #endif target_ulong helper_lwl(CPUMIPSState *env, target_ulong arg1, target_ulong arg2, int mem_idx) { target_ulong tmp; tmp = do_lbu(env, arg2, mem_idx); arg1 = (arg1 & 0x00FFFFFF) | (tmp << 24); if (GET_LMASK(arg2) <= 2) { tmp = do_lbu(env, GET_OFFSET(arg2, 1), mem_idx); arg1 = (arg1 & 0xFF00FFFF) | (tmp << 16); } if (GET_LMASK(arg2) <= 1) { tmp = do_lbu(env, GET_OFFSET(arg2, 2), mem_idx); arg1 = (arg1 & 0xFFFF00FF) | (tmp << 8); } if (GET_LMASK(arg2) == 0) { tmp = do_lbu(env, GET_OFFSET(arg2, 3), mem_idx); arg1 = (arg1 & 0xFFFFFF00) | tmp; } return (int32_t)arg1; } | 22,647 |
0 | static int decode_frame(AVCodecContext * avctx, void *data, int *data_size, UINT8 * buf, int buf_size) { MPADecodeContext *s = avctx->priv_data; UINT32 header; UINT8 *buf_ptr; int len, out_size; short *out_samples = data; *data_size = 0; buf_ptr = buf; while (buf_size > 0) { len = s->inbuf_ptr - s->inbuf; if (s->frame_size == 0) { /* special case for next header for first frame in free format case (XXX: find a simpler method) */ if (s->free_format_next_header != 0) { s->inbuf[0] = s->free_format_next_header >> 24; s->inbuf[1] = s->free_format_next_header >> 16; s->inbuf[2] = s->free_format_next_header >> 8; s->inbuf[3] = s->free_format_next_header; s->inbuf_ptr = s->inbuf + 4; s->free_format_next_header = 0; goto got_header; } /* no header seen : find one. We need at least HEADER_SIZE bytes to parse it */ len = HEADER_SIZE - len; if (len > buf_size) len = buf_size; if (len > 0) { memcpy(s->inbuf_ptr, buf_ptr, len); buf_ptr += len; buf_size -= len; s->inbuf_ptr += len; } if ((s->inbuf_ptr - s->inbuf) >= HEADER_SIZE) { got_header: header = (s->inbuf[0] << 24) | (s->inbuf[1] << 16) | (s->inbuf[2] << 8) | s->inbuf[3]; if (check_header(header) < 0) { /* no sync found : move by one byte (inefficient, but simple!) */ memcpy(s->inbuf, s->inbuf + 1, s->inbuf_ptr - s->inbuf - 1); s->inbuf_ptr--; dprintf("skip %x\n", header); /* reset free format frame size to give a chance to get a new bitrate */ s->free_format_frame_size = 0; } else { if (decode_header(s, header) == 1) { /* free format: compute frame size */ s->frame_size = -1; memcpy(s->inbuf, s->inbuf + 1, s->inbuf_ptr - s->inbuf - 1); s->inbuf_ptr--; } else { /* update codec info */ avctx->sample_rate = s->sample_rate; avctx->channels = s->nb_channels; avctx->bit_rate = s->bit_rate; avctx->frame_size = s->frame_size; } } } } else if (s->frame_size == -1) { /* free format : find next sync to compute frame size */ len = MPA_MAX_CODED_FRAME_SIZE - len; if (len > buf_size) len = buf_size; if (len == 0) { /* frame too long: resync */ s->frame_size = 0; } else { UINT8 *p, *pend; UINT32 header1; int padding; memcpy(s->inbuf_ptr, buf_ptr, len); /* check for header */ p = s->inbuf_ptr - 3; pend = s->inbuf_ptr + len - 4; while (p <= pend) { header = (p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3]; header1 = (s->inbuf[0] << 24) | (s->inbuf[1] << 16) | (s->inbuf[2] << 8) | s->inbuf[3]; /* check with high probability that we have a valid header */ if ((header & SAME_HEADER_MASK) == (header1 & SAME_HEADER_MASK)) { /* header found: update pointers */ len = (p + 4) - s->inbuf_ptr; buf_ptr += len; buf_size -= len; s->inbuf_ptr = p; /* compute frame size */ s->free_format_next_header = header; s->free_format_frame_size = s->inbuf_ptr - s->inbuf; padding = (header1 >> 9) & 1; if (s->layer == 1) s->free_format_frame_size -= padding * 4; else s->free_format_frame_size -= padding; dprintf("free frame size=%d padding=%d\n", s->free_format_frame_size, padding); decode_header(s, header1); goto next_data; } p++; } /* not found: simply increase pointers */ buf_ptr += len; s->inbuf_ptr += len; buf_size -= len; } } else if (len < s->frame_size) { if (s->frame_size > MPA_MAX_CODED_FRAME_SIZE) s->frame_size = MPA_MAX_CODED_FRAME_SIZE; len = s->frame_size - len; if (len > buf_size) len = buf_size; else if (len < 4) len = buf_size > 4 ? 
4 : buf_size; memcpy(s->inbuf_ptr, buf_ptr, len); buf_ptr += len; s->inbuf_ptr += len; buf_size -= len; } else { out_size = mp_decode_frame(s, out_samples); s->inbuf_ptr = s->inbuf; s->frame_size = 0; *data_size = out_size; break; } next_data: } return buf_ptr - buf; } | 22,648 |
0 | static void mov_write_uuidprof_tag(AVIOContext *pb, AVFormatContext *s) { AVStream *video_st = s->streams[0]; AVCodecParameters *video_par = s->streams[0]->codecpar; AVCodecParameters *audio_par = s->streams[1]->codecpar; int audio_rate = audio_par->sample_rate; int64_t frame_rate = (video_st->avg_frame_rate.num * 0x10000LL) / video_st->avg_frame_rate.den; int audio_kbitrate = audio_par->bit_rate / 1000; int video_kbitrate = FFMIN(video_par->bit_rate / 1000, 800 - audio_kbitrate); avio_wb32(pb, 0x94); /* size */ ffio_wfourcc(pb, "uuid"); ffio_wfourcc(pb, "PROF"); avio_wb32(pb, 0x21d24fce); /* 96 bit UUID */ avio_wb32(pb, 0xbb88695c); avio_wb32(pb, 0xfac9c740); avio_wb32(pb, 0x0); /* ? */ avio_wb32(pb, 0x3); /* 3 sections ? */ avio_wb32(pb, 0x14); /* size */ ffio_wfourcc(pb, "FPRF"); avio_wb32(pb, 0x0); /* ? */ avio_wb32(pb, 0x0); /* ? */ avio_wb32(pb, 0x0); /* ? */ avio_wb32(pb, 0x2c); /* size */ ffio_wfourcc(pb, "APRF"); /* audio */ avio_wb32(pb, 0x0); avio_wb32(pb, 0x2); /* TrackID */ ffio_wfourcc(pb, "mp4a"); avio_wb32(pb, 0x20f); avio_wb32(pb, 0x0); avio_wb32(pb, audio_kbitrate); avio_wb32(pb, audio_kbitrate); avio_wb32(pb, audio_rate); avio_wb32(pb, audio_par->channels); avio_wb32(pb, 0x34); /* size */ ffio_wfourcc(pb, "VPRF"); /* video */ avio_wb32(pb, 0x0); avio_wb32(pb, 0x1); /* TrackID */ if (video_par->codec_id == AV_CODEC_ID_H264) { ffio_wfourcc(pb, "avc1"); avio_wb16(pb, 0x014D); avio_wb16(pb, 0x0015); } else { ffio_wfourcc(pb, "mp4v"); avio_wb16(pb, 0x0000); avio_wb16(pb, 0x0103); } avio_wb32(pb, 0x0); avio_wb32(pb, video_kbitrate); avio_wb32(pb, video_kbitrate); avio_wb32(pb, frame_rate); avio_wb32(pb, frame_rate); avio_wb16(pb, video_par->width); avio_wb16(pb, video_par->height); avio_wb32(pb, 0x010001); /* ? */ } | 22,649 |
1 | void rgb32tobgr24(const uint8_t *src, uint8_t *dst, unsigned int src_size) { unsigned i; unsigned num_pixels = src_size >> 2; for(i=0; i<num_pixels; i++) { dst[3*i + 0] = src[4*i + 2]; dst[3*i + 1] = src[4*i + 1]; dst[3*i + 2] = src[4*i + 0]; } } | 22,650 |
1 | static long do_sigreturn_v2(CPUARMState *env) { abi_ulong frame_addr; struct sigframe_v2 *frame; /* * Since we stacked the signal on a 64-bit boundary, * then 'sp' should be word aligned here. If it's * not, then the user is trying to mess with us. */ if (env->regs[13] & 7) goto badframe; frame_addr = env->regs[13]; if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) goto badframe; if (do_sigframe_return_v2(env, frame_addr, &frame->uc)) goto badframe; unlock_user_struct(frame, frame_addr, 0); return env->regs[0]; badframe: unlock_user_struct(frame, frame_addr, 0); force_sig(TARGET_SIGSEGV /* , current */); return 0; } | 22,651 |
1 | static PCIDevice *find_dev(sPAPREnvironment *spapr, uint64_t buid, uint32_t config_addr) { int devfn = (config_addr >> 8) & 0xFF; sPAPRPHBState *phb; QLIST_FOREACH(phb, &spapr->phbs, list) { BusChild *kid; if (phb->buid != buid) { continue; } QTAILQ_FOREACH(kid, &phb->host_state.bus->qbus.children, sibling) { PCIDevice *dev = (PCIDevice *)kid->child; if (dev->devfn == devfn) { return dev; } } } return NULL; } | 22,653 |
1 | int xics_alloc(XICSState *icp, int src, int irq_hint, bool lsi) { ICSState *ics = &icp->ics[src]; int irq; if (irq_hint) { assert(src == xics_find_source(icp, irq_hint)); if (!ICS_IRQ_FREE(ics, irq_hint - ics->offset)) { trace_xics_alloc_failed_hint(src, irq_hint); return -1; } irq = irq_hint; } else { irq = ics_find_free_block(ics, 1, 1); if (irq < 0) { trace_xics_alloc_failed_no_left(src); return -1; } irq += ics->offset; } ics_set_irq_type(ics, irq - ics->offset, lsi); trace_xics_alloc(src, irq); return irq; } | 22,654 |
0 | static int ftp_store(FTPContext *s) { char command[CONTROL_BUFFER_SIZE]; const int stor_codes[] = {150, 0}; snprintf(command, sizeof(command), "STOR %s\r\n", s->path); if (!ftp_send_command(s, command, stor_codes, NULL)) return AVERROR(EIO); s->state = UPLOADING; return 0; } | 22,656 |
0 | enum AVPixelFormat avpriv_fmt_v4l2ff(uint32_t v4l2_fmt, enum AVCodecID codec_id) { int i; for (i = 0; avpriv_fmt_conversion_table[i].codec_id != AV_CODEC_ID_NONE; i++) { if (avpriv_fmt_conversion_table[i].v4l2_fmt == v4l2_fmt && avpriv_fmt_conversion_table[i].codec_id == codec_id) { return avpriv_fmt_conversion_table[i].ff_fmt; } } return AV_PIX_FMT_NONE; } | 22,657 |
0 | void ff_rtp_send_mpegvideo(AVFormatContext *s1, const uint8_t *buf1, int size) { RTPDemuxContext *s = s1->priv_data; int len, h, max_packet_size; uint8_t *q; int begin_of_slice, end_of_slice, frame_type, temporal_reference; max_packet_size = s->max_payload_size; begin_of_slice = 1; end_of_slice = 0; frame_type = 0; temporal_reference = 0; while (size > 0) { int begin_of_sequence; begin_of_sequence = 0; len = max_packet_size - 4; if (len >= size) { len = size; end_of_slice = 1; } else { const uint8_t *r, *r1; int start_code; r1 = buf1; while (1) { start_code = -1; r = ff_find_start_code(r1, buf1 + size, &start_code); if((start_code & 0xFFFFFF00) == 0x100) { /* New start code found */ if (start_code == 0x100) { frame_type = (r[1] & 0x38) >> 3; temporal_reference = (int)r[0] << 2 | r[1] >> 6; } if (start_code == 0x1B8) { begin_of_sequence = 1; } if (r - buf1 < len) { /* The current slice fits in the packet */ if (begin_of_slice == 0) { /* no slice at the beginning of the packet... */ end_of_slice = 1; len = r - buf1 - 4; break; } r1 = r; } else { if (r - r1 < max_packet_size) { len = r1 - buf1 - 4; end_of_slice = 1; } break; } } else { break; } } } h = 0; h |= temporal_reference << 16; h |= begin_of_sequence << 13; h |= begin_of_slice << 12; h |= end_of_slice << 11; h |= frame_type << 8; q = s->buf; *q++ = h >> 24; *q++ = h >> 16; *q++ = h >> 8; *q++ = h; memcpy(q, buf1, len); q += len; /* 90 KHz time stamp */ s->timestamp = s->cur_timestamp; ff_rtp_send_data(s1, s->buf, q - s->buf, (len == size)); buf1 += len; size -= len; begin_of_slice = end_of_slice; end_of_slice = 0; } } | 22,658 |
1 | static int archipelago_submit_request(BDRVArchipelagoState *s, uint64_t bufidx, size_t count, off_t offset, ArchipelagoAIOCB *aio_cb, ArchipelagoSegmentedRequest *segreq, int op) { int ret, targetlen; char *target; void *data = NULL; struct xseg_request *req; AIORequestData *reqdata = g_malloc(sizeof(AIORequestData)); targetlen = strlen(s->volname); req = xseg_get_request(s->xseg, s->srcport, s->vportno, X_ALLOC); if (!req) { archipelagolog("Cannot get XSEG request\n"); goto err_exit2; } ret = xseg_prep_request(s->xseg, req, targetlen, count); if (ret < 0) { archipelagolog("Cannot prepare XSEG request\n"); goto err_exit; } target = xseg_get_target(s->xseg, req); if (!target) { archipelagolog("Cannot get XSEG target\n"); goto err_exit; } memcpy(target, s->volname, targetlen); req->size = count; req->offset = offset; switch (op) { case ARCHIP_OP_READ: req->op = X_READ; break; case ARCHIP_OP_WRITE: req->op = X_WRITE; break; case ARCHIP_OP_FLUSH: req->op = X_FLUSH; break; } reqdata->volname = s->volname; reqdata->offset = offset; reqdata->size = count; reqdata->bufidx = bufidx; reqdata->aio_cb = aio_cb; reqdata->segreq = segreq; reqdata->op = op; xseg_set_req_data(s->xseg, req, reqdata); if (op == ARCHIP_OP_WRITE) { data = xseg_get_data(s->xseg, req); if (!data) { archipelagolog("Cannot get XSEG data\n"); goto err_exit; } qemu_iovec_to_buf(aio_cb->qiov, bufidx, data, count); } xport p = xseg_submit(s->xseg, req, s->srcport, X_ALLOC); if (p == NoPort) { archipelagolog("Could not submit XSEG request\n"); goto err_exit; } xseg_signal(s->xseg, p); return 0; err_exit: g_free(reqdata); xseg_put_request(s->xseg, req, s->srcport); return -EIO; err_exit2: g_free(reqdata); return -EIO; } | 22,659 |
1 | int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev) { BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); VirtioBusState *vbus = VIRTIO_BUS(qbus); VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus); int i, r; if (!k->set_host_notifier) { fprintf(stderr, "binding does not support host notifiers\n"); r = -ENOSYS; goto fail; } for (i = 0; i < hdev->nvqs; ++i) { r = k->set_host_notifier(qbus->parent, hdev->vq_index + i, true); if (r < 0) { fprintf(stderr, "vhost VQ %d notifier binding failed: %d\n", i, -r); goto fail_vq; } } return 0; fail_vq: while (--i >= 0) { r = k->set_host_notifier(qbus->parent, hdev->vq_index + i, false); if (r < 0) { fprintf(stderr, "vhost VQ %d notifier cleanup error: %d\n", i, -r); fflush(stderr); } assert (r >= 0); } fail: return r; } | 22,660 |
1 | static int multiwrite_f(BlockDriverState *bs, int argc, char **argv) { struct timeval t1, t2; int Cflag = 0, qflag = 0; int c, cnt; char **buf; int64_t offset, first_offset = 0; /* Some compilers get confused and warn if this is not initialized. */ int total = 0; int nr_iov; int nr_reqs; int pattern = 0xcd; QEMUIOVector *qiovs; int i; BlockRequest *reqs; while ((c = getopt(argc, argv, "CqP:")) != EOF) { switch (c) { case 'C': Cflag = 1; break; case 'q': qflag = 1; break; case 'P': pattern = parse_pattern(optarg); if (pattern < 0) { return 0; } break; default: return qemuio_command_usage(&writev_cmd); } } if (optind > argc - 2) { return qemuio_command_usage(&writev_cmd); } nr_reqs = 1; for (i = optind; i < argc; i++) { if (!strcmp(argv[i], ";")) { nr_reqs++; } } reqs = g_malloc0(nr_reqs * sizeof(*reqs)); buf = g_malloc0(nr_reqs * sizeof(*buf)); qiovs = g_malloc(nr_reqs * sizeof(*qiovs)); for (i = 0; i < nr_reqs && optind < argc; i++) { int j; /* Read the offset of the request */ offset = cvtnum(argv[optind]); if (offset < 0) { printf("non-numeric offset argument -- %s\n", argv[optind]); goto out; } optind++; if (offset & 0x1ff) { printf("offset %lld is not sector aligned\n", (long long)offset); goto out; } if (i == 0) { first_offset = offset; } /* Read lengths for qiov entries */ for (j = optind; j < argc; j++) { if (!strcmp(argv[j], ";")) { break; } } nr_iov = j - optind; /* Build request */ buf[i] = create_iovec(bs, &qiovs[i], &argv[optind], nr_iov, pattern); if (buf[i] == NULL) { goto out; } reqs[i].qiov = &qiovs[i]; reqs[i].sector = offset >> 9; reqs[i].nb_sectors = reqs[i].qiov->size >> 9; optind = j + 1; pattern++; } /* If there were empty requests at the end, ignore them */ nr_reqs = i; gettimeofday(&t1, NULL); cnt = do_aio_multiwrite(bs, reqs, nr_reqs, &total); gettimeofday(&t2, NULL); if (cnt < 0) { printf("aio_multiwrite failed: %s\n", strerror(-cnt)); goto out; } if (qflag) { goto out; } /* Finally, report back -- -C gives a parsable format */ t2 = tsub(t2, t1); print_report("wrote", &t2, first_offset, total, total, cnt, Cflag); out: for (i = 0; i < nr_reqs; i++) { qemu_io_free(buf[i]); if (reqs[i].qiov != NULL) { qemu_iovec_destroy(&qiovs[i]); } } g_free(buf); g_free(reqs); g_free(qiovs); return 0; } | 22,661 |
0 | static void check_consistency(FFFrameQueue *fq) { #if ASSERT_LEVEL >= 2 uint64_t nb_samples = 0; size_t i; av_assert0(fq->queued == fq->total_frames_head - fq->total_frames_tail); for (i = 0; i < fq->queued; i++) nb_samples += bucket(fq, i)->frame->nb_samples; av_assert0(nb_samples == fq->total_samples_head - fq->total_samples_tail); #endif } | 22,663 |
0 | void ff_hevc_deblocking_boundary_strengths(HEVCContext *s, int x0, int y0, int log2_trafo_size, int slice_or_tiles_up_boundary, int slice_or_tiles_left_boundary) { MvField *tab_mvf = s->ref->tab_mvf; int log2_min_pu_size = s->sps->log2_min_pu_size; int log2_min_tu_size = s->sps->log2_min_tb_size; int min_pu_width = s->sps->min_pu_width; int min_tu_width = s->sps->min_tb_width; int is_intra = tab_mvf[(y0 >> log2_min_pu_size) * min_pu_width + (x0 >> log2_min_pu_size)].is_intra; int i, j, bs; if (y0 > 0 && (y0 & 7) == 0) { int yp_pu = (y0 - 1) >> log2_min_pu_size; int yq_pu = y0 >> log2_min_pu_size; int yp_tu = (y0 - 1) >> log2_min_tu_size; int yq_tu = y0 >> log2_min_tu_size; for (i = 0; i < (1 << log2_trafo_size); i += 4) { int x_pu = (x0 + i) >> log2_min_pu_size; int x_tu = (x0 + i) >> log2_min_tu_size; MvField *top = &tab_mvf[yp_pu * min_pu_width + x_pu]; MvField *curr = &tab_mvf[yq_pu * min_pu_width + x_pu]; uint8_t top_cbf_luma = s->cbf_luma[yp_tu * min_tu_width + x_tu]; uint8_t curr_cbf_luma = s->cbf_luma[yq_tu * min_tu_width + x_tu]; RefPicList *top_refPicList = ff_hevc_get_ref_list(s, s->ref, x0 + i, y0 - 1); bs = boundary_strength(s, curr, curr_cbf_luma, top, top_cbf_luma, top_refPicList, 1); if (!s->sh.slice_loop_filter_across_slices_enabled_flag && (slice_or_tiles_up_boundary & 1) && (y0 % (1 << s->sps->log2_ctb_size)) == 0) bs = 0; else if (!s->pps->loop_filter_across_tiles_enabled_flag && (slice_or_tiles_up_boundary & 2) && (y0 % (1 << s->sps->log2_ctb_size)) == 0) bs = 0; if (y0 == 0 || s->sh.disable_deblocking_filter_flag == 1) bs = 0; if (bs) s->horizontal_bs[((x0 + i) + y0 * s->bs_width) >> 2] = bs; } } // bs for TU internal horizontal PU boundaries if (log2_trafo_size > s->sps->log2_min_pu_size && !is_intra) for (j = 8; j < (1 << log2_trafo_size); j += 8) { int yp_pu = (y0 + j - 1) >> log2_min_pu_size; int yq_pu = (y0 + j) >> log2_min_pu_size; int yp_tu = (y0 + j - 1) >> log2_min_tu_size; int yq_tu = (y0 + j) >> log2_min_tu_size; for (i = 0; i < (1 << log2_trafo_size); i += 4) { int x_pu = (x0 + i) >> log2_min_pu_size; int x_tu = (x0 + i) >> log2_min_tu_size; MvField *top = &tab_mvf[yp_pu * min_pu_width + x_pu]; MvField *curr = &tab_mvf[yq_pu * min_pu_width + x_pu]; uint8_t top_cbf_luma = s->cbf_luma[yp_tu * min_tu_width + x_tu]; uint8_t curr_cbf_luma = s->cbf_luma[yq_tu * min_tu_width + x_tu]; RefPicList *top_refPicList = ff_hevc_get_ref_list(s, s->ref, x0 + i, y0 + j - 1); bs = boundary_strength(s, curr, curr_cbf_luma, top, top_cbf_luma, top_refPicList, 0); if (s->sh.disable_deblocking_filter_flag == 1) bs = 0; if (bs) s->horizontal_bs[((x0 + i) + (y0 + j) * s->bs_width) >> 2] = bs; } } // bs for vertical TU boundaries if (x0 > 0 && (x0 & 7) == 0) { int xp_pu = (x0 - 1) >> log2_min_pu_size; int xq_pu = x0 >> log2_min_pu_size; int xp_tu = (x0 - 1) >> log2_min_tu_size; int xq_tu = x0 >> log2_min_tu_size; for (i = 0; i < (1 << log2_trafo_size); i += 4) { int y_pu = (y0 + i) >> log2_min_pu_size; int y_tu = (y0 + i) >> log2_min_tu_size; MvField *left = &tab_mvf[y_pu * min_pu_width + xp_pu]; MvField *curr = &tab_mvf[y_pu * min_pu_width + xq_pu]; uint8_t left_cbf_luma = s->cbf_luma[y_tu * min_tu_width + xp_tu]; uint8_t curr_cbf_luma = s->cbf_luma[y_tu * min_tu_width + xq_tu]; RefPicList *left_refPicList = ff_hevc_get_ref_list(s, s->ref, x0 - 1, y0 + i); bs = boundary_strength(s, curr, curr_cbf_luma, left, left_cbf_luma, left_refPicList, 1); if (!s->sh.slice_loop_filter_across_slices_enabled_flag && (slice_or_tiles_left_boundary & 1) && (x0 % (1 << s->sps->log2_ctb_size)) == 
0) bs = 0; else if (!s->pps->loop_filter_across_tiles_enabled_flag && (slice_or_tiles_left_boundary & 2) && (x0 % (1 << s->sps->log2_ctb_size)) == 0) bs = 0; if (x0 == 0 || s->sh.disable_deblocking_filter_flag == 1) bs = 0; if (bs) s->vertical_bs[(x0 >> 3) + ((y0 + i) >> 2) * s->bs_width] = bs; } } // bs for TU internal vertical PU boundaries if (log2_trafo_size > log2_min_pu_size && !is_intra) for (j = 0; j < (1 << log2_trafo_size); j += 4) { int y_pu = (y0 + j) >> log2_min_pu_size; int y_tu = (y0 + j) >> log2_min_tu_size; for (i = 8; i < (1 << log2_trafo_size); i += 8) { int xp_pu = (x0 + i - 1) >> log2_min_pu_size; int xq_pu = (x0 + i) >> log2_min_pu_size; int xp_tu = (x0 + i - 1) >> log2_min_tu_size; int xq_tu = (x0 + i) >> log2_min_tu_size; MvField *left = &tab_mvf[y_pu * min_pu_width + xp_pu]; MvField *curr = &tab_mvf[y_pu * min_pu_width + xq_pu]; uint8_t left_cbf_luma = s->cbf_luma[y_tu * min_tu_width + xp_tu]; uint8_t curr_cbf_luma = s->cbf_luma[y_tu * min_tu_width + xq_tu]; RefPicList *left_refPicList = ff_hevc_get_ref_list(s, s->ref, x0 + i - 1, y0 + j); bs = boundary_strength(s, curr, curr_cbf_luma, left, left_cbf_luma, left_refPicList, 0); if (s->sh.disable_deblocking_filter_flag == 1) bs = 0; if (bs) s->vertical_bs[((x0 + i) >> 3) + ((y0 + j) >> 2) * s->bs_width] = bs; } } } | 22,665 |
1 | static void mips_cps_realize(DeviceState *dev, Error **errp) { MIPSCPSState *s = MIPS_CPS(dev); CPUMIPSState *env; MIPSCPU *cpu; int i; Error *err = NULL; target_ulong gcr_base; bool itu_present = false; for (i = 0; i < s->num_vp; i++) { cpu = cpu_mips_init(s->cpu_model); if (cpu == NULL) { error_setg(errp, "%s: CPU initialization failed", __func__); return; } /* Init internal devices */ cpu_mips_irq_init_cpu(cpu); cpu_mips_clock_init(cpu); env = &cpu->env; if (cpu_mips_itu_supported(env)) { itu_present = true; /* Attach ITC Tag to the VP */ env->itc_tag = mips_itu_get_tag_region(&s->itu); } qemu_register_reset(main_cpu_reset, cpu); } cpu = MIPS_CPU(first_cpu); env = &cpu->env; /* Inter-Thread Communication Unit */ if (itu_present) { object_initialize(&s->itu, sizeof(s->itu), TYPE_MIPS_ITU); qdev_set_parent_bus(DEVICE(&s->itu), sysbus_get_default()); object_property_set_int(OBJECT(&s->itu), 16, "num-fifo", &err); object_property_set_int(OBJECT(&s->itu), 16, "num-semaphores", &err); object_property_set_bool(OBJECT(&s->itu), true, "realized", &err); if (err != NULL) { error_propagate(errp, err); return; } memory_region_add_subregion(&s->container, 0, sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->itu), 0)); } /* Cluster Power Controller */ object_initialize(&s->cpc, sizeof(s->cpc), TYPE_MIPS_CPC); qdev_set_parent_bus(DEVICE(&s->cpc), sysbus_get_default()); object_property_set_int(OBJECT(&s->cpc), s->num_vp, "num-vp", &err); object_property_set_int(OBJECT(&s->cpc), 1, "vp-start-running", &err); object_property_set_bool(OBJECT(&s->cpc), true, "realized", &err); if (err != NULL) { error_propagate(errp, err); return; } memory_region_add_subregion(&s->container, 0, sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->cpc), 0)); /* Global Interrupt Controller */ object_initialize(&s->gic, sizeof(s->gic), TYPE_MIPS_GIC); qdev_set_parent_bus(DEVICE(&s->gic), sysbus_get_default()); object_property_set_int(OBJECT(&s->gic), s->num_vp, "num-vp", &err); object_property_set_int(OBJECT(&s->gic), 128, "num-irq", &err); object_property_set_bool(OBJECT(&s->gic), true, "realized", &err); if (err != NULL) { error_propagate(errp, err); return; } memory_region_add_subregion(&s->container, 0, sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->gic), 0)); /* Global Configuration Registers */ gcr_base = env->CP0_CMGCRBase << 4; object_initialize(&s->gcr, sizeof(s->gcr), TYPE_MIPS_GCR); qdev_set_parent_bus(DEVICE(&s->gcr), sysbus_get_default()); object_property_set_int(OBJECT(&s->gcr), s->num_vp, "num-vp", &err); object_property_set_int(OBJECT(&s->gcr), 0x800, "gcr-rev", &err); object_property_set_int(OBJECT(&s->gcr), gcr_base, "gcr-base", &err); object_property_set_link(OBJECT(&s->gcr), OBJECT(&s->gic.mr), "gic", &err); object_property_set_link(OBJECT(&s->gcr), OBJECT(&s->cpc.mr), "cpc", &err); object_property_set_bool(OBJECT(&s->gcr), true, "realized", &err); if (err != NULL) { error_propagate(errp, err); return; } memory_region_add_subregion(&s->container, gcr_base, sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->gcr), 0)); } | 22,667 |
1 | static void load_symbols(struct elfhdr *hdr, int fd, abi_ulong load_bias) { int i, shnum, nsyms, sym_idx = 0, str_idx = 0; struct elf_shdr *shdr; char *strings = NULL; struct syminfo *s = NULL; struct elf_sym *new_syms, *syms = NULL; shnum = hdr->e_shnum; i = shnum * sizeof(struct elf_shdr); shdr = (struct elf_shdr *)alloca(i); if (pread(fd, shdr, i, hdr->e_shoff) != i) { return; } bswap_shdr(shdr, shnum); for (i = 0; i < shnum; ++i) { if (shdr[i].sh_type == SHT_SYMTAB) { sym_idx = i; str_idx = shdr[i].sh_link; goto found; } } /* There will be no symbol table if the file was stripped. */ return; found: /* Now know where the strtab and symtab are. Snarf them. */ s = g_try_new(struct syminfo, 1); if (!s) { goto give_up; } i = shdr[str_idx].sh_size; s->disas_strtab = strings = g_try_malloc(i); if (!strings || pread(fd, strings, i, shdr[str_idx].sh_offset) != i) { goto give_up; } i = shdr[sym_idx].sh_size; syms = g_try_malloc(i); if (!syms || pread(fd, syms, i, shdr[sym_idx].sh_offset) != i) { goto give_up; } nsyms = i / sizeof(struct elf_sym); for (i = 0; i < nsyms; ) { bswap_sym(syms + i); /* Throw away entries which we do not need. */ if (syms[i].st_shndx == SHN_UNDEF || syms[i].st_shndx >= SHN_LORESERVE || ELF_ST_TYPE(syms[i].st_info) != STT_FUNC) { if (i < --nsyms) { syms[i] = syms[nsyms]; } } else { #if defined(TARGET_ARM) || defined (TARGET_MIPS) /* The bottom address bit marks a Thumb or MIPS16 symbol. */ syms[i].st_value &= ~(target_ulong)1; #endif syms[i].st_value += load_bias; i++; } } /* No "useful" symbol. */ if (nsyms == 0) { goto give_up; } /* Attempt to free the storage associated with the local symbols that we threw away. Whether or not this has any effect on the memory allocation depends on the malloc implementation and how many symbols we managed to discard. */ new_syms = g_try_renew(struct elf_sym, syms, nsyms); if (new_syms == NULL) { goto give_up; } syms = new_syms; qsort(syms, nsyms, sizeof(*syms), symcmp); s->disas_num_syms = nsyms; #if ELF_CLASS == ELFCLASS32 s->disas_symtab.elf32 = syms; #else s->disas_symtab.elf64 = syms; #endif s->lookup_symbol = lookup_symbolxx; s->next = syminfos; syminfos = s; return; give_up: g_free(s); g_free(strings); g_free(syms); } | 22,668 |
1 | static void qvirtio_pci_set_queue_address(QVirtioDevice *d, uint32_t pfn) { QVirtioPCIDevice *dev = (QVirtioPCIDevice *)d; qpci_io_writel(dev->pdev, dev->addr + VIRTIO_PCI_QUEUE_PFN, pfn); } | 22,669 |
1 | static int swf_write_audio(AVFormatContext *s, const uint8_t *buf, int size) { ByteIOContext *pb = &s->pb; put_swf_tag(s, TAG_STREAMBLOCK | TAG_LONG); put_buffer(pb, buf, size); put_swf_end_tag(s); put_flush_packet(&s->pb); return 0; } | 22,670 |
1 | static int avisynth_read_packet_audio(AVFormatContext *s, AVPacket *pkt, int discard) { AviSynthContext *avs = s->priv_data; AVRational fps, samplerate; int samples; const char* error; if (avs->curr_sample >= avs->vi->num_audio_samples) return AVERROR_EOF; fps.num = avs->vi->fps_numerator; fps.den = avs->vi->fps_denominator; samplerate.num = avs->vi->audio_samples_per_second; samplerate.den = 1; if (avs_has_video(avs->vi)) { if (avs->curr_frame < avs->vi->num_frames) samples = av_rescale_q(avs->curr_frame, samplerate, fps) - avs->curr_sample; else samples = av_rescale_q(1, samplerate, fps); } else { samples = 1000; } // After seeking, audio may catch up with video. if (samples <= 0) { pkt->size = 0; pkt->data = NULL; return 0; } if (avs->curr_sample + samples > avs->vi->num_audio_samples) samples = avs->vi->num_audio_samples - avs->curr_sample; // This must happen even if the stream is discarded to prevent desync. avs->curr_sample += samples; if (discard) return 0; pkt->pts = avs->curr_sample; pkt->dts = avs->curr_sample; pkt->duration = samples; pkt->size = avs_bytes_per_channel_sample(avs->vi) * samples * avs->vi->nchannels; if (!pkt->size) return AVERROR_UNKNOWN; pkt->data = av_malloc(pkt->size); if (!pkt->data) return AVERROR_UNKNOWN; avs_library->avs_get_audio(avs->clip, pkt->data, avs->curr_sample, samples); error = avs_library->avs_clip_get_error(avs->clip); if (error) { av_log(s, AV_LOG_ERROR, "%s\n", error); avs->error = 1; av_freep(&pkt->data); return AVERROR_UNKNOWN; } return 0; } | 22,671 |
1 | static int vc1_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size, n_slices = 0, i, ret; VC1Context *v = avctx->priv_data; MpegEncContext *s = &v->s; AVFrame *pict = data; uint8_t *buf2 = NULL; const uint8_t *buf_start = buf; int mb_height, n_slices1; struct { uint8_t *buf; GetBitContext gb; int mby_start; } *slices = NULL, *tmp; /* no supplementary picture */ if (buf_size == 0 || (buf_size == 4 && AV_RB32(buf) == VC1_CODE_ENDOFSEQ)) { /* special case for last picture */ if (s->low_delay == 0 && s->next_picture_ptr) { if ((ret = av_frame_ref(pict, &s->next_picture_ptr->f)) < 0) return ret; s->next_picture_ptr = NULL; *got_frame = 1; } return 0; } //for advanced profile we may need to parse and unescape data if (avctx->codec_id == AV_CODEC_ID_VC1 || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) { int buf_size2 = 0; buf2 = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE); if (IS_MARKER(AV_RB32(buf))) { /* frame starts with marker and needs to be parsed */ const uint8_t *start, *end, *next; int size; next = buf; for (start = buf, end = buf + buf_size; next < end; start = next) { next = find_next_marker(start + 4, end); size = next - start - 4; if (size <= 0) continue; switch (AV_RB32(start)) { case VC1_CODE_FRAME: if (avctx->hwaccel) buf_start = start; buf_size2 = vc1_unescape_buffer(start + 4, size, buf2); break; case VC1_CODE_FIELD: { int buf_size3; tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1)); if (!tmp) goto err; slices = tmp; slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE); if (!slices[n_slices].buf) goto err; buf_size3 = vc1_unescape_buffer(start + 4, size, slices[n_slices].buf); init_get_bits(&slices[n_slices].gb, slices[n_slices].buf, buf_size3 << 3); /* assuming that the field marker is at the exact middle, hope it's correct */ slices[n_slices].mby_start = s->mb_height >> 1; n_slices1 = n_slices - 1; // index of the last slice of the first field n_slices++; break; } case VC1_CODE_ENTRYPOINT: /* it should be before frame data */ buf_size2 = vc1_unescape_buffer(start + 4, size, buf2); init_get_bits(&s->gb, buf2, buf_size2 * 8); ff_vc1_decode_entry_point(avctx, v, &s->gb); break; case VC1_CODE_SLICE: { int buf_size3; tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1)); if (!tmp) goto err; slices = tmp; slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE); if (!slices[n_slices].buf) goto err; buf_size3 = vc1_unescape_buffer(start + 4, size, slices[n_slices].buf); init_get_bits(&slices[n_slices].gb, slices[n_slices].buf, buf_size3 << 3); slices[n_slices].mby_start = get_bits(&slices[n_slices].gb, 9); n_slices++; break; } } } } else if (v->interlace && ((buf[0] & 0xC0) == 0xC0)) { /* WVC1 interlaced stores both fields divided by marker */ const uint8_t *divider; int buf_size3; divider = find_next_marker(buf, buf + buf_size); if ((divider == (buf + buf_size)) || AV_RB32(divider) != VC1_CODE_FIELD) { av_log(avctx, AV_LOG_ERROR, "Error in WVC1 interlaced frame\n"); goto err; } else { // found field marker, unescape second field tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1)); if (!tmp) goto err; slices = tmp; slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE); if (!slices[n_slices].buf) goto err; buf_size3 = vc1_unescape_buffer(divider + 4, buf + buf_size - divider - 4, slices[n_slices].buf); init_get_bits(&slices[n_slices].gb, slices[n_slices].buf, buf_size3 << 3); slices[n_slices].mby_start = 
s->mb_height >> 1; n_slices1 = n_slices - 1; n_slices++; } buf_size2 = vc1_unescape_buffer(buf, divider - buf, buf2); } else { buf_size2 = vc1_unescape_buffer(buf, buf_size, buf2); } init_get_bits(&s->gb, buf2, buf_size2*8); } else init_get_bits(&s->gb, buf, buf_size*8); if (v->res_sprite) { v->new_sprite = !get_bits1(&s->gb); v->two_sprites = get_bits1(&s->gb); /* res_sprite means a Windows Media Image stream, AV_CODEC_ID_*IMAGE means we're using the sprite compositor. These are intentionally kept separate so you can get the raw sprites by using the wmv3 decoder for WMVP or the vc1 one for WVP2 */ if (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) { if (v->new_sprite) { // switch AVCodecContext parameters to those of the sprites avctx->width = avctx->coded_width = v->sprite_width; avctx->height = avctx->coded_height = v->sprite_height; } else { goto image; } } } if (s->context_initialized && (s->width != avctx->coded_width || s->height != avctx->coded_height)) { ff_vc1_decode_end(avctx); } if (!s->context_initialized) { if (ff_msmpeg4_decode_init(avctx) < 0) goto err; if (ff_vc1_decode_init_alloc_tables(v) < 0) { ff_MPV_common_end(s); goto err; } s->low_delay = !avctx->has_b_frames || v->res_sprite; if (v->profile == PROFILE_ADVANCED) { s->h_edge_pos = avctx->coded_width; s->v_edge_pos = avctx->coded_height; } } // do parse frame header v->pic_header_flag = 0; v->first_pic_header_flag = 1; if (v->profile < PROFILE_ADVANCED) { if (ff_vc1_parse_frame_header(v, &s->gb) < 0) { goto err; } } else { if (ff_vc1_parse_frame_header_adv(v, &s->gb) < 0) { goto err; } } v->first_pic_header_flag = 0; if ((avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) && s->pict_type != AV_PICTURE_TYPE_I) { av_log(v->s.avctx, AV_LOG_ERROR, "Sprite decoder: expected I-frame\n"); goto err; } // for skipping the frame s->current_picture.f.pict_type = s->pict_type; s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I; /* skip B-frames if we don't have reference frames */ if (s->last_picture_ptr == NULL && (s->pict_type == AV_PICTURE_TYPE_B || s->droppable)) { goto err; } if ((avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B) || (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I) || avctx->skip_frame >= AVDISCARD_ALL) { goto end; } if (s->next_p_frame_damaged) { if (s->pict_type == AV_PICTURE_TYPE_B) goto end; else s->next_p_frame_damaged = 0; } if (ff_MPV_frame_start(s, avctx) < 0) { goto err; } // process pulldown flags s->current_picture_ptr->f.repeat_pict = 0; // Pulldown flags are only valid when 'broadcast' has been set. 
// So ticks_per_frame will be 2 if (v->rff) { // repeat field s->current_picture_ptr->f.repeat_pict = 1; } else if (v->rptfrm) { // repeat frames s->current_picture_ptr->f.repeat_pict = v->rptfrm * 2; } s->me.qpel_put = s->dsp.put_qpel_pixels_tab; s->me.qpel_avg = s->dsp.avg_qpel_pixels_tab; if (avctx->hwaccel) { if (avctx->hwaccel->start_frame(avctx, buf, buf_size) < 0) goto err; if (avctx->hwaccel->decode_slice(avctx, buf_start, (buf + buf_size) - buf_start) < 0) goto err; if (avctx->hwaccel->end_frame(avctx) < 0) goto err; } else { int header_ret = 0; ff_mpeg_er_frame_start(s); v->bits = buf_size * 8; v->end_mb_x = s->mb_width; if (v->field_mode) { s->current_picture.f.linesize[0] <<= 1; s->current_picture.f.linesize[1] <<= 1; s->current_picture.f.linesize[2] <<= 1; s->linesize <<= 1; s->uvlinesize <<= 1; } mb_height = s->mb_height >> v->field_mode; if (!mb_height) { av_log(v->s.avctx, AV_LOG_ERROR, "Invalid mb_height.\n"); goto err; } for (i = 0; i <= n_slices; i++) { if (i > 0 && slices[i - 1].mby_start >= mb_height) { if (v->field_mode <= 0) { av_log(v->s.avctx, AV_LOG_ERROR, "Slice %d starts beyond " "picture boundary (%d >= %d)\n", i, slices[i - 1].mby_start, mb_height); continue; } v->second_field = 1; v->blocks_off = s->mb_width * s->mb_height << 1; v->mb_off = s->mb_stride * s->mb_height >> 1; } else { v->second_field = 0; v->blocks_off = 0; v->mb_off = 0; } if (i) { v->pic_header_flag = 0; if (v->field_mode && i == n_slices1 + 2) { if ((header_ret = ff_vc1_parse_frame_header_adv(v, &s->gb)) < 0) { av_log(v->s.avctx, AV_LOG_ERROR, "Field header damaged\n"); if (avctx->err_recognition & AV_EF_EXPLODE) goto err; continue; } } else if (get_bits1(&s->gb)) { v->pic_header_flag = 1; if ((header_ret = ff_vc1_parse_frame_header_adv(v, &s->gb)) < 0) { av_log(v->s.avctx, AV_LOG_ERROR, "Slice header damaged\n"); if (avctx->err_recognition & AV_EF_EXPLODE) goto err; continue; } } } if (header_ret < 0) continue; s->start_mb_y = (i == 0) ? 0 : FFMAX(0, slices[i-1].mby_start % mb_height); if (!v->field_mode || v->second_field) s->end_mb_y = (i == n_slices ) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height); else s->end_mb_y = (i <= n_slices1 + 1) ? 
mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height); ff_vc1_decode_blocks(v); if (i != n_slices) s->gb = slices[i].gb; } if (v->field_mode) { v->second_field = 0; s->current_picture.f.linesize[0] >>= 1; s->current_picture.f.linesize[1] >>= 1; s->current_picture.f.linesize[2] >>= 1; s->linesize >>= 1; s->uvlinesize >>= 1; if (v->s.pict_type != AV_PICTURE_TYPE_BI && v->s.pict_type != AV_PICTURE_TYPE_B) { FFSWAP(uint8_t *, v->mv_f_next[0], v->mv_f[0]); FFSWAP(uint8_t *, v->mv_f_next[1], v->mv_f[1]); } } av_dlog(s->avctx, "Consumed %i/%i bits\n", get_bits_count(&s->gb), s->gb.size_in_bits); // if (get_bits_count(&s->gb) > buf_size * 8) // return -1; if (!v->field_mode) ff_er_frame_end(&s->er); } ff_MPV_frame_end(s); if (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) { image: avctx->width = avctx->coded_width = v->output_width; avctx->height = avctx->coded_height = v->output_height; if (avctx->skip_frame >= AVDISCARD_NONREF) goto end; #if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER if (vc1_decode_sprites(v, &s->gb)) goto err; #endif if ((ret = av_frame_ref(pict, v->sprite_output_frame)) < 0) goto err; *got_frame = 1; } else { if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) { if ((ret = av_frame_ref(pict, &s->current_picture_ptr->f)) < 0) goto err; ff_print_debug_info(s, s->current_picture_ptr); *got_frame = 1; } else if (s->last_picture_ptr != NULL) { if ((ret = av_frame_ref(pict, &s->last_picture_ptr->f)) < 0) goto err; ff_print_debug_info(s, s->last_picture_ptr); *got_frame = 1; } } end: av_free(buf2); for (i = 0; i < n_slices; i++) av_free(slices[i].buf); av_free(slices); return buf_size; err: av_free(buf2); for (i = 0; i < n_slices; i++) av_free(slices[i].buf); av_free(slices); return -1; } | 22,672 |
0 | altivec_yuv2packedX (SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize, int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize, uint8_t *dest, int dstW, int dstY) { int i,j; short tmp __attribute__((aligned (16))); int16_t *p; short *f; vector signed short X,X0,X1,Y0,U0,V0,Y1,U1,V1,U,V; vector signed short R0,G0,B0,R1,G1,B1; vector unsigned char R,G,B,pels[3]; vector unsigned char *out,*nout; vector signed short RND = vec_splat((vector signed short)AVV(1<<3),0); vector unsigned short SCL = vec_splat((vector unsigned short)AVV(4),0); unsigned long scratch[16] __attribute__ ((aligned (16))); vector signed short *vYCoeffsBank, *vCCoeffsBank; vector signed short *YCoeffs, *CCoeffs; vYCoeffsBank = malloc (sizeof (vector signed short)*lumFilterSize*dstW); vCCoeffsBank = malloc (sizeof (vector signed short)*chrFilterSize*dstW); for (i=0;i<lumFilterSize*dstW;i++) { tmp = c->vLumFilter[i]; p = &vYCoeffsBank[i]; for (j=0;j<8;j++) p[j] = tmp; } for (i=0;i<chrFilterSize*dstW;i++) { tmp = c->vChrFilter[i]; p = &vCCoeffsBank[i]; for (j=0;j<8;j++) p[j] = tmp; } YCoeffs = vYCoeffsBank+dstY*lumFilterSize; CCoeffs = vCCoeffsBank+dstY*chrFilterSize; out = (vector unsigned char *)dest; for(i=0; i<dstW; i+=16){ Y0 = RND; Y1 = RND; /* extract 16 coeffs from lumSrc */ for(j=0; j<lumFilterSize; j++) { X0 = vec_ld (0, &lumSrc[j][i]); X1 = vec_ld (16, &lumSrc[j][i]); Y0 = vec_mradds (X0, YCoeffs[j], Y0); Y1 = vec_mradds (X1, YCoeffs[j], Y1); } U = RND; V = RND; /* extract 8 coeffs from U,V */ for(j=0; j<chrFilterSize; j++) { X = vec_ld (0, &chrSrc[j][i/2]); U = vec_mradds (X, CCoeffs[j], U); X = vec_ld (0, &chrSrc[j][i/2+2048]); V = vec_mradds (X, CCoeffs[j], V); } /* scale and clip signals */ Y0 = vec_sra (Y0, SCL); Y1 = vec_sra (Y1, SCL); U = vec_sra (U, SCL); V = vec_sra (V, SCL); Y0 = vec_clip (Y0); Y1 = vec_clip (Y1); U = vec_clip (U); V = vec_clip (V); /* now we have Y0= y0 y1 y2 y3 y4 y5 y6 y7 Y1= y8 y9 y10 y11 y12 y13 y14 y15 U= u0 u1 u2 u3 u4 u5 u6 u7 V= v0 v1 v2 v3 v4 v5 v6 v7 Y0= y0 y1 y2 y3 y4 y5 y6 y7 Y1= y8 y9 y10 y11 y12 y13 y14 y15 U0= u0 u0 u1 u1 u2 u2 u3 u3 U1= u4 u4 u5 u5 u6 u6 u7 u7 V0= v0 v0 v1 v1 v2 v2 v3 v3 V1= v4 v4 v5 v5 v6 v6 v7 v7 */ U0 = vec_mergeh (U,U); V0 = vec_mergeh (V,V); U1 = vec_mergel (U,U); V1 = vec_mergel (V,V); cvtyuvtoRGB (c, Y0,U0,V0,&R0,&G0,&B0); cvtyuvtoRGB (c, Y1,U1,V1,&R1,&G1,&B1); R = vec_packclp (R0,R1); G = vec_packclp (G0,G1); B = vec_packclp (B0,B1); out_rgba (R,G,B,out); } if (i < dstW) { i -= 16; Y0 = RND; Y1 = RND; /* extract 16 coeffs from lumSrc */ for(j=0; j<lumFilterSize; j++) { X0 = vec_ld (0, &lumSrc[j][i]); X1 = vec_ld (16, &lumSrc[j][i]); Y0 = vec_mradds (X0, YCoeffs[j], Y0); Y1 = vec_mradds (X1, YCoeffs[j], Y1); } U = RND; V = RND; /* extract 8 coeffs from U,V */ for(j=0; j<chrFilterSize; j++) { X = vec_ld (0, &chrSrc[j][i/2]); U = vec_mradds (X, CCoeffs[j], U); X = vec_ld (0, &chrSrc[j][i/2+2048]); V = vec_mradds (X, CCoeffs[j], V); } /* scale and clip signals */ Y0 = vec_sra (Y0, SCL); Y1 = vec_sra (Y1, SCL); U = vec_sra (U, SCL); V = vec_sra (V, SCL); Y0 = vec_clip (Y0); Y1 = vec_clip (Y1); U = vec_clip (U); V = vec_clip (V); /* now we have Y0= y0 y1 y2 y3 y4 y5 y6 y7 Y1= y8 y9 y10 y11 y12 y13 y14 y15 U= u0 u1 u2 u3 u4 u5 u6 u7 V= v0 v1 v2 v3 v4 v5 v6 v7 Y0= y0 y1 y2 y3 y4 y5 y6 y7 Y1= y8 y9 y10 y11 y12 y13 y14 y15 U0= u0 u0 u1 u1 u2 u2 u3 u3 U1= u4 u4 u5 u5 u6 u6 u7 u7 V0= v0 v0 v1 v1 v2 v2 v3 v3 V1= v4 v4 v5 v5 v6 v6 v7 v7 */ U0 = vec_mergeh (U,U); V0 = vec_mergeh (V,V); U1 = vec_mergel (U,U); V1 = vec_mergel (V,V); 
cvtyuvtoRGB (c, Y0,U0,V0,&R0,&G0,&B0); cvtyuvtoRGB (c, Y1,U1,V1,&R1,&G1,&B1); R = vec_packclp (R0,R1); G = vec_packclp (G0,G1); B = vec_packclp (B0,B1); nout = (vector unsigned char *)scratch; out_rgba (R,G,B,nout); memcpy (&((uint32_t*)dest)[i], scratch, (dstW-i)/4); } if (vYCoeffsBank) free (vYCoeffsBank); if (vCCoeffsBank) free (vCCoeffsBank); } | 22,673 |
0 | static av_cold int vaapi_encode_h265_init_fixed_qp(AVCodecContext *avctx) { VAAPIEncodeContext *ctx = avctx->priv_data; VAAPIEncodeH265Context *priv = ctx->priv_data; VAAPIEncodeH265Options *opt = ctx->codec_options; priv->fixed_qp_p = opt->qp; if (avctx->i_quant_factor > 0.0) priv->fixed_qp_idr = (int)((priv->fixed_qp_p * avctx->i_quant_factor + avctx->i_quant_offset) + 0.5); else priv->fixed_qp_idr = priv->fixed_qp_p; if (avctx->b_quant_factor > 0.0) priv->fixed_qp_b = (int)((priv->fixed_qp_p * avctx->b_quant_factor + avctx->b_quant_offset) + 0.5); else priv->fixed_qp_b = priv->fixed_qp_p; av_log(avctx, AV_LOG_DEBUG, "Using fixed QP = " "%d / %d / %d for IDR- / P- / B-frames.\n", priv->fixed_qp_idr, priv->fixed_qp_p, priv->fixed_qp_b); return 0; } | 22,674 |
1 | int av_parser_parse2(AVCodecParserContext *s, AVCodecContext *avctx, uint8_t **poutbuf, int *poutbuf_size, const uint8_t *buf, int buf_size, int64_t pts, int64_t dts, int64_t pos) { int index, i; uint8_t dummy_buf[FF_INPUT_BUFFER_PADDING_SIZE]; if (!(s->flags & PARSER_FLAG_FETCHED_OFFSET)) { s->next_frame_offset = s->cur_offset = pos; s->flags |= PARSER_FLAG_FETCHED_OFFSET; } if (buf_size == 0) { /* padding is always necessary even if EOF, so we add it here */ memset(dummy_buf, 0, sizeof(dummy_buf)); buf = dummy_buf; } else if (s->cur_offset + buf_size != s->cur_frame_end[s->cur_frame_start_index]) { /* skip remainder packets */ /* add a new packet descriptor */ i = (s->cur_frame_start_index + 1) & (AV_PARSER_PTS_NB - 1); s->cur_frame_start_index = i; s->cur_frame_offset[i] = s->cur_offset; s->cur_frame_end[i] = s->cur_offset + buf_size; s->cur_frame_pts[i] = pts; s->cur_frame_dts[i] = dts; s->cur_frame_pos[i] = pos; } if (s->fetch_timestamp) { s->fetch_timestamp = 0; s->last_pts = s->pts; s->last_dts = s->dts; s->last_pos = s->pos; ff_fetch_timestamp(s, 0, 0); } /* WARNING: the returned index can be negative */ index = s->parser->parser_parse(s, avctx, (const uint8_t **) poutbuf, poutbuf_size, buf, buf_size); /* update the file pointer */ if (*poutbuf_size) { /* fill the data for the current frame */ s->frame_offset = s->next_frame_offset; /* offset of the next frame */ s->next_frame_offset = s->cur_offset + index; s->fetch_timestamp = 1; } if (index < 0) index = 0; s->cur_offset += index; return index; } | 22,675 |
1 | static void fill_prefetch_fifo(struct omap_gpmc_s *s) { /* Fill the prefetch FIFO by reading data from NAND. * We do this synchronously, unlike the hardware which * will do this asynchronously. We refill when the * FIFO has THRESHOLD bytes free, and we always refill * as much data as possible starting at the top end * of the FIFO. * (We have to refill at THRESHOLD rather than waiting * for the FIFO to empty to allow for the case where * the FIFO size isn't an exact multiple of THRESHOLD * and we're doing DMA transfers.) * This means we never need to handle wrap-around in * the fifo-reading code, and the next byte of data * to read is always fifo[63 - fifopointer]. */ int fptr; int cs = prefetch_cs(s->prefetch.config1); int is16bit = (((s->cs_file[cs].config[0] >> 12) & 3) != 0); int bytes; /* Don't believe the bit of the OMAP TRM that says that COUNTVALUE * and TRANSFERCOUNT are in units of 16 bit words for 16 bit NAND. * Instead believe the bit that says it is always a byte count. */ bytes = 64 - s->prefetch.fifopointer; if (bytes > s->prefetch.count) { bytes = s->prefetch.count; } s->prefetch.count -= bytes; s->prefetch.fifopointer += bytes; fptr = 64 - s->prefetch.fifopointer; /* Move the existing data in the FIFO so it sits just * before what we're about to read in */ while (fptr < (64 - bytes)) { s->prefetch.fifo[fptr] = s->prefetch.fifo[fptr + bytes]; fptr++; } while (fptr < 64) { if (is16bit) { uint32_t v = omap_nand_read(&s->cs_file[cs], 0, 2); s->prefetch.fifo[fptr++] = v & 0xff; s->prefetch.fifo[fptr++] = (v >> 8) & 0xff; } else { s->prefetch.fifo[fptr++] = omap_nand_read(&s->cs_file[cs], 0, 1); } } if (s->prefetch.startengine && (s->prefetch.count == 0)) { /* This was the final transfer: raise TERMINALCOUNTSTATUS */ s->irqst |= 2; s->prefetch.startengine = 0; } /* If there are any bytes in the FIFO at this point then * we must raise a DMA request (either this is a final part * transfer, or we filled the FIFO in which case we certainly * have THRESHOLD bytes available) */ if (s->prefetch.fifopointer != 0) { omap_gpmc_dma_update(s, 1); } omap_gpmc_int_update(s); } | 22,676
0 | av_cold int ff_ac3_encode_init(AVCodecContext *avctx) { AC3EncodeContext *s = avctx->priv_data; int ret, frame_size_58; s->avctx = avctx; s->eac3 = avctx->codec_id == AV_CODEC_ID_EAC3; ff_ac3_common_init(); ret = validate_options(s); if (ret) return ret; avctx->frame_size = AC3_BLOCK_SIZE * s->num_blocks; avctx->delay = AC3_BLOCK_SIZE; s->bitstream_mode = avctx->audio_service_type; if (s->bitstream_mode == AV_AUDIO_SERVICE_TYPE_KARAOKE) s->bitstream_mode = 0x7; s->bits_written = 0; s->samples_written = 0; /* calculate crc_inv for both possible frame sizes */ frame_size_58 = (( s->frame_size >> 2) + ( s->frame_size >> 4)) << 1; s->crc_inv[0] = pow_poly((CRC16_POLY >> 1), (8 * frame_size_58) - 16, CRC16_POLY); if (s->bit_alloc.sr_code == 1) { frame_size_58 = (((s->frame_size+2) >> 2) + ((s->frame_size+2) >> 4)) << 1; s->crc_inv[1] = pow_poly((CRC16_POLY >> 1), (8 * frame_size_58) - 16, CRC16_POLY); } /* set function pointers */ if (CONFIG_AC3_FIXED_ENCODER && s->fixed_point) { s->mdct_end = ff_ac3_fixed_mdct_end; s->mdct_init = ff_ac3_fixed_mdct_init; s->allocate_sample_buffers = ff_ac3_fixed_allocate_sample_buffers; } else if (CONFIG_AC3_ENCODER || CONFIG_EAC3_ENCODER) { s->mdct_end = ff_ac3_float_mdct_end; s->mdct_init = ff_ac3_float_mdct_init; s->allocate_sample_buffers = ff_ac3_float_allocate_sample_buffers; } if (CONFIG_EAC3_ENCODER && s->eac3) s->output_frame_header = ff_eac3_output_frame_header; else s->output_frame_header = ac3_output_frame_header; set_bandwidth(s); exponent_init(s); bit_alloc_init(s); ret = s->mdct_init(s); if (ret) goto init_fail; ret = allocate_buffers(s); if (ret) goto init_fail; ff_audiodsp_init(&s->adsp); ff_me_cmp_init(&s->mecc, avctx); ff_ac3dsp_init(&s->ac3dsp, avctx->flags & CODEC_FLAG_BITEXACT); dprint_options(s); return 0; init_fail: ff_ac3_encode_close(avctx); return ret; } | 22,677 |
0 | static int dirac_unpack_prediction_parameters(DiracContext *s) { static const uint8_t default_blen[] = { 4, 12, 16, 24 }; static const uint8_t default_bsep[] = { 4, 8, 12, 16 }; GetBitContext *gb = &s->gb; unsigned idx, ref; align_get_bits(gb); /* [DIRAC_STD] 11.2.2 Block parameters. block_parameters() */ /* Luma and Chroma are equal. 11.2.3 */ idx = svq3_get_ue_golomb(gb); /* [DIRAC_STD] index */ if (idx > 4) { av_log(s->avctx, AV_LOG_ERROR, "Block prediction index too high\n"); return -1; } if (idx == 0) { s->plane[0].xblen = svq3_get_ue_golomb(gb); s->plane[0].yblen = svq3_get_ue_golomb(gb); s->plane[0].xbsep = svq3_get_ue_golomb(gb); s->plane[0].ybsep = svq3_get_ue_golomb(gb); } else { /*[DIRAC_STD] preset_block_params(index). Table 11.1 */ s->plane[0].xblen = default_blen[idx-1]; s->plane[0].yblen = default_blen[idx-1]; s->plane[0].xbsep = default_bsep[idx-1]; s->plane[0].ybsep = default_bsep[idx-1]; } /*[DIRAC_STD] 11.2.4 motion_data_dimensions() Calculated in function dirac_unpack_block_motion_data */ if (s->plane[0].xbsep < s->plane[0].xblen/2 || s->plane[0].ybsep < s->plane[0].yblen/2) { av_log(s->avctx, AV_LOG_ERROR, "Block separation too small\n"); return -1; } if (s->plane[0].xbsep > s->plane[0].xblen || s->plane[0].ybsep > s->plane[0].yblen) { av_log(s->avctx, AV_LOG_ERROR, "Block separation greater than size\n"); return -1; } if (FFMAX(s->plane[0].xblen, s->plane[0].yblen) > MAX_BLOCKSIZE) { av_log(s->avctx, AV_LOG_ERROR, "Unsupported large block size\n"); return -1; } /*[DIRAC_STD] 11.2.5 Motion vector precision. motion_vector_precision() Read motion vector precision */ s->mv_precision = svq3_get_ue_golomb(gb); if (s->mv_precision > 3) { av_log(s->avctx, AV_LOG_ERROR, "MV precision finer than eighth-pel\n"); return -1; } /*[DIRAC_STD] 11.2.6 Global motion. global_motion() Read the global motion compensation parameters */ s->globalmc_flag = get_bits1(gb); if (s->globalmc_flag) { memset(s->globalmc, 0, sizeof(s->globalmc)); /* [DIRAC_STD] pan_tilt(gparams) */ for (ref = 0; ref < s->num_refs; ref++) { if (get_bits1(gb)) { s->globalmc[ref].pan_tilt[0] = dirac_get_se_golomb(gb); s->globalmc[ref].pan_tilt[1] = dirac_get_se_golomb(gb); } /* [DIRAC_STD] zoom_rotate_shear(gparams) zoom/rotation/shear parameters */ if (get_bits1(gb)) { s->globalmc[ref].zrs_exp = svq3_get_ue_golomb(gb); s->globalmc[ref].zrs[0][0] = dirac_get_se_golomb(gb); s->globalmc[ref].zrs[0][1] = dirac_get_se_golomb(gb); s->globalmc[ref].zrs[1][0] = dirac_get_se_golomb(gb); s->globalmc[ref].zrs[1][1] = dirac_get_se_golomb(gb); } else { s->globalmc[ref].zrs[0][0] = 1; s->globalmc[ref].zrs[1][1] = 1; } /* [DIRAC_STD] perspective(gparams) */ if (get_bits1(gb)) { s->globalmc[ref].perspective_exp = svq3_get_ue_golomb(gb); s->globalmc[ref].perspective[0] = dirac_get_se_golomb(gb); s->globalmc[ref].perspective[1] = dirac_get_se_golomb(gb); } } } /*[DIRAC_STD] 11.2.7 Picture prediction mode. prediction_mode() Picture prediction mode, not currently used. */ if (svq3_get_ue_golomb(gb)) { av_log(s->avctx, AV_LOG_ERROR, "Unknown picture prediction mode\n"); return -1; } /* [DIRAC_STD] 11.2.8 Reference picture weight. reference_picture_weights() just data read, weight calculation will be done later on. */ s->weight_log2denom = 1; s->weight[0] = 1; s->weight[1] = 1; if (get_bits1(gb)) { s->weight_log2denom = svq3_get_ue_golomb(gb); s->weight[0] = dirac_get_se_golomb(gb); if (s->num_refs == 2) s->weight[1] = dirac_get_se_golomb(gb); } return 0; } | 22,678
1 | static inline void xchg_mb_border(H264Context *h, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, int linesize, int uvlinesize, int xchg){ MpegEncContext * const s = &h->s; int temp8, i; uint64_t temp64; src_y -= linesize + 1; src_cb -= uvlinesize + 1; src_cr -= uvlinesize + 1; #define XCHG(a,b,t,xchg)\ t= a;\ if(xchg)\ a= b;\ b= t; for(i=0; i<17; i++){ XCHG(h->left_border[i ], src_y [i* linesize], temp8, xchg); } XCHG(*(uint64_t*)(h->top_border[s->mb_x]+0), *(uint64_t*)(src_y +1), temp64, xchg); XCHG(*(uint64_t*)(h->top_border[s->mb_x]+8), *(uint64_t*)(src_y +9), temp64, 1); if(!(s->flags&CODEC_FLAG_GRAY)){ for(i=0; i<9; i++){ XCHG(h->left_border[i+17 ], src_cb[i*uvlinesize], temp8, xchg); XCHG(h->left_border[i+17+9], src_cr[i*uvlinesize], temp8, xchg); } XCHG(*(uint64_t*)(h->top_border[s->mb_x]+16), *(uint64_t*)(src_cb+1), temp64, 1); XCHG(*(uint64_t*)(h->top_border[s->mb_x]+24), *(uint64_t*)(src_cr+1), temp64, 1); } } | 22,679 |
1 | void vnc_disconnect_finish(VncState *vs) { int i; vnc_jobs_join(vs); /* Wait encoding jobs */ vnc_lock_output(vs); vnc_qmp_event(vs, QAPI_EVENT_VNC_DISCONNECTED); buffer_free(&vs->input); buffer_free(&vs->output); buffer_free(&vs->ws_input); buffer_free(&vs->ws_output); qapi_free_VncClientInfo(vs->info); vnc_zlib_clear(vs); vnc_tight_clear(vs); vnc_zrle_clear(vs); #ifdef CONFIG_VNC_TLS vnc_tls_client_cleanup(vs); #endif /* CONFIG_VNC_TLS */ #ifdef CONFIG_VNC_SASL vnc_sasl_client_cleanup(vs); #endif /* CONFIG_VNC_SASL */ audio_del(vs); vnc_release_modifiers(vs); if (vs->initialized) { QTAILQ_REMOVE(&vs->vd->clients, vs, next); qemu_remove_mouse_mode_change_notifier(&vs->mouse_mode_notifier); } if (vs->vd->lock_key_sync) qemu_remove_led_event_handler(vs->led); vnc_unlock_output(vs); qemu_mutex_destroy(&vs->output_mutex); if (vs->bh != NULL) { qemu_bh_delete(vs->bh); } buffer_free(&vs->jobs_buffer); for (i = 0; i < VNC_STAT_ROWS; ++i) { g_free(vs->lossy_rect[i]); } g_free(vs->lossy_rect); g_free(vs); } | 22,680 |
1 | static int get_whole_cluster(BlockDriverState *bs, uint64_t cluster_offset, uint64_t offset, int allocate) { uint64_t parent_cluster_offset; BDRVVmdkState *s = bs->opaque; uint8_t whole_grain[s->cluster_sectors*512]; // 128 sectors * 512 bytes each = grain size 64KB // we will be here if it's first write on non-exist grain(cluster). // try to read from parent image, if exist if (s->hd->backing_hd) { BDRVVmdkState *ps = s->hd->backing_hd->opaque; if (!vmdk_is_cid_valid(bs)) return -1; parent_cluster_offset = get_cluster_offset(s->hd->backing_hd, offset, allocate); if (bdrv_pread(ps->hd, parent_cluster_offset, whole_grain, ps->cluster_sectors*512) != ps->cluster_sectors*512) return -1; if (bdrv_pwrite(s->hd, cluster_offset << 9, whole_grain, sizeof(whole_grain)) != sizeof(whole_grain)) return -1; } return 0; } | 22,681 |
1 | static int curl_open(BlockDriverState *bs, QDict *options, int flags) { BDRVCURLState *s = bs->opaque; CURLState *state = NULL; QemuOpts *opts; Error *local_err = NULL; const char *file; double d; static int inited = 0; if (flags & BDRV_O_RDWR) { qerror_report(ERROR_CLASS_GENERIC_ERROR, "curl block device does not support writes"); return -EROFS; } opts = qemu_opts_create_nofail(&runtime_opts); qemu_opts_absorb_qdict(opts, options, &local_err); if (error_is_set(&local_err)) { qerror_report_err(local_err); error_free(local_err); goto out_noclean; } s->readahead_size = qemu_opt_get_size(opts, "readahead", READ_AHEAD_SIZE); if ((s->readahead_size & 0x1ff) != 0) { fprintf(stderr, "HTTP_READAHEAD_SIZE %zd is not a multiple of 512\n", s->readahead_size); goto out_noclean; } file = qemu_opt_get(opts, "url"); if (file == NULL) { qerror_report(ERROR_CLASS_GENERIC_ERROR, "curl block driver requires " "an 'url' option"); goto out_noclean; } if (!inited) { curl_global_init(CURL_GLOBAL_ALL); inited = 1; } DPRINTF("CURL: Opening %s\n", file); s->url = g_strdup(file); state = curl_init_state(s); if (!state) goto out_noclean; // Get file size curl_easy_setopt(state->curl, CURLOPT_NOBODY, 1); curl_easy_setopt(state->curl, CURLOPT_WRITEFUNCTION, (void *)curl_size_cb); if (curl_easy_perform(state->curl)) goto out; curl_easy_getinfo(state->curl, CURLINFO_CONTENT_LENGTH_DOWNLOAD, &d); if (d) s->len = (size_t)d; else if(!s->len) goto out; DPRINTF("CURL: Size = %zd\n", s->len); curl_clean_state(state); curl_easy_cleanup(state->curl); state->curl = NULL; // Now we know the file exists and its size, so let's // initialize the multi interface! s->multi = curl_multi_init(); curl_multi_setopt(s->multi, CURLMOPT_SOCKETDATA, s); curl_multi_setopt(s->multi, CURLMOPT_SOCKETFUNCTION, curl_sock_cb); curl_multi_do(s); qemu_opts_del(opts); return 0; out: fprintf(stderr, "CURL: Error opening file: %s\n", state->errmsg); curl_easy_cleanup(state->curl); state->curl = NULL; out_noclean: g_free(s->url); qemu_opts_del(opts); return -EINVAL; } | 22,682 |
1 | static int do_subchannel_work(SubchDev *sch) { if (!sch->do_subchannel_work) { return -EINVAL; } g_assert(sch->curr_status.scsw.ctrl & SCSW_CTRL_MASK_FCTL); return sch->do_subchannel_work(sch); } | 22,684 |
1 | void ff_free_stream(AVFormatContext *s, AVStream *st){ av_assert0(s->nb_streams>0); av_assert0(s->streams[ s->nb_streams-1 ] == st); if (st->codec) { avcodec_close(st->codec); } if (st->parser) { av_parser_close(st->parser); } if (st->attached_pic.data) av_free_packet(&st->attached_pic); av_dict_free(&st->metadata); av_freep(&st->probe_data.buf); av_freep(&st->index_entries); av_freep(&st->codec->extradata); av_freep(&st->codec->subtitle_header); av_freep(&st->codec); av_freep(&st->priv_data); if (st->info) av_freep(&st->info->duration_error); av_freep(&st->info); av_freep(&s->streams[ --s->nb_streams ]); } | 22,685 |
1 | void visit_type_uint8(Visitor *v, uint8_t *obj, const char *name, Error **errp) { int64_t value; if (!error_is_set(errp)) { if (v->type_uint8) { v->type_uint8(v, obj, name, errp); } else { value = *obj; v->type_int(v, &value, name, errp); if (value < 0 || value > UINT8_MAX) { error_set(errp, QERR_INVALID_PARAMETER_VALUE, name ? name : "null", "uint8_t"); return; } *obj = value; } } } | 22,686 |
1 | static struct omap_uwire_s *omap_uwire_init(MemoryRegion *system_memory, hwaddr base, qemu_irq txirq, qemu_irq rxirq, qemu_irq dma, omap_clk clk) { struct omap_uwire_s *s = (struct omap_uwire_s *) g_malloc0(sizeof(struct omap_uwire_s)); s->txirq = txirq; s->rxirq = rxirq; s->txdrq = dma; omap_uwire_reset(s); memory_region_init_io(&s->iomem, NULL, &omap_uwire_ops, s, "omap-uwire", 0x800); memory_region_add_subregion(system_memory, base, &s->iomem); return s; } | 22,687 |
1 | ImgReSampleContext *img_resample_full_init(int owidth, int oheight, int iwidth, int iheight, int topBand, int bottomBand, int leftBand, int rightBand, int padtop, int padbottom, int padleft, int padright) { ImgReSampleContext *s; s = av_mallocz(sizeof(ImgReSampleContext)); if (!s) if((unsigned)owidth >= UINT_MAX / (LINE_BUF_HEIGHT + NB_TAPS)) s->line_buf = av_mallocz(owidth * (LINE_BUF_HEIGHT + NB_TAPS)); if (!s->line_buf) goto fail; s->owidth = owidth; s->oheight = oheight; s->iwidth = iwidth; s->iheight = iheight; s->topBand = topBand; s->bottomBand = bottomBand; s->leftBand = leftBand; s->rightBand = rightBand; s->padtop = padtop; s->padbottom = padbottom; s->padleft = padleft; s->padright = padright; s->pad_owidth = owidth - (padleft + padright); s->pad_oheight = oheight - (padtop + padbottom); s->h_incr = ((iwidth - leftBand - rightBand) * POS_FRAC) / s->pad_owidth; s->v_incr = ((iheight - topBand - bottomBand) * POS_FRAC) / s->pad_oheight; av_build_filter(&s->h_filters[0][0], (float) s->pad_owidth / (float) (iwidth - leftBand - rightBand), NB_TAPS, NB_PHASES, 1<<FILTER_BITS, 0); av_build_filter(&s->v_filters[0][0], (float) s->pad_oheight / (float) (iheight - topBand - bottomBand), NB_TAPS, NB_PHASES, 1<<FILTER_BITS, 0); return s; fail: av_free(s); } | 22,689 |
0 | static void FUNCC(pred8x16_vertical_add)(uint8_t *pix, const int *block_offset, const int16_t *block, ptrdiff_t stride) { int i; for(i=0; i<4; i++) FUNCC(pred4x4_vertical_add)(pix + block_offset[i], block + i*16*sizeof(pixel), stride); for(i=4; i<8; i++) FUNCC(pred4x4_vertical_add)(pix + block_offset[i+4], block + i*16*sizeof(pixel), stride); } | 22,690 |
1 | hwaddr mips_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) { MIPSCPU *cpu = MIPS_CPU(cs); hwaddr phys_addr; int prot; if (get_physical_address(&cpu->env, &phys_addr, &prot, addr, 0, ACCESS_INT) != 0) { return -1; } return phys_addr; } | 22,691 |
1 | qemu_irq qemu_irq_split(qemu_irq irq1, qemu_irq irq2) { qemu_irq *s = g_malloc0(2 * sizeof(qemu_irq)); s[0] = irq1; s[1] = irq2; return qemu_allocate_irqs(qemu_splitirq, s, 1)[0]; } | 22,692 |
1 | static int cmv_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; CmvContext *s = avctx->priv_data; const uint8_t *buf_end = buf + buf_size; if (AV_RL32(buf)==MVIh_TAG||AV_RB32(buf)==MVIh_TAG) { cmv_process_header(s, buf+EA_PREAMBLE_SIZE, buf_end); return buf_size; } if (av_image_check_size(s->width, s->height, 0, s->avctx)) return -1; /* shuffle */ if (s->last2_frame.data[0]) avctx->release_buffer(avctx, &s->last2_frame); FFSWAP(AVFrame, s->last_frame, s->last2_frame); FFSWAP(AVFrame, s->frame, s->last_frame); s->frame.reference = 1; s->frame.buffer_hints = FF_BUFFER_HINTS_VALID; if (avctx->get_buffer(avctx, &s->frame)<0) { av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); return -1; } memcpy(s->frame.data[1], s->palette, AVPALETTE_SIZE); buf += EA_PREAMBLE_SIZE; if ((buf[0]&1)) { // subtype cmv_decode_inter(s, buf+2, buf_end); s->frame.key_frame = 0; s->frame.pict_type = AV_PICTURE_TYPE_P; }else{ s->frame.key_frame = 1; s->frame.pict_type = AV_PICTURE_TYPE_I; cmv_decode_intra(s, buf+2, buf_end); } *data_size = sizeof(AVFrame); *(AVFrame*)data = s->frame; return buf_size; } | 22,694 |
0 | static void save_display_set(DVBSubContext *ctx) { DVBSubRegion *region; DVBSubRegionDisplay *display; DVBSubCLUT *clut; uint32_t *clut_table; int x_pos, y_pos, width, height; int x, y, y_off, x_off; uint32_t *pbuf; char filename[32]; static int fileno_index = 0; x_pos = -1; y_pos = -1; width = 0; height = 0; for (display = ctx->display_list; display != NULL; display = display->next) { region = get_region(ctx, display->region_id); if (x_pos == -1) { x_pos = display->x_pos; y_pos = display->y_pos; width = region->width; height = region->height; } else { if (display->x_pos < x_pos) { width += (x_pos - display->x_pos); x_pos = display->x_pos; } if (display->y_pos < y_pos) { height += (y_pos - display->y_pos); y_pos = display->y_pos; } if (display->x_pos + region->width > x_pos + width) { width = display->x_pos + region->width - x_pos; } if (display->y_pos + region->height > y_pos + height) { height = display->y_pos + region->height - y_pos; } } } if (x_pos >= 0) { pbuf = av_malloc(width * height * 4); for (display = ctx->display_list; display != NULL; display = display->next) { region = get_region(ctx, display->region_id); x_off = display->x_pos - x_pos; y_off = display->y_pos - y_pos; clut = get_clut(ctx, region->clut); if (clut == 0) clut = &default_clut; switch (region->depth) { case 2: clut_table = clut->clut4; break; case 8: clut_table = clut->clut256; break; case 4: default: clut_table = clut->clut16; break; } for (y = 0; y < region->height; y++) { for (x = 0; x < region->width; x++) { pbuf[((y + y_off) * width) + x_off + x] = clut_table[region->pbuf[y * region->width + x]]; } } } snprintf(filename, 32, "dvbs.%d", fileno_index); png_save2(filename, pbuf, width, height); av_free(pbuf); } fileno_index++; } | 22,695 |
0 | static int fic_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { FICContext *ctx = avctx->priv_data; uint8_t *src = avpkt->data; int ret; int slice, nslices; int msize; int tsize; uint8_t *sdata; if ((ret = ff_reget_buffer(avctx, ctx->frame)) < 0) { av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n"); return ret; } /* Header + at least one slice (4) */ if (avpkt->size < FIC_HEADER_SIZE + 4) { av_log(avctx, AV_LOG_ERROR, "Frame data is too small.\n"); return AVERROR_INVALIDDATA; } /* Check for header. */ if (memcmp(src, fic_header, 7)) av_log(avctx, AV_LOG_WARNING, "Invalid FIC Header.\n"); /* Is it a skip frame? */ if (src[17]) goto skip; nslices = src[13]; if (!nslices) { av_log(avctx, AV_LOG_ERROR, "Zero slices found.\n"); return AVERROR_INVALIDDATA; } /* High or Low Quality Matrix? */ ctx->qmat = src[23] ? fic_qmat_hq : fic_qmat_lq; /* Skip cursor data. */ tsize = AV_RB24(src + 24); if (tsize > avpkt->size - FIC_HEADER_SIZE) { av_log(avctx, AV_LOG_ERROR, "Invalid cursor data size.\n"); return AVERROR_INVALIDDATA; } /* Slice height for all but the last slice. */ ctx->slice_h = 16 * (ctx->aligned_height >> 4) / nslices; if (ctx->slice_h % 16) ctx->slice_h = FFALIGN(ctx->slice_h - 16, 16); /* First slice offset and remaining data. */ sdata = src + tsize + FIC_HEADER_SIZE + 4 * nslices; msize = avpkt->size - nslices * 4 - tsize - FIC_HEADER_SIZE; if (msize <= 0) { av_log(avctx, AV_LOG_ERROR, "Not enough frame data to decode.\n"); return AVERROR_INVALIDDATA; } /* * Set the frametype to I initially. It will be set to P if the frame * has any dependencies (skip blocks). There will be a race condition * inside the slice decode function to set these, but we do not care. * since they will only ever be set to 0/P. */ ctx->frame->key_frame = 1; ctx->frame->pict_type = AV_PICTURE_TYPE_I; /* Allocate slice data. */ av_fast_malloc(&ctx->slice_data, &ctx->slice_data_size, nslices * sizeof(ctx->slice_data[0])); if (!ctx->slice_data_size) { av_log(avctx, AV_LOG_ERROR, "Could not allocate slice data.\n"); return AVERROR(ENOMEM); } memset(ctx->slice_data, 0, nslices * sizeof(ctx->slice_data[0])); for (slice = 0; slice < nslices; slice++) { unsigned slice_off = AV_RB32(src + tsize + FIC_HEADER_SIZE + slice * 4); unsigned slice_size; int y_off = ctx->slice_h * slice; int slice_h = ctx->slice_h; /* * Either read the slice size, or consume all data left. * Also, special case the last slight height. */ if (slice == nslices - 1) { slice_size = msize; slice_h = FFALIGN(avctx->height - ctx->slice_h * (nslices - 1), 16); } else { slice_size = AV_RB32(src + tsize + FIC_HEADER_SIZE + slice * 4 + 4); } if (slice_size < slice_off || slice_size > msize) continue; slice_size -= slice_off; ctx->slice_data[slice].src = sdata + slice_off; ctx->slice_data[slice].src_size = slice_size; ctx->slice_data[slice].slice_h = slice_h; ctx->slice_data[slice].y_off = y_off; } if (ret = avctx->execute(avctx, fic_decode_slice, ctx->slice_data, NULL, nslices, sizeof(ctx->slice_data[0])) < 0) return ret; skip: *got_frame = 1; if ((ret = av_frame_ref(data, ctx->frame)) < 0) return ret; return avpkt->size; } | 22,696 |
0 | static int decode_update_thread_context(AVCodecContext *dst, const AVCodecContext *src){ H264Context *h= dst->priv_data, *h1= src->priv_data; MpegEncContext * const s = &h->s, * const s1 = &h1->s; int inited = s->context_initialized, err; int i; if(dst == src || !s1->context_initialized) return 0; err = ff_mpeg_update_thread_context(dst, src); if(err) return err; //FIXME handle width/height changing if(!inited){ for(i = 0; i < MAX_SPS_COUNT; i++) av_freep(h->sps_buffers + i); for(i = 0; i < MAX_PPS_COUNT; i++) av_freep(h->pps_buffers + i); memcpy(&h->s + 1, &h1->s + 1, sizeof(H264Context) - sizeof(MpegEncContext)); //copy all fields after MpegEnc memset(h->sps_buffers, 0, sizeof(h->sps_buffers)); memset(h->pps_buffers, 0, sizeof(h->pps_buffers)); if (ff_h264_alloc_tables(h) < 0) { av_log(dst, AV_LOG_ERROR, "Could not allocate memory for h264\n"); return AVERROR(ENOMEM); } context_init(h); for(i=0; i<2; i++){ h->rbsp_buffer[i] = NULL; h->rbsp_buffer_size[i] = 0; } h->thread_context[0] = h; // frame_start may not be called for the next thread (if it's decoding a bottom field) // so this has to be allocated here h->s.obmc_scratchpad = av_malloc(16*6*s->linesize); s->dsp.clear_blocks(h->mb); s->dsp.clear_blocks(h->mb+(24*16<<h->pixel_shift)); } //extradata/NAL handling h->is_avc = h1->is_avc; //SPS/PPS copy_parameter_set((void**)h->sps_buffers, (void**)h1->sps_buffers, MAX_SPS_COUNT, sizeof(SPS)); h->sps = h1->sps; copy_parameter_set((void**)h->pps_buffers, (void**)h1->pps_buffers, MAX_PPS_COUNT, sizeof(PPS)); h->pps = h1->pps; //Dequantization matrices //FIXME these are big - can they be only copied when PPS changes? copy_fields(h, h1, dequant4_buffer, dequant4_coeff); for(i=0; i<6; i++) h->dequant4_coeff[i] = h->dequant4_buffer[0] + (h1->dequant4_coeff[i] - h1->dequant4_buffer[0]); for(i=0; i<6; i++) h->dequant8_coeff[i] = h->dequant8_buffer[0] + (h1->dequant8_coeff[i] - h1->dequant8_buffer[0]); h->dequant_coeff_pps = h1->dequant_coeff_pps; //POC timing copy_fields(h, h1, poc_lsb, redundant_pic_count); //reference lists copy_fields(h, h1, ref_count, list_count); copy_fields(h, h1, ref_list, intra_gb); copy_fields(h, h1, short_ref, cabac_init_idc); copy_picture_range(h->short_ref, h1->short_ref, 32, s, s1); copy_picture_range(h->long_ref, h1->long_ref, 32, s, s1); copy_picture_range(h->delayed_pic, h1->delayed_pic, MAX_DELAYED_PIC_COUNT+2, s, s1); h->last_slice_type = h1->last_slice_type; h->sync = h1->sync; if(!s->current_picture_ptr) return 0; if(!s->dropable) { err = ff_h264_execute_ref_pic_marking(h, h->mmco, h->mmco_index); h->prev_poc_msb = h->poc_msb; h->prev_poc_lsb = h->poc_lsb; } h->prev_frame_num_offset= h->frame_num_offset; h->prev_frame_num = h->frame_num; h->outputed_poc = h->next_outputed_poc; return err; } | 22,698 |
1 | static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn) { int rd, wrd; int rdhi, rdlo, rd0, rd1, i; TCGv addr; TCGv tmp, tmp2, tmp3; if ((insn & 0x0e000e00) == 0x0c000000) { if ((insn & 0x0fe00ff0) == 0x0c400000) { wrd = insn & 0xf; rdlo = (insn >> 12) & 0xf; rdhi = (insn >> 16) & 0xf; if (insn & ARM_CP_RW_BIT) { /* TMRRC */ iwmmxt_load_reg(cpu_V0, wrd); tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0); tcg_gen_shri_i64(cpu_V0, cpu_V0, 32); tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0); } else { /* TMCRR */ tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]); iwmmxt_store_reg(cpu_V0, wrd); gen_op_iwmmxt_set_mup(); } return 0; } wrd = (insn >> 12) & 0xf; addr = new_tmp(); if (gen_iwmmxt_address(s, insn, addr)) { return 1; } if (insn & ARM_CP_RW_BIT) { if ((insn >> 28) == 0xf) { /* WLDRW wCx */ tmp = new_tmp(); tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s)); iwmmxt_store_creg(wrd, tmp); } else { i = 1; if (insn & (1 << 8)) { if (insn & (1 << 22)) { /* WLDRD */ tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s)); i = 0; } else { /* WLDRW wRd */ tmp = gen_ld32(addr, IS_USER(s)); } } else { if (insn & (1 << 22)) { /* WLDRH */ tmp = gen_ld16u(addr, IS_USER(s)); } else { /* WLDRB */ tmp = gen_ld8u(addr, IS_USER(s)); } } if (i) { tcg_gen_extu_i32_i64(cpu_M0, tmp); dead_tmp(tmp); } gen_op_iwmmxt_movq_wRn_M0(wrd); } } else { if ((insn >> 28) == 0xf) { /* WSTRW wCx */ tmp = iwmmxt_load_creg(wrd); gen_st32(tmp, addr, IS_USER(s)); } else { gen_op_iwmmxt_movq_M0_wRn(wrd); tmp = new_tmp(); if (insn & (1 << 8)) { if (insn & (1 << 22)) { /* WSTRD */ dead_tmp(tmp); tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s)); } else { /* WSTRW wRd */ tcg_gen_trunc_i64_i32(tmp, cpu_M0); gen_st32(tmp, addr, IS_USER(s)); } } else { if (insn & (1 << 22)) { /* WSTRH */ tcg_gen_trunc_i64_i32(tmp, cpu_M0); gen_st16(tmp, addr, IS_USER(s)); } else { /* WSTRB */ tcg_gen_trunc_i64_i32(tmp, cpu_M0); gen_st8(tmp, addr, IS_USER(s)); } } } } return 0; } if ((insn & 0x0f000000) != 0x0e000000) return 1; switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) { case 0x000: /* WOR */ wrd = (insn >> 12) & 0xf; rd0 = (insn >> 0) & 0xf; rd1 = (insn >> 16) & 0xf; gen_op_iwmmxt_movq_M0_wRn(rd0); gen_op_iwmmxt_orq_M0_wRn(rd1); gen_op_iwmmxt_setpsr_nz(); gen_op_iwmmxt_movq_wRn_M0(wrd); gen_op_iwmmxt_set_mup(); gen_op_iwmmxt_set_cup(); break; case 0x011: /* TMCR */ if (insn & 0xf) return 1; rd = (insn >> 12) & 0xf; wrd = (insn >> 16) & 0xf; switch (wrd) { case ARM_IWMMXT_wCID: case ARM_IWMMXT_wCASF: break; case ARM_IWMMXT_wCon: gen_op_iwmmxt_set_cup(); /* Fall through. 
*/ case ARM_IWMMXT_wCSSF: tmp = iwmmxt_load_creg(wrd); tmp2 = load_reg(s, rd); tcg_gen_andc_i32(tmp, tmp, tmp2); dead_tmp(tmp2); iwmmxt_store_creg(wrd, tmp); break; case ARM_IWMMXT_wCGR0: case ARM_IWMMXT_wCGR1: case ARM_IWMMXT_wCGR2: case ARM_IWMMXT_wCGR3: gen_op_iwmmxt_set_cup(); tmp = load_reg(s, rd); iwmmxt_store_creg(wrd, tmp); break; default: return 1; } break; case 0x100: /* WXOR */ wrd = (insn >> 12) & 0xf; rd0 = (insn >> 0) & 0xf; rd1 = (insn >> 16) & 0xf; gen_op_iwmmxt_movq_M0_wRn(rd0); gen_op_iwmmxt_xorq_M0_wRn(rd1); gen_op_iwmmxt_setpsr_nz(); gen_op_iwmmxt_movq_wRn_M0(wrd); gen_op_iwmmxt_set_mup(); gen_op_iwmmxt_set_cup(); break; case 0x111: /* TMRC */ if (insn & 0xf) return 1; rd = (insn >> 12) & 0xf; wrd = (insn >> 16) & 0xf; tmp = iwmmxt_load_creg(wrd); store_reg(s, rd, tmp); break; case 0x300: /* WANDN */ wrd = (insn >> 12) & 0xf; rd0 = (insn >> 0) & 0xf; rd1 = (insn >> 16) & 0xf; gen_op_iwmmxt_movq_M0_wRn(rd0); tcg_gen_neg_i64(cpu_M0, cpu_M0); gen_op_iwmmxt_andq_M0_wRn(rd1); gen_op_iwmmxt_setpsr_nz(); gen_op_iwmmxt_movq_wRn_M0(wrd); gen_op_iwmmxt_set_mup(); gen_op_iwmmxt_set_cup(); break; case 0x200: /* WAND */ wrd = (insn >> 12) & 0xf; rd0 = (insn >> 0) & 0xf; rd1 = (insn >> 16) & 0xf; gen_op_iwmmxt_movq_M0_wRn(rd0); gen_op_iwmmxt_andq_M0_wRn(rd1); gen_op_iwmmxt_setpsr_nz(); gen_op_iwmmxt_movq_wRn_M0(wrd); gen_op_iwmmxt_set_mup(); gen_op_iwmmxt_set_cup(); break; case 0x810: case 0xa10: /* WMADD */ wrd = (insn >> 12) & 0xf; rd0 = (insn >> 0) & 0xf; rd1 = (insn >> 16) & 0xf; gen_op_iwmmxt_movq_M0_wRn(rd0); if (insn & (1 << 21)) gen_op_iwmmxt_maddsq_M0_wRn(rd1); else gen_op_iwmmxt_madduq_M0_wRn(rd1); gen_op_iwmmxt_movq_wRn_M0(wrd); gen_op_iwmmxt_set_mup(); break; case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */ wrd = (insn >> 12) & 0xf; rd0 = (insn >> 16) & 0xf; rd1 = (insn >> 0) & 0xf; gen_op_iwmmxt_movq_M0_wRn(rd0); switch ((insn >> 22) & 3) { case 0: gen_op_iwmmxt_unpacklb_M0_wRn(rd1); break; case 1: gen_op_iwmmxt_unpacklw_M0_wRn(rd1); break; case 2: gen_op_iwmmxt_unpackll_M0_wRn(rd1); break; case 3: return 1; } gen_op_iwmmxt_movq_wRn_M0(wrd); gen_op_iwmmxt_set_mup(); gen_op_iwmmxt_set_cup(); break; case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */ wrd = (insn >> 12) & 0xf; rd0 = (insn >> 16) & 0xf; rd1 = (insn >> 0) & 0xf; gen_op_iwmmxt_movq_M0_wRn(rd0); switch ((insn >> 22) & 3) { case 0: gen_op_iwmmxt_unpackhb_M0_wRn(rd1); break; case 1: gen_op_iwmmxt_unpackhw_M0_wRn(rd1); break; case 2: gen_op_iwmmxt_unpackhl_M0_wRn(rd1); break; case 3: return 1; } gen_op_iwmmxt_movq_wRn_M0(wrd); gen_op_iwmmxt_set_mup(); gen_op_iwmmxt_set_cup(); break; case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */ wrd = (insn >> 12) & 0xf; rd0 = (insn >> 16) & 0xf; rd1 = (insn >> 0) & 0xf; gen_op_iwmmxt_movq_M0_wRn(rd0); if (insn & (1 << 22)) gen_op_iwmmxt_sadw_M0_wRn(rd1); else gen_op_iwmmxt_sadb_M0_wRn(rd1); if (!(insn & (1 << 20))) gen_op_iwmmxt_addl_M0_wRn(wrd); gen_op_iwmmxt_movq_wRn_M0(wrd); gen_op_iwmmxt_set_mup(); break; case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */ wrd = (insn >> 12) & 0xf; rd0 = (insn >> 16) & 0xf; rd1 = (insn >> 0) & 0xf; gen_op_iwmmxt_movq_M0_wRn(rd0); if (insn & (1 << 21)) { if (insn & (1 << 20)) gen_op_iwmmxt_mulshw_M0_wRn(rd1); else gen_op_iwmmxt_mulslw_M0_wRn(rd1); } else { if (insn & (1 << 20)) gen_op_iwmmxt_muluhw_M0_wRn(rd1); else gen_op_iwmmxt_mululw_M0_wRn(rd1); } gen_op_iwmmxt_movq_wRn_M0(wrd); gen_op_iwmmxt_set_mup(); break; case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */ wrd = (insn >> 12) & 0xf; 
rd0 = (insn >> 16) & 0xf; rd1 = (insn >> 0) & 0xf; gen_op_iwmmxt_movq_M0_wRn(rd0); if (insn & (1 << 21)) gen_op_iwmmxt_macsw_M0_wRn(rd1); else gen_op_iwmmxt_macuw_M0_wRn(rd1); if (!(insn & (1 << 20))) { iwmmxt_load_reg(cpu_V1, wrd); tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1); } gen_op_iwmmxt_movq_wRn_M0(wrd); gen_op_iwmmxt_set_mup(); break; case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */ wrd = (insn >> 12) & 0xf; rd0 = (insn >> 16) & 0xf; rd1 = (insn >> 0) & 0xf; gen_op_iwmmxt_movq_M0_wRn(rd0); switch ((insn >> 22) & 3) { case 0: gen_op_iwmmxt_cmpeqb_M0_wRn(rd1); break; case 1: gen_op_iwmmxt_cmpeqw_M0_wRn(rd1); break; case 2: gen_op_iwmmxt_cmpeql_M0_wRn(rd1); break; case 3: return 1; } gen_op_iwmmxt_movq_wRn_M0(wrd); gen_op_iwmmxt_set_mup(); gen_op_iwmmxt_set_cup(); break; case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */ wrd = (insn >> 12) & 0xf; rd0 = (insn >> 16) & 0xf; rd1 = (insn >> 0) & 0xf; gen_op_iwmmxt_movq_M0_wRn(rd0); if (insn & (1 << 22)) { if (insn & (1 << 20)) gen_op_iwmmxt_avgw1_M0_wRn(rd1); else gen_op_iwmmxt_avgw0_M0_wRn(rd1); } else { if (insn & (1 << 20)) gen_op_iwmmxt_avgb1_M0_wRn(rd1); else gen_op_iwmmxt_avgb0_M0_wRn(rd1); } gen_op_iwmmxt_movq_wRn_M0(wrd); gen_op_iwmmxt_set_mup(); gen_op_iwmmxt_set_cup(); break; case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */ wrd = (insn >> 12) & 0xf; rd0 = (insn >> 16) & 0xf; rd1 = (insn >> 0) & 0xf; gen_op_iwmmxt_movq_M0_wRn(rd0); tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3)); tcg_gen_andi_i32(tmp, tmp, 7); iwmmxt_load_reg(cpu_V1, rd1); gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp); dead_tmp(tmp); gen_op_iwmmxt_movq_wRn_M0(wrd); gen_op_iwmmxt_set_mup(); break; case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */ if (((insn >> 6) & 3) == 3) return 1; rd = (insn >> 12) & 0xf; wrd = (insn >> 16) & 0xf; tmp = load_reg(s, rd); gen_op_iwmmxt_movq_M0_wRn(wrd); switch ((insn >> 6) & 3) { case 0: tmp2 = tcg_const_i32(0xff); tmp3 = tcg_const_i32((insn & 7) << 3); break; case 1: tmp2 = tcg_const_i32(0xffff); tmp3 = tcg_const_i32((insn & 3) << 4); break; case 2: tmp2 = tcg_const_i32(0xffffffff); tmp3 = tcg_const_i32((insn & 1) << 5); break; default: TCGV_UNUSED(tmp2); TCGV_UNUSED(tmp3); } gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3); tcg_temp_free(tmp3); tcg_temp_free(tmp2); dead_tmp(tmp); gen_op_iwmmxt_movq_wRn_M0(wrd); gen_op_iwmmxt_set_mup(); break; case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */ rd = (insn >> 12) & 0xf; wrd = (insn >> 16) & 0xf; if (rd == 15 || ((insn >> 22) & 3) == 3) return 1; gen_op_iwmmxt_movq_M0_wRn(wrd); tmp = new_tmp(); switch ((insn >> 22) & 3) { case 0: tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3); tcg_gen_trunc_i64_i32(tmp, cpu_M0); if (insn & 8) { tcg_gen_ext8s_i32(tmp, tmp); } else { tcg_gen_andi_i32(tmp, tmp, 0xff); } break; case 1: tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4); tcg_gen_trunc_i64_i32(tmp, cpu_M0); if (insn & 8) { tcg_gen_ext16s_i32(tmp, tmp); } else { tcg_gen_andi_i32(tmp, tmp, 0xffff); } break; case 2: tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5); tcg_gen_trunc_i64_i32(tmp, cpu_M0); break; } store_reg(s, rd, tmp); break; case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */ if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3) return 1; tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF); switch ((insn >> 22) & 3) { case 0: tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0); break; case 1: tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4); break; case 2: tcg_gen_shri_i32(tmp, tmp, ((insn 
& 1) << 4) + 12); break; } tcg_gen_shli_i32(tmp, tmp, 28); gen_set_nzcv(tmp); dead_tmp(tmp); break; case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */ if (((insn >> 6) & 3) == 3) return 1; rd = (insn >> 12) & 0xf; wrd = (insn >> 16) & 0xf; tmp = load_reg(s, rd); switch ((insn >> 6) & 3) { case 0: gen_helper_iwmmxt_bcstb(cpu_M0, tmp); break; case 1: gen_helper_iwmmxt_bcstw(cpu_M0, tmp); break; case 2: gen_helper_iwmmxt_bcstl(cpu_M0, tmp); break; } dead_tmp(tmp); gen_op_iwmmxt_movq_wRn_M0(wrd); gen_op_iwmmxt_set_mup(); break; case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */ if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3) return 1; tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF); tmp2 = new_tmp(); tcg_gen_mov_i32(tmp2, tmp); switch ((insn >> 22) & 3) { case 0: for (i = 0; i < 7; i ++) { tcg_gen_shli_i32(tmp2, tmp2, 4); tcg_gen_and_i32(tmp, tmp, tmp2); } break; case 1: for (i = 0; i < 3; i ++) { tcg_gen_shli_i32(tmp2, tmp2, 8); tcg_gen_and_i32(tmp, tmp, tmp2); } break; case 2: tcg_gen_shli_i32(tmp2, tmp2, 16); tcg_gen_and_i32(tmp, tmp, tmp2); break; } gen_set_nzcv(tmp); dead_tmp(tmp2); dead_tmp(tmp); break; case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */ wrd = (insn >> 12) & 0xf; rd0 = (insn >> 16) & 0xf; gen_op_iwmmxt_movq_M0_wRn(rd0); switch ((insn >> 22) & 3) { case 0: gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0); break; case 1: gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0); break; case 2: gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0); break; case 3: return 1; } gen_op_iwmmxt_movq_wRn_M0(wrd); gen_op_iwmmxt_set_mup(); break; case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */ if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3) return 1; tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF); tmp2 = new_tmp(); tcg_gen_mov_i32(tmp2, tmp); switch ((insn >> 22) & 3) { case 0: for (i = 0; i < 7; i ++) { tcg_gen_shli_i32(tmp2, tmp2, 4); tcg_gen_or_i32(tmp, tmp, tmp2); } break; case 1: for (i = 0; i < 3; i ++) { tcg_gen_shli_i32(tmp2, tmp2, 8); tcg_gen_or_i32(tmp, tmp, tmp2); } break; case 2: tcg_gen_shli_i32(tmp2, tmp2, 16); tcg_gen_or_i32(tmp, tmp, tmp2); break; } gen_set_nzcv(tmp); dead_tmp(tmp2); dead_tmp(tmp); break; case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */ rd = (insn >> 12) & 0xf; rd0 = (insn >> 16) & 0xf; if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3) return 1; gen_op_iwmmxt_movq_M0_wRn(rd0); tmp = new_tmp(); switch ((insn >> 22) & 3) { case 0: gen_helper_iwmmxt_msbb(tmp, cpu_M0); break; case 1: gen_helper_iwmmxt_msbw(tmp, cpu_M0); break; case 2: gen_helper_iwmmxt_msbl(tmp, cpu_M0); break; } store_reg(s, rd, tmp); break; case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */ case 0x906: case 0xb06: case 0xd06: case 0xf06: wrd = (insn >> 12) & 0xf; rd0 = (insn >> 16) & 0xf; rd1 = (insn >> 0) & 0xf; gen_op_iwmmxt_movq_M0_wRn(rd0); switch ((insn >> 22) & 3) { case 0: if (insn & (1 << 21)) gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1); else gen_op_iwmmxt_cmpgtub_M0_wRn(rd1); break; case 1: if (insn & (1 << 21)) gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1); else gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1); break; case 2: if (insn & (1 << 21)) gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1); else gen_op_iwmmxt_cmpgtul_M0_wRn(rd1); break; case 3: return 1; } gen_op_iwmmxt_movq_wRn_M0(wrd); gen_op_iwmmxt_set_mup(); gen_op_iwmmxt_set_cup(); break; case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */ case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e: wrd = (insn >> 12) & 0xf; rd0 = (insn >> 16) & 0xf; gen_op_iwmmxt_movq_M0_wRn(rd0); switch ((insn >> 22) & 3) { case 0: if (insn & 
(1 << 21)) gen_op_iwmmxt_unpacklsb_M0(); else gen_op_iwmmxt_unpacklub_M0(); break; case 1: if (insn & (1 << 21)) gen_op_iwmmxt_unpacklsw_M0(); else gen_op_iwmmxt_unpackluw_M0(); break; case 2: if (insn & (1 << 21)) gen_op_iwmmxt_unpacklsl_M0(); else gen_op_iwmmxt_unpacklul_M0(); break; case 3: return 1; } gen_op_iwmmxt_movq_wRn_M0(wrd); gen_op_iwmmxt_set_mup(); gen_op_iwmmxt_set_cup(); break; case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */ case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c: wrd = (insn >> 12) & 0xf; rd0 = (insn >> 16) & 0xf; gen_op_iwmmxt_movq_M0_wRn(rd0); switch ((insn >> 22) & 3) { case 0: if (insn & (1 << 21)) gen_op_iwmmxt_unpackhsb_M0(); else gen_op_iwmmxt_unpackhub_M0(); break; case 1: if (insn & (1 << 21)) gen_op_iwmmxt_unpackhsw_M0(); else gen_op_iwmmxt_unpackhuw_M0(); break; case 2: if (insn & (1 << 21)) gen_op_iwmmxt_unpackhsl_M0(); else gen_op_iwmmxt_unpackhul_M0(); break; case 3: return 1; } gen_op_iwmmxt_movq_wRn_M0(wrd); gen_op_iwmmxt_set_mup(); gen_op_iwmmxt_set_cup(); break; case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */ case 0x214: case 0x614: case 0xa14: case 0xe14: if (((insn >> 22) & 3) == 0) return 1; wrd = (insn >> 12) & 0xf; rd0 = (insn >> 16) & 0xf; gen_op_iwmmxt_movq_M0_wRn(rd0); tmp = new_tmp(); if (gen_iwmmxt_shift(insn, 0xff, tmp)) { dead_tmp(tmp); return 1; } switch ((insn >> 22) & 3) { case 1: gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp); break; case 2: gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp); break; case 3: gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp); break; } dead_tmp(tmp); gen_op_iwmmxt_movq_wRn_M0(wrd); gen_op_iwmmxt_set_mup(); gen_op_iwmmxt_set_cup(); break; case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */ case 0x014: case 0x414: case 0x814: case 0xc14: if (((insn >> 22) & 3) == 0) return 1; wrd = (insn >> 12) & 0xf; rd0 = (insn >> 16) & 0xf; gen_op_iwmmxt_movq_M0_wRn(rd0); tmp = new_tmp(); if (gen_iwmmxt_shift(insn, 0xff, tmp)) { dead_tmp(tmp); return 1; } switch ((insn >> 22) & 3) { case 1: gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp); break; case 2: gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp); break; case 3: gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp); break; } dead_tmp(tmp); gen_op_iwmmxt_movq_wRn_M0(wrd); gen_op_iwmmxt_set_mup(); gen_op_iwmmxt_set_cup(); break; case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */ case 0x114: case 0x514: case 0x914: case 0xd14: if (((insn >> 22) & 3) == 0) return 1; wrd = (insn >> 12) & 0xf; rd0 = (insn >> 16) & 0xf; gen_op_iwmmxt_movq_M0_wRn(rd0); tmp = new_tmp(); if (gen_iwmmxt_shift(insn, 0xff, tmp)) { dead_tmp(tmp); return 1; } switch ((insn >> 22) & 3) { case 1: gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp); break; case 2: gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp); break; case 3: gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp); break; } dead_tmp(tmp); gen_op_iwmmxt_movq_wRn_M0(wrd); gen_op_iwmmxt_set_mup(); gen_op_iwmmxt_set_cup(); break; case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */ case 0x314: case 0x714: case 0xb14: case 0xf14: if (((insn >> 22) & 3) == 0) return 1; wrd = (insn >> 12) & 0xf; rd0 = (insn >> 16) & 0xf; gen_op_iwmmxt_movq_M0_wRn(rd0); tmp = new_tmp(); switch ((insn >> 22) & 3) { case 1: if (gen_iwmmxt_shift(insn, 0xf, tmp)) { dead_tmp(tmp); return 1; } gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp); break; case 2: if (gen_iwmmxt_shift(insn, 0x1f, tmp)) { dead_tmp(tmp); return 1; } gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp); break; case 
3: if (gen_iwmmxt_shift(insn, 0x3f, tmp)) { dead_tmp(tmp); return 1; } gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp); break; } dead_tmp(tmp); gen_op_iwmmxt_movq_wRn_M0(wrd); gen_op_iwmmxt_set_mup(); gen_op_iwmmxt_set_cup(); break; case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */ case 0x916: case 0xb16: case 0xd16: case 0xf16: wrd = (insn >> 12) & 0xf; rd0 = (insn >> 16) & 0xf; rd1 = (insn >> 0) & 0xf; gen_op_iwmmxt_movq_M0_wRn(rd0); switch ((insn >> 22) & 3) { case 0: if (insn & (1 << 21)) gen_op_iwmmxt_minsb_M0_wRn(rd1); else gen_op_iwmmxt_minub_M0_wRn(rd1); break; case 1: if (insn & (1 << 21)) gen_op_iwmmxt_minsw_M0_wRn(rd1); else gen_op_iwmmxt_minuw_M0_wRn(rd1); break; case 2: if (insn & (1 << 21)) gen_op_iwmmxt_minsl_M0_wRn(rd1); else gen_op_iwmmxt_minul_M0_wRn(rd1); break; case 3: return 1; } gen_op_iwmmxt_movq_wRn_M0(wrd); gen_op_iwmmxt_set_mup(); break; case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */ case 0x816: case 0xa16: case 0xc16: case 0xe16: wrd = (insn >> 12) & 0xf; rd0 = (insn >> 16) & 0xf; rd1 = (insn >> 0) & 0xf; gen_op_iwmmxt_movq_M0_wRn(rd0); switch ((insn >> 22) & 3) { case 0: if (insn & (1 << 21)) gen_op_iwmmxt_maxsb_M0_wRn(rd1); else gen_op_iwmmxt_maxub_M0_wRn(rd1); break; case 1: if (insn & (1 << 21)) gen_op_iwmmxt_maxsw_M0_wRn(rd1); else gen_op_iwmmxt_maxuw_M0_wRn(rd1); break; case 2: if (insn & (1 << 21)) gen_op_iwmmxt_maxsl_M0_wRn(rd1); else gen_op_iwmmxt_maxul_M0_wRn(rd1); break; case 3: return 1; } gen_op_iwmmxt_movq_wRn_M0(wrd); gen_op_iwmmxt_set_mup(); break; case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */ case 0x402: case 0x502: case 0x602: case 0x702: wrd = (insn >> 12) & 0xf; rd0 = (insn >> 16) & 0xf; rd1 = (insn >> 0) & 0xf; gen_op_iwmmxt_movq_M0_wRn(rd0); tmp = tcg_const_i32((insn >> 20) & 3); iwmmxt_load_reg(cpu_V1, rd1); gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp); tcg_temp_free(tmp); gen_op_iwmmxt_movq_wRn_M0(wrd); gen_op_iwmmxt_set_mup(); break; case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */ case 0x41a: case 0x51a: case 0x61a: case 0x71a: case 0x81a: case 0x91a: case 0xa1a: case 0xb1a: case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a: wrd = (insn >> 12) & 0xf; rd0 = (insn >> 16) & 0xf; rd1 = (insn >> 0) & 0xf; gen_op_iwmmxt_movq_M0_wRn(rd0); switch ((insn >> 20) & 0xf) { case 0x0: gen_op_iwmmxt_subnb_M0_wRn(rd1); break; case 0x1: gen_op_iwmmxt_subub_M0_wRn(rd1); break; case 0x3: gen_op_iwmmxt_subsb_M0_wRn(rd1); break; case 0x4: gen_op_iwmmxt_subnw_M0_wRn(rd1); break; case 0x5: gen_op_iwmmxt_subuw_M0_wRn(rd1); break; case 0x7: gen_op_iwmmxt_subsw_M0_wRn(rd1); break; case 0x8: gen_op_iwmmxt_subnl_M0_wRn(rd1); break; case 0x9: gen_op_iwmmxt_subul_M0_wRn(rd1); break; case 0xb: gen_op_iwmmxt_subsl_M0_wRn(rd1); break; default: return 1; } gen_op_iwmmxt_movq_wRn_M0(wrd); gen_op_iwmmxt_set_mup(); gen_op_iwmmxt_set_cup(); break; case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */ case 0x41e: case 0x51e: case 0x61e: case 0x71e: case 0x81e: case 0x91e: case 0xa1e: case 0xb1e: case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e: wrd = (insn >> 12) & 0xf; rd0 = (insn >> 16) & 0xf; gen_op_iwmmxt_movq_M0_wRn(rd0); tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f)); gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp); tcg_temp_free(tmp); gen_op_iwmmxt_movq_wRn_M0(wrd); gen_op_iwmmxt_set_mup(); gen_op_iwmmxt_set_cup(); break; case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */ case 0x418: case 0x518: case 0x618: case 0x718: case 0x818: case 0x918: case 0xa18: case 0xb18: case 0xc18: case 
0xd18: case 0xe18: case 0xf18: wrd = (insn >> 12) & 0xf; rd0 = (insn >> 16) & 0xf; rd1 = (insn >> 0) & 0xf; gen_op_iwmmxt_movq_M0_wRn(rd0); switch ((insn >> 20) & 0xf) { case 0x0: gen_op_iwmmxt_addnb_M0_wRn(rd1); break; case 0x1: gen_op_iwmmxt_addub_M0_wRn(rd1); break; case 0x3: gen_op_iwmmxt_addsb_M0_wRn(rd1); break; case 0x4: gen_op_iwmmxt_addnw_M0_wRn(rd1); break; case 0x5: gen_op_iwmmxt_adduw_M0_wRn(rd1); break; case 0x7: gen_op_iwmmxt_addsw_M0_wRn(rd1); break; case 0x8: gen_op_iwmmxt_addnl_M0_wRn(rd1); break; case 0x9: gen_op_iwmmxt_addul_M0_wRn(rd1); break; case 0xb: gen_op_iwmmxt_addsl_M0_wRn(rd1); break; default: return 1; } gen_op_iwmmxt_movq_wRn_M0(wrd); gen_op_iwmmxt_set_mup(); gen_op_iwmmxt_set_cup(); break; case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */ case 0x408: case 0x508: case 0x608: case 0x708: case 0x808: case 0x908: case 0xa08: case 0xb08: case 0xc08: case 0xd08: case 0xe08: case 0xf08: if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0) return 1; wrd = (insn >> 12) & 0xf; rd0 = (insn >> 16) & 0xf; rd1 = (insn >> 0) & 0xf; gen_op_iwmmxt_movq_M0_wRn(rd0); switch ((insn >> 22) & 3) { case 1: if (insn & (1 << 21)) gen_op_iwmmxt_packsw_M0_wRn(rd1); else gen_op_iwmmxt_packuw_M0_wRn(rd1); break; case 2: if (insn & (1 << 21)) gen_op_iwmmxt_packsl_M0_wRn(rd1); else gen_op_iwmmxt_packul_M0_wRn(rd1); break; case 3: if (insn & (1 << 21)) gen_op_iwmmxt_packsq_M0_wRn(rd1); else gen_op_iwmmxt_packuq_M0_wRn(rd1); break; } gen_op_iwmmxt_movq_wRn_M0(wrd); gen_op_iwmmxt_set_mup(); gen_op_iwmmxt_set_cup(); break; case 0x201: case 0x203: case 0x205: case 0x207: case 0x209: case 0x20b: case 0x20d: case 0x20f: case 0x211: case 0x213: case 0x215: case 0x217: case 0x219: case 0x21b: case 0x21d: case 0x21f: wrd = (insn >> 5) & 0xf; rd0 = (insn >> 12) & 0xf; rd1 = (insn >> 0) & 0xf; if (rd0 == 0xf || rd1 == 0xf) return 1; gen_op_iwmmxt_movq_M0_wRn(wrd); tmp = load_reg(s, rd0); tmp2 = load_reg(s, rd1); switch ((insn >> 16) & 0xf) { case 0x0: /* TMIA */ gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2); break; case 0x8: /* TMIAPH */ gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2); break; case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */ if (insn & (1 << 16)) tcg_gen_shri_i32(tmp, tmp, 16); if (insn & (1 << 17)) tcg_gen_shri_i32(tmp2, tmp2, 16); gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2); break; default: dead_tmp(tmp2); dead_tmp(tmp); return 1; } dead_tmp(tmp2); dead_tmp(tmp); gen_op_iwmmxt_movq_wRn_M0(wrd); gen_op_iwmmxt_set_mup(); break; default: return 1; } return 0; } | 22,700 |
1 | static void unterminated_dict_comma(void) { QObject *obj = qobject_from_json("{'abc':32,", NULL); g_assert(obj == NULL); } | 22,701 |
1 | static void rtl8139_io_writel(void *opaque, uint8_t addr, uint32_t val) { RTL8139State *s = opaque; addr &= 0xfc; switch (addr) { case RxMissed: DPRINTF("RxMissed clearing on write\n"); s->RxMissed = 0; break; case TxConfig: rtl8139_TxConfig_write(s, val); break; case RxConfig: rtl8139_RxConfig_write(s, val); break; case TxStatus0 ... TxStatus0+4*4-1: rtl8139_TxStatus_write(s, addr-TxStatus0, val); break; case TxAddr0 ... TxAddr0+4*4-1: rtl8139_TxAddr_write(s, addr-TxAddr0, val); break; case RxBuf: rtl8139_RxBuf_write(s, val); break; case RxRingAddrLO: DPRINTF("C+ RxRing low bits write val=0x%08x\n", val); s->RxRingAddrLO = val; break; case RxRingAddrHI: DPRINTF("C+ RxRing high bits write val=0x%08x\n", val); s->RxRingAddrHI = val; break; case Timer: DPRINTF("TCTR Timer reset on write\n"); s->TCTR_base = qemu_get_clock_ns(vm_clock); rtl8139_set_next_tctr_time(s, s->TCTR_base); break; case FlashReg: DPRINTF("FlashReg TimerInt write val=0x%08x\n", val); if (s->TimerInt != val) { s->TimerInt = val; rtl8139_set_next_tctr_time(s, qemu_get_clock_ns(vm_clock)); } break; default: DPRINTF("ioport write(l) addr=0x%x val=0x%08x via write(b)\n", addr, val); rtl8139_io_writeb(opaque, addr, val & 0xff); rtl8139_io_writeb(opaque, addr + 1, (val >> 8) & 0xff); rtl8139_io_writeb(opaque, addr + 2, (val >> 16) & 0xff); rtl8139_io_writeb(opaque, addr + 3, (val >> 24) & 0xff); break; } } | 22,703 |
1 | static int dvvideo_close(AVCodecContext *c) { DVVideoContext *s = c->priv_data; av_free(s->dv_anchor); return 0; } | 22,704 |
1 | udp_attach(struct socket *so) { if((so->s = socket(AF_INET,SOCK_DGRAM,0)) != -1) { so->so_expire = curtime + SO_EXPIRE; insque(so, &so->slirp->udb); } return(so->s); } | 22,706 |
1 | static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples) { AVFilterContext *ctx = inlink->dst; AVFilterLink *outlink = ctx->outputs[0]; ShowWavesContext *showwaves = ctx->priv; const int nb_samples = insamples->audio->nb_samples; AVFilterBufferRef *outpicref = showwaves->outpicref; int linesize = outpicref ? outpicref->linesize[0] : 0; int16_t *p = (int16_t *)insamples->data[0]; int nb_channels = av_get_channel_layout_nb_channels(insamples->audio->channel_layout); int i, j, h; const int n = showwaves->n; const int x = 255 / (nb_channels * n); /* multiplication factor, pre-computed to avoid in-loop divisions */ /* draw data in the buffer */ for (i = 0; i < nb_samples; i++) { if (!outpicref) { showwaves->outpicref = outpicref = ff_get_video_buffer(outlink, AV_PERM_WRITE|AV_PERM_ALIGN, outlink->w, outlink->h); if (!outpicref) return AVERROR(ENOMEM); outpicref->video->w = outlink->w; outpicref->video->h = outlink->h; outpicref->pts = insamples->pts + av_rescale_q((p - (int16_t *)insamples->data[0]) / nb_channels, (AVRational){ 1, inlink->sample_rate }, outlink->time_base); linesize = outpicref->linesize[0]; memset(outpicref->data[0], 0, showwaves->h*linesize); } for (j = 0; j < nb_channels; j++) { h = showwaves->h/2 - av_rescale(*p++, showwaves->h/2, MAX_INT16); if (h >= 0 && h < outlink->h) *(outpicref->data[0] + showwaves->buf_idx + h * linesize) += x; } showwaves->sample_count_mod++; if (showwaves->sample_count_mod == n) { showwaves->sample_count_mod = 0; showwaves->buf_idx++; } if (showwaves->buf_idx == showwaves->w) push_frame(outlink); } avfilter_unref_buffer(insamples); return 0; } | 22,707 |
1 | rgb16_32ToUV_half_c_template(uint8_t *dstU, uint8_t *dstV, const uint8_t *src, int width, enum PixelFormat origin, int shr, int shg, int shb, int shp, int maskr, int maskg, int maskb, int rsh, int gsh, int bsh, int S) { const int ru = RU << rsh, gu = GU << gsh, bu = BU << bsh, rv = RV << rsh, gv = GV << gsh, bv = BV << bsh, rnd = 257 << S, maskgx = ~(maskr | maskb); int i; maskr |= maskr << 1; maskb |= maskb << 1; maskg |= maskg << 1; for (i = 0; i < width; i++) { int px0 = input_pixel(2 * i + 0) >> shp; int px1 = input_pixel(2 * i + 1) >> shp; int b, r, g = (px0 & maskgx) + (px1 & maskgx); int rb = px0 + px1 - g; b = (rb & maskb) >> shb; if (shp || origin == PIX_FMT_BGR565LE || origin == PIX_FMT_BGR565BE || origin == PIX_FMT_RGB565LE || origin == PIX_FMT_RGB565BE) { g >>= shg; } else { g = (g & maskg) >> shg; } r = (rb & maskr) >> shr; dstU[i] = (ru * r + gu * g + bu * b + rnd) >> (S + 1); dstV[i] = (rv * r + gv * g + bv * b + rnd) >> (S + 1); } } | 22,708 |
1 | __org_qemu_x_Union1 *qmp___org_qemu_x_command(__org_qemu_x_EnumList *a, __org_qemu_x_StructList *b, __org_qemu_x_Union2 *c, __org_qemu_x_Alt *d, Error **errp) { __org_qemu_x_Union1 *ret = g_new0(__org_qemu_x_Union1, 1); ret->type = ORG_QEMU_X_UNION1_KIND___ORG_QEMU_X_BRANCH; ret->u.__org_qemu_x_branch = strdup("blah1"); return ret; } | 22,711
1 | static int64_t realloc_refcount_block(BlockDriverState *bs, int reftable_index, uint64_t offset) { BDRVQcowState *s = bs->opaque; int64_t new_offset = 0; void *refcount_block = NULL; int ret; /* allocate new refcount block */ new_offset = qcow2_alloc_clusters(bs, s->cluster_size); if (new_offset < 0) { fprintf(stderr, "Could not allocate new cluster: %s\n", strerror(-new_offset)); ret = new_offset; goto fail; } /* fetch current refcount block content */ ret = qcow2_cache_get(bs, s->refcount_block_cache, offset, &refcount_block); if (ret < 0) { fprintf(stderr, "Could not fetch refcount block: %s\n", strerror(-ret)); goto fail; } /* new block has not yet been entered into refcount table, therefore it is * no refcount block yet (regarding this check) */ ret = qcow2_pre_write_overlap_check(bs, 0, new_offset, s->cluster_size); if (ret < 0) { fprintf(stderr, "Could not write refcount block; metadata overlap " "check failed: %s\n", strerror(-ret)); /* the image will be marked corrupt, so don't even attempt on freeing * the cluster */ new_offset = 0; goto fail; } /* write to new block */ ret = bdrv_write(bs->file, new_offset / BDRV_SECTOR_SIZE, refcount_block, s->cluster_sectors); if (ret < 0) { fprintf(stderr, "Could not write refcount block: %s\n", strerror(-ret)); goto fail; } /* update refcount table */ assert(!offset_into_cluster(s, new_offset)); s->refcount_table[reftable_index] = new_offset; ret = write_reftable_entry(bs, reftable_index); if (ret < 0) { fprintf(stderr, "Could not update refcount table: %s\n", strerror(-ret)); goto fail; } fail: if (new_offset && (ret < 0)) { qcow2_free_clusters(bs, new_offset, s->cluster_size, QCOW2_DISCARD_ALWAYS); } if (refcount_block) { if (ret < 0) { qcow2_cache_put(bs, s->refcount_block_cache, &refcount_block); } else { ret = qcow2_cache_put(bs, s->refcount_block_cache, &refcount_block); } } if (ret < 0) { return ret; } return new_offset; } | 22,712 |
1 | static ssize_t imx_enet_receive(NetClientState *nc, const uint8_t *buf, size_t len) { IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc)); IMXENETBufDesc bd; uint32_t flags = 0; uint32_t addr; uint32_t crc; uint32_t buf_addr; uint8_t *crc_ptr; unsigned int buf_len; size_t size = len; FEC_PRINTF("len %d\n", (int)size); if (!s->regs[ENET_RDAR]) { qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Unexpected packet\n", TYPE_IMX_FEC, __func__); return 0; } /* 4 bytes for the CRC. */ size += 4; crc = cpu_to_be32(crc32(~0, buf, size)); crc_ptr = (uint8_t *) &crc; /* Huge frames are truncted. */ if (size > ENET_MAX_FRAME_SIZE) { size = ENET_MAX_FRAME_SIZE; flags |= ENET_BD_TR | ENET_BD_LG; } /* Frames larger than the user limit just set error flags. */ if (size > (s->regs[ENET_RCR] >> 16)) { flags |= ENET_BD_LG; } addr = s->rx_descriptor; while (size > 0) { imx_enet_read_bd(&bd, addr); if ((bd.flags & ENET_BD_E) == 0) { /* No descriptors available. Bail out. */ /* * FIXME: This is wrong. We should probably either * save the remainder for when more RX buffers are * available, or flag an error. */ qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Lost end of frame\n", TYPE_IMX_FEC, __func__); break; } buf_len = (size <= s->regs[ENET_MRBR]) ? size : s->regs[ENET_MRBR]; bd.length = buf_len; size -= buf_len; FEC_PRINTF("rx_bd 0x%x length %d\n", addr, bd.length); /* The last 4 bytes are the CRC. */ if (size < 4) { buf_len += size - 4; } buf_addr = bd.data; dma_memory_write(&address_space_memory, buf_addr, buf, buf_len); buf += buf_len; if (size < 4) { dma_memory_write(&address_space_memory, buf_addr + buf_len, crc_ptr, 4 - size); crc_ptr += 4 - size; } bd.flags &= ~ENET_BD_E; if (size == 0) { /* Last buffer in frame. */ bd.flags |= flags | ENET_BD_L; FEC_PRINTF("rx frame flags %04x\n", bd.flags); if (bd.option & ENET_BD_RX_INT) { s->regs[ENET_EIR] |= ENET_INT_RXF; } } else { if (bd.option & ENET_BD_RX_INT) { s->regs[ENET_EIR] |= ENET_INT_RXB; } } imx_enet_write_bd(&bd, addr); /* Advance to the next descriptor. */ if ((bd.flags & ENET_BD_W) != 0) { addr = s->regs[ENET_RDSR]; } else { addr += sizeof(bd); } } s->rx_descriptor = addr; imx_eth_enable_rx(s); imx_eth_update(s); return len; } | 22,713 |
1 | static void test_machine(gconstpointer data) { const char *machine = data; char *args; QDict *response; args = g_strdup_printf("-machine %s", machine); qtest_start(args); test_properties("/machine"); response = qmp("{ 'execute': 'quit' }"); g_assert(qdict_haskey(response, "return")); qtest_end(); g_free(args); } | 22,714 |
1 | int av_reallocp_array(void *ptr, size_t nmemb, size_t size) { void **ptrptr = ptr; *ptrptr = av_realloc_f(*ptrptr, nmemb, size); if (!*ptrptr && !(nmemb && size)) return AVERROR(ENOMEM); return 0; } | 22,715 |
1 | bool qemu_co_queue_next(CoQueue *queue) { struct unlock_bh *unlock_bh; Coroutine *next; next = QTAILQ_FIRST(&queue->entries); if (next) { QTAILQ_REMOVE(&queue->entries, next, co_queue_next); QTAILQ_INSERT_TAIL(&unlock_bh_queue, next, co_queue_next); trace_qemu_co_queue_next(next); unlock_bh = qemu_malloc(sizeof(*unlock_bh)); unlock_bh->bh = qemu_bh_new(qemu_co_queue_next_bh, unlock_bh); qemu_bh_schedule(unlock_bh->bh); } return (next != NULL); } | 22,716 |
1 | static int aac_decode_frame_int(AVCodecContext *avctx, void *data, int *got_frame_ptr, GetBitContext *gb, AVPacket *avpkt) { AACContext *ac = avctx->priv_data; ChannelElement *che = NULL, *che_prev = NULL; enum RawDataBlockType elem_type, che_prev_type = TYPE_END; int err, elem_id; int samples = 0, multiplier, audio_found = 0, pce_found = 0; int is_dmono, sce_count = 0; int payload_alignment; ac->frame = data; if (show_bits(gb, 12) == 0xfff) { if ((err = parse_adts_frame_header(ac, gb)) < 0) { av_log(avctx, AV_LOG_ERROR, "Error decoding AAC frame header.\n"); goto fail; } if (ac->oc[1].m4ac.sampling_index > 12) { av_log(ac->avctx, AV_LOG_ERROR, "invalid sampling rate index %d\n", ac->oc[1].m4ac.sampling_index); err = AVERROR_INVALIDDATA; goto fail; } } if ((err = frame_configure_elements(avctx)) < 0) goto fail; // The FF_PROFILE_AAC_* defines are all object_type - 1 // This may lead to an undefined profile being signaled ac->avctx->profile = ac->oc[1].m4ac.object_type - 1; payload_alignment = get_bits_count(gb); ac->tags_mapped = 0; // parse while ((elem_type = get_bits(gb, 3)) != TYPE_END) { elem_id = get_bits(gb, 4); if (avctx->debug & FF_DEBUG_STARTCODE) av_log(avctx, AV_LOG_DEBUG, "Elem type:%x id:%x\n", elem_type, elem_id); if (!avctx->channels && elem_type != TYPE_PCE) { err = AVERROR_INVALIDDATA; goto fail; } if (elem_type < TYPE_DSE) { if (!(che=get_che(ac, elem_type, elem_id))) { av_log(ac->avctx, AV_LOG_ERROR, "channel element %d.%d is not allocated\n", elem_type, elem_id); err = AVERROR_INVALIDDATA; goto fail; } samples = 1024; che->present = 1; } switch (elem_type) { case TYPE_SCE: err = decode_ics(ac, &che->ch[0], gb, 0, 0); audio_found = 1; sce_count++; break; case TYPE_CPE: err = decode_cpe(ac, gb, che); audio_found = 1; break; case TYPE_CCE: err = decode_cce(ac, gb, che); break; case TYPE_LFE: err = decode_ics(ac, &che->ch[0], gb, 0, 0); audio_found = 1; break; case TYPE_DSE: err = skip_data_stream_element(ac, gb); break; case TYPE_PCE: { uint8_t layout_map[MAX_ELEM_ID*4][3]; int tags; push_output_configuration(ac); tags = decode_pce(avctx, &ac->oc[1].m4ac, layout_map, gb, payload_alignment); if (tags < 0) { err = tags; break; } if (pce_found) { av_log(avctx, AV_LOG_ERROR, "Not evaluating a further program_config_element as this construct is dubious at best.\n"); pop_output_configuration(ac); } else { err = output_configure(ac, layout_map, tags, OC_TRIAL_PCE, 1); if (!err) ac->oc[1].m4ac.chan_config = 0; pce_found = 1; } break; } case TYPE_FIL: if (elem_id == 15) elem_id += get_bits(gb, 8) - 1; if (get_bits_left(gb) < 8 * elem_id) { av_log(avctx, AV_LOG_ERROR, "TYPE_FIL: "overread_err); err = AVERROR_INVALIDDATA; goto fail; } while (elem_id > 0) elem_id -= decode_extension_payload(ac, gb, elem_id, che_prev, che_prev_type); err = 0; /* FIXME */ break; default: err = AVERROR_BUG; /* should not happen, but keeps compiler happy */ break; } if (elem_type < TYPE_DSE) { che_prev = che; che_prev_type = elem_type; } if (err) goto fail; if (get_bits_left(gb) < 3) { av_log(avctx, AV_LOG_ERROR, overread_err); err = AVERROR_INVALIDDATA; goto fail; } } if (!avctx->channels) { *got_frame_ptr = 0; return 0; } multiplier = (ac->oc[1].m4ac.sbr == 1) ? 
ac->oc[1].m4ac.ext_sample_rate > ac->oc[1].m4ac.sample_rate : 0; samples <<= multiplier; spectral_to_sample(ac, samples); if (ac->oc[1].status && audio_found) { avctx->sample_rate = ac->oc[1].m4ac.sample_rate << multiplier; avctx->frame_size = samples; ac->oc[1].status = OC_LOCKED; } if (multiplier) avctx->internal->skip_samples_multiplier = 2; if (!ac->frame->data[0] && samples) { av_log(avctx, AV_LOG_ERROR, "no frame data found\n"); err = AVERROR_INVALIDDATA; goto fail; } if (samples) { ac->frame->nb_samples = samples; ac->frame->sample_rate = avctx->sample_rate; } else av_frame_unref(ac->frame); *got_frame_ptr = !!samples; /* for dual-mono audio (SCE + SCE) */ is_dmono = ac->dmono_mode && sce_count == 2 && ac->oc[1].channel_layout == (AV_CH_FRONT_LEFT | AV_CH_FRONT_RIGHT); if (is_dmono) { if (ac->dmono_mode == 1) ((AVFrame *)data)->data[1] =((AVFrame *)data)->data[0]; else if (ac->dmono_mode == 2) ((AVFrame *)data)->data[0] =((AVFrame *)data)->data[1]; } return 0; fail: pop_output_configuration(ac); return err; } | 22,717 |
1 | static void spatial_compose97i_dy_buffered(dwt_compose_t *cs, slice_buffer * sb, int width, int height, int stride_line){ int y = cs->y; int mirror0 = mirror(y - 1, height - 1); int mirror1 = mirror(y + 0, height - 1); int mirror2 = mirror(y + 1, height - 1); int mirror3 = mirror(y + 2, height - 1); int mirror4 = mirror(y + 3, height - 1); int mirror5 = mirror(y + 4, height - 1); DWTELEM *b0= cs->b0; DWTELEM *b1= cs->b1; DWTELEM *b2= cs->b2; DWTELEM *b3= cs->b3; DWTELEM *b4= slice_buffer_get_line(sb, mirror4 * stride_line); DWTELEM *b5= slice_buffer_get_line(sb, mirror5 * stride_line); {START_TIMER if(y>0 && y+4<height){ vertical_compose97i(b0, b1, b2, b3, b4, b5, width); }else{ if(mirror3 <= mirror5) vertical_compose97iL1(b3, b4, b5, width); if(mirror2 <= mirror4) vertical_compose97iH1(b2, b3, b4, width); if(mirror1 <= mirror3) vertical_compose97iL0(b1, b2, b3, width); if(mirror0 <= mirror2) vertical_compose97iH0(b0, b1, b2, width); } if(width>400){ STOP_TIMER("vertical_compose97i")}} {START_TIMER if(y-1>= 0) horizontal_compose97i(b0, width); if(mirror0 <= mirror2) horizontal_compose97i(b1, width); if(width>400 && mirror0 <= mirror2){ STOP_TIMER("horizontal_compose97i")}} cs->b0=b2; cs->b1=b3; cs->b2=b4; cs->b3=b5; cs->y += 2; } | 22,719 |
1 | static void read_table(AVFormatContext *avctx, AVStream *st, int (*parse)(AVFormatContext *avctx, AVStream *st, const char *name, int size)) { int count, i; AVIOContext *pb = avctx->pb; avio_skip(pb, 4); count = avio_rb32(pb); avio_skip(pb, 4); for (i = 0; i < count; i++) { char name[17]; int size; avio_read(pb, name, 16); name[sizeof(name) - 1] = 0; size = avio_rb32(pb); if (parse(avctx, st, name, size) < 0) { avpriv_request_sample(avctx, "Variable %s", name); avio_skip(pb, size); } } } | 22,720 |
0 | static inline void RENAME(rgb24to15)(const uint8_t *src, uint8_t *dst, long src_size) { const uint8_t *s = src; const uint8_t *end; #if COMPILE_TEMPLATE_MMX const uint8_t *mm_end; #endif uint16_t *d = (uint16_t *)dst; end = s + src_size; #if COMPILE_TEMPLATE_MMX __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory"); __asm__ volatile( "movq %0, %%mm7 \n\t" "movq %1, %%mm6 \n\t" ::"m"(red_15mask),"m"(green_15mask)); mm_end = end - 15; while (s < mm_end) { __asm__ volatile( PREFETCH" 32%1 \n\t" "movd %1, %%mm0 \n\t" "movd 3%1, %%mm3 \n\t" "punpckldq 6%1, %%mm0 \n\t" "punpckldq 9%1, %%mm3 \n\t" "movq %%mm0, %%mm1 \n\t" "movq %%mm0, %%mm2 \n\t" "movq %%mm3, %%mm4 \n\t" "movq %%mm3, %%mm5 \n\t" "psllq $7, %%mm0 \n\t" "psllq $7, %%mm3 \n\t" "pand %%mm7, %%mm0 \n\t" "pand %%mm7, %%mm3 \n\t" "psrlq $6, %%mm1 \n\t" "psrlq $6, %%mm4 \n\t" "pand %%mm6, %%mm1 \n\t" "pand %%mm6, %%mm4 \n\t" "psrlq $19, %%mm2 \n\t" "psrlq $19, %%mm5 \n\t" "pand %2, %%mm2 \n\t" "pand %2, %%mm5 \n\t" "por %%mm1, %%mm0 \n\t" "por %%mm4, %%mm3 \n\t" "por %%mm2, %%mm0 \n\t" "por %%mm5, %%mm3 \n\t" "psllq $16, %%mm3 \n\t" "por %%mm3, %%mm0 \n\t" MOVNTQ" %%mm0, %0 \n\t" :"=m"(*d):"m"(*s),"m"(blue_15mask):"memory"); d += 4; s += 12; } __asm__ volatile(SFENCE:::"memory"); __asm__ volatile(EMMS:::"memory"); #endif while (s < end) { const int r = *s++; const int g = *s++; const int b = *s++; *d++ = (b>>3) | ((g&0xF8)<<2) | ((r&0xF8)<<7); } } | 22,721 |
0 | static int avi_extract_stream_metadata(AVFormatContext *s, AVStream *st) { GetByteContext gb; uint8_t *data = st->codecpar->extradata; int data_size = st->codecpar->extradata_size; int tag, offset; if (!data || data_size < 8) { return AVERROR_INVALIDDATA; } bytestream2_init(&gb, data, data_size); tag = bytestream2_get_le32(&gb); switch (tag) { case MKTAG('A', 'V', 'I', 'F'): // skip 4 byte padding bytestream2_skip(&gb, 4); offset = bytestream2_tell(&gb); bytestream2_init(&gb, data + offset, data_size - offset); // decode EXIF tags from IFD, AVI is always little-endian return avpriv_exif_decode_ifd(s, &gb, 1, 0, &st->metadata); break; case MKTAG('C', 'A', 'S', 'I'): avpriv_request_sample(s, "RIFF stream data tag type CASI (%u)", tag); break; case MKTAG('Z', 'o', 'r', 'a'): avpriv_request_sample(s, "RIFF stream data tag type Zora (%u)", tag); break; default: break; } return 0; } | 22,722 |
0 | static void virtio_vmstate_change(void *opaque, int running, RunState state) { VirtIODevice *vdev = opaque; BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); bool backend_run = running && (vdev->status & VIRTIO_CONFIG_S_DRIVER_OK); if (running) { vdev->vm_running = running; } if (backend_run) { virtio_set_status(vdev, vdev->status); } if (k->vmstate_change) { k->vmstate_change(qbus->parent, backend_run); } if (!backend_run) { virtio_set_status(vdev, vdev->status); } if (!running) { vdev->vm_running = running; } } | 22,723 |
0 | static int qcow2_set_key(BlockDriverState *bs, const char *key) { BDRVQcow2State *s = bs->opaque; uint8_t keybuf[16]; int len, i; Error *err = NULL; memset(keybuf, 0, 16); len = strlen(key); if (len > 16) len = 16; /* XXX: we could compress the chars to 7 bits to increase entropy */ for(i = 0;i < len;i++) { keybuf[i] = key[i]; } assert(bs->encrypted); qcrypto_cipher_free(s->cipher); s->cipher = qcrypto_cipher_new( QCRYPTO_CIPHER_ALG_AES_128, QCRYPTO_CIPHER_MODE_CBC, keybuf, G_N_ELEMENTS(keybuf), &err); if (!s->cipher) { /* XXX would be nice if errors in this method could * be properly propagate to the caller. Would need * the bdrv_set_key() API signature to be fixed. */ error_free(err); return -1; } return 0; } | 22,725 |
0 | void helper_set_cp15(CPUState *env, uint32_t insn, uint32_t val) { uint32_t op2; uint32_t crm; op2 = (insn >> 5) & 7; crm = insn & 0xf; switch ((insn >> 16) & 0xf) { case 0: /* ID codes. */ if (arm_feature(env, ARM_FEATURE_XSCALE)) break; if (arm_feature(env, ARM_FEATURE_OMAPCP)) break; goto bad_reg; case 1: /* System configuration. */ if (arm_feature(env, ARM_FEATURE_OMAPCP)) op2 = 0; switch (op2) { case 0: if (!arm_feature(env, ARM_FEATURE_XSCALE) || crm == 0) env->cp15.c1_sys = val; /* ??? Lots of these bits are not implemented. */ /* This may enable/disable the MMU, so do a TLB flush. */ tlb_flush(env, 1); break; case 1: if (arm_feature(env, ARM_FEATURE_XSCALE)) { env->cp15.c1_xscaleauxcr = val; break; } goto bad_reg; case 2: if (arm_feature(env, ARM_FEATURE_XSCALE)) goto bad_reg; env->cp15.c1_coproc = val; /* ??? Is this safe when called from within a TB? */ tb_flush(env); break; default: goto bad_reg; } break; case 2: /* MMU Page table control / MPU cache control. */ if (arm_feature(env, ARM_FEATURE_MPU)) { switch (op2) { case 0: env->cp15.c2_data = val; break; case 1: env->cp15.c2_insn = val; break; default: goto bad_reg; } } else { env->cp15.c2_base = val; } break; case 3: /* MMU Domain access control / MPU write buffer control. */ env->cp15.c3 = val; break; case 4: /* Reserved. */ goto bad_reg; case 5: /* MMU Fault status / MPU access permission. */ if (arm_feature(env, ARM_FEATURE_OMAPCP)) op2 = 0; switch (op2) { case 0: if (arm_feature(env, ARM_FEATURE_MPU)) val = extended_mpu_ap_bits(val); env->cp15.c5_data = val; break; case 1: if (arm_feature(env, ARM_FEATURE_MPU)) val = extended_mpu_ap_bits(val); env->cp15.c5_insn = val; break; case 2: if (!arm_feature(env, ARM_FEATURE_MPU)) goto bad_reg; env->cp15.c5_data = val; break; case 3: if (!arm_feature(env, ARM_FEATURE_MPU)) goto bad_reg; env->cp15.c5_insn = val; break; default: goto bad_reg; } break; case 6: /* MMU Fault address / MPU base/size. */ if (arm_feature(env, ARM_FEATURE_MPU)) { if (crm >= 8) goto bad_reg; env->cp15.c6_region[crm] = val; } else { if (arm_feature(env, ARM_FEATURE_OMAPCP)) op2 = 0; switch (op2) { case 0: env->cp15.c6_data = val; break; case 1: env->cp15.c6_insn = val; break; default: goto bad_reg; } } break; case 7: /* Cache control. */ env->cp15.c15_i_max = 0x000; env->cp15.c15_i_min = 0xff0; /* No cache, so nothing to do. */ break; case 8: /* MMU TLB control. */ switch (op2) { case 0: /* Invalidate all. */ tlb_flush(env, 0); break; case 1: /* Invalidate single TLB entry. */ #if 0 /* ??? This is wrong for large pages and sections. */ /* As an ugly hack to make linux work we always flush a 4K pages. */ val &= 0xfffff000; tlb_flush_page(env, val); tlb_flush_page(env, val + 0x400); tlb_flush_page(env, val + 0x800); tlb_flush_page(env, val + 0xc00); #else tlb_flush(env, 1); #endif break; default: goto bad_reg; } break; case 9: if (arm_feature(env, ARM_FEATURE_OMAPCP)) break; switch (crm) { case 0: /* Cache lockdown. */ switch (op2) { case 0: env->cp15.c9_data = val; break; case 1: env->cp15.c9_insn = val; break; default: goto bad_reg; } break; case 1: /* TCM memory region registers. */ /* Not implemented. */ goto bad_reg; default: goto bad_reg; } break; case 10: /* MMU TLB lockdown. */ /* ??? TLB lockdown not implemented. */ break; case 12: /* Reserved. */ goto bad_reg; case 13: /* Process ID. */ switch (op2) { case 0: if (!arm_feature(env, ARM_FEATURE_MPU)) goto bad_reg; /* Unlike real hardware the qemu TLB uses virtual addresses, not modified virtual addresses, so this causes a TLB flush. 
*/ if (env->cp15.c13_fcse != val) tlb_flush(env, 1); env->cp15.c13_fcse = val; break; case 1: /* This changes the ASID, so do a TLB flush. */ if (env->cp15.c13_context != val && !arm_feature(env, ARM_FEATURE_MPU)) tlb_flush(env, 0); env->cp15.c13_context = val; break; default: goto bad_reg; } break; case 14: /* Reserved. */ goto bad_reg; case 15: /* Implementation specific. */ if (arm_feature(env, ARM_FEATURE_XSCALE)) { if (op2 == 0 && crm == 1) { if (env->cp15.c15_cpar != (val & 0x3fff)) { /* Changes cp0 to cp13 behavior, so needs a TB flush. */ tb_flush(env); env->cp15.c15_cpar = val & 0x3fff; } break; } goto bad_reg; } if (arm_feature(env, ARM_FEATURE_OMAPCP)) { switch (crm) { case 0: break; case 1: /* Set TI925T configuration. */ env->cp15.c15_ticonfig = val & 0xe7; env->cp15.c0_cpuid = (val & (1 << 5)) ? /* OS_TYPE bit */ ARM_CPUID_TI915T : ARM_CPUID_TI925T; break; case 2: /* Set I_max. */ env->cp15.c15_i_max = val; break; case 3: /* Set I_min. */ env->cp15.c15_i_min = val; break; case 4: /* Set thread-ID. */ env->cp15.c15_threadid = val & 0xffff; break; case 8: /* Wait-for-interrupt (deprecated). */ cpu_interrupt(env, CPU_INTERRUPT_HALT); break; default: goto bad_reg; } } break; } return; bad_reg: /* ??? For debugging only. Should raise illegal instruction exception. */ cpu_abort(env, "Unimplemented cp15 register write\n"); } | 22,726 |
0 | static size_t stream_process_s2mem(struct Stream *s, unsigned char *buf, size_t len, uint32_t *app) { uint32_t prev_d; unsigned int rxlen; size_t pos = 0; int sof = 1; if (!stream_running(s) || stream_idle(s)) { return 0; } while (len) { stream_desc_load(s, s->regs[R_CURDESC]); if (s->desc.status & SDESC_STATUS_COMPLETE) { s->regs[R_DMASR] |= DMASR_HALTED; break; } rxlen = s->desc.control & SDESC_CTRL_LEN_MASK; if (rxlen > len) { /* It fits. */ rxlen = len; } cpu_physical_memory_write(s->desc.buffer_address, buf + pos, rxlen); len -= rxlen; pos += rxlen; /* Update the descriptor. */ if (!len) { int i; stream_complete(s); for (i = 0; i < 5; i++) { s->desc.app[i] = app[i]; } s->desc.status |= SDESC_STATUS_EOF; } s->desc.status |= sof << SDESC_STATUS_SOF_BIT; s->desc.status |= SDESC_STATUS_COMPLETE; stream_desc_store(s, s->regs[R_CURDESC]); sof = 0; /* Advance. */ prev_d = s->regs[R_CURDESC]; s->regs[R_CURDESC] = s->desc.nxtdesc; if (prev_d == s->regs[R_TAILDESC]) { s->regs[R_DMASR] |= DMASR_IDLE; break; } } return pos; } | 22,727 |
0 | static void gen_arith(DisasContext *ctx, uint32_t opc, int rd, int rs, int rt) { const char *opn = "arith"; if (rd == 0 && opc != OPC_ADD && opc != OPC_SUB && opc != OPC_DADD && opc != OPC_DSUB) { /* If no destination, treat it as a NOP. For add & sub, we must generate the overflow exception when needed. */ MIPS_DEBUG("NOP"); return; } switch (opc) { case OPC_ADD: { TCGv t0 = tcg_temp_local_new(); TCGv t1 = tcg_temp_new(); TCGv t2 = tcg_temp_new(); int l1 = gen_new_label(); gen_load_gpr(t1, rs); gen_load_gpr(t2, rt); tcg_gen_add_tl(t0, t1, t2); tcg_gen_ext32s_tl(t0, t0); tcg_gen_xor_tl(t1, t1, t2); tcg_gen_xor_tl(t2, t0, t2); tcg_gen_andc_tl(t1, t2, t1); tcg_temp_free(t2); tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1); tcg_temp_free(t1); /* operands of same sign, result different sign */ generate_exception(ctx, EXCP_OVERFLOW); gen_set_label(l1); gen_store_gpr(t0, rd); tcg_temp_free(t0); } opn = "add"; break; case OPC_ADDU: if (rs != 0 && rt != 0) { tcg_gen_add_tl(cpu_gpr[rd], cpu_gpr[rs], cpu_gpr[rt]); tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]); } else if (rs == 0 && rt != 0) { tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rt]); } else if (rs != 0 && rt == 0) { tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rs]); } else { tcg_gen_movi_tl(cpu_gpr[rd], 0); } opn = "addu"; break; case OPC_SUB: { TCGv t0 = tcg_temp_local_new(); TCGv t1 = tcg_temp_new(); TCGv t2 = tcg_temp_new(); int l1 = gen_new_label(); gen_load_gpr(t1, rs); gen_load_gpr(t2, rt); tcg_gen_sub_tl(t0, t1, t2); tcg_gen_ext32s_tl(t0, t0); tcg_gen_xor_tl(t2, t1, t2); tcg_gen_xor_tl(t1, t0, t1); tcg_gen_and_tl(t1, t1, t2); tcg_temp_free(t2); tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1); tcg_temp_free(t1); /* operands of different sign, first operand and result different sign */ generate_exception(ctx, EXCP_OVERFLOW); gen_set_label(l1); gen_store_gpr(t0, rd); tcg_temp_free(t0); } opn = "sub"; break; case OPC_SUBU: if (rs != 0 && rt != 0) { tcg_gen_sub_tl(cpu_gpr[rd], cpu_gpr[rs], cpu_gpr[rt]); tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]); } else if (rs == 0 && rt != 0) { tcg_gen_neg_tl(cpu_gpr[rd], cpu_gpr[rt]); tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]); } else if (rs != 0 && rt == 0) { tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rs]); } else { tcg_gen_movi_tl(cpu_gpr[rd], 0); } opn = "subu"; break; #if defined(TARGET_MIPS64) case OPC_DADD: { TCGv t0 = tcg_temp_local_new(); TCGv t1 = tcg_temp_new(); TCGv t2 = tcg_temp_new(); int l1 = gen_new_label(); gen_load_gpr(t1, rs); gen_load_gpr(t2, rt); tcg_gen_add_tl(t0, t1, t2); tcg_gen_xor_tl(t1, t1, t2); tcg_gen_xor_tl(t2, t0, t2); tcg_gen_andc_tl(t1, t2, t1); tcg_temp_free(t2); tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1); tcg_temp_free(t1); /* operands of same sign, result different sign */ generate_exception(ctx, EXCP_OVERFLOW); gen_set_label(l1); gen_store_gpr(t0, rd); tcg_temp_free(t0); } opn = "dadd"; break; case OPC_DADDU: if (rs != 0 && rt != 0) { tcg_gen_add_tl(cpu_gpr[rd], cpu_gpr[rs], cpu_gpr[rt]); } else if (rs == 0 && rt != 0) { tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rt]); } else if (rs != 0 && rt == 0) { tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rs]); } else { tcg_gen_movi_tl(cpu_gpr[rd], 0); } opn = "daddu"; break; case OPC_DSUB: { TCGv t0 = tcg_temp_local_new(); TCGv t1 = tcg_temp_new(); TCGv t2 = tcg_temp_new(); int l1 = gen_new_label(); gen_load_gpr(t1, rs); gen_load_gpr(t2, rt); tcg_gen_sub_tl(t0, t1, t2); tcg_gen_xor_tl(t2, t1, t2); tcg_gen_xor_tl(t1, t0, t1); tcg_gen_and_tl(t1, t1, t2); tcg_temp_free(t2); tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1); tcg_temp_free(t1); /* operands of different sign, first 
operand and result different sign */ generate_exception(ctx, EXCP_OVERFLOW); gen_set_label(l1); gen_store_gpr(t0, rd); tcg_temp_free(t0); } opn = "dsub"; break; case OPC_DSUBU: if (rs != 0 && rt != 0) { tcg_gen_sub_tl(cpu_gpr[rd], cpu_gpr[rs], cpu_gpr[rt]); } else if (rs == 0 && rt != 0) { tcg_gen_neg_tl(cpu_gpr[rd], cpu_gpr[rt]); } else if (rs != 0 && rt == 0) { tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rs]); } else { tcg_gen_movi_tl(cpu_gpr[rd], 0); } opn = "dsubu"; break; #endif case OPC_MUL: if (likely(rs != 0 && rt != 0)) { tcg_gen_mul_tl(cpu_gpr[rd], cpu_gpr[rs], cpu_gpr[rt]); tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]); } else { tcg_gen_movi_tl(cpu_gpr[rd], 0); } opn = "mul"; break; } (void)opn; /* avoid a compiler warning */ MIPS_DEBUG("%s %s, %s, %s", opn, regnames[rd], regnames[rs], regnames[rt]); } | 22,728 |
0 | static inline void gen_op_eval_fbl(TCGv dst, TCGv src, unsigned int fcc_offset) { gen_mov_reg_FCC0(dst, src, fcc_offset); gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset); tcg_gen_xori_tl(cpu_tmp0, cpu_tmp0, 0x1); tcg_gen_and_tl(dst, dst, cpu_tmp0); } | 22,729 |
0 | void qemu_clock_register_reset_notifier(QEMUClockType type, Notifier *notifier) { QEMUClock *clock = qemu_clock_ptr(type); notifier_list_add(&clock->reset_notifiers, notifier); } | 22,730 |
0 | DriveInfo *drive_init(QemuOpts *all_opts, BlockInterfaceType block_default_type) { const char *value; DriveInfo *dinfo = NULL; QDict *bs_opts; QemuOpts *legacy_opts; DriveMediaType media = MEDIA_DISK; BlockInterfaceType type; int cyls, heads, secs, translation; int max_devs, bus_id, unit_id, index; const char *devaddr; bool read_only = false; bool copy_on_read; const char *filename; Error *local_err = NULL; /* Change legacy command line options into QMP ones */ qemu_opt_rename(all_opts, "iops", "throttling.iops-total"); qemu_opt_rename(all_opts, "iops_rd", "throttling.iops-read"); qemu_opt_rename(all_opts, "iops_wr", "throttling.iops-write"); qemu_opt_rename(all_opts, "bps", "throttling.bps-total"); qemu_opt_rename(all_opts, "bps_rd", "throttling.bps-read"); qemu_opt_rename(all_opts, "bps_wr", "throttling.bps-write"); qemu_opt_rename(all_opts, "iops_max", "throttling.iops-total-max"); qemu_opt_rename(all_opts, "iops_rd_max", "throttling.iops-read-max"); qemu_opt_rename(all_opts, "iops_wr_max", "throttling.iops-write-max"); qemu_opt_rename(all_opts, "bps_max", "throttling.bps-total-max"); qemu_opt_rename(all_opts, "bps_rd_max", "throttling.bps-read-max"); qemu_opt_rename(all_opts, "bps_wr_max", "throttling.bps-write-max"); qemu_opt_rename(all_opts, "iops_size", "throttling.iops-size"); qemu_opt_rename(all_opts, "readonly", "read-only"); value = qemu_opt_get(all_opts, "cache"); if (value) { int flags = 0; if (bdrv_parse_cache_flags(value, &flags) != 0) { error_report("invalid cache option"); return NULL; } /* Specific options take precedence */ if (!qemu_opt_get(all_opts, "cache.writeback")) { qemu_opt_set_bool(all_opts, "cache.writeback", !!(flags & BDRV_O_CACHE_WB)); } if (!qemu_opt_get(all_opts, "cache.direct")) { qemu_opt_set_bool(all_opts, "cache.direct", !!(flags & BDRV_O_NOCACHE)); } if (!qemu_opt_get(all_opts, "cache.no-flush")) { qemu_opt_set_bool(all_opts, "cache.no-flush", !!(flags & BDRV_O_NO_FLUSH)); } qemu_opt_unset(all_opts, "cache"); } /* Get a QDict for processing the options */ bs_opts = qdict_new(); qemu_opts_to_qdict(all_opts, bs_opts); legacy_opts = qemu_opts_create(&qemu_legacy_drive_opts, NULL, 0, &error_abort); qemu_opts_absorb_qdict(legacy_opts, bs_opts, &local_err); if (error_is_set(&local_err)) { qerror_report_err(local_err); error_free(local_err); goto fail; } /* Deprecated option boot=[on|off] */ if (qemu_opt_get(legacy_opts, "boot") != NULL) { fprintf(stderr, "qemu-kvm: boot=on|off is deprecated and will be " "ignored. Future versions will reject this parameter. Please " "update your scripts.\n"); } /* Media type */ value = qemu_opt_get(legacy_opts, "media"); if (value) { if (!strcmp(value, "disk")) { media = MEDIA_DISK; } else if (!strcmp(value, "cdrom")) { media = MEDIA_CDROM; read_only = true; } else { error_report("'%s' invalid media", value); goto fail; } } /* copy-on-read is disabled with a warning for read-only devices */ read_only |= qemu_opt_get_bool(legacy_opts, "read-only", false); copy_on_read = qemu_opt_get_bool(legacy_opts, "copy-on-read", false); if (read_only && copy_on_read) { error_report("warning: disabling copy-on-read on read-only drive"); copy_on_read = false; } qdict_put(bs_opts, "read-only", qstring_from_str(read_only ? "on" : "off")); qdict_put(bs_opts, "copy-on-read", qstring_from_str(copy_on_read ? 
"on" :"off")); /* Controller type */ value = qemu_opt_get(legacy_opts, "if"); if (value) { for (type = 0; type < IF_COUNT && strcmp(value, if_name[type]); type++) { } if (type == IF_COUNT) { error_report("unsupported bus type '%s'", value); goto fail; } } else { type = block_default_type; } /* Geometry */ cyls = qemu_opt_get_number(legacy_opts, "cyls", 0); heads = qemu_opt_get_number(legacy_opts, "heads", 0); secs = qemu_opt_get_number(legacy_opts, "secs", 0); if (cyls || heads || secs) { if (cyls < 1) { error_report("invalid physical cyls number"); goto fail; } if (heads < 1) { error_report("invalid physical heads number"); goto fail; } if (secs < 1) { error_report("invalid physical secs number"); goto fail; } } translation = BIOS_ATA_TRANSLATION_AUTO; value = qemu_opt_get(legacy_opts, "trans"); if (value != NULL) { if (!cyls) { error_report("'%s' trans must be used with cyls, heads and secs", value); goto fail; } if (!strcmp(value, "none")) { translation = BIOS_ATA_TRANSLATION_NONE; } else if (!strcmp(value, "lba")) { translation = BIOS_ATA_TRANSLATION_LBA; } else if (!strcmp(value, "auto")) { translation = BIOS_ATA_TRANSLATION_AUTO; } else { error_report("'%s' invalid translation type", value); goto fail; } } if (media == MEDIA_CDROM) { if (cyls || secs || heads) { error_report("CHS can't be set with media=cdrom"); goto fail; } } /* Device address specified by bus/unit or index. * If none was specified, try to find the first free one. */ bus_id = qemu_opt_get_number(legacy_opts, "bus", 0); unit_id = qemu_opt_get_number(legacy_opts, "unit", -1); index = qemu_opt_get_number(legacy_opts, "index", -1); max_devs = if_max_devs[type]; if (index != -1) { if (bus_id != 0 || unit_id != -1) { error_report("index cannot be used with bus and unit"); goto fail; } bus_id = drive_index_to_bus_id(type, index); unit_id = drive_index_to_unit_id(type, index); } if (unit_id == -1) { unit_id = 0; while (drive_get(type, bus_id, unit_id) != NULL) { unit_id++; if (max_devs && unit_id >= max_devs) { unit_id -= max_devs; bus_id++; } } } if (max_devs && unit_id >= max_devs) { error_report("unit %d too big (max is %d)", unit_id, max_devs - 1); goto fail; } if (drive_get(type, bus_id, unit_id) != NULL) { error_report("drive with bus=%d, unit=%d (index=%d) exists", bus_id, unit_id, index); goto fail; } /* no id supplied -> create one */ if (qemu_opts_id(all_opts) == NULL) { char *new_id; const char *mediastr = ""; if (type == IF_IDE || type == IF_SCSI) { mediastr = (media == MEDIA_CDROM) ? 
"-cd" : "-hd"; } if (max_devs) { new_id = g_strdup_printf("%s%i%s%i", if_name[type], bus_id, mediastr, unit_id); } else { new_id = g_strdup_printf("%s%s%i", if_name[type], mediastr, unit_id); } qdict_put(bs_opts, "id", qstring_from_str(new_id)); g_free(new_id); } /* Add virtio block device */ devaddr = qemu_opt_get(legacy_opts, "addr"); if (devaddr && type != IF_VIRTIO) { error_report("addr is not supported by this bus type"); goto fail; } if (type == IF_VIRTIO) { QemuOpts *devopts; devopts = qemu_opts_create(qemu_find_opts("device"), NULL, 0, &error_abort); if (arch_type == QEMU_ARCH_S390X) { qemu_opt_set(devopts, "driver", "virtio-blk-s390"); } else { qemu_opt_set(devopts, "driver", "virtio-blk-pci"); } qemu_opt_set(devopts, "drive", qdict_get_str(bs_opts, "id")); if (devaddr) { qemu_opt_set(devopts, "addr", devaddr); } } filename = qemu_opt_get(legacy_opts, "file"); /* Actual block device init: Functionality shared with blockdev-add */ dinfo = blockdev_init(filename, bs_opts, type, &local_err); if (dinfo == NULL) { if (error_is_set(&local_err)) { qerror_report_err(local_err); error_free(local_err); } goto fail; } else { assert(!error_is_set(&local_err)); } /* Set legacy DriveInfo fields */ dinfo->enable_auto_del = true; dinfo->opts = all_opts; dinfo->cyls = cyls; dinfo->heads = heads; dinfo->secs = secs; dinfo->trans = translation; dinfo->bus = bus_id; dinfo->unit = unit_id; dinfo->devaddr = devaddr; switch(type) { case IF_IDE: case IF_SCSI: case IF_XEN: case IF_NONE: dinfo->media_cd = media == MEDIA_CDROM; break; default: break; } fail: qemu_opts_del(legacy_opts); return dinfo; } | 22,731 |
0 | static int cd_read_sector(IDEState *s, int lba, uint8_t *buf, int sector_size) { int ret; switch(sector_size) { case 2048: block_acct_start(bdrv_get_stats(s->bs), &s->acct, 4 * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ); ret = bdrv_read(s->bs, (int64_t)lba << 2, buf, 4); block_acct_done(bdrv_get_stats(s->bs), &s->acct); break; case 2352: block_acct_start(bdrv_get_stats(s->bs), &s->acct, 4 * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ); ret = bdrv_read(s->bs, (int64_t)lba << 2, buf + 16, 4); block_acct_done(bdrv_get_stats(s->bs), &s->acct); if (ret < 0) return ret; cd_data_to_raw(buf, lba); break; default: ret = -EIO; break; } return ret; } | 22,732 |
0 | static void av_always_inline filter_mb_edgecv( uint8_t *pix, int stride, const int16_t bS[4], unsigned int qp, H264Context *h, int intra ) { const int qp_bd_offset = 6 * (h->sps.bit_depth_luma - 8); const unsigned int index_a = qp - qp_bd_offset + h->slice_alpha_c0_offset; const int alpha = alpha_table[index_a]; const int beta = beta_table[qp - qp_bd_offset + h->slice_beta_offset]; if (alpha ==0 || beta == 0) return; if( bS[0] < 4 || !intra ) { int8_t tc[4]; tc[0] = tc0_table[index_a][bS[0]]+1; tc[1] = tc0_table[index_a][bS[1]]+1; tc[2] = tc0_table[index_a][bS[2]]+1; tc[3] = tc0_table[index_a][bS[3]]+1; h->h264dsp.h264_h_loop_filter_chroma(pix, stride, alpha, beta, tc); } else { h->h264dsp.h264_h_loop_filter_chroma_intra(pix, stride, alpha, beta); } } | 22,733 |