label: int64, range 0–1
func1: string, lengths 23–97k
id: int64, range 0–27.3k
0
static int load_bitmap_data(BlockDriverState *bs, const uint64_t *bitmap_table, uint32_t bitmap_table_size, BdrvDirtyBitmap *bitmap) { int ret = 0; BDRVQcow2State *s = bs->opaque; uint64_t sector, sbc; uint64_t bm_size = bdrv_dirty_bitmap_size(bitmap); uint64_t bm_sectors = DIV_ROUND_UP(bm_size, BDRV_SECTOR_SIZE); uint8_t *buf = NULL; uint64_t i, tab_size = size_to_clusters(s, bdrv_dirty_bitmap_serialization_size(bitmap, 0, bm_sectors)); if (tab_size != bitmap_table_size || tab_size > BME_MAX_TABLE_SIZE) { return -EINVAL; } buf = g_malloc(s->cluster_size); sbc = sectors_covered_by_bitmap_cluster(s, bitmap); for (i = 0, sector = 0; i < tab_size; ++i, sector += sbc) { uint64_t count = MIN(bm_sectors - sector, sbc); uint64_t entry = bitmap_table[i]; uint64_t offset = entry & BME_TABLE_ENTRY_OFFSET_MASK; assert(check_table_entry(entry, s->cluster_size) == 0); if (offset == 0) { if (entry & BME_TABLE_ENTRY_FLAG_ALL_ONES) { bdrv_dirty_bitmap_deserialize_ones(bitmap, sector, count, false); } else { /* No need to deserialize zeros because the dirty bitmap is * already cleared */ } } else { ret = bdrv_pread(bs->file, offset, buf, s->cluster_size); if (ret < 0) { goto finish; } bdrv_dirty_bitmap_deserialize_part(bitmap, buf, sector, count, false); } } ret = 0; bdrv_dirty_bitmap_deserialize_finish(bitmap); finish: g_free(buf); return ret; }
22,857
0
static coroutine_fn int nbd_negotiate(NBDClientNewData *data) { NBDClient *client = data->client; char buf[8 + 8 + 8 + 128]; int rc; const int myflags = (NBD_FLAG_HAS_FLAGS | NBD_FLAG_SEND_TRIM | NBD_FLAG_SEND_FLUSH | NBD_FLAG_SEND_FUA); bool oldStyle; /* Old style negotiation header without options [ 0 .. 7] passwd ("NBDMAGIC") [ 8 .. 15] magic (NBD_CLIENT_MAGIC) [16 .. 23] size [24 .. 25] server flags (0) [26 .. 27] export flags [28 .. 151] reserved (0) New style negotiation header with options [ 0 .. 7] passwd ("NBDMAGIC") [ 8 .. 15] magic (NBD_OPTS_MAGIC) [16 .. 17] server flags (0) ....options sent.... [18 .. 25] size [26 .. 27] export flags [28 .. 151] reserved (0) */ qio_channel_set_blocking(client->ioc, false, NULL); rc = -EINVAL; TRACE("Beginning negotiation."); memset(buf, 0, sizeof(buf)); memcpy(buf, "NBDMAGIC", 8); oldStyle = client->exp != NULL && !client->tlscreds; if (oldStyle) { assert ((client->exp->nbdflags & ~65535) == 0); TRACE("advertising size %" PRIu64 " and flags %x", client->exp->size, client->exp->nbdflags | myflags); stq_be_p(buf + 8, NBD_CLIENT_MAGIC); stq_be_p(buf + 16, client->exp->size); stw_be_p(buf + 26, client->exp->nbdflags | myflags); } else { stq_be_p(buf + 8, NBD_OPTS_MAGIC); stw_be_p(buf + 16, NBD_FLAG_FIXED_NEWSTYLE); } if (oldStyle) { if (client->tlscreds) { TRACE("TLS cannot be enabled with oldstyle protocol"); goto fail; } if (nbd_negotiate_write(client->ioc, buf, sizeof(buf)) != sizeof(buf)) { LOG("write failed"); goto fail; } } else { if (nbd_negotiate_write(client->ioc, buf, 18) != 18) { LOG("write failed"); goto fail; } rc = nbd_negotiate_options(client); if (rc != 0) { LOG("option negotiation failed"); goto fail; } assert ((client->exp->nbdflags & ~65535) == 0); TRACE("advertising size %" PRIu64 " and flags %x", client->exp->size, client->exp->nbdflags | myflags); stq_be_p(buf + 18, client->exp->size); stw_be_p(buf + 26, client->exp->nbdflags | myflags); if (nbd_negotiate_write(client->ioc, buf + 18, sizeof(buf) - 18) != sizeof(buf) - 18) { LOG("write failed"); goto fail; } } TRACE("Negotiation succeeded."); rc = 0; fail: return rc; }
22,858
0
void bdrv_io_plug(BlockDriverState *bs) { BlockDriver *drv = bs->drv; if (drv && drv->bdrv_io_plug) { drv->bdrv_io_plug(bs); } else if (bs->file) { bdrv_io_plug(bs->file); } }
22,859
0
void cpu_watchpoint_remove_all(CPUState *env, int mask) { CPUWatchpoint *wp, *next; TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) { if (wp->flags & mask) cpu_watchpoint_remove_by_ref(env, wp); } }
22,860
0
static void hybrid_synthesis(float out[2][38][64], float in[91][32][2], int is34, int len) { int i, n; if (is34) { for (n = 0; n < len; n++) { memset(out[0][n], 0, 5*sizeof(out[0][n][0])); memset(out[1][n], 0, 5*sizeof(out[1][n][0])); for (i = 0; i < 12; i++) { out[0][n][0] += in[ i][n][0]; out[1][n][0] += in[ i][n][1]; } for (i = 0; i < 8; i++) { out[0][n][1] += in[12+i][n][0]; out[1][n][1] += in[12+i][n][1]; } for (i = 0; i < 4; i++) { out[0][n][2] += in[20+i][n][0]; out[1][n][2] += in[20+i][n][1]; out[0][n][3] += in[24+i][n][0]; out[1][n][3] += in[24+i][n][1]; out[0][n][4] += in[28+i][n][0]; out[1][n][4] += in[28+i][n][1]; } } for (i = 0; i < 59; i++) { for (n = 0; n < len; n++) { out[0][n][i+5] = in[i+32][n][0]; out[1][n][i+5] = in[i+32][n][1]; } } } else { for (n = 0; n < len; n++) { out[0][n][0] = in[0][n][0] + in[1][n][0] + in[2][n][0] + in[3][n][0] + in[4][n][0] + in[5][n][0]; out[1][n][0] = in[0][n][1] + in[1][n][1] + in[2][n][1] + in[3][n][1] + in[4][n][1] + in[5][n][1]; out[0][n][1] = in[6][n][0] + in[7][n][0]; out[1][n][1] = in[6][n][1] + in[7][n][1]; out[0][n][2] = in[8][n][0] + in[9][n][0]; out[1][n][2] = in[8][n][1] + in[9][n][1]; } for (i = 0; i < 61; i++) { for (n = 0; n < len; n++) { out[0][n][i+3] = in[i+10][n][0]; out[1][n][i+3] = in[i+10][n][1]; } } } }
22,861
0
static int audio_get_buffer(AVCodecContext *avctx, AVFrame *frame) { AVCodecInternal *avci = avctx->internal; InternalBuffer *buf; int buf_size, ret; buf_size = av_samples_get_buffer_size(NULL, avctx->channels, frame->nb_samples, avctx->sample_fmt, 0); if (buf_size < 0) return AVERROR(EINVAL); /* allocate InternalBuffer if needed */ if (!avci->buffer) { avci->buffer = av_mallocz(sizeof(InternalBuffer)); if (!avci->buffer) return AVERROR(ENOMEM); } buf = avci->buffer; /* if there is a previously-used internal buffer, check its size and * channel count to see if we can reuse it */ if (buf->extended_data) { /* if current buffer is too small, free it */ if (buf->extended_data[0] && buf_size > buf->audio_data_size) { av_free(buf->extended_data[0]); if (buf->extended_data != buf->data) av_free(buf->extended_data); buf->extended_data = NULL; buf->data[0] = NULL; } /* if number of channels has changed, reset and/or free extended data * pointers but leave data buffer in buf->data[0] for reuse */ if (buf->nb_channels != avctx->channels) { if (buf->extended_data != buf->data) av_free(buf->extended_data); buf->extended_data = NULL; } } /* if there is no previous buffer or the previous buffer cannot be used * as-is, allocate a new buffer and/or rearrange the channel pointers */ if (!buf->extended_data) { if (!buf->data[0]) { if (!(buf->data[0] = av_mallocz(buf_size))) return AVERROR(ENOMEM); buf->audio_data_size = buf_size; } if ((ret = avcodec_fill_audio_frame(frame, avctx->channels, avctx->sample_fmt, buf->data[0], buf->audio_data_size, 0))) return ret; if (frame->extended_data == frame->data) buf->extended_data = buf->data; else buf->extended_data = frame->extended_data; memcpy(buf->data, frame->data, sizeof(frame->data)); buf->linesize[0] = frame->linesize[0]; buf->nb_channels = avctx->channels; } else { /* copy InternalBuffer info to the AVFrame */ frame->extended_data = buf->extended_data; frame->linesize[0] = buf->linesize[0]; memcpy(frame->data, buf->data, sizeof(frame->data)); } frame->type = FF_BUFFER_TYPE_INTERNAL; ff_init_buffer_info(avctx, frame); if (avctx->debug & FF_DEBUG_BUFFERS) av_log(avctx, AV_LOG_DEBUG, "default_get_buffer called on frame %p, " "internal audio buffer used\n", frame); return 0; }
22,862
0
static void tracked_request_begin(BdrvTrackedRequest *req, BlockDriverState *bs, int64_t offset, unsigned int bytes, bool is_write) { *req = (BdrvTrackedRequest){ .bs = bs, .offset = offset, .bytes = bytes, .is_write = is_write, .co = qemu_coroutine_self(), .serialising = false, .overlap_offset = offset, .overlap_bytes = bytes, }; qemu_co_queue_init(&req->wait_queue); QLIST_INSERT_HEAD(&bs->tracked_requests, req, list); }
22,863
0
static uint64_t omap_id_read(void *opaque, hwaddr addr, unsigned size) { struct omap_mpu_state_s *s = (struct omap_mpu_state_s *) opaque; if (size != 4) { return omap_badwidth_read32(opaque, addr); } switch (addr) { case 0xfffe1800: /* DIE_ID_LSB */ return 0xc9581f0e; case 0xfffe1804: /* DIE_ID_MSB */ return 0xa8858bfa; case 0xfffe2000: /* PRODUCT_ID_LSB */ return 0x00aaaafc; case 0xfffe2004: /* PRODUCT_ID_MSB */ return 0xcafeb574; case 0xfffed400: /* JTAG_ID_LSB */ switch (s->mpu_model) { case omap310: return 0x03310315; case omap1510: return 0x03310115; default: hw_error("%s: bad mpu model\n", __FUNCTION__); } break; case 0xfffed404: /* JTAG_ID_MSB */ switch (s->mpu_model) { case omap310: return 0xfb57402f; case omap1510: return 0xfb47002f; default: hw_error("%s: bad mpu model\n", __FUNCTION__); } break; } OMAP_BAD_REG(addr); return 0; }
22,864
0
static void pc_compat_1_7(MachineState *machine) { pc_compat_2_0(machine); smbios_defaults = false; gigabyte_align = false; option_rom_has_mr = true; x86_cpu_change_kvm_default("x2apic", NULL); }
22,865
0
static void v9fs_create_post_mksock(V9fsState *s, V9fsCreateState *vs, int err) { if (err) { err = -errno; goto out; } err = v9fs_do_chmod(s, &vs->fullname, vs->perm & 0777); v9fs_create_post_perms(s, vs, err); return; out: v9fs_post_create(s, vs, err); }
22,866
0
void qemu_peer_set_vnet_hdr_len(NetClientState *nc, int len) { if (!nc->peer || !nc->peer->info->set_vnet_hdr_len) { return; } nc->peer->info->set_vnet_hdr_len(nc->peer, len); }
22,868
0
BusState *qdev_get_child_bus(DeviceState *dev, const char *name) { BusState *bus; LIST_FOREACH(bus, &dev->child_bus, sibling) { if (strcmp(name, bus->name) == 0) { return bus; } } return NULL; }
22,869
0
static bool e1000_mit_state_needed(void *opaque) { E1000State *s = opaque; return s->compat_flags & E1000_FLAG_MIT; }
22,870
0
void helper_ldq_l_raw(uint64_t t0, uint64_t t1) { env->lock = t1; ldl_raw(t1, t0); }
22,871
0
void xen_pt_config_delete(XenPCIPassthroughState *s) { struct XenPTRegGroup *reg_group, *next_grp; struct XenPTReg *reg, *next_reg; /* free MSI/MSI-X info table */ if (s->msix) { xen_pt_msix_delete(s); } if (s->msi) { g_free(s->msi); } /* free all register group entry */ QLIST_FOREACH_SAFE(reg_group, &s->reg_grps, entries, next_grp) { /* free all register entry */ QLIST_FOREACH_SAFE(reg, &reg_group->reg_tbl_list, entries, next_reg) { QLIST_REMOVE(reg, entries); g_free(reg); } QLIST_REMOVE(reg_group, entries); g_free(reg_group); } }
22,872
0
static void luma_mc(HEVCContext *s, int16_t *dst, ptrdiff_t dststride, AVFrame *ref, const Mv *mv, int x_off, int y_off, int block_w, int block_h) { HEVCLocalContext *lc = &s->HEVClc; uint8_t *src = ref->data[0]; ptrdiff_t srcstride = ref->linesize[0]; int pic_width = s->ps.sps->width; int pic_height = s->ps.sps->height; int mx = mv->x & 3; int my = mv->y & 3; int extra_left = ff_hevc_qpel_extra_before[mx]; int extra_top = ff_hevc_qpel_extra_before[my]; x_off += mv->x >> 2; y_off += mv->y >> 2; src += y_off * srcstride + (x_off << s->ps.sps->pixel_shift); if (x_off < extra_left || y_off < extra_top || x_off >= pic_width - block_w - ff_hevc_qpel_extra_after[mx] || y_off >= pic_height - block_h - ff_hevc_qpel_extra_after[my]) { const int edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift; int offset = extra_top * srcstride + (extra_left << s->ps.sps->pixel_shift); int buf_offset = extra_top * edge_emu_stride + (extra_left << s->ps.sps->pixel_shift); s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src - offset, edge_emu_stride, srcstride, block_w + ff_hevc_qpel_extra[mx], block_h + ff_hevc_qpel_extra[my], x_off - extra_left, y_off - extra_top, pic_width, pic_height); src = lc->edge_emu_buffer + buf_offset; srcstride = edge_emu_stride; } s->hevcdsp.put_hevc_qpel[my][mx](dst, dststride, src, srcstride, block_w, block_h, lc->mc_buffer); }
22,873
0
static gboolean gd_scroll_event(GtkWidget *widget, GdkEventScroll *scroll, void *opaque) { VirtualConsole *vc = opaque; InputButton btn; if (scroll->direction == GDK_SCROLL_UP) { btn = INPUT_BUTTON_WHEEL_UP; } else if (scroll->direction == GDK_SCROLL_DOWN) { btn = INPUT_BUTTON_WHEEL_DOWN; } else { return TRUE; } qemu_input_queue_btn(vc->gfx.dcl.con, btn, true); qemu_input_event_sync(); qemu_input_queue_btn(vc->gfx.dcl.con, btn, false); qemu_input_event_sync(); return TRUE; }
22,874
1
static int nfs_parse_uri(const char *filename, QDict *options, Error **errp) { URI *uri = NULL; QueryParams *qp = NULL; int ret = -EINVAL, i; uri = uri_parse(filename); if (!uri) { error_setg(errp, "Invalid URI specified"); goto out; } if (strcmp(uri->scheme, "nfs") != 0) { error_setg(errp, "URI scheme must be 'nfs'"); goto out; } if (!uri->server) { error_setg(errp, "missing hostname in URI"); goto out; } if (!uri->path) { error_setg(errp, "missing file path in URI"); goto out; } qp = query_params_parse(uri->query); if (!qp) { error_setg(errp, "could not parse query parameters"); goto out; } qdict_put(options, "server.host", qstring_from_str(uri->server)); qdict_put(options, "server.type", qstring_from_str("inet")); qdict_put(options, "path", qstring_from_str(uri->path)); for (i = 0; i < qp->n; i++) { if (!qp->p[i].value) { error_setg(errp, "Value for NFS parameter expected: %s", qp->p[i].name); goto out; } if (parse_uint_full(qp->p[i].value, NULL, 0)) { error_setg(errp, "Illegal value for NFS parameter: %s", qp->p[i].name); goto out; } if (!strcmp(qp->p[i].name, "uid")) { qdict_put(options, "user", qstring_from_str(qp->p[i].value)); } else if (!strcmp(qp->p[i].name, "gid")) { qdict_put(options, "group", qstring_from_str(qp->p[i].value)); } else if (!strcmp(qp->p[i].name, "tcp-syncnt")) { qdict_put(options, "tcp-syn-count", qstring_from_str(qp->p[i].value)); } else if (!strcmp(qp->p[i].name, "readahead")) { qdict_put(options, "readahead-size", qstring_from_str(qp->p[i].value)); } else if (!strcmp(qp->p[i].name, "pagecache")) { qdict_put(options, "page-cache-size", qstring_from_str(qp->p[i].value)); } else if (!strcmp(qp->p[i].name, "debug")) { qdict_put(options, "debug", qstring_from_str(qp->p[i].value)); } else { error_setg(errp, "Unknown NFS parameter name: %s", qp->p[i].name); goto out; } } ret = 0; out: if (qp) { query_params_free(qp); } if (uri) { uri_free(uri); } return ret; }
22,877
1
void helper_rfi(CPUPPCState *env) { do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1], ~((target_ulong)0x783F0000), 1); }
22,878
1
QEMUFile *qemu_popen_cmd(const char *command, const char *mode) { FILE *stdio_file; QEMUFileStdio *s; if (mode == NULL || (mode[0] != 'r' && mode[0] != 'w') || mode[1] != 0) { fprintf(stderr, "qemu_popen: Argument validity check failed\n"); return NULL; } stdio_file = popen(command, mode); if (stdio_file == NULL) { return NULL; } s = g_malloc0(sizeof(QEMUFileStdio)); s->stdio_file = stdio_file; if (mode[0] == 'r') { s->file = qemu_fopen_ops(s, &stdio_pipe_read_ops); } else { s->file = qemu_fopen_ops(s, &stdio_pipe_write_ops); } return s->file; }
22,879
1
static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo, const siginfo_t *info) { int sig = host_to_target_signal(info->si_signo); tinfo->si_signo = sig; tinfo->si_errno = 0; tinfo->si_code = info->si_code; if (sig == TARGET_SIGILL || sig == TARGET_SIGFPE || sig == TARGET_SIGSEGV || sig == TARGET_SIGBUS || sig == TARGET_SIGTRAP) { /* Should never come here, but who knows. The information for the target is irrelevant. */ tinfo->_sifields._sigfault._addr = 0; } else if (sig == TARGET_SIGIO) { tinfo->_sifields._sigpoll._band = info->si_band; tinfo->_sifields._sigpoll._fd = info->si_fd; } else if (sig == TARGET_SIGCHLD) { tinfo->_sifields._sigchld._pid = info->si_pid; tinfo->_sifields._sigchld._uid = info->si_uid; tinfo->_sifields._sigchld._status = host_to_target_waitstatus(info->si_status); tinfo->_sifields._sigchld._utime = info->si_utime; tinfo->_sifields._sigchld._stime = info->si_stime; } else if (sig >= TARGET_SIGRTMIN) { tinfo->_sifields._rt._pid = info->si_pid; tinfo->_sifields._rt._uid = info->si_uid; /* XXX: potential problem if 64 bit */ tinfo->_sifields._rt._sigval.sival_ptr = (abi_ulong)(unsigned long)info->si_value.sival_ptr; } }
22,880
1
static int init_image(TiffContext *s, AVFrame *frame) { int ret; switch (s->planar * 1000 + s->bpp * 10 + s->bppcount) { case 11: s->avctx->pix_fmt = AV_PIX_FMT_MONOBLACK; break; case 81: s->avctx->pix_fmt = s->palette_is_set ? AV_PIX_FMT_PAL8 : AV_PIX_FMT_GRAY8; break; case 243: s->avctx->pix_fmt = AV_PIX_FMT_RGB24; break; case 161: s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GRAY16LE : AV_PIX_FMT_GRAY16BE; break; case 162: s->avctx->pix_fmt = AV_PIX_FMT_YA8; break; case 322: s->avctx->pix_fmt = s->le ? AV_PIX_FMT_YA16LE : AV_PIX_FMT_YA16BE; break; case 324: s->avctx->pix_fmt = AV_PIX_FMT_RGBA; break; case 483: s->avctx->pix_fmt = s->le ? AV_PIX_FMT_RGB48LE : AV_PIX_FMT_RGB48BE; break; case 644: s->avctx->pix_fmt = s->le ? AV_PIX_FMT_RGBA64LE : AV_PIX_FMT_RGBA64BE; break; case 1243: s->avctx->pix_fmt = AV_PIX_FMT_GBRP; break; case 1324: s->avctx->pix_fmt = AV_PIX_FMT_GBRAP; break; case 1483: s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GBRP16LE : AV_PIX_FMT_GBRP16BE; break; case 1644: s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GBRAP16LE : AV_PIX_FMT_GBRAP16BE; break; default: av_log(s->avctx, AV_LOG_ERROR, "This format is not supported (bpp=%d, bppcount=%d)\n", s->bpp, s->bppcount); return AVERROR_INVALIDDATA; } if (s->width != s->avctx->width || s->height != s->avctx->height) { ret = ff_set_dimensions(s->avctx, s->width, s->height); if (ret < 0) return ret; } if ((ret = ff_get_buffer(s->avctx, frame, 0)) < 0) { av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n"); return ret; } if (s->avctx->pix_fmt == AV_PIX_FMT_PAL8) { memcpy(frame->data[1], s->palette, sizeof(s->palette)); } return 0; }
22,881
0
static int jpeg2000_decode_packet(Jpeg2000DecoderContext *s, Jpeg2000Tile *tile, int *tp_index, Jpeg2000CodingStyle *codsty, Jpeg2000ResLevel *rlevel, int precno, int layno, uint8_t *expn, int numgbits) { int bandno, cblkno, ret, nb_code_blocks; int cwsno; if (bytestream2_get_bytes_left(&s->g) == 0 && s->bit_index == 8) { if (*tp_index < FF_ARRAY_ELEMS(tile->tile_part) - 1) { s->g = tile->tile_part[++(*tp_index)].tpg; } } if (bytestream2_peek_be32(&s->g) == 0xFF910004) bytestream2_skip(&s->g, 6); if (!(ret = get_bits(s, 1))) { jpeg2000_flush(s); return 0; } else if (ret < 0) return ret; for (bandno = 0; bandno < rlevel->nbands; bandno++) { Jpeg2000Band *band = rlevel->band + bandno; Jpeg2000Prec *prec = band->prec + precno; if (band->coord[0][0] == band->coord[0][1] || band->coord[1][0] == band->coord[1][1]) continue; nb_code_blocks = prec->nb_codeblocks_height * prec->nb_codeblocks_width; for (cblkno = 0; cblkno < nb_code_blocks; cblkno++) { Jpeg2000Cblk *cblk = prec->cblk + cblkno; int incl, newpasses, llen; if (cblk->npasses) incl = get_bits(s, 1); else incl = tag_tree_decode(s, prec->cblkincl + cblkno, layno + 1) == layno; if (!incl) continue; else if (incl < 0) return incl; if (!cblk->npasses) { int v = expn[bandno] + numgbits - 1 - tag_tree_decode(s, prec->zerobits + cblkno, 100); if (v < 0) { av_log(s->avctx, AV_LOG_ERROR, "nonzerobits %d invalid\n", v); return AVERROR_INVALIDDATA; } cblk->nonzerobits = v; } if ((newpasses = getnpasses(s)) < 0) return newpasses; av_assert2(newpasses > 0); if (cblk->npasses + newpasses >= JPEG2000_MAX_PASSES) { avpriv_request_sample(s->avctx, "Too many passes\n"); return AVERROR_PATCHWELCOME; } if ((llen = getlblockinc(s)) < 0) return llen; if (cblk->lblock + llen + av_log2(newpasses) > 16) { avpriv_request_sample(s->avctx, "Block with length beyond 16 bits\n"); return AVERROR_PATCHWELCOME; } cblk->lblock += llen; cblk->nb_lengthinc = 0; cblk->nb_terminationsinc = 0; do { int newpasses1 = 0; while (newpasses1 < newpasses) { newpasses1 ++; if (needs_termination(codsty->cblk_style, cblk->npasses + newpasses1 - 1)) { cblk->nb_terminationsinc ++; break; } } if ((ret = get_bits(s, av_log2(newpasses1) + cblk->lblock)) < 0) return ret; if (ret > sizeof(cblk->data)) { avpriv_request_sample(s->avctx, "Block with lengthinc greater than %"SIZE_SPECIFIER"", sizeof(cblk->data)); return AVERROR_PATCHWELCOME; } cblk->lengthinc[cblk->nb_lengthinc++] = ret; cblk->npasses += newpasses1; newpasses -= newpasses1; } while(newpasses); } } jpeg2000_flush(s); if (codsty->csty & JPEG2000_CSTY_EPH) { if (bytestream2_peek_be16(&s->g) == JPEG2000_EPH) bytestream2_skip(&s->g, 2); else av_log(s->avctx, AV_LOG_ERROR, "EPH marker not found.\n"); } for (bandno = 0; bandno < rlevel->nbands; bandno++) { Jpeg2000Band *band = rlevel->band + bandno; Jpeg2000Prec *prec = band->prec + precno; nb_code_blocks = prec->nb_codeblocks_height * prec->nb_codeblocks_width; for (cblkno = 0; cblkno < nb_code_blocks; cblkno++) { Jpeg2000Cblk *cblk = prec->cblk + cblkno; for (cwsno = 0; cwsno < cblk->nb_lengthinc; cwsno ++) { if ( bytestream2_get_bytes_left(&s->g) < cblk->lengthinc[cwsno] || sizeof(cblk->data) < cblk->length + cblk->lengthinc[cwsno] + 4 ) { av_log(s->avctx, AV_LOG_ERROR, "Block length %"PRIu16" or lengthinc %d is too large, left %d\n", cblk->length, cblk->lengthinc[cwsno], bytestream2_get_bytes_left(&s->g)); return AVERROR_INVALIDDATA; } bytestream2_get_bufferu(&s->g, cblk->data + cblk->length, cblk->lengthinc[cwsno]); cblk->length += cblk->lengthinc[cwsno]; cblk->lengthinc[cwsno] = 0; 
if (cblk->nb_terminationsinc) { cblk->nb_terminationsinc--; cblk->nb_terminations++; cblk->data[cblk->length++] = 0xFF; cblk->data[cblk->length++] = 0xFF; cblk->data_start[cblk->nb_terminations] = cblk->length; } } } } return 0; }
22,882
0
static int rdt_parse_sdp_line (AVFormatContext *s, int st_index, PayloadContext *rdt, const char *line) { AVStream *stream = s->streams[st_index]; const char *p = line; if (av_strstart(p, "OpaqueData:buffer;", &p)) { rdt->mlti_data = rdt_parse_b64buf(&rdt->mlti_data_size, p); } else if (av_strstart(p, "StartTime:integer;", &p)) stream->first_dts = atoi(p); else if (av_strstart(p, "ASMRuleBook:string;", &p)) { int n, first = -1; for (n = 0; n < s->nb_streams; n++) if (s->streams[n]->id == stream->id) { int count = s->streams[n]->index + 1; if (first == -1) first = n; if (rdt->nb_rmst < count) { RMStream **rmst= av_realloc(rdt->rmst, count*sizeof(*rmst)); if (!rmst) return AVERROR(ENOMEM); memset(rmst + rdt->nb_rmst, 0, (count - rdt->nb_rmst) * sizeof(*rmst)); rdt->rmst = rmst; rdt->nb_rmst = count; } rdt->rmst[s->streams[n]->index] = ff_rm_alloc_rmstream(); rdt_load_mdpr(rdt, s->streams[n], (n - first) * 2); if (s->streams[n]->codec->codec_id == CODEC_ID_AAC) s->streams[n]->codec->frame_size = 1; // FIXME } } return 0; }
22,883
0
static void psy_3gpp_analyze_channel(FFPsyContext *ctx, int channel, const float *coefs, const FFPsyWindowInfo *wi) { AacPsyContext *pctx = (AacPsyContext*) ctx->model_priv_data; AacPsyChannel *pch = &pctx->ch[channel]; int i, w, g; float desired_bits, desired_pe, delta_pe, reduction= NAN, spread_en[128] = {0}; float a = 0.0f, active_lines = 0.0f, norm_fac = 0.0f; float pe = pctx->chan_bitrate > 32000 ? 0.0f : FFMAX(50.0f, 100.0f - pctx->chan_bitrate * 100.0f / 32000.0f); const int num_bands = ctx->num_bands[wi->num_windows == 8]; const uint8_t *band_sizes = ctx->bands[wi->num_windows == 8]; AacPsyCoeffs *coeffs = pctx->psy_coef[wi->num_windows == 8]; const float avoid_hole_thr = wi->num_windows == 8 ? PSY_3GPP_AH_THR_SHORT : PSY_3GPP_AH_THR_LONG; //calculate energies, initial thresholds and related values - 5.4.2 "Threshold Calculation" calc_thr_3gpp(wi, num_bands, pch, band_sizes, coefs); //modify thresholds and energies - spread, threshold in quiet, pre-echo control for (w = 0; w < wi->num_windows*16; w += 16) { AacPsyBand *bands = &pch->band[w]; /* 5.4.2.3 "Spreading" & 5.4.3 "Spread Energy Calculation" */ spread_en[0] = bands[0].energy; for (g = 1; g < num_bands; g++) { bands[g].thr = FFMAX(bands[g].thr, bands[g-1].thr * coeffs[g].spread_hi[0]); spread_en[w+g] = FFMAX(bands[g].energy, spread_en[w+g-1] * coeffs[g].spread_hi[1]); } for (g = num_bands - 2; g >= 0; g--) { bands[g].thr = FFMAX(bands[g].thr, bands[g+1].thr * coeffs[g].spread_low[0]); spread_en[w+g] = FFMAX(spread_en[w+g], spread_en[w+g+1] * coeffs[g].spread_low[1]); } //5.4.2.4 "Threshold in quiet" for (g = 0; g < num_bands; g++) { AacPsyBand *band = &bands[g]; band->thr_quiet = band->thr = FFMAX(band->thr, coeffs[g].ath); //5.4.2.5 "Pre-echo control" if (!(wi->window_type[0] == LONG_STOP_SEQUENCE || (wi->window_type[1] == LONG_START_SEQUENCE && !w))) band->thr = FFMAX(PSY_3GPP_RPEMIN*band->thr, FFMIN(band->thr, PSY_3GPP_RPELEV*pch->prev_band[w+g].thr_quiet)); /* 5.6.1.3.1 "Preparatory steps of the perceptual entropy calculation" */ pe += calc_pe_3gpp(band); a += band->pe_const; active_lines += band->active_lines; /* 5.6.1.3.3 "Selection of the bands for avoidance of holes" */ if (spread_en[w+g] * avoid_hole_thr > band->energy || coeffs[g].min_snr > 1.0f) band->avoid_holes = PSY_3GPP_AH_NONE; else band->avoid_holes = PSY_3GPP_AH_INACTIVE; } } /* 5.6.1.3.2 "Calculation of the desired perceptual entropy" */ ctx->ch[channel].entropy = pe; desired_bits = calc_bit_demand(pctx, pe, ctx->bitres.bits, ctx->bitres.size, wi->num_windows == 8); desired_pe = PSY_3GPP_BITS_TO_PE(desired_bits); /* NOTE: PE correction is kept simple. During initial testing it had very * little effect on the final bitrate. Probably a good idea to come * back and do more testing later. 
*/ if (ctx->bitres.bits > 0) desired_pe *= av_clipf(pctx->pe.previous / PSY_3GPP_BITS_TO_PE(ctx->bitres.bits), 0.85f, 1.15f); pctx->pe.previous = PSY_3GPP_BITS_TO_PE(desired_bits); if (desired_pe < pe) { /* 5.6.1.3.4 "First Estimation of the reduction value" */ for (w = 0; w < wi->num_windows*16; w += 16) { reduction = calc_reduction_3gpp(a, desired_pe, pe, active_lines); pe = 0.0f; a = 0.0f; active_lines = 0.0f; for (g = 0; g < num_bands; g++) { AacPsyBand *band = &pch->band[w+g]; band->thr = calc_reduced_thr_3gpp(band, coeffs[g].min_snr, reduction); /* recalculate PE */ pe += calc_pe_3gpp(band); a += band->pe_const; active_lines += band->active_lines; } } /* 5.6.1.3.5 "Second Estimation of the reduction value" */ for (i = 0; i < 2; i++) { float pe_no_ah = 0.0f, desired_pe_no_ah; active_lines = a = 0.0f; for (w = 0; w < wi->num_windows*16; w += 16) { for (g = 0; g < num_bands; g++) { AacPsyBand *band = &pch->band[w+g]; if (band->avoid_holes != PSY_3GPP_AH_ACTIVE) { pe_no_ah += band->pe; a += band->pe_const; active_lines += band->active_lines; } } } desired_pe_no_ah = FFMAX(desired_pe - (pe - pe_no_ah), 0.0f); if (active_lines > 0.0f) reduction += calc_reduction_3gpp(a, desired_pe_no_ah, pe_no_ah, active_lines); pe = 0.0f; for (w = 0; w < wi->num_windows*16; w += 16) { for (g = 0; g < num_bands; g++) { AacPsyBand *band = &pch->band[w+g]; if (active_lines > 0.0f) band->thr = calc_reduced_thr_3gpp(band, coeffs[g].min_snr, reduction); pe += calc_pe_3gpp(band); band->norm_fac = band->active_lines / band->thr; norm_fac += band->norm_fac; } } delta_pe = desired_pe - pe; if (fabs(delta_pe) > 0.05f * desired_pe) break; } if (pe < 1.15f * desired_pe) { /* 6.6.1.3.6 "Final threshold modification by linearization" */ norm_fac = 1.0f / norm_fac; for (w = 0; w < wi->num_windows*16; w += 16) { for (g = 0; g < num_bands; g++) { AacPsyBand *band = &pch->band[w+g]; if (band->active_lines > 0.5f) { float delta_sfb_pe = band->norm_fac * norm_fac * delta_pe; float thr = band->thr; thr *= exp2f(delta_sfb_pe / band->active_lines); if (thr > coeffs[g].min_snr * band->energy && band->avoid_holes == PSY_3GPP_AH_INACTIVE) thr = FFMAX(band->thr, coeffs[g].min_snr * band->energy); band->thr = thr; } } } } else { /* 5.6.1.3.7 "Further perceptual entropy reduction" */ g = num_bands; while (pe > desired_pe && g--) { for (w = 0; w < wi->num_windows*16; w+= 16) { AacPsyBand *band = &pch->band[w+g]; if (band->avoid_holes != PSY_3GPP_AH_NONE && coeffs[g].min_snr < PSY_SNR_1DB) { coeffs[g].min_snr = PSY_SNR_1DB; band->thr = band->energy * PSY_SNR_1DB; pe += band->active_lines * 1.5f - band->pe; } } } /* TODO: allow more holes (unused without mid/side) */ } } for (w = 0; w < wi->num_windows*16; w += 16) { for (g = 0; g < num_bands; g++) { AacPsyBand *band = &pch->band[w+g]; FFPsyBand *psy_band = &ctx->ch[channel].psy_bands[w+g]; psy_band->threshold = band->thr; psy_band->energy = band->energy; } } memcpy(pch->prev_band, pch->band, sizeof(pch->band)); }
22,884
0
static int avi_write_ix(AVFormatContext *s) { AVIOContext *pb = s->pb; AVIContext *avi = s->priv_data; char tag[5]; char ix_tag[] = "ix00"; int i, j; assert(pb->seekable); if (avi->riff_id > AVI_MASTER_INDEX_SIZE) return -1; for (i = 0; i < s->nb_streams; i++) { AVIStream *avist = s->streams[i]->priv_data; int64_t ix, pos; avi_stream2fourcc(tag, i, s->streams[i]->codecpar->codec_type); ix_tag[3] = '0' + i; /* Writing AVI OpenDML leaf index chunk */ ix = avio_tell(pb); ffio_wfourcc(pb, ix_tag); /* ix?? */ avio_wl32(pb, avist->indexes.entry * 8 + 24); /* chunk size */ avio_wl16(pb, 2); /* wLongsPerEntry */ avio_w8(pb, 0); /* bIndexSubType (0 == frame index) */ avio_w8(pb, 1); /* bIndexType (1 == AVI_INDEX_OF_CHUNKS) */ avio_wl32(pb, avist->indexes.entry); /* nEntriesInUse */ ffio_wfourcc(pb, tag); /* dwChunkId */ avio_wl64(pb, avi->movi_list); /* qwBaseOffset */ avio_wl32(pb, 0); /* dwReserved_3 (must be 0) */ for (j = 0; j < avist->indexes.entry; j++) { AVIIentry *ie = avi_get_ientry(&avist->indexes, j); avio_wl32(pb, ie->pos + 8); avio_wl32(pb, ((uint32_t) ie->len & ~0x80000000) | (ie->flags & 0x10 ? 0 : 0x80000000)); } avio_flush(pb); pos = avio_tell(pb); /* Updating one entry in the AVI OpenDML master index */ avio_seek(pb, avist->indexes.indx_start - 8, SEEK_SET); ffio_wfourcc(pb, "indx"); /* enabling this entry */ avio_skip(pb, 8); avio_wl32(pb, avi->riff_id); /* nEntriesInUse */ avio_skip(pb, 16 * avi->riff_id); avio_wl64(pb, ix); /* qwOffset */ avio_wl32(pb, pos - ix); /* dwSize */ avio_wl32(pb, avist->indexes.entry); /* dwDuration */ avio_seek(pb, pos, SEEK_SET); } return 0; }
22,885
0
int ff_dxva2_decode_init(AVCodecContext *avctx) { FFDXVASharedContext *sctx = DXVA_SHARED_CONTEXT(avctx); AVHWFramesContext *frames_ctx = NULL; int ret = 0; // Old API. if (avctx->hwaccel_context) return 0; // (avctx->pix_fmt is not updated yet at this point) sctx->pix_fmt = avctx->hwaccel->pix_fmt; if (avctx->codec_id == AV_CODEC_ID_H264 && (avctx->profile & ~FF_PROFILE_H264_CONSTRAINED) > FF_PROFILE_H264_HIGH) { av_log(avctx, AV_LOG_VERBOSE, "Unsupported H.264 profile for DXVA HWAccel: %d\n",avctx->profile); return AVERROR(ENOTSUP); } if (avctx->codec_id == AV_CODEC_ID_HEVC && avctx->profile != FF_PROFILE_HEVC_MAIN && avctx->profile != FF_PROFILE_HEVC_MAIN_10) { av_log(avctx, AV_LOG_VERBOSE, "Unsupported HEVC profile for DXVA HWAccel: %d\n", avctx->profile); return AVERROR(ENOTSUP); } if (!avctx->hw_frames_ctx && !avctx->hw_device_ctx) { av_log(avctx, AV_LOG_ERROR, "Either a hw_frames_ctx or a hw_device_ctx needs to be set for hardware decoding.\n"); return AVERROR(EINVAL); } if (avctx->hw_frames_ctx) { frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data; } else { avctx->hw_frames_ctx = av_hwframe_ctx_alloc(avctx->hw_device_ctx); if (!avctx->hw_frames_ctx) return AVERROR(ENOMEM); frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data; dxva_adjust_hwframes(avctx, frames_ctx); ret = av_hwframe_ctx_init(avctx->hw_frames_ctx); if (ret < 0) goto fail; } sctx->device_ctx = frames_ctx->device_ctx; if (frames_ctx->format != sctx->pix_fmt || !((sctx->pix_fmt == AV_PIX_FMT_D3D11 && CONFIG_D3D11VA) || (sctx->pix_fmt == AV_PIX_FMT_DXVA2_VLD && CONFIG_DXVA2))) { av_log(avctx, AV_LOG_ERROR, "Invalid pixfmt for hwaccel!\n"); ret = AVERROR(EINVAL); goto fail; } #if CONFIG_D3D11VA if (sctx->pix_fmt == AV_PIX_FMT_D3D11) { AVD3D11VADeviceContext *device_hwctx = frames_ctx->device_ctx->hwctx; AVD3D11VAContext *d3d11_ctx = &sctx->ctx.d3d11va; HRESULT hr; ff_dxva2_lock(avctx); ret = d3d11va_create_decoder(avctx); ff_dxva2_unlock(avctx); if (ret < 0) goto fail; d3d11_ctx->decoder = sctx->d3d11_decoder; d3d11_ctx->video_context = device_hwctx->video_context; d3d11_ctx->cfg = &sctx->d3d11_config; d3d11_ctx->surface_count = sctx->nb_d3d11_views; d3d11_ctx->surface = sctx->d3d11_views; d3d11_ctx->workaround = sctx->workaround; d3d11_ctx->context_mutex = INVALID_HANDLE_VALUE; } #endif #if CONFIG_DXVA2 if (sctx->pix_fmt == AV_PIX_FMT_DXVA2_VLD) { AVDXVA2FramesContext *frames_hwctx = frames_ctx->hwctx; struct dxva_context *dxva_ctx = &sctx->ctx.dxva2; ff_dxva2_lock(avctx); ret = dxva2_create_decoder(avctx); ff_dxva2_unlock(avctx); if (ret < 0) goto fail; dxva_ctx->decoder = sctx->dxva2_decoder; dxva_ctx->cfg = &sctx->dxva2_config; dxva_ctx->surface = frames_hwctx->surfaces; dxva_ctx->surface_count = frames_hwctx->nb_surfaces; dxva_ctx->workaround = sctx->workaround; } #endif return 0; fail: ff_dxva2_decode_uninit(avctx); return ret; }
22,886
0
static av_cold int twolame_encode_init(AVCodecContext *avctx) { TWOLAMEContext *s = avctx->priv_data; int ret; avctx->frame_size = TWOLAME_SAMPLES_PER_FRAME; avctx->delay = 512 - 32 + 1; s->glopts = twolame_init(); if (!s->glopts) return AVERROR(ENOMEM); twolame_set_verbosity(s->glopts, s->verbosity); twolame_set_mode(s->glopts, s->mode); twolame_set_psymodel(s->glopts, s->psymodel); twolame_set_energy_levels(s->glopts, s->energy); twolame_set_error_protection(s->glopts, s->error_protection); twolame_set_copyright(s->glopts, s->copyright); twolame_set_original(s->glopts, s->original); twolame_set_num_channels(s->glopts, avctx->channels); twolame_set_in_samplerate(s->glopts, avctx->sample_rate); twolame_set_out_samplerate(s->glopts, avctx->sample_rate); if (avctx->flags & CODEC_FLAG_QSCALE || !avctx->bit_rate) { twolame_set_VBR(s->glopts, TRUE); twolame_set_VBR_level(s->glopts, avctx->global_quality / (float) FF_QP2LAMBDA); av_log(avctx, AV_LOG_WARNING, "VBR in MP2 is a hack, use another codec that supports it.\n"); } else { twolame_set_bitrate(s->glopts, avctx->bit_rate / 1000); } ret = twolame_init_params(s->glopts); if (ret) { twolame_encode_close(avctx); return AVERROR_UNKNOWN; } return 0; }
22,887
1
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { SheerVideoContext *s = avctx->priv_data; ThreadFrame frame = { .f = data }; AVFrame *p = data; GetBitContext gb; unsigned format; int ret; if (avpkt->size <= 20) if (AV_RL32(avpkt->data) != MKTAG('S','h','i','r') && AV_RL32(avpkt->data) != MKTAG('Z','w','a','k')) s->alt = 0; format = AV_RL32(avpkt->data + 16); av_log(avctx, AV_LOG_DEBUG, "format: %s\n", av_fourcc2str(format)); switch (format) { case MKTAG(' ', 'R', 'G', 'B'): avctx->pix_fmt = AV_PIX_FMT_RGB0; s->decode_frame = decode_rgb; if (s->format != format) { ret = build_vlc(&s->vlc[0], l_r_rgb, 256); ret |= build_vlc(&s->vlc[1], l_g_rgb, 256); break; case MKTAG(' ', 'r', 'G', 'B'): avctx->pix_fmt = AV_PIX_FMT_RGB0; s->decode_frame = decode_rgbi; if (s->format != format) { ret = build_vlc(&s->vlc[0], l_r_rgbi, 256); ret |= build_vlc(&s->vlc[1], l_g_rgbi, 256); break; case MKTAG('A', 'R', 'G', 'X'): avctx->pix_fmt = AV_PIX_FMT_GBRAP10; s->decode_frame = decode_argx; if (s->format != format) { ret = build_vlc(&s->vlc[0], l_r_rgbx, 1024); ret |= build_vlc(&s->vlc[1], l_g_rgbx, 1024); break; case MKTAG('A', 'r', 'G', 'X'): avctx->pix_fmt = AV_PIX_FMT_GBRAP10; s->decode_frame = decode_argxi; if (s->format != format) { ret = build_vlc(&s->vlc[0], l_r_rgbxi, 1024); ret |= build_vlc(&s->vlc[1], l_g_rgbxi, 1024); break; case MKTAG('R', 'G', 'B', 'X'): avctx->pix_fmt = AV_PIX_FMT_GBRP10; s->decode_frame = decode_rgbx; if (s->format != format) { ret = build_vlc(&s->vlc[0], l_r_rgbx, 1024); ret |= build_vlc(&s->vlc[1], l_g_rgbx, 1024); break; case MKTAG('r', 'G', 'B', 'X'): avctx->pix_fmt = AV_PIX_FMT_GBRP10; s->decode_frame = decode_rgbxi; if (s->format != format) { ret = build_vlc(&s->vlc[0], l_r_rgbxi, 1024); ret |= build_vlc(&s->vlc[1], l_g_rgbxi, 1024); break; case MKTAG('A', 'R', 'G', 'B'): avctx->pix_fmt = AV_PIX_FMT_ARGB; s->decode_frame = decode_argb; if (s->format != format) { ret = build_vlc(&s->vlc[0], l_r_rgb, 256); ret |= build_vlc(&s->vlc[1], l_g_rgb, 256); break; case MKTAG('A', 'r', 'G', 'B'): avctx->pix_fmt = AV_PIX_FMT_ARGB; s->decode_frame = decode_argbi; if (s->format != format) { ret = build_vlc(&s->vlc[0], l_r_rgbi, 256); ret |= build_vlc(&s->vlc[1], l_g_rgbi, 256); break; case MKTAG('A', 'Y', 'B', 'R'): s->alt = 1; case MKTAG('A', 'Y', 'b', 'R'): avctx->pix_fmt = AV_PIX_FMT_YUVA444P; s->decode_frame = decode_aybr; if (s->format != format) { ret = build_vlc(&s->vlc[0], l_y_ybr, 256); ret |= build_vlc(&s->vlc[1], l_u_ybr, 256); break; case MKTAG('A', 'y', 'B', 'R'): s->alt = 1; case MKTAG('A', 'y', 'b', 'R'): avctx->pix_fmt = AV_PIX_FMT_YUVA444P; s->decode_frame = decode_aybri; if (s->format != format) { ret = build_vlc(&s->vlc[0], l_y_ybri, 256); ret |= build_vlc(&s->vlc[1], l_u_ybri, 256); break; case MKTAG(' ', 'Y', 'B', 'R'): s->alt = 1; case MKTAG(' ', 'Y', 'b', 'R'): avctx->pix_fmt = AV_PIX_FMT_YUV444P; s->decode_frame = decode_ybr; if (s->format != format) { ret = build_vlc(&s->vlc[0], l_y_ybr, 256); ret |= build_vlc(&s->vlc[1], l_u_ybr, 256); break; case MKTAG(' ', 'y', 'B', 'R'): s->alt = 1; case MKTAG(' ', 'y', 'b', 'R'): avctx->pix_fmt = AV_PIX_FMT_YUV444P; s->decode_frame = decode_ybri; if (s->format != format) { ret = build_vlc(&s->vlc[0], l_y_ybri, 256); ret |= build_vlc(&s->vlc[1], l_u_ybri, 256); break; case MKTAG('Y', 'B', 'R', 0x0a): avctx->pix_fmt = AV_PIX_FMT_YUV444P10; s->decode_frame = decode_ybr10; if (s->format != format) { ret = build_vlc(&s->vlc[0], l_y_ybr10, 1024); ret |= build_vlc(&s->vlc[1], l_u_ybr10, 
1024); break; case MKTAG('y', 'B', 'R', 0x0a): avctx->pix_fmt = AV_PIX_FMT_YUV444P10; s->decode_frame = decode_ybr10i; if (s->format != format) { ret = build_vlc(&s->vlc[0], l_y_ybr10i, 1024); ret |= build_vlc(&s->vlc[1], l_u_ybr10i, 1024); break; case MKTAG('C', 'A', '4', 'p'): avctx->pix_fmt = AV_PIX_FMT_YUVA444P10; s->decode_frame = decode_ca4p; if (s->format != format) { ret = build_vlc(&s->vlc[0], l_y_ybr10, 1024); ret |= build_vlc(&s->vlc[1], l_u_ybr10, 1024); break; case MKTAG('C', 'A', '4', 'i'): avctx->pix_fmt = AV_PIX_FMT_YUVA444P10; s->decode_frame = decode_ca4i; if (s->format != format) { ret = build_vlc(&s->vlc[0], l_y_ybr10i, 1024); ret |= build_vlc(&s->vlc[1], l_u_ybr10i, 1024); break; case MKTAG('B', 'Y', 'R', 'Y'): avctx->pix_fmt = AV_PIX_FMT_YUV422P; s->decode_frame = decode_byry; if (s->format != format) { ret = build_vlc(&s->vlc[0], l_y_byry, 256); ret |= build_vlc(&s->vlc[1], l_u_byry, 256); break; case MKTAG('B', 'Y', 'R', 'y'): avctx->pix_fmt = AV_PIX_FMT_YUV422P; s->decode_frame = decode_byryi; if (s->format != format) { ret = build_vlc(&s->vlc[0], l_y_byryi, 256); ret |= build_vlc(&s->vlc[1], l_u_byryi, 256); break; case MKTAG('Y', 'b', 'Y', 'r'): avctx->pix_fmt = AV_PIX_FMT_YUV422P; s->decode_frame = decode_ybyr; if (s->format != format) { ret = build_vlc(&s->vlc[0], l_y_ybyr, 256); ret |= build_vlc(&s->vlc[1], l_u_ybyr, 256); break; case MKTAG('C', '8', '2', 'p'): avctx->pix_fmt = AV_PIX_FMT_YUVA422P; s->decode_frame = decode_c82p; if (s->format != format) { ret = build_vlc(&s->vlc[0], l_y_byry, 256); ret |= build_vlc(&s->vlc[1], l_u_byry, 256); break; case MKTAG('C', '8', '2', 'i'): avctx->pix_fmt = AV_PIX_FMT_YUVA422P; s->decode_frame = decode_c82i; if (s->format != format) { ret = build_vlc(&s->vlc[0], l_y_byryi, 256); ret |= build_vlc(&s->vlc[1], l_u_byryi, 256); break; case MKTAG(0xa2, 'Y', 'R', 'Y'): avctx->pix_fmt = AV_PIX_FMT_YUV422P10; s->decode_frame = decode_yry10; if (s->format != format) { ret = build_vlc(&s->vlc[0], l_y_yry10, 1024); ret |= build_vlc(&s->vlc[1], l_u_yry10, 1024); break; case MKTAG(0xa2, 'Y', 'R', 'y'): avctx->pix_fmt = AV_PIX_FMT_YUV422P10; s->decode_frame = decode_yry10i; if (s->format != format) { ret = build_vlc(&s->vlc[0], l_y_yry10i, 1024); ret |= build_vlc(&s->vlc[1], l_u_yry10i, 1024); break; case MKTAG('C', 'A', '2', 'p'): avctx->pix_fmt = AV_PIX_FMT_YUVA422P10; s->decode_frame = decode_ca2p; if (s->format != format) { ret = build_vlc(&s->vlc[0], l_y_yry10, 1024); ret |= build_vlc(&s->vlc[1], l_u_yry10, 1024); break; case MKTAG('C', 'A', '2', 'i'): avctx->pix_fmt = AV_PIX_FMT_YUVA422P10; s->decode_frame = decode_ca2i; if (s->format != format) { ret = build_vlc(&s->vlc[0], l_y_yry10i, 1024); ret |= build_vlc(&s->vlc[1], l_u_yry10i, 1024); break; default: avpriv_request_sample(avctx, "unsupported format: 0x%X", format); return AVERROR_PATCHWELCOME; if (s->format != format) { if (ret < 0) return ret; s->format = format; p->pict_type = AV_PICTURE_TYPE_I; p->key_frame = 1; if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0) return ret; if ((ret = init_get_bits8(&gb, avpkt->data + 20, avpkt->size - 20)) < 0) return ret; s->decode_frame(avctx, p, &gb); *got_frame = 1; return avpkt->size;
22,888
1
static int count_contiguous_clusters(uint64_t nb_clusters, int cluster_size, uint64_t *l2_table, uint64_t mask) { int i; uint64_t offset = be64_to_cpu(l2_table[0]) & ~mask; if (!offset) return 0; for (i = 0; i < nb_clusters; i++) if (offset + i * cluster_size != (be64_to_cpu(l2_table[i]) & ~mask)) break; return i; }
22,889
1
static int add_metadata_from_side_data(AVCodecContext *avctx, AVFrame *frame) { int size, ret = 0; const uint8_t *side_metadata; const uint8_t *end; side_metadata = av_packet_get_side_data(avctx->pkt, AV_PKT_DATA_STRINGS_METADATA, &size); if (!side_metadata) goto end; end = side_metadata + size; while (side_metadata < end) { const uint8_t *key = side_metadata; const uint8_t *val = side_metadata + strlen(key) + 1; int ret = av_dict_set(avpriv_frame_get_metadatap(frame), key, val, 0); if (ret < 0) break; side_metadata = val + strlen(val) + 1; } end: return ret; }
22,891
1
static void parse_type_str(Visitor *v, const char *name, char **obj, Error **errp) { StringInputVisitor *siv = to_siv(v); if (siv->string) { *obj = g_strdup(siv->string); } else { *obj = NULL; error_setg(errp, QERR_INVALID_PARAMETER_TYPE, name ? name : "null", "string"); } }
22,892
1
static void register_subpage(MemoryRegionSection *section) { subpage_t *subpage; target_phys_addr_t base = section->offset_within_address_space & TARGET_PAGE_MASK; MemoryRegionSection *existing = phys_page_find(base >> TARGET_PAGE_BITS); MemoryRegionSection subsection = { .offset_within_address_space = base, .size = TARGET_PAGE_SIZE, }; target_phys_addr_t start, end; assert(existing->mr->subpage || existing->mr == &io_mem_unassigned); if (!(existing->mr->subpage)) { subpage = subpage_init(base); subsection.mr = &subpage->iomem; phys_page_set(base >> TARGET_PAGE_BITS, 1, phys_section_add(&subsection)); } else { subpage = container_of(existing->mr, subpage_t, iomem); } start = section->offset_within_address_space & ~TARGET_PAGE_MASK; end = start + section->size; subpage_register(subpage, start, end, phys_section_add(section)); }
22,893
1
static int decode_wdlt(GetByteContext *gb, uint8_t *frame, int width, int height) { const uint8_t *frame_end = frame + width * height; uint8_t *line_ptr; int count, i, v, lines, segments; int y = 0; lines = bytestream2_get_le16(gb); if (lines > height) return AVERROR_INVALIDDATA; while (lines--) { if (bytestream2_get_bytes_left(gb) < 2) return AVERROR_INVALIDDATA; segments = bytestream2_get_le16u(gb); while ((segments & 0xC000) == 0xC000) { unsigned skip_lines = -(int16_t)segments; unsigned delta = -((int16_t)segments * width); if (frame_end - frame <= delta || y + lines + skip_lines > height) return AVERROR_INVALIDDATA; frame += delta; y += skip_lines; segments = bytestream2_get_le16(gb); } if (frame_end <= frame) return AVERROR_INVALIDDATA; if (segments & 0x8000) { frame[width - 1] = segments & 0xFF; segments = bytestream2_get_le16(gb); } line_ptr = frame; if (frame_end - frame < width) return AVERROR_INVALIDDATA; frame += width; y++; while (segments--) { if (frame - line_ptr <= bytestream2_peek_byte(gb)) return AVERROR_INVALIDDATA; line_ptr += bytestream2_get_byte(gb); count = (int8_t)bytestream2_get_byte(gb); if (count >= 0) { if (frame - line_ptr < count * 2) return AVERROR_INVALIDDATA; if (bytestream2_get_buffer(gb, line_ptr, count * 2) != count * 2) return AVERROR_INVALIDDATA; line_ptr += count * 2; } else { count = -count; if (frame - line_ptr < count * 2) return AVERROR_INVALIDDATA; v = bytestream2_get_le16(gb); for (i = 0; i < count; i++) bytestream_put_le16(&line_ptr, v); } } } return 0; }
22,894
1
static void adb_mouse_event(void *opaque, int dx1, int dy1, int dz1, int buttons_state) { MouseState *s = opaque; s->dx += dx1; s->dy += dy1; s->dz += dz1; s->buttons_state = buttons_state; }
22,896
1
void cache_insert(PageCache *cache, uint64_t addr, uint8_t *pdata) { CacheItem *it = NULL; g_assert(cache); g_assert(cache->page_cache); /* actual update of entry */ it = cache_get_by_addr(cache, addr); if (!it->it_data) { cache->num_items++; } it->it_data = pdata; it->it_age = ++cache->max_item_age; it->it_addr = addr; }
22,897
1
void replay_bh_schedule_event(QEMUBH *bh) { if (replay_mode != REPLAY_MODE_NONE) { uint64_t id = replay_get_current_step(); replay_add_event(REPLAY_ASYNC_EVENT_BH, bh, NULL, id); } else { qemu_bh_schedule(bh); } }
22,898
0
static FFPsyWindowInfo psy_lame_window(FFPsyContext *ctx, const int16_t *audio, const int16_t *la, int channel, int prev_type) { AacPsyContext *pctx = (AacPsyContext*) ctx->model_priv_data; AacPsyChannel *pch = &pctx->ch[channel]; int grouping = 0; int uselongblock = 1; int attacks[AAC_NUM_BLOCKS_SHORT + 1] = { 0 }; int i; FFPsyWindowInfo wi; memset(&wi, 0, sizeof(wi)); if (la) { float hpfsmpl[AAC_BLOCK_SIZE_LONG]; float const *pf = hpfsmpl; float attack_intensity[(AAC_NUM_BLOCKS_SHORT + 1) * PSY_LAME_NUM_SUBBLOCKS]; float energy_subshort[(AAC_NUM_BLOCKS_SHORT + 1) * PSY_LAME_NUM_SUBBLOCKS]; float energy_short[AAC_NUM_BLOCKS_SHORT + 1] = { 0 }; int chans = ctx->avctx->channels; const int16_t *firbuf = la + (AAC_BLOCK_SIZE_SHORT/4 - PSY_LAME_FIR_LEN) * chans; int j, att_sum = 0; /* LAME comment: apply high pass filter of fs/4 */ for (i = 0; i < AAC_BLOCK_SIZE_LONG; i++) { float sum1, sum2; sum1 = firbuf[(i + ((PSY_LAME_FIR_LEN - 1) / 2)) * chans]; sum2 = 0.0; for (j = 0; j < ((PSY_LAME_FIR_LEN - 1) / 2) - 1; j += 2) { sum1 += psy_fir_coeffs[j] * (firbuf[(i + j) * chans] + firbuf[(i + PSY_LAME_FIR_LEN - j) * chans]); sum2 += psy_fir_coeffs[j + 1] * (firbuf[(i + j + 1) * chans] + firbuf[(i + PSY_LAME_FIR_LEN - j - 1) * chans]); } hpfsmpl[i] = sum1 + sum2; } /* Calculate the energies of each sub-shortblock */ for (i = 0; i < PSY_LAME_NUM_SUBBLOCKS; i++) { energy_subshort[i] = pch->prev_energy_subshort[i + ((AAC_NUM_BLOCKS_SHORT - 1) * PSY_LAME_NUM_SUBBLOCKS)]; assert(pch->prev_energy_subshort[i + ((AAC_NUM_BLOCKS_SHORT - 2) * PSY_LAME_NUM_SUBBLOCKS + 1)] > 0); attack_intensity[i] = energy_subshort[i] / pch->prev_energy_subshort[i + ((AAC_NUM_BLOCKS_SHORT - 2) * PSY_LAME_NUM_SUBBLOCKS + 1)]; energy_short[0] += energy_subshort[i]; } for (i = 0; i < AAC_NUM_BLOCKS_SHORT * PSY_LAME_NUM_SUBBLOCKS; i++) { float const *const pfe = pf + AAC_BLOCK_SIZE_LONG / (AAC_NUM_BLOCKS_SHORT * PSY_LAME_NUM_SUBBLOCKS); float p = 1.0f; for (; pf < pfe; pf++) if (p < fabsf(*pf)) p = fabsf(*pf); pch->prev_energy_subshort[i] = energy_subshort[i + PSY_LAME_NUM_SUBBLOCKS] = p; energy_short[1 + i / PSY_LAME_NUM_SUBBLOCKS] += p; /* FIXME: The indexes below are [i + 3 - 2] in the LAME source. * Obviously the 3 and 2 have some significance, or this would be just [i + 1] * (which is what we use here). What the 3 stands for is ambigious, as it is both * number of short blocks, and the number of sub-short blocks. * It seems that LAME is comparing each sub-block to sub-block + 1 in the * previous block. 
*/ if (p > energy_subshort[i + 1]) p = p / energy_subshort[i + 1]; else if (energy_subshort[i + 1] > p * 10.0f) p = energy_subshort[i + 1] / (p * 10.0f); else p = 0.0; attack_intensity[i + PSY_LAME_NUM_SUBBLOCKS] = p; } /* compare energy between sub-short blocks */ for (i = 0; i < (AAC_NUM_BLOCKS_SHORT + 1) * PSY_LAME_NUM_SUBBLOCKS; i++) if (!attacks[i / PSY_LAME_NUM_SUBBLOCKS]) if (attack_intensity[i] > pch->attack_threshold) attacks[i / PSY_LAME_NUM_SUBBLOCKS] = (i % PSY_LAME_NUM_SUBBLOCKS) + 1; /* should have energy change between short blocks, in order to avoid periodic signals */ /* Good samples to show the effect are Trumpet test songs */ /* GB: tuned (1) to avoid too many short blocks for test sample TRUMPET */ /* RH: tuned (2) to let enough short blocks through for test sample FSOL and SNAPS */ for (i = 1; i < AAC_NUM_BLOCKS_SHORT + 1; i++) { float const u = energy_short[i - 1]; float const v = energy_short[i]; float const m = FFMAX(u, v); if (m < 40000) { /* (2) */ if (u < 1.7f * v && v < 1.7f * u) { /* (1) */ if (i == 1 && attacks[0] < attacks[i]) attacks[0] = 0; attacks[i] = 0; } } att_sum += attacks[i]; } if (attacks[0] <= pch->prev_attack) attacks[0] = 0; att_sum += attacks[0]; /* 3 below indicates the previous attack happened in the last sub-block of the previous sequence */ if (pch->prev_attack == 3 || att_sum) { uselongblock = 0; if (attacks[1] && attacks[0]) attacks[1] = 0; if (attacks[2] && attacks[1]) attacks[2] = 0; if (attacks[3] && attacks[2]) attacks[3] = 0; if (attacks[4] && attacks[3]) attacks[4] = 0; if (attacks[5] && attacks[4]) attacks[5] = 0; if (attacks[6] && attacks[5]) attacks[6] = 0; if (attacks[7] && attacks[6]) attacks[7] = 0; if (attacks[8] && attacks[7]) attacks[8] = 0; } } else { /* We have no lookahead info, so just use same type as the previous sequence. */ uselongblock = !(prev_type == EIGHT_SHORT_SEQUENCE); } lame_apply_block_type(pch, &wi, uselongblock); wi.window_type[1] = prev_type; if (wi.window_type[0] != EIGHT_SHORT_SEQUENCE) { wi.num_windows = 1; wi.grouping[0] = 1; if (wi.window_type[0] == LONG_START_SEQUENCE) wi.window_shape = 0; else wi.window_shape = 1; } else { int lastgrp = 0; wi.num_windows = 8; wi.window_shape = 0; for (i = 0; i < 8; i++) { if (!((pch->next_grouping >> i) & 1)) lastgrp = i; wi.grouping[lastgrp]++; } } /* Determine grouping, based on the location of the first attack, and save for * the next frame. * FIXME: Move this to analysis. * TODO: Tune groupings depending on attack location * TODO: Handle more than one attack in a group */ for (i = 0; i < 9; i++) { if (attacks[i]) { grouping = i; break; } } pch->next_grouping = window_grouping[grouping]; pch->prev_attack = attacks[8]; return wi; }
22,899
0
static void float_to_int16_stride_altivec(int16_t *dst, const float *src, long len, int stride) { int i, j; vector signed short d, s; for (i = 0; i < len - 7; i += 8) { d = float_to_int16_one_altivec(src + i); for (j = 0; j < 8; j++) { s = vec_splat(d, j); vec_ste(s, 0, dst); dst += stride; } } }
22,900
1
static int blk_connect(struct XenDevice *xendev) { struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev); int pers, index, qflags; bool readonly = true; /* read-only ? */ if (blkdev->directiosafe) { qflags = BDRV_O_NOCACHE | BDRV_O_NATIVE_AIO; } else { qflags = BDRV_O_CACHE_WB; } if (strcmp(blkdev->mode, "w") == 0) { qflags |= BDRV_O_RDWR; readonly = false; } if (blkdev->feature_discard) { qflags |= BDRV_O_UNMAP; } /* init qemu block driver */ index = (blkdev->xendev.dev - 202 * 256) / 16; blkdev->dinfo = drive_get(IF_XEN, 0, index); if (!blkdev->dinfo) { Error *local_err = NULL; /* setup via xenbus -> create new block driver instance */ xen_be_printf(&blkdev->xendev, 2, "create new bdrv (xenbus setup)\n"); blkdev->bs = bdrv_new(blkdev->dev, &local_err); if (local_err) { blkdev->bs = NULL; } if (blkdev->bs) { BlockDriver *drv = bdrv_find_whitelisted_format(blkdev->fileproto, readonly); if (bdrv_open(&blkdev->bs, blkdev->filename, NULL, NULL, qflags, drv, &local_err) != 0) { xen_be_printf(&blkdev->xendev, 0, "error: %s\n", error_get_pretty(local_err)); error_free(local_err); bdrv_unref(blkdev->bs); blkdev->bs = NULL; } } if (!blkdev->bs) { return -1; } } else { /* setup via qemu cmdline -> already setup for us */ xen_be_printf(&blkdev->xendev, 2, "get configured bdrv (cmdline setup)\n"); blkdev->bs = blkdev->dinfo->bdrv; if (bdrv_is_read_only(blkdev->bs) && !readonly) { xen_be_printf(&blkdev->xendev, 0, "Unexpected read-only drive"); blkdev->bs = NULL; return -1; } /* blkdev->bs is not create by us, we get a reference * so we can bdrv_unref() unconditionally */ bdrv_ref(blkdev->bs); } bdrv_attach_dev_nofail(blkdev->bs, blkdev); blkdev->file_size = bdrv_getlength(blkdev->bs); if (blkdev->file_size < 0) { xen_be_printf(&blkdev->xendev, 1, "bdrv_getlength: %d (%s) | drv %s\n", (int)blkdev->file_size, strerror(-blkdev->file_size), bdrv_get_format_name(blkdev->bs) ?: "-"); blkdev->file_size = 0; } xen_be_printf(xendev, 1, "type \"%s\", fileproto \"%s\", filename \"%s\"," " size %" PRId64 " (%" PRId64 " MB)\n", blkdev->type, blkdev->fileproto, blkdev->filename, blkdev->file_size, blkdev->file_size >> 20); /* Fill in number of sector size and number of sectors */ xenstore_write_be_int(&blkdev->xendev, "sector-size", blkdev->file_blk); xenstore_write_be_int64(&blkdev->xendev, "sectors", blkdev->file_size / blkdev->file_blk); if (xenstore_read_fe_int(&blkdev->xendev, "ring-ref", &blkdev->ring_ref) == -1) { return -1; } if (xenstore_read_fe_int(&blkdev->xendev, "event-channel", &blkdev->xendev.remote_port) == -1) { return -1; } if (xenstore_read_fe_int(&blkdev->xendev, "feature-persistent", &pers)) { blkdev->feature_persistent = FALSE; } else { blkdev->feature_persistent = !!pers; } blkdev->protocol = BLKIF_PROTOCOL_NATIVE; if (blkdev->xendev.protocol) { if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_32) == 0) { blkdev->protocol = BLKIF_PROTOCOL_X86_32; } if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_64) == 0) { blkdev->protocol = BLKIF_PROTOCOL_X86_64; } } blkdev->sring = xc_gnttab_map_grant_ref(blkdev->xendev.gnttabdev, blkdev->xendev.dom, blkdev->ring_ref, PROT_READ | PROT_WRITE); if (!blkdev->sring) { return -1; } blkdev->cnt_map++; switch (blkdev->protocol) { case BLKIF_PROTOCOL_NATIVE: { blkif_sring_t *sring_native = blkdev->sring; BACK_RING_INIT(&blkdev->rings.native, sring_native, XC_PAGE_SIZE); break; } case BLKIF_PROTOCOL_X86_32: { blkif_x86_32_sring_t *sring_x86_32 = blkdev->sring; BACK_RING_INIT(&blkdev->rings.x86_32_part, sring_x86_32, 
XC_PAGE_SIZE); break; } case BLKIF_PROTOCOL_X86_64: { blkif_x86_64_sring_t *sring_x86_64 = blkdev->sring; BACK_RING_INIT(&blkdev->rings.x86_64_part, sring_x86_64, XC_PAGE_SIZE); break; } } if (blkdev->feature_persistent) { /* Init persistent grants */ blkdev->max_grants = max_requests * BLKIF_MAX_SEGMENTS_PER_REQUEST; blkdev->persistent_gnts = g_tree_new_full((GCompareDataFunc)int_cmp, NULL, NULL, (GDestroyNotify)destroy_grant); blkdev->persistent_gnt_count = 0; } xen_be_bind_evtchn(&blkdev->xendev); xen_be_printf(&blkdev->xendev, 1, "ok: proto %s, ring-ref %d, " "remote port %d, local port %d\n", blkdev->xendev.protocol, blkdev->ring_ref, blkdev->xendev.remote_port, blkdev->xendev.local_port); return 0; }
22,901
0
static void vp3_idct_dc_add_c(uint8_t *dest/*align 8*/, int line_size, const DCTELEM *block/*align 16*/){ int i, dc = (block[0] + 15) >> 5; for(i = 0; i < 8; i++){ dest[0] = av_clip_uint8(dest[0] + dc); dest[1] = av_clip_uint8(dest[1] + dc); dest[2] = av_clip_uint8(dest[2] + dc); dest[3] = av_clip_uint8(dest[3] + dc); dest[4] = av_clip_uint8(dest[4] + dc); dest[5] = av_clip_uint8(dest[5] + dc); dest[6] = av_clip_uint8(dest[6] + dc); dest[7] = av_clip_uint8(dest[7] + dc); dest += line_size; } }
22,903
1
static void usb_net_handle_destroy(USBDevice *dev) { USBNetState *s = (USBNetState *) dev; /* TODO: remove the nd_table[] entry */ qemu_del_vlan_client(s->vc); rndis_clear_responsequeue(s); qemu_free(s); }
22,904
1
static av_cold int libopenjpeg_encode_close(AVCodecContext *avctx) { LibOpenJPEGContext *ctx = avctx->priv_data; opj_cio_close(ctx->stream); ctx->stream = NULL; opj_destroy_compress(ctx->compress); ctx->compress = NULL; opj_image_destroy(ctx->image); ctx->image = NULL; av_freep(&avctx->coded_frame); return 0; }
22,905
1
FFAMediaFormat *ff_AMediaFormat_new(void) { JNIEnv *env = NULL; FFAMediaFormat *format = NULL; format = av_mallocz(sizeof(FFAMediaFormat)); if (!format) { return NULL; } format->class = &amediaformat_class; env = ff_jni_get_env(format); if (!env) { av_freep(&format); return NULL; } if (ff_jni_init_jfields(env, &format->jfields, jni_amediaformat_mapping, 1, format) < 0) { goto fail; } format->object = (*env)->NewObject(env, format->jfields.mediaformat_class, format->jfields.init_id); if (!format->object) { goto fail; } format->object = (*env)->NewGlobalRef(env, format->object); if (!format->object) { goto fail; } return format; fail: ff_jni_reset_jfields(env, &format->jfields, jni_amediaformat_mapping, 1, format); av_freep(&format); return NULL; }
22,907
1
static int deband_8_coupling_c(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs) { DebandContext *s = ctx->priv; ThreadData *td = arg; AVFrame *in = td->in; AVFrame *out = td->out; const int start = (s->planeheight[0] * jobnr ) / nb_jobs; const int end = (s->planeheight[0] * (jobnr+1)) / nb_jobs; int x, y, p; for (y = start; y < end; y++) { const int pos = y * s->planewidth[0]; for (x = 0; x < s->planewidth[p]; x++) { const int x_pos = s->x_pos[pos + x]; const int y_pos = s->y_pos[pos + x]; int avg[4], cmp[4] = { 0 }, src[4]; for (p = 0; p < s->nb_components; p++) { const uint8_t *src_ptr = (const uint8_t *)in->data[p]; const int src_linesize = in->linesize[p]; const int thr = s->thr[p]; const int w = s->planewidth[p] - 1; const int h = s->planeheight[p] - 1; const int ref0 = src_ptr[av_clip(y + y_pos, 0, h) * src_linesize + av_clip(x + x_pos, 0, w)]; const int ref1 = src_ptr[av_clip(y + -y_pos, 0, h) * src_linesize + av_clip(x + x_pos, 0, w)]; const int ref2 = src_ptr[av_clip(y + -y_pos, 0, h) * src_linesize + av_clip(x + -x_pos, 0, w)]; const int ref3 = src_ptr[av_clip(y + y_pos, 0, h) * src_linesize + av_clip(x + -x_pos, 0, w)]; const int src0 = src_ptr[y * src_linesize + x]; src[p] = src0; avg[p] = get_avg(ref0, ref1, ref2, ref3); if (s->blur) { cmp[p] = FFABS(src0 - avg[p]) < thr; } else { cmp[p] = (FFABS(src0 - ref0) < thr) && (FFABS(src0 - ref1) < thr) && (FFABS(src0 - ref2) < thr) && (FFABS(src0 - ref3) < thr); } } for (p = 0; p < s->nb_components; p++) if (!cmp[p]) break; if (p == s->nb_components) { for (p = 0; p < s->nb_components; p++) { const int dst_linesize = out->linesize[p]; out->data[p][y * dst_linesize + x] = avg[p]; } } else { for (p = 0; p < s->nb_components; p++) { const int dst_linesize = out->linesize[p]; out->data[p][y * dst_linesize + x] = src[p]; } } } } return 0; }
22,908
1
int qemu_egl_rendernode_open(void) { DIR *dir; struct dirent *e; int r, fd; char *p; dir = opendir("/dev/dri"); if (!dir) { return -1; } fd = -1; while ((e = readdir(dir))) { if (e->d_type != DT_CHR) { continue; } if (strncmp(e->d_name, "renderD", 7)) { continue; } r = asprintf(&p, "/dev/dri/%s", e->d_name); if (r < 0) { return -1; } r = open(p, O_RDWR | O_CLOEXEC | O_NOCTTY | O_NONBLOCK); if (r < 0) { free(p); continue; } fd = r; free(p); break; } closedir(dir); if (fd < 0) { return -1; } return fd; }
22,909
1
void qemu_check_nic_model(NICInfo *nd, const char *model) { const char *models[2]; models[0] = model; models[1] = NULL; qemu_check_nic_model_list(nd, models, model); }
22,910
1
int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx) { int i, ret; Picture *pic; s->mb_skipped = 0; if (!ff_thread_can_start_frame(avctx)) { av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n"); return -1; } /* mark & release old frames */ if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr && s->last_picture_ptr != s->next_picture_ptr && s->last_picture_ptr->f->buf[0]) { ff_mpeg_unref_picture(s, s->last_picture_ptr); } /* release forgotten pictures */ /* if (mpeg124/h263) */ for (i = 0; i < MAX_PICTURE_COUNT; i++) { if (&s->picture[i] != s->last_picture_ptr && &s->picture[i] != s->next_picture_ptr && s->picture[i].reference && !s->picture[i].needs_realloc) { if (!(avctx->active_thread_type & FF_THREAD_FRAME)) av_log(avctx, AV_LOG_ERROR, "releasing zombie picture\n"); ff_mpeg_unref_picture(s, &s->picture[i]); } } ff_mpeg_unref_picture(s, &s->current_picture); release_unused_pictures(s); if (s->current_picture_ptr && s->current_picture_ptr->f->buf[0] == NULL) { // we already have a unused image // (maybe it was set before reading the header) pic = s->current_picture_ptr; } else { i = ff_find_unused_picture(s, 0); if (i < 0) { av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n"); return i; } pic = &s->picture[i]; } pic->reference = 0; if (!s->droppable) { if (s->pict_type != AV_PICTURE_TYPE_B) pic->reference = 3; } pic->f->coded_picture_number = s->coded_picture_number++; if (ff_alloc_picture(s, pic, 0) < 0) return -1; s->current_picture_ptr = pic; // FIXME use only the vars from current_pic s->current_picture_ptr->f->top_field_first = s->top_field_first; if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) { if (s->picture_structure != PICT_FRAME) s->current_picture_ptr->f->top_field_first = (s->picture_structure == PICT_TOP_FIELD) == s->first_field; } s->current_picture_ptr->f->interlaced_frame = !s->progressive_frame && !s->progressive_sequence; s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME; s->current_picture_ptr->f->pict_type = s->pict_type; // if (s->flags && CODEC_FLAG_QSCALE) // s->current_picture_ptr->quality = s->new_picture_ptr->quality; s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I; if ((ret = ff_mpeg_ref_picture(s, &s->current_picture, s->current_picture_ptr)) < 0) return ret; if (s->pict_type != AV_PICTURE_TYPE_B) { s->last_picture_ptr = s->next_picture_ptr; if (!s->droppable) s->next_picture_ptr = s->current_picture_ptr; } av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n", s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr, s->last_picture_ptr ? s->last_picture_ptr->f->data[0] : NULL, s->next_picture_ptr ? s->next_picture_ptr->f->data[0] : NULL, s->current_picture_ptr ? 
s->current_picture_ptr->f->data[0] : NULL, s->pict_type, s->droppable); if ((s->last_picture_ptr == NULL || s->last_picture_ptr->f->buf[0] == NULL) && (s->pict_type != AV_PICTURE_TYPE_I || s->picture_structure != PICT_FRAME)) { int h_chroma_shift, v_chroma_shift; av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift); if (s->pict_type == AV_PICTURE_TYPE_B && s->next_picture_ptr && s->next_picture_ptr->f->buf[0]) av_log(avctx, AV_LOG_DEBUG, "allocating dummy last picture for B frame\n"); else if (s->pict_type != AV_PICTURE_TYPE_I) av_log(avctx, AV_LOG_ERROR, "warning: first frame is no keyframe\n"); else if (s->picture_structure != PICT_FRAME) av_log(avctx, AV_LOG_DEBUG, "allocate dummy last picture for field based first keyframe\n"); /* Allocate a dummy frame */ i = ff_find_unused_picture(s, 0); if (i < 0) { av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n"); return i; } s->last_picture_ptr = &s->picture[i]; s->last_picture_ptr->reference = 3; s->last_picture_ptr->f->key_frame = 0; s->last_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P; if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) { s->last_picture_ptr = NULL; return -1; } if (!avctx->hwaccel) { for(i=0; i<avctx->height; i++) memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i, 0x80, avctx->width); for(i=0; i<FF_CEIL_RSHIFT(avctx->height, v_chroma_shift); i++) { memset(s->last_picture_ptr->f->data[1] + s->last_picture_ptr->f->linesize[1]*i, 0x80, FF_CEIL_RSHIFT(avctx->width, h_chroma_shift)); memset(s->last_picture_ptr->f->data[2] + s->last_picture_ptr->f->linesize[2]*i, 0x80, FF_CEIL_RSHIFT(avctx->width, h_chroma_shift)); } if(s->codec_id == AV_CODEC_ID_FLV1 || s->codec_id == AV_CODEC_ID_H263){ for(i=0; i<avctx->height; i++) memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i, 16, avctx->width); } } ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0); ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1); } if ((s->next_picture_ptr == NULL || s->next_picture_ptr->f->buf[0] == NULL) && s->pict_type == AV_PICTURE_TYPE_B) { /* Allocate a dummy frame */ i = ff_find_unused_picture(s, 0); if (i < 0) { av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n"); return i; } s->next_picture_ptr = &s->picture[i]; s->next_picture_ptr->reference = 3; s->next_picture_ptr->f->key_frame = 0; s->next_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P; if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) { s->next_picture_ptr = NULL; return -1; } ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0); ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1); } #if 0 // BUFREF-FIXME memset(s->last_picture.f->data, 0, sizeof(s->last_picture.f->data)); memset(s->next_picture.f->data, 0, sizeof(s->next_picture.f->data)); #endif if (s->last_picture_ptr) { ff_mpeg_unref_picture(s, &s->last_picture); if (s->last_picture_ptr->f->buf[0] && (ret = ff_mpeg_ref_picture(s, &s->last_picture, s->last_picture_ptr)) < 0) return ret; } if (s->next_picture_ptr) { ff_mpeg_unref_picture(s, &s->next_picture); if (s->next_picture_ptr->f->buf[0] && (ret = ff_mpeg_ref_picture(s, &s->next_picture, s->next_picture_ptr)) < 0) return ret; } av_assert0(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr && s->last_picture_ptr->f->buf[0])); if (s->picture_structure!= PICT_FRAME) { int i; for (i = 0; i < 4; i++) { if (s->picture_structure == PICT_BOTTOM_FIELD) { s->current_picture.f->data[i] += s->current_picture.f->linesize[i]; } 
s->current_picture.f->linesize[i] *= 2; s->last_picture.f->linesize[i] *= 2; s->next_picture.f->linesize[i] *= 2; } } s->err_recognition = avctx->err_recognition; /* set dequantizer, we can't do it during init as * it might change for mpeg4 and we can't do it in the header * decode as init is not called for mpeg4 there yet */ if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) { s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra; s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter; } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) { s->dct_unquantize_intra = s->dct_unquantize_h263_intra; s->dct_unquantize_inter = s->dct_unquantize_h263_inter; } else { s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra; s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter; } return 0; }
22,911
1
static void test_identify(void) { uint8_t data; uint16_t buf[256]; int i; int ret; ide_test_start( "-vnc none " "-drive file=%s,if=ide,serial=%s,cache=writeback " "-global ide-hd.ver=%s", tmp_path, "testdisk", "version"); /* IDENTIFY command on device 0*/ outb(IDE_BASE + reg_device, 0); outb(IDE_BASE + reg_command, CMD_IDENTIFY); /* Read in the IDENTIFY buffer and check registers */ data = inb(IDE_BASE + reg_device); g_assert_cmpint(data & DEV, ==, 0); for (i = 0; i < 256; i++) { data = inb(IDE_BASE + reg_status); assert_bit_set(data, DRDY | DRQ); assert_bit_clear(data, BSY | DF | ERR); ((uint16_t*) buf)[i] = inw(IDE_BASE + reg_data); } data = inb(IDE_BASE + reg_status); assert_bit_set(data, DRDY); assert_bit_clear(data, BSY | DF | ERR | DRQ); /* Check serial number/version in the buffer */ string_cpu_to_be16(&buf[10], 20); ret = memcmp(&buf[10], "testdisk ", 20); g_assert(ret == 0); string_cpu_to_be16(&buf[23], 8); ret = memcmp(&buf[23], "version ", 8); g_assert(ret == 0); /* Write cache enabled bit */ assert_bit_set(buf[85], 0x20); ide_test_quit(); }
22,912
1
static void gen_window_check2(DisasContext *dc, unsigned r1, unsigned r2) { gen_window_check1(dc, r1 > r2 ? r1 : r2); }
22,913
1
static int decode_0(AVCodecContext *avctx, uint8_t code, uint8_t *pkt) { PAFVideoDecContext *c = avctx->priv_data; uint32_t opcode_size, offset; uint8_t *dst, *dend, mask = 0, color = 0, a, b, p; const uint8_t *src, *send, *opcodes; int i, j, x = 0; i = bytestream2_get_byte(&c->gb); if (i) { if (code & 0x10) { int align; align = bytestream2_tell(&c->gb) & 3; if (align) bytestream2_skip(&c->gb, 4 - align); } do { a = bytestream2_get_byte(&c->gb); b = bytestream2_get_byte(&c->gb); p = (a & 0xC0) >> 6; dst = c->frame[p] + get_video_page_offset(avctx, a, b); dend = c->frame[p] + c->frame_size; offset = (b & 0x7F) * 2; j = bytestream2_get_le16(&c->gb) + offset; do { offset++; if (dst + 3 * avctx->width + 4 > dend) return AVERROR_INVALIDDATA; copy4h(avctx, dst); if ((offset & 0x3F) == 0) dst += avctx->width * 3; dst += 4; } while (offset < j); } while (--i); } dst = c->frame[c->current_frame]; do { a = bytestream2_get_byte(&c->gb); b = bytestream2_get_byte(&c->gb); p = (a & 0xC0) >> 6; src = c->frame[p] + get_video_page_offset(avctx, a, b); send = c->frame[p] + c->frame_size; if (src + 3 * avctx->width + 4 > send) return AVERROR_INVALIDDATA; copy_block4(dst, src, avctx->width, avctx->width, 4); i++; if ((i & 0x3F) == 0) dst += avctx->width * 3; dst += 4; } while (i < c->video_size / 16); opcode_size = bytestream2_get_le16(&c->gb); bytestream2_skip(&c->gb, 2); if (bytestream2_get_bytes_left(&c->gb) < opcode_size) return AVERROR_INVALIDDATA; opcodes = pkt + bytestream2_tell(&c->gb); bytestream2_skipu(&c->gb, opcode_size); dst = c->frame[c->current_frame]; for (i = 0; i < avctx->height; i += 4, dst += avctx->width * 3) { for (j = 0; j < avctx->width; j += 4, dst += 4) { int opcode, k = 0; if (x > opcode_size) return AVERROR_INVALIDDATA; if (j & 4) { opcode = opcodes[x] & 15; x++; } else { opcode = opcodes[x] >> 4; } while (block_sequences[opcode][k]) { offset = avctx->width * 2; code = block_sequences[opcode][k++]; switch (code) { case 2: offset = 0; case 3: color = bytestream2_get_byte(&c->gb); case 4: mask = bytestream2_get_byte(&c->gb); copy_color_mask(avctx, mask, dst + offset, color); break; case 5: offset = 0; case 6: a = bytestream2_get_byte(&c->gb); b = bytestream2_get_byte(&c->gb); p = (a & 0xC0) >> 6; src = c->frame[p] + get_video_page_offset(avctx, a, b); send = c->frame[p] + c->frame_size; case 7: if (src + offset + avctx->width + 4 > send) return AVERROR_INVALIDDATA; mask = bytestream2_get_byte(&c->gb); copy_src_mask(avctx, mask, dst + offset, src + offset); break; } } } } return 0; }
22,914
1
static int64_t ogg_read_timestamp(AVFormatContext *s, int stream_index, int64_t *pos_arg, int64_t pos_limit) { struct ogg *ogg = s->priv_data; struct ogg_stream *os = ogg->streams + stream_index; AVIOContext *bc = s->pb; int64_t pts = AV_NOPTS_VALUE; int i; avio_seek(bc, *pos_arg, SEEK_SET); ogg_reset(ogg); while (avio_tell(bc) < pos_limit && !ogg_packet(s, &i, NULL, NULL, pos_arg)) { if (i == stream_index) { pts = ogg_calc_pts(s, i, NULL); if (os->keyframe_seek && !(os->pflags & AV_PKT_FLAG_KEY)) pts = AV_NOPTS_VALUE; } if (pts != AV_NOPTS_VALUE) break; } ogg_reset(ogg); return pts; }
22,915
1
static int scaling_list_data(GetBitContext *gb, AVCodecContext *avctx, ScalingList *sl, HEVCSPS *sps) { uint8_t scaling_list_pred_mode_flag; int32_t scaling_list_dc_coef[2][6]; int size_id, matrix_id, pos; int i; for (size_id = 0; size_id < 4; size_id++) for (matrix_id = 0; matrix_id < 6; matrix_id += ((size_id == 3) ? 3 : 1)) { scaling_list_pred_mode_flag = get_bits1(gb); if (!scaling_list_pred_mode_flag) { unsigned int delta = get_ue_golomb_long(gb); /* Only need to handle non-zero delta. Zero means default, * which should already be in the arrays. */ if (delta) { // Copy from previous array. if (matrix_id < delta) { av_log(avctx, AV_LOG_ERROR, "Invalid delta in scaling list data: %d.\n", delta); return AVERROR_INVALIDDATA; } memcpy(sl->sl[size_id][matrix_id], sl->sl[size_id][matrix_id - delta], size_id > 0 ? 64 : 16); if (size_id > 1) sl->sl_dc[size_id - 2][matrix_id] = sl->sl_dc[size_id - 2][matrix_id - delta]; } } else { int next_coef, coef_num; int32_t scaling_list_delta_coef; next_coef = 8; coef_num = FFMIN(64, 1 << (4 + (size_id << 1))); if (size_id > 1) { scaling_list_dc_coef[size_id - 2][matrix_id] = get_se_golomb(gb) + 8; next_coef = scaling_list_dc_coef[size_id - 2][matrix_id]; sl->sl_dc[size_id - 2][matrix_id] = next_coef; } for (i = 0; i < coef_num; i++) { if (size_id == 0) pos = 4 * ff_hevc_diag_scan4x4_y[i] + ff_hevc_diag_scan4x4_x[i]; else pos = 8 * ff_hevc_diag_scan8x8_y[i] + ff_hevc_diag_scan8x8_x[i]; scaling_list_delta_coef = get_se_golomb(gb); next_coef = (next_coef + scaling_list_delta_coef + 256) % 256; sl->sl[size_id][matrix_id][pos] = next_coef; } } } if (sps->chroma_format_idc == 3) { for (i = 0; i < 64; i++) { sl->sl[3][1][i] = sl->sl[2][1][i]; sl->sl[3][2][i] = sl->sl[2][2][i]; sl->sl[3][4][i] = sl->sl[2][4][i]; sl->sl[3][5][i] = sl->sl[2][5][i]; } sl->sl_dc[1][1] = sl->sl_dc[0][1]; sl->sl_dc[1][2] = sl->sl_dc[0][2]; sl->sl_dc[1][4] = sl->sl_dc[0][4]; sl->sl_dc[1][5] = sl->sl_dc[0][5]; } return 0; }
22,916
1
static int recode_subtitle(AVCodecContext *avctx, AVPacket *outpkt, const AVPacket *inpkt) { #if CONFIG_ICONV iconv_t cd = (iconv_t)-1; int ret = 0; char *inb, *outb; size_t inl, outl; AVPacket tmp; #endif if (avctx->sub_charenc_mode != FF_SUB_CHARENC_MODE_PRE_DECODER) return 0; #if CONFIG_ICONV cd = iconv_open("UTF-8", avctx->sub_charenc); av_assert0(cd != (iconv_t)-1); inb = inpkt->data; inl = inpkt->size; if (inl >= INT_MAX / UTF8_MAX_BYTES - FF_INPUT_BUFFER_PADDING_SIZE) { av_log(avctx, AV_LOG_ERROR, "Subtitles packet is too big for recoding\n"); ret = AVERROR(ENOMEM); goto end; } ret = av_new_packet(&tmp, inl * UTF8_MAX_BYTES); if (ret < 0) goto end; outpkt->buf = tmp.buf; outpkt->data = tmp.data; outpkt->size = tmp.size; outb = outpkt->data; outl = outpkt->size; if (iconv(cd, &inb, &inl, &outb, &outl) == (size_t)-1 || iconv(cd, NULL, NULL, &outb, &outl) == (size_t)-1 || outl >= outpkt->size || inl != 0) { av_log(avctx, AV_LOG_ERROR, "Unable to recode subtitle event \"%s\" " "from %s to UTF-8\n", inpkt->data, avctx->sub_charenc); av_free_packet(&tmp); ret = AVERROR(errno); goto end; } outpkt->size -= outl; outpkt->data[outpkt->size - 1] = '\0'; end: if (cd != (iconv_t)-1) iconv_close(cd); return ret; #else av_assert0(!"requesting subtitles recoding without iconv"); #endif }
22,917
1
void *qemu_anon_ram_alloc(size_t size, uint64_t *alignment) { size_t align = QEMU_VMALLOC_ALIGN; size_t total = size + align - getpagesize(); void *ptr = mmap(0, total, PROT_NONE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); size_t offset = QEMU_ALIGN_UP((uintptr_t)ptr, align) - (uintptr_t)ptr; void *ptr1; if (ptr == MAP_FAILED) { return NULL; } if (alignment) { *alignment = align; } ptr1 = mmap(ptr + offset, size, PROT_READ | PROT_WRITE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); if (ptr1 == MAP_FAILED) { munmap(ptr, total); return NULL; } ptr += offset; total -= offset; if (offset > 0) { munmap(ptr - offset, offset); } if (total > size) { munmap(ptr + size, total - size); } trace_qemu_anon_ram_alloc(size, ptr); return ptr; }
22,918
1
void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev) { int i, r; for (i = 0; i < hdev->nvqs; ++i) { vhost_virtqueue_cleanup(hdev, vdev, hdev->vqs + i, i); } vhost_client_sync_dirty_bitmap(&hdev->client, 0, (target_phys_addr_t)~0x0ull); r = vdev->binding->set_guest_notifiers(vdev->binding_opaque, false); if (r < 0) { fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", r); fflush(stderr); } assert (r >= 0); hdev->started = false; g_free(hdev->log); hdev->log = NULL; hdev->log_size = 0; }
22,919
0
static int ffm_read_data(AVFormatContext *s, uint8_t *buf, int size, int first) { FFMContext *ffm = s->priv_data; ByteIOContext *pb = s->pb; int len, fill_size, size1, frame_offset; size1 = size; while (size > 0) { redo: len = ffm->packet_end - ffm->packet_ptr; if (len < 0) return -1; if (len > size) len = size; if (len == 0) { if (url_ftell(pb) == ffm->file_size) url_fseek(pb, ffm->packet_size, SEEK_SET); retry_read: get_be16(pb); /* PACKET_ID */ fill_size = get_be16(pb); ffm->pts = get_be64(pb); ffm->first_frame_in_packet = 1; frame_offset = get_be16(pb); get_buffer(pb, ffm->packet, ffm->packet_size - FFM_HEADER_SIZE); ffm->packet_end = ffm->packet + (ffm->packet_size - FFM_HEADER_SIZE - fill_size); if (ffm->packet_end < ffm->packet) return -1; /* if first packet or resynchronization packet, we must handle it specifically */ if (ffm->first_packet || (frame_offset & 0x8000)) { if (!frame_offset) { /* This packet has no frame headers in it */ if (url_ftell(pb) >= ffm->packet_size * 3) { url_fseek(pb, -ffm->packet_size * 2, SEEK_CUR); goto retry_read; } /* This is bad, we cannot find a valid frame header */ return 0; } ffm->first_packet = 0; if ((frame_offset & 0x7ffff) < FFM_HEADER_SIZE) return -1; ffm->packet_ptr = ffm->packet + (frame_offset & 0x7fff) - FFM_HEADER_SIZE; if (!first) break; } else { ffm->packet_ptr = ffm->packet; } goto redo; } memcpy(buf, ffm->packet_ptr, len); buf += len; ffm->packet_ptr += len; size -= len; first = 0; } return size1 - size; }
22,920
0
static int mpegts_write_section1(MpegTSSection *s, int tid, int id, int version, int sec_num, int last_sec_num, uint8_t *buf, int len) { uint8_t section[1024], *q; unsigned int tot_len; /* reserved_future_use field must be set to 1 for SDT */ unsigned int flags = tid == SDT_TID ? 0xf000 : 0xb000; tot_len = 3 + 5 + len + 4; /* check if not too big */ if (tot_len > 1024) return -1; q = section; *q++ = tid; put16(&q, flags | (len + 5 + 4)); /* 5 byte header + 4 byte CRC */ put16(&q, id); *q++ = 0xc1 | (version << 1); /* current_next_indicator = 1 */ *q++ = sec_num; *q++ = last_sec_num; memcpy(q, buf, len); mpegts_write_section(s, section, tot_len); return 0; }
22,921
0
static int compare_doubles(const double *a, const double *b, int len, double max_diff) { int i; for (i = 0; i < len; i++) { if (fabs(a[i] - b[i]) > max_diff) { av_log(NULL, AV_LOG_ERROR, "%d: %- .12f - %- .12f = % .12g\n", i, a[i], b[i], a[i] - b[i]); return -1; } } return 0; }
22,922
0
static int fourxm_read_header(AVFormatContext *s, AVFormatParameters *ap) { ByteIOContext *pb = s->pb; unsigned int fourcc_tag; unsigned int size; int header_size; FourxmDemuxContext *fourxm = s->priv_data; unsigned char *header; int i; int current_track = -1; AVStream *st; fourxm->track_count = 0; fourxm->tracks = NULL; fourxm->selected_track = 0; fourxm->fps = 1.0; /* skip the first 3 32-bit numbers */ url_fseek(pb, 12, SEEK_CUR); /* check for LIST-HEAD */ GET_LIST_HEADER(); header_size = size - 4; if (fourcc_tag != HEAD_TAG || size < 4) return AVERROR_INVALIDDATA; /* allocate space for the header and load the whole thing */ header = av_malloc(header_size); if (!header) return AVERROR(ENOMEM); if (get_buffer(pb, header, header_size) != header_size) return AVERROR(EIO); /* take the lazy approach and search for any and all vtrk and strk chunks */ for (i = 0; i < header_size - 8; i++) { fourcc_tag = AV_RL32(&header[i]); size = AV_RL32(&header[i + 4]); if (fourcc_tag == std__TAG) { fourxm->fps = av_int2flt(AV_RL32(&header[i + 12])); } else if (fourcc_tag == vtrk_TAG) { /* check that there is enough data */ if (size != vtrk_SIZE) { av_free(header); return AVERROR_INVALIDDATA; } fourxm->width = AV_RL32(&header[i + 36]); fourxm->height = AV_RL32(&header[i + 40]); /* allocate a new AVStream */ st = av_new_stream(s, 0); if (!st) return AVERROR(ENOMEM); av_set_pts_info(st, 60, 1, fourxm->fps); fourxm->video_stream_index = st->index; st->codec->codec_type = CODEC_TYPE_VIDEO; st->codec->codec_id = CODEC_ID_4XM; st->codec->extradata_size = 4; st->codec->extradata = av_malloc(4); AV_WL32(st->codec->extradata, AV_RL32(&header[i + 16])); st->codec->width = fourxm->width; st->codec->height = fourxm->height; i += 8 + size; } else if (fourcc_tag == strk_TAG) { /* check that there is enough data */ if (size != strk_SIZE) { av_free(header); return AVERROR_INVALIDDATA; } current_track = AV_RL32(&header[i + 8]); if (current_track + 1 > fourxm->track_count) { fourxm->track_count = current_track + 1; if((unsigned)fourxm->track_count >= UINT_MAX / sizeof(AudioTrack)) return -1; fourxm->tracks = av_realloc(fourxm->tracks, fourxm->track_count * sizeof(AudioTrack)); if (!fourxm->tracks) { av_free(header); return AVERROR(ENOMEM); } } fourxm->tracks[current_track].adpcm = AV_RL32(&header[i + 12]); fourxm->tracks[current_track].channels = AV_RL32(&header[i + 36]); fourxm->tracks[current_track].sample_rate = AV_RL32(&header[i + 40]); fourxm->tracks[current_track].bits = AV_RL32(&header[i + 44]); i += 8 + size; /* allocate a new AVStream */ st = av_new_stream(s, current_track); if (!st) return AVERROR(ENOMEM); av_set_pts_info(st, 60, 1, fourxm->tracks[current_track].sample_rate); fourxm->tracks[current_track].stream_index = st->index; st->codec->codec_type = CODEC_TYPE_AUDIO; st->codec->codec_tag = 0; st->codec->channels = fourxm->tracks[current_track].channels; st->codec->sample_rate = fourxm->tracks[current_track].sample_rate; st->codec->bits_per_coded_sample = fourxm->tracks[current_track].bits; st->codec->bit_rate = st->codec->channels * st->codec->sample_rate * st->codec->bits_per_coded_sample; st->codec->block_align = st->codec->channels * st->codec->bits_per_coded_sample; if (fourxm->tracks[current_track].adpcm) st->codec->codec_id = CODEC_ID_ADPCM_4XM; else if (st->codec->bits_per_coded_sample == 8) st->codec->codec_id = CODEC_ID_PCM_U8; else st->codec->codec_id = CODEC_ID_PCM_S16LE; } } av_free(header); /* skip over the LIST-MOVI chunk (which is where the stream should be */ GET_LIST_HEADER(); if (fourcc_tag != 
MOVI_TAG) return AVERROR_INVALIDDATA; /* initialize context members */ fourxm->video_pts = -1; /* first frame will push to 0 */ fourxm->audio_pts = 0; return 0; }
22,923
0
static int flush_packet(AVFormatContext *ctx, int stream_index, int64_t pts, int64_t dts, int64_t scr, int trailer_size) { MpegMuxContext *s = ctx->priv_data; StreamInfo *stream = ctx->streams[stream_index]->priv_data; uint8_t *buf_ptr; int size, payload_size, startcode, id, stuffing_size, i, header_len; int packet_size; uint8_t buffer[128]; int zero_trail_bytes = 0; int pad_packet_bytes = 0; int pes_flags; int general_pack = 0; /*"general" pack without data specific to one stream?*/ int nb_frames; id = stream->id; #if 0 printf("packet ID=%2x PTS=%0.3f\n", id, pts / 90000.0); #endif buf_ptr = buffer; if ((s->packet_number % s->pack_header_freq) == 0 || s->last_scr != scr) { /* output pack and systems header if needed */ size = put_pack_header(ctx, buf_ptr, scr); buf_ptr += size; s->last_scr= scr; if (s->is_vcd) { /* there is exactly one system header for each stream in a VCD MPEG, One in the very first video packet and one in the very first audio packet (see VCD standard p. IV-7 and IV-8).*/ if (stream->packet_number==0) { size = put_system_header(ctx, buf_ptr, id); buf_ptr += size; } } else if (s->is_dvd) { if (stream->align_iframe || s->packet_number == 0){ int PES_bytes_to_fill = s->packet_size - size - 10; if (pts != AV_NOPTS_VALUE) { if (dts != pts) PES_bytes_to_fill -= 5 + 5; else PES_bytes_to_fill -= 5; } if (stream->bytes_to_iframe == 0 || s->packet_number == 0) { size = put_system_header(ctx, buf_ptr, 0); buf_ptr += size; size = buf_ptr - buffer; put_buffer(ctx->pb, buffer, size); put_be32(ctx->pb, PRIVATE_STREAM_2); put_be16(ctx->pb, 0x03d4); // length put_byte(ctx->pb, 0x00); // substream ID, 00=PCI for (i = 0; i < 979; i++) put_byte(ctx->pb, 0x00); put_be32(ctx->pb, PRIVATE_STREAM_2); put_be16(ctx->pb, 0x03fa); // length put_byte(ctx->pb, 0x01); // substream ID, 01=DSI for (i = 0; i < 1017; i++) put_byte(ctx->pb, 0x00); memset(buffer, 0, 128); buf_ptr = buffer; s->packet_number++; stream->align_iframe = 0; scr += s->packet_size*90000LL / (s->mux_rate*50LL); //FIXME rounding and first few bytes of each packet size = put_pack_header(ctx, buf_ptr, scr); s->last_scr= scr; buf_ptr += size; /* GOP Start */ } else if (stream->bytes_to_iframe < PES_bytes_to_fill) { pad_packet_bytes = PES_bytes_to_fill - stream->bytes_to_iframe; } } } else { if ((s->packet_number % s->system_header_freq) == 0) { size = put_system_header(ctx, buf_ptr, 0); buf_ptr += size; } } } size = buf_ptr - buffer; put_buffer(ctx->pb, buffer, size); packet_size = s->packet_size - size; if (s->is_vcd && id == AUDIO_ID) /* The VCD standard demands that 20 zero bytes follow each audio pack (see standard p. IV-8).*/ zero_trail_bytes += 20; if ((s->is_vcd && stream->packet_number==0) || (s->is_svcd && s->packet_number==0)) { /* for VCD the first pack of each stream contains only the pack header, the system header and lots of padding (see VCD standard p. IV-6). In the case of an audio pack, 20 zero bytes are also added at the end.*/ /* For SVCD we fill the very first pack to increase compatibility with some DVD players. 
Not mandated by the standard.*/ if (s->is_svcd) general_pack = 1; /* the system header refers to both streams and no stream data*/ pad_packet_bytes = packet_size - zero_trail_bytes; } packet_size -= pad_packet_bytes + zero_trail_bytes; if (packet_size > 0) { /* packet header size */ packet_size -= 6; /* packet header */ if (s->is_mpeg2) { header_len = 3; if (stream->packet_number==0) header_len += 3; /* PES extension */ header_len += 1; /* obligatory stuffing byte */ } else { header_len = 0; } if (pts != AV_NOPTS_VALUE) { if (dts != pts) header_len += 5 + 5; else header_len += 5; } else { if (!s->is_mpeg2) header_len++; } payload_size = packet_size - header_len; if (id < 0xc0) { startcode = PRIVATE_STREAM_1; payload_size -= 1; if (id >= 0x40) { payload_size -= 3; if (id >= 0xa0) payload_size -= 3; } } else { startcode = 0x100 + id; } stuffing_size = payload_size - av_fifo_size(stream->fifo); // first byte does not fit -> reset pts/dts + stuffing if(payload_size <= trailer_size && pts != AV_NOPTS_VALUE){ int timestamp_len=0; if(dts != pts) timestamp_len += 5; if(pts != AV_NOPTS_VALUE) timestamp_len += s->is_mpeg2 ? 5 : 4; pts=dts= AV_NOPTS_VALUE; header_len -= timestamp_len; if (s->is_dvd && stream->align_iframe) { pad_packet_bytes += timestamp_len; packet_size -= timestamp_len; } else { payload_size += timestamp_len; } stuffing_size += timestamp_len; if(payload_size > trailer_size) stuffing_size += payload_size - trailer_size; } if (pad_packet_bytes > 0 && pad_packet_bytes <= 7) { // can't use padding, so use stuffing packet_size += pad_packet_bytes; payload_size += pad_packet_bytes; // undo the previous adjustment if (stuffing_size < 0) { stuffing_size = pad_packet_bytes; } else { stuffing_size += pad_packet_bytes; } pad_packet_bytes = 0; } if (stuffing_size < 0) stuffing_size = 0; if (stuffing_size > 16) { /*<=16 for MPEG-1, <=32 for MPEG-2*/ pad_packet_bytes += stuffing_size; packet_size -= stuffing_size; payload_size -= stuffing_size; stuffing_size = 0; } nb_frames= get_nb_frames(ctx, stream, payload_size - stuffing_size); put_be32(ctx->pb, startcode); put_be16(ctx->pb, packet_size); if (!s->is_mpeg2) for(i=0;i<stuffing_size;i++) put_byte(ctx->pb, 0xff); if (s->is_mpeg2) { put_byte(ctx->pb, 0x80); /* mpeg2 id */ pes_flags=0; if (pts != AV_NOPTS_VALUE) { pes_flags |= 0x80; if (dts != pts) pes_flags |= 0x40; } /* Both the MPEG-2 and the SVCD standards demand that the P-STD_buffer_size field be included in the first packet of every stream. (see SVCD standard p. 26 V.2.3.1 and V.2.3.2 and MPEG-2 standard 2.7.7) */ if (stream->packet_number == 0) pes_flags |= 0x01; put_byte(ctx->pb, pes_flags); /* flags */ put_byte(ctx->pb, header_len - 3 + stuffing_size); if (pes_flags & 0x80) /*write pts*/ put_timestamp(ctx->pb, (pes_flags & 0x40) ? 0x03 : 0x02, pts); if (pes_flags & 0x40) /*write dts*/ put_timestamp(ctx->pb, 0x01, dts); if (pes_flags & 0x01) { /*write pes extension*/ put_byte(ctx->pb, 0x10); /* flags */ /* P-STD buffer info */ if (id == AUDIO_ID) put_be16(ctx->pb, 0x4000 | stream->max_buffer_size/ 128); else put_be16(ctx->pb, 0x6000 | stream->max_buffer_size/1024); } } else { if (pts != AV_NOPTS_VALUE) { if (dts != pts) { put_timestamp(ctx->pb, 0x03, pts); put_timestamp(ctx->pb, 0x01, dts); } else { put_timestamp(ctx->pb, 0x02, pts); } } else { put_byte(ctx->pb, 0x0f); } } if (s->is_mpeg2) { /* special stuffing byte that is always written to prevent accidental generation of start codes. 
*/ put_byte(ctx->pb, 0xff); for(i=0;i<stuffing_size;i++) put_byte(ctx->pb, 0xff); } if (startcode == PRIVATE_STREAM_1) { put_byte(ctx->pb, id); if (id >= 0xa0) { /* LPCM (XXX: check nb_frames) */ put_byte(ctx->pb, 7); put_be16(ctx->pb, 4); /* skip 3 header bytes */ put_byte(ctx->pb, stream->lpcm_header[0]); put_byte(ctx->pb, stream->lpcm_header[1]); put_byte(ctx->pb, stream->lpcm_header[2]); } else if (id >= 0x40) { /* AC-3 */ put_byte(ctx->pb, nb_frames); put_be16(ctx->pb, trailer_size+1); } } /* output data */ assert(payload_size - stuffing_size <= av_fifo_size(stream->fifo)); av_fifo_generic_read(stream->fifo, ctx->pb, payload_size - stuffing_size, &put_buffer); stream->bytes_to_iframe -= payload_size - stuffing_size; }else{ payload_size= stuffing_size= 0; } if (pad_packet_bytes > 0) put_padding_packet(ctx,ctx->pb, pad_packet_bytes); for(i=0;i<zero_trail_bytes;i++) put_byte(ctx->pb, 0x00); put_flush_packet(ctx->pb); s->packet_number++; /* only increase the stream packet number if this pack actually contains something that is specific to this stream! I.e. a dedicated header or some data.*/ if (!general_pack) stream->packet_number++; return payload_size - stuffing_size; }
22,924
0
static int _decode_exponents(int expstr, int ngrps, uint8_t absexp, uint8_t *gexps, uint8_t *dexps) { int exps; int i = 0; while (ngrps--) { exps = gexps[i++]; absexp += exp_1[exps]; assert(absexp <= 24); switch (expstr) { case AC3_EXPSTR_D45: *(dexps++) = absexp; *(dexps++) = absexp; case AC3_EXPSTR_D25: *(dexps++) = absexp; case AC3_EXPSTR_D15: *(dexps++) = absexp; } absexp += exp_2[exps]; assert(absexp <= 24); switch (expstr) { case AC3_EXPSTR_D45: *(dexps++) = absexp; *(dexps++) = absexp; case AC3_EXPSTR_D25: *(dexps++) = absexp; case AC3_EXPSTR_D15: *(dexps++) = absexp; } absexp += exp_3[exps]; assert(absexp <= 24); switch (expstr) { case AC3_EXPSTR_D45: *(dexps++) = absexp; *(dexps++) = absexp; case AC3_EXPSTR_D25: *(dexps++) = absexp; case AC3_EXPSTR_D15: *(dexps++) = absexp; } } return 0; }
22,925
0
static void draw_axis_yuv(AVFrame *out, AVFrame *axis, const ColorFloat *c, int off) { int fmt = out->format, x, y, yh, w = axis->width, h = axis->height; int offh = (fmt == AV_PIX_FMT_YUV420P) ? off / 2 : off; float a, rcp_255 = 1.0f / 255.0f; uint8_t *vy = out->data[0], *vu = out->data[1], *vv = out->data[2]; uint8_t *vay = axis->data[0], *vau = axis->data[1], *vav = axis->data[2], *vaa = axis->data[3]; int lsy = out->linesize[0], lsu = out->linesize[1], lsv = out->linesize[2]; int lsay = axis->linesize[0], lsau = axis->linesize[1], lsav = axis->linesize[2], lsaa = axis->linesize[3]; uint8_t *lpy, *lpu, *lpv, *lpay, *lpau, *lpav, *lpaa; for (y = 0; y < h; y += 2) { yh = (fmt == AV_PIX_FMT_YUV420P) ? y / 2 : y; lpy = vy + (off + y) * lsy; lpu = vu + (offh + yh) * lsu; lpv = vv + (offh + yh) * lsv; lpay = vay + y * lsay; lpau = vau + yh * lsau; lpav = vav + yh * lsav; lpaa = vaa + y * lsaa; for (x = 0; x < w; x += 2) { a = rcp_255 * (*lpaa++); *lpy++ = a * (*lpay++) + (1.0f - a) * c[x].yuv.y + 0.5f; *lpu++ = a * (*lpau++) + (1.0f - a) * c[x].yuv.u + 0.5f; *lpv++ = a * (*lpav++) + (1.0f - a) * c[x].yuv.v + 0.5f; /* u and v are skipped on yuv422p and yuv420p */ a = rcp_255 * (*lpaa++); *lpy++ = a * (*lpay++) + (1.0f - a) * c[x+1].yuv.y + 0.5f; if (fmt == AV_PIX_FMT_YUV444P) { *lpu++ = a * (*lpau++) + (1.0f - a) * c[x+1].yuv.u + 0.5f; *lpv++ = a * (*lpav++) + (1.0f - a) * c[x+1].yuv.v + 0.5f; } } lpy = vy + (off + y + 1) * lsy; lpu = vu + (off + y + 1) * lsu; lpv = vv + (off + y + 1) * lsv; lpay = vay + (y + 1) * lsay; lpau = vau + (y + 1) * lsau; lpav = vav + (y + 1) * lsav; lpaa = vaa + (y + 1) * lsaa; for (x = 0; x < out->width; x += 2) { /* u and v are skipped on yuv420p */ a = rcp_255 * (*lpaa++); *lpy++ = a * (*lpay++) + (1.0f - a) * c[x].yuv.y + 0.5f; if (fmt != AV_PIX_FMT_YUV420P) { *lpu++ = a * (*lpau++) + (1.0f - a) * c[x].yuv.u + 0.5f; *lpv++ = a * (*lpav++) + (1.0f - a) * c[x].yuv.v + 0.5f; } /* u and v are skipped on yuv422p and yuv420p */ a = rcp_255 * (*lpaa++); *lpy++ = a * (*lpay++) + (1.0f - a) * c[x+1].yuv.y + 0.5f; if (fmt == AV_PIX_FMT_YUV444P) { *lpu++ = a * (*lpau++) + (1.0f - a) * c[x+1].yuv.u + 0.5f; *lpv++ = a * (*lpav++) + (1.0f - a) * c[x+1].yuv.v + 0.5f; } } } }
22,926
0
static int shift_data(AVFormatContext *s) { int ret = 0, moov_size; MOVMuxContext *mov = s->priv_data; int64_t pos, pos_end = avio_tell(s->pb); uint8_t *buf, *read_buf[2]; int read_buf_id = 0; int read_size[2]; AVIOContext *read_pb; if (mov->flags & FF_MOV_FLAG_FRAGMENT) moov_size = compute_sidx_size(s); else moov_size = compute_moov_size(s); if (moov_size < 0) return moov_size; buf = av_malloc(moov_size * 2); if (!buf) return AVERROR(ENOMEM); read_buf[0] = buf; read_buf[1] = buf + moov_size; /* Shift the data: the AVIO context of the output can only be used for * writing, so we re-open the same output, but for reading. It also avoids * a read/seek/write/seek back and forth. */ avio_flush(s->pb); ret = avio_open(&read_pb, s->filename, AVIO_FLAG_READ); if (ret < 0) { av_log(s, AV_LOG_ERROR, "Unable to re-open %s output file for " "the second pass (faststart)\n", s->filename); goto end; } /* mark the end of the shift to up to the last data we wrote, and get ready * for writing */ pos_end = avio_tell(s->pb); avio_seek(s->pb, mov->reserved_header_pos + moov_size, SEEK_SET); /* start reading at where the new moov will be placed */ avio_seek(read_pb, mov->reserved_header_pos, SEEK_SET); pos = avio_tell(read_pb); #define READ_BLOCK do { \ read_size[read_buf_id] = avio_read(read_pb, read_buf[read_buf_id], moov_size); \ read_buf_id ^= 1; \ } while (0) /* shift data by chunk of at most moov_size */ READ_BLOCK; do { int n; READ_BLOCK; n = read_size[read_buf_id]; if (n <= 0) break; avio_write(s->pb, read_buf[read_buf_id], n); pos += n; } while (pos < pos_end); avio_close(read_pb); end: av_free(buf); return ret; }
22,927
0
void ff_do_elbg(int *points, int dim, int numpoints, int *codebook, int numCB, int max_steps, int *closest_cb, AVLFG *rand_state) { int dist; elbg_data elbg_d; elbg_data *elbg = &elbg_d; int i, j, k, last_error, steps=0; int *dist_cb = av_malloc(numpoints*sizeof(int)); int *size_part = av_malloc(numCB*sizeof(int)); cell *list_buffer = av_malloc(numpoints*sizeof(cell)); cell *free_cells; int best_dist, best_idx = 0; elbg->error = INT_MAX; elbg->dim = dim; elbg->numCB = numCB; elbg->codebook = codebook; elbg->cells = av_malloc(numCB*sizeof(cell *)); elbg->utility = av_malloc(numCB*sizeof(int)); elbg->nearest_cb = closest_cb; elbg->points = points; elbg->utility_inc = av_malloc(numCB*sizeof(int)); elbg->scratchbuf = av_malloc(5*dim*sizeof(int)); elbg->rand_state = rand_state; do { free_cells = list_buffer; last_error = elbg->error; steps++; memset(elbg->utility, 0, numCB*sizeof(int)); memset(elbg->cells, 0, numCB*sizeof(cell *)); elbg->error = 0; /* This loop evaluate the actual Voronoi partition. It is the most costly part of the algorithm. */ for (i=0; i < numpoints; i++) { best_dist = distance_limited(elbg->points + i*elbg->dim, elbg->codebook + best_idx*elbg->dim, dim, INT_MAX); for (k=0; k < elbg->numCB; k++) { dist = distance_limited(elbg->points + i*elbg->dim, elbg->codebook + k*elbg->dim, dim, best_dist); if (dist < best_dist) { best_dist = dist; best_idx = k; } } elbg->nearest_cb[i] = best_idx; dist_cb[i] = best_dist; elbg->error += dist_cb[i]; elbg->utility[elbg->nearest_cb[i]] += dist_cb[i]; free_cells->index = i; free_cells->next = elbg->cells[elbg->nearest_cb[i]]; elbg->cells[elbg->nearest_cb[i]] = free_cells; free_cells++; } do_shiftings(elbg); memset(size_part, 0, numCB*sizeof(int)); memset(elbg->codebook, 0, elbg->numCB*dim*sizeof(int)); for (i=0; i < numpoints; i++) { size_part[elbg->nearest_cb[i]]++; for (j=0; j < elbg->dim; j++) elbg->codebook[elbg->nearest_cb[i]*elbg->dim + j] += elbg->points[i*elbg->dim + j]; } for (i=0; i < elbg->numCB; i++) vect_division(elbg->codebook + i*elbg->dim, elbg->codebook + i*elbg->dim, size_part[i], elbg->dim); } while(((last_error - elbg->error) > DELTA_ERR_MAX*elbg->error) && (steps < max_steps)); av_free(dist_cb); av_free(size_part); av_free(elbg->utility); av_free(list_buffer); av_free(elbg->cells); av_free(elbg->utility_inc); av_free(elbg->scratchbuf); }
22,928
0
int ff_h263_decode_frame(AVCodecContext *avctx, void *data, int *data_size, uint8_t *buf, int buf_size) { MpegEncContext *s = avctx->priv_data; int ret; AVFrame *pict = data; #ifdef PRINT_FRAME_TIME uint64_t time= rdtsc(); #endif #ifdef DEBUG printf("*****frame %d size=%d\n", avctx->frame_number, buf_size); printf("bytes=%x %x %x %x\n", buf[0], buf[1], buf[2], buf[3]); #endif s->flags= avctx->flags; s->flags2= avctx->flags2; /* no supplementary picture */ if (buf_size == 0) { /* special case for last picture */ if (s->low_delay==0 && s->next_picture_ptr) { *pict= *(AVFrame*)s->next_picture_ptr; s->next_picture_ptr= NULL; *data_size = sizeof(AVFrame); } return 0; } if(s->flags&CODEC_FLAG_TRUNCATED){ int next; if(s->codec_id==CODEC_ID_MPEG4){ next= ff_mpeg4_find_frame_end(&s->parse_context, buf, buf_size); }else if(s->codec_id==CODEC_ID_H263){ next= h263_find_frame_end(&s->parse_context, buf, buf_size); }else{ av_log(s->avctx, AV_LOG_ERROR, "this codec doesnt support truncated bitstreams\n"); return -1; } if( ff_combine_frame(&s->parse_context, next, &buf, &buf_size) < 0 ) return buf_size; } retry: if(s->bitstream_buffer_size && (s->divx_packed || buf_size<20)){ //divx 5.01+/xvid frame reorder init_get_bits(&s->gb, s->bitstream_buffer, s->bitstream_buffer_size*8); }else init_get_bits(&s->gb, buf, buf_size*8); s->bitstream_buffer_size=0; if (!s->context_initialized) { if (MPV_common_init(s) < 0) //we need the idct permutaton for reading a custom matrix return -1; } //we need to set current_picture_ptr before reading the header, otherwise we cant store anyting im there if(s->current_picture_ptr==NULL || s->current_picture_ptr->data[0]){ int i= ff_find_unused_picture(s, 0); s->current_picture_ptr= &s->picture[i]; } /* let's go :-) */ if (s->msmpeg4_version==5) { ret= ff_wmv2_decode_picture_header(s); } else if (s->msmpeg4_version) { ret = msmpeg4_decode_picture_header(s); } else if (s->h263_pred) { if(s->avctx->extradata_size && s->picture_number==0){ GetBitContext gb; init_get_bits(&gb, s->avctx->extradata, s->avctx->extradata_size*8); ret = ff_mpeg4_decode_picture_header(s, &gb); } ret = ff_mpeg4_decode_picture_header(s, &s->gb); if(s->flags& CODEC_FLAG_LOW_DELAY) s->low_delay=1; } else if (s->codec_id == CODEC_ID_H263I) { ret = intel_h263_decode_picture_header(s); } else if (s->h263_flv) { ret = flv_h263_decode_picture_header(s); } else { ret = h263_decode_picture_header(s); } if(ret==FRAME_SKIPED) return get_consumed_bytes(s, buf_size); /* skip if the header was thrashed */ if (ret < 0){ av_log(s->avctx, AV_LOG_ERROR, "header damaged\n"); return -1; } avctx->has_b_frames= !s->low_delay; if(s->xvid_build==0 && s->divx_version==0 && s->lavc_build==0){ if(s->avctx->stream_codec_tag == ff_get_fourcc("XVID") || s->avctx->codec_tag == ff_get_fourcc("XVID") || s->avctx->codec_tag == ff_get_fourcc("XVIX")) s->xvid_build= -1; #if 0 if(s->avctx->codec_tag == ff_get_fourcc("DIVX") && s->vo_type==0 && s->vol_control_parameters==1 && s->padding_bug_score > 0 && s->low_delay) // XVID with modified fourcc s->xvid_build= -1; #endif } if(s->xvid_build==0 && s->divx_version==0 && s->lavc_build==0){ if(s->avctx->codec_tag == ff_get_fourcc("DIVX") && s->vo_type==0 && s->vol_control_parameters==0) s->divx_version= 400; //divx 4 } if(s->workaround_bugs&FF_BUG_AUTODETECT){ s->workaround_bugs &= ~FF_BUG_NO_PADDING; if(s->padding_bug_score > -2 && !s->data_partitioning && (s->divx_version || !s->resync_marker)) s->workaround_bugs |= FF_BUG_NO_PADDING; if(s->avctx->codec_tag == ff_get_fourcc("XVIX")) 
s->workaround_bugs|= FF_BUG_XVID_ILACE; if(s->avctx->codec_tag == ff_get_fourcc("UMP4")){ s->workaround_bugs|= FF_BUG_UMP4; } if(s->divx_version>=500){ s->workaround_bugs|= FF_BUG_QPEL_CHROMA; } if(s->divx_version>502){ s->workaround_bugs|= FF_BUG_QPEL_CHROMA2; } if(s->xvid_build && s->xvid_build<=3) s->padding_bug_score= 256*256*256*64; if(s->xvid_build && s->xvid_build<=1) s->workaround_bugs|= FF_BUG_QPEL_CHROMA; if(s->xvid_build && s->xvid_build<=12) s->workaround_bugs|= FF_BUG_EDGE; if(s->xvid_build && s->xvid_build<=32) s->workaround_bugs|= FF_BUG_DC_CLIP; #define SET_QPEL_FUNC(postfix1, postfix2) \ s->dsp.put_ ## postfix1 = ff_put_ ## postfix2;\ s->dsp.put_no_rnd_ ## postfix1 = ff_put_no_rnd_ ## postfix2;\ s->dsp.avg_ ## postfix1 = ff_avg_ ## postfix2; if(s->lavc_build && s->lavc_build<4653) s->workaround_bugs|= FF_BUG_STD_QPEL; if(s->lavc_build && s->lavc_build<4655) s->workaround_bugs|= FF_BUG_DIRECT_BLOCKSIZE; if(s->lavc_build && s->lavc_build<4670){ s->workaround_bugs|= FF_BUG_EDGE; } if(s->lavc_build && s->lavc_build<=4712) s->workaround_bugs|= FF_BUG_DC_CLIP; if(s->divx_version) s->workaround_bugs|= FF_BUG_DIRECT_BLOCKSIZE; //printf("padding_bug_score: %d\n", s->padding_bug_score); if(s->divx_version==501 && s->divx_build==20020416) s->padding_bug_score= 256*256*256*64; if(s->divx_version && s->divx_version<500){ s->workaround_bugs|= FF_BUG_EDGE; } if(s->divx_version) s->workaround_bugs|= FF_BUG_HPEL_CHROMA; #if 0 if(s->divx_version==500) s->padding_bug_score= 256*256*256*64; /* very ugly XVID padding bug detection FIXME/XXX solve this differently * lets hope this at least works */ if( s->resync_marker==0 && s->data_partitioning==0 && s->divx_version==0 && s->codec_id==CODEC_ID_MPEG4 && s->vo_type==0) s->workaround_bugs|= FF_BUG_NO_PADDING; if(s->lavc_build && s->lavc_build<4609) //FIXME not sure about the version num but a 4609 file seems ok s->workaround_bugs|= FF_BUG_NO_PADDING; #endif } if(s->workaround_bugs& FF_BUG_STD_QPEL){ SET_QPEL_FUNC(qpel_pixels_tab[0][ 5], qpel16_mc11_old_c) SET_QPEL_FUNC(qpel_pixels_tab[0][ 7], qpel16_mc31_old_c) SET_QPEL_FUNC(qpel_pixels_tab[0][ 9], qpel16_mc12_old_c) SET_QPEL_FUNC(qpel_pixels_tab[0][11], qpel16_mc32_old_c) SET_QPEL_FUNC(qpel_pixels_tab[0][13], qpel16_mc13_old_c) SET_QPEL_FUNC(qpel_pixels_tab[0][15], qpel16_mc33_old_c) SET_QPEL_FUNC(qpel_pixels_tab[1][ 5], qpel8_mc11_old_c) SET_QPEL_FUNC(qpel_pixels_tab[1][ 7], qpel8_mc31_old_c) SET_QPEL_FUNC(qpel_pixels_tab[1][ 9], qpel8_mc12_old_c) SET_QPEL_FUNC(qpel_pixels_tab[1][11], qpel8_mc32_old_c) SET_QPEL_FUNC(qpel_pixels_tab[1][13], qpel8_mc13_old_c) SET_QPEL_FUNC(qpel_pixels_tab[1][15], qpel8_mc33_old_c) } if(avctx->debug & FF_DEBUG_BUGS) av_log(s->avctx, AV_LOG_DEBUG, "bugs: %X lavc_build:%d xvid_build:%d divx_version:%d divx_build:%d %s\n", s->workaround_bugs, s->lavc_build, s->xvid_build, s->divx_version, s->divx_build, s->divx_packed ? "p" : ""); #if 0 // dump bits per frame / qp / complexity { static FILE *f=NULL; if(!f) f=fopen("rate_qp_cplx.txt", "w"); fprintf(f, "%d %d %f\n", buf_size, s->qscale, buf_size*(double)s->qscale); } #endif /* After H263 & mpeg4 header decode we have the height, width,*/ /* and other parameters. 
So then we could init the picture */ /* FIXME: By the way H263 decoder is evolving it should have */ /* an H263EncContext */ if ( s->width != avctx->width || s->height != avctx->height) { /* H.263 could change picture size any time */ ParseContext pc= s->parse_context; //FIXME move these demuxng hack to avformat s->parse_context.buffer=0; MPV_common_end(s); s->parse_context= pc; } if (!s->context_initialized) { avctx->width = s->width; avctx->height = s->height; goto retry; } if((s->codec_id==CODEC_ID_H263 || s->codec_id==CODEC_ID_H263P)) s->gob_index = ff_h263_get_gob_height(s); // for hurry_up==5 s->current_picture.pict_type= s->pict_type; s->current_picture.key_frame= s->pict_type == I_TYPE; /* skip b frames if we dont have reference frames */ if(s->last_picture_ptr==NULL && s->pict_type==B_TYPE) return get_consumed_bytes(s, buf_size); /* skip b frames if we are in a hurry */ if(avctx->hurry_up && s->pict_type==B_TYPE) return get_consumed_bytes(s, buf_size); /* skip everything if we are in a hurry>=5 */ if(avctx->hurry_up>=5) return get_consumed_bytes(s, buf_size); if(s->next_p_frame_damaged){ if(s->pict_type==B_TYPE) return get_consumed_bytes(s, buf_size); else s->next_p_frame_damaged=0; } if(MPV_frame_start(s, avctx) < 0) return -1; #ifdef DEBUG printf("qscale=%d\n", s->qscale); #endif ff_er_frame_start(s); //the second part of the wmv2 header contains the MB skip bits which are stored in current_picture->mb_type //which isnt available before MPV_frame_start() if (s->msmpeg4_version==5){ if(ff_wmv2_decode_secondary_picture_header(s) < 0) return -1; } /* decode each macroblock */ s->mb_x=0; s->mb_y=0; decode_slice(s); while(s->mb_y<s->mb_height){ if(s->msmpeg4_version){ if(s->mb_x!=0 || (s->mb_y%s->slice_height)!=0 || get_bits_count(&s->gb) > s->gb.size_in_bits) break; }else{ if(ff_h263_resync(s)<0) break; } if(s->msmpeg4_version<4 && s->h263_pred) ff_mpeg4_clean_buffers(s); decode_slice(s); } if (s->h263_msmpeg4 && s->msmpeg4_version<4 && s->pict_type==I_TYPE) if(msmpeg4_decode_ext_header(s, buf_size) < 0){ s->error_status_table[s->mb_num-1]= AC_ERROR|DC_ERROR|MV_ERROR; } /* divx 5.01+ bistream reorder stuff */ if(s->codec_id==CODEC_ID_MPEG4 && s->bitstream_buffer_size==0 && s->divx_packed){ int current_pos= get_bits_count(&s->gb)>>3; int startcode_found=0; if( buf_size - current_pos > 5 && buf_size - current_pos < BITSTREAM_BUFFER_SIZE){ int i; for(i=current_pos; i<buf_size-3; i++){ if(buf[i]==0 && buf[i+1]==0 && buf[i+2]==1 && buf[i+3]==0xB6){ startcode_found=1; break; } } } if(s->gb.buffer == s->bitstream_buffer && buf_size>20){ //xvid style startcode_found=1; current_pos=0; } if(startcode_found){ memcpy(s->bitstream_buffer, buf + current_pos, buf_size - current_pos); s->bitstream_buffer_size= buf_size - current_pos; } } ff_er_frame_end(s); MPV_frame_end(s); assert(s->current_picture.pict_type == s->current_picture_ptr->pict_type); assert(s->current_picture.pict_type == s->pict_type); if(s->pict_type==B_TYPE || s->low_delay){ *pict= *(AVFrame*)&s->current_picture; ff_print_debug_info(s, pict); } else { *pict= *(AVFrame*)&s->last_picture; if(pict) ff_print_debug_info(s, pict); } /* Return the Picture timestamp as the frame number */ /* we substract 1 because it is added on utils.c */ avctx->frame_number = s->picture_number - 1; /* dont output the last pic after seeking */ if(s->last_picture_ptr || s->low_delay) *data_size = sizeof(AVFrame); #ifdef PRINT_FRAME_TIME printf("%Ld\n", rdtsc()-time); #endif return get_consumed_bytes(s, buf_size); }
22,929
1
static void vnc_dpy_copy(DisplayChangeListener *dcl, DisplayState *ds, int src_x, int src_y, int dst_x, int dst_y, int w, int h) { VncDisplay *vd = ds->opaque; VncState *vs, *vn; uint8_t *src_row; uint8_t *dst_row; int i, x, y, pitch, inc, w_lim, s; int cmp_bytes; vnc_refresh_server_surface(vd); QTAILQ_FOREACH_SAFE(vs, &vd->clients, next, vn) { if (vnc_has_feature(vs, VNC_FEATURE_COPYRECT)) { vs->force_update = 1; vnc_update_client_sync(vs, 1); /* vs might be free()ed here */ } } /* do bitblit op on the local surface too */ pitch = vnc_server_fb_stride(vd); src_row = vnc_server_fb_ptr(vd, src_x, src_y); dst_row = vnc_server_fb_ptr(vd, dst_x, dst_y); y = dst_y; inc = 1; if (dst_y > src_y) { /* copy backwards */ src_row += pitch * (h-1); dst_row += pitch * (h-1); pitch = -pitch; y = dst_y + h - 1; inc = -1; } w_lim = w - (16 - (dst_x % 16)); if (w_lim < 0) w_lim = w; else w_lim = w - (w_lim % 16); for (i = 0; i < h; i++) { for (x = 0; x <= w_lim; x += s, src_row += cmp_bytes, dst_row += cmp_bytes) { if (x == w_lim) { if ((s = w - w_lim) == 0) break; } else if (!x) { s = (16 - (dst_x % 16)); s = MIN(s, w_lim); } else { s = 16; } cmp_bytes = s * VNC_SERVER_FB_BYTES; if (memcmp(src_row, dst_row, cmp_bytes) == 0) continue; memmove(dst_row, src_row, cmp_bytes); QTAILQ_FOREACH(vs, &vd->clients, next) { if (!vnc_has_feature(vs, VNC_FEATURE_COPYRECT)) { set_bit(((x + dst_x) / 16), vs->dirty[y]); } } } src_row += pitch - w * VNC_SERVER_FB_BYTES; dst_row += pitch - w * VNC_SERVER_FB_BYTES; y += inc; } QTAILQ_FOREACH(vs, &vd->clients, next) { if (vnc_has_feature(vs, VNC_FEATURE_COPYRECT)) { vnc_copy(vs, src_x, src_y, dst_x, dst_y, w, h); } } }
22,930
1
static void FUNCC(pred8x8_vertical)(uint8_t *_src, int _stride){ int i; pixel *src = (pixel*)_src; int stride = _stride/sizeof(pixel); const pixel4 a= ((pixel4*)(src-stride))[0]; const pixel4 b= ((pixel4*)(src-stride))[1]; for(i=0; i<8; i++){ ((pixel4*)(src+i*stride))[0]= a; ((pixel4*)(src+i*stride))[1]= b; } }
22,932
1
static int videotoolbox_buffer_create(AVCodecContext *avctx, AVFrame *frame) { VTContext *vtctx = avctx->internal->hwaccel_priv_data; CVPixelBufferRef pixbuf = (CVPixelBufferRef)vtctx->frame; OSType pixel_format = CVPixelBufferGetPixelFormatType(pixbuf); enum AVPixelFormat sw_format = av_map_videotoolbox_format_to_pixfmt(pixel_format); int width = CVPixelBufferGetWidth(pixbuf); int height = CVPixelBufferGetHeight(pixbuf); AVHWFramesContext *cached_frames; int ret; ret = ff_videotoolbox_buffer_create(vtctx, frame); if (ret < 0) return ret; // Old API code path. if (!vtctx->cached_hw_frames_ctx) return 0; cached_frames = (AVHWFramesContext*)vtctx->cached_hw_frames_ctx->data; if (cached_frames->sw_format != sw_format || cached_frames->width != width || cached_frames->height != height) { AVBufferRef *hw_frames_ctx = av_hwframe_ctx_alloc(cached_frames->device_ref); AVHWFramesContext *hw_frames; if (!hw_frames_ctx) return AVERROR(ENOMEM); hw_frames = (AVHWFramesContext*)hw_frames_ctx->data; hw_frames->format = cached_frames->format; hw_frames->sw_format = sw_format; hw_frames->width = width; hw_frames->height = height; ret = av_hwframe_ctx_init(hw_frames_ctx); if (ret < 0) { av_buffer_unref(&hw_frames_ctx); return ret; } av_buffer_unref(&vtctx->cached_hw_frames_ctx); vtctx->cached_hw_frames_ctx = hw_frames_ctx; } av_assert0(!frame->hw_frames_ctx); frame->hw_frames_ctx = av_buffer_ref(vtctx->cached_hw_frames_ctx); if (!frame->hw_frames_ctx) return AVERROR(ENOMEM); return 0; }
22,933
1
static void inner_add_yblock_bw_16_obmc_32_sse2(const uint8_t *obmc, const long obmc_stride, uint8_t * * block, int b_w, long b_h, int src_x, int src_y, long src_stride, slice_buffer * sb, int add, uint8_t * dst8){ snow_inner_add_yblock_sse2_header snow_inner_add_yblock_sse2_start_16("xmm1", "xmm5", "3", "0") snow_inner_add_yblock_sse2_accum_16("2", "16") snow_inner_add_yblock_sse2_accum_16("1", "512") snow_inner_add_yblock_sse2_accum_16("0", "528") "mov %0, %%"REG_d" \n\t" "movdqa %%xmm1, %%xmm0 \n\t" "movdqa %%xmm5, %%xmm4 \n\t" "punpcklwd %%xmm7, %%xmm0 \n\t" "paddd (%%"REG_D"), %%xmm0 \n\t" "punpckhwd %%xmm7, %%xmm1 \n\t" "paddd 16(%%"REG_D"), %%xmm1 \n\t" "punpcklwd %%xmm7, %%xmm4 \n\t" "paddd 32(%%"REG_D"), %%xmm4 \n\t" "punpckhwd %%xmm7, %%xmm5 \n\t" "paddd 48(%%"REG_D"), %%xmm5 \n\t" "paddd %%xmm3, %%xmm0 \n\t" "paddd %%xmm3, %%xmm1 \n\t" "paddd %%xmm3, %%xmm4 \n\t" "paddd %%xmm3, %%xmm5 \n\t" "psrad $8, %%xmm0 \n\t" /* FRAC_BITS. */ "psrad $8, %%xmm1 \n\t" /* FRAC_BITS. */ "psrad $8, %%xmm4 \n\t" /* FRAC_BITS. */ "psrad $8, %%xmm5 \n\t" /* FRAC_BITS. */ "packssdw %%xmm1, %%xmm0 \n\t" "packssdw %%xmm5, %%xmm4 \n\t" "packuswb %%xmm4, %%xmm0 \n\t" "movdqu %%xmm0, (%%"REG_d") \n\t" snow_inner_add_yblock_sse2_end_16 }
22,934
1
restore_fpu_state(CPUSPARCState *env, qemu_siginfo_fpu_t *fpu) { int err; #if 0 #ifdef CONFIG_SMP if (current->flags & PF_USEDFPU) regs->psr &= ~PSR_EF; #else if (current == last_task_used_math) { last_task_used_math = 0; regs->psr &= ~PSR_EF; } #endif current->used_math = 1; current->flags &= ~PF_USEDFPU; #endif #if 0 if (verify_area (VERIFY_READ, fpu, sizeof(*fpu))) return -EFAULT; #endif #if 0 /* XXX: incorrect */ err = __copy_from_user(&env->fpr[0], &fpu->si_float_regs[0], (sizeof(unsigned long) * 32)); #endif err |= __get_user(env->fsr, &fpu->si_fsr); #if 0 err |= __get_user(current->thread.fpqdepth, &fpu->si_fpqdepth); if (current->thread.fpqdepth != 0) err |= __copy_from_user(&current->thread.fpqueue[0], &fpu->si_fpqueue[0], ((sizeof(unsigned long) + (sizeof(unsigned long *)))*16)); #endif return err; }
22,935
1
static void gen_vfp_msr(TCGv tmp) { tcg_gen_mov_i32(cpu_F0s, tmp); dead_tmp(tmp); }
22,936
0
static int ff_asf_get_packet(AVFormatContext *s, AVIOContext *pb) { ASFContext *asf = s->priv_data; uint32_t packet_length, padsize; int rsize = 8; int c, d, e, off; // if we do not know packet size, allow skipping up to 32 kB off= 32768; if (s->packet_size > 0) off= (avio_tell(pb) - s->data_offset) % s->packet_size + 3; c=d=e=-1; while(off-- > 0){ c=d; d=e; e= avio_r8(pb); if(c == 0x82 && !d && !e) break; } if (c != 0x82) { /** * This code allows handling of -EAGAIN at packet boundaries (i.e. * if the packet sync code above triggers -EAGAIN). This does not * imply complete -EAGAIN handling support at random positions in * the stream. */ if (pb->error == AVERROR(EAGAIN)) return AVERROR(EAGAIN); if (!url_feof(pb)) av_log(s, AV_LOG_ERROR, "ff asf bad header %x at:%"PRId64"\n", c, avio_tell(pb)); } if ((c & 0x8f) == 0x82) { if (d || e) { if (!url_feof(pb)) av_log(s, AV_LOG_ERROR, "ff asf bad non zero\n"); return -1; } c= avio_r8(pb); d= avio_r8(pb); rsize+=3; }else{ avio_seek(pb, -1, SEEK_CUR); //FIXME } asf->packet_flags = c; asf->packet_property = d; DO_2BITS(asf->packet_flags >> 5, packet_length, s->packet_size); DO_2BITS(asf->packet_flags >> 1, padsize, 0); // sequence ignored DO_2BITS(asf->packet_flags >> 3, padsize, 0); // padding length //the following checks prevent overflows and infinite loops if(!packet_length || packet_length >= (1U<<29)){ av_log(s, AV_LOG_ERROR, "invalid packet_length %d at:%"PRId64"\n", packet_length, avio_tell(pb)); return -1; } if(padsize >= packet_length){ av_log(s, AV_LOG_ERROR, "invalid padsize %d at:%"PRId64"\n", padsize, avio_tell(pb)); return -1; } asf->packet_timestamp = avio_rl32(pb); avio_rl16(pb); /* duration */ // rsize has at least 11 bytes which have to be present if (asf->packet_flags & 0x01) { asf->packet_segsizetype = avio_r8(pb); rsize++; asf->packet_segments = asf->packet_segsizetype & 0x3f; } else { asf->packet_segments = 1; asf->packet_segsizetype = 0x80; } asf->packet_size_left = packet_length - padsize - rsize; if (packet_length < asf->hdr.min_pktsize) padsize += asf->hdr.min_pktsize - packet_length; asf->packet_padsize = padsize; av_dlog(s, "packet: size=%d padsize=%d left=%d\n", s->packet_size, asf->packet_padsize, asf->packet_size_left); return 0; }
22,938
0
void checkasm_check_blockdsp(void) { LOCAL_ALIGNED_16(uint16_t, buf0, [6 * 8 * 8]); LOCAL_ALIGNED_16(uint16_t, buf1, [6 * 8 * 8]); AVCodecContext avctx = { 0 }; BlockDSPContext h; ff_blockdsp_init(&h, &avctx); check_clear(clear_block, 8 * 8); check_clear(clear_blocks, 8 * 8 * 6); report("blockdsp"); }
22,939
1
void stellaris_enet_init(NICInfo *nd, uint32_t base, qemu_irq irq) { stellaris_enet_state *s; int iomemtype; qemu_check_nic_model(nd, "stellaris"); s = (stellaris_enet_state *)qemu_mallocz(sizeof(stellaris_enet_state)); iomemtype = cpu_register_io_memory(0, stellaris_enet_readfn, stellaris_enet_writefn, s); cpu_register_physical_memory(base, 0x00001000, iomemtype); s->irq = irq; memcpy(s->macaddr, nd->macaddr, 6); if (nd->vlan) { s->vc = qemu_new_vlan_client(nd->vlan, nd->model, nd->name, stellaris_enet_receive, stellaris_enet_can_receive, s); qemu_format_nic_info_str(s->vc, s->macaddr); } stellaris_enet_reset(s); register_savevm("stellaris_enet", -1, 1, stellaris_enet_save, stellaris_enet_load, s); }
22,940
1
static int mkv_write_chapters(AVFormatContext *s) { MatroskaMuxContext *mkv = s->priv_data; AVIOContext *pb = s->pb; ebml_master chapters, editionentry; AVRational scale = {1, 1E9}; int i, ret; if (!s->nb_chapters || mkv->wrote_chapters) return 0; ret = mkv_add_seekhead_entry(mkv->main_seekhead, MATROSKA_ID_CHAPTERS, avio_tell(pb)); if (ret < 0) return ret; chapters = start_ebml_master(pb, MATROSKA_ID_CHAPTERS , 0); editionentry = start_ebml_master(pb, MATROSKA_ID_EDITIONENTRY, 0); put_ebml_uint(pb, MATROSKA_ID_EDITIONFLAGDEFAULT, 1); put_ebml_uint(pb, MATROSKA_ID_EDITIONFLAGHIDDEN , 0); for (i = 0; i < s->nb_chapters; i++) { ebml_master chapteratom, chapterdisplay; AVChapter *c = s->chapters[i]; int chapterstart = av_rescale_q(c->start, c->time_base, scale); int chapterend = av_rescale_q(c->end, c->time_base, scale); AVDictionaryEntry *t = NULL; if (chapterstart < 0 || chapterstart > chapterend) return AVERROR_INVALIDDATA; chapteratom = start_ebml_master(pb, MATROSKA_ID_CHAPTERATOM, 0); put_ebml_uint(pb, MATROSKA_ID_CHAPTERUID, c->id + mkv->chapter_id_offset); put_ebml_uint(pb, MATROSKA_ID_CHAPTERTIMESTART, chapterstart); put_ebml_uint(pb, MATROSKA_ID_CHAPTERTIMEEND, chapterend); put_ebml_uint(pb, MATROSKA_ID_CHAPTERFLAGHIDDEN , 0); put_ebml_uint(pb, MATROSKA_ID_CHAPTERFLAGENABLED, 1); if ((t = av_dict_get(c->metadata, "title", NULL, 0))) { chapterdisplay = start_ebml_master(pb, MATROSKA_ID_CHAPTERDISPLAY, 0); put_ebml_string(pb, MATROSKA_ID_CHAPSTRING, t->value); put_ebml_string(pb, MATROSKA_ID_CHAPLANG , "und"); end_ebml_master(pb, chapterdisplay); } end_ebml_master(pb, chapteratom); } end_ebml_master(pb, editionentry); end_ebml_master(pb, chapters); mkv->wrote_chapters = 1; return 0; }
22,941
1
static inline void vring_set_avail_event(VirtQueue *vq, uint16_t val) { VRingMemoryRegionCaches *caches; hwaddr pa; if (!vq->notification) { return; } caches = atomic_rcu_read(&vq->vring.caches); pa = offsetof(VRingUsed, ring[vq->vring.num]); virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val); address_space_cache_invalidate(&caches->used, pa, sizeof(val)); }
22,942
0
void ff_er_frame_end(ERContext *s) { int *linesize = s->cur_pic->f.linesize; int i, mb_x, mb_y, error, error_type, dc_error, mv_error, ac_error; int distance; int threshold_part[4] = { 100, 100, 100 }; int threshold = 50; int is_intra_likely; int size = s->b8_stride * 2 * s->mb_height; /* We do not support ER of field pictures yet, * though it should not crash if enabled. */ if (!s->avctx->error_concealment || s->error_count == 0 || s->avctx->lowres || s->avctx->hwaccel || s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU || !s->cur_pic || s->cur_pic->field_picture || s->error_count == 3 * s->mb_width * (s->avctx->skip_top + s->avctx->skip_bottom)) { return; } for (mb_x = 0; mb_x < s->mb_width; mb_x++) { int status = s->error_status_table[mb_x + (s->mb_height - 1) * s->mb_stride]; if (status != 0x7F) break; } if ( mb_x == s->mb_width && s->avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO && (s->avctx->height&16) && s->error_count == 3 * s->mb_width * (s->avctx->skip_top + s->avctx->skip_bottom + 1) ) { av_log(s->avctx, AV_LOG_DEBUG, "ignoring last missing slice\n"); return; } if (s->last_pic) { if (s->last_pic->f.width != s->cur_pic->f.width || s->last_pic->f.height != s->cur_pic->f.height || s->last_pic->f.format != s->cur_pic->f.format) { av_log(s->avctx, AV_LOG_WARNING, "Cannot use previous picture in error concealment\n"); s->last_pic = NULL; } } if (s->next_pic) { if (s->next_pic->f.width != s->cur_pic->f.width || s->next_pic->f.height != s->cur_pic->f.height || s->next_pic->f.format != s->cur_pic->f.format) { av_log(s->avctx, AV_LOG_WARNING, "Cannot use next picture in error concealment\n"); s->next_pic = NULL; } } if (s->cur_pic->motion_val[0] == NULL) { av_log(s->avctx, AV_LOG_ERROR, "Warning MVs not available\n"); for (i = 0; i < 2; i++) { s->cur_pic->ref_index_buf[i] = av_buffer_allocz(s->mb_stride * s->mb_height * 4 * sizeof(uint8_t)); s->cur_pic->motion_val_buf[i] = av_buffer_allocz((size + 4) * 2 * sizeof(uint16_t)); if (!s->cur_pic->ref_index_buf[i] || !s->cur_pic->motion_val_buf[i]) break; s->cur_pic->ref_index[i] = s->cur_pic->ref_index_buf[i]->data; s->cur_pic->motion_val[i] = (int16_t (*)[2])s->cur_pic->motion_val_buf[i]->data + 4; } if (i < 2) { for (i = 0; i < 2; i++) { av_buffer_unref(&s->cur_pic->ref_index_buf[i]); av_buffer_unref(&s->cur_pic->motion_val_buf[i]); s->cur_pic->ref_index[i] = NULL; s->cur_pic->motion_val[i] = NULL; } return; } } if (s->avctx->debug & FF_DEBUG_ER) { for (mb_y = 0; mb_y < s->mb_height; mb_y++) { for (mb_x = 0; mb_x < s->mb_width; mb_x++) { int status = s->error_status_table[mb_x + mb_y * s->mb_stride]; av_log(s->avctx, AV_LOG_DEBUG, "%2X ", status); } av_log(s->avctx, AV_LOG_DEBUG, "\n"); } } #if 1 /* handle overlapping slices */ for (error_type = 1; error_type <= 3; error_type++) { int end_ok = 0; for (i = s->mb_num - 1; i >= 0; i--) { const int mb_xy = s->mb_index2xy[i]; int error = s->error_status_table[mb_xy]; if (error & (1 << error_type)) end_ok = 1; if (error & (8 << error_type)) end_ok = 1; if (!end_ok) s->error_status_table[mb_xy] |= 1 << error_type; if (error & VP_START) end_ok = 0; } } #endif #if 1 /* handle slices with partitions of different length */ if (s->partitioned_frame) { int end_ok = 0; for (i = s->mb_num - 1; i >= 0; i--) { const int mb_xy = s->mb_index2xy[i]; int error = s->error_status_table[mb_xy]; if (error & ER_AC_END) end_ok = 0; if ((error & ER_MV_END) || (error & ER_DC_END) || (error & ER_AC_ERROR)) end_ok = 1; if (!end_ok) s->error_status_table[mb_xy]|= ER_AC_ERROR; if (error & VP_START) end_ok = 0; } } #endif 
/* handle missing slices */ if (s->avctx->err_recognition & AV_EF_EXPLODE) { int end_ok = 1; // FIXME + 100 hack for (i = s->mb_num - 2; i >= s->mb_width + 100; i--) { const int mb_xy = s->mb_index2xy[i]; int error1 = s->error_status_table[mb_xy]; int error2 = s->error_status_table[s->mb_index2xy[i + 1]]; if (error1 & VP_START) end_ok = 1; if (error2 == (VP_START | ER_MB_ERROR | ER_MB_END) && error1 != (VP_START | ER_MB_ERROR | ER_MB_END) && ((error1 & ER_AC_END) || (error1 & ER_DC_END) || (error1 & ER_MV_END))) { // end & uninit end_ok = 0; } if (!end_ok) s->error_status_table[mb_xy] |= ER_MB_ERROR; } } #if 1 /* backward mark errors */ distance = 9999999; for (error_type = 1; error_type <= 3; error_type++) { for (i = s->mb_num - 1; i >= 0; i--) { const int mb_xy = s->mb_index2xy[i]; int error = s->error_status_table[mb_xy]; if (!s->mbskip_table[mb_xy]) // FIXME partition specific distance++; if (error & (1 << error_type)) distance = 0; if (s->partitioned_frame) { if (distance < threshold_part[error_type - 1]) s->error_status_table[mb_xy] |= 1 << error_type; } else { if (distance < threshold) s->error_status_table[mb_xy] |= 1 << error_type; } if (error & VP_START) distance = 9999999; } } #endif /* forward mark errors */ error = 0; for (i = 0; i < s->mb_num; i++) { const int mb_xy = s->mb_index2xy[i]; int old_error = s->error_status_table[mb_xy]; if (old_error & VP_START) { error = old_error & ER_MB_ERROR; } else { error |= old_error & ER_MB_ERROR; s->error_status_table[mb_xy] |= error; } } #if 1 /* handle not partitioned case */ if (!s->partitioned_frame) { for (i = 0; i < s->mb_num; i++) { const int mb_xy = s->mb_index2xy[i]; error = s->error_status_table[mb_xy]; if (error & ER_MB_ERROR) error |= ER_MB_ERROR; s->error_status_table[mb_xy] = error; } } #endif dc_error = ac_error = mv_error = 0; for (i = 0; i < s->mb_num; i++) { const int mb_xy = s->mb_index2xy[i]; error = s->error_status_table[mb_xy]; if (error & ER_DC_ERROR) dc_error++; if (error & ER_AC_ERROR) ac_error++; if (error & ER_MV_ERROR) mv_error++; } av_log(s->avctx, AV_LOG_INFO, "concealing %d DC, %d AC, %d MV errors in %c frame\n", dc_error, ac_error, mv_error, av_get_picture_type_char(s->cur_pic->f.pict_type)); is_intra_likely = is_intra_more_likely(s); /* set unknown mb-type to most likely */ for (i = 0; i < s->mb_num; i++) { const int mb_xy = s->mb_index2xy[i]; error = s->error_status_table[mb_xy]; if (!((error & ER_DC_ERROR) && (error & ER_MV_ERROR))) continue; if (is_intra_likely) s->cur_pic->mb_type[mb_xy] = MB_TYPE_INTRA4x4; else s->cur_pic->mb_type[mb_xy] = MB_TYPE_16x16 | MB_TYPE_L0; } // change inter to intra blocks if no reference frames are available if (!(s->last_pic && s->last_pic->f.data[0]) && !(s->next_pic && s->next_pic->f.data[0])) for (i = 0; i < s->mb_num; i++) { const int mb_xy = s->mb_index2xy[i]; if (!IS_INTRA(s->cur_pic->mb_type[mb_xy])) s->cur_pic->mb_type[mb_xy] = MB_TYPE_INTRA4x4; } /* handle inter blocks with damaged AC */ for (mb_y = 0; mb_y < s->mb_height; mb_y++) { for (mb_x = 0; mb_x < s->mb_width; mb_x++) { const int mb_xy = mb_x + mb_y * s->mb_stride; const int mb_type = s->cur_pic->mb_type[mb_xy]; const int dir = !(s->last_pic && s->last_pic->f.data[0]); const int mv_dir = dir ? 
MV_DIR_BACKWARD : MV_DIR_FORWARD; int mv_type; error = s->error_status_table[mb_xy]; if (IS_INTRA(mb_type)) continue; // intra if (error & ER_MV_ERROR) continue; // inter with damaged MV if (!(error & ER_AC_ERROR)) continue; // undamaged inter if (IS_8X8(mb_type)) { int mb_index = mb_x * 2 + mb_y * 2 * s->b8_stride; int j; mv_type = MV_TYPE_8X8; for (j = 0; j < 4; j++) { s->mv[0][j][0] = s->cur_pic->motion_val[dir][mb_index + (j & 1) + (j >> 1) * s->b8_stride][0]; s->mv[0][j][1] = s->cur_pic->motion_val[dir][mb_index + (j & 1) + (j >> 1) * s->b8_stride][1]; } } else { mv_type = MV_TYPE_16X16; s->mv[0][0][0] = s->cur_pic->motion_val[dir][mb_x * 2 + mb_y * 2 * s->b8_stride][0]; s->mv[0][0][1] = s->cur_pic->motion_val[dir][mb_x * 2 + mb_y * 2 * s->b8_stride][1]; } s->decode_mb(s->opaque, 0 /* FIXME h264 partitioned slices need this set */, mv_dir, mv_type, &s->mv, mb_x, mb_y, 0, 0); } } /* guess MVs */ if (s->cur_pic->f.pict_type == AV_PICTURE_TYPE_B) { for (mb_y = 0; mb_y < s->mb_height; mb_y++) { for (mb_x = 0; mb_x < s->mb_width; mb_x++) { int xy = mb_x * 2 + mb_y * 2 * s->b8_stride; const int mb_xy = mb_x + mb_y * s->mb_stride; const int mb_type = s->cur_pic->mb_type[mb_xy]; int mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD; error = s->error_status_table[mb_xy]; if (IS_INTRA(mb_type)) continue; if (!(error & ER_MV_ERROR)) continue; // inter with undamaged MV if (!(error & ER_AC_ERROR)) continue; // undamaged inter if (!(s->last_pic && s->last_pic->f.data[0])) mv_dir &= ~MV_DIR_FORWARD; if (!(s->next_pic && s->next_pic->f.data[0])) mv_dir &= ~MV_DIR_BACKWARD; if (s->pp_time) { int time_pp = s->pp_time; int time_pb = s->pb_time; av_assert0(s->avctx->codec_id != AV_CODEC_ID_H264); ff_thread_await_progress(&s->next_pic->tf, mb_y, 0); s->mv[0][0][0] = s->next_pic->motion_val[0][xy][0] * time_pb / time_pp; s->mv[0][0][1] = s->next_pic->motion_val[0][xy][1] * time_pb / time_pp; s->mv[1][0][0] = s->next_pic->motion_val[0][xy][0] * (time_pb - time_pp) / time_pp; s->mv[1][0][1] = s->next_pic->motion_val[0][xy][1] * (time_pb - time_pp) / time_pp; } else { s->mv[0][0][0] = 0; s->mv[0][0][1] = 0; s->mv[1][0][0] = 0; s->mv[1][0][1] = 0; } s->decode_mb(s->opaque, 0, mv_dir, MV_TYPE_16X16, &s->mv, mb_x, mb_y, 0, 0); } } } else guess_mv(s); #if FF_API_XVMC FF_DISABLE_DEPRECATION_WARNINGS /* the filters below are not XvMC compatible, skip them */ if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) goto ec_clean; FF_ENABLE_DEPRECATION_WARNINGS #endif /* FF_API_XVMC */ /* fill DC for inter blocks */ for (mb_y = 0; mb_y < s->mb_height; mb_y++) { for (mb_x = 0; mb_x < s->mb_width; mb_x++) { int dc, dcu, dcv, y, n; int16_t *dc_ptr; uint8_t *dest_y, *dest_cb, *dest_cr; const int mb_xy = mb_x + mb_y * s->mb_stride; const int mb_type = s->cur_pic->mb_type[mb_xy]; error = s->error_status_table[mb_xy]; if (IS_INTRA(mb_type) && s->partitioned_frame) continue; // if (error & ER_MV_ERROR) // continue; // inter data damaged FIXME is this good? 
dest_y = s->cur_pic->f.data[0] + mb_x * 16 + mb_y * 16 * linesize[0]; dest_cb = s->cur_pic->f.data[1] + mb_x * 8 + mb_y * 8 * linesize[1]; dest_cr = s->cur_pic->f.data[2] + mb_x * 8 + mb_y * 8 * linesize[2]; dc_ptr = &s->dc_val[0][mb_x * 2 + mb_y * 2 * s->b8_stride]; for (n = 0; n < 4; n++) { dc = 0; for (y = 0; y < 8; y++) { int x; for (x = 0; x < 8; x++) dc += dest_y[x + (n & 1) * 8 + (y + (n >> 1) * 8) * linesize[0]]; } dc_ptr[(n & 1) + (n >> 1) * s->b8_stride] = (dc + 4) >> 3; } dcu = dcv = 0; for (y = 0; y < 8; y++) { int x; for (x = 0; x < 8; x++) { dcu += dest_cb[x + y * linesize[1]]; dcv += dest_cr[x + y * linesize[2]]; } } s->dc_val[1][mb_x + mb_y * s->mb_stride] = (dcu + 4) >> 3; s->dc_val[2][mb_x + mb_y * s->mb_stride] = (dcv + 4) >> 3; } } #if 1 /* guess DC for damaged blocks */ guess_dc(s, s->dc_val[0], s->mb_width*2, s->mb_height*2, s->b8_stride, 1); guess_dc(s, s->dc_val[1], s->mb_width , s->mb_height , s->mb_stride, 0); guess_dc(s, s->dc_val[2], s->mb_width , s->mb_height , s->mb_stride, 0); #endif /* filter luma DC */ filter181(s->dc_val[0], s->mb_width * 2, s->mb_height * 2, s->b8_stride); #if 1 /* render DC only intra */ for (mb_y = 0; mb_y < s->mb_height; mb_y++) { for (mb_x = 0; mb_x < s->mb_width; mb_x++) { uint8_t *dest_y, *dest_cb, *dest_cr; const int mb_xy = mb_x + mb_y * s->mb_stride; const int mb_type = s->cur_pic->mb_type[mb_xy]; error = s->error_status_table[mb_xy]; if (IS_INTER(mb_type)) continue; if (!(error & ER_AC_ERROR)) continue; // undamaged dest_y = s->cur_pic->f.data[0] + mb_x * 16 + mb_y * 16 * linesize[0]; dest_cb = s->cur_pic->f.data[1] + mb_x * 8 + mb_y * 8 * linesize[1]; dest_cr = s->cur_pic->f.data[2] + mb_x * 8 + mb_y * 8 * linesize[2]; put_dc(s, dest_y, dest_cb, dest_cr, mb_x, mb_y); } } #endif if (s->avctx->error_concealment & FF_EC_DEBLOCK) { /* filter horizontal block boundaries */ h_block_filter(s, s->cur_pic->f.data[0], s->mb_width * 2, s->mb_height * 2, linesize[0], 1); h_block_filter(s, s->cur_pic->f.data[1], s->mb_width, s->mb_height, linesize[1], 0); h_block_filter(s, s->cur_pic->f.data[2], s->mb_width, s->mb_height, linesize[2], 0); /* filter vertical block boundaries */ v_block_filter(s, s->cur_pic->f.data[0], s->mb_width * 2, s->mb_height * 2, linesize[0], 1); v_block_filter(s, s->cur_pic->f.data[1], s->mb_width, s->mb_height, linesize[1], 0); v_block_filter(s, s->cur_pic->f.data[2], s->mb_width, s->mb_height, linesize[2], 0); } ec_clean: /* clean a few tables */ for (i = 0; i < s->mb_num; i++) { const int mb_xy = s->mb_index2xy[i]; int error = s->error_status_table[mb_xy]; if (s->cur_pic->f.pict_type != AV_PICTURE_TYPE_B && (error & (ER_DC_ERROR | ER_MV_ERROR | ER_AC_ERROR))) { s->mbskip_table[mb_xy] = 0; } s->mbintra_table[mb_xy] = 1; } s->cur_pic = NULL; s->next_pic = NULL; s->last_pic = NULL; }
22,943
1
static void init_excp_601 (CPUPPCState *env) { #if !defined(CONFIG_USER_ONLY) env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100; env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200; env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300; env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400; env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500; env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600; env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700; env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800; env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900; env->excp_vectors[POWERPC_EXCP_IO] = 0x00000A00; env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00; env->excp_vectors[POWERPC_EXCP_RUNM] = 0x00002000; env->excp_prefix = 0xFFF00000; /* Hardware reset vector */ env->hreset_vector = 0x00000100UL; #endif }
22,945
0
void ff_acelp_weighted_filter( int16_t *out, const int16_t* in, const int16_t *weight_pow, int filter_length) { int n; for(n=0; n<filter_length; n++) out[n] = (in[n] * weight_pow[n] + 0x4000) >> 15; /* (3.12) = (0.15) * (3.12) with rounding */ }
22,946
0
void ff_vp3_idct_c(DCTELEM *block/* align 16*/){ idct(NULL, 0, block, 0); }
22,947
0
static int seq_fill_buffer(SeqDemuxContext *seq, ByteIOContext *pb, int buffer_num, unsigned int data_offs, int data_size) { TiertexSeqFrameBuffer *seq_buffer; if (buffer_num >= SEQ_NUM_FRAME_BUFFERS) return AVERROR_INVALIDDATA; seq_buffer = &seq->frame_buffers[buffer_num]; if (seq_buffer->fill_size + data_size > seq_buffer->data_size) return AVERROR_INVALIDDATA; url_fseek(pb, seq->current_frame_offs + data_offs, SEEK_SET); if (get_buffer(pb, seq_buffer->data + seq_buffer->fill_size, data_size) != data_size) return AVERROR(EIO); seq_buffer->fill_size += data_size; return 0; }
22,948
0
static av_always_inline av_flatten void h264_loop_filter_luma_c(uint8_t *pix, int xstride, int ystride, int alpha, int beta, int8_t *tc0) { int i, d; for( i = 0; i < 4; i++ ) { if( tc0[i] < 0 ) { pix += 4*ystride; continue; } for( d = 0; d < 4; d++ ) { const int p0 = pix[-1*xstride]; const int p1 = pix[-2*xstride]; const int p2 = pix[-3*xstride]; const int q0 = pix[0]; const int q1 = pix[1*xstride]; const int q2 = pix[2*xstride]; if( FFABS( p0 - q0 ) < alpha && FFABS( p1 - p0 ) < beta && FFABS( q1 - q0 ) < beta ) { int tc = tc0[i]; int i_delta; if( FFABS( p2 - p0 ) < beta ) { if(tc0[i]) pix[-2*xstride] = p1 + av_clip( (( p2 + ( ( p0 + q0 + 1 ) >> 1 ) ) >> 1) - p1, -tc0[i], tc0[i] ); tc++; } if( FFABS( q2 - q0 ) < beta ) { if(tc0[i]) pix[ xstride] = q1 + av_clip( (( q2 + ( ( p0 + q0 + 1 ) >> 1 ) ) >> 1) - q1, -tc0[i], tc0[i] ); tc++; } i_delta = av_clip( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc ); pix[-xstride] = av_clip_uint8( p0 + i_delta ); /* p0' */ pix[0] = av_clip_uint8( q0 - i_delta ); /* q0' */ } pix += ystride; } } }
22,950
1
static int io_open_default(AVFormatContext *s, AVIOContext **pb, const char *url, int flags, AVDictionary **options) { return avio_open2(pb, url, flags, &s->interrupt_callback, options); }
22,951
1
static int read_huffman_tables(HYuvContext *s, const uint8_t *src, int length) { GetBitContext gb; int i; init_get_bits(&gb, src, length * 8); for (i = 0; i < 3; i++) { if (read_len_table(s->len[i], &gb) < 0) return -1; if (ff_huffyuv_generate_bits_table(s->bits[i], s->len[i]) < 0) return -1; ff_free_vlc(&s->vlc[i]); init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0); } generate_joint_tables(s); return (get_bits_count(&gb) + 7) / 8; }
22,952
1
int ff_pnm_decode_header(AVCodecContext *avctx, PNMContext * const s) { char buf1[32], tuple_type[32]; int h, w, depth, maxval; pnm_get(s, buf1, sizeof(buf1)); s->type= buf1[1]-'0'; if(buf1[0] != 'P') return -1; if (s->type==1 || s->type==4) { avctx->pix_fmt = PIX_FMT_MONOWHITE; } else if (s->type==2 || s->type==5) { if (avctx->codec_id == CODEC_ID_PGMYUV) avctx->pix_fmt = PIX_FMT_YUV420P; else avctx->pix_fmt = PIX_FMT_GRAY8; } else if (s->type==3 || s->type==6) { avctx->pix_fmt = PIX_FMT_RGB24; } else if (s->type==7) { w = -1; h = -1; maxval = -1; depth = -1; tuple_type[0] = '\0'; for (;;) { pnm_get(s, buf1, sizeof(buf1)); if (!strcmp(buf1, "WIDTH")) { pnm_get(s, buf1, sizeof(buf1)); w = strtol(buf1, NULL, 10); } else if (!strcmp(buf1, "HEIGHT")) { pnm_get(s, buf1, sizeof(buf1)); h = strtol(buf1, NULL, 10); } else if (!strcmp(buf1, "DEPTH")) { pnm_get(s, buf1, sizeof(buf1)); depth = strtol(buf1, NULL, 10); } else if (!strcmp(buf1, "MAXVAL")) { pnm_get(s, buf1, sizeof(buf1)); maxval = strtol(buf1, NULL, 10); } else if (!strcmp(buf1, "TUPLTYPE") || // FFmpeg used to write invalid files !strcmp(buf1, "TUPLETYPE")) { pnm_get(s, tuple_type, sizeof(tuple_type)); } else if (!strcmp(buf1, "ENDHDR")) { break; } else { return -1; } } /* check that all tags are present */ if (w <= 0 || h <= 0 || maxval <= 0 || depth <= 0 || tuple_type[0] == '\0' || av_image_check_size(w, h, 0, avctx)) return -1; avctx->width = w; avctx->height = h; if (depth == 1) { if (maxval == 1) avctx->pix_fmt = PIX_FMT_MONOWHITE; else avctx->pix_fmt = PIX_FMT_GRAY8; } else if (depth == 3) { if (maxval < 256) { avctx->pix_fmt = PIX_FMT_RGB24; } else { av_log(avctx, AV_LOG_ERROR, "16-bit components are only supported for grayscale\n"); avctx->pix_fmt = PIX_FMT_NONE; return -1; } } else if (depth == 4) { avctx->pix_fmt = PIX_FMT_RGB32; } else { return -1; } return 0; } else { return -1; } pnm_get(s, buf1, sizeof(buf1)); avctx->width = atoi(buf1); if (avctx->width <= 0) return -1; pnm_get(s, buf1, sizeof(buf1)); avctx->height = atoi(buf1); if(avctx->height <= 0 || av_image_check_size(avctx->width, avctx->height, 0, avctx)) return -1; if (avctx->pix_fmt != PIX_FMT_MONOWHITE) { pnm_get(s, buf1, sizeof(buf1)); s->maxval = atoi(buf1); if (s->maxval <= 0) { av_log(avctx, AV_LOG_ERROR, "Invalid maxval: %d\n", s->maxval); s->maxval = 255; } if (s->maxval >= 256) { if (avctx->pix_fmt == PIX_FMT_GRAY8) { avctx->pix_fmt = PIX_FMT_GRAY16BE; if (s->maxval != 65535) avctx->pix_fmt = PIX_FMT_GRAY16; } else if (avctx->pix_fmt == PIX_FMT_RGB24) { if (s->maxval > 255) avctx->pix_fmt = PIX_FMT_RGB48BE; } else { av_log(avctx, AV_LOG_ERROR, "Unsupported pixel format\n"); avctx->pix_fmt = PIX_FMT_NONE; return -1; } } }else s->maxval=1; /* more check if YUV420 */ if (avctx->pix_fmt == PIX_FMT_YUV420P) { if ((avctx->width & 1) != 0) return -1; h = (avctx->height * 2); if ((h % 3) != 0) return -1; h /= 3; avctx->height = h; } return 0; }
22,953
1
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, uintptr_t retaddr) { size_t mmu_idx = get_mmuidx(oi); size_t index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); CPUTLBEntry *tlbe = &env->tlb_table[mmu_idx][index]; target_ulong tlb_addr = tlbe->addr_write; TCGMemOp mop = get_memop(oi); int a_bits = get_alignment_bits(mop); int s_bits = mop & MO_SIZE; /* Adjust the given return address. */ retaddr -= GETPC_ADJ; /* Enforce guest required alignment. */ if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) { /* ??? Maybe indicate atomic op to cpu_unaligned_access */ cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr); } /* Enforce qemu required alignment. */ if (unlikely(addr & ((1 << s_bits) - 1))) { /* We get here if guest alignment was not requested, or was not enforced by cpu_unaligned_access above. We might widen the access and emulate, but for now mark an exception and exit the cpu loop. */ goto stop_the_world; } /* Check TLB entry and enforce page permissions. */ if ((addr & TARGET_PAGE_MASK) != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) { if (!VICTIM_TLB_HIT(addr_write, addr)) { tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr); } tlb_addr = tlbe->addr_write & ~TLB_INVALID_MASK; } /* Check notdirty */ if (unlikely(tlb_addr & TLB_NOTDIRTY)) { tlb_set_dirty(ENV_GET_CPU(env), addr); tlb_addr = tlb_addr & ~TLB_NOTDIRTY; } /* Notice an IO access */ if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { /* There's really nothing that can be done to support this apart from stop-the-world. */ goto stop_the_world; } /* Let the guest notice RMW on a write-only page. */ if (unlikely(tlbe->addr_read != tlb_addr)) { tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_LOAD, mmu_idx, retaddr); /* Since we don't support reads and writes to different addresses, and we do have the proper page loaded for write, this shouldn't ever return. But just in case, handle via stop-the-world. */ goto stop_the_world; } return (void *)((uintptr_t)addr + tlbe->addend); stop_the_world: cpu_loop_exit_atomic(ENV_GET_CPU(env), retaddr); }
22,960
0
static int decode_ref_pic_list_reordering(H264Context *h){ MpegEncContext * const s = &h->s; int list, index, pic_structure; print_short_term(h); print_long_term(h); if(h->slice_type==FF_I_TYPE || h->slice_type==FF_SI_TYPE) return 0; //FIXME move before func for(list=0; list<h->list_count; list++){ memcpy(h->ref_list[list], h->default_ref_list[list], sizeof(Picture)*h->ref_count[list]); if(get_bits1(&s->gb)){ int pred= h->curr_pic_num; for(index=0; ; index++){ unsigned int reordering_of_pic_nums_idc= get_ue_golomb(&s->gb); unsigned int pic_id; int i; Picture *ref = NULL; if(reordering_of_pic_nums_idc==3) break; if(index >= h->ref_count[list]){ av_log(h->s.avctx, AV_LOG_ERROR, "reference count overflow\n"); return -1; } if(reordering_of_pic_nums_idc<3){ if(reordering_of_pic_nums_idc<2){ const unsigned int abs_diff_pic_num= get_ue_golomb(&s->gb) + 1; int frame_num; if(abs_diff_pic_num > h->max_pic_num){ av_log(h->s.avctx, AV_LOG_ERROR, "abs_diff_pic_num overflow\n"); return -1; } if(reordering_of_pic_nums_idc == 0) pred-= abs_diff_pic_num; else pred+= abs_diff_pic_num; pred &= h->max_pic_num - 1; frame_num = pic_num_extract(h, pred, &pic_structure); for(i= h->short_ref_count-1; i>=0; i--){ ref = h->short_ref[i]; assert(ref->reference); assert(!ref->long_ref); if(ref->data[0] != NULL && ref->frame_num == frame_num && (ref->reference & pic_structure) && ref->long_ref == 0) // ignore non existing pictures by testing data[0] pointer break; } if(i>=0) ref->pic_id= pred; }else{ int long_idx; pic_id= get_ue_golomb(&s->gb); //long_term_pic_idx long_idx= pic_num_extract(h, pic_id, &pic_structure); if(long_idx>31){ av_log(h->s.avctx, AV_LOG_ERROR, "long_term_pic_idx overflow\n"); return -1; } ref = h->long_ref[long_idx]; assert(!(ref && !ref->reference)); if(ref && (ref->reference & pic_structure)){ ref->pic_id= pic_id; assert(ref->long_ref); i=0; }else{ i=-1; } } if (i < 0) { av_log(h->s.avctx, AV_LOG_ERROR, "reference picture missing during reorder\n"); memset(&h->ref_list[list][index], 0, sizeof(Picture)); //FIXME } else { for(i=index; i+1<h->ref_count[list]; i++){ if(ref->long_ref == h->ref_list[list][i].long_ref && ref->pic_id == h->ref_list[list][i].pic_id) break; } for(; i > index; i--){ h->ref_list[list][i]= h->ref_list[list][i-1]; } h->ref_list[list][index]= *ref; if (FIELD_PICTURE){ pic_as_field(&h->ref_list[list][index], pic_structure); } } }else{ av_log(h->s.avctx, AV_LOG_ERROR, "illegal reordering_of_pic_nums_idc\n"); return -1; } } } } for(list=0; list<h->list_count; list++){ for(index= 0; index < h->ref_count[list]; index++){ if(!h->ref_list[list][index].data[0]) h->ref_list[list][index]= s->current_picture; } } if(h->slice_type==FF_B_TYPE && !h->direct_spatial_mv_pred) direct_dist_scale_factor(h); direct_ref_list_init(h); return 0; }
22,961
0
static av_cold int X264_close(AVCodecContext *avctx) { X264Context *x4 = avctx->priv_data; av_freep(&avctx->extradata); av_freep(&x4->sei); if (x4->enc) { x264_encoder_close(x4->enc); x4->enc = NULL; } av_frame_free(&avctx->coded_frame); return 0; }
22,962
1
static int v9fs_receive_status(V9fsProxy *proxy, struct iovec *reply, int *status) { int retval; ProxyHeader header; *status = 0; reply->iov_len = 0; retval = socket_read(proxy->sockfd, reply->iov_base, PROXY_HDR_SZ); if (retval < 0) { return retval; } reply->iov_len = PROXY_HDR_SZ; proxy_unmarshal(reply, 0, "dd", &header.type, &header.size); if (header.size != sizeof(int)) { *status = -ENOBUFS; return 0; } retval = socket_read(proxy->sockfd, reply->iov_base + PROXY_HDR_SZ, header.size); if (retval < 0) { return retval; } reply->iov_len += header.size; proxy_unmarshal(reply, PROXY_HDR_SZ, "d", status); return 0; }
22,963
1
static target_long monitor_get_xer (const struct MonitorDef *md, int val) { CPUState *env = mon_get_cpu(); if (!env) return 0; return env->xer; }
22,964
1
static ssize_t qemu_fill_buffer(QEMUFile *f) { int len; int pending; assert(!qemu_file_is_writable(f)); pending = f->buf_size - f->buf_index; if (pending > 0) { memmove(f->buf, f->buf + f->buf_index, pending); } f->buf_index = 0; f->buf_size = pending; len = f->ops->get_buffer(f->opaque, f->buf + pending, f->pos, IO_BUF_SIZE - pending); if (len > 0) { f->buf_size += len; f->pos += len; } else if (len == 0) { qemu_file_set_error(f, -EIO); } else if (len != -EAGAIN) { qemu_file_set_error(f, len); } return len; }
22,965
1
static void unpack_vectors(Vp3DecodeContext *s, GetBitContext *gb) { int i, j, k; int coding_mode; int motion_x[6]; int motion_y[6]; int last_motion_x = 0; int last_motion_y = 0; int prior_last_motion_x = 0; int prior_last_motion_y = 0; int current_macroblock; int current_fragment; debug_vp3(" vp3: unpacking motion vectors\n"); if (s->keyframe) { debug_vp3(" keyframe-- there are no motion vectors\n"); } else { memset(motion_x, 0, 6 * sizeof(int)); memset(motion_y, 0, 6 * sizeof(int)); /* coding mode 0 is the VLC scheme; 1 is the fixed code scheme */ coding_mode = get_bits(gb, 1); debug_vectors(" using %s scheme for unpacking motion vectors\n", (coding_mode == 0) ? "VLC" : "fixed-length"); /* iterate through all of the macroblocks that contain 1 or more * coded fragments */ for (i = 0; i < s->u_superblock_start; i++) { for (j = 0; j < 4; j++) { current_macroblock = s->superblock_macroblocks[i * 4 + j]; if ((current_macroblock == -1) || (!s->macroblock_coded[current_macroblock])) continue; current_fragment = s->macroblock_fragments[current_macroblock * 6]; switch (s->all_fragments[current_fragment].coding_method) { case MODE_INTER_PLUS_MV: case MODE_GOLDEN_MV: /* all 6 fragments use the same motion vector */ if (coding_mode == 0) { motion_x[0] = get_motion_vector_vlc(gb); motion_y[0] = get_motion_vector_vlc(gb); } else { motion_x[0] = get_motion_vector_fixed(gb); motion_y[0] = get_motion_vector_fixed(gb); } for (k = 1; k < 6; k++) { motion_x[k] = motion_x[0]; motion_y[k] = motion_y[0]; } /* vector maintenance, only on MODE_INTER_PLUS_MV */ if (s->all_fragments[current_fragment].coding_method == MODE_INTER_PLUS_MV) { prior_last_motion_x = last_motion_x; prior_last_motion_y = last_motion_y; last_motion_x = motion_x[0]; last_motion_y = motion_y[0]; } break; case MODE_INTER_FOURMV: /* fetch 4 vectors from the bitstream, one for each * Y fragment, then average for the C fragment vectors */ motion_x[4] = motion_y[4] = 0; for (k = 0; k < 4; k++) { if (coding_mode == 0) { motion_x[k] = get_motion_vector_vlc(gb); motion_y[k] = get_motion_vector_vlc(gb); } else { motion_x[k] = get_motion_vector_fixed(gb); motion_y[k] = get_motion_vector_fixed(gb); } motion_x[4] += motion_x[k]; motion_y[4] += motion_y[k]; } if (motion_x[4] >= 0) motion_x[4] = (motion_x[4] + 2) / 4; else motion_x[4] = (motion_x[4] - 2) / 4; motion_x[5] = motion_x[4]; if (motion_y[4] >= 0) motion_y[4] = (motion_y[4] + 2) / 4; else motion_y[4] = (motion_y[4] - 2) / 4; motion_y[5] = motion_y[4]; /* vector maintenance; vector[3] is treated as the * last vector in this case */ prior_last_motion_x = last_motion_x; prior_last_motion_y = last_motion_y; last_motion_x = motion_x[3]; last_motion_y = motion_y[3]; break; case MODE_INTER_LAST_MV: /* all 6 fragments use the last motion vector */ motion_x[0] = last_motion_x; motion_y[0] = last_motion_y; for (k = 1; k < 6; k++) { motion_x[k] = motion_x[0]; motion_y[k] = motion_y[0]; } /* no vector maintenance (last vector remains the * last vector) */ break; case MODE_INTER_PRIOR_LAST: /* all 6 fragments use the motion vector prior to the * last motion vector */ motion_x[0] = prior_last_motion_x; motion_y[0] = prior_last_motion_y; for (k = 1; k < 6; k++) { motion_x[k] = motion_x[0]; motion_y[k] = motion_y[0]; } /* vector maintenance */ prior_last_motion_x = last_motion_x; prior_last_motion_y = last_motion_y; last_motion_x = motion_x[0]; last_motion_y = motion_y[0]; break; } /* assign the motion vectors to the correct fragments */ debug_vectors(" vectors for macroblock starting @ fragment %d (coding 
method %d):\n", current_fragment, s->all_fragments[current_fragment].coding_method); for (k = 0; k < 6; k++) { current_fragment = s->macroblock_fragments[current_macroblock * 6 + k]; s->all_fragments[current_fragment].motion_x = motion_x[k]; s->all_fragments[current_fragment].motion_x = motion_y[k]; debug_vectors(" vector %d: fragment %d = (%d, %d)\n", k, current_fragment, motion_x[k], motion_y[k]); } } } } }
22,967
1
static int imc_decode_block(AVCodecContext *avctx, IMCContext *q, int ch) { int stream_format_code; int imc_hdr, i, j, ret; int flag; int bits, summer; int counter, bitscount; IMCChannel *chctx = q->chctx + ch; /* Check the frame header */ imc_hdr = get_bits(&q->gb, 9); if (imc_hdr & 0x18) { av_log(avctx, AV_LOG_ERROR, "frame header check failed!\n"); av_log(avctx, AV_LOG_ERROR, "got %X.\n", imc_hdr); stream_format_code = get_bits(&q->gb, 3); if (stream_format_code & 1) { av_log_ask_for_sample(avctx, "Stream format %X is not supported\n", stream_format_code); return AVERROR_PATCHWELCOME; if (stream_format_code & 0x04) chctx->decoder_reset = 1; if (chctx->decoder_reset) { for (i = 0; i < BANDS; i++) chctx->old_floor[i] = 1.0; for (i = 0; i < COEFFS; i++) chctx->CWdecoded[i] = 0; chctx->decoder_reset = 0; flag = get_bits1(&q->gb); imc_read_level_coeffs(q, stream_format_code, chctx->levlCoeffBuf); if (stream_format_code & 0x4) imc_decode_level_coefficients(q, chctx->levlCoeffBuf, chctx->flcoeffs1, chctx->flcoeffs2); else imc_decode_level_coefficients2(q, chctx->levlCoeffBuf, chctx->old_floor, chctx->flcoeffs1, chctx->flcoeffs2); memcpy(chctx->old_floor, chctx->flcoeffs1, 32 * sizeof(float)); counter = 0; for (i = 0; i < BANDS; i++) { if (chctx->levlCoeffBuf[i] == 16) { chctx->bandWidthT[i] = 0; counter++; } else chctx->bandWidthT[i] = band_tab[i + 1] - band_tab[i]; memset(chctx->bandFlagsBuf, 0, BANDS * sizeof(int)); for (i = 0; i < BANDS - 1; i++) { if (chctx->bandWidthT[i]) chctx->bandFlagsBuf[i] = get_bits1(&q->gb); imc_calculate_coeffs(q, chctx->flcoeffs1, chctx->flcoeffs2, chctx->bandWidthT, chctx->flcoeffs3, chctx->flcoeffs5); bitscount = 0; /* first 4 bands will be assigned 5 bits per coefficient */ if (stream_format_code & 0x2) { bitscount += 15; chctx->bitsBandT[0] = 5; chctx->CWlengthT[0] = 5; chctx->CWlengthT[1] = 5; chctx->CWlengthT[2] = 5; for (i = 1; i < 4; i++) { bits = (chctx->levlCoeffBuf[i] == 16) ? 
0 : 5; chctx->bitsBandT[i] = bits; for (j = band_tab[i]; j < band_tab[i + 1]; j++) { chctx->CWlengthT[j] = bits; bitscount += bits; if (avctx->codec_id == AV_CODEC_ID_IAC) { bitscount += !!chctx->bandWidthT[BANDS - 1]; if (!(stream_format_code & 0x2)) bitscount += 16; if ((ret = bit_allocation(q, chctx, stream_format_code, 512 - bitscount - get_bits_count(&q->gb), flag)) < 0) { av_log(avctx, AV_LOG_ERROR, "Bit allocations failed\n"); chctx->decoder_reset = 1; return ret; for (i = 0; i < BANDS; i++) { chctx->sumLenArr[i] = 0; chctx->skipFlagRaw[i] = 0; for (j = band_tab[i]; j < band_tab[i + 1]; j++) chctx->sumLenArr[i] += chctx->CWlengthT[j]; if (chctx->bandFlagsBuf[i]) if ((((band_tab[i + 1] - band_tab[i]) * 1.5) > chctx->sumLenArr[i]) && (chctx->sumLenArr[i] > 0)) chctx->skipFlagRaw[i] = 1; imc_get_skip_coeff(q, chctx); for (i = 0; i < BANDS; i++) { chctx->flcoeffs6[i] = chctx->flcoeffs1[i]; /* band has flag set and at least one coded coefficient */ if (chctx->bandFlagsBuf[i] && (band_tab[i + 1] - band_tab[i]) != chctx->skipFlagCount[i]) { chctx->flcoeffs6[i] *= q->sqrt_tab[ band_tab[i + 1] - band_tab[i]] / q->sqrt_tab[(band_tab[i + 1] - band_tab[i] - chctx->skipFlagCount[i])]; /* calculate bits left, bits needed and adjust bit allocation */ bits = summer = 0; for (i = 0; i < BANDS; i++) { if (chctx->bandFlagsBuf[i]) { for (j = band_tab[i]; j < band_tab[i + 1]; j++) { if (chctx->skipFlags[j]) { summer += chctx->CWlengthT[j]; chctx->CWlengthT[j] = 0; bits += chctx->skipFlagBits[i]; summer -= chctx->skipFlagBits[i]; imc_adjust_bit_allocation(q, chctx, summer); for (i = 0; i < BANDS; i++) { chctx->sumLenArr[i] = 0; for (j = band_tab[i]; j < band_tab[i + 1]; j++) if (!chctx->skipFlags[j]) chctx->sumLenArr[i] += chctx->CWlengthT[j]; memset(chctx->codewords, 0, sizeof(chctx->codewords)); if (imc_get_coeffs(q, chctx) < 0) { av_log(avctx, AV_LOG_ERROR, "Read coefficients failed\n"); chctx->decoder_reset = 1; if (inverse_quant_coeff(q, chctx, stream_format_code) < 0) { av_log(avctx, AV_LOG_ERROR, "Inverse quantization of coefficients failed\n"); chctx->decoder_reset = 1; memset(chctx->skipFlags, 0, sizeof(chctx->skipFlags)); imc_imdct256(q, chctx, avctx->channels); return 0;
22,968
1
static int get_audio_frame_size(AVCodecContext *enc, int size) { int frame_size; if(enc->codec_id == CODEC_ID_VORBIS) return -1; if (enc->frame_size <= 1) { int bits_per_sample = av_get_bits_per_sample(enc->codec_id); if (bits_per_sample) { if (enc->channels == 0) return -1; frame_size = (size << 3) / (bits_per_sample * enc->channels); } else { /* used for example by ADPCM codecs */ if (enc->bit_rate == 0) return -1; frame_size = (size * 8 * enc->sample_rate) / enc->bit_rate; } } else { frame_size = enc->frame_size; } return frame_size; }
22,971
1
static void apply_dependent_coupling_fixed(AACContext *ac, SingleChannelElement *target, ChannelElement *cce, int index) { IndividualChannelStream *ics = &cce->ch[0].ics; const uint16_t *offsets = ics->swb_offset; int *dest = target->coeffs; const int *src = cce->ch[0].coeffs; int g, i, group, k, idx = 0; if (ac->oc[1].m4ac.object_type == AOT_AAC_LTP) { av_log(ac->avctx, AV_LOG_ERROR, "Dependent coupling is not supported together with LTP\n"); return; } for (g = 0; g < ics->num_window_groups; g++) { for (i = 0; i < ics->max_sfb; i++, idx++) { if (cce->ch[0].band_type[idx] != ZERO_BT) { const int gain = cce->coup.gain[index][idx]; int shift, round, c, tmp; if (gain < 0) { c = -cce_scale_fixed[-gain & 7]; shift = (-gain-1024) >> 3; } else { c = cce_scale_fixed[gain & 7]; shift = (gain-1024) >> 3; } if (shift < -31) { // Nothing to do } else if (shift < 0) { shift = -shift; round = 1 << (shift - 1); for (group = 0; group < ics->group_len[g]; group++) { for (k = offsets[i]; k < offsets[i + 1]; k++) { tmp = (int)(((int64_t)src[group * 128 + k] * c + \ (int64_t)0x1000000000) >> 37); dest[group * 128 + k] += (tmp + round) >> shift; } } } else { for (group = 0; group < ics->group_len[g]; group++) { for (k = offsets[i]; k < offsets[i + 1]; k++) { tmp = (int)(((int64_t)src[group * 128 + k] * c + \ (int64_t)0x1000000000) >> 37); dest[group * 128 + k] += tmp << shift; } } } } } dest += ics->group_len[g] * 128; src += ics->group_len[g] * 128; } }
22,972
1
build_dsdt(GArray *table_data, GArray *linker, AcpiPmInfo *pm, AcpiMiscInfo *misc, PcPciInfo *pci, MachineState *machine) { CrsRangeEntry *entry; Aml *dsdt, *sb_scope, *scope, *dev, *method, *field, *pkg, *crs; GPtrArray *mem_ranges = g_ptr_array_new_with_free_func(crs_range_free); GPtrArray *io_ranges = g_ptr_array_new_with_free_func(crs_range_free); PCMachineState *pcms = PC_MACHINE(machine); uint32_t nr_mem = machine->ram_slots; int root_bus_limit = 0xFF; PCIBus *bus = NULL; int i; dsdt = init_aml_allocator(); /* Reserve space for header */ acpi_data_push(dsdt->buf, sizeof(AcpiTableHeader)); build_dbg_aml(dsdt); if (misc->is_piix4) { sb_scope = aml_scope("_SB"); dev = aml_device("PCI0"); aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0A03"))); aml_append(dev, aml_name_decl("_ADR", aml_int(0))); aml_append(dev, aml_name_decl("_UID", aml_int(1))); aml_append(sb_scope, dev); aml_append(dsdt, sb_scope); build_hpet_aml(dsdt); build_piix4_pm(dsdt); build_piix4_isa_bridge(dsdt); build_isa_devices_aml(dsdt); build_piix4_pci_hotplug(dsdt); build_piix4_pci0_int(dsdt); } else { sb_scope = aml_scope("_SB"); aml_append(sb_scope, aml_operation_region("PCST", AML_SYSTEM_IO, aml_int(0xae00), 0x0c)); aml_append(sb_scope, aml_operation_region("PCSB", AML_SYSTEM_IO, aml_int(0xae0c), 0x01)); field = aml_field("PCSB", AML_ANY_ACC, AML_NOLOCK, AML_WRITE_AS_ZEROS); aml_append(field, aml_named_field("PCIB", 8)); aml_append(sb_scope, field); aml_append(dsdt, sb_scope); sb_scope = aml_scope("_SB"); dev = aml_device("PCI0"); aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0A08"))); aml_append(dev, aml_name_decl("_CID", aml_eisaid("PNP0A03"))); aml_append(dev, aml_name_decl("_ADR", aml_int(0))); aml_append(dev, aml_name_decl("_UID", aml_int(1))); aml_append(dev, aml_name_decl("SUPP", aml_int(0))); aml_append(dev, aml_name_decl("CTRL", aml_int(0))); aml_append(dev, build_q35_osc_method()); aml_append(sb_scope, dev); aml_append(dsdt, sb_scope); build_hpet_aml(dsdt); build_q35_isa_bridge(dsdt); build_isa_devices_aml(dsdt); build_q35_pci0_int(dsdt); build_cpu_hotplug_aml(dsdt); build_memory_hotplug_aml(dsdt, nr_mem, pm->mem_hp_io_base, pm->mem_hp_io_len); scope = aml_scope("_GPE"); { aml_append(scope, aml_name_decl("_HID", aml_string("ACPI0006"))); aml_append(scope, aml_method("_L00", 0, AML_NOTSERIALIZED)); if (misc->is_piix4) { method = aml_method("_E01", 0, AML_NOTSERIALIZED); aml_append(method, aml_acquire(aml_name("\\_SB.PCI0.BLCK"), 0xFFFF)); aml_append(method, aml_call0("\\_SB.PCI0.PCNT")); aml_append(method, aml_release(aml_name("\\_SB.PCI0.BLCK"))); aml_append(scope, method); } else { aml_append(scope, aml_method("_L01", 0, AML_NOTSERIALIZED)); method = aml_method("_E02", 0, AML_NOTSERIALIZED); aml_append(method, aml_call0("\\_SB." 
CPU_SCAN_METHOD)); aml_append(scope, method); method = aml_method("_E03", 0, AML_NOTSERIALIZED); aml_append(method, aml_call0(MEMORY_HOTPLUG_HANDLER_PATH)); aml_append(scope, method); aml_append(scope, aml_method("_L04", 0, AML_NOTSERIALIZED)); aml_append(scope, aml_method("_L05", 0, AML_NOTSERIALIZED)); aml_append(scope, aml_method("_L06", 0, AML_NOTSERIALIZED)); aml_append(scope, aml_method("_L07", 0, AML_NOTSERIALIZED)); aml_append(scope, aml_method("_L08", 0, AML_NOTSERIALIZED)); aml_append(scope, aml_method("_L09", 0, AML_NOTSERIALIZED)); aml_append(scope, aml_method("_L0A", 0, AML_NOTSERIALIZED)); aml_append(scope, aml_method("_L0B", 0, AML_NOTSERIALIZED)); aml_append(scope, aml_method("_L0C", 0, AML_NOTSERIALIZED)); aml_append(scope, aml_method("_L0D", 0, AML_NOTSERIALIZED)); aml_append(scope, aml_method("_L0E", 0, AML_NOTSERIALIZED)); aml_append(scope, aml_method("_L0F", 0, AML_NOTSERIALIZED)); aml_append(dsdt, scope); bus = PC_MACHINE(machine)->bus; if (bus) { QLIST_FOREACH(bus, &bus->child, sibling) { uint8_t bus_num = pci_bus_num(bus); uint8_t numa_node = pci_bus_numa_node(bus); /* look only for expander root buses */ if (!pci_bus_is_root(bus)) { continue; if (bus_num < root_bus_limit) { root_bus_limit = bus_num - 1; scope = aml_scope("\\_SB"); dev = aml_device("PC%.02X", bus_num); aml_append(dev, aml_name_decl("_UID", aml_int(bus_num))); aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0A03"))); aml_append(dev, aml_name_decl("_BBN", aml_int(bus_num))); if (numa_node != NUMA_NODE_UNASSIGNED) { aml_append(dev, aml_name_decl("_PXM", aml_int(numa_node))); aml_append(dev, build_prt(false)); crs = build_crs(PCI_HOST_BRIDGE(BUS(bus)->parent), io_ranges, mem_ranges); aml_append(dev, aml_name_decl("_CRS", crs)); aml_append(scope, dev); aml_append(dsdt, scope); scope = aml_scope("\\_SB.PCI0"); /* build PCI0._CRS */ crs = aml_resource_template(); aml_append(crs, aml_word_bus_number(AML_MIN_FIXED, AML_MAX_FIXED, AML_POS_DECODE, 0x0000, 0x0, root_bus_limit, 0x0000, root_bus_limit + 1)); aml_append(crs, aml_io(AML_DECODE16, 0x0CF8, 0x0CF8, 0x01, 0x08)); aml_append(crs, aml_word_io(AML_MIN_FIXED, AML_MAX_FIXED, AML_POS_DECODE, AML_ENTIRE_RANGE, 0x0000, 0x0000, 0x0CF7, 0x0000, 0x0CF8)); crs_replace_with_free_ranges(io_ranges, 0x0D00, 0xFFFF); for (i = 0; i < io_ranges->len; i++) { entry = g_ptr_array_index(io_ranges, i); aml_append(crs, aml_word_io(AML_MIN_FIXED, AML_MAX_FIXED, AML_POS_DECODE, AML_ENTIRE_RANGE, 0x0000, entry->base, entry->limit, 0x0000, entry->limit - entry->base + 1)); aml_append(crs, aml_dword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED, AML_CACHEABLE, AML_READ_WRITE, 0, 0x000A0000, 0x000BFFFF, 0, 0x00020000)); crs_replace_with_free_ranges(mem_ranges, pci->w32.begin, pci->w32.end - 1); for (i = 0; i < mem_ranges->len; i++) { entry = g_ptr_array_index(mem_ranges, i); aml_append(crs, aml_dword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED, AML_NON_CACHEABLE, AML_READ_WRITE, 0, entry->base, entry->limit, 0, entry->limit - entry->base + 1)); if (pci->w64.begin) { aml_append(crs, aml_qword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED, AML_CACHEABLE, AML_READ_WRITE, 0, pci->w64.begin, pci->w64.end - 1, 0, pci->w64.end - pci->w64.begin)); aml_append(scope, aml_name_decl("_CRS", crs)); /* reserve GPE0 block resources */ dev = aml_device("GPE0"); aml_append(dev, aml_name_decl("_HID", aml_string("PNP0A06"))); aml_append(dev, aml_name_decl("_UID", aml_string("GPE0 resources"))); /* device present, functioning, decoding, not shown in UI */ aml_append(dev, 
aml_name_decl("_STA", aml_int(0xB))); crs = aml_resource_template(); aml_append(crs, aml_io(AML_DECODE16, pm->gpe0_blk, pm->gpe0_blk, 1, pm->gpe0_blk_len) ); aml_append(dev, aml_name_decl("_CRS", crs)); aml_append(scope, dev); g_ptr_array_free(io_ranges, true); g_ptr_array_free(mem_ranges, true); /* reserve PCIHP resources */ if (pm->pcihp_io_len) { dev = aml_device("PHPR"); aml_append(dev, aml_name_decl("_HID", aml_string("PNP0A06"))); aml_append(dev, aml_name_decl("_UID", aml_string("PCI Hotplug resources"))); /* device present, functioning, decoding, not shown in UI */ aml_append(dev, aml_name_decl("_STA", aml_int(0xB))); crs = aml_resource_template(); aml_append(crs, aml_io(AML_DECODE16, pm->pcihp_io_base, pm->pcihp_io_base, 1, pm->pcihp_io_len) ); aml_append(dev, aml_name_decl("_CRS", crs)); aml_append(scope, dev); aml_append(dsdt, scope); /* create S3_ / S4_ / S5_ packages if necessary */ scope = aml_scope("\\"); if (!pm->s3_disabled) { pkg = aml_package(4); aml_append(pkg, aml_int(1)); /* PM1a_CNT.SLP_TYP */ aml_append(pkg, aml_int(1)); /* PM1b_CNT.SLP_TYP, FIXME: not impl. */ aml_append(pkg, aml_int(0)); /* reserved */ aml_append(pkg, aml_int(0)); /* reserved */ aml_append(scope, aml_name_decl("_S3", pkg)); if (!pm->s4_disabled) { pkg = aml_package(4); aml_append(pkg, aml_int(pm->s4_val)); /* PM1a_CNT.SLP_TYP */ /* PM1b_CNT.SLP_TYP, FIXME: not impl. */ aml_append(pkg, aml_int(pm->s4_val)); aml_append(pkg, aml_int(0)); /* reserved */ aml_append(pkg, aml_int(0)); /* reserved */ aml_append(scope, aml_name_decl("_S4", pkg)); pkg = aml_package(4); aml_append(pkg, aml_int(0)); /* PM1a_CNT.SLP_TYP */ aml_append(pkg, aml_int(0)); /* PM1b_CNT.SLP_TYP not impl. */ aml_append(pkg, aml_int(0)); /* reserved */ aml_append(pkg, aml_int(0)); /* reserved */ aml_append(scope, aml_name_decl("_S5", pkg)); aml_append(dsdt, scope); /* create fw_cfg node, unconditionally */ { /* when using port i/o, the 8-bit data register *always* overlaps * with half of the 16-bit control register. Hence, the total size * of the i/o region used is FW_CFG_CTL_SIZE; when using DMA, the * DMA control register is located at FW_CFG_DMA_IO_BASE + 4 */ uint8_t io_size = object_property_get_bool(OBJECT(pcms->fw_cfg), "dma_enabled", NULL) ? 
ROUND_UP(FW_CFG_CTL_SIZE, 4) + sizeof(dma_addr_t) : FW_CFG_CTL_SIZE; scope = aml_scope("\\_SB.PCI0"); dev = aml_device("FWCF"); aml_append(dev, aml_name_decl("_HID", aml_string("QEMU0002"))); /* device present, functioning, decoding, not shown in UI */ aml_append(dev, aml_name_decl("_STA", aml_int(0xB))); crs = aml_resource_template(); aml_append(crs, aml_io(AML_DECODE16, FW_CFG_IO_BASE, FW_CFG_IO_BASE, 0x01, io_size) ); aml_append(dev, aml_name_decl("_CRS", crs)); aml_append(scope, dev); aml_append(dsdt, scope); if (misc->applesmc_io_base) { scope = aml_scope("\\_SB.PCI0.ISA"); dev = aml_device("SMC"); aml_append(dev, aml_name_decl("_HID", aml_eisaid("APP0001"))); /* device present, functioning, decoding, not shown in UI */ aml_append(dev, aml_name_decl("_STA", aml_int(0xB))); crs = aml_resource_template(); aml_append(crs, aml_io(AML_DECODE16, misc->applesmc_io_base, misc->applesmc_io_base, 0x01, APPLESMC_MAX_DATA_LENGTH) ); aml_append(crs, aml_irq_no_flags(6)); aml_append(dev, aml_name_decl("_CRS", crs)); aml_append(scope, dev); aml_append(dsdt, scope); if (misc->pvpanic_port) { scope = aml_scope("\\_SB.PCI0.ISA"); dev = aml_device("PEVT"); aml_append(dev, aml_name_decl("_HID", aml_string("QEMU0001"))); crs = aml_resource_template(); aml_append(crs, aml_io(AML_DECODE16, misc->pvpanic_port, misc->pvpanic_port, 1, 1) ); aml_append(dev, aml_name_decl("_CRS", crs)); aml_append(dev, aml_operation_region("PEOR", AML_SYSTEM_IO, aml_int(misc->pvpanic_port), 1)); field = aml_field("PEOR", AML_BYTE_ACC, AML_NOLOCK, AML_PRESERVE); aml_append(field, aml_named_field("PEPT", 8)); aml_append(dev, field); /* device present, functioning, decoding, shown in UI */ aml_append(dev, aml_name_decl("_STA", aml_int(0xF))); method = aml_method("RDPT", 0, AML_NOTSERIALIZED); aml_append(method, aml_store(aml_name("PEPT"), aml_local(0))); aml_append(method, aml_return(aml_local(0))); aml_append(dev, method); method = aml_method("WRPT", 1, AML_NOTSERIALIZED); aml_append(method, aml_store(aml_arg(0), aml_name("PEPT"))); aml_append(dev, method); aml_append(scope, dev); aml_append(dsdt, scope); sb_scope = aml_scope("\\_SB"); { build_processor_devices(sb_scope, machine, pm); build_memory_devices(sb_scope, nr_mem, pm->mem_hp_io_base, pm->mem_hp_io_len); { Object *pci_host; PCIBus *bus = NULL; pci_host = acpi_get_i386_pci_host(); if (pci_host) { bus = PCI_HOST_BRIDGE(pci_host)->bus; if (bus) { Aml *scope = aml_scope("PCI0"); /* Scan all PCI buses. Generate tables to support hotplug. */ build_append_pci_bus_devices(scope, bus, pm->pcihp_bridge_en); dev = aml_device("ISA.TPM"); aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0C31"))); aml_append(dev, aml_name_decl("_STA", aml_int(0xF))); crs = aml_resource_template(); aml_append(crs, aml_irq_no_flags(TPM_TIS_IRQ)); aml_append(dev, aml_name_decl("_CRS", crs)); aml_append(scope, dev); aml_append(sb_scope, scope); aml_append(dsdt, sb_scope); /* copy AML table into ACPI tables blob and patch header there */ g_array_append_vals(table_data, dsdt->buf->data, dsdt->buf->len); build_header(linker, table_data, (void *)(table_data->data + table_data->len - dsdt->buf->len), "DSDT", dsdt->buf->len, 1, NULL, NULL); free_aml_allocator();
22,973
1
static int copy_metadata(char *outspec, char *inspec, AVFormatContext *oc, AVFormatContext *ic, OptionsContext *o) { AVDictionary **meta_in = NULL; AVDictionary **meta_out; int i, ret = 0; char type_in, type_out; const char *istream_spec = NULL, *ostream_spec = NULL; int idx_in = 0, idx_out = 0; parse_meta_type(inspec, &type_in, &idx_in, &istream_spec); parse_meta_type(outspec, &type_out, &idx_out, &ostream_spec); if (type_in == 'g' || type_out == 'g') o->metadata_global_manual = 1; if (type_in == 's' || type_out == 's') o->metadata_streams_manual = 1; if (type_in == 'c' || type_out == 'c') o->metadata_chapters_manual = 1; #define METADATA_CHECK_INDEX(index, nb_elems, desc)\ if ((index) < 0 || (index) >= (nb_elems)) {\ av_log(NULL, AV_LOG_FATAL, "Invalid %s index %d while processing metadata maps.\n",\ (desc), (index));\ exit_program(1);\ } #define SET_DICT(type, meta, context, index)\ switch (type) {\ case 'g':\ meta = &context->metadata;\ break;\ case 'c':\ METADATA_CHECK_INDEX(index, context->nb_chapters, "chapter")\ meta = &context->chapters[index]->metadata;\ break;\ case 'p':\ METADATA_CHECK_INDEX(index, context->nb_programs, "program")\ meta = &context->programs[index]->metadata;\ break;\ }\ SET_DICT(type_in, meta_in, ic, idx_in); SET_DICT(type_out, meta_out, oc, idx_out); /* for input streams choose first matching stream */ if (type_in == 's') { for (i = 0; i < ic->nb_streams; i++) { if ((ret = check_stream_specifier(ic, ic->streams[i], istream_spec)) > 0) { meta_in = &ic->streams[i]->metadata; break; } else if (ret < 0) exit_program(1); } if (!meta_in) { av_log(NULL, AV_LOG_FATAL, "Stream specifier %s does not match any streams.\n", istream_spec); exit_program(1); } } if (type_out == 's') { for (i = 0; i < oc->nb_streams; i++) { if ((ret = check_stream_specifier(oc, oc->streams[i], ostream_spec)) > 0) { meta_out = &oc->streams[i]->metadata; av_dict_copy(meta_out, *meta_in, AV_DICT_DONT_OVERWRITE); } else if (ret < 0) exit_program(1); } } else av_dict_copy(meta_out, *meta_in, AV_DICT_DONT_OVERWRITE); return 0; }
22,974
1
static uint32_t rtl8139_io_readw(void *opaque, uint8_t addr) { RTL8139State *s = opaque; uint32_t ret; switch (addr) { case TxAddr0 ... TxAddr0+4*4-1: ret = rtl8139_TxStatus_read(s, addr, 2); break; case IntrMask: ret = rtl8139_IntrMask_read(s); break; case IntrStatus: ret = rtl8139_IntrStatus_read(s); break; case MultiIntr: ret = rtl8139_MultiIntr_read(s); break; case RxBufPtr: ret = rtl8139_RxBufPtr_read(s); break; case RxBufAddr: ret = rtl8139_RxBufAddr_read(s); break; case BasicModeCtrl: ret = rtl8139_BasicModeCtrl_read(s); break; case BasicModeStatus: ret = rtl8139_BasicModeStatus_read(s); break; case NWayAdvert: ret = s->NWayAdvert; DPRINTF("NWayAdvert read(w) val=0x%04x\n", ret); break; case NWayLPAR: ret = s->NWayLPAR; DPRINTF("NWayLPAR read(w) val=0x%04x\n", ret); break; case NWayExpansion: ret = s->NWayExpansion; DPRINTF("NWayExpansion read(w) val=0x%04x\n", ret); break; case CpCmd: ret = rtl8139_CpCmd_read(s); break; case IntrMitigate: ret = rtl8139_IntrMitigate_read(s); break; case TxSummary: ret = rtl8139_TSAD_read(s); break; case CSCR: ret = rtl8139_CSCR_read(s); break; default: DPRINTF("ioport read(w) addr=0x%x via read(b)\n", addr); ret = rtl8139_io_readb(opaque, addr); ret |= rtl8139_io_readb(opaque, addr + 1) << 8; DPRINTF("ioport read(w) addr=0x%x val=0x%04x\n", addr, ret); break; } return ret; }
22,975
1
static av_always_inline void paint_raw(uint8_t *dst, int w, int h, const uint8_t *src, int bpp, int be, int stride) { int i, j, p; for (j = 0; j < h; j++) { for (i = 0; i < w; i++) { p = vmnc_get_pixel(src, bpp, be); src += bpp; switch (bpp) { case 1: dst[i] = p; break; case 2: ((uint16_t*)dst)[i] = p; break; case 4: ((uint32_t*)dst)[i] = p; break; } } dst += stride; } }
22,977