Dataset schema (one record per row below):

    id         int32    values range 0 – 27.3k
    func       string   26 – 142k characters (a complete C function)
    target     bool     2 classes
    project    string   2 values (qemu, FFmpeg)
    commit_id  string   always 40 characters (a full git commit hash)
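Each record below lists the row id, the func value (a complete C function stored as a single string), the target label, the project it was taken from, and the commit_id of the revision the function is tied to. A minimal sketch of loading such a dump for analysis, assuming the rows have been exported as JSON Lines to a file named functions.jsonl (both the file name and the export format are assumptions, not part of this listing):

    import json
    from collections import Counter

    # Load one record per line; each record carries the five fields
    # described in the schema above (id, func, target, project, commit_id).
    records = []
    with open("functions.jsonl", encoding="utf-8") as fh:
        for line in fh:
            records.append(json.loads(line))

    # Tally rows per (project, target) pair to see how the two classes
    # are distributed across qemu and FFmpeg.
    print(Counter((r["project"], r["target"]) for r in records))

    # Split by label so the two classes can be inspected side by side.
    flagged = [r for r in records if r["target"]]
    unflagged = [r for r in records if not r["target"]]
    print(len(flagged), "target=true,", len(unflagged), "target=false")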
6,834
uint32_t HELPER(mvcl)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    uintptr_t ra = GETPC();
    uint64_t destlen = env->regs[r1 + 1] & 0xffffff;
    uint64_t dest = get_address(env, r1);
    uint64_t srclen = env->regs[r2 + 1] & 0xffffff;
    uint64_t src = get_address(env, r2);
    uint8_t pad = env->regs[r2 + 1] >> 24;
    uint8_t v;
    uint32_t cc;

    if (destlen == srclen) {
        cc = 0;
    } else if (destlen < srclen) {
        cc = 1;
    } else {
        cc = 2;
    }

    if (srclen > destlen) {
        srclen = destlen;
    }

    for (; destlen && srclen; src++, dest++, destlen--, srclen--) {
        v = cpu_ldub_data_ra(env, src, ra);
        cpu_stb_data_ra(env, dest, v, ra);
    }

    for (; destlen; dest++, destlen--) {
        cpu_stb_data_ra(env, dest, pad, ra);
    }

    env->regs[r1 + 1] = destlen;   /* can't use srclen here, we trunc'ed it */
    env->regs[r2 + 1] -= src - env->regs[r2];
    set_address(env, r1, dest);
    set_address(env, r2, src);

    return cc;
}
false
qemu
d33271213437ed1834b0a50540d79e877e1cd894
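The commit_id column names the upstream revision each function is tied to, so a record can be cross-checked against the project's history. A hypothetical sketch for record 6,834, assuming qemu's official GitLab mirror URL (the URL and the use of a bare clone are assumptions):

    import subprocess

    # Clone the project history once (bare clone: no working tree needed),
    # then show the commit the first record points at.
    subprocess.run(["git", "clone", "--bare",
                    "https://gitlab.com/qemu-project/qemu.git", "qemu.git"],
                   check=True)
    show = subprocess.run(["git", "-C", "qemu.git", "show", "--stat",
                           "d33271213437ed1834b0a50540d79e877e1cd894"],
                          check=True, capture_output=True, text=True)
    print(show.stdout)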
6,836
static int do_compress_ram_page(CompressParam *param)
{
    int bytes_sent, blen;
    uint8_t *p;
    RAMBlock *block = param->block;
    ram_addr_t offset = param->offset;

    p = block->host + (offset & TARGET_PAGE_MASK);

    bytes_sent = save_page_header(param->file, block,
                                  offset | RAM_SAVE_FLAG_COMPRESS_PAGE);
    blen = qemu_put_compression_data(param->file, p, TARGET_PAGE_SIZE,
                                     migrate_compress_level());
    bytes_sent += blen;

    return bytes_sent;
}
false
qemu
b3be28969b797b27d7f7f806827e9898e4ee08f0
6,837
static int get_fw_cfg_order(FWCfgState *s, const char *name)
{
    int i;

    if (s->fw_cfg_order_override > 0) {
        return s->fw_cfg_order_override;
    }

    for (i = 0; i < ARRAY_SIZE(fw_cfg_order); i++) {
        if (fw_cfg_order[i].name == NULL) {
            continue;
        }
        if (strcmp(name, fw_cfg_order[i].name) == 0) {
            return fw_cfg_order[i].order;
        }
    }

    /* Stick unknown stuff at the end. */
    error_report("warning: Unknown firmware file in legacy mode: %s", name);
    return FW_CFG_ORDER_OVERRIDE_LAST;
}
false
qemu
3dc6f8693694a649a9c83f1e2746565b47683923
6,840
int qemu_strtol(const char *nptr, const char **endptr, int base, long *result)
{
    char *ep;
    int err = 0;

    if (!nptr) {
        if (endptr) {
            *endptr = nptr;
        }
        err = -EINVAL;
    } else {
        errno = 0;
        *result = strtol(nptr, &ep, base);
        err = check_strtox_error(nptr, ep, endptr, errno);
    }
    return err;
}
false
qemu
4baef2679e029c76707be1e2ed54bf3dd21693fe
6,841
void ff_sbr_apply(AACContext *ac, SpectralBandReplication *sbr, int id_aac,
                  float* L, float* R)
{
    int downsampled = ac->m4ac.ext_sample_rate < sbr->sample_rate;
    int ch;
    int nch = (id_aac == TYPE_CPE) ? 2 : 1;

    if (sbr->start) {
        sbr_dequant(sbr, id_aac);
    }
    for (ch = 0; ch < nch; ch++) {
        /* decode channel */
        sbr_qmf_analysis(&ac->dsp, &sbr->mdct_ana, ch ? R : L,
                         sbr->data[ch].analysis_filterbank_samples,
                         (float*)sbr->qmf_filter_scratch,
                         sbr->data[ch].W);
        sbr_lf_gen(ac, sbr, sbr->X_low, sbr->data[ch].W);
        if (sbr->start) {
            sbr_hf_inverse_filter(sbr->alpha0, sbr->alpha1, sbr->X_low, sbr->k[0]);
            sbr_chirp(sbr, &sbr->data[ch]);
            sbr_hf_gen(ac, sbr, sbr->X_high, sbr->X_low, sbr->alpha0, sbr->alpha1,
                       sbr->data[ch].bw_array, sbr->data[ch].t_env,
                       sbr->data[ch].bs_num_env);

            // hf_adj
            sbr_mapping(ac, sbr, &sbr->data[ch], sbr->data[ch].e_a);
            sbr_env_estimate(sbr->e_curr, sbr->X_high, sbr, &sbr->data[ch]);
            sbr_gain_calc(ac, sbr, &sbr->data[ch], sbr->data[ch].e_a);
            sbr_hf_assemble(sbr->data[ch].Y, sbr->X_high, sbr, &sbr->data[ch],
                            sbr->data[ch].e_a);
        }

        /* synthesis */
        sbr_x_gen(sbr, sbr->X[ch], sbr->X_low, sbr->data[ch].Y, ch);
    }

    if (ac->m4ac.ps == 1) {
        if (sbr->ps.start) {
            ff_ps_apply(ac->avctx, &sbr->ps, sbr->X[0], sbr->X[1],
                        sbr->kx[1] + sbr->m[1]);
        } else {
            memcpy(sbr->X[1], sbr->X[0], sizeof(sbr->X[0]));
        }
        nch = 2;
    }

    sbr_qmf_synthesis(&ac->dsp, &sbr->mdct, L, sbr->X[0],
                      sbr->qmf_filter_scratch,
                      sbr->data[0].synthesis_filterbank_samples,
                      &sbr->data[0].synthesis_filterbank_samples_offset,
                      downsampled);
    if (nch == 2)
        sbr_qmf_synthesis(&ac->dsp, &sbr->mdct, R, sbr->X[1],
                          sbr->qmf_filter_scratch,
                          sbr->data[1].synthesis_filterbank_samples,
                          &sbr->data[1].synthesis_filterbank_samples_offset,
                          downsampled);
}
false
FFmpeg
aac46e088d67a390489af686b846dea4987d8ffb
6,842
static void stellaris_enet_receive(void *opaque, const uint8_t *buf, size_t size)
{
    stellaris_enet_state *s = (stellaris_enet_state *)opaque;
    int n;
    uint8_t *p;
    uint32_t crc;

    if ((s->rctl & SE_RCTL_RXEN) == 0)
        return;
    if (s->np >= 31) {
        DPRINTF("Packet dropped\n");
        return;
    }

    DPRINTF("Received packet len=%d\n", size);
    n = s->next_packet + s->np;
    if (n >= 31)
        n -= 31;
    s->np++;

    s->rx[n].len = size + 6;
    p = s->rx[n].data;
    *(p++) = (size + 6);
    *(p++) = (size + 6) >> 8;
    memcpy (p, buf, size);
    p += size;
    crc = crc32(~0, buf, size);
    *(p++) = crc;
    *(p++) = crc >> 8;
    *(p++) = crc >> 16;
    *(p++) = crc >> 24;
    /* Clear the remaining bytes in the last word. */
    if ((size & 3) != 2) {
        memset(p, 0, (6 - size) & 3);
    }

    s->ris |= SE_INT_RX;
    stellaris_enet_update(s);
}
false
qemu
e3f5ec2b5e92706e3b807059f79b1fb5d936e567
6,843
static void virtio_blk_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->init = virtio_blk_init_pci;
    k->exit = virtio_blk_exit_pci;
    k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    k->device_id = PCI_DEVICE_ID_VIRTIO_BLOCK;
    k->revision = VIRTIO_PCI_ABI_VERSION;
    k->class_id = PCI_CLASS_STORAGE_SCSI;
    dc->alias = "virtio-blk";
    dc->reset = virtio_pci_reset;
    dc->props = virtio_blk_properties;
}
false
qemu
6acbe4c6f18e7de00481ff30574262b58526de45
6,845
static void qxl_dirty_surfaces(PCIQXLDevice *qxl)
{
    uintptr_t vram_start;
    int i;

    if (qxl->mode != QXL_MODE_NATIVE && qxl->mode != QXL_MODE_COMPAT) {
        return;
    }

    /* dirty the primary surface */
    qxl_set_dirty(&qxl->vga.vram, qxl->shadow_rom.draw_area_offset,
                  qxl->shadow_rom.surface0_area_size);

    vram_start = (uintptr_t)memory_region_get_ram_ptr(&qxl->vram_bar);

    /* dirty the off-screen surfaces */
    for (i = 0; i < qxl->ssd.num_surfaces; i++) {
        QXLSurfaceCmd *cmd;
        intptr_t surface_offset;
        int surface_size;

        if (qxl->guest_surfaces.cmds[i] == 0) {
            continue;
        }

        cmd = qxl_phys2virt(qxl, qxl->guest_surfaces.cmds[i],
                            MEMSLOT_GROUP_GUEST);
        assert(cmd);
        assert(cmd->type == QXL_SURFACE_CMD_CREATE);
        surface_offset = (intptr_t)qxl_phys2virt(qxl,
                                                 cmd->u.surface_create.data,
                                                 MEMSLOT_GROUP_GUEST);
        assert(surface_offset);
        surface_offset -= vram_start;
        surface_size = cmd->u.surface_create.height *
                       abs(cmd->u.surface_create.stride);
        trace_qxl_surfaces_dirty(qxl->id, i, (int)surface_offset, surface_size);
        qxl_set_dirty(&qxl->vram_bar, surface_offset, surface_size);
    }
}
false
qemu
1331eab216c9dc4e50a48a34d14926b31a7fd611
6,846
static void xics_reset(DeviceState *d)
{
    XICSState *icp = XICS(d);
    int i;

    for (i = 0; i < icp->nr_servers; i++) {
        device_reset(DEVICE(&icp->ss[i]));
    }

    device_reset(DEVICE(icp->ics));
}
false
qemu
5a3d7b23ba41b4884b43b6bc936ea18f999d5c6b
6,847
void spapr_drc_attach(sPAPRDRConnector *drc, DeviceState *d, void *fdt,
                      int fdt_start_offset, bool coldplug, Error **errp)
{
    trace_spapr_drc_attach(spapr_drc_index(drc));

    if (drc->isolation_state != SPAPR_DR_ISOLATION_STATE_ISOLATED) {
        error_setg(errp, "an attached device is still awaiting release");
        return;
    }
    if (spapr_drc_type(drc) == SPAPR_DR_CONNECTOR_TYPE_PCI) {
        g_assert(drc->allocation_state == SPAPR_DR_ALLOCATION_STATE_USABLE);
    }
    g_assert(fdt || coldplug);

    drc->dr_indicator = SPAPR_DR_INDICATOR_ACTIVE;
    drc->dev = d;
    drc->fdt = fdt;
    drc->fdt_start_offset = fdt_start_offset;
    drc->configured = coldplug;
    /* 'logical' DR resources such as memory/cpus are in some cases treated
     * as a pool of resources from which the guest is free to choose from
     * based on only a count. for resources that can be assigned in this
     * fashion, we must assume the resource is signalled immediately
     * since a single hotplug request might make an arbitrary number of
     * such attached resources available to the guest, as opposed to
     * 'physical' DR resources such as PCI where each device/resource is
     * signalled individually.
     */
    drc->signalled = (spapr_drc_type(drc) != SPAPR_DR_CONNECTOR_TYPE_PCI)
                     ? true : coldplug;

    if (spapr_drc_type(drc) != SPAPR_DR_CONNECTOR_TYPE_PCI) {
        drc->awaiting_allocation = true;
    }

    object_property_add_link(OBJECT(drc), "device",
                             object_get_typename(OBJECT(drc->dev)),
                             (Object **)(&drc->dev),
                             NULL, 0, NULL);
}
false
qemu
307b7715d0256c95444cada36a02882e46bada2f
6,848
static void openpic_src_write(void *opaque, hwaddr addr, uint64_t val,
                              unsigned len)
{
    OpenPICState *opp = opaque;
    int idx;

    DPRINTF("%s: addr %#" HWADDR_PRIx " <= %08" PRIx64 "\n",
            __func__, addr, val);

    if (addr & 0xF) {
        return;
    }

    addr = addr & 0xFFF0;
    idx = addr >> 5;
    if (addr & 0x10) {
        /* EXDE / IFEDE / IEEDE */
        write_IRQreg_idr(opp, idx, val);
    } else {
        /* EXVP / IFEVP / IEEVP */
        write_IRQreg_ivpr(opp, idx, val);
    }
}
false
qemu
e0dfe5b18919a6a4deb841dcf3212e3e998c95e5
6,849
static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception_internal(excp);
    s->is_jmp = DISAS_JUMP;
}
false
qemu
8a6b28c7b5104263344508df0f4bce97f22cfcaf
6,850
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new(TCG_TYPE_PTR);
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(s)
        break;
    case 4:
        tmp = tcg_temp_new(TCG_TYPE_PTR);
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(u)
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
false
qemu
a7812ae412311d7d47f8aa85656faadac9d64b56
6,851
static void test_io_rw_interface(enum AddrMode lba48, enum IOMode dma,
                                 unsigned bufsize, uint64_t sector)
{
    AHCIQState *ahci;

    ahci = ahci_boot_and_enable();
    ahci_test_io_rw_simple(ahci, bufsize, sector,
                           io_cmds[dma][lba48][IO_READ],
                           io_cmds[dma][lba48][IO_WRITE]);
    ahci_shutdown(ahci);
}
false
qemu
debaaa114a8877a939533ba846e64168fb287b7b
6,853
static void aio_write_done(void *opaque, int ret)
{
    struct aio_ctx *ctx = opaque;
    struct timeval t2;

    gettimeofday(&t2, NULL);

    if (ret < 0) {
        printf("aio_write failed: %s\n", strerror(-ret));
        goto out;
    }

    if (ctx->qflag) {
        goto out;
    }

    /* Finally, report back -- -C gives a parsable format */
    t2 = tsub(t2, ctx->t1);
    print_report("wrote", &t2, ctx->offset, ctx->qiov.size,
                 ctx->qiov.size, 1, ctx->Cflag);
out:
    qemu_io_free(ctx->buf);
    free(ctx);
}
false
qemu
031380d8770d2df6c386e4aeabd412007d3ebd54
6,854
uint64_t helper_fres (uint64_t arg)
{
    CPU_DoubleU fone, farg;
    float32 f32;

    fone.ll = 0x3FF0000000000000ULL; /* 1.0 */
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN reciprocal */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_zero(farg.d))) {
        /* Zero reciprocal */
        farg.ll = float_zero_divide_excp(fone.d, farg.d);
    } else {
        farg.d = float64_div(fone.d, farg.d, &env->fp_status);
        f32 = float64_to_float32(farg.d, &env->fp_status);
        farg.d = float32_to_float64(f32, &env->fp_status);
    }
    return farg.ll;
}
false
qemu
e33e94f92298c96e0928cefab00ea5bae0a1cd19
6,855
static uint64_t uart_read(void *opaque, target_phys_addr_t addr, unsigned size)
{
    LM32UartState *s = opaque;
    uint32_t r = 0;

    addr >>= 2;
    switch (addr) {
    case R_RXTX:
        r = s->regs[R_RXTX];
        s->regs[R_LSR] &= ~LSR_DR;
        uart_update_irq(s);
        break;
    case R_IIR:
    case R_LSR:
    case R_MSR:
        r = s->regs[addr];
        break;
    case R_IER:
    case R_LCR:
    case R_MCR:
    case R_DIV:
        error_report("lm32_uart: read access to write only register 0x"
                     TARGET_FMT_plx, addr << 2);
        break;
    default:
        error_report("lm32_uart: read access to unknown register 0x"
                     TARGET_FMT_plx, addr << 2);
        break;
    }

    trace_lm32_uart_memory_read(addr << 2, r);
    return r;
}
false
qemu
a8170e5e97ad17ca169c64ba87ae2f53850dab4c
6,857
static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1) { uint32_t insn, imm, shift, offset; uint32_t rd, rn, rm, rs; TCGv_i32 tmp; TCGv_i32 tmp2; TCGv_i32 tmp3; TCGv_i32 addr; TCGv_i64 tmp64; int op; int shiftop; int conds; int logic_cc; if (!(arm_dc_feature(s, ARM_FEATURE_THUMB2) || arm_dc_feature(s, ARM_FEATURE_M))) { /* Thumb-1 cores may need to treat bl and blx as a pair of 16-bit instructions to get correct prefetch abort behavior. */ insn = insn_hw1; if ((insn & (1 << 12)) == 0) { ARCH(5); /* Second half of blx. */ offset = ((insn & 0x7ff) << 1); tmp = load_reg(s, 14); tcg_gen_addi_i32(tmp, tmp, offset); tcg_gen_andi_i32(tmp, tmp, 0xfffffffc); tmp2 = tcg_temp_new_i32(); tcg_gen_movi_i32(tmp2, s->pc | 1); store_reg(s, 14, tmp2); gen_bx(s, tmp); return 0; } if (insn & (1 << 11)) { /* Second half of bl. */ offset = ((insn & 0x7ff) << 1) | 1; tmp = load_reg(s, 14); tcg_gen_addi_i32(tmp, tmp, offset); tmp2 = tcg_temp_new_i32(); tcg_gen_movi_i32(tmp2, s->pc | 1); store_reg(s, 14, tmp2); gen_bx(s, tmp); return 0; } if ((s->pc & ~TARGET_PAGE_MASK) == 0) { /* Instruction spans a page boundary. Implement it as two 16-bit instructions in case the second half causes an prefetch abort. */ offset = ((int32_t)insn << 21) >> 9; tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset); return 0; } /* Fall through to 32-bit decode. */ } insn = arm_lduw_code(env, s->pc, s->bswap_code); s->pc += 2; insn |= (uint32_t)insn_hw1 << 16; if ((insn & 0xf800e800) != 0xf000e800) { ARCH(6T2); } rn = (insn >> 16) & 0xf; rs = (insn >> 12) & 0xf; rd = (insn >> 8) & 0xf; rm = insn & 0xf; switch ((insn >> 25) & 0xf) { case 0: case 1: case 2: case 3: /* 16-bit instructions. Should never happen. */ abort(); case 4: if (insn & (1 << 22)) { /* Other load/store, table branch. */ if (insn & 0x01200000) { /* Load/store doubleword. */ if (rn == 15) { addr = tcg_temp_new_i32(); tcg_gen_movi_i32(addr, s->pc & ~3); } else { addr = load_reg(s, rn); } offset = (insn & 0xff) * 4; if ((insn & (1 << 23)) == 0) offset = -offset; if (insn & (1 << 24)) { tcg_gen_addi_i32(addr, addr, offset); offset = 0; } if (insn & (1 << 20)) { /* ldrd */ tmp = tcg_temp_new_i32(); gen_aa32_ld32u(tmp, addr, get_mem_index(s)); store_reg(s, rs, tmp); tcg_gen_addi_i32(addr, addr, 4); tmp = tcg_temp_new_i32(); gen_aa32_ld32u(tmp, addr, get_mem_index(s)); store_reg(s, rd, tmp); } else { /* strd */ tmp = load_reg(s, rs); gen_aa32_st32(tmp, addr, get_mem_index(s)); tcg_temp_free_i32(tmp); tcg_gen_addi_i32(addr, addr, 4); tmp = load_reg(s, rd); gen_aa32_st32(tmp, addr, get_mem_index(s)); tcg_temp_free_i32(tmp); } if (insn & (1 << 21)) { /* Base writeback. */ if (rn == 15) goto illegal_op; tcg_gen_addi_i32(addr, addr, offset - 4); store_reg(s, rn, addr); } else { tcg_temp_free_i32(addr); } } else if ((insn & (1 << 23)) == 0) { /* Load/store exclusive word. */ addr = tcg_temp_local_new_i32(); load_reg_var(s, addr, rn); tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2); if (insn & (1 << 20)) { gen_load_exclusive(s, rs, 15, addr, 2); } else { gen_store_exclusive(s, rd, rs, 15, addr, 2); } tcg_temp_free_i32(addr); } else if ((insn & (7 << 5)) == 0) { /* Table Branch. 
*/ if (rn == 15) { addr = tcg_temp_new_i32(); tcg_gen_movi_i32(addr, s->pc); } else { addr = load_reg(s, rn); } tmp = load_reg(s, rm); tcg_gen_add_i32(addr, addr, tmp); if (insn & (1 << 4)) { /* tbh */ tcg_gen_add_i32(addr, addr, tmp); tcg_temp_free_i32(tmp); tmp = tcg_temp_new_i32(); gen_aa32_ld16u(tmp, addr, get_mem_index(s)); } else { /* tbb */ tcg_temp_free_i32(tmp); tmp = tcg_temp_new_i32(); gen_aa32_ld8u(tmp, addr, get_mem_index(s)); } tcg_temp_free_i32(addr); tcg_gen_shli_i32(tmp, tmp, 1); tcg_gen_addi_i32(tmp, tmp, s->pc); store_reg(s, 15, tmp); } else { int op2 = (insn >> 6) & 0x3; op = (insn >> 4) & 0x3; switch (op2) { case 0: goto illegal_op; case 1: /* Load/store exclusive byte/halfword/doubleword */ if (op == 2) { goto illegal_op; } ARCH(7); break; case 2: /* Load-acquire/store-release */ if (op == 3) { goto illegal_op; } /* Fall through */ case 3: /* Load-acquire/store-release exclusive */ ARCH(8); break; } addr = tcg_temp_local_new_i32(); load_reg_var(s, addr, rn); if (!(op2 & 1)) { if (insn & (1 << 20)) { tmp = tcg_temp_new_i32(); switch (op) { case 0: /* ldab */ gen_aa32_ld8u(tmp, addr, get_mem_index(s)); break; case 1: /* ldah */ gen_aa32_ld16u(tmp, addr, get_mem_index(s)); break; case 2: /* lda */ gen_aa32_ld32u(tmp, addr, get_mem_index(s)); break; default: abort(); } store_reg(s, rs, tmp); } else { tmp = load_reg(s, rs); switch (op) { case 0: /* stlb */ gen_aa32_st8(tmp, addr, get_mem_index(s)); break; case 1: /* stlh */ gen_aa32_st16(tmp, addr, get_mem_index(s)); break; case 2: /* stl */ gen_aa32_st32(tmp, addr, get_mem_index(s)); break; default: abort(); } tcg_temp_free_i32(tmp); } } else if (insn & (1 << 20)) { gen_load_exclusive(s, rs, rd, addr, op); } else { gen_store_exclusive(s, rm, rs, rd, addr, op); } tcg_temp_free_i32(addr); } } else { /* Load/store multiple, RFE, SRS. */ if (((insn >> 23) & 1) == ((insn >> 24) & 1)) { /* RFE, SRS: not available in user mode or on M profile */ if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) { goto illegal_op; } if (insn & (1 << 20)) { /* rfe */ addr = load_reg(s, rn); if ((insn & (1 << 24)) == 0) tcg_gen_addi_i32(addr, addr, -8); /* Load PC into tmp and CPSR into tmp2. */ tmp = tcg_temp_new_i32(); gen_aa32_ld32u(tmp, addr, get_mem_index(s)); tcg_gen_addi_i32(addr, addr, 4); tmp2 = tcg_temp_new_i32(); gen_aa32_ld32u(tmp2, addr, get_mem_index(s)); if (insn & (1 << 21)) { /* Base writeback. */ if (insn & (1 << 24)) { tcg_gen_addi_i32(addr, addr, 4); } else { tcg_gen_addi_i32(addr, addr, -4); } store_reg(s, rn, addr); } else { tcg_temp_free_i32(addr); } gen_rfe(s, tmp, tmp2); } else { /* srs */ gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2, insn & (1 << 21)); } } else { int i, loaded_base = 0; TCGv_i32 loaded_var; /* Load/store multiple. */ addr = load_reg(s, rn); offset = 0; for (i = 0; i < 16; i++) { if (insn & (1 << i)) offset += 4; } if (insn & (1 << 24)) { tcg_gen_addi_i32(addr, addr, -offset); } TCGV_UNUSED_I32(loaded_var); for (i = 0; i < 16; i++) { if ((insn & (1 << i)) == 0) continue; if (insn & (1 << 20)) { /* Load. */ tmp = tcg_temp_new_i32(); gen_aa32_ld32u(tmp, addr, get_mem_index(s)); if (i == 15) { gen_bx(s, tmp); } else if (i == rn) { loaded_var = tmp; loaded_base = 1; } else { store_reg(s, i, tmp); } } else { /* Store. */ tmp = load_reg(s, i); gen_aa32_st32(tmp, addr, get_mem_index(s)); tcg_temp_free_i32(tmp); } tcg_gen_addi_i32(addr, addr, 4); } if (loaded_base) { store_reg(s, rn, loaded_var); } if (insn & (1 << 21)) { /* Base register writeback. 
*/ if (insn & (1 << 24)) { tcg_gen_addi_i32(addr, addr, -offset); } /* Fault if writeback register is in register list. */ if (insn & (1 << rn)) goto illegal_op; store_reg(s, rn, addr); } else { tcg_temp_free_i32(addr); } } } break; case 5: op = (insn >> 21) & 0xf; if (op == 6) { if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) { goto illegal_op; } /* Halfword pack. */ tmp = load_reg(s, rn); tmp2 = load_reg(s, rm); shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3); if (insn & (1 << 5)) { /* pkhtb */ if (shift == 0) shift = 31; tcg_gen_sari_i32(tmp2, tmp2, shift); tcg_gen_andi_i32(tmp, tmp, 0xffff0000); tcg_gen_ext16u_i32(tmp2, tmp2); } else { /* pkhbt */ if (shift) tcg_gen_shli_i32(tmp2, tmp2, shift); tcg_gen_ext16u_i32(tmp, tmp); tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000); } tcg_gen_or_i32(tmp, tmp, tmp2); tcg_temp_free_i32(tmp2); store_reg(s, rd, tmp); } else { /* Data processing register constant shift. */ if (rn == 15) { tmp = tcg_temp_new_i32(); tcg_gen_movi_i32(tmp, 0); } else { tmp = load_reg(s, rn); } tmp2 = load_reg(s, rm); shiftop = (insn >> 4) & 3; shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c); conds = (insn & (1 << 20)) != 0; logic_cc = (conds && thumb2_logic_op(op)); gen_arm_shift_im(tmp2, shiftop, shift, logic_cc); if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2)) goto illegal_op; tcg_temp_free_i32(tmp2); if (rd != 15) { store_reg(s, rd, tmp); } else { tcg_temp_free_i32(tmp); } } break; case 13: /* Misc data processing. */ op = ((insn >> 22) & 6) | ((insn >> 7) & 1); if (op < 4 && (insn & 0xf000) != 0xf000) goto illegal_op; switch (op) { case 0: /* Register controlled shift. */ tmp = load_reg(s, rn); tmp2 = load_reg(s, rm); if ((insn & 0x70) != 0) goto illegal_op; op = (insn >> 21) & 3; logic_cc = (insn & (1 << 20)) != 0; gen_arm_shift_reg(tmp, op, tmp2, logic_cc); if (logic_cc) gen_logic_CC(tmp); store_reg_bx(s, rd, tmp); break; case 1: /* Sign/zero extend. */ op = (insn >> 20) & 7; switch (op) { case 0: /* SXTAH, SXTH */ case 1: /* UXTAH, UXTH */ case 4: /* SXTAB, SXTB */ case 5: /* UXTAB, UXTB */ break; case 2: /* SXTAB16, SXTB16 */ case 3: /* UXTAB16, UXTB16 */ if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) { goto illegal_op; } break; default: goto illegal_op; } if (rn != 15) { if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) { goto illegal_op; } } tmp = load_reg(s, rm); shift = (insn >> 4) & 3; /* ??? In many cases it's not necessary to do a rotate, a shift is sufficient. */ if (shift != 0) tcg_gen_rotri_i32(tmp, tmp, shift * 8); op = (insn >> 20) & 7; switch (op) { case 0: gen_sxth(tmp); break; case 1: gen_uxth(tmp); break; case 2: gen_sxtb16(tmp); break; case 3: gen_uxtb16(tmp); break; case 4: gen_sxtb(tmp); break; case 5: gen_uxtb(tmp); break; default: g_assert_not_reached(); } if (rn != 15) { tmp2 = load_reg(s, rn); if ((op >> 1) == 1) { gen_add16(tmp, tmp2); } else { tcg_gen_add_i32(tmp, tmp, tmp2); tcg_temp_free_i32(tmp2); } } store_reg(s, rd, tmp); break; case 2: /* SIMD add/subtract. */ if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) { goto illegal_op; } op = (insn >> 20) & 7; shift = (insn >> 4) & 7; if ((op & 3) == 3 || (shift & 3) == 3) goto illegal_op; tmp = load_reg(s, rn); tmp2 = load_reg(s, rm); gen_thumb2_parallel_addsub(op, shift, tmp, tmp2); tcg_temp_free_i32(tmp2); store_reg(s, rd, tmp); break; case 3: /* Other data processing. */ op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7); if (op < 4) { /* Saturating add/subtract. 
*/ if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) { goto illegal_op; } tmp = load_reg(s, rn); tmp2 = load_reg(s, rm); if (op & 1) gen_helper_double_saturate(tmp, cpu_env, tmp); if (op & 2) gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp); else gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2); tcg_temp_free_i32(tmp2); } else { switch (op) { case 0x0a: /* rbit */ case 0x08: /* rev */ case 0x09: /* rev16 */ case 0x0b: /* revsh */ case 0x18: /* clz */ break; case 0x10: /* sel */ if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) { goto illegal_op; } break; case 0x20: /* crc32/crc32c */ case 0x21: case 0x22: case 0x28: case 0x29: case 0x2a: if (!arm_dc_feature(s, ARM_FEATURE_CRC)) { goto illegal_op; } break; default: goto illegal_op; } tmp = load_reg(s, rn); switch (op) { case 0x0a: /* rbit */ gen_helper_rbit(tmp, tmp); break; case 0x08: /* rev */ tcg_gen_bswap32_i32(tmp, tmp); break; case 0x09: /* rev16 */ gen_rev16(tmp); break; case 0x0b: /* revsh */ gen_revsh(tmp); break; case 0x10: /* sel */ tmp2 = load_reg(s, rm); tmp3 = tcg_temp_new_i32(); tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE)); gen_helper_sel_flags(tmp, tmp3, tmp, tmp2); tcg_temp_free_i32(tmp3); tcg_temp_free_i32(tmp2); break; case 0x18: /* clz */ gen_helper_clz(tmp, tmp); break; case 0x20: case 0x21: case 0x22: case 0x28: case 0x29: case 0x2a: { /* crc32/crc32c */ uint32_t sz = op & 0x3; uint32_t c = op & 0x8; tmp2 = load_reg(s, rm); if (sz == 0) { tcg_gen_andi_i32(tmp2, tmp2, 0xff); } else if (sz == 1) { tcg_gen_andi_i32(tmp2, tmp2, 0xffff); } tmp3 = tcg_const_i32(1 << sz); if (c) { gen_helper_crc32c(tmp, tmp, tmp2, tmp3); } else { gen_helper_crc32(tmp, tmp, tmp2, tmp3); } tcg_temp_free_i32(tmp2); tcg_temp_free_i32(tmp3); break; } default: g_assert_not_reached(); } } store_reg(s, rd, tmp); break; case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */ switch ((insn >> 20) & 7) { case 0: /* 32 x 32 -> 32 */ case 7: /* Unsigned sum of absolute differences. */ break; case 1: /* 16 x 16 -> 32 */ case 2: /* Dual multiply add. */ case 3: /* 32 * 16 -> 32msb */ case 4: /* Dual multiply subtract. */ case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */ if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) { goto illegal_op; } break; } op = (insn >> 4) & 0xf; tmp = load_reg(s, rn); tmp2 = load_reg(s, rm); switch ((insn >> 20) & 7) { case 0: /* 32 x 32 -> 32 */ tcg_gen_mul_i32(tmp, tmp, tmp2); tcg_temp_free_i32(tmp2); if (rs != 15) { tmp2 = load_reg(s, rs); if (op) tcg_gen_sub_i32(tmp, tmp2, tmp); else tcg_gen_add_i32(tmp, tmp, tmp2); tcg_temp_free_i32(tmp2); } break; case 1: /* 16 x 16 -> 32 */ gen_mulxy(tmp, tmp2, op & 2, op & 1); tcg_temp_free_i32(tmp2); if (rs != 15) { tmp2 = load_reg(s, rs); gen_helper_add_setq(tmp, cpu_env, tmp, tmp2); tcg_temp_free_i32(tmp2); } break; case 2: /* Dual multiply add. */ case 4: /* Dual multiply subtract. */ if (op) gen_swap_half(tmp2); gen_smul_dual(tmp, tmp2); if (insn & (1 << 22)) { /* This subtraction cannot overflow. */ tcg_gen_sub_i32(tmp, tmp, tmp2); } else { /* This addition cannot overflow 32 bits; * however it may overflow considered as a signed * operation, in which case we must set the Q flag. 
*/ gen_helper_add_setq(tmp, cpu_env, tmp, tmp2); } tcg_temp_free_i32(tmp2); if (rs != 15) { tmp2 = load_reg(s, rs); gen_helper_add_setq(tmp, cpu_env, tmp, tmp2); tcg_temp_free_i32(tmp2); } break; case 3: /* 32 * 16 -> 32msb */ if (op) tcg_gen_sari_i32(tmp2, tmp2, 16); else gen_sxth(tmp2); tmp64 = gen_muls_i64_i32(tmp, tmp2); tcg_gen_shri_i64(tmp64, tmp64, 16); tmp = tcg_temp_new_i32(); tcg_gen_extrl_i64_i32(tmp, tmp64); tcg_temp_free_i64(tmp64); if (rs != 15) { tmp2 = load_reg(s, rs); gen_helper_add_setq(tmp, cpu_env, tmp, tmp2); tcg_temp_free_i32(tmp2); } break; case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */ tmp64 = gen_muls_i64_i32(tmp, tmp2); if (rs != 15) { tmp = load_reg(s, rs); if (insn & (1 << 20)) { tmp64 = gen_addq_msw(tmp64, tmp); } else { tmp64 = gen_subq_msw(tmp64, tmp); } } if (insn & (1 << 4)) { tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u); } tcg_gen_shri_i64(tmp64, tmp64, 32); tmp = tcg_temp_new_i32(); tcg_gen_extrl_i64_i32(tmp, tmp64); tcg_temp_free_i64(tmp64); break; case 7: /* Unsigned sum of absolute differences. */ gen_helper_usad8(tmp, tmp, tmp2); tcg_temp_free_i32(tmp2); if (rs != 15) { tmp2 = load_reg(s, rs); tcg_gen_add_i32(tmp, tmp, tmp2); tcg_temp_free_i32(tmp2); } break; } store_reg(s, rd, tmp); break; case 6: case 7: /* 64-bit multiply, Divide. */ op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70); tmp = load_reg(s, rn); tmp2 = load_reg(s, rm); if ((op & 0x50) == 0x10) { /* sdiv, udiv */ if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DIV)) { goto illegal_op; } if (op & 0x20) gen_helper_udiv(tmp, tmp, tmp2); else gen_helper_sdiv(tmp, tmp, tmp2); tcg_temp_free_i32(tmp2); store_reg(s, rd, tmp); } else if ((op & 0xe) == 0xc) { /* Dual multiply accumulate long. */ if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) { tcg_temp_free_i32(tmp); tcg_temp_free_i32(tmp2); goto illegal_op; } if (op & 1) gen_swap_half(tmp2); gen_smul_dual(tmp, tmp2); if (op & 0x10) { tcg_gen_sub_i32(tmp, tmp, tmp2); } else { tcg_gen_add_i32(tmp, tmp, tmp2); } tcg_temp_free_i32(tmp2); /* BUGFIX */ tmp64 = tcg_temp_new_i64(); tcg_gen_ext_i32_i64(tmp64, tmp); tcg_temp_free_i32(tmp); gen_addq(s, tmp64, rs, rd); gen_storeq_reg(s, rs, rd, tmp64); tcg_temp_free_i64(tmp64); } else { if (op & 0x20) { /* Unsigned 64-bit multiply */ tmp64 = gen_mulu_i64_i32(tmp, tmp2); } else { if (op & 8) { /* smlalxy */ if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) { tcg_temp_free_i32(tmp2); tcg_temp_free_i32(tmp); goto illegal_op; } gen_mulxy(tmp, tmp2, op & 2, op & 1); tcg_temp_free_i32(tmp2); tmp64 = tcg_temp_new_i64(); tcg_gen_ext_i32_i64(tmp64, tmp); tcg_temp_free_i32(tmp); } else { /* Signed 64-bit multiply */ tmp64 = gen_muls_i64_i32(tmp, tmp2); } } if (op & 4) { /* umaal */ if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) { tcg_temp_free_i64(tmp64); goto illegal_op; } gen_addq_lo(s, tmp64, rs); gen_addq_lo(s, tmp64, rd); } else if (op & 0x40) { /* 64-bit accumulate. */ gen_addq(s, tmp64, rs, rd); } gen_storeq_reg(s, rs, rd, tmp64); tcg_temp_free_i64(tmp64); } break; } break; case 6: case 7: case 14: case 15: /* Coprocessor. */ if (((insn >> 24) & 3) == 3) { /* Translate into the equivalent ARM encoding. */ insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28); if (disas_neon_data_insn(s, insn)) { goto illegal_op; } } else if (((insn >> 8) & 0xe) == 10) { if (disas_vfp_insn(s, insn)) { goto illegal_op; } } else { if (insn & (1 << 28)) goto illegal_op; if (disas_coproc_insn(s, insn)) { goto illegal_op; } } break; case 8: case 9: case 10: case 11: if (insn & (1 << 15)) { /* Branches, misc control. 
*/ if (insn & 0x5000) { /* Unconditional branch. */ /* signextend(hw1[10:0]) -> offset[:12]. */ offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff; /* hw1[10:0] -> offset[11:1]. */ offset |= (insn & 0x7ff) << 1; /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22] offset[24:22] already have the same value because of the sign extension above. */ offset ^= ((~insn) & (1 << 13)) << 10; offset ^= ((~insn) & (1 << 11)) << 11; if (insn & (1 << 14)) { /* Branch and link. */ tcg_gen_movi_i32(cpu_R[14], s->pc | 1); } offset += s->pc; if (insn & (1 << 12)) { /* b/bl */ gen_jmp(s, offset); } else { /* blx */ offset &= ~(uint32_t)2; /* thumb2 bx, no need to check */ gen_bx_im(s, offset); } } else if (((insn >> 23) & 7) == 7) { /* Misc control */ if (insn & (1 << 13)) goto illegal_op; if (insn & (1 << 26)) { if (!(insn & (1 << 20))) { /* Hypervisor call (v7) */ int imm16 = extract32(insn, 16, 4) << 12 | extract32(insn, 0, 12); ARCH(7); if (IS_USER(s)) { goto illegal_op; } gen_hvc(s, imm16); } else { /* Secure monitor call (v6+) */ ARCH(6K); if (IS_USER(s)) { goto illegal_op; } gen_smc(s); } } else { op = (insn >> 20) & 7; switch (op) { case 0: /* msr cpsr. */ if (arm_dc_feature(s, ARM_FEATURE_M)) { tmp = load_reg(s, rn); addr = tcg_const_i32(insn & 0xff); gen_helper_v7m_msr(cpu_env, addr, tmp); tcg_temp_free_i32(addr); tcg_temp_free_i32(tmp); gen_lookup_tb(s); break; } /* fall through */ case 1: /* msr spsr. */ if (arm_dc_feature(s, ARM_FEATURE_M)) { goto illegal_op; } tmp = load_reg(s, rn); if (gen_set_psr(s, msr_mask(s, (insn >> 8) & 0xf, op == 1), op == 1, tmp)) goto illegal_op; break; case 2: /* cps, nop-hint. */ if (((insn >> 8) & 7) == 0) { gen_nop_hint(s, insn & 0xff); } /* Implemented as NOP in user mode. */ if (IS_USER(s)) break; offset = 0; imm = 0; if (insn & (1 << 10)) { if (insn & (1 << 7)) offset |= CPSR_A; if (insn & (1 << 6)) offset |= CPSR_I; if (insn & (1 << 5)) offset |= CPSR_F; if (insn & (1 << 9)) imm = CPSR_A | CPSR_I | CPSR_F; } if (insn & (1 << 8)) { offset |= 0x1f; imm |= (insn & 0x1f); } if (offset) { gen_set_psr_im(s, offset, 0, imm); } break; case 3: /* Special control operations. */ ARCH(7); op = (insn >> 4) & 0xf; switch (op) { case 2: /* clrex */ gen_clrex(s); break; case 4: /* dsb */ case 5: /* dmb */ case 6: /* isb */ /* These execute as NOPs. */ break; default: goto illegal_op; } break; case 4: /* bxj */ /* Trivial implementation equivalent to bx. */ tmp = load_reg(s, rn); gen_bx(s, tmp); break; case 5: /* Exception return. */ if (IS_USER(s)) { goto illegal_op; } if (rn != 14 || rd != 15) { goto illegal_op; } tmp = load_reg(s, rn); tcg_gen_subi_i32(tmp, tmp, insn & 0xff); gen_exception_return(s, tmp); break; case 6: /* mrs cpsr. */ tmp = tcg_temp_new_i32(); if (arm_dc_feature(s, ARM_FEATURE_M)) { addr = tcg_const_i32(insn & 0xff); gen_helper_v7m_mrs(tmp, cpu_env, addr); tcg_temp_free_i32(addr); } else { gen_helper_cpsr_read(tmp, cpu_env); } store_reg(s, rd, tmp); break; case 7: /* mrs spsr. */ /* Not accessible in user mode. */ if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) { goto illegal_op; } tmp = load_cpu_field(spsr); store_reg(s, rd, tmp); break; } } } else { /* Conditional branch. */ op = (insn >> 22) & 0xf; /* Generate a conditional jump to next instruction. */ s->condlabel = gen_new_label(); arm_gen_test_cc(op ^ 1, s->condlabel); s->condjmp = 1; /* offset[11:1] = insn[10:0] */ offset = (insn & 0x7ff) << 1; /* offset[17:12] = insn[21:16]. */ offset |= (insn & 0x003f0000) >> 4; /* offset[31:20] = insn[26]. 
*/ offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11; /* offset[18] = insn[13]. */ offset |= (insn & (1 << 13)) << 5; /* offset[19] = insn[11]. */ offset |= (insn & (1 << 11)) << 8; /* jump to the offset */ gen_jmp(s, s->pc + offset); } } else { /* Data processing immediate. */ if (insn & (1 << 25)) { if (insn & (1 << 24)) { if (insn & (1 << 20)) goto illegal_op; /* Bitfield/Saturate. */ op = (insn >> 21) & 7; imm = insn & 0x1f; shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c); if (rn == 15) { tmp = tcg_temp_new_i32(); tcg_gen_movi_i32(tmp, 0); } else { tmp = load_reg(s, rn); } switch (op) { case 2: /* Signed bitfield extract. */ imm++; if (shift + imm > 32) goto illegal_op; if (imm < 32) gen_sbfx(tmp, shift, imm); break; case 6: /* Unsigned bitfield extract. */ imm++; if (shift + imm > 32) goto illegal_op; if (imm < 32) gen_ubfx(tmp, shift, (1u << imm) - 1); break; case 3: /* Bitfield insert/clear. */ if (imm < shift) goto illegal_op; imm = imm + 1 - shift; if (imm != 32) { tmp2 = load_reg(s, rd); tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm); tcg_temp_free_i32(tmp2); } break; case 7: goto illegal_op; default: /* Saturate. */ if (shift) { if (op & 1) tcg_gen_sari_i32(tmp, tmp, shift); else tcg_gen_shli_i32(tmp, tmp, shift); } tmp2 = tcg_const_i32(imm); if (op & 4) { /* Unsigned. */ if ((op & 1) && shift == 0) { if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) { tcg_temp_free_i32(tmp); tcg_temp_free_i32(tmp2); goto illegal_op; } gen_helper_usat16(tmp, cpu_env, tmp, tmp2); } else { gen_helper_usat(tmp, cpu_env, tmp, tmp2); } } else { /* Signed. */ if ((op & 1) && shift == 0) { if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) { tcg_temp_free_i32(tmp); tcg_temp_free_i32(tmp2); goto illegal_op; } gen_helper_ssat16(tmp, cpu_env, tmp, tmp2); } else { gen_helper_ssat(tmp, cpu_env, tmp, tmp2); } } tcg_temp_free_i32(tmp2); break; } store_reg(s, rd, tmp); } else { imm = ((insn & 0x04000000) >> 15) | ((insn & 0x7000) >> 4) | (insn & 0xff); if (insn & (1 << 22)) { /* 16-bit immediate. */ imm |= (insn >> 4) & 0xf000; if (insn & (1 << 23)) { /* movt */ tmp = load_reg(s, rd); tcg_gen_ext16u_i32(tmp, tmp); tcg_gen_ori_i32(tmp, tmp, imm << 16); } else { /* movw */ tmp = tcg_temp_new_i32(); tcg_gen_movi_i32(tmp, imm); } } else { /* Add/sub 12-bit immediate. */ if (rn == 15) { offset = s->pc & ~(uint32_t)3; if (insn & (1 << 23)) offset -= imm; else offset += imm; tmp = tcg_temp_new_i32(); tcg_gen_movi_i32(tmp, offset); } else { tmp = load_reg(s, rn); if (insn & (1 << 23)) tcg_gen_subi_i32(tmp, tmp, imm); else tcg_gen_addi_i32(tmp, tmp, imm); } } store_reg(s, rd, tmp); } } else { int shifter_out = 0; /* modified 12-bit immediate. */ shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12); imm = (insn & 0xff); switch (shift) { case 0: /* XY */ /* Nothing to do. */ break; case 1: /* 00XY00XY */ imm |= imm << 16; break; case 2: /* XY00XY00 */ imm |= imm << 16; imm <<= 8; break; case 3: /* XYXYXYXY */ imm |= imm << 16; imm |= imm << 8; break; default: /* Rotated constant. 
*/ shift = (shift << 1) | (imm >> 7); imm |= 0x80; imm = imm << (32 - shift); shifter_out = 1; break; } tmp2 = tcg_temp_new_i32(); tcg_gen_movi_i32(tmp2, imm); rn = (insn >> 16) & 0xf; if (rn == 15) { tmp = tcg_temp_new_i32(); tcg_gen_movi_i32(tmp, 0); } else { tmp = load_reg(s, rn); } op = (insn >> 21) & 0xf; if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0, shifter_out, tmp, tmp2)) goto illegal_op; tcg_temp_free_i32(tmp2); rd = (insn >> 8) & 0xf; if (rd != 15) { store_reg(s, rd, tmp); } else { tcg_temp_free_i32(tmp); } } } break; case 12: /* Load/store single data item. */ { int postinc = 0; int writeback = 0; int memidx; if ((insn & 0x01100000) == 0x01000000) { if (disas_neon_ls_insn(s, insn)) { goto illegal_op; } break; } op = ((insn >> 21) & 3) | ((insn >> 22) & 4); if (rs == 15) { if (!(insn & (1 << 20))) { goto illegal_op; } if (op != 2) { /* Byte or halfword load space with dest == r15 : memory hints. * Catch them early so we don't emit pointless addressing code. * This space is a mix of: * PLD/PLDW/PLI, which we implement as NOPs (note that unlike * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP * cores) * unallocated hints, which must be treated as NOPs * UNPREDICTABLE space, which we NOP or UNDEF depending on * which is easiest for the decoding logic * Some space which must UNDEF */ int op1 = (insn >> 23) & 3; int op2 = (insn >> 6) & 0x3f; if (op & 2) { goto illegal_op; } if (rn == 15) { /* UNPREDICTABLE, unallocated hint or * PLD/PLDW/PLI (literal) */ return 0; } if (op1 & 1) { return 0; /* PLD/PLDW/PLI or unallocated hint */ } if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) { return 0; /* PLD/PLDW/PLI or unallocated hint */ } /* UNDEF space, or an UNPREDICTABLE */ return 1; } } memidx = get_mem_index(s); if (rn == 15) { addr = tcg_temp_new_i32(); /* PC relative. */ /* s->pc has already been incremented by 4. */ imm = s->pc & 0xfffffffc; if (insn & (1 << 23)) imm += insn & 0xfff; else imm -= insn & 0xfff; tcg_gen_movi_i32(addr, imm); } else { addr = load_reg(s, rn); if (insn & (1 << 23)) { /* Positive offset. */ imm = insn & 0xfff; tcg_gen_addi_i32(addr, addr, imm); } else { imm = insn & 0xff; switch ((insn >> 8) & 0xf) { case 0x0: /* Shifted Register. */ shift = (insn >> 4) & 0xf; if (shift > 3) { tcg_temp_free_i32(addr); goto illegal_op; } tmp = load_reg(s, rm); if (shift) tcg_gen_shli_i32(tmp, tmp, shift); tcg_gen_add_i32(addr, addr, tmp); tcg_temp_free_i32(tmp); break; case 0xc: /* Negative offset. */ tcg_gen_addi_i32(addr, addr, -imm); break; case 0xe: /* User privilege. */ tcg_gen_addi_i32(addr, addr, imm); memidx = get_a32_user_mem_index(s); break; case 0x9: /* Post-decrement. */ imm = -imm; /* Fall through. */ case 0xb: /* Post-increment. */ postinc = 1; writeback = 1; break; case 0xd: /* Pre-decrement. */ imm = -imm; /* Fall through. */ case 0xf: /* Pre-increment. */ tcg_gen_addi_i32(addr, addr, imm); writeback = 1; break; default: tcg_temp_free_i32(addr); goto illegal_op; } } } if (insn & (1 << 20)) { /* Load. */ tmp = tcg_temp_new_i32(); switch (op) { case 0: gen_aa32_ld8u(tmp, addr, memidx); break; case 4: gen_aa32_ld8s(tmp, addr, memidx); break; case 1: gen_aa32_ld16u(tmp, addr, memidx); break; case 5: gen_aa32_ld16s(tmp, addr, memidx); break; case 2: gen_aa32_ld32u(tmp, addr, memidx); break; default: tcg_temp_free_i32(tmp); tcg_temp_free_i32(addr); goto illegal_op; } if (rs == 15) { gen_bx(s, tmp); } else { store_reg(s, rs, tmp); } } else { /* Store. 
*/ tmp = load_reg(s, rs); switch (op) { case 0: gen_aa32_st8(tmp, addr, memidx); break; case 1: gen_aa32_st16(tmp, addr, memidx); break; case 2: gen_aa32_st32(tmp, addr, memidx); break; default: tcg_temp_free_i32(tmp); tcg_temp_free_i32(addr); goto illegal_op; } tcg_temp_free_i32(tmp); } if (postinc) tcg_gen_addi_i32(addr, addr, imm); if (writeback) { store_reg(s, rn, addr); } else { tcg_temp_free_i32(addr); } } break; default: goto illegal_op; } return 0; illegal_op: return 1; }
false
qemu
6df99dec9e81838423d723996e96236693fa31fe
6,858
static QIOChannelSocket *nbd_establish_connection(SocketAddress *saddr_flat,
                                                  Error **errp)
{
    SocketAddressLegacy *saddr = socket_address_crumple(saddr_flat);
    QIOChannelSocket *sioc;
    Error *local_err = NULL;

    sioc = qio_channel_socket_new();
    qio_channel_set_name(QIO_CHANNEL(sioc), "nbd-client");

    qio_channel_socket_connect_sync(sioc, saddr, &local_err);
    qapi_free_SocketAddressLegacy(saddr);
    if (local_err) {
        object_unref(OBJECT(sioc));
        error_propagate(errp, local_err);
        return NULL;
    }

    qio_channel_set_delay(QIO_CHANNEL(sioc), false);
    return sioc;
}
false
qemu
bd269ebc82fbaa5fe7ce5bc7c1770ac8acecd884
6,859
void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip, int shift, int next_eip_addend) { int new_stack, i; uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count; uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask; uint32_t val, limit, old_sp_mask; target_ulong ssp, old_ssp, next_eip; next_eip = env->eip + next_eip_addend; LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift); LOG_PCALL_STATE(CPU(x86_env_get_cpu(env))); if ((new_cs & 0xfffc) == 0) { raise_exception_err(env, EXCP0D_GPF, 0); } if (load_segment(env, &e1, &e2, new_cs) != 0) { raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); } cpl = env->hflags & HF_CPL_MASK; LOG_PCALL("desc=%08x:%08x\n", e1, e2); if (e2 & DESC_S_MASK) { if (!(e2 & DESC_CS_MASK)) { raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); } dpl = (e2 >> DESC_DPL_SHIFT) & 3; if (e2 & DESC_C_MASK) { /* conforming code segment */ if (dpl > cpl) { raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); } } else { /* non conforming code segment */ rpl = new_cs & 3; if (rpl > cpl) { raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); } if (dpl != cpl) { raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); } } if (!(e2 & DESC_P_MASK)) { raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc); } #ifdef TARGET_X86_64 /* XXX: check 16/32 bit cases in long mode */ if (shift == 2) { target_ulong rsp; /* 64 bit case */ rsp = env->regs[R_ESP]; PUSHQ(rsp, env->segs[R_CS].selector); PUSHQ(rsp, next_eip); /* from this point, not restartable */ env->regs[R_ESP] = rsp; cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl, get_seg_base(e1, e2), get_seg_limit(e1, e2), e2); env->eip = new_eip; } else #endif { sp = env->regs[R_ESP]; sp_mask = get_sp_mask(env->segs[R_SS].flags); ssp = env->segs[R_SS].base; if (shift) { PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector); PUSHL(ssp, sp, sp_mask, next_eip); } else { PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector); PUSHW(ssp, sp, sp_mask, next_eip); } limit = get_seg_limit(e1, e2); if (new_eip > limit) { raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); } /* from this point, not restartable */ SET_ESP(sp, sp_mask); cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl, get_seg_base(e1, e2), limit, e2); env->eip = new_eip; } } else { /* check gate type */ type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; dpl = (e2 >> DESC_DPL_SHIFT) & 3; rpl = new_cs & 3; switch (type) { case 1: /* available 286 TSS */ case 9: /* available 386 TSS */ case 5: /* task gate */ if (dpl < cpl || dpl < rpl) { raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); } switch_tss(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip); CC_OP = CC_OP_EFLAGS; return; case 4: /* 286 call gate */ case 12: /* 386 call gate */ break; default: raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); break; } shift = type >> 3; if (dpl < cpl || dpl < rpl) { raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); } /* check valid bit */ if (!(e2 & DESC_P_MASK)) { raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc); } selector = e1 >> 16; offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff); param_count = e2 & 0x1f; if ((selector & 0xfffc) == 0) { raise_exception_err(env, EXCP0D_GPF, 0); } if (load_segment(env, &e1, &e2, selector) != 0) { raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); } if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) { raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); } dpl = (e2 >> DESC_DPL_SHIFT) & 3; if (dpl > cpl) { raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc); } if 
(!(e2 & DESC_P_MASK)) { raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc); } if (!(e2 & DESC_C_MASK) && dpl < cpl) { /* to inner privilege */ get_ss_esp_from_tss(env, &ss, &sp, dpl); LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]=" TARGET_FMT_lx "\n", ss, sp, param_count, env->regs[R_ESP]); if ((ss & 0xfffc) == 0) { raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); } if ((ss & 3) != dpl) { raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); } if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) { raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); } ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3; if (ss_dpl != dpl) { raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); } if (!(ss_e2 & DESC_S_MASK) || (ss_e2 & DESC_CS_MASK) || !(ss_e2 & DESC_W_MASK)) { raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); } if (!(ss_e2 & DESC_P_MASK)) { raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc); } /* push_size = ((param_count * 2) + 8) << shift; */ old_sp_mask = get_sp_mask(env->segs[R_SS].flags); old_ssp = env->segs[R_SS].base; sp_mask = get_sp_mask(ss_e2); ssp = get_seg_base(ss_e1, ss_e2); if (shift) { PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector); PUSHL(ssp, sp, sp_mask, env->regs[R_ESP]); for (i = param_count - 1; i >= 0; i--) { val = cpu_ldl_kernel(env, old_ssp + ((env->regs[R_ESP] + i * 4) & old_sp_mask)); PUSHL(ssp, sp, sp_mask, val); } } else { PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector); PUSHW(ssp, sp, sp_mask, env->regs[R_ESP]); for (i = param_count - 1; i >= 0; i--) { val = cpu_lduw_kernel(env, old_ssp + ((env->regs[R_ESP] + i * 2) & old_sp_mask)); PUSHW(ssp, sp, sp_mask, val); } } new_stack = 1; } else { /* to same privilege */ sp = env->regs[R_ESP]; sp_mask = get_sp_mask(env->segs[R_SS].flags); ssp = env->segs[R_SS].base; /* push_size = (4 << shift); */ new_stack = 0; } if (shift) { PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector); PUSHL(ssp, sp, sp_mask, next_eip); } else { PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector); PUSHW(ssp, sp, sp_mask, next_eip); } /* from this point, not restartable */ if (new_stack) { ss = (ss & ~3) | dpl; cpu_x86_load_seg_cache(env, R_SS, ss, ssp, get_seg_limit(ss_e1, ss_e2), ss_e2); } selector = (selector & ~3) | dpl; cpu_x86_load_seg_cache(env, R_CS, selector, get_seg_base(e1, e2), get_seg_limit(e1, e2), e2); cpu_x86_set_cpl(env, dpl); SET_ESP(sp, sp_mask); env->eip = offset; } }
false
qemu
7848c8d19f8556666df25044bbd5d8b29439c368
6,860
static void tile_codeblocks(Jpeg2000DecoderContext *s, Jpeg2000Tile *tile)
{
    Jpeg2000T1Context t1;

    int compno, reslevelno, bandno;

    /* Loop on tile components */
    for (compno = 0; compno < s->ncomponents; compno++) {
        Jpeg2000Component *comp     = tile->comp + compno;
        Jpeg2000CodingStyle *codsty = tile->codsty + compno;

        /* Loop on resolution levels */
        for (reslevelno = 0; reslevelno < codsty->nreslevels2decode; reslevelno++) {
            Jpeg2000ResLevel *rlevel = comp->reslevel + reslevelno;

            /* Loop on bands */
            for (bandno = 0; bandno < rlevel->nbands; bandno++) {
                uint16_t nb_precincts, precno;
                Jpeg2000Band *band = rlevel->band + bandno;
                int cblkno = 0, bandpos;

                bandpos = bandno + (reslevelno > 0);

                if (band->coord[0][0] == band->coord[0][1] ||
                    band->coord[1][0] == band->coord[1][1])
                    continue;

                nb_precincts = rlevel->num_precincts_x * rlevel->num_precincts_y;

                /* Loop on precincts */
                for (precno = 0; precno < nb_precincts; precno++) {
                    Jpeg2000Prec *prec = band->prec + precno;

                    /* Loop on codeblocks */
                    for (cblkno = 0;
                         cblkno < prec->nb_codeblocks_width * prec->nb_codeblocks_height;
                         cblkno++) {
                        int x, y;
                        Jpeg2000Cblk *cblk = prec->cblk + cblkno;
                        decode_cblk(s, codsty, &t1, cblk,
                                    cblk->coord[0][1] - cblk->coord[0][0],
                                    cblk->coord[1][1] - cblk->coord[1][0],
                                    bandpos);

                        x = cblk->coord[0][0];
                        y = cblk->coord[1][0];

                        if (codsty->transform == FF_DWT97)
                            dequantization_float(x, y, cblk, comp, &t1, band);
                        else
                            dequantization_int(x, y, cblk, comp, &t1, band);
                    } /* end cblk */
                } /*end prec */
            } /* end band */
        } /* end reslevel */

        /* inverse DWT */
        ff_dwt_decode(&comp->dwt, codsty->transform == FF_DWT97
                                  ? (void*)comp->f_data : (void*)comp->i_data);
    } /*end comp */
}
false
FFmpeg
41bcc3d15204f290400ba02e4e8f87fc07bcc00e
6,861
static PCIDevice *do_pci_register_device(PCIDevice *pci_dev, PCIBus *bus,
                                         const char *name, int devfn)
{
    PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(pci_dev);
    PCIConfigReadFunc *config_read = pc->config_read;
    PCIConfigWriteFunc *config_write = pc->config_write;
    AddressSpace *dma_as;

    if (devfn < 0) {
        for (devfn = bus->devfn_min; devfn < ARRAY_SIZE(bus->devices);
             devfn += PCI_FUNC_MAX) {
            if (!bus->devices[devfn])
                goto found;
        }
        error_report("PCI: no slot/function available for %s, all in use", name);
        return NULL;
    found: ;
    } else if (bus->devices[devfn]) {
        error_report("PCI: slot %d function %d not available for %s, in use by %s",
                     PCI_SLOT(devfn), PCI_FUNC(devfn), name,
                     bus->devices[devfn]->name);
        return NULL;
    }

    pci_dev->bus = bus;
    dma_as = pci_device_iommu_address_space(pci_dev);

    memory_region_init_alias(&pci_dev->bus_master_enable_region,
                             OBJECT(pci_dev), "bus master",
                             dma_as->root, 0, memory_region_size(dma_as->root));
    memory_region_set_enabled(&pci_dev->bus_master_enable_region, false);
    address_space_init(&pci_dev->bus_master_as,
                       &pci_dev->bus_master_enable_region, name);

    pci_dev->devfn = devfn;
    pstrcpy(pci_dev->name, sizeof(pci_dev->name), name);
    pci_dev->irq_state = 0;
    pci_config_alloc(pci_dev);

    pci_config_set_vendor_id(pci_dev->config, pc->vendor_id);
    pci_config_set_device_id(pci_dev->config, pc->device_id);
    pci_config_set_revision(pci_dev->config, pc->revision);
    pci_config_set_class(pci_dev->config, pc->class_id);

    if (!pc->is_bridge) {
        if (pc->subsystem_vendor_id || pc->subsystem_id) {
            pci_set_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID,
                         pc->subsystem_vendor_id);
            pci_set_word(pci_dev->config + PCI_SUBSYSTEM_ID,
                         pc->subsystem_id);
        } else {
            pci_set_default_subsystem_id(pci_dev);
        }
    } else {
        /* subsystem_vendor_id/subsystem_id are only for header type 0 */
        assert(!pc->subsystem_vendor_id);
        assert(!pc->subsystem_id);
    }
    pci_init_cmask(pci_dev);
    pci_init_wmask(pci_dev);
    pci_init_w1cmask(pci_dev);
    if (pc->is_bridge) {
        pci_init_mask_bridge(pci_dev);
    }
    if (pci_init_multifunction(bus, pci_dev)) {
        pci_config_free(pci_dev);
        return NULL;
    }

    if (!config_read)
        config_read = pci_default_read_config;
    if (!config_write)
        config_write = pci_default_write_config;
    pci_dev->config_read = config_read;
    pci_dev->config_write = config_write;
    bus->devices[devfn] = pci_dev;
    pci_dev->irq = qemu_allocate_irqs(pci_irq_handler, pci_dev, PCI_NUM_PINS);
    pci_dev->version_id = 2; /* Current pci device vmstate version */
    return pci_dev;
}
false
qemu
c31d04b516b183b02336f8cce65a41bd547f6f6b
6,862
static int bdrv_inherited_flags(int flags)
{
    /* Enable protocol handling, disable format probing for bs->file */
    flags |= BDRV_O_PROTOCOL;

    /* Our block drivers take care to send flushes and respect unmap policy,
     * so we can enable both unconditionally on lower layers. */
    flags |= BDRV_O_CACHE_WB | BDRV_O_UNMAP;

    /* The backing file of a temporary snapshot is read-only */
    if (flags & BDRV_O_SNAPSHOT) {
        flags &= ~BDRV_O_RDWR;
    }

    /* Clear flags that only apply to the top layer */
    flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);

    return flags;
}
false
qemu
5669b44de5b3b607a3a4749e0c8c5ddfd723e76b
6,863
static inline void gen_movcf_ps(DisasContext *ctx, int fs, int fd,
                                int cc, int tf)
{
    int cond;
    TCGv_i32 t0 = tcg_temp_new_i32();
    int l1 = gen_new_label();
    int l2 = gen_new_label();

    if (tf)
        cond = TCG_COND_EQ;
    else
        cond = TCG_COND_NE;

    tcg_gen_andi_i32(t0, fpu_fcr31, 1 << get_fp_bit(cc));
    tcg_gen_brcondi_i32(cond, t0, 0, l1);
    gen_load_fpr32(t0, fs);
    gen_store_fpr32(t0, fd);
    gen_set_label(l1);

    tcg_gen_andi_i32(t0, fpu_fcr31, 1 << get_fp_bit(cc+1));
    tcg_gen_brcondi_i32(cond, t0, 0, l2);
    gen_load_fpr32h(ctx, t0, fs);
    gen_store_fpr32h(ctx, t0, fd);
    tcg_temp_free_i32(t0);
    gen_set_label(l2);
}
false
qemu
42a268c241183877192c376d03bd9b6d527407c7
6,865
QEMUFile *qemu_fopen(const char *filename, const char *mode)
{
    QEMUFile *f;

    f = qemu_mallocz(sizeof(QEMUFile));
    if (!f)
        return NULL;
    if (!strcmp(mode, "wb")) {
        f->is_writable = 1;
    } else if (!strcmp(mode, "rb")) {
        f->is_writable = 0;
    } else {
        goto fail;
    }
    f->outfile = fopen(filename, mode);
    if (!f->outfile)
        goto fail;
    f->is_file = 1;
    return f;
fail:
    if (f->outfile)
        fclose(f->outfile);
    qemu_free(f);
    return NULL;
}
false
qemu
5dafc53f1fb091d242f2179ffcb43bb28af36d1e
6,868
void mips_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                  int is_write, int is_user, uintptr_t retaddr)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;

    env->CP0_BadVAddr = addr;
    do_raise_exception(env, (is_write == 1) ? EXCP_AdES : EXCP_AdEL, retaddr);
}
false
qemu
aea14095ea91f792ee43ee52fe6032cd8cdd7190
6,869
void block_job_cancel(BlockJob *job)
{
    job->cancelled = true;
    block_job_resume(job);
}
false
qemu
751ebd76e654bd1e65da08ecf694325282b4cfcc
6,870
PCIBus *pci_pmac_init(qemu_irq *pic,
                      MemoryRegion *address_space_mem,
                      MemoryRegion *address_space_io)
{
    DeviceState *dev;
    SysBusDevice *s;
    PCIHostState *h;
    UNINState *d;

    /* Use values found on a real PowerMac */
    /* Uninorth main bus */
    dev = qdev_create(NULL, TYPE_UNI_NORTH_PCI_HOST_BRIDGE);
    qdev_init_nofail(dev);
    s = SYS_BUS_DEVICE(dev);
    h = PCI_HOST_BRIDGE(s);
    d = UNI_NORTH_PCI_HOST_BRIDGE(dev);
    memory_region_init(&d->pci_mmio, OBJECT(d), "pci-mmio", 0x100000000ULL);
    memory_region_init_alias(&d->pci_hole, OBJECT(d), "pci-hole", &d->pci_mmio,
                             0x80000000ULL, 0x70000000ULL);
    memory_region_add_subregion(address_space_mem, 0x80000000ULL,
                                &d->pci_hole);

    h->bus = pci_register_bus(dev, "pci",
                              pci_unin_set_irq, pci_unin_map_irq,
                              pic,
                              &d->pci_mmio,
                              address_space_io,
                              PCI_DEVFN(11, 0), 4, TYPE_PCI_BUS);

#if 0
    pci_create_simple(h->bus, PCI_DEVFN(11, 0), "uni-north");
#endif

    sysbus_mmio_map(s, 0, 0xf2800000);
    sysbus_mmio_map(s, 1, 0xf2c00000);

    /* DEC 21154 bridge */
#if 0
    /* XXX: not activated as PPC BIOS doesn't handle multiple buses properly */
    pci_create_simple(h->bus, PCI_DEVFN(12, 0), "dec-21154");
#endif

    /* Uninorth AGP bus */
    pci_create_simple(h->bus, PCI_DEVFN(11, 0), "uni-north-agp");
    dev = qdev_create(NULL, TYPE_UNI_NORTH_AGP_HOST_BRIDGE);
    qdev_init_nofail(dev);
    s = SYS_BUS_DEVICE(dev);
    sysbus_mmio_map(s, 0, 0xf0800000);
    sysbus_mmio_map(s, 1, 0xf0c00000);

    /* Uninorth internal bus */
#if 0
    /* XXX: not needed for now */
    pci_create_simple(h->bus, PCI_DEVFN(14, 0),
                      "uni-north-internal-pci");
    dev = qdev_create(NULL, TYPE_UNI_NORTH_INTERNAL_PCI_HOST_BRIDGE);
    qdev_init_nofail(dev);
    s = SYS_BUS_DEVICE(dev);
    sysbus_mmio_map(s, 0, 0xf4800000);
    sysbus_mmio_map(s, 1, 0xf4c00000);
#endif

    return h->bus;
}
false
qemu
8a0e11045d5f50d300e0ab1ba05f4c8217fb5dcb
6,871
static inline void downmix_3f_2r_to_dolby(float *samples)
{
    int i;

    for (i = 0; i < 256; i++) {
        samples[i] += (samples[i + 256] - samples[i + 768]);
        samples[i + 256] = (samples[i + 512] + samples[i + 1024]);
        samples[i + 512] = samples[i + 768] = samples[i + 1024] = 0;
    }
}
false
FFmpeg
0058584580b87feb47898e60e4b80c7f425882ad
6,872
int qemu_paio_ioctl(struct qemu_paiocb *aiocb)
{
    return qemu_paio_submit(aiocb, QEMU_PAIO_IOCTL);
}
false
qemu
9ef91a677110ec200d7b2904fc4bcae5a77329ad
6,873
int rtsp_next_attr_and_value(const char **p, char *attr, int attr_size,
                             char *value, int value_size)
{
    skip_spaces(p);
    if(**p) {
        get_word_sep(attr, attr_size, "=", p);
        if (**p == '=')
            (*p)++;
        get_word_sep(value, value_size, ";", p);
        if (**p == ';')
            (*p)++;
        return 1;
    }
    return 0;
}
false
FFmpeg
c89658008705d949c319df3fa6f400c481ad73e1
6,874
int av_opencl_init(AVDictionary *options, AVOpenCLExternalEnv *ext_opencl_env)
{
    int ret = 0;
    AVDictionaryEntry *opt_build_entry;
    AVDictionaryEntry *opt_platform_entry;
    AVDictionaryEntry *opt_device_entry;

    LOCK_OPENCL
    if (!gpu_env.init_count) {
        opt_platform_entry = av_dict_get(options, "platform_idx", NULL, 0);
        opt_device_entry   = av_dict_get(options, "device_idx", NULL, 0);
        /*initialize devices, context, command_queue*/
        gpu_env.usr_spec_dev_info.platform_idx = -1;
        gpu_env.usr_spec_dev_info.dev_idx = -1;
        if (opt_platform_entry) {
            gpu_env.usr_spec_dev_info.platform_idx =
                strtol(opt_platform_entry->value, NULL, 10);
        }
        if (opt_device_entry) {
            gpu_env.usr_spec_dev_info.dev_idx =
                strtol(opt_device_entry->value, NULL, 10);
        }
        ret = init_opencl_env(&gpu_env, ext_opencl_env);
        if (ret < 0)
            goto end;
    }
    /*initialize program, kernel_name, kernel_count*/
    opt_build_entry = av_dict_get(options, "build_options", NULL, 0);
    if (opt_build_entry)
        ret = compile_kernel_file(&gpu_env, opt_build_entry->value);
    else
        ret = compile_kernel_file(&gpu_env, NULL);
    if (ret < 0)
        goto end;
    av_assert1(gpu_env.kernel_code_count > 0);
    gpu_env.init_count++;
end:
    UNLOCK_OPENCL
    return ret;
}
false
FFmpeg
57d77b3963ce1023eaf5ada8cba58b9379405cc8
6,875
static CodeBook unpack_codebook(GetBitContext* gb, unsigned depth,
                                unsigned size)
{
    unsigned i, j;
    CodeBook cb = { 0 };

    if (!can_safely_read(gb, (uint64_t)size * 34))
        return cb;

    if (size >= INT_MAX / sizeof(MacroBlock))
        return cb;
    cb.blocks = av_malloc(size ? size * sizeof(MacroBlock) : 1);
    if (!cb.blocks)
        return cb;

    cb.depth = depth;
    cb.size = size;
    for (i = 0; i < size; i++) {
        unsigned mask_bits = get_bits(gb, 4);
        unsigned color0 = get_bits(gb, 15);
        unsigned color1 = get_bits(gb, 15);

        for (j = 0; j < 4; j++) {
            if (mask_bits & (1 << j))
                cb.blocks[i].pixels[j] = color1;
            else
                cb.blocks[i].pixels[j] = color0;
        }
    }
    return cb;
}
false
FFmpeg
e494f44c051d7dccc038a603ab22532b87dd1705
6,876
static void hybrid4_8_12_cx(float (*in)[2], float (*out)[32][2],
                            const float (*filter)[7][2], int N, int len)
{
    int i, j, ssb;

    for (i = 0; i < len; i++, in++) {
        for (ssb = 0; ssb < N; ssb++) {
            float sum_re = filter[ssb][6][0] * in[6][0],
                  sum_im = filter[ssb][6][0] * in[6][1];
            for (j = 0; j < 6; j++) {
                float in0_re = in[j][0];
                float in0_im = in[j][1];
                float in1_re = in[12-j][0];
                float in1_im = in[12-j][1];
                sum_re += filter[ssb][j][0] * (in0_re + in1_re) -
                          filter[ssb][j][1] * (in0_im - in1_im);
                sum_im += filter[ssb][j][0] * (in0_im + in1_im) +
                          filter[ssb][j][1] * (in0_re - in1_re);
            }
            out[ssb][i][0] = sum_re;
            out[ssb][i][1] = sum_im;
        }
    }
}
false
FFmpeg
bf1945af301aff54c33352e75f17aec6cb5269d7
6,877
static int add_metadata(const uint8_t **buf, int count, int type, const char *name, const char *sep, TiffContext *s) { switch(type) { case TIFF_DOUBLE: return add_doubles_metadata(buf, count, name, sep, s); case TIFF_SHORT : return add_shorts_metadata(buf, count, name, sep, s); default : return AVERROR_INVALIDDATA; }; }
false
FFmpeg
1ec83d9a9e472f485897ac92bad9631d551a8c5b
6,878
static int opt_recording_timestamp(void *optctx, const char *opt, const char *arg) { OptionsContext *o = optctx; char buf[128]; int64_t recording_timestamp = parse_time_or_die(opt, arg, 0) / 1E6; struct tm time = *gmtime((time_t*)&recording_timestamp); strftime(buf, sizeof(buf), "creation_time=%FT%T%z", &time); parse_option(o, "metadata", buf, options); av_log(NULL, AV_LOG_WARNING, "%s is deprecated, set the 'creation_time' metadata " "tag instead.\n", opt); return 0; }
false
FFmpeg
c0002ddb019d7f2f22da301b2855f86d697d37bd
6,880
static void tcp_chr_tls_init(CharDriverState *chr) { TCPCharDriver *s = chr->opaque; QIOChannelTLS *tioc; Error *err = NULL; if (s->is_listen) { tioc = qio_channel_tls_new_server( s->ioc, s->tls_creds, NULL, /* XXX Use an ACL */ &err); } else { tioc = qio_channel_tls_new_client( s->ioc, s->tls_creds, s->addr->u.inet.data->host, &err); } if (tioc == NULL) { error_free(err); tcp_chr_disconnect(chr); return; } object_unref(OBJECT(s->ioc)); s->ioc = QIO_CHANNEL(tioc); qio_channel_tls_handshake(tioc, tcp_chr_tls_handshake, chr, NULL); }
true
qemu
660a2d83e026496db6b3eaec2256a2cdd6c74de8
6,881
rdt_free_extradata (PayloadContext *rdt) { ff_rm_free_rmstream(rdt->rmst[0]); if (rdt->rmctx) av_close_input_stream(rdt->rmctx); av_freep(&rdt->mlti_data); av_free(rdt); }
true
FFmpeg
c49a3ec30aaa8042335656982054f02847c03aae
6,882
static int h263_decode_gob_header(MpegEncContext *s) { unsigned int val, gob_number; int left; /* Check for GOB Start Code */ val = show_bits(&s->gb, 16); if(val) return -1; /* We have a GBSC probably with GSTUFF */ skip_bits(&s->gb, 16); /* Drop the zeros */ left= get_bits_left(&s->gb); //MN: we must check the bits left or we might end in an infinite loop (or segfault) for(;left>13; left--){ if(get_bits1(&s->gb)) break; /* Seek the '1' bit */ } if(left<=13) return -1; if(s->h263_slice_structured){ if(check_marker(s->avctx, &s->gb, "before MBA")==0) return -1; ff_h263_decode_mba(s); if(s->mb_num > 1583) if(check_marker(s->avctx, &s->gb, "after MBA")==0) return -1; s->qscale = get_bits(&s->gb, 5); /* SQUANT */ if(check_marker(s->avctx, &s->gb, "after SQUANT")==0) return -1; skip_bits(&s->gb, 2); /* GFID */ }else{ gob_number = get_bits(&s->gb, 5); /* GN */ s->mb_x= 0; s->mb_y= s->gob_index* gob_number; skip_bits(&s->gb, 2); /* GFID */ s->qscale = get_bits(&s->gb, 5); /* GQUANT */ } if(s->mb_y >= s->mb_height) return -1; if(s->qscale==0) return -1; return 0; }
true
FFmpeg
2baf36caed98cfdc7f6a2086fbf26f1a172f16cf
6,883
static int mov_read_wide(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom) { int err; uint32_t type; #ifdef DEBUG print_atom("wide", atom); debug_indent++; #endif if (atom.size < 8) return 0; /* continue */ if (get_be32(pb) != 0) { /* 0 sized mdat atom... use the 'wide' atom size */ url_fskip(pb, atom.size - 4); return 0; } atom.type = get_le32(pb); atom.offset += 8; atom.size -= 8; if (atom.type != MKTAG('m', 'd', 'a', 't')) { url_fskip(pb, atom.size); return 0; } err = mov_read_mdat(c, pb, atom); #ifdef DEBUG debug_indent--; #endif return err; }
true
FFmpeg
fd6e513ee1dc13174256de8adaeeb2c2691eee95
6,884
static void net_socket_send(void *opaque) { NetSocketState *s = opaque; int l, size, err; uint8_t buf1[4096]; const uint8_t *buf; size = recv(s->fd, buf1, sizeof(buf1), 0); if (size < 0) { err = socket_error(); if (err != EWOULDBLOCK) goto eoc; } else if (size == 0) { /* end of connection */ eoc: qemu_set_fd_handler(s->fd, NULL, NULL, NULL); closesocket(s->fd); return; } buf = buf1; while (size > 0) { /* reassemble a packet from the network */ switch(s->state) { case 0: l = 4 - s->index; if (l > size) l = size; memcpy(s->buf + s->index, buf, l); buf += l; size -= l; s->index += l; if (s->index == 4) { /* got length */ s->packet_len = ntohl(*(uint32_t *)s->buf); s->index = 0; s->state = 1; } break; case 1: l = s->packet_len - s->index; if (l > size) l = size; memcpy(s->buf + s->index, buf, l); s->index += l; buf += l; size -= l; if (s->index >= s->packet_len) { qemu_send_packet(s->vc, s->buf, s->packet_len); s->index = 0; s->state = 0; } break; } } }
true
qemu
abcd2baab187cc3b1fcce13b697da5874a123e39
6,885
static void stellaris_enet_write(void *opaque, hwaddr offset, uint64_t value, unsigned size) { stellaris_enet_state *s = (stellaris_enet_state *)opaque; switch (offset) { case 0x00: /* IACK */ s->ris &= ~value; DPRINTF("IRQ ack %02x/%02x\n", value, s->ris); stellaris_enet_update(s); /* Clearing TXER also resets the TX fifo. */ if (value & SE_INT_TXER) s->tx_frame_len = -1; break; case 0x04: /* IM */ DPRINTF("IRQ mask %02x/%02x\n", value, s->ris); s->im = value; stellaris_enet_update(s); break; case 0x08: /* RCTL */ s->rctl = value; if (value & SE_RCTL_RSTFIFO) { s->rx_fifo_len = 0; s->np = 0; stellaris_enet_update(s); } break; case 0x0c: /* TCTL */ s->tctl = value; break; case 0x10: /* DATA */ if (s->tx_frame_len == -1) { s->tx_frame_len = value & 0xffff; if (s->tx_frame_len > 2032) { DPRINTF("TX frame too long (%d)\n", s->tx_frame_len); s->tx_frame_len = 0; s->ris |= SE_INT_TXER; stellaris_enet_update(s); } else { DPRINTF("Start TX frame len=%d\n", s->tx_frame_len); /* The value written does not include the ethernet header. */ s->tx_frame_len += 14; if ((s->tctl & SE_TCTL_CRC) == 0) s->tx_frame_len += 4; s->tx_fifo_len = 0; s->tx_fifo[s->tx_fifo_len++] = value >> 16; s->tx_fifo[s->tx_fifo_len++] = value >> 24; } } else { s->tx_fifo[s->tx_fifo_len++] = value; s->tx_fifo[s->tx_fifo_len++] = value >> 8; s->tx_fifo[s->tx_fifo_len++] = value >> 16; s->tx_fifo[s->tx_fifo_len++] = value >> 24; if (s->tx_fifo_len >= s->tx_frame_len) { /* We don't implement explicit CRC, so just chop it off. */ if ((s->tctl & SE_TCTL_CRC) == 0) s->tx_frame_len -= 4; if ((s->tctl & SE_TCTL_PADEN) && s->tx_frame_len < 60) { memset(&s->tx_fifo[s->tx_frame_len], 0, 60 - s->tx_frame_len); s->tx_fifo_len = 60; } qemu_send_packet(qemu_get_queue(s->nic), s->tx_fifo, s->tx_frame_len); s->tx_frame_len = -1; s->ris |= SE_INT_TXEMP; stellaris_enet_update(s); DPRINTF("Done TX\n"); } } break; case 0x14: /* IA0 */ s->conf.macaddr.a[0] = value; s->conf.macaddr.a[1] = value >> 8; s->conf.macaddr.a[2] = value >> 16; s->conf.macaddr.a[3] = value >> 24; break; case 0x18: /* IA1 */ s->conf.macaddr.a[4] = value; s->conf.macaddr.a[5] = value >> 8; break; case 0x1c: /* THR */ s->thr = value; break; case 0x20: /* MCTL */ s->mctl = value; break; case 0x24: /* MDV */ s->mdv = value; break; case 0x28: /* MADD */ /* ignored. */ break; case 0x2c: /* MTXD */ s->mtxd = value & 0xff; break; case 0x30: /* MRXD */ case 0x34: /* NP */ case 0x38: /* TR */ /* Ignored. */ case 0x3c: /* Undocuented: Timestamp? */ /* Ignored. */ break; default: hw_error("stellaris_enet_write: Bad offset %x\n", (int)offset); } }
true
qemu
5c10495ab1546d5d12b51a97817051e9ec98d0f6
6,887
void *av_malloc(unsigned int size) { void *ptr; #if defined (HAVE_MEMALIGN) ptr = memalign(16,size); /* Why 64? Indeed, we should align it: on 4 for 386 on 16 for 486 on 32 for 586, PPro - k6-III on 64 for K7 (maybe for P3 too). Because L1 and L2 caches are aligned on those values. But I don't want to code such logic here! */ /* Why 16? because some cpus need alignment, for example SSE2 on P4, & most RISC cpus it will just trigger an exception and the unaligned load will be done in the exception handler or it will just segfault (SSE2 on P4) Why not larger? because i didnt see a difference in benchmarks ... */ /* benchmarks with p3 memalign(64)+1 3071,3051,3032 memalign(64)+2 3051,3032,3041 memalign(64)+4 2911,2896,2915 memalign(64)+8 2545,2554,2550 memalign(64)+16 2543,2572,2563 memalign(64)+32 2546,2545,2571 memalign(64)+64 2570,2533,2558 btw, malloc seems to do 8 byte alignment by default here */ #else ptr = malloc(size); #endif return ptr; }
false
FFmpeg
da9b170c6f06184a5114dc66afb8385cd0ffff83
6,888
static int posix_aio_process_queue(void *opaque) { PosixAioState *s = opaque; struct qemu_paiocb *acb, **pacb; int ret; int result = 0; for(;;) { pacb = &s->first_aio; for(;;) { acb = *pacb; if (!acb) return result; ret = qemu_paio_error(acb); if (ret == ECANCELED) { /* remove the request */ *pacb = acb->next; qemu_aio_release(acb); result = 1; } else if (ret != EINPROGRESS) { /* end of aio */ if (ret == 0) { ret = qemu_paio_return(acb); if (ret == acb->aio_nbytes) ret = 0; else ret = -EINVAL; } else { ret = -ret; } trace_paio_complete(acb, acb->common.opaque, ret); /* remove the request */ *pacb = acb->next; /* call the callback */ acb->common.cb(acb->common.opaque, ret); qemu_aio_release(acb); result = 1; break; } else { pacb = &acb->next; } } } return result; }
true
qemu
adfe92f6d18c0e0a3694e19abb58eb55fd0c5993
6,889
static int protocol_client_auth(VncState *vs, uint8_t *data, size_t len) { /* We only advertise 1 auth scheme at a time, so client * must pick the one we sent. Verify this */ if (data[0] != vs->auth) { /* Reject auth */ VNC_DEBUG("Reject auth %d because it didn't match advertized\n", (int)data[0]); vnc_write_u32(vs, 1); if (vs->minor >= 8) { static const char err[] = "Authentication failed"; vnc_write_u32(vs, sizeof(err)); vnc_write(vs, err, sizeof(err)); } vnc_client_error(vs); } else { /* Accept requested auth */ VNC_DEBUG("Client requested auth %d\n", (int)data[0]); switch (vs->auth) { case VNC_AUTH_NONE: VNC_DEBUG("Accept auth none\n"); if (vs->minor >= 8) { vnc_write_u32(vs, 0); /* Accept auth completion */ vnc_flush(vs); } start_client_init(vs); break; case VNC_AUTH_VNC: VNC_DEBUG("Start VNC auth\n"); start_auth_vnc(vs); break; case VNC_AUTH_VENCRYPT: VNC_DEBUG("Accept VeNCrypt auth\n"); start_auth_vencrypt(vs); break; #ifdef CONFIG_VNC_SASL case VNC_AUTH_SASL: VNC_DEBUG("Accept SASL auth\n"); start_auth_sasl(vs); break; #endif /* CONFIG_VNC_SASL */ default: /* Should not be possible, but just in case */ VNC_DEBUG("Reject auth %d server code bug\n", vs->auth); vnc_write_u8(vs, 1); if (vs->minor >= 8) { static const char err[] = "Authentication failed"; vnc_write_u32(vs, sizeof(err)); vnc_write(vs, err, sizeof(err)); } vnc_client_error(vs); } } return 0; }
true
qemu
7364dbdabb7824d5bde1e341bb6d928282f01c83
6,890
static int ehci_process_itd(EHCIState *ehci, EHCIitd *itd, uint32_t addr) { USBDevice *dev; USBEndpoint *ep; uint32_t i, len, pid, dir, devaddr, endp; uint32_t pg, off, ptr1, ptr2, max, mult; ehci->periodic_sched_active = PERIODIC_ACTIVE; dir =(itd->bufptr[1] & ITD_BUFPTR_DIRECTION); devaddr = get_field(itd->bufptr[0], ITD_BUFPTR_DEVADDR); endp = get_field(itd->bufptr[0], ITD_BUFPTR_EP); max = get_field(itd->bufptr[1], ITD_BUFPTR_MAXPKT); mult = get_field(itd->bufptr[2], ITD_BUFPTR_MULT); for(i = 0; i < 8; i++) { if (itd->transact[i] & ITD_XACT_ACTIVE) { pg = get_field(itd->transact[i], ITD_XACT_PGSEL); off = itd->transact[i] & ITD_XACT_OFFSET_MASK; len = get_field(itd->transact[i], ITD_XACT_LENGTH); if (len > max * mult) { len = max * mult; } if (len > BUFF_SIZE || pg > 6) { return -1; } ptr1 = (itd->bufptr[pg] & ITD_BUFPTR_MASK); qemu_sglist_init(&ehci->isgl, ehci->device, 2, ehci->as); if (off + len > 4096) { /* transfer crosses page border */ if (pg == 6) { return -1; /* avoid page pg + 1 */ } ptr2 = (itd->bufptr[pg + 1] & ITD_BUFPTR_MASK); uint32_t len2 = off + len - 4096; uint32_t len1 = len - len2; qemu_sglist_add(&ehci->isgl, ptr1 + off, len1); qemu_sglist_add(&ehci->isgl, ptr2, len2); } else { qemu_sglist_add(&ehci->isgl, ptr1 + off, len); } pid = dir ? USB_TOKEN_IN : USB_TOKEN_OUT; dev = ehci_find_device(ehci, devaddr); ep = usb_ep_get(dev, pid, endp); if (ep && ep->type == USB_ENDPOINT_XFER_ISOC) { usb_packet_setup(&ehci->ipacket, pid, ep, 0, addr, false, (itd->transact[i] & ITD_XACT_IOC) != 0); usb_packet_map(&ehci->ipacket, &ehci->isgl); usb_handle_packet(dev, &ehci->ipacket); usb_packet_unmap(&ehci->ipacket, &ehci->isgl); } else { DPRINTF("ISOCH: attempt to addess non-iso endpoint\n"); ehci->ipacket.status = USB_RET_NAK; ehci->ipacket.actual_length = 0; } switch (ehci->ipacket.status) { case USB_RET_SUCCESS: break; default: fprintf(stderr, "Unexpected iso usb result: %d\n", ehci->ipacket.status); /* Fall through */ case USB_RET_IOERROR: case USB_RET_NODEV: /* 3.3.2: XACTERR is only allowed on IN transactions */ if (dir) { itd->transact[i] |= ITD_XACT_XACTERR; ehci_raise_irq(ehci, USBSTS_ERRINT); } break; case USB_RET_BABBLE: itd->transact[i] |= ITD_XACT_BABBLE; ehci_raise_irq(ehci, USBSTS_ERRINT); break; case USB_RET_NAK: /* no data for us, so do a zero-length transfer */ ehci->ipacket.actual_length = 0; break; } if (!dir) { set_field(&itd->transact[i], len - ehci->ipacket.actual_length, ITD_XACT_LENGTH); /* OUT */ } else { set_field(&itd->transact[i], ehci->ipacket.actual_length, ITD_XACT_LENGTH); /* IN */ } if (itd->transact[i] & ITD_XACT_IOC) { ehci_raise_irq(ehci, USBSTS_INT); } itd->transact[i] &= ~ITD_XACT_ACTIVE; } } return 0; }
true
qemu
b16c129daf0fed91febbb88de23dae8271c8898a
6,892
void *g_try_malloc0(size_t n_bytes) { __coverity_negative_sink__(n_bytes); return calloc(1, n_bytes == 0 ? 1 : n_bytes); }
true
qemu
9d7a4c6690ef9962a3b20034f65008f1ea15c1d6
6,893
void gdb_register_coprocessor(CPUState * env, gdb_reg_cb get_reg, gdb_reg_cb set_reg, int num_regs, const char *xml, int g_pos) { GDBRegisterState *s; GDBRegisterState **p; static int last_reg = NUM_CORE_REGS; s = (GDBRegisterState *)g_malloc0(sizeof(GDBRegisterState)); s->base_reg = last_reg; s->num_regs = num_regs; s->get_reg = get_reg; s->set_reg = set_reg; s->xml = xml; p = &env->gdb_regs; while (*p) { /* Check for duplicates. */ if (strcmp((*p)->xml, xml) == 0) return; p = &(*p)->next; } /* Add to end of list. */ last_reg += num_regs; *p = s; if (g_pos) { if (g_pos != s->base_reg) { fprintf(stderr, "Error: Bad gdb register numbering for '%s'\n" "Expected %d got %d\n", xml, g_pos, s->base_reg); } else { num_g_regs = last_reg; } } }
true
qemu
9643c25f8d67646857159d6fc021b07e7a659192
6,894
static int check_refcounts_l1(BlockDriverState *bs, uint16_t *refcount_table, int refcount_table_size, int64_t l1_table_offset, int l1_size, int check_copied) { BDRVQcowState *s = bs->opaque; uint64_t *l1_table, l2_offset, l1_size2; int i, refcount, ret; int errors = 0; l1_size2 = l1_size * sizeof(uint64_t); /* Mark L1 table as used */ errors += inc_refcounts(bs, refcount_table, refcount_table_size, l1_table_offset, l1_size2); /* Read L1 table entries from disk */ if (l1_size2 == 0) { l1_table = NULL; } else { l1_table = qemu_malloc(l1_size2); if (bdrv_pread(bs->file, l1_table_offset, l1_table, l1_size2) != l1_size2) goto fail; for(i = 0;i < l1_size; i++) be64_to_cpus(&l1_table[i]); } /* Do the actual checks */ for(i = 0; i < l1_size; i++) { l2_offset = l1_table[i]; if (l2_offset) { /* QCOW_OFLAG_COPIED must be set iff refcount == 1 */ if (check_copied) { refcount = get_refcount(bs, (l2_offset & ~QCOW_OFLAG_COPIED) >> s->cluster_bits); if (refcount < 0) { fprintf(stderr, "Can't get refcount for l2_offset %" PRIx64 ": %s\n", l2_offset, strerror(-refcount)); } if ((refcount == 1) != ((l2_offset & QCOW_OFLAG_COPIED) != 0)) { fprintf(stderr, "ERROR OFLAG_COPIED: l2_offset=%" PRIx64 " refcount=%d\n", l2_offset, refcount); errors++; } } /* Mark L2 table as used */ l2_offset &= ~QCOW_OFLAG_COPIED; errors += inc_refcounts(bs, refcount_table, refcount_table_size, l2_offset, s->cluster_size); /* L2 tables are cluster aligned */ if (l2_offset & (s->cluster_size - 1)) { fprintf(stderr, "ERROR l2_offset=%" PRIx64 ": Table is not " "cluster aligned; L1 entry corrupted\n", l2_offset); errors++; } /* Process and check L2 entries */ ret = check_refcounts_l2(bs, refcount_table, refcount_table_size, l2_offset, check_copied); if (ret < 0) { goto fail; } errors += ret; } } qemu_free(l1_table); return errors; fail: fprintf(stderr, "ERROR: I/O error in check_refcounts_l1\n"); qemu_free(l1_table); return -EIO; }
true
qemu
9ac228e02cf16202547e7025ef300369e0db7781
6,895
static int alloc_refcount_block(BlockDriverState *bs, int64_t cluster_index, void **refcount_block) { BDRVQcow2State *s = bs->opaque; unsigned int refcount_table_index; int ret; BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC); /* Find the refcount block for the given cluster */ refcount_table_index = cluster_index >> s->refcount_block_bits; if (refcount_table_index < s->refcount_table_size) { uint64_t refcount_block_offset = s->refcount_table[refcount_table_index] & REFT_OFFSET_MASK; /* If it's already there, we're done */ if (refcount_block_offset) { if (offset_into_cluster(s, refcount_block_offset)) { qcow2_signal_corruption(bs, true, -1, -1, "Refblock offset %#" PRIx64 " unaligned (reftable index: " "%#x)", refcount_block_offset, refcount_table_index); return -EIO; } return load_refcount_block(bs, refcount_block_offset, refcount_block); } } /* * If we came here, we need to allocate something. Something is at least * a cluster for the new refcount block. It may also include a new refcount * table if the old refcount table is too small. * * Note that allocating clusters here needs some special care: * * - We can't use the normal qcow2_alloc_clusters(), it would try to * increase the refcount and very likely we would end up with an endless * recursion. Instead we must place the refcount blocks in a way that * they can describe them themselves. * * - We need to consider that at this point we are inside update_refcounts * and potentially doing an initial refcount increase. This means that * some clusters have already been allocated by the caller, but their * refcount isn't accurate yet. If we allocate clusters for metadata, we * need to return -EAGAIN to signal the caller that it needs to restart * the search for free clusters. * * - alloc_clusters_noref and qcow2_free_clusters may load a different * refcount block into the cache */ *refcount_block = NULL; /* We write to the refcount table, so we might depend on L2 tables */ ret = qcow2_cache_flush(bs, s->l2_table_cache); if (ret < 0) { return ret; } /* Allocate the refcount block itself and mark it as used */ int64_t new_block = alloc_clusters_noref(bs, s->cluster_size); if (new_block < 0) { return new_block; } #ifdef DEBUG_ALLOC2 fprintf(stderr, "qcow2: Allocate refcount block %d for %" PRIx64 " at %" PRIx64 "\n", refcount_table_index, cluster_index << s->cluster_bits, new_block); #endif if (in_same_refcount_block(s, new_block, cluster_index << s->cluster_bits)) { /* Zero the new refcount block before updating it */ ret = qcow2_cache_get_empty(bs, s->refcount_block_cache, new_block, refcount_block); if (ret < 0) { goto fail_block; } memset(*refcount_block, 0, s->cluster_size); /* The block describes itself, need to update the cache */ int block_index = (new_block >> s->cluster_bits) & (s->refcount_block_size - 1); s->set_refcount(*refcount_block, block_index, 1); } else { /* Described somewhere else. This can recurse at most twice before we * arrive at a block that describes itself. 
*/ ret = update_refcount(bs, new_block, s->cluster_size, 1, false, QCOW2_DISCARD_NEVER); if (ret < 0) { goto fail_block; } ret = qcow2_cache_flush(bs, s->refcount_block_cache); if (ret < 0) { goto fail_block; } /* Initialize the new refcount block only after updating its refcount, * update_refcount uses the refcount cache itself */ ret = qcow2_cache_get_empty(bs, s->refcount_block_cache, new_block, refcount_block); if (ret < 0) { goto fail_block; } memset(*refcount_block, 0, s->cluster_size); } /* Now the new refcount block needs to be written to disk */ BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_WRITE); qcow2_cache_entry_mark_dirty(bs, s->refcount_block_cache, *refcount_block); ret = qcow2_cache_flush(bs, s->refcount_block_cache); if (ret < 0) { goto fail_block; } /* If the refcount table is big enough, just hook the block up there */ if (refcount_table_index < s->refcount_table_size) { uint64_t data64 = cpu_to_be64(new_block); BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_HOOKUP); ret = bdrv_pwrite_sync(bs->file->bs, s->refcount_table_offset + refcount_table_index * sizeof(uint64_t), &data64, sizeof(data64)); if (ret < 0) { goto fail_block; } s->refcount_table[refcount_table_index] = new_block; /* The new refcount block may be where the caller intended to put its * data, so let it restart the search. */ return -EAGAIN; } qcow2_cache_put(bs, s->refcount_block_cache, refcount_block); /* * If we come here, we need to grow the refcount table. Again, a new * refcount table needs some space and we can't simply allocate to avoid * endless recursion. * * Therefore let's grab new refcount blocks at the end of the image, which * will describe themselves and the new refcount table. This way we can * reference them only in the new table and do the switch to the new * refcount table at once without producing an inconsistent state in * between. */ BLKDBG_EVENT(bs->file, BLKDBG_REFTABLE_GROW); /* Calculate the number of refcount blocks needed so far; this will be the * basis for calculating the index of the first cluster used for the * self-describing refcount structures which we are about to create. * * Because we reached this point, there cannot be any refcount entries for * cluster_index or higher indices yet. However, because new_block has been * allocated to describe that cluster (and it will assume this role later * on), we cannot use that index; also, new_block may actually have a higher * cluster index than cluster_index, so it needs to be taken into account * here (and 1 needs to be added to its value because that cluster is used). 
*/ uint64_t blocks_used = DIV_ROUND_UP(MAX(cluster_index + 1, (new_block >> s->cluster_bits) + 1), s->refcount_block_size); if (blocks_used > QCOW_MAX_REFTABLE_SIZE / sizeof(uint64_t)) { return -EFBIG; } /* And now we need at least one block more for the new metadata */ uint64_t table_size = next_refcount_table_size(s, blocks_used + 1); uint64_t last_table_size; uint64_t blocks_clusters; do { uint64_t table_clusters = size_to_clusters(s, table_size * sizeof(uint64_t)); blocks_clusters = 1 + ((table_clusters + s->refcount_block_size - 1) / s->refcount_block_size); uint64_t meta_clusters = table_clusters + blocks_clusters; last_table_size = table_size; table_size = next_refcount_table_size(s, blocks_used + ((meta_clusters + s->refcount_block_size - 1) / s->refcount_block_size)); } while (last_table_size != table_size); #ifdef DEBUG_ALLOC2 fprintf(stderr, "qcow2: Grow refcount table %" PRId32 " => %" PRId64 "\n", s->refcount_table_size, table_size); #endif /* Create the new refcount table and blocks */ uint64_t meta_offset = (blocks_used * s->refcount_block_size) * s->cluster_size; uint64_t table_offset = meta_offset + blocks_clusters * s->cluster_size; uint64_t *new_table = g_try_new0(uint64_t, table_size); void *new_blocks = g_try_malloc0(blocks_clusters * s->cluster_size); assert(table_size > 0 && blocks_clusters > 0); if (new_table == NULL || new_blocks == NULL) { ret = -ENOMEM; goto fail_table; } /* Fill the new refcount table */ memcpy(new_table, s->refcount_table, s->refcount_table_size * sizeof(uint64_t)); new_table[refcount_table_index] = new_block; int i; for (i = 0; i < blocks_clusters; i++) { new_table[blocks_used + i] = meta_offset + (i * s->cluster_size); } /* Fill the refcount blocks */ uint64_t table_clusters = size_to_clusters(s, table_size * sizeof(uint64_t)); int block = 0; for (i = 0; i < table_clusters + blocks_clusters; i++) { s->set_refcount(new_blocks, block++, 1); } /* Write refcount blocks to disk */ BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_WRITE_BLOCKS); ret = bdrv_pwrite_sync(bs->file->bs, meta_offset, new_blocks, blocks_clusters * s->cluster_size); g_free(new_blocks); new_blocks = NULL; if (ret < 0) { goto fail_table; } /* Write refcount table to disk */ for(i = 0; i < table_size; i++) { cpu_to_be64s(&new_table[i]); } BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_WRITE_TABLE); ret = bdrv_pwrite_sync(bs->file->bs, table_offset, new_table, table_size * sizeof(uint64_t)); if (ret < 0) { goto fail_table; } for(i = 0; i < table_size; i++) { be64_to_cpus(&new_table[i]); } /* Hook up the new refcount table in the qcow2 header */ uint8_t data[12]; cpu_to_be64w((uint64_t*)data, table_offset); cpu_to_be32w((uint32_t*)(data + 8), table_clusters); BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_SWITCH_TABLE); ret = bdrv_pwrite_sync(bs->file->bs, offsetof(QCowHeader, refcount_table_offset), data, sizeof(data)); if (ret < 0) { goto fail_table; } /* And switch it in memory */ uint64_t old_table_offset = s->refcount_table_offset; uint64_t old_table_size = s->refcount_table_size; g_free(s->refcount_table); s->refcount_table = new_table; s->refcount_table_size = table_size; s->refcount_table_offset = table_offset; /* Free old table. */ qcow2_free_clusters(bs, old_table_offset, old_table_size * sizeof(uint64_t), QCOW2_DISCARD_OTHER); ret = load_refcount_block(bs, new_block, refcount_block); if (ret < 0) { return ret; } /* If we were trying to do the initial refcount update for some cluster * allocation, we might have used the same clusters to store newly * allocated metadata. 
Make the caller search some new space. */ return -EAGAIN; fail_table: g_free(new_blocks); g_free(new_table); fail_block: if (*refcount_block != NULL) { qcow2_cache_put(bs, s->refcount_block_cache, refcount_block); } return ret; }
true
qemu
95334230637cef9fbd199bb79a56271ec73d4732
6,896
void ppc6xx_irq_init (CPUState *env) { env->irq_inputs = (void **)qemu_allocate_irqs(&ppc6xx_set_irq, env, 6); }
true
qemu
7b62a955047934bab158e84ecb63cb432c193ace
6,897
static void coroutine_fn v9fs_create(void *opaque) { int32_t fid; int err = 0; size_t offset = 7; V9fsFidState *fidp; V9fsQID qid; int32_t perm; int8_t mode; V9fsPath path; struct stat stbuf; V9fsString name; V9fsString extension; int iounit; V9fsPDU *pdu = opaque; v9fs_path_init(&path); v9fs_string_init(&name); v9fs_string_init(&extension); err = pdu_unmarshal(pdu, offset, "dsdbs", &fid, &name, &perm, &mode, &extension); if (err < 0) { goto out_nofid; } trace_v9fs_create(pdu->tag, pdu->id, fid, name.data, perm, mode); if (name_is_illegal(name.data)) { err = -ENOENT; goto out_nofid; } if (!strcmp(".", name.data) || !strcmp("..", name.data)) { err = -EEXIST; goto out_nofid; } fidp = get_fid(pdu, fid); if (fidp == NULL) { err = -EINVAL; goto out_nofid; } if (perm & P9_STAT_MODE_DIR) { err = v9fs_co_mkdir(pdu, fidp, &name, perm & 0777, fidp->uid, -1, &stbuf); if (err < 0) { goto out; } err = v9fs_co_name_to_path(pdu, &fidp->path, name.data, &path); if (err < 0) { goto out; } v9fs_path_copy(&fidp->path, &path); err = v9fs_co_opendir(pdu, fidp); if (err < 0) { goto out; } fidp->fid_type = P9_FID_DIR; } else if (perm & P9_STAT_MODE_SYMLINK) { err = v9fs_co_symlink(pdu, fidp, &name, extension.data, -1 , &stbuf); if (err < 0) { goto out; } err = v9fs_co_name_to_path(pdu, &fidp->path, name.data, &path); if (err < 0) { goto out; } v9fs_path_copy(&fidp->path, &path); } else if (perm & P9_STAT_MODE_LINK) { int32_t ofid = atoi(extension.data); V9fsFidState *ofidp = get_fid(pdu, ofid); if (ofidp == NULL) { err = -EINVAL; goto out; } err = v9fs_co_link(pdu, ofidp, fidp, &name); put_fid(pdu, ofidp); if (err < 0) { goto out; } err = v9fs_co_name_to_path(pdu, &fidp->path, name.data, &path); if (err < 0) { fidp->fid_type = P9_FID_NONE; goto out; } v9fs_path_copy(&fidp->path, &path); err = v9fs_co_lstat(pdu, &fidp->path, &stbuf); if (err < 0) { fidp->fid_type = P9_FID_NONE; goto out; } } else if (perm & P9_STAT_MODE_DEVICE) { char ctype; uint32_t major, minor; mode_t nmode = 0; if (sscanf(extension.data, "%c %u %u", &ctype, &major, &minor) != 3) { err = -errno; goto out; } switch (ctype) { case 'c': nmode = S_IFCHR; break; case 'b': nmode = S_IFBLK; break; default: err = -EIO; goto out; } nmode |= perm & 0777; err = v9fs_co_mknod(pdu, fidp, &name, fidp->uid, -1, makedev(major, minor), nmode, &stbuf); if (err < 0) { goto out; } err = v9fs_co_name_to_path(pdu, &fidp->path, name.data, &path); if (err < 0) { goto out; } v9fs_path_copy(&fidp->path, &path); } else if (perm & P9_STAT_MODE_NAMED_PIPE) { err = v9fs_co_mknod(pdu, fidp, &name, fidp->uid, -1, 0, S_IFIFO | (perm & 0777), &stbuf); if (err < 0) { goto out; } err = v9fs_co_name_to_path(pdu, &fidp->path, name.data, &path); if (err < 0) { goto out; } v9fs_path_copy(&fidp->path, &path); } else if (perm & P9_STAT_MODE_SOCKET) { err = v9fs_co_mknod(pdu, fidp, &name, fidp->uid, -1, 0, S_IFSOCK | (perm & 0777), &stbuf); if (err < 0) { goto out; } err = v9fs_co_name_to_path(pdu, &fidp->path, name.data, &path); if (err < 0) { goto out; } v9fs_path_copy(&fidp->path, &path); } else { err = v9fs_co_open2(pdu, fidp, &name, -1, omode_to_uflags(mode)|O_CREAT, perm, &stbuf); if (err < 0) { goto out; } fidp->fid_type = P9_FID_FILE; fidp->open_flags = omode_to_uflags(mode); if (fidp->open_flags & O_EXCL) { /* * We let the host file system do O_EXCL check * We should not reclaim such fd */ fidp->flags |= FID_NON_RECLAIMABLE; } } iounit = get_iounit(pdu, &fidp->path); stat_to_qid(&stbuf, &qid); err = pdu_marshal(pdu, offset, "Qd", &qid, iounit); if (err < 0) { goto out; } err += offset; trace_v9fs_create_return(pdu->tag, pdu->id, qid.type, qid.version, qid.path, iounit); out: put_fid(pdu, fidp); out_nofid: pdu_complete(pdu, err); v9fs_string_free(&name); v9fs_string_free(&extension); v9fs_path_free(&path); }
true
qemu
d63fb193e71644a073b77ff5ac6f1216f2f6cf6e
6,898
static int copy_cell(Indeo3DecodeContext *ctx, Plane *plane, Cell *cell) { int h, w, mv_x, mv_y, offset, offset_dst; uint8_t *src, *dst; /* setup output and reference pointers */ offset_dst = (cell->ypos << 2) * plane->pitch + (cell->xpos << 2); dst = plane->pixels[ctx->buf_sel] + offset_dst; mv_y = cell->mv_ptr[0]; mv_x = cell->mv_ptr[1]; /* -1 because there is an extra line on top for prediction */ if ((cell->ypos << 2) + mv_y < -1 || (cell->xpos << 2) + mv_x < 0 || ((cell->ypos + cell->height) << 2) + mv_y >= plane->height || ((cell->xpos + cell->width) << 2) + mv_x >= plane->width) { av_log(ctx->avctx, AV_LOG_ERROR, "Motion vectors point out of the frame.\n"); return AVERROR_INVALIDDATA; } offset = offset_dst + mv_y * plane->pitch + mv_x; src = plane->pixels[ctx->buf_sel ^ 1] + offset; h = cell->height << 2; for (w = cell->width; w > 0;) { /* copy using 16xH blocks */ if (!((cell->xpos << 2) & 15) && w >= 4) { for (; w >= 4; src += 16, dst += 16, w -= 4) ctx->hdsp.put_no_rnd_pixels_tab[0][0](dst, src, plane->pitch, h); } /* copy using 8xH blocks */ if (!((cell->xpos << 2) & 7) && w >= 2) { ctx->hdsp.put_no_rnd_pixels_tab[1][0](dst, src, plane->pitch, h); w -= 2; src += 8; dst += 8; } if (w >= 1) { ctx->hdsp.put_no_rnd_pixels_tab[2][0](dst, src, plane->pitch, h); w--; src += 4; dst += 4; } } return 0; }
true
FFmpeg
95220be1faac628d849a004644c0d102df0aa98b
6,899
static int serial_can_receive(SerialState *s) { return !(s->lsr & UART_LSR_DR); }
true
qemu
81174dae3f9189519cd60c7b79e91c291b021bbe
6,900
static void mov_parse_stsd_subtitle(MOVContext *c, AVIOContext *pb, AVStream *st, MOVStreamContext *sc, int size) { // ttxt stsd contains display flags, justification, background // color, fonts, and default styles, so fake an atom to read it MOVAtom fake_atom = { .size = size }; // mp4s contains a regular esds atom if (st->codec->codec_tag != AV_RL32("mp4s")) mov_read_glbl(c, pb, fake_atom); st->codec->width = sc->width; st->codec->height = sc->height; }
true
FFmpeg
be9ce6e10a8d53b8bc346c9337d75a5a30631a2a
6,901
static void virgl_cmd_get_capset_info(VirtIOGPU *g, struct virtio_gpu_ctrl_command *cmd) { struct virtio_gpu_get_capset_info info; struct virtio_gpu_resp_capset_info resp; VIRTIO_GPU_FILL_CMD(info); if (info.capset_index == 0) { resp.capset_id = VIRTIO_GPU_CAPSET_VIRGL; virgl_renderer_get_cap_set(resp.capset_id, &resp.capset_max_version, &resp.capset_max_size); } else { resp.capset_max_version = 0; resp.capset_max_size = 0; } resp.hdr.type = VIRTIO_GPU_RESP_OK_CAPSET_INFO; virtio_gpu_ctrl_response(g, cmd, &resp.hdr, sizeof(resp)); }
true
qemu
42a8dadc74f8982fc269e54e3c5627b54d9f83d8
6,902
static int virtio_rng_pci_init(VirtIOPCIProxy *vpci_dev) { VirtIORngPCI *vrng = VIRTIO_RNG_PCI(vpci_dev); DeviceState *vdev = DEVICE(&vrng->vdev); qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus)); if (qdev_init(vdev) < 0) { return -1; } object_property_set_link(OBJECT(vrng), OBJECT(vrng->vdev.conf.default_backend), "rng", NULL); return 0; }
true
qemu
5b456438f5bb395ed6b1eec95e18ce7a7a884a0a
6,903
static int usb_linux_update_endp_table(USBHostDevice *s) { uint8_t *descriptors; uint8_t devep, type, configuration, alt_interface; struct usb_ctrltransfer ct; int interface, ret, length, i; ct.bRequestType = USB_DIR_IN; ct.bRequest = USB_REQ_GET_CONFIGURATION; ct.wValue = 0; ct.wIndex = 0; ct.wLength = 1; ct.data = &configuration; ct.timeout = 50; ret = ioctl(s->fd, USBDEVFS_CONTROL, &ct); if (ret < 0) { perror("usb_linux_update_endp_table"); return 1; } /* in address state */ if (configuration == 0) return 1; /* get the desired configuration, interface, and endpoint descriptors * from device description */ descriptors = &s->descr[18]; length = s->descr_len - 18; i = 0; if (descriptors[i + 1] != USB_DT_CONFIG || descriptors[i + 5] != configuration) { DPRINTF("invalid descriptor data - configuration\n"); return 1; } i += descriptors[i]; while (i < length) { if (descriptors[i + 1] != USB_DT_INTERFACE || (descriptors[i + 1] == USB_DT_INTERFACE && descriptors[i + 4] == 0)) { i += descriptors[i]; continue; } interface = descriptors[i + 2]; ct.bRequestType = USB_DIR_IN | USB_RECIP_INTERFACE; ct.bRequest = USB_REQ_GET_INTERFACE; ct.wValue = 0; ct.wIndex = interface; ct.wLength = 1; ct.data = &alt_interface; ct.timeout = 50; ret = ioctl(s->fd, USBDEVFS_CONTROL, &ct); if (ret < 0) { alt_interface = interface; } /* the current interface descriptor is the active interface * and has endpoints */ if (descriptors[i + 3] != alt_interface) { i += descriptors[i]; continue; } /* advance to the endpoints */ while (i < length && descriptors[i +1] != USB_DT_ENDPOINT) i += descriptors[i]; if (i >= length) break; while (i < length) { if (descriptors[i + 1] != USB_DT_ENDPOINT) break; devep = descriptors[i + 2]; switch (descriptors[i + 3] & 0x3) { case 0x00: type = USBDEVFS_URB_TYPE_CONTROL; break; case 0x01: type = USBDEVFS_URB_TYPE_ISO; break; case 0x02: type = USBDEVFS_URB_TYPE_BULK; break; case 0x03: type = USBDEVFS_URB_TYPE_INTERRUPT; break; } s->endp_table[(devep & 0xf) - 1].type = type; s->endp_table[(devep & 0xf) - 1].halted = 0; i += descriptors[i]; } } return 0; }
true
qemu
ddbda4323e2bdc7cb3925b0f4080f9eb836a09e8
6,904
AVStream *add_av_stream1(FFStream *stream, AVCodecContext *codec) { AVStream *fst; fst = av_mallocz(sizeof(AVStream)); if (!fst) return NULL; fst->priv_data = av_mallocz(sizeof(FeedData)); memcpy(&fst->codec, codec, sizeof(AVCodecContext)); stream->streams[stream->nb_streams++] = fst; return fst; }
true
FFmpeg
a4d70941cd4a82f7db9fbaa2148d60ce550e7611
6,905
static av_cold int ljpeg_encode_init(AVCodecContext *avctx) { LJpegEncContext *s = avctx->priv_data; if ((avctx->pix_fmt == AV_PIX_FMT_YUV420P || avctx->pix_fmt == AV_PIX_FMT_YUV422P || avctx->pix_fmt == AV_PIX_FMT_YUV444P || avctx->color_range == AVCOL_RANGE_MPEG) && avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL) { av_log(avctx, AV_LOG_ERROR, "Limited range YUV is non-standard, set strict_std_compliance to " "at least unofficial to use it.\n"); return AVERROR(EINVAL); } avctx->coded_frame = av_frame_alloc(); if (!avctx->coded_frame) return AVERROR(ENOMEM); avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I; avctx->coded_frame->key_frame = 1; s->scratch = av_malloc_array(avctx->width + 1, sizeof(*s->scratch)); if (!s->scratch) goto fail; ff_idctdsp_init(&s->idsp, avctx); ff_init_scantable(s->idsp.idct_permutation, &s->scantable, ff_zigzag_direct); ff_mjpeg_init_hvsample(avctx, s->hsample, s->vsample); ff_mjpeg_build_huffman_codes(s->huff_size_dc_luminance, s->huff_code_dc_luminance, avpriv_mjpeg_bits_dc_luminance, avpriv_mjpeg_val_dc); ff_mjpeg_build_huffman_codes(s->huff_size_dc_chrominance, s->huff_code_dc_chrominance, avpriv_mjpeg_bits_dc_chrominance, avpriv_mjpeg_val_dc); return 0; fail: av_frame_free(&avctx->coded_frame); return AVERROR(ENOMEM); }
true
FFmpeg
bd12aa2bc597f08409bede6d4c710eddf4d7c142
6,906
void net_host_device_remove(Monitor *mon, int vlan_id, const char *device) { VLANState *vlan; VLANClientState *vc; vlan = qemu_find_vlan(vlan_id); if (!vlan) { monitor_printf(mon, "can't find vlan %d\n", vlan_id); return; } for(vc = vlan->first_client; vc != NULL; vc = vc->next) if (!strcmp(vc->name, device)) break; if (!vc) { monitor_printf(mon, "can't find device %s\n", device); return; } qemu_del_vlan_client(vc); }
true
qemu
9036de1a8ad6c565a4e5d8cd124ad8dd05e7d4d4
6,907
static inline PageDesc *page_find_alloc(target_ulong index) { PageDesc **lp, *p; #if TARGET_LONG_BITS > 32 /* Host memory outside guest VM. For 32-bit targets we have already excluded high addresses. */ if (index > ((target_ulong)L2_SIZE * L1_SIZE)) return NULL; #endif lp = &l1_map[index >> L2_BITS]; p = *lp; if (!p) { /* allocate if not found */ #if defined(CONFIG_USER_ONLY) unsigned long addr; size_t len = sizeof(PageDesc) * L2_SIZE; /* Don't use qemu_malloc because it may recurse. */ p = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); *lp = p; addr = h2g(p); if (addr == (target_ulong)addr) { page_set_flags(addr & TARGET_PAGE_MASK, TARGET_PAGE_ALIGN(addr + len), PAGE_RESERVED); } #else p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE); *lp = p; #endif } return p + (index & (L2_SIZE - 1)); }
true
qemu
434929bf11f0573d953c24287badbc2431a042ef
6,908
static int asf_write_trailer(AVFormatContext *s) { ASFContext *asf = s->priv_data; int64_t file_size, data_size; /* flush the current packet */ if (asf->pb.buf_ptr > asf->pb.buffer) flush_packet(s); /* write index */ data_size = avio_tell(s->pb); if ((!asf->is_streamed) && (asf->nb_index_count != 0)) asf_write_index(s, asf->index_ptr, asf->maximum_packet, asf->nb_index_count); avio_flush(s->pb); if (asf->is_streamed || !s->pb->seekable) { put_chunk(s, 0x4524, 0, 0); /* end of stream */ } else { /* rewrite an updated header */ file_size = avio_tell(s->pb); avio_seek(s->pb, 0, SEEK_SET); asf_write_header1(s, file_size, data_size - asf->data_offset); } av_free(asf->index_ptr); return 0; }
false
FFmpeg
83548fe894cdb455cc127f754d09905b6d23c173
6,909
static void dvbsub_parse_display_definition_segment(AVCodecContext *avctx, const uint8_t *buf, int buf_size) { DVBSubContext *ctx = avctx->priv_data; DVBSubDisplayDefinition *display_def = ctx->display_definition; int dds_version, info_byte; if (buf_size < 5) return; info_byte = bytestream_get_byte(&buf); dds_version = info_byte >> 4; if (display_def && display_def->version == dds_version) return; // already have this display definition version if (!display_def) { display_def = av_mallocz(sizeof(*display_def)); ctx->display_definition = display_def; } if (!display_def) return; display_def->version = dds_version; display_def->x = 0; display_def->y = 0; display_def->width = bytestream_get_be16(&buf) + 1; display_def->height = bytestream_get_be16(&buf) + 1; if (buf_size < 13) return; if (info_byte & 1<<3) { // display_window_flag display_def->x = bytestream_get_be16(&buf); display_def->y = bytestream_get_be16(&buf); display_def->width = bytestream_get_be16(&buf) - display_def->x + 1; display_def->height = bytestream_get_be16(&buf) - display_def->y + 1; } }
false
FFmpeg
607ad990d31e6be52980970e5ce8cd25ab3de812
6,910
static int synchronize_audio(VideoState *is, int nb_samples) { int wanted_nb_samples = nb_samples; /* if not master, then we try to remove or add samples to correct the clock */ if (get_master_sync_type(is) != AV_SYNC_AUDIO_MASTER) { double diff, avg_diff; int min_nb_samples, max_nb_samples; diff = get_audio_clock(is) - get_master_clock(is); if (fabs(diff) < AV_NOSYNC_THRESHOLD) { is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum; if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) { /* not enough measures to have a correct estimate */ is->audio_diff_avg_count++; } else { /* estimate the A-V difference */ avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef); if (fabs(avg_diff) >= is->audio_diff_threshold) { wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq); min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100)); max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100)); wanted_nb_samples = FFMIN(FFMAX(wanted_nb_samples, min_nb_samples), max_nb_samples); } av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f %f\n", diff, avg_diff, wanted_nb_samples - nb_samples, is->audio_clock, is->audio_diff_threshold); } } else { /* too big difference : may be initial PTS errors, so reset A-V filter */ is->audio_diff_avg_count = 0; is->audio_diff_cum = 0; } } return wanted_nb_samples; }
false
FFmpeg
26c208cf0ff59efd7786528884a64d35fc42e9bf
6,911
int ff_alloc_packet(AVPacket *avpkt, int size) { if (size > INT_MAX - FF_INPUT_BUFFER_PADDING_SIZE) return AVERROR(EINVAL); if (avpkt->data) { void *destruct = avpkt->destruct; if (avpkt->size < size) return AVERROR(EINVAL); av_init_packet(avpkt); avpkt->destruct = destruct; avpkt->size = size; return 0; } else { return av_new_packet(avpkt, size); } }
false
FFmpeg
00663de3b752fc3bdd47d4516ad2fcc720722782
6,912
static int ast_write_trailer(AVFormatContext *s) { AVIOContext *pb = s->pb; ASTMuxContext *ast = s->priv_data; AVCodecContext *enc = s->streams[0]->codec; int64_t file_size = avio_tell(pb); int64_t samples = (file_size - 64 - (32 * enc->frame_number)) / enc->block_align; /* PCM_S16BE_PLANAR */ av_log(s, AV_LOG_DEBUG, "total samples: %"PRId64"\n", samples); if (s->pb->seekable) { /* Number of samples */ avio_seek(pb, ast->samples, SEEK_SET); avio_wb32(pb, samples); /* Loopstart if provided */ if (ast->loopstart > 0) { if (ast->loopstart >= samples) { av_log(s, AV_LOG_WARNING, "Loopstart value is out of range and will be ignored\n"); ast->loopstart = -1; avio_skip(pb, 4); } else avio_wb32(pb, ast->loopstart); } else avio_skip(pb, 4); /* Loopend if provided. Otherwise number of samples again */ if (ast->loopend && ast->loopstart >= 0) { if (ast->loopend > samples) { av_log(s, AV_LOG_WARNING, "Loopend value is out of range and will be ignored\n"); ast->loopend = samples; } avio_wb32(pb, ast->loopend); } else { avio_wb32(pb, samples); } /* Size of first block */ avio_wb32(pb, ast->fbs); /* File size minus header */ avio_seek(pb, ast->size, SEEK_SET); avio_wb32(pb, file_size - 64); /* Loop flag */ if (ast->loopstart >= 0) { avio_skip(pb, 6); avio_wb16(pb, 0xFFFF); } avio_seek(pb, file_size, SEEK_SET); avio_flush(pb); } return 0; }
false
FFmpeg
47550e62bf717ed626b652bd3797fcae0ca8c335
6,913
int ff_h264_field_end(H264Context *h, H264SliceContext *sl, int in_setup) { AVCodecContext *const avctx = h->avctx; int err = 0; h->mb_y = 0; if (!in_setup && !h->droppable) ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, h->picture_structure == PICT_BOTTOM_FIELD); if (in_setup || !(avctx->active_thread_type & FF_THREAD_FRAME)) { if (!h->droppable) { err = ff_h264_execute_ref_pic_marking(h, h->mmco, h->mmco_index); h->prev_poc_msb = h->poc_msb; h->prev_poc_lsb = h->poc_lsb; } h->prev_frame_num_offset = h->frame_num_offset; h->prev_frame_num = h->frame_num; } if (avctx->hwaccel) { if (avctx->hwaccel->end_frame(avctx) < 0) av_log(avctx, AV_LOG_ERROR, "hardware accelerator failed to decode picture\n"); } #if CONFIG_ERROR_RESILIENCE /* * FIXME: Error handling code does not seem to support interlaced * when slices span multiple rows * The ff_er_add_slice calls don't work right for bottom * fields; they cause massive erroneous error concealing * Error marking covers both fields (top and bottom). * This causes a mismatched s->error_count * and a bad error table. Further, the error count goes to * INT_MAX when called for bottom field, because mb_y is * past end by one (callers fault) and resync_mb_y != 0 * causes problems for the first MB line, too. */ if (!FIELD_PICTURE(h) && h->enable_er) { h264_set_erpic(&sl->er.cur_pic, h->cur_pic_ptr); h264_set_erpic(&sl->er.last_pic, sl->ref_count[0] ? sl->ref_list[0][0].parent : NULL); h264_set_erpic(&sl->er.next_pic, sl->ref_count[1] ? sl->ref_list[1][0].parent : NULL); ff_er_frame_end(&sl->er); } #endif /* CONFIG_ERROR_RESILIENCE */ emms_c(); h->current_slice = 0; return err; }
false
FFmpeg
c8dcff0cdb17d0aa03ac729eba12d1a20f1f59c8
6,914
static int audio_read_packet(AVFormatContext *s1, AVPacket *pkt) { AlsaData *s = s1->priv_data; AVStream *st = s1->streams[0]; int res; snd_htimestamp_t timestamp; snd_pcm_uframes_t ts_delay; if (av_new_packet(pkt, s->period_size) < 0) { return AVERROR(EIO); } while ((res = snd_pcm_readi(s->h, pkt->data, pkt->size / s->frame_size)) < 0) { if (res == -EAGAIN) { av_free_packet(pkt); return AVERROR(EAGAIN); } if (ff_alsa_xrun_recover(s1, res) < 0) { av_log(s1, AV_LOG_ERROR, "ALSA read error: %s\n", snd_strerror(res)); av_free_packet(pkt); return AVERROR(EIO); } } snd_pcm_htimestamp(s->h, &ts_delay, &timestamp); ts_delay += res; pkt->pts = timestamp.tv_sec * 1000000LL + (timestamp.tv_nsec * st->codec->sample_rate - ts_delay * 1000000000LL + st->codec->sample_rate * 500LL) / (st->codec->sample_rate * 1000LL); pkt->size = res * s->frame_size; return 0; }
false
FFmpeg
089fac77a6bf9199a5ec161e9c27850f0a680541
6,915
static void RENAME(uyvytoyuv422)(uint8_t *ydst, uint8_t *udst, uint8_t *vdst, const uint8_t *src, long width, long height, long lumStride, long chromStride, long srcStride) { long y; const long chromWidth= -((-width)>>1); for (y=0; y<height; y++) { RENAME(extract_even)(src+1, ydst, width); RENAME(extract_even2)(src, udst, vdst, chromWidth); src += srcStride; ydst+= lumStride; udst+= chromStride; vdst+= chromStride; } #if COMPILE_TEMPLATE_MMX __asm__( EMMS" \n\t" SFENCE" \n\t" ::: "memory" ); #endif }
false
FFmpeg
d1adad3cca407f493c3637e20ecd4f7124e69212
6,916
static int mpc8_decode_init(AVCodecContext * avctx) { int i; MPCContext *c = avctx->priv_data; GetBitContext gb; static int vlc_inited = 0; if(avctx->extradata_size < 2){ av_log(avctx, AV_LOG_ERROR, "Too small extradata size (%i)!\n", avctx->extradata_size); return -1; } memset(c->oldDSCF, 0, sizeof(c->oldDSCF)); av_init_random(0xDEADBEEF, &c->rnd); dsputil_init(&c->dsp, avctx); ff_mpc_init(); init_get_bits(&gb, avctx->extradata, 16); skip_bits(&gb, 3);//sample rate c->maxbands = get_bits(&gb, 5) + 1; skip_bits(&gb, 4);//channels c->MSS = get_bits1(&gb); c->frames = 1 << (get_bits(&gb, 3) * 2); if(vlc_inited) return 0; av_log(avctx, AV_LOG_DEBUG, "Initing VLC\n"); init_vlc(&band_vlc, MPC8_BANDS_BITS, MPC8_BANDS_SIZE, mpc8_bands_bits, 1, 1, mpc8_bands_codes, 1, 1, INIT_VLC_USE_STATIC); init_vlc(&q1_vlc, MPC8_Q1_BITS, MPC8_Q1_SIZE, mpc8_q1_bits, 1, 1, mpc8_q1_codes, 1, 1, INIT_VLC_USE_STATIC); init_vlc(&q9up_vlc, MPC8_Q9UP_BITS, MPC8_Q9UP_SIZE, mpc8_q9up_bits, 1, 1, mpc8_q9up_codes, 1, 1, INIT_VLC_USE_STATIC); init_vlc(&scfi_vlc[0], MPC8_SCFI0_BITS, MPC8_SCFI0_SIZE, mpc8_scfi0_bits, 1, 1, mpc8_scfi0_codes, 1, 1, INIT_VLC_USE_STATIC); init_vlc(&scfi_vlc[1], MPC8_SCFI1_BITS, MPC8_SCFI1_SIZE, mpc8_scfi1_bits, 1, 1, mpc8_scfi1_codes, 1, 1, INIT_VLC_USE_STATIC); init_vlc(&dscf_vlc[0], MPC8_DSCF0_BITS, MPC8_DSCF0_SIZE, mpc8_dscf0_bits, 1, 1, mpc8_dscf0_codes, 1, 1, INIT_VLC_USE_STATIC); init_vlc(&dscf_vlc[1], MPC8_DSCF1_BITS, MPC8_DSCF1_SIZE, mpc8_dscf1_bits, 1, 1, mpc8_dscf1_codes, 1, 1, INIT_VLC_USE_STATIC); init_vlc_sparse(&q3_vlc[0], MPC8_Q3_BITS, MPC8_Q3_SIZE, mpc8_q3_bits, 1, 1, mpc8_q3_codes, 1, 1, mpc8_q3_syms, 1, 1, INIT_VLC_USE_STATIC); init_vlc_sparse(&q3_vlc[1], MPC8_Q4_BITS, MPC8_Q4_SIZE, mpc8_q4_bits, 1, 1, mpc8_q4_codes, 1, 1, mpc8_q4_syms, 1, 1, INIT_VLC_USE_STATIC); for(i = 0; i < 2; i++){ init_vlc(&res_vlc[i], MPC8_RES_BITS, MPC8_RES_SIZE, &mpc8_res_bits[i], 1, 1, &mpc8_res_codes[i], 1, 1, INIT_VLC_USE_STATIC); init_vlc(&q2_vlc[i], MPC8_Q2_BITS, MPC8_Q2_SIZE, &mpc8_q2_bits[i], 1, 1, &mpc8_q2_codes[i], 1, 1, INIT_VLC_USE_STATIC); init_vlc(&quant_vlc[0][i], MPC8_Q5_BITS, MPC8_Q5_SIZE, &mpc8_q5_bits[i], 1, 1, &mpc8_q5_codes[i], 1, 1, INIT_VLC_USE_STATIC); init_vlc(&quant_vlc[1][i], MPC8_Q6_BITS, MPC8_Q6_SIZE, &mpc8_q6_bits[i], 1, 1, &mpc8_q6_codes[i], 1, 1, INIT_VLC_USE_STATIC); init_vlc(&quant_vlc[2][i], MPC8_Q7_BITS, MPC8_Q7_SIZE, &mpc8_q7_bits[i], 1, 1, &mpc8_q7_codes[i], 1, 1, INIT_VLC_USE_STATIC); init_vlc(&quant_vlc[3][i], MPC8_Q8_BITS, MPC8_Q8_SIZE, &mpc8_q8_bits[i], 1, 1, &mpc8_q8_codes[i], 1, 1, INIT_VLC_USE_STATIC); } vlc_inited = 1; return 0; }
false
FFmpeg
5e53486545726987ab4482321d4dcf7e23e7652f
6,918
static int raw_pwrite_aligned(BlockDriverState *bs, int64_t offset, const uint8_t *buf, int count) { BDRVRawState *s = bs->opaque; int ret; ret = fd_open(bs); if (ret < 0) return -errno; ret = pwrite(s->fd, buf, count, offset); if (ret == count) goto label__raw_write__success; DEBUG_BLOCK_PRINT("raw_pwrite(%d:%s, %" PRId64 ", %p, %d) [%" PRId64 "] write failed %d : %d = %s\n", s->fd, bs->filename, offset, buf, count, bs->total_sectors, ret, errno, strerror(errno)); label__raw_write__success: return (ret < 0) ? -errno : ret; }
false
qemu
65d21bc73bda6515fd9b4ff5b2e90454f7a0b419
6,920
void bdrv_release_dirty_bitmap(BlockDriverState *bs, BdrvDirtyBitmap *bitmap) { BdrvDirtyBitmap *bm, *next; QLIST_FOREACH_SAFE(bm, &bs->dirty_bitmaps, list, next) { if (bm == bitmap) { assert(!bdrv_dirty_bitmap_frozen(bm)); QLIST_REMOVE(bitmap, list); hbitmap_free(bitmap->bitmap); g_free(bitmap->name); g_free(bitmap); return; } } }
false
qemu
c5acdc9ab4e6aa9b05e6242114479333b15d496b
6,922
void memory_region_init_ram(MemoryRegion *mr, Object *owner, const char *name, uint64_t size) { memory_region_init(mr, owner, name, size); mr->ram = true; mr->terminates = true; mr->destructor = memory_region_destructor_ram; mr->ram_addr = qemu_ram_alloc(size, mr); }
false
qemu
e1c57ab86f3c4ea6532b51cfecf32770b45f5e7a
6,923
int ff_h264_decode_slice_header(H264Context *h, H264Context *h0) { unsigned int first_mb_in_slice; unsigned int pps_id; int ret; unsigned int slice_type, tmp, i, j; int last_pic_structure, last_pic_droppable; int must_reinit; int needs_reinit = 0; int field_pic_flag, bottom_field_flag; h->qpel_put = h->h264qpel.put_h264_qpel_pixels_tab; h->qpel_avg = h->h264qpel.avg_h264_qpel_pixels_tab; first_mb_in_slice = get_ue_golomb_long(&h->gb); if (first_mb_in_slice == 0) { // FIXME better field boundary detection if (h0->current_slice && h->cur_pic_ptr && FIELD_PICTURE(h)) { ff_h264_field_end(h, 1); } h0->current_slice = 0; if (!h0->first_field) { if (h->cur_pic_ptr && !h->droppable) { ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, h->picture_structure == PICT_BOTTOM_FIELD); } h->cur_pic_ptr = NULL; } } slice_type = get_ue_golomb_31(&h->gb); if (slice_type > 9) { av_log(h->avctx, AV_LOG_ERROR, "slice type %d too large at %d %d\n", slice_type, h->mb_x, h->mb_y); return AVERROR_INVALIDDATA; } if (slice_type > 4) { slice_type -= 5; h->slice_type_fixed = 1; } else h->slice_type_fixed = 0; slice_type = golomb_to_pict_type[slice_type]; h->slice_type = slice_type; h->slice_type_nos = slice_type & 3; if (h->nal_unit_type == NAL_IDR_SLICE && h->slice_type_nos != AV_PICTURE_TYPE_I) { av_log(h->avctx, AV_LOG_ERROR, "A non-intra slice in an IDR NAL unit.\n"); return AVERROR_INVALIDDATA; } if ( (h->avctx->skip_frame >= AVDISCARD_NONREF && !h->nal_ref_idc) || (h->avctx->skip_frame >= AVDISCARD_BIDIR && h->slice_type_nos == AV_PICTURE_TYPE_B) || (h->avctx->skip_frame >= AVDISCARD_NONINTRA && h->slice_type_nos != AV_PICTURE_TYPE_I) || (h->avctx->skip_frame >= AVDISCARD_NONKEY && h->nal_unit_type != NAL_IDR_SLICE) || h->avctx->skip_frame >= AVDISCARD_ALL) { return SLICE_SKIPED; } // to make a few old functions happy, it's wrong though h->pict_type = h->slice_type; pps_id = get_ue_golomb(&h->gb); if (pps_id >= MAX_PPS_COUNT) { av_log(h->avctx, AV_LOG_ERROR, "pps_id %u out of range\n", pps_id); return AVERROR_INVALIDDATA; } if (!h0->pps_buffers[pps_id]) { av_log(h->avctx, AV_LOG_ERROR, "non-existing PPS %u referenced\n", pps_id); return AVERROR_INVALIDDATA; } if (h0->au_pps_id >= 0 && pps_id != h0->au_pps_id) { av_log(h->avctx, AV_LOG_ERROR, "PPS change from %d to %d forbidden\n", h0->au_pps_id, pps_id); return AVERROR_INVALIDDATA; } h->pps = *h0->pps_buffers[pps_id]; if (!h0->sps_buffers[h->pps.sps_id]) { av_log(h->avctx, AV_LOG_ERROR, "non-existing SPS %u referenced\n", h->pps.sps_id); return AVERROR_INVALIDDATA; } if (h->pps.sps_id != h->sps.sps_id || h->pps.sps_id != h->current_sps_id || h0->sps_buffers[h->pps.sps_id]->new) { h->sps = *h0->sps_buffers[h->pps.sps_id]; if (h->mb_width != h->sps.mb_width || h->mb_height != h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag) || h->avctx->bits_per_raw_sample != h->sps.bit_depth_luma || h->cur_chroma_format_idc != h->sps.chroma_format_idc ) needs_reinit = 1; if (h->bit_depth_luma != h->sps.bit_depth_luma || h->chroma_format_idc != h->sps.chroma_format_idc) { h->bit_depth_luma = h->sps.bit_depth_luma; h->chroma_format_idc = h->sps.chroma_format_idc; needs_reinit = 1; } if ((ret = ff_h264_set_parameter_from_sps(h)) < 0) return ret; } h->avctx->profile = ff_h264_get_profile(&h->sps); h->avctx->level = h->sps.level_idc; h->avctx->refs = h->sps.ref_frame_count; must_reinit = (h->context_initialized && ( 16*h->sps.mb_width != h->avctx->coded_width || 16*h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag) != h->avctx->coded_height || h->avctx->bits_per_raw_sample != 
h->sps.bit_depth_luma || h->cur_chroma_format_idc != h->sps.chroma_format_idc || av_cmp_q(h->sps.sar, h->avctx->sample_aspect_ratio) || h->mb_width != h->sps.mb_width || h->mb_height != h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag) )); if (non_j_pixfmt(h0->avctx->pix_fmt) != non_j_pixfmt(get_pixel_format(h0, 0))) must_reinit = 1; h->mb_width = h->sps.mb_width; h->mb_height = h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag); h->mb_num = h->mb_width * h->mb_height; h->mb_stride = h->mb_width + 1; h->b_stride = h->mb_width * 4; h->chroma_y_shift = h->sps.chroma_format_idc <= 1; // 400 uses yuv420p h->width = 16 * h->mb_width; h->height = 16 * h->mb_height; ret = init_dimensions(h); if (ret < 0) return ret; if (h->sps.video_signal_type_present_flag) { h->avctx->color_range = h->sps.full_range>0 ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG; if (h->sps.colour_description_present_flag) { if (h->avctx->colorspace != h->sps.colorspace) needs_reinit = 1; h->avctx->color_primaries = h->sps.color_primaries; h->avctx->color_trc = h->sps.color_trc; h->avctx->colorspace = h->sps.colorspace; } } if (h->context_initialized && (must_reinit || needs_reinit)) { if (h != h0) { av_log(h->avctx, AV_LOG_ERROR, "changing width %d -> %d / height %d -> %d on " "slice %d\n", h->width, h->avctx->coded_width, h->height, h->avctx->coded_height, h0->current_slice + 1); return AVERROR_INVALIDDATA; } ff_h264_flush_change(h); if ((ret = get_pixel_format(h, 1)) < 0) return ret; h->avctx->pix_fmt = ret; av_log(h->avctx, AV_LOG_INFO, "Reinit context to %dx%d, " "pix_fmt: %s\n", h->width, h->height, av_get_pix_fmt_name(h->avctx->pix_fmt)); if ((ret = h264_slice_header_init(h, 1)) < 0) { av_log(h->avctx, AV_LOG_ERROR, "h264_slice_header_init() failed\n"); return ret; } } if (!h->context_initialized) { if (h != h0) { av_log(h->avctx, AV_LOG_ERROR, "Cannot (re-)initialize context during parallel decoding.\n"); return AVERROR_PATCHWELCOME; } if ((ret = get_pixel_format(h, 1)) < 0) return ret; h->avctx->pix_fmt = ret; if ((ret = h264_slice_header_init(h, 0)) < 0) { av_log(h->avctx, AV_LOG_ERROR, "h264_slice_header_init() failed\n"); return ret; } } if (h == h0 && h->dequant_coeff_pps != pps_id) { h->dequant_coeff_pps = pps_id; h264_init_dequant_tables(h); } h->frame_num = get_bits(&h->gb, h->sps.log2_max_frame_num); h->mb_mbaff = 0; h->mb_aff_frame = 0; last_pic_structure = h0->picture_structure; last_pic_droppable = h0->droppable; h->droppable = h->nal_ref_idc == 0; if (h->sps.frame_mbs_only_flag) { h->picture_structure = PICT_FRAME; } else { if (!h->sps.direct_8x8_inference_flag && slice_type == AV_PICTURE_TYPE_B) { av_log(h->avctx, AV_LOG_ERROR, "This stream was generated by a broken encoder, invalid 8x8 inference\n"); return -1; } field_pic_flag = get_bits1(&h->gb); if (field_pic_flag) { bottom_field_flag = get_bits1(&h->gb); h->picture_structure = PICT_TOP_FIELD + bottom_field_flag; } else { h->picture_structure = PICT_FRAME; h->mb_aff_frame = h->sps.mb_aff; } } h->mb_field_decoding_flag = h->picture_structure != PICT_FRAME; if (h0->current_slice != 0) { if (last_pic_structure != h->picture_structure || last_pic_droppable != h->droppable) { av_log(h->avctx, AV_LOG_ERROR, "Changing field mode (%d -> %d) between slices is not allowed\n", last_pic_structure, h->picture_structure); h->picture_structure = last_pic_structure; h->droppable = last_pic_droppable; return AVERROR_INVALIDDATA; } else if (!h0->cur_pic_ptr) { av_log(h->avctx, AV_LOG_ERROR, "unset cur_pic_ptr on slice %d\n", h0->current_slice + 1); return 
AVERROR_INVALIDDATA; } } else { /* Shorten frame num gaps so we don't have to allocate reference * frames just to throw them away */ if (h->frame_num != h->prev_frame_num) { int unwrap_prev_frame_num = h->prev_frame_num; int max_frame_num = 1 << h->sps.log2_max_frame_num; if (unwrap_prev_frame_num > h->frame_num) unwrap_prev_frame_num -= max_frame_num; if ((h->frame_num - unwrap_prev_frame_num) > h->sps.ref_frame_count) { unwrap_prev_frame_num = (h->frame_num - h->sps.ref_frame_count) - 1; if (unwrap_prev_frame_num < 0) unwrap_prev_frame_num += max_frame_num; h->prev_frame_num = unwrap_prev_frame_num; } } /* See if we have a decoded first field looking for a pair... * Here, we're using that to see if we should mark previously * decode frames as "finished". * We have to do that before the "dummy" in-between frame allocation, * since that can modify h->cur_pic_ptr. */ if (h0->first_field) { assert(h0->cur_pic_ptr); assert(h0->cur_pic_ptr->f.buf[0]); assert(h0->cur_pic_ptr->reference != DELAYED_PIC_REF); /* Mark old field/frame as completed */ if (h0->cur_pic_ptr->tf.owner == h0->avctx) { ff_thread_report_progress(&h0->cur_pic_ptr->tf, INT_MAX, last_pic_structure == PICT_BOTTOM_FIELD); } /* figure out if we have a complementary field pair */ if (!FIELD_PICTURE(h) || h->picture_structure == last_pic_structure) { /* Previous field is unmatched. Don't display it, but let it * remain for reference if marked as such. */ if (last_pic_structure != PICT_FRAME) { ff_thread_report_progress(&h0->cur_pic_ptr->tf, INT_MAX, last_pic_structure == PICT_TOP_FIELD); } } else { if (h0->cur_pic_ptr->frame_num != h->frame_num) { /* This and previous field were reference, but had * different frame_nums. Consider this field first in * pair. Throw away previous field except for reference * purposes. */ if (last_pic_structure != PICT_FRAME) { ff_thread_report_progress(&h0->cur_pic_ptr->tf, INT_MAX, last_pic_structure == PICT_TOP_FIELD); } } else { /* Second field in complementary pair */ if (!((last_pic_structure == PICT_TOP_FIELD && h->picture_structure == PICT_BOTTOM_FIELD) || (last_pic_structure == PICT_BOTTOM_FIELD && h->picture_structure == PICT_TOP_FIELD))) { av_log(h->avctx, AV_LOG_ERROR, "Invalid field mode combination %d/%d\n", last_pic_structure, h->picture_structure); h->picture_structure = last_pic_structure; h->droppable = last_pic_droppable; return AVERROR_INVALIDDATA; } else if (last_pic_droppable != h->droppable) { avpriv_request_sample(h->avctx, "Found reference and non-reference fields in the same frame, which"); h->picture_structure = last_pic_structure; h->droppable = last_pic_droppable; return AVERROR_PATCHWELCOME; } } } } while (h->frame_num != h->prev_frame_num && !h0->first_field && h->frame_num != (h->prev_frame_num + 1) % (1 << h->sps.log2_max_frame_num)) { H264Picture *prev = h->short_ref_count ? 
h->short_ref[0] : NULL; av_log(h->avctx, AV_LOG_DEBUG, "Frame num gap %d %d\n", h->frame_num, h->prev_frame_num); if (!h->sps.gaps_in_frame_num_allowed_flag) for(i=0; i<FF_ARRAY_ELEMS(h->last_pocs); i++) h->last_pocs[i] = INT_MIN; ret = h264_frame_start(h); if (ret < 0) { h0->first_field = 0; return ret; } h->prev_frame_num++; h->prev_frame_num %= 1 << h->sps.log2_max_frame_num; h->cur_pic_ptr->frame_num = h->prev_frame_num; h->cur_pic_ptr->invalid_gap = !h->sps.gaps_in_frame_num_allowed_flag; ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 0); ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 1); ret = ff_generate_sliding_window_mmcos(h, 1); if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE)) return ret; ret = ff_h264_execute_ref_pic_marking(h, h->mmco, h->mmco_index); if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE)) return ret; /* Error concealment: If a ref is missing, copy the previous ref * in its place. * FIXME: Avoiding a memcpy would be nice, but ref handling makes * many assumptions about there being no actual duplicates. * FIXME: This does not copy padding for out-of-frame motion * vectors. Given we are concealing a lost frame, this probably * is not noticeable by comparison, but it should be fixed. */ if (h->short_ref_count) { if (prev) { av_image_copy(h->short_ref[0]->f.data, h->short_ref[0]->f.linesize, (const uint8_t **)prev->f.data, prev->f.linesize, h->avctx->pix_fmt, h->mb_width * 16, h->mb_height * 16); h->short_ref[0]->poc = prev->poc + 2; } h->short_ref[0]->frame_num = h->prev_frame_num; } } /* See if we have a decoded first field looking for a pair... * We're using that to see whether to continue decoding in that * frame, or to allocate a new one. */ if (h0->first_field) { assert(h0->cur_pic_ptr); assert(h0->cur_pic_ptr->f.buf[0]); assert(h0->cur_pic_ptr->reference != DELAYED_PIC_REF); /* figure out if we have a complementary field pair */ if (!FIELD_PICTURE(h) || h->picture_structure == last_pic_structure) { /* Previous field is unmatched. Don't display it, but let it * remain for reference if marked as such. */ h0->cur_pic_ptr = NULL; h0->first_field = FIELD_PICTURE(h); } else { if (h0->cur_pic_ptr->frame_num != h->frame_num) { ff_thread_report_progress(&h0->cur_pic_ptr->tf, INT_MAX, h0->picture_structure==PICT_BOTTOM_FIELD); /* This and the previous field had different frame_nums. * Consider this field first in pair. Throw away previous * one except for reference purposes. */ h0->first_field = 1; h0->cur_pic_ptr = NULL; } else { /* Second field in complementary pair */ h0->first_field = 0; } } } else { /* Frame or first field in a potentially complementary pair */ h0->first_field = FIELD_PICTURE(h); } if (!FIELD_PICTURE(h) || h0->first_field) { if (h264_frame_start(h) < 0) { h0->first_field = 0; return AVERROR_INVALIDDATA; } } else { release_unused_pictures(h, 0); } /* Some macroblocks can be accessed before they're available in case * of lost slices, MBAFF or threading. */ if (FIELD_PICTURE(h)) { for(i = (h->picture_structure == PICT_BOTTOM_FIELD); i<h->mb_height; i++) memset(h->slice_table + i*h->mb_stride, -1, (h->mb_stride - (i+1==h->mb_height)) * sizeof(*h->slice_table)); } else { memset(h->slice_table, -1, (h->mb_height * h->mb_stride - 1) * sizeof(*h->slice_table)); } h0->last_slice_type = -1; } if (h != h0 && (ret = clone_slice(h, h0)) < 0) return ret; /* can't be in alloc_tables because linesize isn't known there. * FIXME: redo bipred weight to not require extra buffer? 
*/ for (i = 0; i < h->slice_context_count; i++) if (h->thread_context[i]) { ret = alloc_scratch_buffers(h->thread_context[i], h->linesize); if (ret < 0) return ret; } h->cur_pic_ptr->frame_num = h->frame_num; // FIXME frame_num cleanup av_assert1(h->mb_num == h->mb_width * h->mb_height); if (first_mb_in_slice << FIELD_OR_MBAFF_PICTURE(h) >= h->mb_num || first_mb_in_slice >= h->mb_num) { av_log(h->avctx, AV_LOG_ERROR, "first_mb_in_slice overflow\n"); return AVERROR_INVALIDDATA; } h->resync_mb_x = h->mb_x = first_mb_in_slice % h->mb_width; h->resync_mb_y = h->mb_y = (first_mb_in_slice / h->mb_width) << FIELD_OR_MBAFF_PICTURE(h); if (h->picture_structure == PICT_BOTTOM_FIELD) h->resync_mb_y = h->mb_y = h->mb_y + 1; av_assert1(h->mb_y < h->mb_height); if (h->picture_structure == PICT_FRAME) { h->curr_pic_num = h->frame_num; h->max_pic_num = 1 << h->sps.log2_max_frame_num; } else { h->curr_pic_num = 2 * h->frame_num + 1; h->max_pic_num = 1 << (h->sps.log2_max_frame_num + 1); } if (h->nal_unit_type == NAL_IDR_SLICE) get_ue_golomb(&h->gb); /* idr_pic_id */ if (h->sps.poc_type == 0) { h->poc_lsb = get_bits(&h->gb, h->sps.log2_max_poc_lsb); if (h->pps.pic_order_present == 1 && h->picture_structure == PICT_FRAME) h->delta_poc_bottom = get_se_golomb(&h->gb); } if (h->sps.poc_type == 1 && !h->sps.delta_pic_order_always_zero_flag) { h->delta_poc[0] = get_se_golomb(&h->gb); if (h->pps.pic_order_present == 1 && h->picture_structure == PICT_FRAME) h->delta_poc[1] = get_se_golomb(&h->gb); } ff_init_poc(h, h->cur_pic_ptr->field_poc, &h->cur_pic_ptr->poc); if (h->pps.redundant_pic_cnt_present) h->redundant_pic_count = get_ue_golomb(&h->gb); ret = ff_set_ref_count(h); if (ret < 0) return ret; if (slice_type != AV_PICTURE_TYPE_I && (h0->current_slice == 0 || slice_type != h0->last_slice_type || memcmp(h0->last_ref_count, h0->ref_count, sizeof(h0->ref_count)))) { ff_h264_fill_default_ref_list(h); } if (h->slice_type_nos != AV_PICTURE_TYPE_I) { ret = ff_h264_decode_ref_pic_list_reordering(h); if (ret < 0) { h->ref_count[1] = h->ref_count[0] = 0; return ret; } } if ((h->pps.weighted_pred && h->slice_type_nos == AV_PICTURE_TYPE_P) || (h->pps.weighted_bipred_idc == 1 && h->slice_type_nos == AV_PICTURE_TYPE_B)) ff_pred_weight_table(h); else if (h->pps.weighted_bipred_idc == 2 && h->slice_type_nos == AV_PICTURE_TYPE_B) { implicit_weight_table(h, -1); } else { h->use_weight = 0; for (i = 0; i < 2; i++) { h->luma_weight_flag[i] = 0; h->chroma_weight_flag[i] = 0; } } // If frame-mt is enabled, only update mmco tables for the first slice // in a field. Subsequent slices can temporarily clobber h->mmco_index // or h->mmco, which will cause ref list mix-ups and decoding errors // further down the line. This may break decoding if the first slice is // corrupt, thus we only do this if frame-mt is enabled. 
if (h->nal_ref_idc) { ret = ff_h264_decode_ref_pic_marking(h0, &h->gb, !(h->avctx->active_thread_type & FF_THREAD_FRAME) || h0->current_slice == 0); if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE)) return AVERROR_INVALIDDATA; } if (FRAME_MBAFF(h)) { ff_h264_fill_mbaff_ref_list(h); if (h->pps.weighted_bipred_idc == 2 && h->slice_type_nos == AV_PICTURE_TYPE_B) { implicit_weight_table(h, 0); implicit_weight_table(h, 1); } } if (h->slice_type_nos == AV_PICTURE_TYPE_B && !h->direct_spatial_mv_pred) ff_h264_direct_dist_scale_factor(h); ff_h264_direct_ref_list_init(h); if (h->slice_type_nos != AV_PICTURE_TYPE_I && h->pps.cabac) { tmp = get_ue_golomb_31(&h->gb); if (tmp > 2) { av_log(h->avctx, AV_LOG_ERROR, "cabac_init_idc %u overflow\n", tmp); return AVERROR_INVALIDDATA; } h->cabac_init_idc = tmp; } h->last_qscale_diff = 0; tmp = h->pps.init_qp + get_se_golomb(&h->gb); if (tmp > 51 + 6 * (h->sps.bit_depth_luma - 8)) { av_log(h->avctx, AV_LOG_ERROR, "QP %u out of range\n", tmp); return AVERROR_INVALIDDATA; } h->qscale = tmp; h->chroma_qp[0] = get_chroma_qp(h, 0, h->qscale); h->chroma_qp[1] = get_chroma_qp(h, 1, h->qscale); // FIXME qscale / qp ... stuff if (h->slice_type == AV_PICTURE_TYPE_SP) get_bits1(&h->gb); /* sp_for_switch_flag */ if (h->slice_type == AV_PICTURE_TYPE_SP || h->slice_type == AV_PICTURE_TYPE_SI) get_se_golomb(&h->gb); /* slice_qs_delta */ h->deblocking_filter = 1; h->slice_alpha_c0_offset = 0; h->slice_beta_offset = 0; if (h->pps.deblocking_filter_parameters_present) { tmp = get_ue_golomb_31(&h->gb); if (tmp > 2) { av_log(h->avctx, AV_LOG_ERROR, "deblocking_filter_idc %u out of range\n", tmp); return AVERROR_INVALIDDATA; } h->deblocking_filter = tmp; if (h->deblocking_filter < 2) h->deblocking_filter ^= 1; // 1<->0 if (h->deblocking_filter) { h->slice_alpha_c0_offset = get_se_golomb(&h->gb) * 2; h->slice_beta_offset = get_se_golomb(&h->gb) * 2; if (h->slice_alpha_c0_offset > 12 || h->slice_alpha_c0_offset < -12 || h->slice_beta_offset > 12 || h->slice_beta_offset < -12) { av_log(h->avctx, AV_LOG_ERROR, "deblocking filter parameters %d %d out of range\n", h->slice_alpha_c0_offset, h->slice_beta_offset); return AVERROR_INVALIDDATA; } } } if (h->avctx->skip_loop_filter >= AVDISCARD_ALL || (h->avctx->skip_loop_filter >= AVDISCARD_NONKEY && h->nal_unit_type != NAL_IDR_SLICE) || (h->avctx->skip_loop_filter >= AVDISCARD_NONINTRA && h->slice_type_nos != AV_PICTURE_TYPE_I) || (h->avctx->skip_loop_filter >= AVDISCARD_BIDIR && h->slice_type_nos == AV_PICTURE_TYPE_B) || (h->avctx->skip_loop_filter >= AVDISCARD_NONREF && h->nal_ref_idc == 0)) h->deblocking_filter = 0; if (h->deblocking_filter == 1 && h0->max_contexts > 1) { if (h->avctx->flags2 & CODEC_FLAG2_FAST) { /* Cheat slightly for speed: * Do not bother to deblock across slices. 
*/ h->deblocking_filter = 2; } else { h0->max_contexts = 1; if (!h0->single_decode_warning) { av_log(h->avctx, AV_LOG_INFO, "Cannot parallelize deblocking type 1, decoding such frames in sequential order\n"); h0->single_decode_warning = 1; } if (h != h0) { av_log(h->avctx, AV_LOG_ERROR, "Deblocking switched inside frame.\n"); return SLICE_SINGLETHREAD; } } } h->qp_thresh = 15 - FFMIN(h->slice_alpha_c0_offset, h->slice_beta_offset) - FFMAX3(0, h->pps.chroma_qp_index_offset[0], h->pps.chroma_qp_index_offset[1]) + 6 * (h->sps.bit_depth_luma - 8); h0->last_slice_type = slice_type; memcpy(h0->last_ref_count, h0->ref_count, sizeof(h0->last_ref_count)); h->slice_num = ++h0->current_slice; if (h->slice_num) h0->slice_row[(h->slice_num-1)&(MAX_SLICES-1)]= h->resync_mb_y; if ( h0->slice_row[h->slice_num&(MAX_SLICES-1)] + 3 >= h->resync_mb_y && h0->slice_row[h->slice_num&(MAX_SLICES-1)] <= h->resync_mb_y && h->slice_num >= MAX_SLICES) { //in case of ASO this check needs to be updated depending on how we decide to assign slice numbers in this case av_log(h->avctx, AV_LOG_WARNING, "Possibly too many slices (%d >= %d), increase MAX_SLICES and recompile if there are artifacts\n", h->slice_num, MAX_SLICES); } for (j = 0; j < 2; j++) { int id_list[16]; int *ref2frm = h->ref2frm[h->slice_num & (MAX_SLICES - 1)][j]; for (i = 0; i < 16; i++) { id_list[i] = 60; if (j < h->list_count && i < h->ref_count[j] && h->ref_list[j][i].f.buf[0]) { int k; AVBuffer *buf = h->ref_list[j][i].f.buf[0]->buffer; for (k = 0; k < h->short_ref_count; k++) if (h->short_ref[k]->f.buf[0]->buffer == buf) { id_list[i] = k; break; } for (k = 0; k < h->long_ref_count; k++) if (h->long_ref[k] && h->long_ref[k]->f.buf[0]->buffer == buf) { id_list[i] = h->short_ref_count + k; break; } } } ref2frm[0] = ref2frm[1] = -1; for (i = 0; i < 16; i++) ref2frm[i + 2] = 4 * id_list[i] + (h->ref_list[j][i].reference & 3); ref2frm[18 + 0] = ref2frm[18 + 1] = -1; for (i = 16; i < 48; i++) ref2frm[i + 4] = 4 * id_list[(i - 16) >> 1] + (h->ref_list[j][i].reference & 3); } if (h->ref_count[0]) ff_h264_set_erpic(&h->er.last_pic, &h->ref_list[0][0]); if (h->ref_count[1]) ff_h264_set_erpic(&h->er.next_pic, &h->ref_list[1][0]); h->er.ref_count = h->ref_count[0]; h0->au_pps_id = pps_id; h->sps.new = h0->sps_buffers[h->pps.sps_id]->new = 0; h->current_sps_id = h->pps.sps_id; if (h->avctx->debug & FF_DEBUG_PICT_INFO) { av_log(h->avctx, AV_LOG_DEBUG, "slice:%d %s mb:%d %c%s%s pps:%u frame:%d poc:%d/%d ref:%d/%d qp:%d loop:%d:%d:%d weight:%d%s %s\n", h->slice_num, (h->picture_structure == PICT_FRAME ? "F" : h->picture_structure == PICT_TOP_FIELD ? "T" : "B"), first_mb_in_slice, av_get_picture_type_char(h->slice_type), h->slice_type_fixed ? " fix" : "", h->nal_unit_type == NAL_IDR_SLICE ? " IDR" : "", pps_id, h->frame_num, h->cur_pic_ptr->field_poc[0], h->cur_pic_ptr->field_poc[1], h->ref_count[0], h->ref_count[1], h->qscale, h->deblocking_filter, h->slice_alpha_c0_offset, h->slice_beta_offset, h->use_weight, h->use_weight == 1 && h->use_weight_chroma ? "c" : "", h->slice_type == AV_PICTURE_TYPE_B ? (h->direct_spatial_mv_pred ? "SPAT" : "TEMP") : ""); } return 0; }
false
FFmpeg
d381109ce8acd84fc16d3dfc4f8f688462d45366
6,924
static void virtio_scsi_request_cancelled(SCSIRequest *r) { VirtIOSCSIReq *req = r->hba_private; if (!req) { return; } if (req->dev->resetting) { req->resp.cmd->response = VIRTIO_SCSI_S_RESET; } else { req->resp.cmd->response = VIRTIO_SCSI_S_ABORTED; } virtio_scsi_complete_cmd_req(req); }
false
qemu
3eff1f46f08a360a4ae9f834ce9fef4c45bf6f0f
6,925
void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome) { const ARMCPRegInfo *ri = rip; int target_el; if (arm_feature(env, ARM_FEATURE_XSCALE) && ri->cp < 14 && extract32(env->cp15.c15_cpar, ri->cp, 1) == 0) { raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env)); } if (!ri->accessfn) { return; } switch (ri->accessfn(env, ri)) { case CP_ACCESS_OK: return; case CP_ACCESS_TRAP: target_el = exception_target_el(env); break; case CP_ACCESS_TRAP_EL2: /* Requesting a trap to EL2 when we're in EL3 or S-EL0/1 is * a bug in the access function. */ assert(!arm_is_secure(env) && !arm_current_el(env) == 3); target_el = 2; break; case CP_ACCESS_TRAP_EL3: target_el = 3; break; case CP_ACCESS_TRAP_UNCATEGORIZED: target_el = exception_target_el(env); syndrome = syn_uncategorized(); break; default: g_assert_not_reached(); } raise_exception(env, EXCP_UDEF, syndrome, target_el); }
false
qemu
3fc827d591679f3e262b9d1f8b34528eabfca8c0
6,926
static void notify_event_cb(void *opaque) { /* No need to do anything; this bottom half is only used to * kick the kernel out of ppoll/poll/WaitForMultipleObjects. */ }
false
qemu
c2b38b277a7882a592f4f2ec955084b2b756daaa
6,928
static int m25p80_init(SSISlave *ss) { DriveInfo *dinfo; Flash *s = M25P80(ss); M25P80Class *mc = M25P80_GET_CLASS(s); s->pi = mc->pi; s->size = s->pi->sector_size * s->pi->n_sectors; s->dirty_page = -1; s->storage = qemu_blockalign(s->bdrv, s->size); dinfo = drive_get_next(IF_MTD); if (dinfo) { DB_PRINT_L(0, "Binding to IF_MTD drive\n"); s->bdrv = blk_bs(blk_by_legacy_dinfo(dinfo)); /* FIXME: Move to late init */ if (bdrv_read(s->bdrv, 0, s->storage, DIV_ROUND_UP(s->size, BDRV_SECTOR_SIZE))) { fprintf(stderr, "Failed to initialize SPI flash!\n"); return 1; } } else { DB_PRINT_L(0, "No BDRV - binding to RAM\n"); memset(s->storage, 0xFF, s->size); } return 0; }
false
qemu
4be746345f13e99e468c60acbd3a355e8183e3ce
6,929
static void sdl_mouse_warp(int x, int y, int on) { if (on) { if (!guest_cursor) sdl_show_cursor(); if (gui_grab || kbd_mouse_is_absolute() || absolute_enabled) { SDL_SetCursor(guest_sprite); SDL_WarpMouse(x, y); } } else if (gui_grab) sdl_hide_cursor(); guest_cursor = on; guest_x = x, guest_y = y; }
false
qemu
08a2d4c4ffde60e48819449f461274c43ad6e2d3
6,930
static int qcow2_cache_do_get(BlockDriverState *bs, Qcow2Cache *c, uint64_t offset, void **table, bool read_from_disk) { BDRVQcowState *s = bs->opaque; int i; int ret; int lookup_index; uint64_t min_lru_counter = UINT64_MAX; int min_lru_index = -1; trace_qcow2_cache_get(qemu_coroutine_self(), c == s->l2_table_cache, offset, read_from_disk); /* Check if the table is already cached */ i = lookup_index = (offset / s->cluster_size * 4) % c->size; do { const Qcow2CachedTable *t = &c->entries[i]; if (t->offset == offset) { goto found; } if (t->ref == 0 && t->lru_counter < min_lru_counter) { min_lru_counter = t->lru_counter; min_lru_index = i; } if (++i == c->size) { i = 0; } } while (i != lookup_index); if (min_lru_index == -1) { /* This can't happen in current synchronous code, but leave the check * here as a reminder for whoever starts using AIO with the cache */ abort(); } /* Cache miss: write a table back and replace it */ i = min_lru_index; trace_qcow2_cache_get_replace_entry(qemu_coroutine_self(), c == s->l2_table_cache, i); if (i < 0) { return i; } ret = qcow2_cache_entry_flush(bs, c, i); if (ret < 0) { return ret; } trace_qcow2_cache_get_read(qemu_coroutine_self(), c == s->l2_table_cache, i); c->entries[i].offset = 0; if (read_from_disk) { if (c == s->l2_table_cache) { BLKDBG_EVENT(bs->file, BLKDBG_L2_LOAD); } ret = bdrv_pread(bs->file, offset, qcow2_cache_get_table_addr(bs, c, i), s->cluster_size); if (ret < 0) { return ret; } } c->entries[i].offset = offset; /* And return the right table */ found: c->entries[i].ref++; *table = qcow2_cache_get_table_addr(bs, c, i); trace_qcow2_cache_get_done(qemu_coroutine_self(), c == s->l2_table_cache, i); return 0; }
false
qemu
1bd84ee717bf146c19281cce48a36a2f4d71748d
6,932
int net_init_socket(QemuOpts *opts, Monitor *mon, const char *name, VLANState *vlan) { if (qemu_opt_get(opts, "fd")) { int fd; if (qemu_opt_get(opts, "listen") || qemu_opt_get(opts, "connect") || qemu_opt_get(opts, "mcast")) { qemu_error("listen=, connect= and mcast= is invalid with fd=\n"); return -1; } fd = net_handle_fd_param(mon, qemu_opt_get(opts, "fd")); if (fd == -1) { return -1; } if (!net_socket_fd_init(vlan, "socket", name, fd, 1)) { close(fd); return -1; } } else if (qemu_opt_get(opts, "listen")) { const char *listen; if (qemu_opt_get(opts, "fd") || qemu_opt_get(opts, "connect") || qemu_opt_get(opts, "mcast")) { qemu_error("fd=, connect= and mcast= is invalid with listen=\n"); return -1; } listen = qemu_opt_get(opts, "listen"); if (net_socket_listen_init(vlan, "socket", name, listen) == -1) { return -1; } } else if (qemu_opt_get(opts, "connect")) { const char *connect; if (qemu_opt_get(opts, "fd") || qemu_opt_get(opts, "listen") || qemu_opt_get(opts, "mcast")) { qemu_error("fd=, listen= and mcast= is invalid with connect=\n"); return -1; } connect = qemu_opt_get(opts, "connect"); if (net_socket_connect_init(vlan, "socket", name, connect) == -1) { return -1; } } else if (qemu_opt_get(opts, "mcast")) { const char *mcast; if (qemu_opt_get(opts, "fd") || qemu_opt_get(opts, "connect") || qemu_opt_get(opts, "listen")) { qemu_error("fd=, connect= and listen= is invalid with mcast=\n"); return -1; } mcast = qemu_opt_get(opts, "mcast"); if (net_socket_mcast_init(vlan, "socket", name, mcast) == -1) { return -1; } } else { qemu_error("-socket requires fd=, listen=, connect= or mcast=\n"); return -1; } if (vlan) { vlan->nb_host_devs++; } return 0; }
false
qemu
62112d181ca33fea976100c4335dfc3e2f727e6c
6,934
int ff_init_filters(SwsContext * c) { int i; int index; int num_ydesc; int num_cdesc; int num_vdesc = isPlanarYUV(c->dstFormat) && !isGray(c->dstFormat) ? 2 : 1; int need_lum_conv = c->lumToYV12 || c->readLumPlanar || c->alpToYV12 || c->readAlpPlanar; int need_chr_conv = c->chrToYV12 || c->readChrPlanar; int srcIdx, dstIdx; int dst_stride = FFALIGN(c->dstW * sizeof(int16_t) + 66, 16); uint32_t * pal = usePal(c->srcFormat) ? c->pal_yuv : (uint32_t*)c->input_rgb2yuv_table; int res = 0; if (c->dstBpc == 16) dst_stride <<= 1; num_ydesc = need_lum_conv ? 2 : 1; num_cdesc = need_chr_conv ? 2 : 1; c->numSlice = FFMAX(num_ydesc, num_cdesc) + 2; c->numDesc = num_ydesc + num_cdesc + num_vdesc; c->descIndex[0] = num_ydesc; c->descIndex[1] = num_ydesc + num_cdesc; c->desc = av_mallocz_array(sizeof(SwsFilterDescriptor), c->numDesc); if (!c->desc) return AVERROR(ENOMEM); c->slice = av_mallocz_array(sizeof(SwsSlice), c->numSlice); res = alloc_slice(&c->slice[0], c->srcFormat, c->srcH, c->chrSrcH, c->chrSrcHSubSample, c->chrSrcVSubSample, 0); if (res < 0) goto cleanup; for (i = 1; i < c->numSlice-2; ++i) { res = alloc_slice(&c->slice[i], c->srcFormat, c->vLumFilterSize + MAX_LINES_AHEAD, c->vChrFilterSize + MAX_LINES_AHEAD, c->chrSrcHSubSample, c->chrSrcVSubSample, 0); if (res < 0) goto cleanup; res = alloc_lines(&c->slice[i], FFALIGN(c->srcW*2+78, 16), c->srcW); if (res < 0) goto cleanup; } // horizontal scaler output res = alloc_slice(&c->slice[i], c->srcFormat, c->vLumFilterSize + MAX_LINES_AHEAD, c->vChrFilterSize + MAX_LINES_AHEAD, c->chrDstHSubSample, c->chrDstVSubSample, 1); if (res < 0) goto cleanup; res = alloc_lines(&c->slice[i], dst_stride, c->dstW); if (res < 0) goto cleanup; fill_ones(&c->slice[i], dst_stride>>1, c->dstBpc == 16); // vertical scaler output ++i; res = alloc_slice(&c->slice[i], c->dstFormat, c->dstH, c->chrDstH, c->chrDstHSubSample, c->chrDstVSubSample, 0); if (res < 0) goto cleanup; index = 0; srcIdx = 0; dstIdx = 1; if (need_lum_conv) { ff_init_desc_fmt_convert(&c->desc[index], &c->slice[srcIdx], &c->slice[dstIdx], pal); c->desc[index].alpha = c->alpPixBuf != 0; ++index; srcIdx = dstIdx; } dstIdx = FFMAX(num_ydesc, num_cdesc); ff_init_desc_hscale(&c->desc[index], &c->slice[index], &c->slice[dstIdx], c->hLumFilter, c->hLumFilterPos, c->hLumFilterSize, c->lumXInc); c->desc[index].alpha = c->alpPixBuf != 0; ++index; { srcIdx = 0; dstIdx = 1; if (need_chr_conv) { ff_init_desc_cfmt_convert(&c->desc[index], &c->slice[srcIdx], &c->slice[dstIdx], pal); ++index; srcIdx = dstIdx; } dstIdx = FFMAX(num_ydesc, num_cdesc); if (c->needs_hcscale) ff_init_desc_chscale(&c->desc[index], &c->slice[srcIdx], &c->slice[dstIdx], c->hChrFilter, c->hChrFilterPos, c->hChrFilterSize, c->chrXInc); else ff_init_desc_no_chr(&c->desc[index], &c->slice[srcIdx], &c->slice[dstIdx]); } ++index; { srcIdx = c->numSlice - 2; dstIdx = c->numSlice - 1; ff_init_vscale(c, c->desc + index, c->slice + srcIdx, c->slice + dstIdx); } return 0; cleanup: ff_free_filters(c); return res; }
false
FFmpeg
f67aff3ad7168b0721e7e5ba05858d885966534a
6,936
void commit_start(const char *job_id, BlockDriverState *bs, BlockDriverState *base, BlockDriverState *top, int64_t speed, BlockdevOnError on_error, const char *backing_file_str, Error **errp) { CommitBlockJob *s; BlockReopenQueue *reopen_queue = NULL; int orig_overlay_flags; int orig_base_flags; BlockDriverState *iter; BlockDriverState *overlay_bs; Error *local_err = NULL; int ret; assert(top != bs); if (top == base) { error_setg(errp, "Invalid files for merge: top and base are the same"); return; } overlay_bs = bdrv_find_overlay(bs, top); if (overlay_bs == NULL) { error_setg(errp, "Could not find overlay image for %s:", top->filename); return; } /* FIXME Use real permissions */ s = block_job_create(job_id, &commit_job_driver, bs, 0, BLK_PERM_ALL, speed, BLOCK_JOB_DEFAULT, NULL, NULL, errp); if (!s) { return; } orig_base_flags = bdrv_get_flags(base); orig_overlay_flags = bdrv_get_flags(overlay_bs); /* convert base & overlay_bs to r/w, if necessary */ if (!(orig_base_flags & BDRV_O_RDWR)) { reopen_queue = bdrv_reopen_queue(reopen_queue, base, NULL, orig_base_flags | BDRV_O_RDWR); } if (!(orig_overlay_flags & BDRV_O_RDWR)) { reopen_queue = bdrv_reopen_queue(reopen_queue, overlay_bs, NULL, orig_overlay_flags | BDRV_O_RDWR); } if (reopen_queue) { bdrv_reopen_multiple(bdrv_get_aio_context(bs), reopen_queue, &local_err); if (local_err != NULL) { error_propagate(errp, local_err); goto fail; } } /* Block all nodes between top and base, because they will * disappear from the chain after this operation. */ assert(bdrv_chain_contains(top, base)); for (iter = top; iter != backing_bs(base); iter = backing_bs(iter)) { /* FIXME Use real permissions */ block_job_add_bdrv(&s->common, "intermediate node", iter, 0, BLK_PERM_ALL, &error_abort); } /* overlay_bs must be blocked because it needs to be modified to * update the backing image string, but if it's the root node then * don't block it again */ if (bs != overlay_bs) { /* FIXME Use real permissions */ block_job_add_bdrv(&s->common, "overlay of top", overlay_bs, 0, BLK_PERM_ALL, &error_abort); } /* FIXME Use real permissions */ s->base = blk_new(0, BLK_PERM_ALL); ret = blk_insert_bs(s->base, base, errp); if (ret < 0) { goto fail; } /* FIXME Use real permissions */ s->top = blk_new(0, BLK_PERM_ALL); ret = blk_insert_bs(s->top, top, errp); if (ret < 0) { goto fail; } s->active = bs; s->base_flags = orig_base_flags; s->orig_overlay_flags = orig_overlay_flags; s->backing_file_str = g_strdup(backing_file_str); s->on_error = on_error; trace_commit_start(bs, base, top, s); block_job_start(&s->common); return; fail: if (s->base) { blk_unref(s->base); } if (s->top) { blk_unref(s->top); } block_job_unref(&s->common); }
false
qemu
8dfba2797761d8a43744e4e6571c8175e448a478
6,937
int load_elf(const char *filename, int64_t virt_to_phys_addend, uint64_t *pentry) { int fd, data_order, must_swab, ret; uint8_t e_ident[EI_NIDENT]; fd = open(filename, O_RDONLY | O_BINARY); if (fd < 0) { perror(filename); return -1; } if (read(fd, e_ident, sizeof(e_ident)) != sizeof(e_ident)) goto fail; if (e_ident[0] != ELFMAG0 || e_ident[1] != ELFMAG1 || e_ident[2] != ELFMAG2 || e_ident[3] != ELFMAG3) goto fail; #ifdef WORDS_BIGENDIAN data_order = ELFDATA2MSB; #else data_order = ELFDATA2LSB; #endif must_swab = data_order != e_ident[EI_DATA]; lseek(fd, 0, SEEK_SET); if (e_ident[EI_CLASS] == ELFCLASS64) { ret = load_elf64(fd, virt_to_phys_addend, must_swab, pentry); } else { ret = load_elf32(fd, virt_to_phys_addend, must_swab, pentry); } close(fd); return ret; fail: close(fd); return -1; }
false
qemu
9042c0e20de166542b603621fd30dc8be95dfd4d
6,938
qemu_inject_x86_mce(CPUState *cenv, int bank, uint64_t status, uint64_t mcg_status, uint64_t addr, uint64_t misc) { uint64_t mcg_cap = cenv->mcg_cap; uint64_t *banks = cenv->mce_banks; /* * if MSR_MCG_CTL is not all 1s, the uncorrected error * reporting is disabled */ if ((status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) && cenv->mcg_ctl != ~(uint64_t)0) { return; } banks += 4 * bank; /* * if MSR_MCi_CTL is not all 1s, the uncorrected error * reporting is disabled for the bank */ if ((status & MCI_STATUS_UC) && banks[0] != ~(uint64_t)0) { return; } if (status & MCI_STATUS_UC) { if ((cenv->mcg_status & MCG_STATUS_MCIP) || !(cenv->cr[4] & CR4_MCE_MASK)) { fprintf(stderr, "injects mce exception while previous " "one is in progress!\n"); qemu_log_mask(CPU_LOG_RESET, "Triple fault\n"); qemu_system_reset_request(); return; } if (banks[1] & MCI_STATUS_VAL) { status |= MCI_STATUS_OVER; } banks[2] = addr; banks[3] = misc; cenv->mcg_status = mcg_status; banks[1] = status; cpu_interrupt(cenv, CPU_INTERRUPT_MCE); } else if (!(banks[1] & MCI_STATUS_VAL) || !(banks[1] & MCI_STATUS_UC)) { if (banks[1] & MCI_STATUS_VAL) { status |= MCI_STATUS_OVER; } banks[2] = addr; banks[3] = misc; banks[1] = status; } else { banks[1] |= MCI_STATUS_OVER; } }
false
qemu
316378e4d0214b45cfeaa01609aca4dabb18d78b
6,939
static USBDevice *usb_msd_init(const char *filename) { static int nr=0; char id[8]; QemuOpts *opts; DriveInfo *dinfo; USBDevice *dev; int fatal_error; const char *p1; char fmt[32]; /* parse -usbdevice disk: syntax into drive opts */ snprintf(id, sizeof(id), "usb%d", nr++); opts = qemu_opts_create(&qemu_drive_opts, id, 0); p1 = strchr(filename, ':'); if (p1++) { const char *p2; if (strstart(filename, "format=", &p2)) { int len = MIN(p1 - p2, sizeof(fmt)); pstrcpy(fmt, len, p2); qemu_opt_set(opts, "format", fmt); } else if (*filename != ':') { printf("unrecognized USB mass-storage option %s\n", filename); return NULL; } filename = p1; } if (!*filename) { printf("block device specification needed\n"); return NULL; } qemu_opt_set(opts, "file", filename); qemu_opt_set(opts, "if", "none"); /* create host drive */ dinfo = drive_init(opts, 0, &fatal_error); if (!dinfo) { qemu_opts_del(opts); return NULL; } /* create guest device */ dev = usb_create(NULL /* FIXME */, "usb-storage"); if (!dev) { return NULL; } qdev_prop_set_drive(&dev->qdev, "drive", dinfo); if (qdev_init(&dev->qdev) < 0) return NULL; return dev; }
false
qemu
f8b6cc0070aab8b75bd082582c829be1353f395f
6,940
static inline void gen_evmergelo(DisasContext *ctx) { if (unlikely(!ctx->spe_enabled)) { gen_exception(ctx, POWERPC_EXCP_APU); return; } #if defined(TARGET_PPC64) TCGv t0 = tcg_temp_new(); TCGv t1 = tcg_temp_new(); tcg_gen_ext32u_tl(t0, cpu_gpr[rB(ctx->opcode)]); tcg_gen_shli_tl(t1, cpu_gpr[rA(ctx->opcode)], 32); tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], t0, t1); tcg_temp_free(t0); tcg_temp_free(t1); #else tcg_gen_mov_i32(cpu_gprh[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); tcg_gen_mov_i32(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); #endif }
false
qemu
27a69bb088bee6d4efea254659422fb9c751b3c7
6,942
static int64_t cache_seek(URLContext *h, int64_t pos, int whence) { Context *c= h->priv_data; if (whence == AVSEEK_SIZE) { pos= ffurl_seek(c->inner, pos, whence); if(pos <= 0){ pos= ffurl_seek(c->inner, -1, SEEK_END); ffurl_seek(c->inner, c->end, SEEK_SET); if(pos <= 0) return c->end; } return pos; } pos= lseek(c->fd, pos, whence); if(pos<0){ return pos; }else if(pos <= c->end){ c->pos= pos; return pos; }else{ lseek(c->fd, c->pos, SEEK_SET); return AVERROR(EPIPE); } }
false
FFmpeg
eb19d89d8eb51f20299d59558d69d0f057583e7c
6,943
static int sonic_encode_frame(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr) { SonicContext *s = avctx->priv_data; RangeCoder c; int i, j, ch, quant = 0, x = 0; int ret; const short *samples = (const int16_t*)frame->data[0]; uint8_t state[32]; if ((ret = ff_alloc_packet2(avctx, avpkt, s->frame_size * 5 + 1000)) < 0) return ret; ff_init_range_encoder(&c, avpkt->data, avpkt->size); ff_build_rac_states(&c, 0.05*(1LL<<32), 256-8); memset(state, 128, sizeof(state)); // short -> internal for (i = 0; i < s->frame_size; i++) s->int_samples[i] = samples[i]; if (!s->lossless) for (i = 0; i < s->frame_size; i++) s->int_samples[i] = s->int_samples[i] << SAMPLE_SHIFT; switch(s->decorrelation) { case MID_SIDE: for (i = 0; i < s->frame_size; i += s->channels) { s->int_samples[i] += s->int_samples[i+1]; s->int_samples[i+1] -= shift(s->int_samples[i], 1); } break; case LEFT_SIDE: for (i = 0; i < s->frame_size; i += s->channels) s->int_samples[i+1] -= s->int_samples[i]; break; case RIGHT_SIDE: for (i = 0; i < s->frame_size; i += s->channels) s->int_samples[i] -= s->int_samples[i+1]; break; } memset(s->window, 0, 4* s->window_size); for (i = 0; i < s->tail_size; i++) s->window[x++] = s->tail[i]; for (i = 0; i < s->frame_size; i++) s->window[x++] = s->int_samples[i]; for (i = 0; i < s->tail_size; i++) s->window[x++] = 0; for (i = 0; i < s->tail_size; i++) s->tail[i] = s->int_samples[s->frame_size - s->tail_size + i]; // generate taps modified_levinson_durbin(s->window, s->window_size, s->predictor_k, s->num_taps, s->channels, s->tap_quant); if ((ret = intlist_write(&c, state, s->predictor_k, s->num_taps, 0)) < 0) return ret; for (ch = 0; ch < s->channels; ch++) { x = s->tail_size+ch; for (i = 0; i < s->block_align; i++) { int sum = 0; for (j = 0; j < s->downsampling; j++, x += s->channels) sum += s->window[x]; s->coded_samples[ch][i] = sum; } } // simple rate control code if (!s->lossless) { double energy1 = 0.0, energy2 = 0.0; for (ch = 0; ch < s->channels; ch++) { for (i = 0; i < s->block_align; i++) { double sample = s->coded_samples[ch][i]; energy2 += sample*sample; energy1 += fabs(sample); } } energy2 = sqrt(energy2/(s->channels*s->block_align)); energy1 = M_SQRT2*energy1/(s->channels*s->block_align); // increase bitrate when samples are like a gaussian distribution // reduce bitrate when samples are like a two-tailed exponential distribution if (energy2 > energy1) energy2 += (energy2-energy1)*RATE_VARIATION; quant = (int)(BASE_QUANT*s->quantization*energy2/SAMPLE_FACTOR); // av_log(avctx, AV_LOG_DEBUG, "quant: %d energy: %f / %f\n", quant, energy1, energy2); quant = av_clip(quant, 1, 65534); put_symbol(&c, state, quant, 0, NULL, NULL); quant *= SAMPLE_FACTOR; } // write out coded samples for (ch = 0; ch < s->channels; ch++) { if (!s->lossless) for (i = 0; i < s->block_align; i++) s->coded_samples[ch][i] = ROUNDED_DIV(s->coded_samples[ch][i], quant); if ((ret = intlist_write(&c, state, s->coded_samples[ch], s->block_align, 1)) < 0) return ret; } // av_log(avctx, AV_LOG_DEBUG, "used bytes: %d\n", (put_bits_count(&pb)+7)/8); avpkt->size = ff_rac_terminate(&c); *got_packet_ptr = 1; return 0; }
false
FFmpeg
c131a9fead5bf63215b6e1172b3c5c183cf90b85
6,944
int tpm_register_driver(const TPMDriverOps *tdo) { int i; for (i = 0; i < TPM_MAX_DRIVERS; i++) { if (!be_drivers[i]) { be_drivers[i] = tdo; return 0; } } error_report("Could not register TPM driver"); return 1; }
true
qemu
a9a72aeefbd3ef8bcbbeeccaf174ee10db2978ac
6,945
static void xilinx_axidma_init(Object *obj) { XilinxAXIDMA *s = XILINX_AXI_DMA(obj); SysBusDevice *sbd = SYS_BUS_DEVICE(obj); object_property_add_link(obj, "axistream-connected", TYPE_STREAM_SLAVE, (Object **)&s->tx_data_dev, &error_abort); object_property_add_link(obj, "axistream-control-connected", TYPE_STREAM_SLAVE, (Object **)&s->tx_control_dev, &error_abort); object_initialize(&s->rx_data_dev, sizeof(s->rx_data_dev), TYPE_XILINX_AXI_DMA_DATA_STREAM); object_initialize(&s->rx_control_dev, sizeof(s->rx_control_dev), TYPE_XILINX_AXI_DMA_CONTROL_STREAM); object_property_add_child(OBJECT(s), "axistream-connected-target", (Object *)&s->rx_data_dev, &error_abort); object_property_add_child(OBJECT(s), "axistream-control-connected-target", (Object *)&s->rx_control_dev, &error_abort); sysbus_init_irq(sbd, &s->streams[0].irq); sysbus_init_irq(sbd, &s->streams[1].irq); memory_region_init_io(&s->iomem, obj, &axidma_ops, s, "xlnx.axi-dma", R_MAX * 4 * 2); sysbus_init_mmio(sbd, &s->iomem); }
true
qemu
9561fda8d90e176bef598ba87c42a1bd6ad03ef7
6,947
static int count_contiguous_free_clusters(int nb_clusters, uint64_t *l2_table) { int i; for (i = 0; i < nb_clusters; i++) { int type = qcow2_get_cluster_type(be64_to_cpu(l2_table[i])); if (type != QCOW2_CLUSTER_UNALLOCATED) { break; } } return i; }
true
qemu
a99dfb45f26bface6830ee5465e57bcdbc53c6c8
6,948
void ide_init_ioport(IDEBus *bus, ISADevice *dev, int iobase, int iobase2) { /* ??? Assume only ISA and PCI configurations, and that the PCI-ISA bridge has been setup properly to always register with ISA. */ isa_register_portio_list(dev, iobase, ide_portio_list, bus, "ide"); if (iobase2) { isa_register_portio_list(dev, iobase2, ide_portio2_list, bus, "ide"); } }
true
qemu
e305a16510afa74eec20390479e349402e55ef4c
6,949
static int read_huffman_tables(HYuvContext *s, uint8_t *src, int length){ GetBitContext gb; int i; init_get_bits(&gb, src, length*8); for(i=0; i<3; i++){ read_len_table(s->len[i], &gb); if(generate_bits_table(s->bits[i], s->len[i])<0){ return -1; } #if 0 for(j=0; j<256; j++){ printf("%6X, %2d, %3d\n", s->bits[i][j], s->len[i][j], j); } #endif free_vlc(&s->vlc[i]); init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0); } generate_joint_tables(s); return (get_bits_count(&gb)+7)/8; }
true
FFmpeg
e30004fa733ec64b6ff90678098c1f1132d4d603
6,950
static int parse_bit(DeviceState *dev, Property *prop, const char *str) { if (!strcasecmp(str, "on")) bit_prop_set(dev, prop, true); else if (!strcasecmp(str, "off")) bit_prop_set(dev, prop, false); else return -EINVAL; return 0; }
true
qemu
5cb9b56acfc0b50acf7ccd2d044ab4991c47fdde
6,952
int ff_hevc_cu_qp_delta_abs(HEVCContext *s) { int prefix_val = 0; int suffix_val = 0; int inc = 0; while (prefix_val < 5 && GET_CABAC(elem_offset[CU_QP_DELTA] + inc)) { prefix_val++; inc = 1; } if (prefix_val >= 5) { int k = 0; while (k < CABAC_MAX_BIN && get_cabac_bypass(&s->HEVClc->cc)) { suffix_val += 1 << k; k++; } if (k == CABAC_MAX_BIN) av_log(s->avctx, AV_LOG_ERROR, "CABAC_MAX_BIN : %d\n", k); while (k--) suffix_val += get_cabac_bypass(&s->HEVClc->cc) << k; } return prefix_val + suffix_val; }
true
FFmpeg
0ee143558d55b590774dba69cff5a16eda089a4d
6,954
static inline void gdb_continue(GDBState *s) { #ifdef CONFIG_USER_ONLY s->running_state = 1; #else vm_start(); #endif }
true
qemu
87f25c12bfeaaa0c41fb857713bbc7e8a9b757dc
6,955
av_cold int ff_nvenc_encode_close(AVCodecContext *avctx) { NVENCContext *ctx = avctx->priv_data; NV_ENCODE_API_FUNCTION_LIST *nv = &ctx->nvel.nvenc_funcs; int i; av_fifo_free(ctx->timestamps); av_fifo_free(ctx->pending); av_fifo_free(ctx->ready); if (ctx->in) { for (i = 0; i < ctx->nb_surfaces; ++i) { nv->nvEncDestroyInputBuffer(ctx->nvenc_ctx, ctx->in[i].in); nv->nvEncDestroyBitstreamBuffer(ctx->nvenc_ctx, ctx->out[i].out); av_freep(&ctx->in); av_freep(&ctx->out); if (ctx->nvenc_ctx) nv->nvEncDestroyEncoder(ctx->nvenc_ctx); if (ctx->cu_context) ctx->nvel.cu_ctx_destroy(ctx->cu_context); if (ctx->nvel.nvenc) dlclose(ctx->nvel.nvenc); if (ctx->nvel.cuda) dlclose(ctx->nvel.cuda); return 0;
true
FFmpeg
aac7d6b284c3976eb0c48d61f342f008fb6e4103
6,957
static void trigger_console_data(void *opaque, int n, int level) { sclp_service_interrupt(0); }
true
qemu
4f3ed190a673c0020c3ccebb4882ae4675cb5f4d