project: stringclasses (2 values)
commit_id: stringlengths (40–40)
target: int64 (0–1)
func: stringlengths (26–142k)
idx: int64 (0–27.3k)
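The column spec above describes every row in the dump that follows: a project name, a 40-character commit hash, a binary label (target), the C function body (func), and a row index (idx). As a rough illustration only (assuming the rows were re-parsed into Python dicts; the helper names and the truncated func string below are illustrative, not taken from the dump), a row could be checked against the declared column types and filtered by label like this:

# Minimal sketch, not an official loader: one row of the schema above as a
# plain Python dict. The sample values are copied from the intel_hda_addr
# row of the dump; the func body is truncated here purely for brevity.
rows = [
    {
        "project": "qemu",
        "commit_id": "9be385980d37e8f4fd33f605f5fb1c3d144170a8",
        "target": 0,
        "func": "static hwaddr intel_hda_addr(uint32_t lbase, uint32_t ubase) { /* ... */ }",
        "idx": 16832,
    },
]

def sanity_check(row):
    """Check one row against the declared column types and ranges."""
    assert row["project"] in ("qemu", "FFmpeg")          # stringclasses, 2 values
    assert len(row["commit_id"]) == 40                   # stringlengths 40-40
    assert row["target"] in (0, 1)                       # int64, 0-1
    assert isinstance(row["func"], str) and row["func"]  # non-empty C source
    assert isinstance(row["idx"], int)

for r in rows:
    sanity_check(r)

# Example use: keep only the rows with target == 1 (the positive class).
positives = [r for r in rows if r["target"] == 1]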
qemu
a8170e5e97ad17ca169c64ba87ae2f53850dab4c
0
uint64_t ldq_be_phys(target_phys_addr_t addr) { return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN); }
16,831
qemu
9be385980d37e8f4fd33f605f5fb1c3d144170a8
0
static hwaddr intel_hda_addr(uint32_t lbase, uint32_t ubase) { hwaddr addr; addr = ((uint64_t)ubase << 32) | lbase; return addr; }
16,832
qemu
1a6245a5b0b4e8d822c739b403fc67c8a7bc8d12
0
static int nbd_send_rep(int csock, uint32_t type, uint32_t opt) { uint64_t magic; uint32_t len; magic = cpu_to_be64(NBD_REP_MAGIC); if (write_sync(csock, &magic, sizeof(magic)) != sizeof(magic)) { LOG("write failed (rep magic)"); return -EINVAL; } opt = cpu_to_be32(opt); if (write_sync(csock, &opt, sizeof(opt)) != sizeof(opt)) { LOG("write failed (rep opt)"); return -EINVAL; } type = cpu_to_be32(type); if (write_sync(csock, &type, sizeof(type)) != sizeof(type)) { LOG("write failed (rep type)"); return -EINVAL; } len = cpu_to_be32(0); if (write_sync(csock, &len, sizeof(len)) != sizeof(len)) { LOG("write failed (rep data length)"); return -EINVAL; } return 0; }
16,833
qemu
4be746345f13e99e468c60acbd3a355e8183e3ce
0
void virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf, VirtIOBlockDataPlane **dataplane, Error **errp) { VirtIOBlockDataPlane *s; Error *local_err = NULL; BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); *dataplane = NULL; if (!conf->data_plane && !conf->iothread) { return; } /* Don't try if transport does not support notifiers. */ if (!k->set_guest_notifiers || !k->set_host_notifier) { error_setg(errp, "device is incompatible with x-data-plane " "(transport does not support notifiers)"); return; } /* If dataplane is (re-)enabled while the guest is running there could be * block jobs that can conflict. */ if (bdrv_op_is_blocked(conf->conf.bs, BLOCK_OP_TYPE_DATAPLANE, &local_err)) { error_setg(errp, "cannot start dataplane thread: %s", error_get_pretty(local_err)); error_free(local_err); return; } s = g_new0(VirtIOBlockDataPlane, 1); s->vdev = vdev; s->conf = conf; if (conf->iothread) { s->iothread = conf->iothread; object_ref(OBJECT(s->iothread)); } else { /* Create per-device IOThread if none specified. This is for * x-data-plane option compatibility. If x-data-plane is removed we * can drop this. */ object_initialize(&s->internal_iothread_obj, sizeof(s->internal_iothread_obj), TYPE_IOTHREAD); user_creatable_complete(OBJECT(&s->internal_iothread_obj), &error_abort); s->iothread = &s->internal_iothread_obj; } s->ctx = iothread_get_aio_context(s->iothread); s->bh = aio_bh_new(s->ctx, notify_guest_bh, s); error_setg(&s->blocker, "block device is in use by data plane"); bdrv_op_block_all(conf->conf.bs, s->blocker); bdrv_op_unblock(conf->conf.bs, BLOCK_OP_TYPE_RESIZE, s->blocker); bdrv_op_unblock(conf->conf.bs, BLOCK_OP_TYPE_DRIVE_DEL, s->blocker); *dataplane = s; }
16,834
qemu
9abf567d95a4e840df868ca993219175fbef8c22
0
uint32_t HELPER(servc)(uint32_t r1, uint64_t r2) { if (sclp_service_call(env, r1, r2)) { return 3; } return 0; }
16,835
FFmpeg
2ad0d96a24879b96153a3fbbc1707372baa2615e
0
void ff_acelp_interpolate( int16_t* out, const int16_t* in, const int16_t* filter_coeffs, int precision, int frac_pos, int filter_length, int length) { int n, i; assert(pitch_delay_frac >= 0 && pitch_delay_frac < precision); for(n=0; n<length; n++) { int idx = 0; int v = 0x4000; for(i=0; i<filter_length;) { /* The reference G.729 and AMR fixed point code performs clipping after each of the two following accumulations. Since clipping affects only the synthetic OVERFLOW test without causing an int type overflow, it was moved outside the loop. */ /* R(x):=ac_v[-k+x] v += R(n-i)*ff_acelp_interp_filter(t+6i) v += R(n+i+1)*ff_acelp_interp_filter(6-t+6i) */ v += in[n + i] * filter_coeffs[idx + frac_pos]; idx += precision; i++; v += in[n - i] * filter_coeffs[idx - frac_pos]; } out[n] = av_clip_int16(v >> 15); } }
16,836
qemu
2662a059aa2affddfbe42e78b11c802cf30a970f
0
static void init_ppc_proc (CPUPPCState *env, ppc_def_t *def) { env->reserve = -1; /* Default MMU definitions */ env->nb_BATs = -1; env->nb_tlb = 0; env->nb_ways = 0; /* XXX: missing: * 32 bits PowerPC: * - MPC5xx(x) * - MPC8xx(x) * - RCPU (same as MPC5xx ?) */ spr_register(env, SPR_PVR, "PVR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, SPR_NOACCESS, def->pvr); printf("%s: PVR %08x mask %08x => %08x\n", __func__, def->pvr, def->pvr_mask, def->pvr & def->pvr_mask); switch (def->pvr) { /* Embedded PowerPC from IBM */ case CPU_PPC_401A1: /* 401 A1 family */ case CPU_PPC_401B2: /* 401 B2 family */ case CPU_PPC_401C2: /* 401 C2 family */ case CPU_PPC_401D2: /* 401 D2 family */ case CPU_PPC_401E2: /* 401 E2 family */ case CPU_PPC_401F2: /* 401 F2 family */ case CPU_PPC_401G2: /* 401 G2 family */ case CPU_PPC_IOP480: /* IOP 480 family */ case CPU_PPC_COBRA: /* IBM Processor for Network Resources */ gen_spr_generic(env); gen_spr_40x(env); gen_spr_401_403(env); #if defined (TODO) /* XXX: optional ? */ gen_spr_compress(env); #endif env->nb_BATs = 0; env->nb_tlb = 64; env->nb_ways = 1; env->id_tlbs = 0; /* XXX: TODO: allocate internal IRQ controller */ break; case CPU_PPC_403GA: /* 403 GA family */ case CPU_PPC_403GB: /* 403 GB family */ case CPU_PPC_403GC: /* 403 GC family */ case CPU_PPC_403GCX: /* 403 GCX family */ gen_spr_generic(env); gen_spr_40x(env); gen_spr_401_403(env); gen_spr_403(env); env->nb_BATs = 0; env->nb_tlb = 64; env->nb_ways = 1; env->id_tlbs = 0; /* XXX: TODO: allocate internal IRQ controller */ break; case CPU_PPC_405CR: /* 405 GP/CR family */ case CPU_PPC_405EP: /* 405 EP family */ case CPU_PPC_405GPR: /* 405 GPR family */ case CPU_PPC_405D2: /* 405 D2 family */ case CPU_PPC_405D4: /* 405 D4 family */ gen_spr_generic(env); /* Time base */ gen_tbl(env); gen_spr_40x(env); gen_spr_405(env); env->nb_BATs = 0; env->nb_tlb = 64; env->nb_ways = 1; env->id_tlbs = 0; /* Allocate hardware IRQ controller */ ppc405_irq_init(env); break; case CPU_PPC_NPE405H: /* NPe405 H family */ case CPU_PPC_NPE405H2: case CPU_PPC_NPE405L: /* Npe405 L family */ gen_spr_generic(env); /* Time base */ gen_tbl(env); gen_spr_40x(env); gen_spr_405(env); env->nb_BATs = 0; env->nb_tlb = 64; env->nb_ways = 1; env->id_tlbs = 0; /* Allocate hardware IRQ controller */ ppc405_irq_init(env); break; #if defined (TODO) case CPU_PPC_STB01000: #endif #if defined (TODO) case CPU_PPC_STB01010: #endif #if defined (TODO) case CPU_PPC_STB0210: #endif case CPU_PPC_STB03: /* STB03 family */ #if defined (TODO) case CPU_PPC_STB043: /* STB043 family */ #endif #if defined (TODO) case CPU_PPC_STB045: /* STB045 family */ #endif case CPU_PPC_STB25: /* STB25 family */ #if defined (TODO) case CPU_PPC_STB130: /* STB130 family */ #endif gen_spr_generic(env); /* Time base */ gen_tbl(env); gen_spr_40x(env); gen_spr_405(env); env->nb_BATs = 0; env->nb_tlb = 64; env->nb_ways = 1; env->id_tlbs = 0; /* Allocate hardware IRQ controller */ ppc405_irq_init(env); break; case CPU_PPC_440EP: /* 440 EP family */ case CPU_PPC_440GP: /* 440 GP family */ case CPU_PPC_440GX: /* 440 GX family */ case CPU_PPC_440GXc: /* 440 GXc family */ case CPU_PPC_440GXf: /* 440 GXf family */ case CPU_PPC_440SP: /* 440 SP family */ case CPU_PPC_440SP2: case CPU_PPC_440SPE: /* 440 SPE family */ gen_spr_generic(env); /* Time base */ gen_tbl(env); gen_spr_BookE(env); gen_spr_440(env); env->nb_BATs = 0; env->nb_tlb = 64; env->nb_ways = 1; env->id_tlbs = 0; /* XXX: TODO: allocate internal IRQ controller */ break; /* Embedded PowerPC from Freescale */ #if defined (TODO) case 
CPU_PPC_5xx: break; #endif #if defined (TODO) case CPU_PPC_8xx: /* MPC821 / 823 / 850 / 860 */ break; #endif #if defined (TODO) case CPU_PPC_82xx_HIP3: /* MPC8240 / 8260 */ case CPU_PPC_82xx_HIP4: /* MPC8240 / 8260 */ break; #endif #if defined (TODO) case CPU_PPC_827x: /* MPC 827x / 828x */ break; #endif /* XXX: Use MPC8540 PVR to implement a test PowerPC BookE target */ case CPU_PPC_e500v110: case CPU_PPC_e500v120: case CPU_PPC_e500v210: case CPU_PPC_e500v220: gen_spr_generic(env); /* Time base */ gen_tbl(env); gen_spr_BookE(env); gen_spr_BookE_FSL(env); env->nb_BATs = 0; env->nb_tlb = 64; env->nb_ways = 1; env->id_tlbs = 0; /* XXX: TODO: allocate internal IRQ controller */ break; #if defined (TODO) case CPU_PPC_e600: break; #endif /* 32 bits PowerPC */ case CPU_PPC_601: /* PowerPC 601 */ gen_spr_generic(env); gen_spr_ne_601(env); gen_spr_601(env); /* Hardware implementation registers */ /* XXX : not implemented */ spr_register(env, SPR_HID0, "HID0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_HID1, "HID1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_601_HID2, "HID2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_601_HID5, "HID5", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ #if 0 /* ? */ spr_register(env, SPR_601_HID15, "HID15", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); #endif env->nb_tlb = 64; env->nb_ways = 2; env->id_tlbs = 0; env->id_tlbs = 0; /* XXX: TODO: allocate internal IRQ controller */ break; case CPU_PPC_602: /* PowerPC 602 */ gen_spr_generic(env); gen_spr_ne_601(env); /* Memory management */ gen_low_BATs(env); /* Time base */ gen_tbl(env); gen_6xx_7xx_soft_tlb(env, 64, 2); gen_spr_602(env); /* hardware implementation registers */ /* XXX : not implemented */ spr_register(env, SPR_HID0, "HID0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_HID1, "HID1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Allocate hardware IRQ controller */ ppc6xx_irq_init(env); break; case CPU_PPC_603: /* PowerPC 603 */ case CPU_PPC_603E: /* PowerPC 603e */ case CPU_PPC_603E7v: case CPU_PPC_603E7v2: case CPU_PPC_603P: /* PowerPC 603p */ case CPU_PPC_603R: /* PowerPC 603r */ gen_spr_generic(env); gen_spr_ne_601(env); /* Memory management */ gen_low_BATs(env); /* Time base */ gen_tbl(env); gen_6xx_7xx_soft_tlb(env, 64, 2); gen_spr_603(env); /* hardware implementation registers */ /* XXX : not implemented */ spr_register(env, SPR_HID0, "HID0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_HID1, "HID1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Allocate hardware IRQ controller */ ppc6xx_irq_init(env); break; case CPU_PPC_G2: /* PowerPC G2 family */ case CPU_PPC_G2H4: case CPU_PPC_G2gp: case CPU_PPC_G2ls: case CPU_PPC_G2LE: /* PowerPC G2LE family */ case CPU_PPC_G2LEgp: case CPU_PPC_G2LEls: gen_spr_generic(env); gen_spr_ne_601(env); /* Memory management */ gen_low_BATs(env); /* Time base */ gen_tbl(env); /* Memory management */ gen_high_BATs(env); gen_6xx_7xx_soft_tlb(env, 64, 2); gen_spr_G2_755(env); 
gen_spr_G2(env); /* Hardware implementation register */ /* XXX : not implemented */ spr_register(env, SPR_HID0, "HID0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_HID1, "HID1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_HID2, "HID2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Allocate hardware IRQ controller */ ppc6xx_irq_init(env); break; case CPU_PPC_604: /* PowerPC 604 */ case CPU_PPC_604E: /* PowerPC 604e */ case CPU_PPC_604R: /* PowerPC 604r */ gen_spr_generic(env); gen_spr_ne_601(env); /* Memory management */ gen_low_BATs(env); /* Time base */ gen_tbl(env); gen_spr_604(env); /* Hardware implementation registers */ /* XXX : not implemented */ spr_register(env, SPR_HID0, "HID0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_HID1, "HID1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Allocate hardware IRQ controller */ ppc6xx_irq_init(env); break; case CPU_PPC_74x: /* PowerPC 740 / 750 */ case CPU_PPC_740E: case CPU_PPC_750E: case CPU_PPC_74xP: /* PowerPC 740P / 750P */ case CPU_PPC_750CXE21: /* IBM PowerPC 750cxe */ case CPU_PPC_750CXE22: case CPU_PPC_750CXE23: case CPU_PPC_750CXE24: case CPU_PPC_750CXE24b: case CPU_PPC_750CXE31: case CPU_PPC_750CXE31b: case CPU_PPC_750CXR: gen_spr_generic(env); gen_spr_ne_601(env); /* Memory management */ gen_low_BATs(env); /* Time base */ gen_tbl(env); gen_spr_7xx(env); /* Hardware implementation registers */ /* XXX : not implemented */ spr_register(env, SPR_HID0, "HID0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_HID1, "HID1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Allocate hardware IRQ controller */ ppc6xx_irq_init(env); break; case CPU_PPC_750FX10: /* IBM PowerPC 750 FX */ case CPU_PPC_750FX20: case CPU_PPC_750FX21: case CPU_PPC_750FX22: case CPU_PPC_750FX23: case CPU_PPC_750GX10: /* IBM PowerPC 750 GX */ case CPU_PPC_750GX11: case CPU_PPC_750GX12: gen_spr_generic(env); gen_spr_ne_601(env); /* Memory management */ gen_low_BATs(env); /* PowerPC 750fx & 750gx has 8 DBATs and 8 IBATs */ gen_high_BATs(env); /* Time base */ gen_tbl(env); gen_spr_7xx(env); /* Hardware implementation registers */ /* XXX : not implemented */ spr_register(env, SPR_HID0, "HID0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_HID1, "HID1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_750_HID2, "HID2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Allocate hardware IRQ controller */ ppc6xx_irq_init(env); break; case CPU_PPC_755_10: /* PowerPC 755 */ case CPU_PPC_755_11: case CPU_PPC_755_20: case CPU_PPC_755D: case CPU_PPC_755E: gen_spr_generic(env); gen_spr_ne_601(env); /* Memory management */ gen_low_BATs(env); /* Time base */ gen_tbl(env); /* Memory management */ gen_high_BATs(env); gen_6xx_7xx_soft_tlb(env, 64, 2); gen_spr_G2_755(env); /* L2 cache control */ /* XXX : not implemented */ spr_register(env, SPR_ICTC, "ICTC", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not 
implemented */ spr_register(env, SPR_L2PM, "L2PM", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Hardware implementation registers */ /* XXX : not implemented */ spr_register(env, SPR_HID0, "HID0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_HID1, "HID1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_HID2, "HID2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Allocate hardware IRQ controller */ ppc6xx_irq_init(env); break; #if defined (TODO) /* G4 family */ case CPU_PPC_7400: /* PowerPC 7400 */ case CPU_PPC_7410C: /* PowerPC 7410 */ case CPU_PPC_7410D: case CPU_PPC_7410E: case CPU_PPC_7441: /* PowerPC 7441 */ case CPU_PPC_7445: /* PowerPC 7445 */ case CPU_PPC_7447: /* PowerPC 7447 */ case CPU_PPC_7447A: /* PowerPC 7447A */ case CPU_PPC_7448: /* PowerPC 7448 */ case CPU_PPC_7450: /* PowerPC 7450 */ case CPU_PPC_7450b: case CPU_PPC_7451: /* PowerPC 7451 */ case CPU_PPC_7451G: case CPU_PPC_7455: /* PowerPC 7455 */ case CPU_PPC_7455F: case CPU_PPC_7455G: case CPU_PPC_7457: /* PowerPC 7457 */ case CPU_PPC_7457C: case CPU_PPC_7457A: /* PowerPC 7457A */ break; #endif /* 64 bits PowerPC */ #if defined (TARGET_PPC64) #if defined (TODO) case CPU_PPC_620: /* PowerPC 620 */ case CPU_PPC_630: /* PowerPC 630 (Power 3) */ case CPU_PPC_631: /* PowerPC 631 (Power 3+) */ case CPU_PPC_POWER4: /* Power 4 */ case CPU_PPC_POWER4P: /* Power 4+ */ case CPU_PPC_POWER5: /* Power 5 */ case CPU_PPC_POWER5P: /* Power 5+ */ #endif break; case CPU_PPC_970: /* PowerPC 970 */ case CPU_PPC_970FX10: /* PowerPC 970 FX */ case CPU_PPC_970FX20: case CPU_PPC_970FX21: case CPU_PPC_970FX30: case CPU_PPC_970FX31: case CPU_PPC_970MP10: /* PowerPC 970 MP */ case CPU_PPC_970MP11: gen_spr_generic(env); gen_spr_ne_601(env); /* XXX: not correct */ gen_low_BATs(env); /* Time base */ gen_tbl(env); gen_spr_7xx(env); /* Hardware implementation registers */ /* XXX : not implemented */ spr_register(env, SPR_HID0, "HID0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_HID1, "HID1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_750_HID2, "HID2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Allocate hardware IRQ controller */ ppc970_irq_init(env); break; #if defined (TODO) case CPU_PPC_CELL10: /* Cell family */ case CPU_PPC_CELL20: case CPU_PPC_CELL30: case CPU_PPC_CELL31: #endif break; #if defined (TODO) case CPU_PPC_RS64: /* Apache (RS64/A35) */ case CPU_PPC_RS64II: /* NorthStar (RS64-II/A50) */ case CPU_PPC_RS64III: /* Pulsar (RS64-III) */ case CPU_PPC_RS64IV: /* IceStar/IStar/SStar (RS64-IV) */ #endif break; #endif /* defined (TARGET_PPC64) */ #if defined (TODO) /* POWER */ case CPU_POWER: /* POWER */ case CPU_POWER2: /* POWER2 */ break; #endif default: gen_spr_generic(env); /* XXX: TODO: allocate internal IRQ controller */ break; } if (env->nb_BATs == -1) env->nb_BATs = 4; /* Allocate TLBs buffer when needed */ if (env->nb_tlb != 0) { int nb_tlb = env->nb_tlb; if (env->id_tlbs != 0) nb_tlb *= 2; env->tlb = qemu_mallocz(nb_tlb * sizeof(ppc_tlb_t)); /* Pre-compute some useful values */ env->tlb_per_way = env->nb_tlb / env->nb_ways; } }
16,837
qemu
ad674e53b5cce265fadafbde2c6a4f190345cd00
0
static void nop(DBDMA_channel *ch) { dbdma_cmd *current = &ch->current; if (conditional_wait(ch)) goto wait; current->xfer_status = cpu_to_le16(be32_to_cpu(ch->regs[DBDMA_STATUS])); dbdma_cmdptr_save(ch); conditional_interrupt(ch); conditional_branch(ch); wait: qemu_bh_schedule(dbdma_bh); }
16,838
qemu
dd8070d865ad1b32876931f812a80645f97112ff
0
static TileExcp decode_y1(DisasContext *dc, tilegx_bundle_bits bundle) { unsigned opc = get_Opcode_Y1(bundle); unsigned ext = get_RRROpcodeExtension_Y1(bundle); unsigned dest = get_Dest_Y1(bundle); unsigned srca = get_SrcA_Y1(bundle); unsigned srcb; int imm; switch (get_Opcode_Y1(bundle)) { case RRR_1_OPCODE_Y1: if (ext == UNARY_RRR_1_OPCODE_Y0) { ext = get_UnaryOpcodeExtension_Y1(bundle); return gen_rr_opcode(dc, OE(opc, ext, Y1), dest, srca); } /* fallthru */ case RRR_0_OPCODE_Y1: case RRR_2_OPCODE_Y1: case RRR_3_OPCODE_Y1: case RRR_4_OPCODE_Y1: case RRR_5_OPCODE_Y1: case RRR_6_OPCODE_Y1: case RRR_7_OPCODE_Y1: srcb = get_SrcB_Y1(bundle); return gen_rrr_opcode(dc, OE(opc, ext, Y1), dest, srca, srcb); case SHIFT_OPCODE_Y1: ext = get_ShiftOpcodeExtension_Y1(bundle); imm = get_ShAmt_Y1(bundle); return gen_rri_opcode(dc, OE(opc, ext, Y1), dest, srca, imm); case ADDI_OPCODE_Y1: case ADDXI_OPCODE_Y1: case ANDI_OPCODE_Y1: case CMPEQI_OPCODE_Y1: case CMPLTSI_OPCODE_Y1: imm = (int8_t)get_Imm8_Y1(bundle); return gen_rri_opcode(dc, OE(opc, 0, Y1), dest, srca, imm); default: return TILEGX_EXCP_OPCODE_UNIMPLEMENTED; } }
16,839
qemu
26e92f65525ef4446a500d85e185cf78835922aa
0
static void versatile_init(ram_addr_t ram_size, const char *boot_device, const char *kernel_filename, const char *kernel_cmdline, const char *initrd_filename, const char *cpu_model, int board_id) { CPUState *env; ram_addr_t ram_offset; qemu_irq *cpu_pic; qemu_irq pic[32]; qemu_irq sic[32]; DeviceState *dev; PCIBus *pci_bus; NICInfo *nd; int n; int done_smc = 0; if (!cpu_model) cpu_model = "arm926"; env = cpu_init(cpu_model); if (!env) { fprintf(stderr, "Unable to find CPU definition\n"); exit(1); } ram_offset = qemu_ram_alloc(ram_size); /* ??? RAM should repeat to fill physical memory space. */ /* SDRAM at address zero. */ cpu_register_physical_memory(0, ram_size, ram_offset | IO_MEM_RAM); arm_sysctl_init(0x10000000, 0x41007004); cpu_pic = arm_pic_init_cpu(env); dev = sysbus_create_varargs("pl190", 0x10140000, cpu_pic[0], cpu_pic[1], NULL); for (n = 0; n < 32; n++) { pic[n] = qdev_get_gpio_in(dev, n); } dev = sysbus_create_simple("versatilepb_sic", 0x10003000, NULL); for (n = 0; n < 32; n++) { sysbus_connect_irq(sysbus_from_qdev(dev), n, pic[n]); sic[n] = qdev_get_gpio_in(dev, n); } sysbus_create_simple("pl050_keyboard", 0x10006000, sic[3]); sysbus_create_simple("pl050_mouse", 0x10007000, sic[4]); dev = sysbus_create_varargs("versatile_pci", 0x40000000, sic[27], sic[28], sic[29], sic[30], NULL); pci_bus = (PCIBus *)qdev_get_child_bus(dev, "pci"); /* The Versatile PCI bridge does not provide access to PCI IO space, so many of the qemu PCI devices are not useable. */ for(n = 0; n < nb_nics; n++) { nd = &nd_table[n]; if ((!nd->model && !done_smc) || strcmp(nd->model, "smc91c111") == 0) { smc91c111_init(nd, 0x10010000, sic[25]); done_smc = 1; } else { pci_nic_init_nofail(nd, "rtl8139", NULL); } } if (usb_enabled) { usb_ohci_init_pci(pci_bus, -1); } n = drive_get_max_bus(IF_SCSI); while (n >= 0) { pci_create_simple(pci_bus, -1, "lsi53c895a"); n--; } sysbus_create_simple("pl011", 0x101f1000, pic[12]); sysbus_create_simple("pl011", 0x101f2000, pic[13]); sysbus_create_simple("pl011", 0x101f3000, pic[14]); sysbus_create_simple("pl011", 0x10009000, sic[6]); sysbus_create_simple("pl080", 0x10130000, pic[17]); sysbus_create_simple("sp804", 0x101e2000, pic[4]); sysbus_create_simple("sp804", 0x101e3000, pic[5]); /* The versatile/PB actually has a modified Color LCD controller that includes hardware cursor support from the PL111. */ sysbus_create_simple("pl110_versatile", 0x10120000, pic[16]); sysbus_create_varargs("pl181", 0x10005000, sic[22], sic[1], NULL); sysbus_create_varargs("pl181", 0x1000b000, sic[23], sic[2], NULL); /* Add PL031 Real Time Clock. */ sysbus_create_simple("pl031", 0x101e8000, pic[10]); /* Memory map for Versatile/PB: */ /* 0x10000000 System registers. */ /* 0x10001000 PCI controller config registers. */ /* 0x10002000 Serial bus interface. */ /* 0x10003000 Secondary interrupt controller. */ /* 0x10004000 AACI (audio). */ /* 0x10005000 MMCI0. */ /* 0x10006000 KMI0 (keyboard). */ /* 0x10007000 KMI1 (mouse). */ /* 0x10008000 Character LCD Interface. */ /* 0x10009000 UART3. */ /* 0x1000a000 Smart card 1. */ /* 0x1000b000 MMCI1. */ /* 0x10010000 Ethernet. */ /* 0x10020000 USB. */ /* 0x10100000 SSMC. */ /* 0x10110000 MPMC. */ /* 0x10120000 CLCD Controller. */ /* 0x10130000 DMA Controller. */ /* 0x10140000 Vectored interrupt controller. */ /* 0x101d0000 AHB Monitor Interface. */ /* 0x101e0000 System Controller. */ /* 0x101e1000 Watchdog Interface. */ /* 0x101e2000 Timer 0/1. */ /* 0x101e3000 Timer 2/3. */ /* 0x101e4000 GPIO port 0. */ /* 0x101e5000 GPIO port 1. 
*/ /* 0x101e6000 GPIO port 2. */ /* 0x101e7000 GPIO port 3. */ /* 0x101e8000 RTC. */ /* 0x101f0000 Smart card 0. */ /* 0x101f1000 UART0. */ /* 0x101f2000 UART1. */ /* 0x101f3000 UART2. */ /* 0x101f4000 SSPI. */ versatile_binfo.ram_size = ram_size; versatile_binfo.kernel_filename = kernel_filename; versatile_binfo.kernel_cmdline = kernel_cmdline; versatile_binfo.initrd_filename = initrd_filename; versatile_binfo.board_id = board_id; arm_load_kernel(env, &versatile_binfo); }
16,840
qemu
5379229a2708df3a1506113315214c3ce5325859
0
static void tftp_send_next_block(struct tftp_session *spt, struct tftp_t *recv_tp) { struct sockaddr_in saddr, daddr; struct mbuf *m; struct tftp_t *tp; int nobytes; m = m_get(spt->slirp); if (!m) { return; } memset(m->m_data, 0, m->m_size); m->m_data += IF_MAXLINKHDR; tp = (void *)m->m_data; m->m_data += sizeof(struct udpiphdr); tp->tp_op = htons(TFTP_DATA); tp->x.tp_data.tp_block_nr = htons((spt->block_nr + 1) & 0xffff); saddr.sin_addr = recv_tp->ip.ip_dst; saddr.sin_port = recv_tp->udp.uh_dport; daddr.sin_addr = spt->client_ip; daddr.sin_port = spt->client_port; nobytes = tftp_read_data(spt, spt->block_nr, tp->x.tp_data.tp_buf, 512); if (nobytes < 0) { m_free(m); /* send "file not found" error back */ tftp_send_error(spt, 1, "File not found", tp); return; } m->m_len = sizeof(struct tftp_t) - (512 - nobytes) - sizeof(struct ip) - sizeof(struct udphdr); udp_output2(NULL, m, &saddr, &daddr, IPTOS_LOWDELAY); if (nobytes == 512) { tftp_session_update(spt); } else { tftp_session_terminate(spt); } spt->block_nr++; }
16,841
qemu
8cd2ce7aaa3c3fadc561f40045d4d53ff72e94ef
0
static void cas_handle_compat_cpu(PowerPCCPUClass *pcc, uint32_t pvr, unsigned max_lvl, unsigned *compat_lvl, unsigned *cpu_version) { unsigned lvl = get_compat_level(pvr); bool is205, is206; if (!lvl) { return; } /* If it is a logical PVR, try to determine the highest level */ is205 = (pcc->pcr_mask & PCR_COMPAT_2_05) && (lvl == get_compat_level(CPU_POWERPC_LOGICAL_2_05)); is206 = (pcc->pcr_mask & PCR_COMPAT_2_06) && ((lvl == get_compat_level(CPU_POWERPC_LOGICAL_2_06)) || (lvl == get_compat_level(CPU_POWERPC_LOGICAL_2_06_PLUS))); if (is205 || is206) { if (!max_lvl) { /* User did not set the level, choose the highest */ if (*compat_lvl <= lvl) { *compat_lvl = lvl; *cpu_version = pvr; } } else if (max_lvl >= lvl) { /* User chose the level, don't set higher than this */ *compat_lvl = lvl; *cpu_version = pvr; } } }
16,843
qemu
a9fc37f6bc3f2ab90585cb16493da9f6dcfbfbcf
1
static void qobject_input_type_str(Visitor *v, const char *name, char **obj, Error **errp) { QObjectInputVisitor *qiv = to_qiv(v); QObject *qobj = qobject_input_get_object(qiv, name, true, errp); QString *qstr; *obj = NULL; if (!qobj) { return; } qstr = qobject_to_qstring(qobj); if (!qstr) { error_setg(errp, QERR_INVALID_PARAMETER_TYPE, name ? name : "null", "string"); return; } *obj = g_strdup(qstring_get_str(qstr)); }
16,844
qemu
b68e60e6f0d2865e961a800fb8db96a7fc6494c4
1
static void gen_or(DisasContext *ctx) { int rs, ra, rb; rs = rS(ctx->opcode); ra = rA(ctx->opcode); rb = rB(ctx->opcode); /* Optimisation for mr. ri case */ if (rs != ra || rs != rb) { if (rs != rb) tcg_gen_or_tl(cpu_gpr[ra], cpu_gpr[rs], cpu_gpr[rb]); else tcg_gen_mov_tl(cpu_gpr[ra], cpu_gpr[rs]); if (unlikely(Rc(ctx->opcode) != 0)) gen_set_Rc0(ctx, cpu_gpr[ra]); } else if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rs]); #if defined(TARGET_PPC64) } else { int prio = 0; switch (rs) { case 1: /* Set process priority to low */ prio = 2; break; case 6: /* Set process priority to medium-low */ prio = 3; break; case 2: /* Set process priority to normal */ prio = 4; break; #if !defined(CONFIG_USER_ONLY) case 31: if (!ctx->pr) { /* Set process priority to very low */ prio = 1; } break; case 5: if (!ctx->pr) { /* Set process priority to medium-hight */ prio = 5; } break; case 3: if (!ctx->pr) { /* Set process priority to high */ prio = 6; } break; case 7: if (ctx->hv) { /* Set process priority to very high */ prio = 7; } break; #endif default: /* nop */ break; } if (prio) { TCGv t0 = tcg_temp_new(); gen_load_spr(t0, SPR_PPR); tcg_gen_andi_tl(t0, t0, ~0x001C000000000000ULL); tcg_gen_ori_tl(t0, t0, ((uint64_t)prio) << 50); gen_store_spr(SPR_PPR, t0); tcg_temp_free(t0); } #endif } }
16,845
qemu
81bad50ec40311797c38a7691844c7d2df9b3823
1
target_ulong helper_dvpe(CPUMIPSState *env) { CPUMIPSState *other_cpu = first_cpu; target_ulong prev = env->mvp->CP0_MVPControl; do { /* Turn off all VPEs except the one executing the dvpe. */ if (other_cpu != env) { other_cpu->mvp->CP0_MVPControl &= ~(1 << CP0MVPCo_EVP); mips_vpe_sleep(other_cpu); } other_cpu = other_cpu->next_cpu; } while (other_cpu); return prev; }
16,846
FFmpeg
c6d3b3be1555257ff3f88da6b8dca2158dad2a85
1
static int sbr_make_f_master(AACContext *ac, SpectralBandReplication *sbr, SpectrumParameters *spectrum) { unsigned int temp, max_qmf_subbands; unsigned int start_min, stop_min; int k; const int8_t *sbr_offset_ptr; int16_t stop_dk[13]; if (sbr->sample_rate < 32000) { temp = 3000; } else if (sbr->sample_rate < 64000) { temp = 4000; } else temp = 5000; switch (sbr->sample_rate) { case 16000: sbr_offset_ptr = sbr_offset[0]; break; case 22050: sbr_offset_ptr = sbr_offset[1]; break; case 24000: sbr_offset_ptr = sbr_offset[2]; break; case 32000: sbr_offset_ptr = sbr_offset[3]; break; case 44100: case 48000: case 64000: sbr_offset_ptr = sbr_offset[4]; break; case 88200: case 96000: case 128000: case 176400: case 192000: sbr_offset_ptr = sbr_offset[5]; break; default: av_log(ac->avctx, AV_LOG_ERROR, "Unsupported sample rate for SBR: %d\n", sbr->sample_rate); return -1; } start_min = ((temp << 7) + (sbr->sample_rate >> 1)) / sbr->sample_rate; stop_min = ((temp << 8) + (sbr->sample_rate >> 1)) / sbr->sample_rate; sbr->k[0] = start_min + sbr_offset_ptr[spectrum->bs_start_freq]; if (spectrum->bs_stop_freq < 14) { sbr->k[2] = stop_min; make_bands(stop_dk, stop_min, 64, 13); qsort(stop_dk, 13, sizeof(stop_dk[0]), qsort_comparison_function_int16); for (k = 0; k < spectrum->bs_stop_freq; k++) sbr->k[2] += stop_dk[k]; } else if (spectrum->bs_stop_freq == 14) { sbr->k[2] = 2*sbr->k[0]; } else if (spectrum->bs_stop_freq == 15) { sbr->k[2] = 3*sbr->k[0]; } else { av_log(ac->avctx, AV_LOG_ERROR, "Invalid bs_stop_freq: %d\n", spectrum->bs_stop_freq); return -1; } sbr->k[2] = FFMIN(64, sbr->k[2]); // Requirements (14496-3 sp04 p205) if (sbr->sample_rate <= 32000) { max_qmf_subbands = 48; } else if (sbr->sample_rate == 44100) { max_qmf_subbands = 35; } else if (sbr->sample_rate >= 48000) max_qmf_subbands = 32; if (sbr->k[2] - sbr->k[0] > max_qmf_subbands) { av_log(ac->avctx, AV_LOG_ERROR, "Invalid bitstream, too many QMF subbands: %d\n", sbr->k[2] - sbr->k[0]); return -1; } if (!spectrum->bs_freq_scale) { int dk, k2diff; dk = spectrum->bs_alter_scale + 1; sbr->n_master = ((sbr->k[2] - sbr->k[0] + (dk&2)) >> dk) << 1; if (check_n_master(ac->avctx, sbr->n_master, sbr->spectrum_params.bs_xover_band)) return -1; for (k = 1; k <= sbr->n_master; k++) sbr->f_master[k] = dk; k2diff = sbr->k[2] - sbr->k[0] - sbr->n_master * dk; if (k2diff < 0) { sbr->f_master[1]--; sbr->f_master[2]-= (k2diff < -1); } else if (k2diff) { sbr->f_master[sbr->n_master]++; } sbr->f_master[0] = sbr->k[0]; for (k = 1; k <= sbr->n_master; k++) sbr->f_master[k] += sbr->f_master[k - 1]; } else { int half_bands = 7 - spectrum->bs_freq_scale; // bs_freq_scale = {1,2,3} int two_regions, num_bands_0; int vdk0_max, vdk1_min; int16_t vk0[49]; if (49 * sbr->k[2] > 110 * sbr->k[0]) { two_regions = 1; sbr->k[1] = 2 * sbr->k[0]; } else { two_regions = 0; sbr->k[1] = sbr->k[2]; } num_bands_0 = lrintf(half_bands * log2f(sbr->k[1] / (float)sbr->k[0])) * 2; if (num_bands_0 <= 0) { // Requirements (14496-3 sp04 p205) av_log(ac->avctx, AV_LOG_ERROR, "Invalid num_bands_0: %d\n", num_bands_0); return -1; } vk0[0] = 0; make_bands(vk0+1, sbr->k[0], sbr->k[1], num_bands_0); qsort(vk0 + 1, num_bands_0, sizeof(vk0[1]), qsort_comparison_function_int16); vdk0_max = vk0[num_bands_0]; vk0[0] = sbr->k[0]; for (k = 1; k <= num_bands_0; k++) { if (vk0[k] <= 0) { // Requirements (14496-3 sp04 p205) av_log(ac->avctx, AV_LOG_ERROR, "Invalid vDk0[%d]: %d\n", k, vk0[k]); return -1; } vk0[k] += vk0[k-1]; } if (two_regions) { int16_t vk1[49]; float invwarp = spectrum->bs_alter_scale ? 
0.76923076923076923077f : 1.0f; // bs_alter_scale = {0,1} int num_bands_1 = lrintf(half_bands * invwarp * log2f(sbr->k[2] / (float)sbr->k[1])) * 2; make_bands(vk1+1, sbr->k[1], sbr->k[2], num_bands_1); vdk1_min = array_min_int16(vk1 + 1, num_bands_1); if (vdk1_min < vdk0_max) { int change; qsort(vk1 + 1, num_bands_1, sizeof(vk1[1]), qsort_comparison_function_int16); change = FFMIN(vdk0_max - vk1[1], (vk1[num_bands_1] - vk1[1]) >> 1); vk1[1] += change; vk1[num_bands_1] -= change; } qsort(vk1 + 1, num_bands_1, sizeof(vk1[1]), qsort_comparison_function_int16); vk1[0] = sbr->k[1]; for (k = 1; k <= num_bands_1; k++) { if (vk1[k] <= 0) { // Requirements (14496-3 sp04 p205) av_log(ac->avctx, AV_LOG_ERROR, "Invalid vDk1[%d]: %d\n", k, vk1[k]); return -1; } vk1[k] += vk1[k-1]; } sbr->n_master = num_bands_0 + num_bands_1; if (check_n_master(ac->avctx, sbr->n_master, sbr->spectrum_params.bs_xover_band)) return -1; memcpy(&sbr->f_master[0], vk0, (num_bands_0 + 1) * sizeof(sbr->f_master[0])); memcpy(&sbr->f_master[num_bands_0 + 1], vk1 + 1, num_bands_1 * sizeof(sbr->f_master[0])); } else { sbr->n_master = num_bands_0; if (check_n_master(ac->avctx, sbr->n_master, sbr->spectrum_params.bs_xover_band)) return -1; memcpy(sbr->f_master, vk0, (num_bands_0 + 1) * sizeof(sbr->f_master[0])); } } return 0; }
16,847
FFmpeg
87917a328320ce77992ed4d87d8825c7216f6f32
1
static void event_loop(VideoState *cur_stream) { SDL_Event event; double incr, pos, frac; for (;;) { double x; refresh_loop_wait_event(cur_stream, &event); switch (event.type) { case SDL_KEYDOWN: if (exit_on_keydown) { do_exit(cur_stream); break; } switch (event.key.keysym.sym) { case SDLK_ESCAPE: case SDLK_q: do_exit(cur_stream); break; case SDLK_f: toggle_full_screen(cur_stream); cur_stream->force_refresh = 1; break; case SDLK_p: case SDLK_SPACE: toggle_pause(cur_stream); break; case SDLK_s: // S: Step to next frame step_to_next_frame(cur_stream); break; case SDLK_a: stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO); break; case SDLK_v: stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO); break; case SDLK_t: stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE); break; case SDLK_w: toggle_audio_display(cur_stream); break; case SDLK_PAGEUP: incr = 600.0; goto do_seek; case SDLK_PAGEDOWN: incr = -600.0; goto do_seek; case SDLK_LEFT: incr = -10.0; goto do_seek; case SDLK_RIGHT: incr = 10.0; goto do_seek; case SDLK_UP: incr = 60.0; goto do_seek; case SDLK_DOWN: incr = -60.0; do_seek: if (seek_by_bytes) { if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos >= 0) { pos = cur_stream->video_current_pos; } else if (cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos >= 0) { pos = cur_stream->audio_pkt.pos; } else pos = avio_tell(cur_stream->ic->pb); if (cur_stream->ic->bit_rate) incr *= cur_stream->ic->bit_rate / 8.0; else incr *= 180000.0; pos += incr; stream_seek(cur_stream, pos, incr, 1); } else { pos = get_master_clock(cur_stream); if (isnan(pos)) pos = (double)cur_stream->seek_pos / AV_TIME_BASE; pos += incr; if (cur_stream->ic->start_time != AV_NOPTS_VALUE && pos < cur_stream->ic->start_time / (double)AV_TIME_BASE) pos = cur_stream->ic->start_time / (double)AV_TIME_BASE; stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0); } break; default: break; } break; case SDL_VIDEOEXPOSE: cur_stream->force_refresh = 1; break; case SDL_MOUSEBUTTONDOWN: if (exit_on_mousedown) { do_exit(cur_stream); break; } case SDL_MOUSEMOTION: if (cursor_hidden) { SDL_ShowCursor(1); cursor_hidden = 0; } cursor_last_shown = av_gettime(); if (event.type == SDL_MOUSEBUTTONDOWN) { x = event.button.x; } else { if (event.motion.state != SDL_PRESSED) break; x = event.motion.x; } if (seek_by_bytes || cur_stream->ic->duration <= 0) { uint64_t size = avio_size(cur_stream->ic->pb); stream_seek(cur_stream, size*x/cur_stream->width, 0, 1); } else { int64_t ts; int ns, hh, mm, ss; int tns, thh, tmm, tss; tns = cur_stream->ic->duration / 1000000LL; thh = tns / 3600; tmm = (tns % 3600) / 60; tss = (tns % 60); frac = x / cur_stream->width; ns = frac * tns; hh = ns / 3600; mm = (ns % 3600) / 60; ss = (ns % 60); fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100, hh, mm, ss, thh, tmm, tss); ts = frac * cur_stream->ic->duration; if (cur_stream->ic->start_time != AV_NOPTS_VALUE) ts += cur_stream->ic->start_time; stream_seek(cur_stream, ts, 0, 0); } break; case SDL_VIDEORESIZE: screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0, SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL); screen_width = cur_stream->width = event.resize.w; screen_height = cur_stream->height = event.resize.h; cur_stream->force_refresh = 1; break; case SDL_QUIT: case FF_QUIT_EVENT: do_exit(cur_stream); break; case FF_ALLOC_EVENT: alloc_picture(event.user.data1); break; default: break; } } }
16,848
qemu
9201bb9a8c7cd3ba2382b7db5b2e40f603e61528
1
sdhci_write(void *opaque, hwaddr offset, uint64_t val, unsigned size) { SDHCIState *s = (SDHCIState *)opaque; unsigned shift = 8 * (offset & 0x3); uint32_t mask = ~(((1ULL << (size * 8)) - 1) << shift); uint32_t value = val; value <<= shift; switch (offset & ~0x3) { case SDHC_SYSAD: s->sdmasysad = (s->sdmasysad & mask) | value; MASKED_WRITE(s->sdmasysad, mask, value); /* Writing to last byte of sdmasysad might trigger transfer */ if (!(mask & 0xFF000000) && TRANSFERRING_DATA(s->prnsts) && s->blkcnt && s->blksize && SDHC_DMA_TYPE(s->hostctl) == SDHC_CTRL_SDMA) { sdhci_sdma_transfer_multi_blocks(s); break; case SDHC_BLKSIZE: if (!TRANSFERRING_DATA(s->prnsts)) { MASKED_WRITE(s->blksize, mask, value); MASKED_WRITE(s->blkcnt, mask >> 16, value >> 16); break; case SDHC_ARGUMENT: MASKED_WRITE(s->argument, mask, value); break; case SDHC_TRNMOD: /* DMA can be enabled only if it is supported as indicated by * capabilities register */ if (!(s->capareg & SDHC_CAN_DO_DMA)) { value &= ~SDHC_TRNS_DMA; MASKED_WRITE(s->trnmod, mask, value); MASKED_WRITE(s->cmdreg, mask >> 16, value >> 16); /* Writing to the upper byte of CMDREG triggers SD command generation */ if ((mask & 0xFF000000) || !sdhci_can_issue_command(s)) { break; sdhci_send_command(s); break; case SDHC_BDATA: if (sdhci_buff_access_is_sequential(s, offset - SDHC_BDATA)) { sdhci_write_dataport(s, value >> shift, size); break; case SDHC_HOSTCTL: if (!(mask & 0xFF0000)) { sdhci_blkgap_write(s, value >> 16); MASKED_WRITE(s->hostctl, mask, value); MASKED_WRITE(s->pwrcon, mask >> 8, value >> 8); MASKED_WRITE(s->wakcon, mask >> 24, value >> 24); if (!(s->prnsts & SDHC_CARD_PRESENT) || ((s->pwrcon >> 1) & 0x7) < 5 || !(s->capareg & (1 << (31 - ((s->pwrcon >> 1) & 0x7))))) { s->pwrcon &= ~SDHC_POWER_ON; break; case SDHC_CLKCON: if (!(mask & 0xFF000000)) { sdhci_reset_write(s, value >> 24); MASKED_WRITE(s->clkcon, mask, value); MASKED_WRITE(s->timeoutcon, mask >> 16, value >> 16); if (s->clkcon & SDHC_CLOCK_INT_EN) { s->clkcon |= SDHC_CLOCK_INT_STABLE; } else { s->clkcon &= ~SDHC_CLOCK_INT_STABLE; break; case SDHC_NORINTSTS: if (s->norintstsen & SDHC_NISEN_CARDINT) { value &= ~SDHC_NIS_CARDINT; s->norintsts &= mask | ~value; s->errintsts &= (mask >> 16) | ~(value >> 16); if (s->errintsts) { s->norintsts |= SDHC_NIS_ERR; } else { s->norintsts &= ~SDHC_NIS_ERR; sdhci_update_irq(s); break; case SDHC_NORINTSTSEN: MASKED_WRITE(s->norintstsen, mask, value); MASKED_WRITE(s->errintstsen, mask >> 16, value >> 16); s->norintsts &= s->norintstsen; s->errintsts &= s->errintstsen; if (s->errintsts) { s->norintsts |= SDHC_NIS_ERR; } else { s->norintsts &= ~SDHC_NIS_ERR; sdhci_update_irq(s); break; case SDHC_NORINTSIGEN: MASKED_WRITE(s->norintsigen, mask, value); MASKED_WRITE(s->errintsigen, mask >> 16, value >> 16); sdhci_update_irq(s); break; case SDHC_ADMAERR: MASKED_WRITE(s->admaerr, mask, value); break; case SDHC_ADMASYSADDR: s->admasysaddr = (s->admasysaddr & (0xFFFFFFFF00000000ULL | (uint64_t)mask)) | (uint64_t)value; break; case SDHC_ADMASYSADDR + 4: s->admasysaddr = (s->admasysaddr & (0x00000000FFFFFFFFULL | ((uint64_t)mask << 32))) | ((uint64_t)value << 32); break; case SDHC_FEAER: s->acmd12errsts |= value; s->errintsts |= (value >> 16) & s->errintstsen; if (s->acmd12errsts) { s->errintsts |= SDHC_EIS_CMD12ERR; if (s->errintsts) { s->norintsts |= SDHC_NIS_ERR; sdhci_update_irq(s); break; default: ERRPRINT("bad %ub write offset: addr[0x%04x] <- %u(0x%x)\n", size, (int)offset, value >> shift, value >> shift); break; DPRINT_L2("write %ub: addr[0x%04x] <- 
%u(0x%x)\n", size, (int)offset, value >> shift, value >> shift);
16,849
FFmpeg
f297dd3812510fc83080e265dc4534a3898005b0
1
static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf) { AVFilterContext *ctx = inlink->dst; ASyncContext *s = ctx->priv; AVFilterLink *outlink = ctx->outputs[0]; int nb_channels = av_get_channel_layout_nb_channels(buf->audio->channel_layout); int64_t pts = (buf->pts == AV_NOPTS_VALUE) ? buf->pts : av_rescale_q(buf->pts, inlink->time_base, outlink->time_base); int out_size; int64_t delta; /* buffer data until we get the first timestamp */ if (s->pts == AV_NOPTS_VALUE) { if (pts != AV_NOPTS_VALUE) { s->pts = pts - get_delay(s); } write_to_fifo(s, buf); return; } /* now wait for the next timestamp */ if (pts == AV_NOPTS_VALUE) { write_to_fifo(s, buf); return; } /* when we have two timestamps, compute how many samples would we have * to add/remove to get proper sync between data and timestamps */ delta = pts - s->pts - get_delay(s); out_size = avresample_available(s->avr); if (labs(delta) > s->min_delta) { av_log(ctx, AV_LOG_VERBOSE, "Discontinuity - %"PRId64" samples.\n", delta); out_size += delta; } else if (s->resample) { int comp = av_clip(delta, -s->max_comp, s->max_comp); av_log(ctx, AV_LOG_VERBOSE, "Compensating %d samples per second.\n", comp); avresample_set_compensation(s->avr, delta, inlink->sample_rate); } if (out_size > 0) { AVFilterBufferRef *buf_out = ff_get_audio_buffer(outlink, AV_PERM_WRITE, out_size); if (!buf_out) return; avresample_read(s->avr, (void**)buf_out->extended_data, out_size); buf_out->pts = s->pts; if (delta > 0) { av_samples_set_silence(buf_out->extended_data, out_size - delta, delta, nb_channels, buf->format); } ff_filter_samples(outlink, buf_out); } else { av_log(ctx, AV_LOG_WARNING, "Non-monotonous timestamps, dropping " "whole buffer.\n"); } /* drain any remaining buffered data */ avresample_read(s->avr, NULL, avresample_available(s->avr)); s->pts = pts - avresample_get_delay(s->avr); avresample_convert(s->avr, NULL, 0, 0, (void**)buf->extended_data, buf->linesize[0], buf->audio->nb_samples); avfilter_unref_buffer(buf); }
16,851
qemu
1635eecc413ed680013cf77e6994901cafe15590
1
static void bmdma_irq(void *opaque, int n, int level) { BMDMAState *bm = opaque; if (!level) { /* pass through lower */ qemu_set_irq(bm->irq, level); return; } if (bm) { bm->status |= BM_STATUS_INT; } /* trigger the real irq */ qemu_set_irq(bm->irq, level); }
16,852
qemu
64e69e80920d82df3fa679bc41b13770d2f99360
1
iscsi_aio_cancel(BlockDriverAIOCB *blockacb) { IscsiAIOCB *acb = (IscsiAIOCB *)blockacb; IscsiLun *iscsilun = acb->iscsilun; acb->common.cb(acb->common.opaque, -ECANCELED); acb->canceled = 1; /* send a task mgmt call to the target to cancel the task on the target */ iscsi_task_mgmt_abort_task_async(iscsilun->iscsi, acb->task, iscsi_abort_task_cb, NULL); /* then also cancel the task locally in libiscsi */ iscsi_scsi_task_cancel(iscsilun->iscsi, acb->task); }
16,853
FFmpeg
7fc73d9ab781f66b63f3bbe2f384f4f639ae78e9
1
static int rm_read_packet(AVFormatContext *s, AVPacket *pkt) { RMDemuxContext *rm = s->priv_data; AVStream *st; int i, len, res, seq = 1; int64_t timestamp, pos; int flags; for (;;) { if (rm->audio_pkt_cnt) { // If there are queued audio packet return them first st = s->streams[rm->audio_stream_num]; res = ff_rm_retrieve_cache(s, s->pb, st, st->priv_data, pkt); if(res < 0) return res; flags = 0; } else { if (rm->old_format) { RMStream *ast; st = s->streams[0]; ast = st->priv_data; timestamp = AV_NOPTS_VALUE; len = !ast->audio_framesize ? RAW_PACKET_SIZE : ast->coded_framesize * ast->sub_packet_h / 2; flags = (seq++ == 1) ? 2 : 0; pos = avio_tell(s->pb); } else { len=sync(s, &timestamp, &flags, &i, &pos); if (len > 0) st = s->streams[i]; } if(len<0 || url_feof(s->pb)) return AVERROR(EIO); res = ff_rm_parse_packet (s, s->pb, st, st->priv_data, len, pkt, &seq, flags, timestamp); if((flags&2) && (seq&0x7F) == 1) av_add_index_entry(st, pos, timestamp, 0, 0, AVINDEX_KEYFRAME); if (res) continue; } if( (st->discard >= AVDISCARD_NONKEY && !(flags&2)) || st->discard >= AVDISCARD_ALL){ av_free_packet(pkt); } else break; } return 0; }
16,854
qemu
193982c6f9424779b53a168fe32ebc30a776cbf1
1
static void do_pci_unregister_device(PCIDevice *pci_dev) { pci_dev->bus->devices[pci_dev->devfn] = NULL; pci_config_free(pci_dev); memory_region_del_subregion(&pci_dev->bus_master_container_region, &pci_dev->bus_master_enable_region); address_space_destroy(&pci_dev->bus_master_as); }
16,855
qemu
b931bfbf042983f311b3b09894d8030b2755a638
0
static void vhost_user_cleanup(NetClientState *nc) { VhostUserState *s = DO_UPCAST(VhostUserState, nc, nc); vhost_user_stop(s); qemu_purge_queued_packets(nc); }
16,857
qemu
0187c7989a5cedd4f88bba76839cc9c44fb3fc81
0
static void virtio_net_tx_timer(void *opaque) { VirtIONetQueue *q = opaque; VirtIONet *n = q->n; VirtIODevice *vdev = VIRTIO_DEVICE(n); assert(vdev->vm_running); q->tx_waiting = 0; /* Just in case the driver is not ready on more */ if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) { return; } virtio_queue_set_notification(q->tx_vq, 1); virtio_net_flush_tx(q); }
16,858
qemu
a8170e5e97ad17ca169c64ba87ae2f53850dab4c
0
static uint64_t omap_pwl_read(void *opaque, target_phys_addr_t addr, unsigned size) { struct omap_pwl_s *s = (struct omap_pwl_s *) opaque; int offset = addr & OMAP_MPUI_REG_MASK; if (size != 1) { return omap_badwidth_read8(opaque, addr); } switch (offset) { case 0x00: /* PWL_LEVEL */ return s->level; case 0x04: /* PWL_CTRL */ return s->enable; } OMAP_BAD_REG(addr); return 0; }
16,859
FFmpeg
a7196795613f2cd416cf2c51c767a1125e27b057
0
int ff_parse_packing_format(int *ret, const char *arg, void *log_ctx) { char *tail; int planar = strtol(arg, &tail, 10); if (*tail) { planar = (strcmp(arg, "packed") != 0); } else if (planar != 0 && planar != 1) { av_log(log_ctx, AV_LOG_ERROR, "Invalid packing format '%s'\n", arg); return AVERROR(EINVAL); } *ret = planar; return 0; }
16,860
qemu
b9bec74bcb16519a876ec21cd5277c526a9b512d
0
static int kvm_get_debugregs(CPUState *env) { #ifdef KVM_CAP_DEBUGREGS struct kvm_debugregs dbgregs; int i, ret; if (!kvm_has_debugregs()) { return 0; } ret = kvm_vcpu_ioctl(env, KVM_GET_DEBUGREGS, &dbgregs); if (ret < 0) { return ret; } for (i = 0; i < 4; i++) { env->dr[i] = dbgregs.db[i]; } env->dr[4] = env->dr[6] = dbgregs.dr6; env->dr[5] = env->dr[7] = dbgregs.dr7; #endif return 0; }
16,861
qemu
df1d4c341a735334de23513f17bf110c8c49b3e7
0
static void scsi_hd_realize(SCSIDevice *dev, Error **errp) { SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); blkconf_blocksizes(&s->qdev.conf); s->qdev.blocksize = s->qdev.conf.logical_block_size; s->qdev.type = TYPE_DISK; if (!s->product) { s->product = g_strdup("QEMU HARDDISK"); } scsi_realize(&s->qdev, errp); }
16,863
qemu
51b19ebe4320f3dcd93cea71235c1219318ddfd2
0
static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf, size_t size) { VirtIONet *n = qemu_get_nic_opaque(nc); VirtIONetQueue *q = virtio_net_get_subqueue(nc); VirtIODevice *vdev = VIRTIO_DEVICE(n); struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE]; struct virtio_net_hdr_mrg_rxbuf mhdr; unsigned mhdr_cnt = 0; size_t offset, i, guest_offset; if (!virtio_net_can_receive(nc)) { return -1; } /* hdr_len refers to the header we supply to the guest */ if (!virtio_net_has_buffers(q, size + n->guest_hdr_len - n->host_hdr_len)) { return 0; } if (!receive_filter(n, buf, size)) return size; offset = i = 0; while (offset < size) { VirtQueueElement elem; int len, total; const struct iovec *sg = elem.in_sg; total = 0; if (virtqueue_pop(q->rx_vq, &elem) == 0) { if (i == 0) return -1; error_report("virtio-net unexpected empty queue: " "i %zd mergeable %d offset %zd, size %zd, " "guest hdr len %zd, host hdr len %zd " "guest features 0x%" PRIx64, i, n->mergeable_rx_bufs, offset, size, n->guest_hdr_len, n->host_hdr_len, vdev->guest_features); exit(1); } if (elem.in_num < 1) { error_report("virtio-net receive queue contains no in buffers"); exit(1); } if (i == 0) { assert(offset == 0); if (n->mergeable_rx_bufs) { mhdr_cnt = iov_copy(mhdr_sg, ARRAY_SIZE(mhdr_sg), sg, elem.in_num, offsetof(typeof(mhdr), num_buffers), sizeof(mhdr.num_buffers)); } receive_header(n, sg, elem.in_num, buf, size); offset = n->host_hdr_len; total += n->guest_hdr_len; guest_offset = n->guest_hdr_len; } else { guest_offset = 0; } /* copy in packet. ugh */ len = iov_from_buf(sg, elem.in_num, guest_offset, buf + offset, size - offset); total += len; offset += len; /* If buffers can't be merged, at this point we * must have consumed the complete packet. * Otherwise, drop it. */ if (!n->mergeable_rx_bufs && offset < size) { virtqueue_discard(q->rx_vq, &elem, total); return size; } /* signal other side */ virtqueue_fill(q->rx_vq, &elem, total, i++); } if (mhdr_cnt) { virtio_stw_p(vdev, &mhdr.num_buffers, i); iov_from_buf(mhdr_sg, mhdr_cnt, 0, &mhdr.num_buffers, sizeof mhdr.num_buffers); } virtqueue_flush(q->rx_vq, i); virtio_notify(vdev, q->rx_vq); return size; }
16,864
qemu
7d553f27fce284805d7f94603932045ee3bbb979
0
int usb_claim_port(USBDevice *dev) { USBBus *bus = usb_bus_from_device(dev); USBPort *port; assert(dev->port == NULL); if (dev->port_path) { QTAILQ_FOREACH(port, &bus->free, next) { if (strcmp(port->path, dev->port_path) == 0) { break; } } if (port == NULL) { error_report("Error: usb port %s (bus %s) not found (in use?)", dev->port_path, bus->qbus.name); return -1; } } else { if (bus->nfree == 1 && strcmp(object_get_typename(OBJECT(dev)), "usb-hub") != 0) { /* Create a new hub and chain it on */ usb_create_simple(bus, "usb-hub"); } if (bus->nfree == 0) { error_report("Error: tried to attach usb device %s to a bus " "with no free ports", dev->product_desc); return -1; } port = QTAILQ_FIRST(&bus->free); } trace_usb_port_claim(bus->busnr, port->path); QTAILQ_REMOVE(&bus->free, port, next); bus->nfree--; dev->port = port; port->dev = dev; QTAILQ_INSERT_TAIL(&bus->used, port, next); bus->nused++; return 0; }
16,865
qemu
15408b428f5b4db56da555fbda4f1aaf40d77f4b
0
lqspi_read(void *opaque, hwaddr addr, unsigned int size) { int i; XilinxQSPIPS *q = opaque; XilinxSPIPS *s = opaque; uint32_t ret; if (addr >= q->lqspi_cached_addr && addr <= q->lqspi_cached_addr + LQSPI_CACHE_SIZE - 4) { ret = q->lqspi_buf[(addr - q->lqspi_cached_addr) >> 2]; DB_PRINT("addr: %08x, data: %08x\n", (unsigned)addr, (unsigned)ret); return ret; } else { int flash_addr = (addr / num_effective_busses(s)); int slave = flash_addr >> LQSPI_ADDRESS_BITS; int cache_entry = 0; DB_PRINT("config reg status: %08x\n", s->regs[R_LQSPI_CFG]); fifo8_reset(&s->tx_fifo); fifo8_reset(&s->rx_fifo); s->regs[R_CONFIG] &= ~CS; s->regs[R_CONFIG] |= ((~(1 << slave) << CS_SHIFT) & CS) | MANUAL_CS; xilinx_spips_update_cs_lines(s); /* instruction */ DB_PRINT("pushing read instruction: %02x\n", (uint8_t)(s->regs[R_LQSPI_CFG] & LQSPI_CFG_INST_CODE)); fifo8_push(&s->tx_fifo, s->regs[R_LQSPI_CFG] & LQSPI_CFG_INST_CODE); /* read address */ DB_PRINT("pushing read address %06x\n", flash_addr); fifo8_push(&s->tx_fifo, (uint8_t)(flash_addr >> 16)); fifo8_push(&s->tx_fifo, (uint8_t)(flash_addr >> 8)); fifo8_push(&s->tx_fifo, (uint8_t)flash_addr); /* mode bits */ if (s->regs[R_LQSPI_CFG] & LQSPI_CFG_MODE_EN) { fifo8_push(&s->tx_fifo, extract32(s->regs[R_LQSPI_CFG], LQSPI_CFG_MODE_SHIFT, LQSPI_CFG_MODE_WIDTH)); } /* dummy bytes */ for (i = 0; i < (extract32(s->regs[R_LQSPI_CFG], LQSPI_CFG_DUMMY_SHIFT, LQSPI_CFG_DUMMY_WIDTH)); ++i) { DB_PRINT("pushing dummy byte\n"); fifo8_push(&s->tx_fifo, 0); } xilinx_spips_update_cs_lines(s); xilinx_spips_flush_txfifo(s); fifo8_reset(&s->rx_fifo); DB_PRINT("starting QSPI data read\n"); for (i = 0; i < LQSPI_CACHE_SIZE / 4; ++i) { tx_data_bytes(s, 0, 4); xilinx_spips_flush_txfifo(s); rx_data_bytes(s, &q->lqspi_buf[cache_entry], 4); cache_entry++; } xilinx_spips_update_cs_lines(s); s->regs[R_CONFIG] |= CS; xilinx_spips_update_cs_lines(s); q->lqspi_cached_addr = addr; return lqspi_read(opaque, addr, size); } }
16,866
qemu
245f7b51c0ea04fb2224b1127430a096c91aee70
0
static int send_jpeg_rect(VncState *vs, int x, int y, int w, int h, int quality) { struct jpeg_compress_struct cinfo; struct jpeg_error_mgr jerr; struct jpeg_destination_mgr manager; JSAMPROW row[1]; uint8_t *buf; int dy; if (ds_get_bytes_per_pixel(vs->ds) == 1) return send_full_color_rect(vs, w, h); buf = qemu_malloc(w * 3); row[0] = buf; buffer_reserve(&vs->tight_jpeg, 2048); cinfo.err = jpeg_std_error(&jerr); jpeg_create_compress(&cinfo); cinfo.client_data = vs; cinfo.image_width = w; cinfo.image_height = h; cinfo.input_components = 3; cinfo.in_color_space = JCS_RGB; jpeg_set_defaults(&cinfo); jpeg_set_quality(&cinfo, quality, true); manager.init_destination = jpeg_init_destination; manager.empty_output_buffer = jpeg_empty_output_buffer; manager.term_destination = jpeg_term_destination; cinfo.dest = &manager; jpeg_start_compress(&cinfo, true); for (dy = 0; dy < h; dy++) { jpeg_prepare_row(vs, buf, x, y + dy, w); jpeg_write_scanlines(&cinfo, row, 1); } jpeg_finish_compress(&cinfo); jpeg_destroy_compress(&cinfo); vnc_write_u8(vs, VNC_TIGHT_JPEG << 4); tight_send_compact_size(vs, vs->tight_jpeg.offset); vnc_write(vs, vs->tight_jpeg.buffer, vs->tight_jpeg.offset); buffer_reset(&vs->tight_jpeg); return 1; }
16,867
qemu
1ea879e5580f63414693655fcf0328559cdce138
0
static int alsa_init_out (HWVoiceOut *hw, audsettings_t *as) { ALSAVoiceOut *alsa = (ALSAVoiceOut *) hw; struct alsa_params_req req; struct alsa_params_obt obt; snd_pcm_t *handle; audsettings_t obt_as; req.fmt = aud_to_alsafmt (as->fmt); req.freq = as->freq; req.nchannels = as->nchannels; req.period_size = conf.period_size_out; req.buffer_size = conf.buffer_size_out; req.size_in_usec = conf.size_in_usec_out; req.override_mask = !!conf.period_size_out_overridden | (!!conf.buffer_size_out_overridden << 1); if (alsa_open (0, &req, &obt, &handle)) { return -1; } obt_as.freq = obt.freq; obt_as.nchannels = obt.nchannels; obt_as.fmt = obt.fmt; obt_as.endianness = obt.endianness; audio_pcm_init_info (&hw->info, &obt_as); hw->samples = obt.samples; alsa->pcm_buf = audio_calloc (AUDIO_FUNC, obt.samples, 1 << hw->info.shift); if (!alsa->pcm_buf) { dolog ("Could not allocate DAC buffer (%d samples, each %d bytes)\n", hw->samples, 1 << hw->info.shift); alsa_anal_close (&handle); return -1; } alsa->handle = handle; return 0; }
16,869
FFmpeg
04001767728fd4ed8b4f9d2ebbb9f9a8c9a7be0d
0
static int query_format(struct vf_instance *vf, unsigned int fmt) { /* FIXME - figure out which other formats work */ switch (fmt) { case IMGFMT_YV12: case IMGFMT_IYUV: case IMGFMT_I420: return ff_vf_next_query_format(vf, fmt); } return 0; }
16,871
qemu
f5793fd9e1fd89808f4adbfe690235b094176a37
0
static void vfio_probe_ati_bar4_quirk(VFIOPCIDevice *vdev, int nr) { VFIOQuirk *quirk; VFIOConfigWindowQuirk *window; /* This windows doesn't seem to be used except by legacy VGA code */ if (!vfio_pci_is(vdev, PCI_VENDOR_ID_ATI, PCI_ANY_ID) || !vdev->has_vga || nr != 4) { return; } quirk = g_malloc0(sizeof(*quirk)); quirk->mem = g_new0(MemoryRegion, 2); quirk->nr_mem = 2; window = quirk->data = g_malloc0(sizeof(*window) + sizeof(VFIOConfigWindowMatch)); window->vdev = vdev; window->address_offset = 0; window->data_offset = 4; window->nr_matches = 1; window->matches[0].match = 0x4000; window->matches[0].mask = PCIE_CONFIG_SPACE_SIZE - 1; window->bar = nr; window->addr_mem = &quirk->mem[0]; window->data_mem = &quirk->mem[1]; memory_region_init_io(window->addr_mem, OBJECT(vdev), &vfio_generic_window_address_quirk, window, "vfio-ati-bar4-window-address-quirk", 4); memory_region_add_subregion_overlap(&vdev->bars[nr].region.mem, window->address_offset, window->addr_mem, 1); memory_region_init_io(window->data_mem, OBJECT(vdev), &vfio_generic_window_data_quirk, window, "vfio-ati-bar4-window-data-quirk", 4); memory_region_add_subregion_overlap(&vdev->bars[nr].region.mem, window->data_offset, window->data_mem, 1); QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next); trace_vfio_quirk_ati_bar4_probe(vdev->vbasedev.name); }
16,872
qemu
fc3959e4669a1c2149b91ccb05101cfc7ae1fc05
0
int coroutine_fn bdrv_co_write_zeroes(BlockDriverState *bs, int64_t sector_num, int nb_sectors, BdrvRequestFlags flags) { trace_bdrv_co_write_zeroes(bs, sector_num, nb_sectors, flags); if (!(bs->open_flags & BDRV_O_UNMAP)) { flags &= ~BDRV_REQ_MAY_UNMAP; } return bdrv_co_do_writev(bs, sector_num, nb_sectors, NULL, BDRV_REQ_ZERO_WRITE | flags); }
16,873
qemu
c1e1a491906bd1d769edb16f2b2be7ff6833d26f
0
void axisdev88_init (ram_addr_t ram_size, int vga_ram_size, const char *boot_device, DisplayState *ds, const char *kernel_filename, const char *kernel_cmdline, const char *initrd_filename, const char *cpu_model) { CPUState *env; struct etraxfs_pic *pic; void *etraxfs_dmac; struct etraxfs_dma_client *eth[2] = {NULL, NULL}; int kernel_size; int i; int nand_regs; int gpio_regs; ram_addr_t phys_ram; ram_addr_t phys_intmem; /* init CPUs */ if (cpu_model == NULL) { cpu_model = "crisv32"; } env = cpu_init(cpu_model); qemu_register_reset(main_cpu_reset, env); /* allocate RAM */ phys_ram = qemu_ram_alloc(ram_size); cpu_register_physical_memory(0x40000000, ram_size, phys_ram | IO_MEM_RAM); /* The ETRAX-FS has 128Kb on chip ram, the docs refer to it as the internal memory. */ phys_intmem = qemu_ram_alloc(INTMEM_SIZE); cpu_register_physical_memory(0x38000000, INTMEM_SIZE, phys_intmem | IO_MEM_RAM); /* Attach a NAND flash to CS1. */ nand_state.nand = nand_init(NAND_MFR_STMICRO, 0x39); nand_regs = cpu_register_io_memory(0, nand_read, nand_write, &nand_state); cpu_register_physical_memory(0x10000000, 0x05000000, nand_regs); gpio_state.nand = &nand_state; gpio_regs = cpu_register_io_memory(0, gpio_read, gpio_write, &gpio_state); cpu_register_physical_memory(0x3001a000, 0x5c, gpio_regs); pic = etraxfs_pic_init(env, 0x3001c000); etraxfs_dmac = etraxfs_dmac_init(env, 0x30000000, 10); for (i = 0; i < 10; i++) { /* On ETRAX, odd numbered channels are inputs. */ etraxfs_dmac_connect(etraxfs_dmac, i, pic->irq + 7 + i, i & 1); } /* Add the two ethernet blocks. */ eth[0] = etraxfs_eth_init(&nd_table[0], env, pic->irq + 25, 0x30034000); if (nb_nics > 1) eth[1] = etraxfs_eth_init(&nd_table[1], env, pic->irq + 26, 0x30036000); /* The DMA Connector block is missing, hardwire things for now. */ etraxfs_dmac_connect_client(etraxfs_dmac, 0, eth[0]); etraxfs_dmac_connect_client(etraxfs_dmac, 1, eth[0] + 1); if (eth[1]) { etraxfs_dmac_connect_client(etraxfs_dmac, 6, eth[1]); etraxfs_dmac_connect_client(etraxfs_dmac, 7, eth[1] + 1); } /* 2 timers. */ etraxfs_timer_init(env, pic->irq + 0x1b, pic->nmi + 1, 0x3001e000); etraxfs_timer_init(env, pic->irq + 0x1b, pic->nmi + 1, 0x3005e000); for (i = 0; i < 4; i++) { if (serial_hds[i]) { etraxfs_ser_init(env, pic->irq + 0x14 + i, serial_hds[i], 0x30026000 + i * 0x2000); } } if (kernel_filename) { uint64_t entry, high; int kcmdline_len; /* Boots a kernel elf binary, os/linux-2.6/vmlinux from the axis devboard SDK. */ kernel_size = load_elf(kernel_filename, -0x80000000LL, &entry, NULL, &high); bootstrap_pc = entry; if (kernel_size < 0) { /* Takes a kimage from the axis devboard SDK. */ kernel_size = load_image(kernel_filename, phys_ram_base + 0x4000); bootstrap_pc = 0x40004000; env->regs[9] = 0x40004000 + kernel_size; } env->regs[8] = 0x56902387; /* RAM init magic. */ if (kernel_cmdline && (kcmdline_len = strlen(kernel_cmdline))) { if (kcmdline_len > 256) { fprintf(stderr, "Too long CRIS kernel cmdline (max 256)\n"); exit(1); } pstrcpy_targphys(high, 256, kernel_cmdline); /* Let the kernel know we are modifying the cmdline. */ env->regs[10] = 0x87109563; env->regs[11] = high; } } env->pc = bootstrap_pc; printf ("pc =%x\n", env->pc); printf ("ram size =%ld\n", ram_size); }
16,874
qemu
79c18be7dfe660ab48f9f535e6cabd38c9f1d73b
0
uint32_t HELPER(neon_abd_f32)(uint32_t a, uint32_t b) { float32 f0 = make_float32(a); float32 f1 = make_float32(b); return float32_val((float32_compare_quiet(f0, f1, NFS) == 1) ? float32_sub(f0, f1, NFS) : float32_sub(f1, f0, NFS)); }
16,875
qemu
9f2130f58d5dd4e1fcb435cca08bf77e7c32e6c6
0
static void xen_init_pv(MachineState *machine) { DriveInfo *dinfo; int i; /* Initialize backend core & drivers */ if (xen_be_init() != 0) { fprintf(stderr, "%s: xen backend core setup failed\n", __FUNCTION__); exit(1); } switch (xen_mode) { case XEN_ATTACH: /* nothing to do, xend handles everything */ break; #ifdef CONFIG_XEN_PV_DOMAIN_BUILD case XEN_CREATE: { const char *kernel_filename = machine->kernel_filename; const char *kernel_cmdline = machine->kernel_cmdline; const char *initrd_filename = machine->initrd_filename; if (xen_domain_build_pv(kernel_filename, initrd_filename, kernel_cmdline) < 0) { fprintf(stderr, "xen pv domain creation failed\n"); exit(1); } break; } #endif case XEN_EMULATE: fprintf(stderr, "xen emulation not implemented (yet)\n"); exit(1); break; default: fprintf(stderr, "unhandled xen_mode %d\n", xen_mode); exit(1); break; } xen_be_register_common(); xen_be_register("vfb", &xen_framebuffer_ops); xen_be_register("qnic", &xen_netdev_ops); /* configure framebuffer */ if (xenfb_enabled) { xen_config_dev_vfb(0, "vnc"); xen_config_dev_vkbd(0); } /* configure disks */ for (i = 0; i < 16; i++) { dinfo = drive_get(IF_XEN, 0, i); if (!dinfo) continue; xen_config_dev_blk(dinfo); } /* configure nics */ for (i = 0; i < nb_nics; i++) { if (!nd_table[i].model || 0 != strcmp(nd_table[i].model, "xen")) continue; xen_config_dev_nic(nd_table + i); } /* config cleanup hook */ atexit(xen_config_cleanup); /* setup framebuffer */ xen_init_display(xen_domid); }
16,876
qemu
c2b38b277a7882a592f4f2ec955084b2b756daaa
0
static void iohandler_init(void)
{
    if (!iohandler_ctx) {
        iohandler_ctx = aio_context_new(&error_abort);
    }
}
16,877
qemu
23dceda62a3643f734b7aa474fa6052593ae1a70
0
static void tcg_out_tb_finalize(TCGContext *s) { static const void * const helpers[8] = { helper_ret_stb_mmu, helper_le_stw_mmu, helper_le_stl_mmu, helper_le_stq_mmu, helper_ret_ldub_mmu, helper_le_lduw_mmu, helper_le_ldul_mmu, helper_le_ldq_mmu, }; tcg_insn_unit *thunks[8] = { }; TCGLabelQemuLdst *l; for (l = s->be->labels; l != NULL; l = l->next) { long x = l->is_ld * 4 + l->size; tcg_insn_unit *dest = thunks[x]; /* The out-of-line thunks are all the same; load the return address from B0, load the GP, and branch to the code. Note that we are always post-call, so the register window has rolled, so we're using incoming parameter register numbers, not outgoing. */ if (dest == NULL) { uintptr_t *desc = (uintptr_t *)helpers[x]; uintptr_t func = desc[0], gp = desc[1], disp; thunks[x] = dest = s->code_ptr; tcg_out_bundle(s, mlx, INSN_NOP_M, tcg_opc_l2 (gp), tcg_opc_x2 (TCG_REG_P0, OPC_MOVL_X2, TCG_REG_R1, gp)); tcg_out_bundle(s, mii, INSN_NOP_M, INSN_NOP_I, tcg_opc_i22(TCG_REG_P0, OPC_MOV_I22, l->is_ld ? TCG_REG_R35 : TCG_REG_R36, TCG_REG_B0)); disp = (tcg_insn_unit *)func - s->code_ptr; tcg_out_bundle(s, mLX, INSN_NOP_M, tcg_opc_l3 (disp), tcg_opc_x3 (TCG_REG_P0, OPC_BRL_SPTK_MANY_X3, disp)); } reloc_pcrel21b_slot2(l->label_ptr, dest); } }
16,878
qemu
dc162c8e4f088b08575460cca35b042d58c141aa
0
void bdrv_set_dirty_iter(HBitmapIter *hbi, int64_t offset)
{
    assert(hbi->hb);
    hbitmap_iter_init(hbi, hbi->hb, offset);
}
16,880
qemu
4a1418e07bdcfaa3177739e04707ecaec75d89e1
0
static uint32_t qpi_mem_readw(void *opaque, target_phys_addr_t addr)
{
    return 0;
}
16,881
FFmpeg
3176217c60ca7828712985092d9102d331ea4f3d
0
static av_always_inline void hl_decode_mb_idct_luma(const H264Context *h, H264SliceContext *sl, int mb_type, int simple, int transform_bypass, int pixel_shift, const int *block_offset, int linesize, uint8_t *dest_y, int p) { void (*idct_add)(uint8_t *dst, int16_t *block, int stride); int i; block_offset += 16 * p; if (!IS_INTRA4x4(mb_type)) { if (IS_INTRA16x16(mb_type)) { if (transform_bypass) { if (h->sps.profile_idc == 244 && (sl->intra16x16_pred_mode == VERT_PRED8x8 || sl->intra16x16_pred_mode == HOR_PRED8x8)) { h->hpc.pred16x16_add[sl->intra16x16_pred_mode](dest_y, block_offset, sl->mb + (p * 256 << pixel_shift), linesize); } else { for (i = 0; i < 16; i++) if (sl->non_zero_count_cache[scan8[i + p * 16]] || dctcoef_get(sl->mb, pixel_shift, i * 16 + p * 256)) h->h264dsp.h264_add_pixels4_clear(dest_y + block_offset[i], sl->mb + (i * 16 + p * 256 << pixel_shift), linesize); } } else { h->h264dsp.h264_idct_add16intra(dest_y, block_offset, sl->mb + (p * 256 << pixel_shift), linesize, sl->non_zero_count_cache + p * 5 * 8); } } else if (sl->cbp & 15) { if (transform_bypass) { const int di = IS_8x8DCT(mb_type) ? 4 : 1; idct_add = IS_8x8DCT(mb_type) ? h->h264dsp.h264_add_pixels8_clear : h->h264dsp.h264_add_pixels4_clear; for (i = 0; i < 16; i += di) if (sl->non_zero_count_cache[scan8[i + p * 16]]) idct_add(dest_y + block_offset[i], sl->mb + (i * 16 + p * 256 << pixel_shift), linesize); } else { if (IS_8x8DCT(mb_type)) h->h264dsp.h264_idct8_add4(dest_y, block_offset, sl->mb + (p * 256 << pixel_shift), linesize, sl->non_zero_count_cache + p * 5 * 8); else h->h264dsp.h264_idct_add16(dest_y, block_offset, sl->mb + (p * 256 << pixel_shift), linesize, sl->non_zero_count_cache + p * 5 * 8); } } } }
16,882
qemu
7ec1e5ea4bd0700fa48da86bffa2fcc6146c410a
0
static void gen_ldstub_asi(DisasContext *dc, TCGv dst, TCGv addr, int insn) { TCGv_i32 r_asi, r_size, r_sign; TCGv_i64 s64, d64 = tcg_temp_new_i64(); r_asi = gen_get_asi(dc, insn); r_size = tcg_const_i32(1); r_sign = tcg_const_i32(0); gen_helper_ld_asi(d64, cpu_env, addr, r_asi, r_size, r_sign); tcg_temp_free_i32(r_sign); s64 = tcg_const_i64(0xff); gen_helper_st_asi(cpu_env, addr, s64, r_asi, r_size); tcg_temp_free_i64(s64); tcg_temp_free_i32(r_size); tcg_temp_free_i32(r_asi); tcg_gen_trunc_i64_tl(dst, d64); tcg_temp_free_i64(d64); }
16,883
qemu
e8ede0a8bb5298a6979bcf7ed84ef64a64a4e3fe
0
float32 HELPER(ucf64_df2si)(float64 x, CPUUniCore32State *env)
{
    return ucf64_itos(float64_to_int32(x, &env->ucf64.fp_status));
}
16,884
qemu
4fa4ce7107c6ec432f185307158c5df91ce54308
0
static int local_link(FsContext *ctx, V9fsPath *oldpath, V9fsPath *dirpath, const char *name) { int ret; V9fsString newpath; char buffer[PATH_MAX], buffer1[PATH_MAX]; v9fs_string_init(&newpath); v9fs_string_sprintf(&newpath, "%s/%s", dirpath->data, name); ret = link(rpath(ctx, oldpath->data, buffer), rpath(ctx, newpath.data, buffer1)); /* now link the virtfs_metadata files */ if (!ret && (ctx->export_flags & V9FS_SM_MAPPED_FILE)) { /* Link the .virtfs_metadata files. Create the metadata directory */ ret = local_create_mapped_attr_dir(ctx, newpath.data); if (ret < 0) { goto err_out; } ret = link(local_mapped_attr_path(ctx, oldpath->data, buffer), local_mapped_attr_path(ctx, newpath.data, buffer1)); if (ret < 0 && errno != ENOENT) { goto err_out; } } err_out: v9fs_string_free(&newpath); return ret; }
16,885
FFmpeg
72dbc610be3272ba36603f78a39cc2d2d8fe0cc3
0
void ff_avg_h264_qpel4_mc02_msa(uint8_t *dst, const uint8_t *src,
                                ptrdiff_t stride)
{
    avc_luma_vt_and_aver_dst_4x4_msa(src - (stride * 2), stride, dst, stride);
}
16,886
qemu
8dfba2797761d8a43744e4e6571c8175e448a478
0
static void commit_complete(BlockJob *job, void *opaque) { CommitBlockJob *s = container_of(job, CommitBlockJob, common); CommitCompleteData *data = opaque; BlockDriverState *active = s->active; BlockDriverState *top = blk_bs(s->top); BlockDriverState *base = blk_bs(s->base); BlockDriverState *overlay_bs = bdrv_find_overlay(active, top); int ret = data->ret; if (!block_job_is_cancelled(&s->common) && ret == 0) { /* success */ ret = bdrv_drop_intermediate(active, top, base, s->backing_file_str); } /* restore base open flags here if appropriate (e.g., change the base back * to r/o). These reopens do not need to be atomic, since we won't abort * even on failure here */ if (s->base_flags != bdrv_get_flags(base)) { bdrv_reopen(base, s->base_flags, NULL); } if (overlay_bs && s->orig_overlay_flags != bdrv_get_flags(overlay_bs)) { bdrv_reopen(overlay_bs, s->orig_overlay_flags, NULL); } g_free(s->backing_file_str); blk_unref(s->top); blk_unref(s->base); block_job_completed(&s->common, ret); g_free(data); }
16,887
qemu
6b896ab261942f441a16836e3fa3c83f3f4488b9
0
static int ide_drive_post_load(void *opaque, int version_id) { IDEState *s = opaque; if (s->identify_set) { blk_set_enable_write_cache(s->blk, !!(s->identify_data[85] & (1 << 5))); } return 0; }
16,889
qemu
b0fbf7d3420f5f66be9728b1b070846bb054c872
0
void do_migrate_set_downtime(Monitor *mon, const QDict *qdict) { char *ptr; double d; const char *value = qdict_get_str(qdict, "value"); d = strtod(value, &ptr); if (!strcmp(ptr,"ms")) { d *= 1000000; } else if (!strcmp(ptr,"us")) { d *= 1000; } else if (!strcmp(ptr,"ns")) { } else { /* all else considered to be seconds */ d *= 1000000000; } max_downtime = (uint64_t)d; }
16,892
qemu
98c8573eb37bf5d7bb0c07225985a78537c73101
0
int kvm_set_ioeventfd_pio_word(int fd, uint16_t addr, uint16_t val, bool assign) { struct kvm_ioeventfd kick = { .datamatch = val, .addr = addr, .len = 2, .flags = KVM_IOEVENTFD_FLAG_DATAMATCH | KVM_IOEVENTFD_FLAG_PIO, .fd = fd, }; int r; if (!kvm_enabled()) return -ENOSYS; if (!assign) kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN; r = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick); if (r < 0) return r; return 0; }
16,893
qemu
8a6b28c7b5104263344508df0f4bce97f22cfcaf
0
static inline void gen_goto_tb(DisasContext *s, int n, target_ulong dest) { if (use_goto_tb(s, dest)) { tcg_gen_goto_tb(n); gen_set_pc_im(s, dest); tcg_gen_exit_tb((uintptr_t)s->tb + n); } else { TCGv addr = tcg_temp_new(); gen_set_pc_im(s, dest); tcg_gen_extu_i32_tl(addr, cpu_R[15]); tcg_gen_lookup_and_goto_ptr(addr); tcg_temp_free(addr); } }
16,896
qemu
40055951a7afbfc037c6c7351d72a5c5d83ed99b
0
static int img_convert(int argc, char **argv) { int c, n, n1, bs_n, bs_i, compress, cluster_sectors, skip_create; int64_t ret = 0; int progress = 0, flags; const char *fmt, *out_fmt, *cache, *out_baseimg, *out_filename; BlockDriver *drv, *proto_drv; BlockDriverState **bs = NULL, *out_bs = NULL; int64_t total_sectors, nb_sectors, sector_num, bs_offset; int64_t *bs_sectors = NULL; uint8_t * buf = NULL; size_t bufsectors = IO_BUF_SIZE / BDRV_SECTOR_SIZE; const uint8_t *buf1; BlockDriverInfo bdi; QemuOpts *opts = NULL; QemuOptsList *create_opts = NULL; const char *out_baseimg_param; char *options = NULL; const char *snapshot_name = NULL; int min_sparse = 8; /* Need at least 4k of zeros for sparse detection */ bool quiet = false; Error *local_err = NULL; QemuOpts *sn_opts = NULL; fmt = NULL; out_fmt = "raw"; cache = "unsafe"; out_baseimg = NULL; compress = 0; skip_create = 0; for(;;) { c = getopt(argc, argv, "f:O:B:s:hce6o:pS:t:qnl:"); if (c == -1) { break; } switch(c) { case '?': case 'h': help(); break; case 'f': fmt = optarg; break; case 'O': out_fmt = optarg; break; case 'B': out_baseimg = optarg; break; case 'c': compress = 1; break; case 'e': error_report("option -e is deprecated, please use \'-o " "encryption\' instead!"); ret = -1; goto fail_getopt; case '6': error_report("option -6 is deprecated, please use \'-o " "compat6\' instead!"); ret = -1; goto fail_getopt; case 'o': if (!is_valid_option_list(optarg)) { error_report("Invalid option list: %s", optarg); ret = -1; goto fail_getopt; } if (!options) { options = g_strdup(optarg); } else { char *old_options = options; options = g_strdup_printf("%s,%s", options, optarg); g_free(old_options); } break; case 's': snapshot_name = optarg; break; case 'l': if (strstart(optarg, SNAPSHOT_OPT_BASE, NULL)) { sn_opts = qemu_opts_parse(&internal_snapshot_opts, optarg, 0); if (!sn_opts) { error_report("Failed in parsing snapshot param '%s'", optarg); ret = -1; goto fail_getopt; } } else { snapshot_name = optarg; } break; case 'S': { int64_t sval; char *end; sval = strtosz_suffix(optarg, &end, STRTOSZ_DEFSUFFIX_B); if (sval < 0 || *end) { error_report("Invalid minimum zero buffer size for sparse output specified"); ret = -1; goto fail_getopt; } min_sparse = sval / BDRV_SECTOR_SIZE; break; } case 'p': progress = 1; break; case 't': cache = optarg; break; case 'q': quiet = true; break; case 'n': skip_create = 1; break; } } /* Initialize before goto out */ if (quiet) { progress = 0; } qemu_progress_init(progress, 1.0); bs_n = argc - optind - 1; out_filename = bs_n >= 1 ? argv[argc - 1] : NULL; if (options && has_help_option(options)) { ret = print_block_option_help(out_filename, out_fmt); goto out; } if (bs_n < 1) { error_exit("Must specify image file name"); } if (bs_n > 1 && out_baseimg) { error_report("-B makes no sense when concatenating multiple input " "images"); ret = -1; goto out; } qemu_progress_print(0, 100); bs = g_new0(BlockDriverState *, bs_n); bs_sectors = g_new(int64_t, bs_n); total_sectors = 0; for (bs_i = 0; bs_i < bs_n; bs_i++) { char *id = bs_n > 1 ? 
g_strdup_printf("source %d", bs_i) : g_strdup("source"); bs[bs_i] = bdrv_new_open(id, argv[optind + bs_i], fmt, BDRV_O_FLAGS, true, quiet); g_free(id); if (!bs[bs_i]) { error_report("Could not open '%s'", argv[optind + bs_i]); ret = -1; goto out; } bs_sectors[bs_i] = bdrv_nb_sectors(bs[bs_i]); if (bs_sectors[bs_i] < 0) { error_report("Could not get size of %s: %s", argv[optind + bs_i], strerror(-bs_sectors[bs_i])); ret = -1; goto out; } total_sectors += bs_sectors[bs_i]; } if (sn_opts) { ret = bdrv_snapshot_load_tmp(bs[0], qemu_opt_get(sn_opts, SNAPSHOT_OPT_ID), qemu_opt_get(sn_opts, SNAPSHOT_OPT_NAME), &local_err); } else if (snapshot_name != NULL) { if (bs_n > 1) { error_report("No support for concatenating multiple snapshot"); ret = -1; goto out; } bdrv_snapshot_load_tmp_by_id_or_name(bs[0], snapshot_name, &local_err); } if (local_err) { error_report("Failed to load snapshot: %s", error_get_pretty(local_err)); error_free(local_err); ret = -1; goto out; } /* Find driver and parse its options */ drv = bdrv_find_format(out_fmt); if (!drv) { error_report("Unknown file format '%s'", out_fmt); ret = -1; goto out; } proto_drv = bdrv_find_protocol(out_filename, true); if (!proto_drv) { error_report("Unknown protocol '%s'", out_filename); ret = -1; goto out; } create_opts = qemu_opts_append(create_opts, drv->create_opts); create_opts = qemu_opts_append(create_opts, proto_drv->create_opts); opts = qemu_opts_create(create_opts, NULL, 0, &error_abort); if (options && qemu_opts_do_parse(opts, options, NULL)) { error_report("Invalid options for file format '%s'", out_fmt); ret = -1; goto out; } qemu_opt_set_number(opts, BLOCK_OPT_SIZE, total_sectors * 512); ret = add_old_style_options(out_fmt, opts, out_baseimg, NULL); if (ret < 0) { goto out; } /* Get backing file name if -o backing_file was used */ out_baseimg_param = qemu_opt_get(opts, BLOCK_OPT_BACKING_FILE); if (out_baseimg_param) { out_baseimg = out_baseimg_param; } /* Check if compression is supported */ if (compress) { bool encryption = qemu_opt_get_bool(opts, BLOCK_OPT_ENCRYPT, false); const char *preallocation = qemu_opt_get(opts, BLOCK_OPT_PREALLOC); if (!drv->bdrv_write_compressed) { error_report("Compression not supported for this file format"); ret = -1; goto out; } if (encryption) { error_report("Compression and encryption not supported at " "the same time"); ret = -1; goto out; } if (preallocation && strcmp(preallocation, "off")) { error_report("Compression and preallocation not supported at " "the same time"); ret = -1; goto out; } } if (!skip_create) { /* Create the new image */ ret = bdrv_create(drv, out_filename, opts, &local_err); if (ret < 0) { error_report("%s: error while converting %s: %s", out_filename, out_fmt, error_get_pretty(local_err)); error_free(local_err); goto out; } } flags = min_sparse ? (BDRV_O_RDWR | BDRV_O_UNMAP) : BDRV_O_RDWR; ret = bdrv_parse_cache_flags(cache, &flags); if (ret < 0) { error_report("Invalid cache option: %s", cache); goto out; } out_bs = bdrv_new_open("target", out_filename, out_fmt, flags, true, quiet); if (!out_bs) { ret = -1; goto out; } bs_i = 0; bs_offset = 0; /* increase bufsectors from the default 4096 (2M) if opt_transfer_length * or discard_alignment of the out_bs is greater. Limit to 32768 (16MB) * as maximum. 
*/ bufsectors = MIN(32768, MAX(bufsectors, MAX(out_bs->bl.opt_transfer_length, out_bs->bl.discard_alignment)) ); buf = qemu_blockalign(out_bs, bufsectors * BDRV_SECTOR_SIZE); if (skip_create) { int64_t output_sectors = bdrv_nb_sectors(out_bs); if (output_sectors < 0) { error_report("unable to get output image length: %s\n", strerror(-output_sectors)); ret = -1; goto out; } else if (output_sectors < total_sectors) { error_report("output file is smaller than input file"); ret = -1; goto out; } } cluster_sectors = 0; ret = bdrv_get_info(out_bs, &bdi); if (ret < 0) { if (compress) { error_report("could not get block driver info"); goto out; } } else { compress = compress || bdi.needs_compressed_writes; cluster_sectors = bdi.cluster_size / BDRV_SECTOR_SIZE; } if (compress) { if (cluster_sectors <= 0 || cluster_sectors > bufsectors) { error_report("invalid cluster size"); ret = -1; goto out; } sector_num = 0; nb_sectors = total_sectors; for(;;) { int64_t bs_num; int remainder; uint8_t *buf2; nb_sectors = total_sectors - sector_num; if (nb_sectors <= 0) break; if (nb_sectors >= cluster_sectors) n = cluster_sectors; else n = nb_sectors; bs_num = sector_num - bs_offset; assert (bs_num >= 0); remainder = n; buf2 = buf; while (remainder > 0) { int nlow; while (bs_num == bs_sectors[bs_i]) { bs_offset += bs_sectors[bs_i]; bs_i++; assert (bs_i < bs_n); bs_num = 0; /* printf("changing part: sector_num=%" PRId64 ", " "bs_i=%d, bs_offset=%" PRId64 ", bs_sectors=%" PRId64 "\n", sector_num, bs_i, bs_offset, bs_sectors[bs_i]); */ } assert (bs_num < bs_sectors[bs_i]); nlow = remainder > bs_sectors[bs_i] - bs_num ? bs_sectors[bs_i] - bs_num : remainder; ret = bdrv_read(bs[bs_i], bs_num, buf2, nlow); if (ret < 0) { error_report("error while reading sector %" PRId64 ": %s", bs_num, strerror(-ret)); goto out; } buf2 += nlow * 512; bs_num += nlow; remainder -= nlow; } assert (remainder == 0); if (!buffer_is_zero(buf, n * BDRV_SECTOR_SIZE)) { ret = bdrv_write_compressed(out_bs, sector_num, buf, n); if (ret != 0) { error_report("error while compressing sector %" PRId64 ": %s", sector_num, strerror(-ret)); goto out; } } sector_num += n; qemu_progress_print(100.0 * sector_num / total_sectors, 0); } /* signal EOF to align */ bdrv_write_compressed(out_bs, 0, NULL, 0); } else { int64_t sectors_to_read, sectors_read, sector_num_next_status; bool count_allocated_sectors; int has_zero_init = min_sparse ? bdrv_has_zero_init(out_bs) : 0; if (!has_zero_init && bdrv_can_write_zeroes_with_unmap(out_bs)) { ret = bdrv_make_zero(out_bs, BDRV_REQ_MAY_UNMAP); if (ret < 0) { goto out; } has_zero_init = 1; } sectors_to_read = total_sectors; count_allocated_sectors = progress && (out_baseimg || has_zero_init); restart: sector_num = 0; // total number of sectors converted so far sectors_read = 0; sector_num_next_status = 0; for(;;) { nb_sectors = total_sectors - sector_num; if (nb_sectors <= 0) { if (count_allocated_sectors) { sectors_to_read = sectors_read; count_allocated_sectors = false; goto restart; } ret = 0; break; } while (sector_num - bs_offset >= bs_sectors[bs_i]) { bs_offset += bs_sectors[bs_i]; bs_i ++; assert (bs_i < bs_n); /* printf("changing part: sector_num=%" PRId64 ", bs_i=%d, " "bs_offset=%" PRId64 ", bs_sectors=%" PRId64 "\n", sector_num, bs_i, bs_offset, bs_sectors[bs_i]); */ } if ((out_baseimg || has_zero_init) && sector_num >= sector_num_next_status) { n = nb_sectors > INT_MAX ? 
INT_MAX : nb_sectors; ret = bdrv_get_block_status(bs[bs_i], sector_num - bs_offset, n, &n1); if (ret < 0) { error_report("error while reading block status of sector %" PRId64 ": %s", sector_num - bs_offset, strerror(-ret)); goto out; } /* If the output image is zero initialized, we are not working * on a shared base and the input is zero we can skip the next * n1 sectors */ if (has_zero_init && !out_baseimg && (ret & BDRV_BLOCK_ZERO)) { sector_num += n1; continue; } /* If the output image is being created as a copy on write * image, assume that sectors which are unallocated in the * input image are present in both the output's and input's * base images (no need to copy them). */ if (out_baseimg) { if (!(ret & BDRV_BLOCK_DATA)) { sector_num += n1; continue; } /* The next 'n1' sectors are allocated in the input image. * Copy only those as they may be followed by unallocated * sectors. */ nb_sectors = n1; } /* avoid redundant callouts to get_block_status */ sector_num_next_status = sector_num + n1; } n = MIN(nb_sectors, bufsectors); /* round down request length to an aligned sector, but * do not bother doing this on short requests. They happen * when we found an all-zero area, and the next sector to * write will not be sector_num + n. */ if (cluster_sectors > 0 && n >= cluster_sectors) { int64_t next_aligned_sector = (sector_num + n); next_aligned_sector -= next_aligned_sector % cluster_sectors; if (sector_num + n > next_aligned_sector) { n = next_aligned_sector - sector_num; } } n = MIN(n, bs_sectors[bs_i] - (sector_num - bs_offset)); sectors_read += n; if (count_allocated_sectors) { sector_num += n; continue; } n1 = n; ret = bdrv_read(bs[bs_i], sector_num - bs_offset, buf, n); if (ret < 0) { error_report("error while reading sector %" PRId64 ": %s", sector_num - bs_offset, strerror(-ret)); goto out; } /* NOTE: at the same time we convert, we do not write zero sectors to have a chance to compress the image. Ideally, we should add a specific call to have the info to go faster */ buf1 = buf; while (n > 0) { if (!has_zero_init || is_allocated_sectors_min(buf1, n, &n1, min_sparse)) { ret = bdrv_write(out_bs, sector_num, buf1, n1); if (ret < 0) { error_report("error while writing sector %" PRId64 ": %s", sector_num, strerror(-ret)); goto out; } } sector_num += n1; n -= n1; buf1 += n1 * 512; } qemu_progress_print(100.0 * sectors_read / sectors_to_read, 0); } } out: if (!ret) { qemu_progress_print(100, 0); } qemu_progress_end(); qemu_opts_del(opts); qemu_opts_free(create_opts); qemu_vfree(buf); if (sn_opts) { qemu_opts_del(sn_opts); } if (out_bs) { bdrv_unref(out_bs); } if (bs) { for (bs_i = 0; bs_i < bs_n; bs_i++) { if (bs[bs_i]) { bdrv_unref(bs[bs_i]); } } g_free(bs); } g_free(bs_sectors); fail_getopt: g_free(options); if (ret) { return 1; } return 0; }
16,899
qemu
079d0b7f1eedcc634c371fe05b617fdc55c8b762
0
static void musb_packet(MUSBState *s, MUSBEndPoint *ep, int epnum, int pid, int len, USBCallback cb, int dir) { USBDevice *dev; int ret; int idx = epnum && dir; int ttype; /* ep->type[0,1] contains: * in bits 7:6 the speed (0 - invalid, 1 - high, 2 - full, 3 - slow) * in bits 5:4 the transfer type (BULK / INT) * in bits 3:0 the EP num */ ttype = epnum ? (ep->type[idx] >> 4) & 3 : 0; ep->timeout[dir] = musb_timeout(ttype, ep->type[idx] >> 6, ep->interval[idx]); ep->interrupt[dir] = ttype == USB_ENDPOINT_XFER_INT; ep->delayed_cb[dir] = cb; /* A wild guess on the FADDR semantics... */ usb_packet_setup(&ep->packey[dir].p, pid, ep->faddr[idx], ep->type[idx] & 0xf); usb_packet_addbuf(&ep->packey[dir].p, ep->buf[idx], len); ep->packey[dir].ep = ep; ep->packey[dir].dir = dir; dev = usb_find_device(&s->port, ep->packey[dir].p.devaddr); ret = usb_handle_packet(dev, &ep->packey[dir].p); if (ret == USB_RET_ASYNC) { ep->status[dir] = len; return; } ep->status[dir] = ret; musb_schedule_cb(&s->port, &ep->packey[dir].p); }
16,900
qemu
eb700029c7836798046191d62d595363d92c84d4
0
static bool net_tx_pkt_parse_headers(struct NetTxPkt *pkt) { struct iovec *l2_hdr, *l3_hdr; size_t bytes_read; size_t full_ip6hdr_len; uint16_t l3_proto; assert(pkt); l2_hdr = &pkt->vec[NET_TX_PKT_L2HDR_FRAG]; l3_hdr = &pkt->vec[NET_TX_PKT_L3HDR_FRAG]; bytes_read = iov_to_buf(pkt->raw, pkt->raw_frags, 0, l2_hdr->iov_base, ETH_MAX_L2_HDR_LEN); if (bytes_read < sizeof(struct eth_header)) { l2_hdr->iov_len = 0; return false; } l2_hdr->iov_len = sizeof(struct eth_header); switch (be16_to_cpu(PKT_GET_ETH_HDR(l2_hdr->iov_base)->h_proto)) { case ETH_P_VLAN: l2_hdr->iov_len += sizeof(struct vlan_header); break; case ETH_P_DVLAN: l2_hdr->iov_len += 2 * sizeof(struct vlan_header); break; } if (bytes_read < l2_hdr->iov_len) { l2_hdr->iov_len = 0; return false; } l3_proto = eth_get_l3_proto(l2_hdr->iov_base, l2_hdr->iov_len); switch (l3_proto) { case ETH_P_IP: l3_hdr->iov_base = g_malloc(ETH_MAX_IP4_HDR_LEN); bytes_read = iov_to_buf(pkt->raw, pkt->raw_frags, l2_hdr->iov_len, l3_hdr->iov_base, sizeof(struct ip_header)); if (bytes_read < sizeof(struct ip_header)) { l3_hdr->iov_len = 0; return false; } l3_hdr->iov_len = IP_HDR_GET_LEN(l3_hdr->iov_base); pkt->l4proto = ((struct ip_header *) l3_hdr->iov_base)->ip_p; /* copy optional IPv4 header data */ bytes_read = iov_to_buf(pkt->raw, pkt->raw_frags, l2_hdr->iov_len + sizeof(struct ip_header), l3_hdr->iov_base + sizeof(struct ip_header), l3_hdr->iov_len - sizeof(struct ip_header)); if (bytes_read < l3_hdr->iov_len - sizeof(struct ip_header)) { l3_hdr->iov_len = 0; return false; } break; case ETH_P_IPV6: if (!eth_parse_ipv6_hdr(pkt->raw, pkt->raw_frags, l2_hdr->iov_len, &pkt->l4proto, &full_ip6hdr_len)) { l3_hdr->iov_len = 0; return false; } l3_hdr->iov_base = g_malloc(full_ip6hdr_len); bytes_read = iov_to_buf(pkt->raw, pkt->raw_frags, l2_hdr->iov_len, l3_hdr->iov_base, full_ip6hdr_len); if (bytes_read < full_ip6hdr_len) { l3_hdr->iov_len = 0; return false; } else { l3_hdr->iov_len = full_ip6hdr_len; } break; default: l3_hdr->iov_len = 0; break; } net_tx_pkt_calculate_hdr_len(pkt); pkt->packet_type = get_eth_packet_type(l2_hdr->iov_base); return true; }
16,901
qemu
a8170e5e97ad17ca169c64ba87ae2f53850dab4c
0
static uint32_t bonito_sbridge_pciaddr(void *opaque, target_phys_addr_t addr) { PCIBonitoState *s = opaque; PCIHostState *phb = PCI_HOST_BRIDGE(s->pcihost); uint32_t cfgaddr; uint32_t idsel; uint32_t devno; uint32_t funno; uint32_t regno; uint32_t pciaddr; /* support type0 pci config */ if ((s->regs[BONITO_PCIMAP_CFG] & 0x10000) != 0x0) { return 0xffffffff; } cfgaddr = addr & 0xffff; cfgaddr |= (s->regs[BONITO_PCIMAP_CFG] & 0xffff) << 16; idsel = (cfgaddr & BONITO_PCICONF_IDSEL_MASK) >> BONITO_PCICONF_IDSEL_OFFSET; devno = ffs(idsel) - 1; funno = (cfgaddr & BONITO_PCICONF_FUN_MASK) >> BONITO_PCICONF_FUN_OFFSET; regno = (cfgaddr & BONITO_PCICONF_REG_MASK) >> BONITO_PCICONF_REG_OFFSET; if (idsel == 0) { fprintf(stderr, "error in bonito pci config address " TARGET_FMT_plx ",pcimap_cfg=%x\n", addr, s->regs[BONITO_PCIMAP_CFG]); exit(1); } pciaddr = PCI_ADDR(pci_bus_num(phb->bus), devno, funno, regno); DPRINTF("cfgaddr %x pciaddr %x busno %x devno %d funno %d regno %d\n", cfgaddr, pciaddr, pci_bus_num(phb->bus), devno, funno, regno); return pciaddr; }
16,902
qemu
4be746345f13e99e468c60acbd3a355e8183e3ce
0
static void ide_init1(IDEBus *bus, int unit) { static int drive_serial = 1; IDEState *s = &bus->ifs[unit]; s->bus = bus; s->unit = unit; s->drive_serial = drive_serial++; /* we need at least 2k alignment for accessing CDROMs using O_DIRECT */ s->io_buffer_total_len = IDE_DMA_BUF_SECTORS*512 + 4; s->io_buffer = qemu_memalign(2048, s->io_buffer_total_len); memset(s->io_buffer, 0, s->io_buffer_total_len); s->smart_selftest_data = qemu_blockalign(s->bs, 512); memset(s->smart_selftest_data, 0, 512); s->sector_write_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, ide_sector_write_timer_cb, s); }
16,903
qemu
fda74f826baec78d685e5a87fd8a95bfb7bb2243
0
int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size, bool exact_size) { BDRVQcowState *s = bs->opaque; int new_l1_size2, ret, i; uint64_t *new_l1_table; int64_t new_l1_table_offset, new_l1_size; uint8_t data[12]; if (min_size <= s->l1_size) return 0; if (exact_size) { new_l1_size = min_size; } else { /* Bump size up to reduce the number of times we have to grow */ new_l1_size = s->l1_size; if (new_l1_size == 0) { new_l1_size = 1; } while (min_size > new_l1_size) { new_l1_size = (new_l1_size * 3 + 1) / 2; } } if (new_l1_size > INT_MAX) { return -EFBIG; } #ifdef DEBUG_ALLOC2 fprintf(stderr, "grow l1_table from %d to %" PRId64 "\n", s->l1_size, new_l1_size); #endif new_l1_size2 = sizeof(uint64_t) * new_l1_size; new_l1_table = g_malloc0(align_offset(new_l1_size2, 512)); memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t)); /* write new table (align to cluster) */ BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ALLOC_TABLE); new_l1_table_offset = qcow2_alloc_clusters(bs, new_l1_size2); if (new_l1_table_offset < 0) { g_free(new_l1_table); return new_l1_table_offset; } ret = qcow2_cache_flush(bs, s->refcount_block_cache); if (ret < 0) { goto fail; } /* the L1 position has not yet been updated, so these clusters must * indeed be completely free */ ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_DEFAULT, new_l1_table_offset, new_l1_size2); if (ret < 0) { goto fail; } BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_WRITE_TABLE); for(i = 0; i < s->l1_size; i++) new_l1_table[i] = cpu_to_be64(new_l1_table[i]); ret = bdrv_pwrite_sync(bs->file, new_l1_table_offset, new_l1_table, new_l1_size2); if (ret < 0) goto fail; for(i = 0; i < s->l1_size; i++) new_l1_table[i] = be64_to_cpu(new_l1_table[i]); /* set new table */ BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ACTIVATE_TABLE); cpu_to_be32w((uint32_t*)data, new_l1_size); cpu_to_be64wu((uint64_t*)(data + 4), new_l1_table_offset); ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_size), data,sizeof(data)); if (ret < 0) { goto fail; } g_free(s->l1_table); qcow2_free_clusters(bs, s->l1_table_offset, s->l1_size * sizeof(uint64_t), QCOW2_DISCARD_OTHER); s->l1_table_offset = new_l1_table_offset; s->l1_table = new_l1_table; s->l1_size = new_l1_size; return 0; fail: g_free(new_l1_table); qcow2_free_clusters(bs, new_l1_table_offset, new_l1_size2, QCOW2_DISCARD_OTHER); return ret; }
16,904
qemu
a8170e5e97ad17ca169c64ba87ae2f53850dab4c
0
static uint64_t bw_io_read(void *opaque, target_phys_addr_t addr,
                           unsigned size)
{
    switch (size) {
    case 1:
        return cpu_inb(addr);
    case 2:
        return cpu_inw(addr);
    case 4:
        return cpu_inl(addr);
    }
    abort();
}
16,905
qemu
1dcea8e82b1d7795e6719a8ac8762993fc1ed4b3
0
static void cirrus_init_common(CirrusVGAState * s, int device_id, int is_pci) { int i; static int inited; if (!inited) { inited = 1; for(i = 0;i < 256; i++) rop_to_index[i] = CIRRUS_ROP_NOP_INDEX; /* nop rop */ rop_to_index[CIRRUS_ROP_0] = 0; rop_to_index[CIRRUS_ROP_SRC_AND_DST] = 1; rop_to_index[CIRRUS_ROP_NOP] = 2; rop_to_index[CIRRUS_ROP_SRC_AND_NOTDST] = 3; rop_to_index[CIRRUS_ROP_NOTDST] = 4; rop_to_index[CIRRUS_ROP_SRC] = 5; rop_to_index[CIRRUS_ROP_1] = 6; rop_to_index[CIRRUS_ROP_NOTSRC_AND_DST] = 7; rop_to_index[CIRRUS_ROP_SRC_XOR_DST] = 8; rop_to_index[CIRRUS_ROP_SRC_OR_DST] = 9; rop_to_index[CIRRUS_ROP_NOTSRC_OR_NOTDST] = 10; rop_to_index[CIRRUS_ROP_SRC_NOTXOR_DST] = 11; rop_to_index[CIRRUS_ROP_SRC_OR_NOTDST] = 12; rop_to_index[CIRRUS_ROP_NOTSRC] = 13; rop_to_index[CIRRUS_ROP_NOTSRC_OR_DST] = 14; rop_to_index[CIRRUS_ROP_NOTSRC_AND_NOTDST] = 15; s->device_id = device_id; if (is_pci) s->bustype = CIRRUS_BUSTYPE_PCI; else s->bustype = CIRRUS_BUSTYPE_ISA; } register_ioport_write(0x3c0, 16, 1, vga_ioport_write, s); register_ioport_write(0x3b4, 2, 1, vga_ioport_write, s); register_ioport_write(0x3d4, 2, 1, vga_ioport_write, s); register_ioport_write(0x3ba, 1, 1, vga_ioport_write, s); register_ioport_write(0x3da, 1, 1, vga_ioport_write, s); register_ioport_read(0x3c0, 16, 1, vga_ioport_read, s); register_ioport_read(0x3b4, 2, 1, vga_ioport_read, s); register_ioport_read(0x3d4, 2, 1, vga_ioport_read, s); register_ioport_read(0x3ba, 1, 1, vga_ioport_read, s); register_ioport_read(0x3da, 1, 1, vga_ioport_read, s); s->vga_io_memory = cpu_register_io_memory(0, cirrus_vga_mem_read, cirrus_vga_mem_write, s); cpu_register_physical_memory(isa_mem_base + 0x000a0000, 0x20000, s->vga_io_memory); qemu_register_coalesced_mmio(isa_mem_base + 0x000a0000, 0x20000); /* I/O handler for LFB */ s->cirrus_linear_io_addr = cpu_register_io_memory(0, cirrus_linear_read, cirrus_linear_write, s); s->cirrus_linear_write = cpu_get_io_memory_write(s->cirrus_linear_io_addr); /* I/O handler for LFB */ s->cirrus_linear_bitblt_io_addr = cpu_register_io_memory(0, cirrus_linear_bitblt_read, cirrus_linear_bitblt_write, s); /* I/O handler for memory-mapped I/O */ s->cirrus_mmio_io_addr = cpu_register_io_memory(0, cirrus_mmio_read, cirrus_mmio_write, s); s->real_vram_size = (s->device_id == CIRRUS_ID_CLGD5446) ? 4096 * 1024 : 2048 * 1024; /* XXX: s->vram_size must be a power of two */ s->cirrus_addr_mask = s->real_vram_size - 1; s->linear_mmio_mask = s->real_vram_size - 256; s->get_bpp = cirrus_get_bpp; s->get_offsets = cirrus_get_offsets; s->get_resolution = cirrus_get_resolution; s->cursor_invalidate = cirrus_cursor_invalidate; s->cursor_draw_line = cirrus_cursor_draw_line; qemu_register_reset(cirrus_reset, s); cirrus_reset(s); register_savevm("cirrus_vga", 0, 2, cirrus_vga_save, cirrus_vga_load, s); }
16,906
qemu
8150847061f8d2606101bfff77cc6ec86b081ab0
0
static void translate_priority(GICState *s, int irq, int cpu, uint32_t *field, bool to_kernel) { if (to_kernel) { *field = GIC_GET_PRIORITY(irq, cpu) & 0xff; } else { gic_set_priority(s, cpu, irq, *field & 0xff); } }
16,907
FFmpeg
c9fe0caf7a1abde7ca0b1a359f551103064867b1
0
static void FUNC(transquant_bypass32x32)(uint8_t *_dst, int16_t *coeffs, ptrdiff_t stride) { int x, y; pixel *dst = (pixel *)_dst; stride /= sizeof(pixel); for (y = 0; y < 32; y++) { for (x = 0; x < 32; x++) { dst[x] += *coeffs; coeffs++; } dst += stride; } }
16,908
qemu
b39491a83d0b9d573d5fd21163f61f66a11b54b9
0
static void ppc_heathrow_init (ram_addr_t ram_size, const char *boot_device, const char *kernel_filename, const char *kernel_cmdline, const char *initrd_filename, const char *cpu_model) { CPUState *env = NULL; char *filename; qemu_irq *pic, **heathrow_irqs; int linux_boot, i; ram_addr_t ram_offset, bios_offset; uint32_t kernel_base, initrd_base, cmdline_base = 0; int32_t kernel_size, initrd_size; PCIBus *pci_bus; MacIONVRAMState *nvr; int bios_size; MemoryRegion *pic_mem, *dbdma_mem, *cuda_mem; MemoryRegion *escc_mem, *escc_bar = g_new(MemoryRegion, 1), *ide_mem[2]; uint16_t ppc_boot_device; DriveInfo *hd[MAX_IDE_BUS * MAX_IDE_DEVS]; void *fw_cfg; void *dbdma; linux_boot = (kernel_filename != NULL); /* init CPUs */ if (cpu_model == NULL) cpu_model = "G3"; for (i = 0; i < smp_cpus; i++) { env = cpu_init(cpu_model); if (!env) { fprintf(stderr, "Unable to find PowerPC CPU definition\n"); exit(1); } /* Set time-base frequency to 16.6 Mhz */ cpu_ppc_tb_init(env, 16600000UL); qemu_register_reset((QEMUResetHandler*)&cpu_reset, env); } /* allocate RAM */ if (ram_size > (2047 << 20)) { fprintf(stderr, "qemu: Too much memory for this machine: %d MB, maximum 2047 MB\n", ((unsigned int)ram_size / (1 << 20))); exit(1); } ram_offset = qemu_ram_alloc(NULL, "ppc_heathrow.ram", ram_size); cpu_register_physical_memory(0, ram_size, ram_offset); /* allocate and load BIOS */ bios_offset = qemu_ram_alloc(NULL, "ppc_heathrow.bios", BIOS_SIZE); if (bios_name == NULL) bios_name = PROM_FILENAME; filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); cpu_register_physical_memory(PROM_ADDR, BIOS_SIZE, bios_offset | IO_MEM_ROM); /* Load OpenBIOS (ELF) */ if (filename) { bios_size = load_elf(filename, 0, NULL, NULL, NULL, NULL, 1, ELF_MACHINE, 0); g_free(filename); } else { bios_size = -1; } if (bios_size < 0 || bios_size > BIOS_SIZE) { hw_error("qemu: could not load PowerPC bios '%s'\n", bios_name); exit(1); } if (linux_boot) { uint64_t lowaddr = 0; int bswap_needed; #ifdef BSWAP_NEEDED bswap_needed = 1; #else bswap_needed = 0; #endif kernel_base = KERNEL_LOAD_ADDR; kernel_size = load_elf(kernel_filename, translate_kernel_address, NULL, NULL, &lowaddr, NULL, 1, ELF_MACHINE, 0); if (kernel_size < 0) kernel_size = load_aout(kernel_filename, kernel_base, ram_size - kernel_base, bswap_needed, TARGET_PAGE_SIZE); if (kernel_size < 0) kernel_size = load_image_targphys(kernel_filename, kernel_base, ram_size - kernel_base); if (kernel_size < 0) { hw_error("qemu: could not load kernel '%s'\n", kernel_filename); exit(1); } /* load initrd */ if (initrd_filename) { initrd_base = round_page(kernel_base + kernel_size + KERNEL_GAP); initrd_size = load_image_targphys(initrd_filename, initrd_base, ram_size - initrd_base); if (initrd_size < 0) { hw_error("qemu: could not load initial ram disk '%s'\n", initrd_filename); exit(1); } cmdline_base = round_page(initrd_base + initrd_size); } else { initrd_base = 0; initrd_size = 0; cmdline_base = round_page(kernel_base + kernel_size + KERNEL_GAP); } ppc_boot_device = 'm'; } else { kernel_base = 0; kernel_size = 0; initrd_base = 0; initrd_size = 0; ppc_boot_device = '\0'; for (i = 0; boot_device[i] != '\0'; i++) { /* TOFIX: for now, the second IDE channel is not properly * used by OHW. The Mac floppy disk are not emulated. * For now, OHW cannot boot from the network. 
*/ #if 0 if (boot_device[i] >= 'a' && boot_device[i] <= 'f') { ppc_boot_device = boot_device[i]; break; } #else if (boot_device[i] >= 'c' && boot_device[i] <= 'd') { ppc_boot_device = boot_device[i]; break; } #endif } if (ppc_boot_device == '\0') { fprintf(stderr, "No valid boot device for G3 Beige machine\n"); exit(1); } } /* Register 2 MB of ISA IO space */ isa_mmio_init(0xfe000000, 0x00200000); /* XXX: we register only 1 output pin for heathrow PIC */ heathrow_irqs = g_malloc0(smp_cpus * sizeof(qemu_irq *)); heathrow_irqs[0] = g_malloc0(smp_cpus * sizeof(qemu_irq) * 1); /* Connect the heathrow PIC outputs to the 6xx bus */ for (i = 0; i < smp_cpus; i++) { switch (PPC_INPUT(env)) { case PPC_FLAGS_INPUT_6xx: heathrow_irqs[i] = heathrow_irqs[0] + (i * 1); heathrow_irqs[i][0] = ((qemu_irq *)env->irq_inputs)[PPC6xx_INPUT_INT]; break; default: hw_error("Bus model not supported on OldWorld Mac machine\n"); } } /* init basic PC hardware */ if (PPC_INPUT(env) != PPC_FLAGS_INPUT_6xx) { hw_error("Only 6xx bus is supported on heathrow machine\n"); } pic = heathrow_pic_init(&pic_mem, 1, heathrow_irqs); pci_bus = pci_grackle_init(0xfec00000, pic, get_system_memory(), get_system_io()); pci_vga_init(pci_bus); escc_mem = escc_init(0x80013000, pic[0x0f], pic[0x10], serial_hds[0], serial_hds[1], ESCC_CLOCK, 4); memory_region_init_alias(escc_bar, "escc-bar", escc_mem, 0, memory_region_size(escc_mem)); for(i = 0; i < nb_nics; i++) pci_nic_init_nofail(&nd_table[i], "ne2k_pci", NULL); ide_drive_get(hd, MAX_IDE_BUS); /* First IDE channel is a MAC IDE on the MacIO bus */ dbdma = DBDMA_init(&dbdma_mem); ide_mem[0] = NULL; ide_mem[1] = pmac_ide_init(hd, pic[0x0D], dbdma, 0x16, pic[0x02]); /* Second IDE channel is a CMD646 on the PCI bus */ hd[0] = hd[MAX_IDE_DEVS]; hd[1] = hd[MAX_IDE_DEVS + 1]; hd[3] = hd[2] = NULL; pci_cmd646_ide_init(pci_bus, hd, 0); /* cuda also initialize ADB */ cuda_init(&cuda_mem, pic[0x12]); adb_kbd_init(&adb_bus); adb_mouse_init(&adb_bus); nvr = macio_nvram_init(0x2000, 4); pmac_format_nvram_partition(nvr, 0x2000); macio_init(pci_bus, PCI_DEVICE_ID_APPLE_343S1201, 1, pic_mem, dbdma_mem, cuda_mem, nvr, 2, ide_mem, escc_bar); if (usb_enabled) { usb_ohci_init_pci(pci_bus, -1); } if (graphic_depth != 15 && graphic_depth != 32 && graphic_depth != 8) graphic_depth = 15; /* No PCI init: the BIOS will do it */ fw_cfg = fw_cfg_init(0, 0, CFG_ADDR, CFG_ADDR + 2); fw_cfg_add_i32(fw_cfg, FW_CFG_ID, 1); fw_cfg_add_i64(fw_cfg, FW_CFG_RAM_SIZE, (uint64_t)ram_size); fw_cfg_add_i16(fw_cfg, FW_CFG_MACHINE_ID, ARCH_HEATHROW); fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_ADDR, kernel_base); fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_SIZE, kernel_size); if (kernel_cmdline) { fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_CMDLINE, cmdline_base); pstrcpy_targphys("cmdline", cmdline_base, TARGET_PAGE_SIZE, kernel_cmdline); } else { fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_CMDLINE, 0); } fw_cfg_add_i32(fw_cfg, FW_CFG_INITRD_ADDR, initrd_base); fw_cfg_add_i32(fw_cfg, FW_CFG_INITRD_SIZE, initrd_size); fw_cfg_add_i16(fw_cfg, FW_CFG_BOOT_DEVICE, ppc_boot_device); fw_cfg_add_i16(fw_cfg, FW_CFG_PPC_WIDTH, graphic_width); fw_cfg_add_i16(fw_cfg, FW_CFG_PPC_HEIGHT, graphic_height); fw_cfg_add_i16(fw_cfg, FW_CFG_PPC_DEPTH, graphic_depth); fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_IS_KVM, kvm_enabled()); if (kvm_enabled()) { #ifdef CONFIG_KVM uint8_t *hypercall; fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_TBFREQ, kvmppc_get_tbfreq()); hypercall = g_malloc(16); kvmppc_get_hypercall(env, hypercall, 16); fw_cfg_add_bytes(fw_cfg, FW_CFG_PPC_KVM_HC, hypercall, 16); 
fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_KVM_PID, getpid()); #endif } else { fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_TBFREQ, get_ticks_per_sec()); } qemu_register_boot_set(fw_cfg_boot_set, fw_cfg); }
16,910
qemu
1ea879e5580f63414693655fcf0328559cdce138
0
static void *qesd_thread_out (void *arg) { ESDVoiceOut *esd = arg; HWVoiceOut *hw = &esd->hw; int threshold; threshold = conf.divisor ? hw->samples / conf.divisor : 0; if (audio_pt_lock (&esd->pt, AUDIO_FUNC)) { return NULL; } for (;;) { int decr, to_mix, rpos; for (;;) { if (esd->done) { goto exit; } if (esd->live > threshold) { break; } if (audio_pt_wait (&esd->pt, AUDIO_FUNC)) { goto exit; } } decr = to_mix = esd->live; rpos = hw->rpos; if (audio_pt_unlock (&esd->pt, AUDIO_FUNC)) { return NULL; } while (to_mix) { ssize_t written; int chunk = audio_MIN (to_mix, hw->samples - rpos); st_sample_t *src = hw->mix_buf + rpos; hw->clip (esd->pcm_buf, src, chunk); again: written = write (esd->fd, esd->pcm_buf, chunk << hw->info.shift); if (written == -1) { if (errno == EINTR || errno == EAGAIN) { goto again; } qesd_logerr (errno, "write failed\n"); return NULL; } if (written != chunk << hw->info.shift) { int wsamples = written >> hw->info.shift; int wbytes = wsamples << hw->info.shift; if (wbytes != written) { dolog ("warning: Misaligned write %d (requested %d), " "alignment %d\n", wbytes, written, hw->info.align + 1); } to_mix -= wsamples; rpos = (rpos + wsamples) % hw->samples; break; } rpos = (rpos + chunk) % hw->samples; to_mix -= chunk; } if (audio_pt_lock (&esd->pt, AUDIO_FUNC)) { return NULL; } esd->rpos = rpos; esd->live -= decr; esd->decr += decr; } exit: audio_pt_unlock (&esd->pt, AUDIO_FUNC); return NULL; }
16,911
qemu
ddca7f86ac022289840e0200fd4050b2b58e9176
0
static void v9fs_mkdir(void *opaque) { V9fsPDU *pdu = opaque; size_t offset = 7; int32_t fid; struct stat stbuf; V9fsQID qid; V9fsString name; V9fsFidState *fidp; gid_t gid; int mode; int err = 0; pdu_unmarshal(pdu, offset, "dsdd", &fid, &name, &mode, &gid); trace_v9fs_mkdir(pdu->tag, pdu->id, fid, name.data, mode, gid); fidp = get_fid(pdu, fid); if (fidp == NULL) { err = -ENOENT; goto out_nofid; } err = v9fs_co_mkdir(pdu, fidp, &name, mode, fidp->uid, gid, &stbuf); if (err < 0) { goto out; } stat_to_qid(&stbuf, &qid); offset += pdu_marshal(pdu, offset, "Q", &qid); err = offset; trace_v9fs_mkdir_return(pdu->tag, pdu->id, qid.type, qid.version, qid.path, err); out: put_fid(pdu, fidp); out_nofid: complete_pdu(pdu->s, pdu, err); v9fs_string_free(&name); }
16,912
qemu
d6085e3ace20bc9b0fa625d8d79b22668710e217
0
static void peer_test_vnet_hdr(VirtIONet *n) { NetClientState *nc = qemu_get_queue(n->nic); if (!nc->peer) { return; } n->has_vnet_hdr = qemu_peer_has_vnet_hdr(nc); }
16,913
qemu
fd859081453f94c3cbd6527289e41b7fddbf645f
0
static void tpm_tis_receive_bh(void *opaque) { TPMState *s = opaque; TPMTISEmuState *tis = &s->s.tis; uint8_t locty = s->locty_number; tis->loc[locty].sts = TPM_TIS_STS_VALID | TPM_TIS_STS_DATA_AVAILABLE; tis->loc[locty].state = TPM_TIS_STATE_COMPLETION; tis->loc[locty].r_offset = 0; tis->loc[locty].w_offset = 0; if (TPM_TIS_IS_VALID_LOCTY(tis->next_locty)) { tpm_tis_abort(s, locty); } #ifndef RAISE_STS_IRQ tpm_tis_raise_irq(s, locty, TPM_TIS_INT_DATA_AVAILABLE); #else tpm_tis_raise_irq(s, locty, TPM_TIS_INT_DATA_AVAILABLE | TPM_TIS_INT_STS_VALID); #endif }
16,914
qemu
a8170e5e97ad17ca169c64ba87ae2f53850dab4c
0
static int omap_validate_imif_addr(struct omap_mpu_state_s *s,
                                   target_phys_addr_t addr)
{
    return range_covers_byte(OMAP_IMIF_BASE, s->sram_size, addr);
}
16,915
qemu
7b7258f810d2bd40e2fb99c469c5db318d6c3d92
0
static void rtas_set_indicator(PowerPCCPU *cpu, sPAPRMachineState *spapr, uint32_t token, uint32_t nargs, target_ulong args, uint32_t nret, target_ulong rets) { uint32_t sensor_type; uint32_t sensor_index; uint32_t sensor_state; uint32_t ret = RTAS_OUT_SUCCESS; sPAPRDRConnector *drc; sPAPRDRConnectorClass *drck; if (nargs != 3 || nret != 1) { ret = RTAS_OUT_PARAM_ERROR; goto out; } sensor_type = rtas_ld(args, 0); sensor_index = rtas_ld(args, 1); sensor_state = rtas_ld(args, 2); if (!sensor_type_is_dr(sensor_type)) { goto out_unimplemented; } /* if this is a DR sensor we can assume sensor_index == drc_index */ drc = spapr_drc_by_index(sensor_index); if (!drc) { trace_spapr_rtas_set_indicator_invalid(sensor_index); ret = RTAS_OUT_PARAM_ERROR; goto out; } drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc); switch (sensor_type) { case RTAS_SENSOR_TYPE_ISOLATION_STATE: ret = drck->set_isolation_state(drc, sensor_state); break; case RTAS_SENSOR_TYPE_DR: ret = drck->set_indicator_state(drc, sensor_state); break; case RTAS_SENSOR_TYPE_ALLOCATION_STATE: ret = drck->set_allocation_state(drc, sensor_state); break; default: goto out_unimplemented; } out: rtas_st(rets, 0, ret); return; out_unimplemented: /* currently only DR-related sensors are implemented */ trace_spapr_rtas_set_indicator_not_supported(sensor_index, sensor_type); rtas_st(rets, 0, RTAS_OUT_NOT_SUPPORTED); }
16,918
FFmpeg
ada497e61660b4a23d49eaf07fe19386573a6ba9
0
static int g2m_decode_frame(AVCodecContext *avctx, void *data, int *got_picture_ptr, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; G2MContext *c = avctx->priv_data; AVFrame *pic = data; GetByteContext bc, tbc; int magic; int got_header = 0; uint32_t chunk_size, cur_size; int chunk_type; int i; int ret; if (buf_size < 12) { av_log(avctx, AV_LOG_ERROR, "Frame should have at least 12 bytes, got %d instead\n", buf_size); return AVERROR_INVALIDDATA; } bytestream2_init(&bc, buf, buf_size); magic = bytestream2_get_be32(&bc); if ((magic & ~0xF) != MKBETAG('G', '2', 'M', '0') || (magic & 0xF) < 2 || (magic & 0xF) > 4) { av_log(avctx, AV_LOG_ERROR, "Wrong magic %08X\n", magic); return AVERROR_INVALIDDATA; } if ((magic & 0xF) != 4) { av_log(avctx, AV_LOG_ERROR, "G2M2 and G2M3 are not yet supported\n"); return AVERROR(ENOSYS); } while (bytestream2_get_bytes_left(&bc) > 5) { chunk_size = bytestream2_get_le32(&bc) - 1; chunk_type = bytestream2_get_byte(&bc); if (chunk_size > bytestream2_get_bytes_left(&bc)) { av_log(avctx, AV_LOG_ERROR, "Invalid chunk size %d type %02X\n", chunk_size, chunk_type); break; } switch (chunk_type) { case FRAME_INFO: c->got_header = 0; if (chunk_size < 21) { av_log(avctx, AV_LOG_ERROR, "Invalid frame info size %d\n", chunk_size); break; } c->width = bytestream2_get_be32(&bc); c->height = bytestream2_get_be32(&bc); if (c->width < 16 || c->width > avctx->width || c->height < 16 || c->height > avctx->height) { av_log(avctx, AV_LOG_ERROR, "Invalid frame dimensions %dx%d\n", c->width, c->height); c->width = c->height = 0; bytestream2_skip(&bc, bytestream2_get_bytes_left(&bc)); } if (c->width != avctx->width || c->height != avctx->height) avcodec_set_dimensions(avctx, c->width, c->height); c->compression = bytestream2_get_be32(&bc); if (c->compression != 2 && c->compression != 3) { av_log(avctx, AV_LOG_ERROR, "Unknown compression method %d\n", c->compression); return AVERROR_PATCHWELCOME; } c->tile_width = bytestream2_get_be32(&bc); c->tile_height = bytestream2_get_be32(&bc); if (!c->tile_width || !c->tile_height) { av_log(avctx, AV_LOG_ERROR, "Invalid tile dimensions %dx%d\n", c->tile_width, c->tile_height); return AVERROR_INVALIDDATA; } c->tiles_x = (c->width + c->tile_width - 1) / c->tile_width; c->tiles_y = (c->height + c->tile_height - 1) / c->tile_height; c->bpp = bytestream2_get_byte(&bc); chunk_size -= 21; bytestream2_skip(&bc, chunk_size); if (g2m_init_buffers(c)) return AVERROR(ENOMEM); got_header = 1; break; case TILE_DATA: if (!c->tiles_x || !c->tiles_y) { av_log(avctx, AV_LOG_WARNING, "No frame header - skipping tile\n"); bytestream2_skip(&bc, bytestream2_get_bytes_left(&bc)); break; } if (chunk_size < 2) { av_log(avctx, AV_LOG_ERROR, "Invalid tile data size %d\n", chunk_size); break; } c->tile_x = bytestream2_get_byte(&bc); c->tile_y = bytestream2_get_byte(&bc); if (c->tile_x >= c->tiles_x || c->tile_y >= c->tiles_y) { av_log(avctx, AV_LOG_ERROR, "Invalid tile pos %d,%d (in %dx%d grid)\n", c->tile_x, c->tile_y, c->tiles_x, c->tiles_y); break; } chunk_size -= 2; ret = 0; switch (c->compression) { case COMPR_EPIC_J_B: av_log(avctx, AV_LOG_ERROR, "ePIC j-b compression is not implemented yet\n"); return AVERROR(ENOSYS); case COMPR_KEMPF_J_B: ret = kempf_decode_tile(c, c->tile_x, c->tile_y, buf + bytestream2_tell(&bc), chunk_size); break; } if (ret && c->framebuf) av_log(avctx, AV_LOG_ERROR, "Error decoding tile %d,%d\n", c->tile_x, c->tile_y); bytestream2_skip(&bc, chunk_size); break; case CURSOR_POS: if (chunk_size < 5) { av_log(avctx, 
AV_LOG_ERROR, "Invalid cursor pos size %d\n", chunk_size); break; } c->cursor_x = bytestream2_get_be16(&bc); c->cursor_y = bytestream2_get_be16(&bc); bytestream2_skip(&bc, chunk_size - 4); break; case CURSOR_SHAPE: if (chunk_size < 8) { av_log(avctx, AV_LOG_ERROR, "Invalid cursor data size %d\n", chunk_size); break; } bytestream2_init(&tbc, buf + bytestream2_tell(&bc), chunk_size - 4); cur_size = bytestream2_get_be32(&tbc); c->cursor_w = bytestream2_get_byte(&tbc); c->cursor_h = bytestream2_get_byte(&tbc); c->cursor_hot_x = bytestream2_get_byte(&tbc); c->cursor_hot_y = bytestream2_get_byte(&tbc); c->cursor_fmt = bytestream2_get_byte(&tbc); if (cur_size >= chunk_size || c->cursor_w * c->cursor_h / 4 > cur_size) { av_log(avctx, AV_LOG_ERROR, "Invalid cursor data size %d\n", chunk_size); break; } g2m_load_cursor(c, &tbc); bytestream2_skip(&bc, chunk_size); break; case CHUNK_CC: case CHUNK_CD: bytestream2_skip(&bc, chunk_size); break; default: av_log(avctx, AV_LOG_WARNING, "Skipping chunk type %02X\n", chunk_type); bytestream2_skip(&bc, chunk_size); } } if (got_header) c->got_header = 1; if (c->width && c->height && c->framebuf) { if ((ret = ff_get_buffer(avctx, pic, 0)) < 0) { av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); return ret; } pic->key_frame = got_header; pic->pict_type = got_header ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P; for (i = 0; i < avctx->height; i++) memcpy(pic->data[0] + i * pic->linesize[0], c->framebuf + i * c->framebuf_stride, c->width * 3); g2m_paint_cursor(c, pic->data[0], pic->linesize[0]); *got_picture_ptr = 1; } return buf_size; }
16,919
FFmpeg
0abe923d20db6280dfdfa8a4ed07710ad8376e97
0
static inline void chroma_4mv_motion_lowres(MpegEncContext *s, uint8_t *dest_cb, uint8_t *dest_cr, uint8_t **ref_picture, h264_chroma_mc_func * pix_op, int mx, int my) { const int lowres = s->avctx->lowres; const int op_index = FFMIN(lowres, 2); const int block_s = 8 >> lowres; const int s_mask = (2 << lowres) - 1; const int h_edge_pos = s->h_edge_pos >> lowres + 1; const int v_edge_pos = s->v_edge_pos >> lowres + 1; int emu = 0, src_x, src_y, offset, sx, sy; uint8_t *ptr; if (s->quarter_sample) { mx /= 2; my /= 2; } /* In case of 8X8, we construct a single chroma motion vector with a special rounding */ mx = ff_h263_round_chroma(mx); my = ff_h263_round_chroma(my); sx = mx & s_mask; sy = my & s_mask; src_x = s->mb_x * block_s + (mx >> lowres + 1); src_y = s->mb_y * block_s + (my >> lowres + 1); offset = src_y * s->uvlinesize + src_x; ptr = ref_picture[1] + offset; if (s->flags & CODEC_FLAG_EMU_EDGE) { if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) || (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) { s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos); ptr = s->edge_emu_buffer; emu = 1; } } sx = (sx << 2) >> lowres; sy = (sy << 2) >> lowres; pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy); ptr = ref_picture[2] + offset; if (emu) { s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos); ptr = s->edge_emu_buffer; } pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy); }
16,920
qemu
7e486f7577764a07aa35588e119903c80a5c30a2
1
static inline int vmsvga_fifo_length(struct vmsvga_state_s *s) { int num; if (!s->config || !s->enable) { return 0; } /* Check range and alignment. */ if ((CMD(min) | CMD(max) | CMD(next_cmd) | CMD(stop)) & 3) { return 0; } if (CMD(min) < (uint8_t *) s->cmd->fifo - (uint8_t *) s->fifo) { return 0; } if (CMD(max) > SVGA_FIFO_SIZE || CMD(min) >= SVGA_FIFO_SIZE || CMD(stop) >= SVGA_FIFO_SIZE || CMD(next_cmd) >= SVGA_FIFO_SIZE) { return 0; } if (CMD(max) < CMD(min) + 10 * 1024) { return 0; } num = CMD(next_cmd) - CMD(stop); if (num < 0) { num += CMD(max) - CMD(min); } return num >> 2; }
16,922
qemu
d4218d996d2274f4136b8bd22e946bf56f050c9e
1
static void disas_sparc_insn(DisasContext * dc) { unsigned int insn, opc, rs1, rs2, rd; insn = ldl_code(dc->pc); opc = GET_FIELD(insn, 0, 1); rd = GET_FIELD(insn, 2, 6); switch (opc) { case 0: /* branches/sethi */ { unsigned int xop = GET_FIELD(insn, 7, 9); int32_t target; switch (xop) { #ifdef TARGET_SPARC64 case 0x1: /* V9 BPcc */ { int cc; target = GET_FIELD_SP(insn, 0, 18); target = sign_extend(target, 18); target <<= 2; cc = GET_FIELD_SP(insn, 20, 21); if (cc == 0) do_branch(dc, target, insn, 0); else if (cc == 2) do_branch(dc, target, insn, 1); else goto jmp_insn; } case 0x3: /* V9 BPr */ { target = GET_FIELD_SP(insn, 0, 13) | (GET_FIELD_SP(insn, 20, 21) << 14); target = sign_extend(target, 16); target <<= 2; rs1 = GET_FIELD(insn, 13, 17); gen_movl_reg_T0(rs1); do_branch_reg(dc, target, insn); goto jmp_insn; } case 0x5: /* V9 FBPcc */ { int cc = GET_FIELD_SP(insn, 20, 21); if (gen_trap_ifnofpu(dc)) goto jmp_insn; target = GET_FIELD_SP(insn, 0, 18); target = sign_extend(target, 19); target <<= 2; do_fbranch(dc, target, insn, cc); goto jmp_insn; } #endif case 0x2: /* BN+x */ { target = GET_FIELD(insn, 10, 31); target = sign_extend(target, 22); target <<= 2; do_branch(dc, target, insn, 0); goto jmp_insn; } case 0x6: /* FBN+x */ { if (gen_trap_ifnofpu(dc)) goto jmp_insn; target = GET_FIELD(insn, 10, 31); target = sign_extend(target, 22); target <<= 2; do_fbranch(dc, target, insn, 0); goto jmp_insn; } case 0x4: /* SETHI */ #define OPTIM #if defined(OPTIM) if (rd) { // nop #endif uint32_t value = GET_FIELD(insn, 10, 31); gen_movl_imm_T0(value << 10); gen_movl_T0_reg(rd); #if defined(OPTIM) } #endif break; case 0x0: /* UNIMPL */ default: } break; } break; case 1: /*CALL*/ { target_long target = GET_FIELDs(insn, 2, 31) << 2; #ifdef TARGET_SPARC64 if (dc->pc == (uint32_t)dc->pc) { gen_op_movl_T0_im(dc->pc); } else { gen_op_movq_T0_im64(dc->pc >> 32, dc->pc); } #else gen_op_movl_T0_im(dc->pc); #endif gen_movl_T0_reg(15); target += dc->pc; gen_mov_pc_npc(dc); dc->npc = target; } goto jmp_insn; case 2: /* FPU & Logical Operations */ { unsigned int xop = GET_FIELD(insn, 7, 12); if (xop == 0x3a) { /* generate trap */ int cond; rs1 = GET_FIELD(insn, 13, 17); gen_movl_reg_T0(rs1); if (IS_IMM) { rs2 = GET_FIELD(insn, 25, 31); #if defined(OPTIM) if (rs2 != 0) { #endif gen_movl_simm_T1(rs2); gen_op_add_T1_T0(); #if defined(OPTIM) } #endif } else { rs2 = GET_FIELD(insn, 27, 31); #if defined(OPTIM) if (rs2 != 0) { #endif gen_movl_reg_T1(rs2); gen_op_add_T1_T0(); #if defined(OPTIM) } #endif } cond = GET_FIELD(insn, 3, 6); if (cond == 0x8) { save_state(dc); gen_op_trap_T0(); } else if (cond != 0) { #ifdef TARGET_SPARC64 /* V9 icc/xcc */ int cc = GET_FIELD_SP(insn, 11, 12); flush_T2(dc); save_state(dc); if (cc == 0) gen_cond[0][cond](); else if (cc == 2) gen_cond[1][cond](); else #else flush_T2(dc); save_state(dc); gen_cond[0][cond](); #endif gen_op_trapcc_T0(); } gen_op_next_insn(); gen_op_movl_T0_0(); gen_op_exit_tb(); dc->is_br = 1; goto jmp_insn; } else if (xop == 0x28) { rs1 = GET_FIELD(insn, 13, 17); switch(rs1) { case 0: /* rdy */ #ifndef TARGET_SPARC64 case 0x01 ... 0x0e: /* undefined in the SPARCv8 manual, rdy on the microSPARC II */ case 0x0f: /* stbar in the SPARCv8 manual, rdy on the microSPARC II */ case 0x10 ... 
0x1f: /* implementation-dependent in the SPARCv8 manual, rdy on the microSPARC II */ #endif gen_op_movtl_T0_env(offsetof(CPUSPARCState, y)); gen_movl_T0_reg(rd); break; #ifdef TARGET_SPARC64 case 0x2: /* V9 rdccr */ gen_op_rdccr(); gen_movl_T0_reg(rd); break; case 0x3: /* V9 rdasi */ gen_op_movl_T0_env(offsetof(CPUSPARCState, asi)); gen_movl_T0_reg(rd); break; case 0x4: /* V9 rdtick */ gen_op_rdtick(); gen_movl_T0_reg(rd); break; case 0x5: /* V9 rdpc */ if (dc->pc == (uint32_t)dc->pc) { gen_op_movl_T0_im(dc->pc); } else { gen_op_movq_T0_im64(dc->pc >> 32, dc->pc); } gen_movl_T0_reg(rd); break; case 0x6: /* V9 rdfprs */ gen_op_movl_T0_env(offsetof(CPUSPARCState, fprs)); gen_movl_T0_reg(rd); break; case 0xf: /* V9 membar */ break; /* no effect */ case 0x13: /* Graphics Status */ if (gen_trap_ifnofpu(dc)) goto jmp_insn; gen_op_movtl_T0_env(offsetof(CPUSPARCState, gsr)); gen_movl_T0_reg(rd); break; case 0x17: /* Tick compare */ gen_op_movtl_T0_env(offsetof(CPUSPARCState, tick_cmpr)); gen_movl_T0_reg(rd); break; case 0x18: /* System tick */ gen_op_rdtick(); // XXX gen_movl_T0_reg(rd); break; case 0x19: /* System tick compare */ gen_op_movtl_T0_env(offsetof(CPUSPARCState, stick_cmpr)); gen_movl_T0_reg(rd); break; case 0x10: /* Performance Control */ case 0x11: /* Performance Instrumentation Counter */ case 0x12: /* Dispatch Control */ case 0x14: /* Softint set, WO */ case 0x15: /* Softint clear, WO */ case 0x16: /* Softint write */ #endif default: } #if !defined(CONFIG_USER_ONLY) #ifndef TARGET_SPARC64 } else if (xop == 0x29) { /* rdpsr / V9 unimp */ if (!supervisor(dc)) goto priv_insn; gen_op_rdpsr(); gen_movl_T0_reg(rd); break; #endif } else if (xop == 0x2a) { /* rdwim / V9 rdpr */ if (!supervisor(dc)) goto priv_insn; #ifdef TARGET_SPARC64 rs1 = GET_FIELD(insn, 13, 17); switch (rs1) { case 0: // tpc gen_op_rdtpc(); break; case 1: // tnpc gen_op_rdtnpc(); break; case 2: // tstate gen_op_rdtstate(); break; case 3: // tt gen_op_rdtt(); break; case 4: // tick gen_op_rdtick(); break; case 5: // tba gen_op_movtl_T0_env(offsetof(CPUSPARCState, tbr)); break; case 6: // pstate gen_op_rdpstate(); break; case 7: // tl gen_op_movl_T0_env(offsetof(CPUSPARCState, tl)); break; case 8: // pil gen_op_movl_T0_env(offsetof(CPUSPARCState, psrpil)); break; case 9: // cwp gen_op_rdcwp(); break; case 10: // cansave gen_op_movl_T0_env(offsetof(CPUSPARCState, cansave)); break; case 11: // canrestore gen_op_movl_T0_env(offsetof(CPUSPARCState, canrestore)); break; case 12: // cleanwin gen_op_movl_T0_env(offsetof(CPUSPARCState, cleanwin)); break; case 13: // otherwin gen_op_movl_T0_env(offsetof(CPUSPARCState, otherwin)); break; case 14: // wstate gen_op_movl_T0_env(offsetof(CPUSPARCState, wstate)); break; case 31: // ver gen_op_movtl_T0_env(offsetof(CPUSPARCState, version)); break; case 15: // fq default: } #else gen_op_movl_T0_env(offsetof(CPUSPARCState, wim)); #endif gen_movl_T0_reg(rd); break; } else if (xop == 0x2b) { /* rdtbr / V9 flushw */ #ifdef TARGET_SPARC64 gen_op_flushw(); #else if (!supervisor(dc)) goto priv_insn; gen_op_movtl_T0_env(offsetof(CPUSPARCState, tbr)); gen_movl_T0_reg(rd); #endif break; #endif } else if (xop == 0x34) { /* FPU Operations */ if (gen_trap_ifnofpu(dc)) goto jmp_insn; rs1 = GET_FIELD(insn, 13, 17); rs2 = GET_FIELD(insn, 27, 31); xop = GET_FIELD(insn, 18, 26); switch (xop) { case 0x1: /* fmovs */ gen_op_load_fpr_FT0(rs2); gen_op_store_FT0_fpr(rd); break; case 0x5: /* fnegs */ gen_op_load_fpr_FT1(rs2); gen_op_fnegs(); gen_op_store_FT0_fpr(rd); break; case 0x9: /* fabss */ 
gen_op_load_fpr_FT1(rs2); gen_op_fabss(); gen_op_store_FT0_fpr(rd); break; case 0x29: /* fsqrts */ gen_op_load_fpr_FT1(rs2); gen_op_fsqrts(); gen_op_store_FT0_fpr(rd); break; case 0x2a: /* fsqrtd */ gen_op_load_fpr_DT1(DFPREG(rs2)); gen_op_fsqrtd(); gen_op_store_DT0_fpr(DFPREG(rd)); break; case 0x2b: /* fsqrtq */ goto nfpu_insn; case 0x41: gen_op_load_fpr_FT0(rs1); gen_op_load_fpr_FT1(rs2); gen_op_fadds(); gen_op_store_FT0_fpr(rd); break; case 0x42: gen_op_load_fpr_DT0(DFPREG(rs1)); gen_op_load_fpr_DT1(DFPREG(rs2)); gen_op_faddd(); gen_op_store_DT0_fpr(DFPREG(rd)); break; case 0x43: /* faddq */ goto nfpu_insn; case 0x45: gen_op_load_fpr_FT0(rs1); gen_op_load_fpr_FT1(rs2); gen_op_fsubs(); gen_op_store_FT0_fpr(rd); break; case 0x46: gen_op_load_fpr_DT0(DFPREG(rs1)); gen_op_load_fpr_DT1(DFPREG(rs2)); gen_op_fsubd(); gen_op_store_DT0_fpr(DFPREG(rd)); break; case 0x47: /* fsubq */ goto nfpu_insn; case 0x49: gen_op_load_fpr_FT0(rs1); gen_op_load_fpr_FT1(rs2); gen_op_fmuls(); gen_op_store_FT0_fpr(rd); break; case 0x4a: gen_op_load_fpr_DT0(DFPREG(rs1)); gen_op_load_fpr_DT1(DFPREG(rs2)); gen_op_fmuld(); gen_op_store_DT0_fpr(rd); break; case 0x4b: /* fmulq */ goto nfpu_insn; case 0x4d: gen_op_load_fpr_FT0(rs1); gen_op_load_fpr_FT1(rs2); gen_op_fdivs(); gen_op_store_FT0_fpr(rd); break; case 0x4e: gen_op_load_fpr_DT0(DFPREG(rs1)); gen_op_load_fpr_DT1(DFPREG(rs2)); gen_op_fdivd(); gen_op_store_DT0_fpr(DFPREG(rd)); break; case 0x4f: /* fdivq */ goto nfpu_insn; case 0x69: gen_op_load_fpr_FT0(rs1); gen_op_load_fpr_FT1(rs2); gen_op_fsmuld(); gen_op_store_DT0_fpr(DFPREG(rd)); break; case 0x6e: /* fdmulq */ goto nfpu_insn; case 0xc4: gen_op_load_fpr_FT1(rs2); gen_op_fitos(); gen_op_store_FT0_fpr(rd); break; case 0xc6: gen_op_load_fpr_DT1(DFPREG(rs2)); gen_op_fdtos(); gen_op_store_FT0_fpr(rd); break; case 0xc7: /* fqtos */ goto nfpu_insn; case 0xc8: gen_op_load_fpr_FT1(rs2); gen_op_fitod(); gen_op_store_DT0_fpr(DFPREG(rd)); break; case 0xc9: gen_op_load_fpr_FT1(rs2); gen_op_fstod(); gen_op_store_DT0_fpr(DFPREG(rd)); break; case 0xcb: /* fqtod */ goto nfpu_insn; case 0xcc: /* fitoq */ goto nfpu_insn; case 0xcd: /* fstoq */ goto nfpu_insn; case 0xce: /* fdtoq */ goto nfpu_insn; case 0xd1: gen_op_load_fpr_FT1(rs2); gen_op_fstoi(); gen_op_store_FT0_fpr(rd); break; case 0xd2: gen_op_load_fpr_DT1(rs2); gen_op_fdtoi(); gen_op_store_FT0_fpr(rd); break; case 0xd3: /* fqtoi */ goto nfpu_insn; #ifdef TARGET_SPARC64 case 0x2: /* V9 fmovd */ gen_op_load_fpr_DT0(DFPREG(rs2)); gen_op_store_DT0_fpr(DFPREG(rd)); break; case 0x6: /* V9 fnegd */ gen_op_load_fpr_DT1(DFPREG(rs2)); gen_op_fnegd(); gen_op_store_DT0_fpr(DFPREG(rd)); break; case 0xa: /* V9 fabsd */ gen_op_load_fpr_DT1(DFPREG(rs2)); gen_op_fabsd(); gen_op_store_DT0_fpr(DFPREG(rd)); break; case 0x81: /* V9 fstox */ gen_op_load_fpr_FT1(rs2); gen_op_fstox(); gen_op_store_DT0_fpr(DFPREG(rd)); break; case 0x82: /* V9 fdtox */ gen_op_load_fpr_DT1(DFPREG(rs2)); gen_op_fdtox(); gen_op_store_DT0_fpr(DFPREG(rd)); break; case 0x84: /* V9 fxtos */ gen_op_load_fpr_DT1(DFPREG(rs2)); gen_op_fxtos(); gen_op_store_FT0_fpr(rd); break; case 0x88: /* V9 fxtod */ gen_op_load_fpr_DT1(DFPREG(rs2)); gen_op_fxtod(); gen_op_store_DT0_fpr(DFPREG(rd)); break; case 0x3: /* V9 fmovq */ case 0x7: /* V9 fnegq */ case 0xb: /* V9 fabsq */ case 0x83: /* V9 fqtox */ case 0x8c: /* V9 fxtoq */ goto nfpu_insn; #endif default: } } else if (xop == 0x35) { /* FPU Operations */ #ifdef TARGET_SPARC64 int cond; #endif if (gen_trap_ifnofpu(dc)) goto jmp_insn; rs1 = GET_FIELD(insn, 13, 17); rs2 = GET_FIELD(insn, 
27, 31); xop = GET_FIELD(insn, 18, 26); #ifdef TARGET_SPARC64 if ((xop & 0x11f) == 0x005) { // V9 fmovsr cond = GET_FIELD_SP(insn, 14, 17); gen_op_load_fpr_FT0(rd); gen_op_load_fpr_FT1(rs2); rs1 = GET_FIELD(insn, 13, 17); gen_movl_reg_T0(rs1); flush_T2(dc); gen_cond_reg(cond); gen_op_fmovs_cc(); gen_op_store_FT0_fpr(rd); break; } else if ((xop & 0x11f) == 0x006) { // V9 fmovdr cond = GET_FIELD_SP(insn, 14, 17); gen_op_load_fpr_DT0(rd); gen_op_load_fpr_DT1(rs2); flush_T2(dc); rs1 = GET_FIELD(insn, 13, 17); gen_movl_reg_T0(rs1); gen_cond_reg(cond); gen_op_fmovs_cc(); gen_op_store_DT0_fpr(rd); break; } else if ((xop & 0x11f) == 0x007) { // V9 fmovqr goto nfpu_insn; } #endif switch (xop) { #ifdef TARGET_SPARC64 case 0x001: /* V9 fmovscc %fcc0 */ cond = GET_FIELD_SP(insn, 14, 17); gen_op_load_fpr_FT0(rd); gen_op_load_fpr_FT1(rs2); flush_T2(dc); gen_fcond[0][cond](); gen_op_fmovs_cc(); gen_op_store_FT0_fpr(rd); break; case 0x002: /* V9 fmovdcc %fcc0 */ cond = GET_FIELD_SP(insn, 14, 17); gen_op_load_fpr_DT0(rd); gen_op_load_fpr_DT1(rs2); flush_T2(dc); gen_fcond[0][cond](); gen_op_fmovd_cc(); gen_op_store_DT0_fpr(rd); break; case 0x003: /* V9 fmovqcc %fcc0 */ goto nfpu_insn; case 0x041: /* V9 fmovscc %fcc1 */ cond = GET_FIELD_SP(insn, 14, 17); gen_op_load_fpr_FT0(rd); gen_op_load_fpr_FT1(rs2); flush_T2(dc); gen_fcond[1][cond](); gen_op_fmovs_cc(); gen_op_store_FT0_fpr(rd); break; case 0x042: /* V9 fmovdcc %fcc1 */ cond = GET_FIELD_SP(insn, 14, 17); gen_op_load_fpr_DT0(rd); gen_op_load_fpr_DT1(rs2); flush_T2(dc); gen_fcond[1][cond](); gen_op_fmovd_cc(); gen_op_store_DT0_fpr(rd); break; case 0x043: /* V9 fmovqcc %fcc1 */ goto nfpu_insn; case 0x081: /* V9 fmovscc %fcc2 */ cond = GET_FIELD_SP(insn, 14, 17); gen_op_load_fpr_FT0(rd); gen_op_load_fpr_FT1(rs2); flush_T2(dc); gen_fcond[2][cond](); gen_op_fmovs_cc(); gen_op_store_FT0_fpr(rd); break; case 0x082: /* V9 fmovdcc %fcc2 */ cond = GET_FIELD_SP(insn, 14, 17); gen_op_load_fpr_DT0(rd); gen_op_load_fpr_DT1(rs2); flush_T2(dc); gen_fcond[2][cond](); gen_op_fmovd_cc(); gen_op_store_DT0_fpr(rd); break; case 0x083: /* V9 fmovqcc %fcc2 */ goto nfpu_insn; case 0x0c1: /* V9 fmovscc %fcc3 */ cond = GET_FIELD_SP(insn, 14, 17); gen_op_load_fpr_FT0(rd); gen_op_load_fpr_FT1(rs2); flush_T2(dc); gen_fcond[3][cond](); gen_op_fmovs_cc(); gen_op_store_FT0_fpr(rd); break; case 0x0c2: /* V9 fmovdcc %fcc3 */ cond = GET_FIELD_SP(insn, 14, 17); gen_op_load_fpr_DT0(rd); gen_op_load_fpr_DT1(rs2); flush_T2(dc); gen_fcond[3][cond](); gen_op_fmovd_cc(); gen_op_store_DT0_fpr(rd); break; case 0x0c3: /* V9 fmovqcc %fcc3 */ goto nfpu_insn; case 0x101: /* V9 fmovscc %icc */ cond = GET_FIELD_SP(insn, 14, 17); gen_op_load_fpr_FT0(rd); gen_op_load_fpr_FT1(rs2); flush_T2(dc); gen_cond[0][cond](); gen_op_fmovs_cc(); gen_op_store_FT0_fpr(rd); break; case 0x102: /* V9 fmovdcc %icc */ cond = GET_FIELD_SP(insn, 14, 17); gen_op_load_fpr_DT0(rd); gen_op_load_fpr_DT1(rs2); flush_T2(dc); gen_cond[0][cond](); gen_op_fmovd_cc(); gen_op_store_DT0_fpr(rd); break; case 0x103: /* V9 fmovqcc %icc */ goto nfpu_insn; case 0x181: /* V9 fmovscc %xcc */ cond = GET_FIELD_SP(insn, 14, 17); gen_op_load_fpr_FT0(rd); gen_op_load_fpr_FT1(rs2); flush_T2(dc); gen_cond[1][cond](); gen_op_fmovs_cc(); gen_op_store_FT0_fpr(rd); break; case 0x182: /* V9 fmovdcc %xcc */ cond = GET_FIELD_SP(insn, 14, 17); gen_op_load_fpr_DT0(rd); gen_op_load_fpr_DT1(rs2); flush_T2(dc); gen_cond[1][cond](); gen_op_fmovd_cc(); gen_op_store_DT0_fpr(rd); break; case 0x183: /* V9 fmovqcc %xcc */ goto nfpu_insn; #endif case 0x51: /* V9 %fcc */ 
gen_op_load_fpr_FT0(rs1); gen_op_load_fpr_FT1(rs2); #ifdef TARGET_SPARC64 gen_fcmps[rd & 3](); #else gen_op_fcmps(); #endif break; case 0x52: /* V9 %fcc */ gen_op_load_fpr_DT0(DFPREG(rs1)); gen_op_load_fpr_DT1(DFPREG(rs2)); #ifdef TARGET_SPARC64 gen_fcmpd[rd & 3](); #else gen_op_fcmpd(); #endif break; case 0x53: /* fcmpq */ goto nfpu_insn; case 0x55: /* fcmpes, V9 %fcc */ gen_op_load_fpr_FT0(rs1); gen_op_load_fpr_FT1(rs2); #ifdef TARGET_SPARC64 gen_fcmps[rd & 3](); #else gen_op_fcmps(); /* XXX should trap if qNaN or sNaN */ #endif break; case 0x56: /* fcmped, V9 %fcc */ gen_op_load_fpr_DT0(DFPREG(rs1)); gen_op_load_fpr_DT1(DFPREG(rs2)); #ifdef TARGET_SPARC64 gen_fcmpd[rd & 3](); #else gen_op_fcmpd(); /* XXX should trap if qNaN or sNaN */ #endif break; case 0x57: /* fcmpeq */ goto nfpu_insn; default: } #if defined(OPTIM) } else if (xop == 0x2) { // clr/mov shortcut rs1 = GET_FIELD(insn, 13, 17); if (rs1 == 0) { // or %g0, x, y -> mov T1, x; mov y, T1 if (IS_IMM) { /* immediate */ rs2 = GET_FIELDs(insn, 19, 31); gen_movl_simm_T1(rs2); } else { /* register */ rs2 = GET_FIELD(insn, 27, 31); gen_movl_reg_T1(rs2); } gen_movl_T1_reg(rd); } else { gen_movl_reg_T0(rs1); if (IS_IMM) { /* immediate */ // or x, #0, y -> mov T1, x; mov y, T1 rs2 = GET_FIELDs(insn, 19, 31); if (rs2 != 0) { gen_movl_simm_T1(rs2); gen_op_or_T1_T0(); } } else { /* register */ // or x, %g0, y -> mov T1, x; mov y, T1 rs2 = GET_FIELD(insn, 27, 31); if (rs2 != 0) { gen_movl_reg_T1(rs2); gen_op_or_T1_T0(); } } gen_movl_T0_reg(rd); } #endif #ifdef TARGET_SPARC64 } else if (xop == 0x25) { /* sll, V9 sllx ( == sll) */ rs1 = GET_FIELD(insn, 13, 17); gen_movl_reg_T0(rs1); if (IS_IMM) { /* immediate */ rs2 = GET_FIELDs(insn, 20, 31); gen_movl_simm_T1(rs2); } else { /* register */ rs2 = GET_FIELD(insn, 27, 31); gen_movl_reg_T1(rs2); } gen_op_sll(); gen_movl_T0_reg(rd); } else if (xop == 0x26) { /* srl, V9 srlx */ rs1 = GET_FIELD(insn, 13, 17); gen_movl_reg_T0(rs1); if (IS_IMM) { /* immediate */ rs2 = GET_FIELDs(insn, 20, 31); gen_movl_simm_T1(rs2); } else { /* register */ rs2 = GET_FIELD(insn, 27, 31); gen_movl_reg_T1(rs2); } if (insn & (1 << 12)) gen_op_srlx(); else gen_op_srl(); gen_movl_T0_reg(rd); } else if (xop == 0x27) { /* sra, V9 srax */ rs1 = GET_FIELD(insn, 13, 17); gen_movl_reg_T0(rs1); if (IS_IMM) { /* immediate */ rs2 = GET_FIELDs(insn, 20, 31); gen_movl_simm_T1(rs2); } else { /* register */ rs2 = GET_FIELD(insn, 27, 31); gen_movl_reg_T1(rs2); } if (insn & (1 << 12)) gen_op_srax(); else gen_op_sra(); gen_movl_T0_reg(rd); #endif } else if (xop < 0x36) { rs1 = GET_FIELD(insn, 13, 17); gen_movl_reg_T0(rs1); if (IS_IMM) { /* immediate */ rs2 = GET_FIELDs(insn, 19, 31); gen_movl_simm_T1(rs2); } else { /* register */ rs2 = GET_FIELD(insn, 27, 31); gen_movl_reg_T1(rs2); } if (xop < 0x20) { switch (xop & ~0x10) { case 0x0: if (xop & 0x10) gen_op_add_T1_T0_cc(); else gen_op_add_T1_T0(); break; case 0x1: gen_op_and_T1_T0(); if (xop & 0x10) gen_op_logic_T0_cc(); break; case 0x2: gen_op_or_T1_T0(); if (xop & 0x10) gen_op_logic_T0_cc(); break; case 0x3: gen_op_xor_T1_T0(); if (xop & 0x10) gen_op_logic_T0_cc(); break; case 0x4: if (xop & 0x10) gen_op_sub_T1_T0_cc(); else gen_op_sub_T1_T0(); break; case 0x5: gen_op_andn_T1_T0(); if (xop & 0x10) gen_op_logic_T0_cc(); break; case 0x6: gen_op_orn_T1_T0(); if (xop & 0x10) gen_op_logic_T0_cc(); break; case 0x7: gen_op_xnor_T1_T0(); if (xop & 0x10) gen_op_logic_T0_cc(); break; case 0x8: if (xop & 0x10) gen_op_addx_T1_T0_cc(); else gen_op_addx_T1_T0(); break; #ifdef TARGET_SPARC64 case 0x9: /* 
V9 mulx */ gen_op_mulx_T1_T0(); break; #endif case 0xa: gen_op_umul_T1_T0(); if (xop & 0x10) gen_op_logic_T0_cc(); break; case 0xb: gen_op_smul_T1_T0(); if (xop & 0x10) gen_op_logic_T0_cc(); break; case 0xc: if (xop & 0x10) gen_op_subx_T1_T0_cc(); else gen_op_subx_T1_T0(); break; #ifdef TARGET_SPARC64 case 0xd: /* V9 udivx */ gen_op_udivx_T1_T0(); break; #endif case 0xe: gen_op_udiv_T1_T0(); if (xop & 0x10) gen_op_div_cc(); break; case 0xf: gen_op_sdiv_T1_T0(); if (xop & 0x10) gen_op_div_cc(); break; default: } gen_movl_T0_reg(rd); } else { switch (xop) { case 0x20: /* taddcc */ gen_op_tadd_T1_T0_cc(); gen_movl_T0_reg(rd); break; case 0x21: /* tsubcc */ gen_op_tsub_T1_T0_cc(); gen_movl_T0_reg(rd); break; case 0x22: /* taddcctv */ gen_op_tadd_T1_T0_ccTV(); gen_movl_T0_reg(rd); break; case 0x23: /* tsubcctv */ gen_op_tsub_T1_T0_ccTV(); gen_movl_T0_reg(rd); break; case 0x24: /* mulscc */ gen_op_mulscc_T1_T0(); gen_movl_T0_reg(rd); break; #ifndef TARGET_SPARC64 case 0x25: /* sll */ gen_op_sll(); gen_movl_T0_reg(rd); break; case 0x26: /* srl */ gen_op_srl(); gen_movl_T0_reg(rd); break; case 0x27: /* sra */ gen_op_sra(); gen_movl_T0_reg(rd); break; #endif case 0x30: { switch(rd) { case 0: /* wry */ gen_op_xor_T1_T0(); gen_op_movtl_env_T0(offsetof(CPUSPARCState, y)); break; #ifndef TARGET_SPARC64 case 0x01 ... 0x0f: /* undefined in the SPARCv8 manual, nop on the microSPARC II */ case 0x10 ... 0x1f: /* implementation-dependent in the SPARCv8 manual, nop on the microSPARC II */ break; #else case 0x2: /* V9 wrccr */ gen_op_wrccr(); break; case 0x3: /* V9 wrasi */ gen_op_movl_env_T0(offsetof(CPUSPARCState, asi)); break; case 0x6: /* V9 wrfprs */ gen_op_movl_env_T0(offsetof(CPUSPARCState, fprs)); break; case 0xf: /* V9 sir, nop if user */ #if !defined(CONFIG_USER_ONLY) if (supervisor(dc)) gen_op_sir(); #endif break; case 0x13: /* Graphics Status */ if (gen_trap_ifnofpu(dc)) goto jmp_insn; gen_op_movtl_env_T0(offsetof(CPUSPARCState, gsr)); break; case 0x17: /* Tick compare */ #if !defined(CONFIG_USER_ONLY) if (!supervisor(dc)) #endif gen_op_movtl_env_T0(offsetof(CPUSPARCState, tick_cmpr)); break; case 0x18: /* System tick */ #if !defined(CONFIG_USER_ONLY) if (!supervisor(dc)) #endif gen_op_movtl_env_T0(offsetof(CPUSPARCState, stick_cmpr)); break; case 0x19: /* System tick compare */ #if !defined(CONFIG_USER_ONLY) if (!supervisor(dc)) #endif gen_op_movtl_env_T0(offsetof(CPUSPARCState, stick_cmpr)); break; case 0x10: /* Performance Control */ case 0x11: /* Performance Instrumentation Counter */ case 0x12: /* Dispatch Control */ case 0x14: /* Softint set */ case 0x15: /* Softint clear */ case 0x16: /* Softint write */ #endif default: } } break; #if !defined(CONFIG_USER_ONLY) case 0x31: /* wrpsr, V9 saved, restored */ { if (!supervisor(dc)) goto priv_insn; #ifdef TARGET_SPARC64 switch (rd) { case 0: gen_op_saved(); break; case 1: gen_op_restored(); break; default: } #else gen_op_xor_T1_T0(); gen_op_wrpsr(); save_state(dc); gen_op_next_insn(); gen_op_movl_T0_0(); gen_op_exit_tb(); dc->is_br = 1; #endif } break; case 0x32: /* wrwim, V9 wrpr */ { if (!supervisor(dc)) goto priv_insn; gen_op_xor_T1_T0(); #ifdef TARGET_SPARC64 switch (rd) { case 0: // tpc gen_op_wrtpc(); break; case 1: // tnpc gen_op_wrtnpc(); break; case 2: // tstate gen_op_wrtstate(); break; case 3: // tt gen_op_wrtt(); break; case 4: // tick gen_op_wrtick(); break; case 5: // tba gen_op_movtl_env_T0(offsetof(CPUSPARCState, tbr)); break; case 6: // pstate gen_op_wrpstate(); save_state(dc); gen_op_next_insn(); gen_op_movl_T0_0(); 
gen_op_exit_tb(); dc->is_br = 1; break; case 7: // tl gen_op_movl_env_T0(offsetof(CPUSPARCState, tl)); break; case 8: // pil gen_op_movl_env_T0(offsetof(CPUSPARCState, psrpil)); break; case 9: // cwp gen_op_wrcwp(); break; case 10: // cansave gen_op_movl_env_T0(offsetof(CPUSPARCState, cansave)); break; case 11: // canrestore gen_op_movl_env_T0(offsetof(CPUSPARCState, canrestore)); break; case 12: // cleanwin gen_op_movl_env_T0(offsetof(CPUSPARCState, cleanwin)); break; case 13: // otherwin gen_op_movl_env_T0(offsetof(CPUSPARCState, otherwin)); break; case 14: // wstate gen_op_movl_env_T0(offsetof(CPUSPARCState, wstate)); break; default: } #else gen_op_wrwim(); #endif } break; #ifndef TARGET_SPARC64 case 0x33: /* wrtbr, V9 unimp */ { if (!supervisor(dc)) goto priv_insn; gen_op_xor_T1_T0(); gen_op_movtl_env_T0(offsetof(CPUSPARCState, tbr)); } break; #endif #endif #ifdef TARGET_SPARC64 case 0x2c: /* V9 movcc */ { int cc = GET_FIELD_SP(insn, 11, 12); int cond = GET_FIELD_SP(insn, 14, 17); if (IS_IMM) { /* immediate */ rs2 = GET_FIELD_SPs(insn, 0, 10); gen_movl_simm_T1(rs2); } else { rs2 = GET_FIELD_SP(insn, 0, 4); gen_movl_reg_T1(rs2); } gen_movl_reg_T0(rd); flush_T2(dc); if (insn & (1 << 18)) { if (cc == 0) gen_cond[0][cond](); else if (cc == 2) gen_cond[1][cond](); else } else { gen_fcond[cc][cond](); } gen_op_mov_cc(); gen_movl_T0_reg(rd); break; } case 0x2d: /* V9 sdivx */ gen_op_sdivx_T1_T0(); gen_movl_T0_reg(rd); break; case 0x2e: /* V9 popc */ { if (IS_IMM) { /* immediate */ rs2 = GET_FIELD_SPs(insn, 0, 12); gen_movl_simm_T1(rs2); // XXX optimize: popc(constant) } else { rs2 = GET_FIELD_SP(insn, 0, 4); gen_movl_reg_T1(rs2); } gen_op_popc(); gen_movl_T0_reg(rd); } case 0x2f: /* V9 movr */ { int cond = GET_FIELD_SP(insn, 10, 12); rs1 = GET_FIELD(insn, 13, 17); flush_T2(dc); gen_movl_reg_T0(rs1); gen_cond_reg(cond); if (IS_IMM) { /* immediate */ rs2 = GET_FIELD_SPs(insn, 0, 10); gen_movl_simm_T1(rs2); } else { rs2 = GET_FIELD_SP(insn, 0, 4); gen_movl_reg_T1(rs2); } gen_movl_reg_T0(rd); gen_op_mov_cc(); gen_movl_T0_reg(rd); break; } case 0x36: /* UltraSparc shutdown, VIS */ { int opf = GET_FIELD_SP(insn, 5, 13); rs1 = GET_FIELD(insn, 13, 17); rs2 = GET_FIELD(insn, 27, 31); switch (opf) { case 0x018: /* VIS I alignaddr */ if (gen_trap_ifnofpu(dc)) goto jmp_insn; gen_movl_reg_T0(rs1); gen_movl_reg_T1(rs2); gen_op_alignaddr(); gen_movl_T0_reg(rd); break; case 0x01a: /* VIS I alignaddrl */ if (gen_trap_ifnofpu(dc)) goto jmp_insn; // XXX break; case 0x048: /* VIS I faligndata */ if (gen_trap_ifnofpu(dc)) goto jmp_insn; gen_op_load_fpr_DT0(rs1); gen_op_load_fpr_DT1(rs2); gen_op_faligndata(); gen_op_store_DT0_fpr(rd); break; default: } break; } #endif default: } } } else if (xop == 0x36 || xop == 0x37) { /* CPop1 & CPop2, V9 impdep1 & impdep2 */ #ifdef TARGET_SPARC64 #else goto ncp_insn; #endif #ifdef TARGET_SPARC64 } else if (xop == 0x39) { /* V9 return */ rs1 = GET_FIELD(insn, 13, 17); gen_movl_reg_T0(rs1); if (IS_IMM) { /* immediate */ rs2 = GET_FIELDs(insn, 19, 31); #if defined(OPTIM) if (rs2) { #endif gen_movl_simm_T1(rs2); gen_op_add_T1_T0(); #if defined(OPTIM) } #endif } else { /* register */ rs2 = GET_FIELD(insn, 27, 31); #if defined(OPTIM) if (rs2) { #endif gen_movl_reg_T1(rs2); gen_op_add_T1_T0(); #if defined(OPTIM) } #endif } gen_op_restore(); gen_mov_pc_npc(dc); gen_op_movl_npc_T0(); dc->npc = DYNAMIC_PC; goto jmp_insn; #endif } else { rs1 = GET_FIELD(insn, 13, 17); gen_movl_reg_T0(rs1); if (IS_IMM) { /* immediate */ rs2 = GET_FIELDs(insn, 19, 31); #if defined(OPTIM) if (rs2) { #endif 
gen_movl_simm_T1(rs2); gen_op_add_T1_T0(); #if defined(OPTIM) } #endif } else { /* register */ rs2 = GET_FIELD(insn, 27, 31); #if defined(OPTIM) if (rs2) { #endif gen_movl_reg_T1(rs2); gen_op_add_T1_T0(); #if defined(OPTIM) } #endif } switch (xop) { case 0x38: /* jmpl */ { if (rd != 0) { #ifdef TARGET_SPARC64 if (dc->pc == (uint32_t)dc->pc) { gen_op_movl_T1_im(dc->pc); } else { gen_op_movq_T1_im64(dc->pc >> 32, dc->pc); } #else gen_op_movl_T1_im(dc->pc); #endif gen_movl_T1_reg(rd); } gen_mov_pc_npc(dc); gen_op_movl_npc_T0(); dc->npc = DYNAMIC_PC; } goto jmp_insn; #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64) case 0x39: /* rett, V9 return */ { if (!supervisor(dc)) goto priv_insn; gen_mov_pc_npc(dc); gen_op_movl_npc_T0(); dc->npc = DYNAMIC_PC; gen_op_rett(); } goto jmp_insn; #endif case 0x3b: /* flush */ gen_op_flush_T0(); break; case 0x3c: /* save */ save_state(dc); gen_op_save(); gen_movl_T0_reg(rd); break; case 0x3d: /* restore */ save_state(dc); gen_op_restore(); gen_movl_T0_reg(rd); break; #if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64) case 0x3e: /* V9 done/retry */ { switch (rd) { case 0: if (!supervisor(dc)) goto priv_insn; dc->npc = DYNAMIC_PC; dc->pc = DYNAMIC_PC; gen_op_done(); goto jmp_insn; case 1: if (!supervisor(dc)) goto priv_insn; dc->npc = DYNAMIC_PC; dc->pc = DYNAMIC_PC; gen_op_retry(); goto jmp_insn; default: } } break; #endif default: } } break; } break; case 3: /* load/store instructions */ { unsigned int xop = GET_FIELD(insn, 7, 12); rs1 = GET_FIELD(insn, 13, 17); gen_movl_reg_T0(rs1); if (IS_IMM) { /* immediate */ rs2 = GET_FIELDs(insn, 19, 31); #if defined(OPTIM) if (rs2 != 0) { #endif gen_movl_simm_T1(rs2); gen_op_add_T1_T0(); #if defined(OPTIM) } #endif } else { /* register */ rs2 = GET_FIELD(insn, 27, 31); #if defined(OPTIM) if (rs2 != 0) { #endif gen_movl_reg_T1(rs2); gen_op_add_T1_T0(); #if defined(OPTIM) } #endif } if (xop < 4 || (xop > 7 && xop < 0x14 && xop != 0x0e) || \ (xop > 0x17 && xop < 0x1d ) || \ (xop > 0x2c && xop < 0x33) || xop == 0x1f) { switch (xop) { case 0x0: /* load word */ gen_op_ldst(ld); break; case 0x1: /* load unsigned byte */ gen_op_ldst(ldub); break; case 0x2: /* load unsigned halfword */ gen_op_ldst(lduh); break; case 0x3: /* load double word */ gen_op_ldst(ldd); gen_movl_T0_reg(rd + 1); break; case 0x9: /* load signed byte */ gen_op_ldst(ldsb); break; case 0xa: /* load signed halfword */ gen_op_ldst(ldsh); break; case 0xd: /* ldstub -- XXX: should be atomically */ gen_op_ldst(ldstub); break; case 0x0f: /* swap register with memory. 
Also atomically */ gen_movl_reg_T1(rd); gen_op_ldst(swap); break; #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64) case 0x10: /* load word alternate */ #ifndef TARGET_SPARC64 if (IS_IMM) if (!supervisor(dc)) goto priv_insn; #endif gen_op_lda(insn, 1, 4, 0); break; case 0x11: /* load unsigned byte alternate */ #ifndef TARGET_SPARC64 if (IS_IMM) if (!supervisor(dc)) goto priv_insn; #endif gen_op_lduba(insn, 1, 1, 0); break; case 0x12: /* load unsigned halfword alternate */ #ifndef TARGET_SPARC64 if (IS_IMM) if (!supervisor(dc)) goto priv_insn; #endif gen_op_lduha(insn, 1, 2, 0); break; case 0x13: /* load double word alternate */ #ifndef TARGET_SPARC64 if (IS_IMM) if (!supervisor(dc)) goto priv_insn; #endif gen_op_ldda(insn, 1, 8, 0); gen_movl_T0_reg(rd + 1); break; case 0x19: /* load signed byte alternate */ #ifndef TARGET_SPARC64 if (IS_IMM) if (!supervisor(dc)) goto priv_insn; #endif gen_op_ldsba(insn, 1, 1, 1); break; case 0x1a: /* load signed halfword alternate */ #ifndef TARGET_SPARC64 if (IS_IMM) if (!supervisor(dc)) goto priv_insn; #endif gen_op_ldsha(insn, 1, 2 ,1); break; case 0x1d: /* ldstuba -- XXX: should be atomically */ #ifndef TARGET_SPARC64 if (IS_IMM) if (!supervisor(dc)) goto priv_insn; #endif gen_op_ldstuba(insn, 1, 1, 0); break; case 0x1f: /* swap reg with alt. memory. Also atomically */ #ifndef TARGET_SPARC64 if (IS_IMM) if (!supervisor(dc)) goto priv_insn; #endif gen_movl_reg_T1(rd); gen_op_swapa(insn, 1, 4, 0); break; #ifndef TARGET_SPARC64 case 0x30: /* ldc */ case 0x31: /* ldcsr */ case 0x33: /* lddc */ case 0x34: /* stc */ case 0x35: /* stcsr */ case 0x36: /* stdcq */ case 0x37: /* stdc */ goto ncp_insn; break; /* avoid warnings */ (void) &gen_op_stfa; (void) &gen_op_stdfa; (void) &gen_op_ldfa; (void) &gen_op_lddfa; #else #if !defined(CONFIG_USER_ONLY) (void) &gen_op_cas; (void) &gen_op_casx; #endif #endif #endif #ifdef TARGET_SPARC64 case 0x08: /* V9 ldsw */ gen_op_ldst(ldsw); break; case 0x0b: /* V9 ldx */ gen_op_ldst(ldx); break; case 0x18: /* V9 ldswa */ gen_op_ldswa(insn, 1, 4, 1); break; case 0x1b: /* V9 ldxa */ gen_op_ldxa(insn, 1, 8, 0); break; case 0x2d: /* V9 prefetch, no effect */ goto skip_move; case 0x30: /* V9 ldfa */ gen_op_ldfa(insn, 1, 8, 0); // XXX break; case 0x33: /* V9 lddfa */ gen_op_lddfa(insn, 1, 8, 0); // XXX break; case 0x3d: /* V9 prefetcha, no effect */ goto skip_move; case 0x32: /* V9 ldqfa */ goto nfpu_insn; #endif default: } gen_movl_T1_reg(rd); #ifdef TARGET_SPARC64 skip_move: ; #endif } else if (xop >= 0x20 && xop < 0x24) { if (gen_trap_ifnofpu(dc)) goto jmp_insn; switch (xop) { case 0x20: /* load fpreg */ gen_op_ldst(ldf); gen_op_store_FT0_fpr(rd); break; case 0x21: /* load fsr */ gen_op_ldst(ldf); gen_op_ldfsr(); break; case 0x22: /* load quad fpreg */ goto nfpu_insn; case 0x23: /* load double fpreg */ gen_op_ldst(lddf); gen_op_store_DT0_fpr(DFPREG(rd)); break; default: } } else if (xop < 8 || (xop >= 0x14 && xop < 0x18) || \ xop == 0xe || xop == 0x1e) { gen_movl_reg_T1(rd); switch (xop) { case 0x4: gen_op_ldst(st); break; case 0x5: gen_op_ldst(stb); break; case 0x6: gen_op_ldst(sth); break; case 0x7: flush_T2(dc); gen_movl_reg_T2(rd + 1); gen_op_ldst(std); break; #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64) case 0x14: #ifndef TARGET_SPARC64 if (IS_IMM) if (!supervisor(dc)) goto priv_insn; #endif gen_op_sta(insn, 0, 4, 0); break; case 0x15: #ifndef TARGET_SPARC64 if (IS_IMM) if (!supervisor(dc)) goto priv_insn; #endif gen_op_stba(insn, 0, 1, 0); break; case 0x16: #ifndef TARGET_SPARC64 if (IS_IMM) if 
(!supervisor(dc)) goto priv_insn; #endif gen_op_stha(insn, 0, 2, 0); break; case 0x17: #ifndef TARGET_SPARC64 if (IS_IMM) if (!supervisor(dc)) goto priv_insn; #endif flush_T2(dc); gen_movl_reg_T2(rd + 1); gen_op_stda(insn, 0, 8, 0); break; #endif #ifdef TARGET_SPARC64 case 0x0e: /* V9 stx */ gen_op_ldst(stx); break; case 0x1e: /* V9 stxa */ gen_op_stxa(insn, 0, 8, 0); // XXX break; #endif default: } } else if (xop > 0x23 && xop < 0x28) { if (gen_trap_ifnofpu(dc)) goto jmp_insn; switch (xop) { case 0x24: gen_op_load_fpr_FT0(rd); gen_op_ldst(stf); break; case 0x25: /* stfsr, V9 stxfsr */ gen_op_stfsr(); gen_op_ldst(stf); break; case 0x26: /* stdfq */ goto nfpu_insn; case 0x27: gen_op_load_fpr_DT0(DFPREG(rd)); gen_op_ldst(stdf); break; default: } } else if (xop > 0x33 && xop < 0x3f) { #ifdef TARGET_SPARC64 switch (xop) { case 0x34: /* V9 stfa */ gen_op_stfa(insn, 0, 0, 0); // XXX break; case 0x37: /* V9 stdfa */ gen_op_stdfa(insn, 0, 0, 0); // XXX break; case 0x3c: /* V9 casa */ gen_op_casa(insn, 0, 4, 0); // XXX break; case 0x3e: /* V9 casxa */ gen_op_casxa(insn, 0, 8, 0); // XXX break; case 0x36: /* V9 stqfa */ goto nfpu_insn; default: } #else #endif } else } break; } /* default case for non jump instructions */ if (dc->npc == DYNAMIC_PC) { dc->pc = DYNAMIC_PC; gen_op_next_insn(); } else if (dc->npc == JUMP_PC) { /* we can do a static jump */ gen_branch2(dc, (long)dc->tb, dc->jump_pc[0], dc->jump_pc[1]); dc->is_br = 1; } else { dc->pc = dc->npc; dc->npc = dc->npc + 4; } jmp_insn: return; illegal_insn: save_state(dc); gen_op_exception(TT_ILL_INSN); dc->is_br = 1; return; #if !defined(CONFIG_USER_ONLY) priv_insn: save_state(dc); gen_op_exception(TT_PRIV_INSN); dc->is_br = 1; return; #endif nfpu_insn: save_state(dc); gen_op_fpexception_im(FSR_FTT_UNIMPFPOP); dc->is_br = 1; return; #ifndef TARGET_SPARC64 ncp_insn: save_state(dc); gen_op_exception(TT_NCP_INSN); dc->is_br = 1; return; #endif }
16,923
FFmpeg
617814b4a717b38add5ccb8dd200dbb655f98f09
1
int av_buffersrc_buffer(AVFilterContext *ctx, AVFilterBufferRef *buf) { BufferSourceContext *s = ctx->priv; AVFrame *frame = NULL; AVBufferRef *dummy_buf = NULL; int ret = 0, planes, i; if (!buf) { s->eof = 1; return 0; } else if (s->eof) return AVERROR(EINVAL); frame = av_frame_alloc(); if (!frame) return AVERROR(ENOMEM); dummy_buf = av_buffer_create(NULL, 0, compat_free_buffer, buf, 0); if (!dummy_buf) { ret = AVERROR(ENOMEM); goto fail; } if ((ret = avfilter_copy_buf_props(frame, buf)) < 0) goto fail; #define WRAP_PLANE(ref_out, data, data_size) \ do { \ AVBufferRef *dummy_ref = av_buffer_ref(dummy_buf); \ if (!dummy_ref) { \ ret = AVERROR(ENOMEM); \ goto fail; \ } \ ref_out = av_buffer_create(data, data_size, compat_unref_buffer, \ dummy_ref, 0); \ if (!ref_out) { \ av_frame_unref(frame); \ ret = AVERROR(ENOMEM); \ goto fail; \ } \ } while (0) if (ctx->outputs[0]->type == AVMEDIA_TYPE_VIDEO) { const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format); planes = av_pix_fmt_count_planes(frame->format); if (!desc || planes <= 0) { ret = AVERROR(EINVAL); goto fail; } for (i = 0; i < planes; i++) { int v_shift = (i == 1 || i == 2) ? desc->log2_chroma_h : 0; int plane_size = (frame->height >> v_shift) * frame->linesize[i]; WRAP_PLANE(frame->buf[i], frame->data[i], plane_size); } } else { int planar = av_sample_fmt_is_planar(frame->format); int channels = av_get_channel_layout_nb_channels(frame->channel_layout); planes = planar ? channels : 1; if (planes > FF_ARRAY_ELEMS(frame->buf)) { frame->nb_extended_buf = planes - FF_ARRAY_ELEMS(frame->buf); frame->extended_buf = av_mallocz(sizeof(*frame->extended_buf) * frame->nb_extended_buf); if (!frame->extended_buf) { ret = AVERROR(ENOMEM); goto fail; } } for (i = 0; i < FFMIN(planes, FF_ARRAY_ELEMS(frame->buf)); i++) WRAP_PLANE(frame->buf[i], frame->extended_data[i], frame->linesize[0]); for (i = 0; i < planes - FF_ARRAY_ELEMS(frame->buf); i++) WRAP_PLANE(frame->extended_buf[i], frame->extended_data[i + FF_ARRAY_ELEMS(frame->buf)], frame->linesize[0]); } ret = av_buffersrc_add_frame(ctx, frame); fail: av_buffer_unref(&dummy_buf); av_frame_free(&frame); return ret; }
16,925
qemu
45fe15c25a5c9feea6e0f78434f5e9f632de9d94
1
void msi_uninit(struct PCIDevice *dev) { uint16_t flags = pci_get_word(dev->config + msi_flags_off(dev)); uint8_t cap_size = msi_cap_sizeof(flags); pci_del_capability(dev, PCI_CAP_ID_MSIX, cap_size); MSI_DEV_PRINTF(dev, "uninit\n"); }
16,926
qemu
15c2f669e3fb2bc97f7b42d1871f595c0ac24af8
1
Object *user_creatable_add(const QDict *qdict, Visitor *v, Error **errp) { char *type = NULL; char *id = NULL; Object *obj = NULL; Error *local_err = NULL, *end_err = NULL; QDict *pdict; pdict = qdict_clone_shallow(qdict); visit_start_struct(v, NULL, NULL, 0, &local_err); if (local_err) { goto out; } qdict_del(pdict, "qom-type"); visit_type_str(v, "qom-type", &type, &local_err); if (local_err) { goto out_visit; } qdict_del(pdict, "id"); visit_type_str(v, "id", &id, &local_err); if (local_err) { goto out_visit; } obj = user_creatable_add_type(type, id, pdict, v, &local_err); if (local_err) { goto out_visit; } out_visit: visit_end_struct(v, &end_err); if (end_err) { error_propagate(&local_err, end_err); if (obj) { user_creatable_del(id, NULL); } goto out; } out: QDECREF(pdict); g_free(id); g_free(type); if (local_err) { error_propagate(errp, local_err); object_unref(obj); return NULL; } return obj; }
16,927
FFmpeg
072be3e8969f24113d599444be4d6a0ed04a6602
1
int ff_h264_decode_seq_parameter_set(H264Context *h){ MpegEncContext * const s = &h->s; int profile_idc, level_idc, constraint_set_flags = 0; unsigned int sps_id; int i, log2_max_frame_num_minus4; SPS *sps; profile_idc= get_bits(&s->gb, 8); constraint_set_flags |= get_bits1(&s->gb) << 0; //constraint_set0_flag
constraint_set_flags |= get_bits1(&s->gb) << 1; //constraint_set1_flag
constraint_set_flags |= get_bits1(&s->gb) << 2; //constraint_set2_flag
constraint_set_flags |= get_bits1(&s->gb) << 3; //constraint_set3_flag
get_bits(&s->gb, 4); // reserved
level_idc= get_bits(&s->gb, 8); sps_id= get_ue_golomb_31(&s->gb); if(sps_id >= MAX_SPS_COUNT) { av_log(h->s.avctx, AV_LOG_ERROR, "sps_id (%d) out of range\n", sps_id); return -1; } sps= av_mallocz(sizeof(SPS)); if(sps == NULL) return -1; sps->time_offset_length = 24; sps->profile_idc= profile_idc; sps->constraint_set_flags = constraint_set_flags; sps->level_idc= level_idc; memset(sps->scaling_matrix4, 16, sizeof(sps->scaling_matrix4)); memset(sps->scaling_matrix8, 16, sizeof(sps->scaling_matrix8)); sps->scaling_matrix_present = 0; if(sps->profile_idc >= 100){ //high profile
sps->chroma_format_idc= get_ue_golomb_31(&s->gb); if(sps->chroma_format_idc > 3) { av_log(h->s.avctx, AV_LOG_ERROR, "chroma_format_idc (%u) out of range\n", sps->chroma_format_idc); goto fail; } else if(sps->chroma_format_idc == 3) { sps->residual_color_transform_flag = get_bits1(&s->gb); } sps->bit_depth_luma = get_ue_golomb(&s->gb) + 8; sps->bit_depth_chroma = get_ue_golomb(&s->gb) + 8; sps->transform_bypass = get_bits1(&s->gb); decode_scaling_matrices(h, sps, NULL, 1, sps->scaling_matrix4, sps->scaling_matrix8); }else{ sps->chroma_format_idc= 1; sps->bit_depth_luma = 8; sps->bit_depth_chroma = 8; } log2_max_frame_num_minus4 = get_ue_golomb(&s->gb); if (log2_max_frame_num_minus4 < MIN_LOG2_MAX_FRAME_NUM - 4 || log2_max_frame_num_minus4 > MAX_LOG2_MAX_FRAME_NUM - 4) { av_log(h->s.avctx, AV_LOG_ERROR, "log2_max_frame_num_minus4 out of range (0-12): %d\n", log2_max_frame_num_minus4); return AVERROR_INVALIDDATA; } sps->log2_max_frame_num = log2_max_frame_num_minus4 + 4; sps->poc_type= get_ue_golomb_31(&s->gb); if(sps->poc_type == 0){ //FIXME #define
sps->log2_max_poc_lsb= get_ue_golomb(&s->gb) + 4; } else if(sps->poc_type == 1){//FIXME #define
sps->delta_pic_order_always_zero_flag= get_bits1(&s->gb); sps->offset_for_non_ref_pic= get_se_golomb(&s->gb); sps->offset_for_top_to_bottom_field= get_se_golomb(&s->gb); sps->poc_cycle_length = get_ue_golomb(&s->gb); if((unsigned)sps->poc_cycle_length >= FF_ARRAY_ELEMS(sps->offset_for_ref_frame)){ av_log(h->s.avctx, AV_LOG_ERROR, "poc_cycle_length overflow %u\n", sps->poc_cycle_length); goto fail; } for(i=0; i<sps->poc_cycle_length; i++) sps->offset_for_ref_frame[i]= get_se_golomb(&s->gb); }else if(sps->poc_type != 2){ av_log(h->s.avctx, AV_LOG_ERROR, "illegal POC type %d\n", sps->poc_type); goto fail; } sps->ref_frame_count= get_ue_golomb_31(&s->gb); if(sps->ref_frame_count > MAX_PICTURE_COUNT-2 || sps->ref_frame_count >= 32U){ av_log(h->s.avctx, AV_LOG_ERROR, "too many reference frames\n"); goto fail; } sps->gaps_in_frame_num_allowed_flag= get_bits1(&s->gb); sps->mb_width = get_ue_golomb(&s->gb) + 1; sps->mb_height= get_ue_golomb(&s->gb) + 1; if((unsigned)sps->mb_width >= INT_MAX/16 || (unsigned)sps->mb_height >= INT_MAX/16 || av_image_check_size(16*sps->mb_width, 16*sps->mb_height, 0, h->s.avctx)){ av_log(h->s.avctx, AV_LOG_ERROR, "mb_width/height overflow\n"); goto fail; } sps->frame_mbs_only_flag= get_bits1(&s->gb); 
if(!sps->frame_mbs_only_flag) sps->mb_aff= get_bits1(&s->gb); else sps->mb_aff= 0; sps->direct_8x8_inference_flag= get_bits1(&s->gb); if(!sps->frame_mbs_only_flag && !sps->direct_8x8_inference_flag){ av_log(h->s.avctx, AV_LOG_ERROR, "This stream was generated by a broken encoder, invalid 8x8 inference\n"); goto fail; } #ifndef ALLOW_INTERLACE if(sps->mb_aff) av_log(h->s.avctx, AV_LOG_ERROR, "MBAFF support not included; enable it at compile-time.\n"); #endif sps->crop= get_bits1(&s->gb); if(sps->crop){ int crop_vertical_limit = sps->chroma_format_idc & 2 ? 16 : 8; int crop_horizontal_limit = sps->chroma_format_idc == 3 ? 16 : 8; sps->crop_left = get_ue_golomb(&s->gb); sps->crop_right = get_ue_golomb(&s->gb); sps->crop_top = get_ue_golomb(&s->gb); sps->crop_bottom= get_ue_golomb(&s->gb); if(sps->crop_left || sps->crop_top){ av_log(h->s.avctx, AV_LOG_ERROR, "insane cropping not completely supported, this could look slightly wrong ...\n"); } if(sps->crop_right >= crop_horizontal_limit || sps->crop_bottom >= crop_vertical_limit){ av_log(h->s.avctx, AV_LOG_ERROR, "brainfart cropping not supported, this could look slightly wrong ...\n"); } }else{ sps->crop_left = sps->crop_right = sps->crop_top = sps->crop_bottom= 0; } sps->vui_parameters_present_flag= get_bits1(&s->gb); if( sps->vui_parameters_present_flag ) if (decode_vui_parameters(h, sps) < 0) goto fail; if(!sps->sar.den) sps->sar.den= 1; if(s->avctx->debug&FF_DEBUG_PICT_INFO){ static const char csp[4][5] = { "Gray", "420", "422", "444" }; av_log(h->s.avctx, AV_LOG_DEBUG, "sps:%u profile:%d/%d poc:%d ref:%d %dx%d %s %s crop:%d/%d/%d/%d %s %s %d/%d\n", sps_id, sps->profile_idc, sps->level_idc, sps->poc_type, sps->ref_frame_count, sps->mb_width, sps->mb_height, sps->frame_mbs_only_flag ? "FRM" : (sps->mb_aff ? "MB-AFF" : "PIC-AFF"), sps->direct_8x8_inference_flag ? "8B8" : "", sps->crop_left, sps->crop_right, sps->crop_top, sps->crop_bottom, sps->vui_parameters_present_flag ? "VUI" : "", csp[sps->chroma_format_idc], sps->timing_info_present_flag ? sps->num_units_in_tick : 0, sps->timing_info_present_flag ? sps->time_scale : 0 ); } av_free(h->sps_buffers[sps_id]); h->sps_buffers[sps_id]= sps; h->sps = *sps; return 0; fail: av_free(sps); return -1; }
16,928
qemu
d9bce9d99f4656ae0b0127f7472db9067b8f84ab
1
void do_compute_hflags (CPUPPCState *env) { /* Compute current hflags */ env->hflags = (msr_pr << MSR_PR) | (msr_le << MSR_LE) | (msr_fp << MSR_FP) | (msr_fe0 << MSR_FE0) | (msr_fe1 << MSR_FE1) | (msr_vr << MSR_VR) | (msr_ap << MSR_AP) | (msr_sa << MSR_SA) | (msr_se << MSR_SE) | (msr_be << MSR_BE); #if defined (TARGET_PPC64) env->hflags |= (msr_sf << MSR_SF) | (msr_hv << MSR_HV); #endif }
16,929
qemu
efec3dd631d94160288392721a5f9c39e50fb2bc
1
static void bonito_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); k->init = bonito_initfn; k->vendor_id = 0xdf53; k->device_id = 0x00d5; k->revision = 0x01; k->class_id = PCI_CLASS_BRIDGE_HOST; dc->desc = "Host bridge"; dc->no_user = 1; dc->vmsd = &vmstate_bonito; }
16,931
qemu
365aa1131fa61815eb1d672df6ba451bfe7f2cea
1
static void kvm_apic_realize(DeviceState *dev, Error **errp) { APICCommonState *s = APIC_COMMON(dev); memory_region_init_io(&s->io_memory, NULL, &kvm_apic_io_ops, s, "kvm-apic-msi", APIC_SPACE_SIZE); if (kvm_has_gsi_routing()) { msi_nonbroken = true; } }
16,934
qemu
f4f0d391b26afcce86df85566788be7170127116
1
static uint64_t *l2_allocate(BlockDriverState *bs, int l1_index) { BDRVQcowState *s = bs->opaque; int min_index; uint64_t old_l2_offset; uint64_t *l2_table, l2_offset; old_l2_offset = s->l1_table[l1_index]; /* allocate a new l2 entry */ l2_offset = qcow2_alloc_clusters(bs, s->l2_size * sizeof(uint64_t)); if (l2_offset < 0) { return NULL; } /* update the L1 entry */ s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED; if (write_l1_entry(s, l1_index) < 0) { return NULL; } /* allocate a new entry in the l2 cache */ min_index = l2_cache_new_entry(bs); l2_table = s->l2_cache + (min_index << s->l2_bits); if (old_l2_offset == 0) { /* if there was no old l2 table, clear the new table */ memset(l2_table, 0, s->l2_size * sizeof(uint64_t)); } else { /* if there was an old l2 table, read it from the disk */ if (bdrv_pread(s->hd, old_l2_offset, l2_table, s->l2_size * sizeof(uint64_t)) != s->l2_size * sizeof(uint64_t)) return NULL; } /* write the l2 table to the file */ if (bdrv_pwrite(s->hd, l2_offset, l2_table, s->l2_size * sizeof(uint64_t)) != s->l2_size * sizeof(uint64_t)) return NULL; /* update the l2 cache entry */ s->l2_cache_offsets[min_index] = l2_offset; s->l2_cache_counts[min_index] = 1; return l2_table; }
16,935
FFmpeg
5b4d026a030a775f0bd287e3a27188e8b5c9009f
1
static inline int op(uint8_t **dst, const uint8_t *dst_end, const uint8_t **buf, const uint8_t *buf_end, int pixel, int count, int *x, int width, int linesize) { int remaining = width - *x; while(count > 0) { int striplen = FFMIN(count, remaining); if (buf) { striplen = FFMIN(striplen, buf_end - *buf); if (*buf >= buf_end) goto exhausted; memcpy(*dst, *buf, striplen); *buf += striplen; } else if (pixel >= 0) memset(*dst, pixel, striplen); *dst += striplen; remaining -= striplen; count -= striplen; if (remaining <= 0) { *dst += linesize - width; remaining = width; } if (linesize > 0) { if (*dst >= dst_end) goto exhausted; } else { if (*dst <= dst_end) goto exhausted; } } *x = width - remaining; return 0; exhausted: *x = width - remaining; return 1; }
16,936
FFmpeg
c3390fd56cf55259ea7665ecea6c8aeddf56e2fc
1
static void ra144_encode_subblock(RA144Context *ractx, const int16_t *sblock_data, const int16_t *lpc_coefs, unsigned int rms, PutBitContext *pb) { float data[BLOCKSIZE] = { 0 }, work[LPC_ORDER + BLOCKSIZE]; float coefs[LPC_ORDER]; float zero[BLOCKSIZE], cba[BLOCKSIZE], cb1[BLOCKSIZE], cb2[BLOCKSIZE]; int16_t cba_vect[BLOCKSIZE]; int cba_idx, cb1_idx, cb2_idx, gain; int i, n; unsigned m[3]; float g[3]; float error, best_error; for (i = 0; i < LPC_ORDER; i++) { work[i] = ractx->curr_sblock[BLOCKSIZE + i]; coefs[i] = lpc_coefs[i] * (1/4096.0); } /** * Calculate the zero-input response of the LPC filter and subtract it from * input data. */ ff_celp_lp_synthesis_filterf(work + LPC_ORDER, coefs, data, BLOCKSIZE, LPC_ORDER); for (i = 0; i < BLOCKSIZE; i++) { zero[i] = work[LPC_ORDER + i]; data[i] = sblock_data[i] - zero[i]; } /** * Codebook search is performed without taking into account the contribution * of the previous subblock, since it has been just subtracted from input * data. */ memset(work, 0, LPC_ORDER * sizeof(*work)); cba_idx = adaptive_cb_search(ractx->adapt_cb, work + LPC_ORDER, coefs, data); if (cba_idx) { /** * The filtered vector from the adaptive codebook can be retrieved from * work, see implementation of adaptive_cb_search(). */ memcpy(cba, work + LPC_ORDER, sizeof(cba)); ff_copy_and_dup(cba_vect, ractx->adapt_cb, cba_idx + BLOCKSIZE / 2 - 1); m[0] = (ff_irms(cba_vect) * rms) >> 12; } fixed_cb_search(work + LPC_ORDER, coefs, data, cba_idx, &cb1_idx, &cb2_idx); for (i = 0; i < BLOCKSIZE; i++) { cb1[i] = ff_cb1_vects[cb1_idx][i]; cb2[i] = ff_cb2_vects[cb2_idx][i]; } ff_celp_lp_synthesis_filterf(work + LPC_ORDER, coefs, cb1, BLOCKSIZE, LPC_ORDER); memcpy(cb1, work + LPC_ORDER, sizeof(cb1)); m[1] = (ff_cb1_base[cb1_idx] * rms) >> 8; ff_celp_lp_synthesis_filterf(work + LPC_ORDER, coefs, cb2, BLOCKSIZE, LPC_ORDER); memcpy(cb2, work + LPC_ORDER, sizeof(cb2)); m[2] = (ff_cb2_base[cb2_idx] * rms) >> 8; best_error = FLT_MAX; gain = 0; for (n = 0; n < 256; n++) { g[1] = ((ff_gain_val_tab[n][1] * m[1]) >> ff_gain_exp_tab[n]) * (1/4096.0); g[2] = ((ff_gain_val_tab[n][2] * m[2]) >> ff_gain_exp_tab[n]) * (1/4096.0); error = 0; if (cba_idx) { g[0] = ((ff_gain_val_tab[n][0] * m[0]) >> ff_gain_exp_tab[n]) * (1/4096.0); for (i = 0; i < BLOCKSIZE; i++) { data[i] = zero[i] + g[0] * cba[i] + g[1] * cb1[i] + g[2] * cb2[i]; error += (data[i] - sblock_data[i]) * (data[i] - sblock_data[i]); } } else { for (i = 0; i < BLOCKSIZE; i++) { data[i] = zero[i] + g[1] * cb1[i] + g[2] * cb2[i]; error += (data[i] - sblock_data[i]) * (data[i] - sblock_data[i]); } } if (error < best_error) { best_error = error; gain = n; } } put_bits(pb, 7, cba_idx); put_bits(pb, 8, gain); put_bits(pb, 7, cb1_idx); put_bits(pb, 7, cb2_idx); ff_subblock_synthesis(ractx, lpc_coefs, cba_idx, cb1_idx, cb2_idx, rms, gain); }
16,937
FFmpeg
4691a77db4672026d62d524fd292fb17db6514b4
1
build_qp_table(PPS *pps, int index) { int i; for(i = 0; i < 255; i++) pps->chroma_qp_table[i & 0xff] = chroma_qp[av_clip(i + index, 0, 51)]; pps->chroma_qp_index_offset = index; }
16,938
qemu
e3cffe6fad29e07d401eabb913a6d88501d5c143
1
static target_ulong h_bulk_remove(PowerPCCPU *cpu, sPAPRMachineState *spapr, target_ulong opcode, target_ulong *args) { CPUPPCState *env = &cpu->env; int i; target_ulong rc = H_SUCCESS; for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) { target_ulong *tsh = &args[i*2]; target_ulong tsl = args[i*2 + 1]; target_ulong v, r, ret; if ((*tsh & H_BULK_REMOVE_TYPE) == H_BULK_REMOVE_END) { break; } else if ((*tsh & H_BULK_REMOVE_TYPE) != H_BULK_REMOVE_REQUEST) { return H_PARAMETER; } *tsh &= H_BULK_REMOVE_PTEX | H_BULK_REMOVE_FLAGS; *tsh |= H_BULK_REMOVE_RESPONSE; if ((*tsh & H_BULK_REMOVE_ANDCOND) && (*tsh & H_BULK_REMOVE_AVPN)) { *tsh |= H_BULK_REMOVE_PARM; return H_PARAMETER; } ret = remove_hpte(cpu, *tsh & H_BULK_REMOVE_PTEX, tsl, (*tsh & H_BULK_REMOVE_FLAGS) >> 26, &v, &r); *tsh |= ret << 60; switch (ret) { case REMOVE_SUCCESS: *tsh |= (r & (HPTE64_R_C | HPTE64_R_R)) << 43; break; case REMOVE_PARM: rc = H_PARAMETER; goto exit; case REMOVE_HW: rc = H_HARDWARE; goto exit; } } exit: check_tlb_flush(env); return rc; }
16,939
qemu
34d5e948e8a0d0d3a37801a418475a8632ce0891
1
static void cpu_notify_map_clients(void) { MapClient *client; while (!LIST_EMPTY(&map_client_list)) { client = LIST_FIRST(&map_client_list); client->callback(client->opaque); LIST_REMOVE(client, link); } }
16,940
FFmpeg
80aa89bdff6e3e9dd6bc58d806db0cbe99403149
0
static int asf_read_stream_properties(AVFormatContext *s, int64_t size) { ASFContext *asf = s->priv_data; AVIOContext *pb = s->pb; AVStream *st; ASFStream *asf_st; ff_asf_guid g; enum AVMediaType type; int type_specific_size, sizeX; unsigned int tag1; int64_t pos1, pos2, start_time; int test_for_ext_stream_audio, is_dvr_ms_audio=0; if (s->nb_streams == ASF_MAX_STREAMS) { av_log(s, AV_LOG_ERROR, "too many streams\n"); return AVERROR(EINVAL); } pos1 = avio_tell(pb); st = avformat_new_stream(s, NULL); if (!st) return AVERROR(ENOMEM); avpriv_set_pts_info(st, 32, 1, 1000); /* 32 bit pts in ms */ asf_st = av_mallocz(sizeof(ASFStream)); if (!asf_st) return AVERROR(ENOMEM); st->priv_data = asf_st; start_time = asf->hdr.preroll; asf_st->stream_language_index = 128; // invalid stream index means no language info if(!(asf->hdr.flags & 0x01)) { // if we aren't streaming... int64_t fsize = avio_size(pb); if (fsize <= 0 || (int64_t)asf->hdr.file_size <= 0 || FFABS(fsize - (int64_t)asf->hdr.file_size) < 10000) st->duration = asf->hdr.play_time / (10000000 / 1000) - start_time; } ff_get_guid(pb, &g); test_for_ext_stream_audio = 0; if (!ff_guidcmp(&g, &ff_asf_audio_stream)) { type = AVMEDIA_TYPE_AUDIO; } else if (!ff_guidcmp(&g, &ff_asf_video_stream)) { type = AVMEDIA_TYPE_VIDEO; } else if (!ff_guidcmp(&g, &ff_asf_jfif_media)) { type = AVMEDIA_TYPE_VIDEO; st->codec->codec_id = AV_CODEC_ID_MJPEG; } else if (!ff_guidcmp(&g, &ff_asf_command_stream)) { type = AVMEDIA_TYPE_DATA; } else if (!ff_guidcmp(&g, &ff_asf_ext_stream_embed_stream_header)) { test_for_ext_stream_audio = 1; type = AVMEDIA_TYPE_UNKNOWN; } else { return -1; } ff_get_guid(pb, &g); avio_skip(pb, 8); /* total_size */ type_specific_size = avio_rl32(pb); avio_rl32(pb); st->id = avio_rl16(pb) & 0x7f; /* stream id */ // mapping of asf ID to AV stream ID; asf->asfid2avid[st->id] = s->nb_streams - 1; avio_rl32(pb); if (test_for_ext_stream_audio) { ff_get_guid(pb, &g); if (!ff_guidcmp(&g, &ff_asf_ext_stream_audio_stream)) { type = AVMEDIA_TYPE_AUDIO; is_dvr_ms_audio=1; ff_get_guid(pb, &g); avio_rl32(pb); avio_rl32(pb); avio_rl32(pb); ff_get_guid(pb, &g); avio_rl32(pb); } } st->codec->codec_type = type; if (type == AVMEDIA_TYPE_AUDIO) { int ret = ff_get_wav_header(pb, st->codec, type_specific_size); if (ret < 0) return ret; if (is_dvr_ms_audio) { // codec_id and codec_tag are unreliable in dvr_ms // files. Set them later by probing stream. st->request_probe= 1; st->codec->codec_tag = 0; } if (st->codec->codec_id == AV_CODEC_ID_AAC) { st->need_parsing = AVSTREAM_PARSE_NONE; } else { st->need_parsing = AVSTREAM_PARSE_FULL; } /* We have to init the frame size at some point .... 
*/ pos2 = avio_tell(pb); if (size >= (pos2 + 8 - pos1 + 24)) { asf_st->ds_span = avio_r8(pb); asf_st->ds_packet_size = avio_rl16(pb); asf_st->ds_chunk_size = avio_rl16(pb); avio_rl16(pb); //ds_data_size avio_r8(pb); //ds_silence_data } if (asf_st->ds_span > 1) { if (!asf_st->ds_chunk_size || (asf_st->ds_packet_size/asf_st->ds_chunk_size <= 1) || asf_st->ds_packet_size % asf_st->ds_chunk_size) asf_st->ds_span = 0; // disable descrambling } } else if (type == AVMEDIA_TYPE_VIDEO && size - (avio_tell(pb) - pos1 + 24) >= 51) { avio_rl32(pb); avio_rl32(pb); avio_r8(pb); avio_rl16(pb); /* size */ sizeX= avio_rl32(pb); /* size */ st->codec->width = avio_rl32(pb); st->codec->height = avio_rl32(pb); /* not available for asf */ avio_rl16(pb); /* panes */ st->codec->bits_per_coded_sample = avio_rl16(pb); /* depth */ tag1 = avio_rl32(pb); avio_skip(pb, 20); if (sizeX > 40) { st->codec->extradata_size = sizeX - 40; st->codec->extradata = av_mallocz(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE); avio_read(pb, st->codec->extradata, st->codec->extradata_size); } /* Extract palette from extradata if bpp <= 8 */ /* This code assumes that extradata contains only palette */ /* This is true for all paletted codecs implemented in libavcodec */ if (st->codec->extradata_size && (st->codec->bits_per_coded_sample <= 8)) { #if HAVE_BIGENDIAN int i; for (i = 0; i < FFMIN(st->codec->extradata_size, AVPALETTE_SIZE)/4; i++) asf_st->palette[i] = av_bswap32(((uint32_t*)st->codec->extradata)[i]); #else memcpy(asf_st->palette, st->codec->extradata, FFMIN(st->codec->extradata_size, AVPALETTE_SIZE)); #endif asf_st->palette_changed = 1; } st->codec->codec_tag = tag1; st->codec->codec_id = ff_codec_get_id(ff_codec_bmp_tags, tag1); if(tag1 == MKTAG('D', 'V', 'R', ' ')){ st->need_parsing = AVSTREAM_PARSE_FULL; // issue658 containse wrong w/h and MS even puts a fake seq header with wrong w/h in extradata while a correct one is in te stream. maximum lameness st->codec->width = st->codec->height = 0; av_freep(&st->codec->extradata); st->codec->extradata_size=0; } if(st->codec->codec_id == AV_CODEC_ID_H264) st->need_parsing = AVSTREAM_PARSE_FULL_ONCE; } pos2 = avio_tell(pb); avio_skip(pb, size - (pos2 - pos1 + 24)); return 0; }
16,941
qemu
32b089808f125470b3563bf4209c2301fa35c58e
0
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr) { int mmu_idx, page_index, pd; void *p; MemoryRegion *mr; page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); mmu_idx = cpu_mmu_index(env1); if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code != (addr & TARGET_PAGE_MASK))) { #ifdef CONFIG_TCG_PASS_AREG0 cpu_ldub_code(env1, addr); #else ldub_code(addr); #endif } pd = env1->iotlb[mmu_idx][page_index] & ~TARGET_PAGE_MASK; mr = iotlb_to_region(pd); if (mr != &io_mem_ram && mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device) { #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_SPARC) cpu_unassigned_access(env1, addr, 0, 1, 0, 4); #else cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr); #endif } p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend); return qemu_ram_addr_from_host_nofail(p); }
16,944
qemu
e81a982aa5398269a2cc344091ffa4930bdd242f
0
void cpu_ppc_store_decr (CPUPPCState *env, uint32_t value) { PowerPCCPU *cpu = ppc_env_get_cpu(env); _cpu_ppc_store_decr(cpu, cpu_ppc_load_decr(env), value, 0); }
16,945
qemu
8a805c222caa0e20bf11d2267f726d0bb5917d94
0
static void test_submit_co(void) { WorkerTestData data; Coroutine *co = qemu_coroutine_create(co_test_cb); qemu_coroutine_enter(co, &data); /* Back here once the worker has started. */ g_assert_cmpint(active, ==, 1); g_assert_cmpint(data.ret, ==, -EINPROGRESS); /* qemu_aio_flush will execute the rest of the coroutine. */ qemu_aio_flush(); /* Back here after the coroutine has finished. */ g_assert_cmpint(active, ==, 0); g_assert_cmpint(data.ret, ==, 0); }
16,946
FFmpeg
dddc9b7a8ec3a03e48c69991ca7f20f10dd6f022
0
static int decode_profile_tier_level(HEVCContext *s, ProfileTierLevel *ptl) { int i; HEVCLocalContext *lc = s->HEVClc; GetBitContext *gb = &lc->gb; ptl->profile_space = get_bits(gb, 2); ptl->tier_flag = get_bits1(gb); ptl->profile_idc = get_bits(gb, 5); if (ptl->profile_idc == 1) av_log(s->avctx, AV_LOG_DEBUG, "Main profile bitstream\n"); else if (ptl->profile_idc == 2) av_log(s->avctx, AV_LOG_DEBUG, "Main10 profile bitstream\n"); else av_log(s->avctx, AV_LOG_WARNING, "No profile indication! (%d)\n", ptl->profile_idc); for (i = 0; i < 32; i++) ptl->profile_compatibility_flag[i] = get_bits1(gb); ptl->progressive_source_flag = get_bits1(gb); ptl->interlaced_source_flag = get_bits1(gb); ptl->non_packed_constraint_flag = get_bits1(gb); ptl->frame_only_constraint_flag = get_bits1(gb); if (get_bits(gb, 16) != 0) // XXX_reserved_zero_44bits[0..15]
return -1; if (get_bits(gb, 16) != 0) // XXX_reserved_zero_44bits[16..31]
return -1; if (get_bits(gb, 12) != 0) // XXX_reserved_zero_44bits[32..43]
return -1; return 0; }
16,949
qemu
e1833e1f96456fd8fc17463246fe0b2050e68efb
0
int cpu_ppc_handle_mmu_fault (CPUState *env, target_ulong address, int rw, int is_user, int is_softmmu) { mmu_ctx_t ctx; int exception = 0, error_code = 0; int access_type; int ret = 0; if (rw == 2) { /* code access */ rw = 0; access_type = ACCESS_CODE; } else { /* data access */ /* XXX: put correct access by using cpu_restore_state() correctly */ access_type = ACCESS_INT; // access_type = env->access_type; } ret = get_physical_address(env, &ctx, address, rw, access_type, 1); if (ret == 0) { ret = tlb_set_page(env, address & TARGET_PAGE_MASK, ctx.raddr & TARGET_PAGE_MASK, ctx.prot, is_user, is_softmmu); } else if (ret < 0) { #if defined (DEBUG_MMU) if (loglevel != 0) cpu_dump_state(env, logfile, fprintf, 0); #endif if (access_type == ACCESS_CODE) { exception = EXCP_ISI; switch (ret) { case -1: /* No matches in page tables or TLB */ switch (env->mmu_model) { case POWERPC_MMU_SOFT_6xx: exception = EXCP_I_TLBMISS; env->spr[SPR_IMISS] = address; env->spr[SPR_ICMP] = 0x80000000 | ctx.ptem; error_code = 1 << 18; goto tlb_miss; case POWERPC_MMU_SOFT_4xx: case POWERPC_MMU_SOFT_4xx_Z: exception = EXCP_40x_ITLBMISS; error_code = 0; env->spr[SPR_40x_DEAR] = address; env->spr[SPR_40x_ESR] = 0x00000000; break; case POWERPC_MMU_32B: error_code = 0x40000000; break; #if defined(TARGET_PPC64) case POWERPC_MMU_64B: /* XXX: TODO */ cpu_abort(env, "MMU model not implemented\n"); return -1; case POWERPC_MMU_64BRIDGE: /* XXX: TODO */ cpu_abort(env, "MMU model not implemented\n"); return -1; #endif case POWERPC_MMU_601: /* XXX: TODO */ cpu_abort(env, "MMU model not implemented\n"); return -1; case POWERPC_MMU_BOOKE: /* XXX: TODO */ cpu_abort(env, "MMU model not implemented\n"); return -1; case POWERPC_MMU_BOOKE_FSL: /* XXX: TODO */ cpu_abort(env, "MMU model not implemented\n"); return -1; case POWERPC_MMU_REAL_4xx: cpu_abort(env, "PowerPC 401 should never raise any MMU " "exceptions\n"); return -1; default: cpu_abort(env, "Unknown or invalid MMU model\n"); return -1; } break; case -2: /* Access rights violation */ error_code = 0x08000000; break; case -3: /* No execute protection violation */ error_code = 0x10000000; break; case -4: /* Direct store exception */ /* No code fetch is allowed in direct-store areas */ error_code = 0x10000000; break; case -5: /* No match in segment table */ exception = EXCP_ISEG; error_code = 0; break; } } else { exception = EXCP_DSI; switch (ret) { case -1: /* No matches in page tables or TLB */ switch (env->mmu_model) { case POWERPC_MMU_SOFT_6xx: if (rw == 1) { exception = EXCP_DS_TLBMISS; error_code = 1 << 16; } else { exception = EXCP_DL_TLBMISS; error_code = 0; } env->spr[SPR_DMISS] = address; env->spr[SPR_DCMP] = 0x80000000 | ctx.ptem; tlb_miss: error_code |= ctx.key << 19; env->spr[SPR_HASH1] = ctx.pg_addr[0]; env->spr[SPR_HASH2] = ctx.pg_addr[1]; /* Do not alter DAR nor DSISR */ goto out; case POWERPC_MMU_SOFT_4xx: case POWERPC_MMU_SOFT_4xx_Z: exception = EXCP_40x_DTLBMISS; error_code = 0; env->spr[SPR_40x_DEAR] = address; if (rw) env->spr[SPR_40x_ESR] = 0x00800000; else env->spr[SPR_40x_ESR] = 0x00000000; break; case POWERPC_MMU_32B: error_code = 0x40000000; break; #if defined(TARGET_PPC64) case POWERPC_MMU_64B: /* XXX: TODO */ cpu_abort(env, "MMU model not implemented\n"); return -1; case POWERPC_MMU_64BRIDGE: /* XXX: TODO */ cpu_abort(env, "MMU model not implemented\n"); return -1; #endif case POWERPC_MMU_601: /* XXX: TODO */ cpu_abort(env, "MMU model not implemented\n"); return -1; case POWERPC_MMU_BOOKE: /* XXX: TODO */ cpu_abort(env, "MMU model not implemented\n"); return 
-1; case POWERPC_MMU_BOOKE_FSL: /* XXX: TODO */ cpu_abort(env, "MMU model not implemented\n"); return -1; case POWERPC_MMU_REAL_4xx: cpu_abort(env, "PowerPC 401 should never raise any MMU " "exceptions\n"); return -1; default: cpu_abort(env, "Unknown or invalid MMU model\n"); return -1; } break; case -2: /* Access rights violation */ error_code = 0x08000000; break; case -4: /* Direct store exception */ switch (access_type) { case ACCESS_FLOAT: /* Floating point load/store */ exception = EXCP_ALIGN; error_code = EXCP_ALIGN_FP; break; case ACCESS_RES: /* lwarx, ldarx or srwcx. */ error_code = 0x04000000; break; case ACCESS_EXT: /* eciwx or ecowx */ error_code = 0x04100000; break; default: printf("DSI: invalid exception (%d)\n", ret); exception = EXCP_PROGRAM; error_code = EXCP_INVAL | EXCP_INVAL_INVAL; break; } break; case -5: /* No match in segment table */ exception = EXCP_DSEG; error_code = 0; break; } if (exception == EXCP_DSI && rw == 1) error_code |= 0x02000000; /* Store fault address */ env->spr[SPR_DAR] = address; env->spr[SPR_DSISR] = error_code; } out: #if 0 printf("%s: set exception to %d %02x\n", __func__, exception, error_code); #endif env->exception_index = exception; env->error_code = error_code; ret = 1; } return ret; }
16,950
qemu
3e9418e160cd8901c83a3c88967158084f5b5c03
0
static int qemu_signal_init(void) { int sigfd; sigset_t set; /* * SIG_IPI must be blocked in the main thread and must not be caught * by sigwait() in the signal thread. Otherwise, the cpu thread will * not catch it reliably. */ sigemptyset(&set); sigaddset(&set, SIG_IPI); sigaddset(&set, SIGIO); sigaddset(&set, SIGALRM); sigaddset(&set, SIGBUS); sigaddset(&set, SIGINT); sigaddset(&set, SIGHUP); sigaddset(&set, SIGTERM); pthread_sigmask(SIG_BLOCK, &set, NULL); sigdelset(&set, SIG_IPI); sigfd = qemu_signalfd(&set); if (sigfd == -1) { fprintf(stderr, "failed to create signalfd\n"); return -errno; } fcntl_setfl(sigfd, O_NONBLOCK); qemu_set_fd_handler2(sigfd, NULL, sigfd_handler, NULL, (void *)(intptr_t)sigfd); return 0; }
16,951
qemu
72cf2d4f0e181d0d3a3122e04129c58a95da713e
0
struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *env, target_ulong pc) { struct kvm_sw_breakpoint *bp; TAILQ_FOREACH(bp, &env->kvm_state->kvm_sw_breakpoints, entry) { if (bp->pc == pc) return bp; } return NULL; }
16,952
qemu
72cf2d4f0e181d0d3a3122e04129c58a95da713e
0
static inline void gen_intermediate_code_internal(TranslationBlock * tb, int spc, CPUSPARCState *env) { target_ulong pc_start, last_pc; uint16_t *gen_opc_end; DisasContext dc1, *dc = &dc1; CPUBreakpoint *bp; int j, lj = -1; int num_insns; int max_insns; memset(dc, 0, sizeof(DisasContext)); dc->tb = tb; pc_start = tb->pc; dc->pc = pc_start; last_pc = dc->pc; dc->npc = (target_ulong) tb->cs_base; dc->cc_op = CC_OP_DYNAMIC; dc->mem_idx = cpu_mmu_index(env); dc->def = env->def; if ((dc->def->features & CPU_FEATURE_FLOAT)) dc->fpu_enabled = cpu_fpu_enabled(env); else dc->fpu_enabled = 0; #ifdef TARGET_SPARC64 dc->address_mask_32bit = env->pstate & PS_AM; #endif gen_opc_end = gen_opc_buf + OPC_MAX_SIZE; cpu_tmp0 = tcg_temp_new(); cpu_tmp32 = tcg_temp_new_i32(); cpu_tmp64 = tcg_temp_new_i64(); cpu_dst = tcg_temp_local_new(); // loads and stores
cpu_val = tcg_temp_local_new(); cpu_addr = tcg_temp_local_new(); num_insns = 0; max_insns = tb->cflags & CF_COUNT_MASK; if (max_insns == 0) max_insns = CF_COUNT_MASK; gen_icount_start(); do { if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) { TAILQ_FOREACH(bp, &env->breakpoints, entry) { if (bp->pc == dc->pc) { if (dc->pc != pc_start) save_state(dc, cpu_cond); gen_helper_debug(); tcg_gen_exit_tb(0); dc->is_br = 1; goto exit_gen_loop; } } } if (spc) { qemu_log("Search PC...\n"); j = gen_opc_ptr - gen_opc_buf; if (lj < j) { lj++; while (lj < j) gen_opc_instr_start[lj++] = 0; gen_opc_pc[lj] = dc->pc; gen_opc_npc[lj] = dc->npc; gen_opc_instr_start[lj] = 1; gen_opc_icount[lj] = num_insns; } } if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) gen_io_start(); last_pc = dc->pc; disas_sparc_insn(dc); num_insns++; if (dc->is_br) break; /* if the next PC is different, we abort now */ if (dc->pc != (last_pc + 4)) break; /* if we reach a page boundary, we stop generation so that the PC of a TT_TFAULT exception is always in the right page */ if ((dc->pc & (TARGET_PAGE_SIZE - 1)) == 0) break; /* if single step mode, we generate only one instruction and generate an exception */ if (env->singlestep_enabled || singlestep) { tcg_gen_movi_tl(cpu_pc, dc->pc); tcg_gen_exit_tb(0); break; } } while ((gen_opc_ptr < gen_opc_end) && (dc->pc - pc_start) < (TARGET_PAGE_SIZE - 32) && num_insns < max_insns); exit_gen_loop: tcg_temp_free(cpu_addr); tcg_temp_free(cpu_val); tcg_temp_free(cpu_dst); tcg_temp_free_i64(cpu_tmp64); tcg_temp_free_i32(cpu_tmp32); tcg_temp_free(cpu_tmp0); if (tb->cflags & CF_LAST_IO) gen_io_end(); if (!dc->is_br) { if (dc->pc != DYNAMIC_PC && (dc->npc != DYNAMIC_PC && dc->npc != JUMP_PC)) { /* static PC and NPC: we can use direct chaining */ gen_goto_tb(dc, 0, dc->pc, dc->npc); } else { if (dc->pc != DYNAMIC_PC) tcg_gen_movi_tl(cpu_pc, dc->pc); save_npc(dc, cpu_cond); tcg_gen_exit_tb(0); } } gen_icount_end(tb, num_insns); *gen_opc_ptr = INDEX_op_end; if (spc) { j = gen_opc_ptr - gen_opc_buf; lj++; while (lj <= j) gen_opc_instr_start[lj++] = 0; #if 0 log_page_dump(); #endif gen_opc_jump_pc[0] = dc->jump_pc[0]; gen_opc_jump_pc[1] = dc->jump_pc[1]; } else { tb->size = last_pc + 4 - pc_start; tb->icount = num_insns; } #ifdef DEBUG_DISAS if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) { qemu_log("--------------\n"); qemu_log("IN: %s\n", lookup_symbol(pc_start)); log_target_disas(pc_start, last_pc + 4 - pc_start, 0); qemu_log("\n"); } #endif }
16,953
qemu
b3db211f3c80bb996a704d665fe275619f728bd4
0
static void test_visitor_in_native_list_uint64(TestInputVisitorData *data, const void *unused) { test_native_list_integer_helper(data, unused, USER_DEF_NATIVE_LIST_UNION_KIND_U64); }
16,954
qemu
37f51384ae05bd50f83308339dbffa3e78404874
0
static int vtd_page_walk_level(dma_addr_t addr, uint64_t start, uint64_t end, vtd_page_walk_hook hook_fn, void *private, uint32_t level, bool read, bool write, bool notify_unmap) { bool read_cur, write_cur, entry_valid; uint32_t offset; uint64_t slpte; uint64_t subpage_size, subpage_mask; IOMMUTLBEntry entry; uint64_t iova = start; uint64_t iova_next; int ret = 0; trace_vtd_page_walk_level(addr, level, start, end); subpage_size = 1ULL << vtd_slpt_level_shift(level); subpage_mask = vtd_slpt_level_page_mask(level); while (iova < end) { iova_next = (iova & subpage_mask) + subpage_size; offset = vtd_iova_level_offset(iova, level); slpte = vtd_get_slpte(addr, offset); if (slpte == (uint64_t)-1) { trace_vtd_page_walk_skip_read(iova, iova_next); goto next; } if (vtd_slpte_nonzero_rsvd(slpte, level)) { trace_vtd_page_walk_skip_reserve(iova, iova_next); goto next; } /* Permissions are stacked with parents' */ read_cur = read && (slpte & VTD_SL_R); write_cur = write && (slpte & VTD_SL_W); /* * As long as we have either read/write permission, this is a * valid entry. The rule works for both page entries and page * table entries. */ entry_valid = read_cur | write_cur; if (vtd_is_last_slpte(slpte, level)) { entry.target_as = &address_space_memory; entry.iova = iova & subpage_mask; /* NOTE: this is only meaningful if entry_valid == true */ entry.translated_addr = vtd_get_slpte_addr(slpte); entry.addr_mask = ~subpage_mask; entry.perm = IOMMU_ACCESS_FLAG(read_cur, write_cur); if (!entry_valid && !notify_unmap) { trace_vtd_page_walk_skip_perm(iova, iova_next); goto next; } trace_vtd_page_walk_one(level, entry.iova, entry.translated_addr, entry.addr_mask, entry.perm); if (hook_fn) { ret = hook_fn(&entry, private); if (ret < 0) { return ret; } } } else { if (!entry_valid) { trace_vtd_page_walk_skip_perm(iova, iova_next); goto next; } ret = vtd_page_walk_level(vtd_get_slpte_addr(slpte), iova, MIN(iova_next, end), hook_fn, private, level - 1, read_cur, write_cur, notify_unmap); if (ret < 0) { return ret; } } next: iova = iova_next; } return 0; }
16,955
qemu
9be385980d37e8f4fd33f605f5fb1c3d144170a8
0
static int kvm_virtio_pci_irqfd_use(VirtIOPCIProxy *proxy, unsigned int queue_no, unsigned int vector) { VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector]; VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); VirtQueue *vq = virtio_get_queue(vdev, queue_no); EventNotifier *n = virtio_queue_get_guest_notifier(vq); int ret; ret = kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL, irqfd->virq); return ret; }
16,956