Dataset schema (per-record fields):

  project     string   2 distinct values (the records below use "qemu" and "FFmpeg")
  commit_id   string   length exactly 40 (a full git commit hash)
  target      int64    values 0 or 1
  func        string   length 26 to 142k characters (one C function, flattened to a single line)
  idx         int64    values 0 to 27.3k (record index)

Each record below lists these five fields in order: project, commit_id, target, func, idx.
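For working with these records programmatically, the following minimal sketch reads a local JSON Lines export of the table and tallies records per project and per label. The file name functions.jsonl and the JSON Lines serialization are assumptions for illustration, not part of this listing; any export that carries the five fields above works the same way.

import json
from collections import Counter

def load_records(path):
    # One JSON object per line, carrying the five fields described above:
    # project, commit_id, target, func, idx.
    with open(path, encoding="utf-8") as f:
        for line in f:
            yield json.loads(line)

# Tally records per project ("qemu" / "FFmpeg") and per target label (0 / 1).
projects = Counter()
labels = Counter()
for rec in load_records("functions.jsonl"):  # hypothetical file name
    projects[rec["project"]] += 1
    labels[rec["target"]] += 1
print(projects)
print(labels)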
qemu
e268ca52328eb0460ae0d10b7f4313a63d5b000c
0
static BlockDriverAIOCB *qcow_aio_readv(BlockDriverState *bs, int64_t sector_num, QEMUIOVector *qiov, int nb_sectors, BlockDriverCompletionFunc *cb, void *opaque) { QCowAIOCB *acb; acb = qemu_aio_get(bs, cb, opaque); if (!acb) return NULL; acb->hd_aiocb = NULL; acb->sector_num = sector_num; acb->qiov = qiov; if (qiov->niov > 1) acb->buf = acb->orig_buf = qemu_memalign(512, qiov->size); else acb->buf = (uint8_t *)qiov->iov->iov_base; acb->nb_sectors = nb_sectors; acb->n = 0; acb->cluster_offset = 0; qcow_aio_read_cb(acb, 0); return &acb->common; }
19,254
qemu
2fb50a33401a2415b71ddc291e8a77bcd2f9e547
0
void armv7m_nvic_set_pending(void *opaque, int irq) { NVICState *s = (NVICState *)opaque; VecInfo *vec; assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq); vec = &s->vectors[irq]; trace_nvic_set_pending(irq, vec->enabled, vec->prio); if (irq >= ARMV7M_EXCP_HARD && irq < ARMV7M_EXCP_PENDSV) { /* If a synchronous exception is pending then it may be * escalated to HardFault if: * * it is equal or lower priority to current execution * * it is disabled * (ie we need to take it immediately but we can't do so). * Asynchronous exceptions (and interrupts) simply remain pending. * * For QEMU, we don't have any imprecise (asynchronous) faults, * so we can assume that PREFETCH_ABORT and DATA_ABORT are always * synchronous. * Debug exceptions are awkward because only Debug exceptions * resulting from the BKPT instruction should be escalated, * but we don't currently implement any Debug exceptions other * than those that result from BKPT, so we treat all debug exceptions * as needing escalation. * * This all means we can identify whether to escalate based only on * the exception number and don't (yet) need the caller to explicitly * tell us whether this exception is synchronous or not. */ int running = nvic_exec_prio(s); bool escalate = false; if (vec->prio >= running) { trace_nvic_escalate_prio(irq, vec->prio, running); escalate = true; } else if (!vec->enabled) { trace_nvic_escalate_disabled(irq); escalate = true; } if (escalate) { if (running < 0) { /* We want to escalate to HardFault but we can't take a * synchronous HardFault at this point either. This is a * Lockup condition due to a guest bug. We don't model * Lockup, so report via cpu_abort() instead. */ cpu_abort(&s->cpu->parent_obj, "Lockup: can't escalate %d to HardFault " "(current priority %d)\n", irq, running); } /* We can do the escalation, so we take HardFault instead */ irq = ARMV7M_EXCP_HARD; vec = &s->vectors[irq]; s->cpu->env.v7m.hfsr |= R_V7M_HFSR_FORCED_MASK; } } if (!vec->pending) { vec->pending = 1; nvic_irq_update(s); } }
19,255
qemu
45a50b1668822c23afc2a89f724654e176518bc4
0
static void generate_bootsect(target_phys_addr_t option_rom, uint32_t gpr[8], uint16_t segs[6], uint16_t ip) { uint8_t rom[512], *p, *reloc; uint8_t sum; int i; memset(rom, 0, sizeof(rom)); p = rom; /* Make sure we have an option rom signature */ *p++ = 0x55; *p++ = 0xaa; /* ROM size in sectors*/ *p++ = 1; /* Hook int19 */ *p++ = 0x50; /* push ax */ *p++ = 0x1e; /* push ds */ *p++ = 0x31; *p++ = 0xc0; /* xor ax, ax */ *p++ = 0x8e; *p++ = 0xd8; /* mov ax, ds */ *p++ = 0xc7; *p++ = 0x06; /* movvw _start,0x64 */ *p++ = 0x64; *p++ = 0x00; reloc = p; *p++ = 0x00; *p++ = 0x00; *p++ = 0x8c; *p++ = 0x0e; /* mov cs,0x66 */ *p++ = 0x66; *p++ = 0x00; *p++ = 0x1f; /* pop ds */ *p++ = 0x58; /* pop ax */ *p++ = 0xcb; /* lret */ /* Actual code */ *reloc = (p - rom); *p++ = 0xfa; /* CLI */ *p++ = 0xfc; /* CLD */ for (i = 0; i < 6; i++) { if (i == 1) /* Skip CS */ continue; *p++ = 0xb8; /* MOV AX,imm16 */ *p++ = segs[i]; *p++ = segs[i] >> 8; *p++ = 0x8e; /* MOV <seg>,AX */ *p++ = 0xc0 + (i << 3); } for (i = 0; i < 8; i++) { *p++ = 0x66; /* 32-bit operand size */ *p++ = 0xb8 + i; /* MOV <reg>,imm32 */ *p++ = gpr[i]; *p++ = gpr[i] >> 8; *p++ = gpr[i] >> 16; *p++ = gpr[i] >> 24; } *p++ = 0xea; /* JMP FAR */ *p++ = ip; /* IP */ *p++ = ip >> 8; *p++ = segs[1]; /* CS */ *p++ = segs[1] >> 8; /* sign rom */ sum = 0; for (i = 0; i < (sizeof(rom) - 1); i++) sum += rom[i]; rom[sizeof(rom) - 1] = -sum; cpu_physical_memory_write_rom(option_rom, rom, sizeof(rom)); option_rom_setup_reset(option_rom, sizeof (rom)); }
19,257
FFmpeg
99f296b30462e6b940aff712920a3fe6b8ba5cc6
0
static int gxf_packet(AVFormatContext *s, AVPacket *pkt) { ByteIOContext *pb = s->pb; pkt_type_t pkt_type; int pkt_len; while (!url_feof(pb)) { int track_type, track_id, ret; int field_nr; int stream_index; if (!parse_packet_header(pb, &pkt_type, &pkt_len)) { if (!url_feof(pb)) av_log(s, AV_LOG_ERROR, "GXF: sync lost\n"); return -1; } if (pkt_type == PKT_FLT) { gxf_read_index(s, pkt_len); continue; } if (pkt_type != PKT_MEDIA) { url_fskip(pb, pkt_len); continue; } if (pkt_len < 16) { av_log(s, AV_LOG_ERROR, "GXF: invalid media packet length\n"); continue; } pkt_len -= 16; track_type = get_byte(pb); track_id = get_byte(pb); stream_index = get_sindex(s, track_id, track_type); if (stream_index < 0) return stream_index; field_nr = get_be32(pb); get_be32(pb); // field information get_be32(pb); // "timeline" field number get_byte(pb); // flags get_byte(pb); // reserved // NOTE: there is also data length information in the // field information, it might be better to take this into account // as well. ret = av_get_packet(pb, pkt, pkt_len); pkt->stream_index = stream_index; pkt->dts = field_nr; return ret; } return AVERROR(EIO); }
19,258
qemu
38931fa8cfb074a08ce65fd1982bd4a5bef9d6fb
0
static int usb_hid_handle_data(USBDevice *dev, USBPacket *p) { USBHIDState *us = DO_UPCAST(USBHIDState, dev, dev); HIDState *hs = &us->hid; uint8_t buf[p->iov.size]; int ret = 0; switch (p->pid) { case USB_TOKEN_IN: if (p->devep == 1) { int64_t curtime = qemu_get_clock_ns(vm_clock); if (!us->changed && (!us->idle || us->next_idle_clock - curtime > 0)) { return USB_RET_NAK; } usb_hid_set_next_idle(us, curtime); if (hs->kind == HID_MOUSE || hs->kind == HID_TABLET) { ret = hid_pointer_poll(hs, buf, p->iov.size); } else if (hs->kind == HID_KEYBOARD) { ret = hid_keyboard_poll(hs, buf, p->iov.size); } usb_packet_copy(p, buf, ret); us->changed = hs->n > 0; } else { goto fail; } break; case USB_TOKEN_OUT: default: fail: ret = USB_RET_STALL; break; } return ret; }
19,259
qemu
03fc0548b70393b0c8d43703591a9e34fb8e3123
0
tcg_target_ulong tcg_qemu_tb_exec(CPUArchState *cpustate, uint8_t *tb_ptr) { tcg_target_ulong next_tb = 0; env = cpustate; tci_reg[TCG_AREG0] = (tcg_target_ulong)env; assert(tb_ptr); for (;;) { #if defined(GETPC) tci_tb_ptr = (uintptr_t)tb_ptr; #endif TCGOpcode opc = tb_ptr[0]; #if !defined(NDEBUG) uint8_t op_size = tb_ptr[1]; uint8_t *old_code_ptr = tb_ptr; #endif tcg_target_ulong t0; tcg_target_ulong t1; tcg_target_ulong t2; tcg_target_ulong label; TCGCond condition; target_ulong taddr; #ifndef CONFIG_SOFTMMU tcg_target_ulong host_addr; #endif uint8_t tmp8; uint16_t tmp16; uint32_t tmp32; uint64_t tmp64; #if TCG_TARGET_REG_BITS == 32 uint64_t v64; #endif /* Skip opcode and size entry. */ tb_ptr += 2; switch (opc) { case INDEX_op_end: case INDEX_op_nop: break; case INDEX_op_nop1: case INDEX_op_nop2: case INDEX_op_nop3: case INDEX_op_nopn: case INDEX_op_discard: TODO(); break; case INDEX_op_set_label: TODO(); break; case INDEX_op_call: t0 = tci_read_ri(&tb_ptr); #if TCG_TARGET_REG_BITS == 32 tmp64 = ((helper_function)t0)(tci_read_reg(TCG_REG_R0), tci_read_reg(TCG_REG_R1), tci_read_reg(TCG_REG_R2), tci_read_reg(TCG_REG_R3), tci_read_reg(TCG_REG_R5), tci_read_reg(TCG_REG_R6), tci_read_reg(TCG_REG_R7), tci_read_reg(TCG_REG_R8), tci_read_reg(TCG_REG_R9), tci_read_reg(TCG_REG_R10)); tci_write_reg(TCG_REG_R0, tmp64); tci_write_reg(TCG_REG_R1, tmp64 >> 32); #else tmp64 = ((helper_function)t0)(tci_read_reg(TCG_REG_R0), tci_read_reg(TCG_REG_R1), tci_read_reg(TCG_REG_R2), tci_read_reg(TCG_REG_R3), tci_read_reg(TCG_REG_R5)); tci_write_reg(TCG_REG_R0, tmp64); #endif break; case INDEX_op_br: label = tci_read_label(&tb_ptr); assert(tb_ptr == old_code_ptr + op_size); tb_ptr = (uint8_t *)label; continue; case INDEX_op_setcond_i32: t0 = *tb_ptr++; t1 = tci_read_r32(&tb_ptr); t2 = tci_read_ri32(&tb_ptr); condition = *tb_ptr++; tci_write_reg32(t0, tci_compare32(t1, t2, condition)); break; #if TCG_TARGET_REG_BITS == 32 case INDEX_op_setcond2_i32: t0 = *tb_ptr++; tmp64 = tci_read_r64(&tb_ptr); v64 = tci_read_ri64(&tb_ptr); condition = *tb_ptr++; tci_write_reg32(t0, tci_compare64(tmp64, v64, condition)); break; #elif TCG_TARGET_REG_BITS == 64 case INDEX_op_setcond_i64: t0 = *tb_ptr++; t1 = tci_read_r64(&tb_ptr); t2 = tci_read_ri64(&tb_ptr); condition = *tb_ptr++; tci_write_reg64(t0, tci_compare64(t1, t2, condition)); break; #endif case INDEX_op_mov_i32: t0 = *tb_ptr++; t1 = tci_read_r32(&tb_ptr); tci_write_reg32(t0, t1); break; case INDEX_op_movi_i32: t0 = *tb_ptr++; t1 = tci_read_i32(&tb_ptr); tci_write_reg32(t0, t1); break; /* Load/store operations (32 bit). */ case INDEX_op_ld8u_i32: t0 = *tb_ptr++; t1 = tci_read_r(&tb_ptr); t2 = tci_read_i32(&tb_ptr); tci_write_reg8(t0, *(uint8_t *)(t1 + t2)); break; case INDEX_op_ld8s_i32: case INDEX_op_ld16u_i32: TODO(); break; case INDEX_op_ld16s_i32: TODO(); break; case INDEX_op_ld_i32: t0 = *tb_ptr++; t1 = tci_read_r(&tb_ptr); t2 = tci_read_i32(&tb_ptr); tci_write_reg32(t0, *(uint32_t *)(t1 + t2)); break; case INDEX_op_st8_i32: t0 = tci_read_r8(&tb_ptr); t1 = tci_read_r(&tb_ptr); t2 = tci_read_i32(&tb_ptr); *(uint8_t *)(t1 + t2) = t0; break; case INDEX_op_st16_i32: t0 = tci_read_r16(&tb_ptr); t1 = tci_read_r(&tb_ptr); t2 = tci_read_i32(&tb_ptr); *(uint16_t *)(t1 + t2) = t0; break; case INDEX_op_st_i32: t0 = tci_read_r32(&tb_ptr); t1 = tci_read_r(&tb_ptr); t2 = tci_read_i32(&tb_ptr); *(uint32_t *)(t1 + t2) = t0; break; /* Arithmetic operations (32 bit). 
*/ case INDEX_op_add_i32: t0 = *tb_ptr++; t1 = tci_read_ri32(&tb_ptr); t2 = tci_read_ri32(&tb_ptr); tci_write_reg32(t0, t1 + t2); break; case INDEX_op_sub_i32: t0 = *tb_ptr++; t1 = tci_read_ri32(&tb_ptr); t2 = tci_read_ri32(&tb_ptr); tci_write_reg32(t0, t1 - t2); break; case INDEX_op_mul_i32: t0 = *tb_ptr++; t1 = tci_read_ri32(&tb_ptr); t2 = tci_read_ri32(&tb_ptr); tci_write_reg32(t0, t1 * t2); break; #if TCG_TARGET_HAS_div_i32 case INDEX_op_div_i32: t0 = *tb_ptr++; t1 = tci_read_ri32(&tb_ptr); t2 = tci_read_ri32(&tb_ptr); tci_write_reg32(t0, (int32_t)t1 / (int32_t)t2); break; case INDEX_op_divu_i32: t0 = *tb_ptr++; t1 = tci_read_ri32(&tb_ptr); t2 = tci_read_ri32(&tb_ptr); tci_write_reg32(t0, t1 / t2); break; case INDEX_op_rem_i32: t0 = *tb_ptr++; t1 = tci_read_ri32(&tb_ptr); t2 = tci_read_ri32(&tb_ptr); tci_write_reg32(t0, (int32_t)t1 % (int32_t)t2); break; case INDEX_op_remu_i32: t0 = *tb_ptr++; t1 = tci_read_ri32(&tb_ptr); t2 = tci_read_ri32(&tb_ptr); tci_write_reg32(t0, t1 % t2); break; #elif TCG_TARGET_HAS_div2_i32 case INDEX_op_div2_i32: case INDEX_op_divu2_i32: TODO(); break; #endif case INDEX_op_and_i32: t0 = *tb_ptr++; t1 = tci_read_ri32(&tb_ptr); t2 = tci_read_ri32(&tb_ptr); tci_write_reg32(t0, t1 & t2); break; case INDEX_op_or_i32: t0 = *tb_ptr++; t1 = tci_read_ri32(&tb_ptr); t2 = tci_read_ri32(&tb_ptr); tci_write_reg32(t0, t1 | t2); break; case INDEX_op_xor_i32: t0 = *tb_ptr++; t1 = tci_read_ri32(&tb_ptr); t2 = tci_read_ri32(&tb_ptr); tci_write_reg32(t0, t1 ^ t2); break; /* Shift/rotate operations (32 bit). */ case INDEX_op_shl_i32: t0 = *tb_ptr++; t1 = tci_read_ri32(&tb_ptr); t2 = tci_read_ri32(&tb_ptr); tci_write_reg32(t0, t1 << t2); break; case INDEX_op_shr_i32: t0 = *tb_ptr++; t1 = tci_read_ri32(&tb_ptr); t2 = tci_read_ri32(&tb_ptr); tci_write_reg32(t0, t1 >> t2); break; case INDEX_op_sar_i32: t0 = *tb_ptr++; t1 = tci_read_ri32(&tb_ptr); t2 = tci_read_ri32(&tb_ptr); tci_write_reg32(t0, ((int32_t)t1 >> t2)); break; #if TCG_TARGET_HAS_rot_i32 case INDEX_op_rotl_i32: t0 = *tb_ptr++; t1 = tci_read_ri32(&tb_ptr); t2 = tci_read_ri32(&tb_ptr); tci_write_reg32(t0, (t1 << t2) | (t1 >> (32 - t2))); break; case INDEX_op_rotr_i32: t0 = *tb_ptr++; t1 = tci_read_ri32(&tb_ptr); t2 = tci_read_ri32(&tb_ptr); tci_write_reg32(t0, (t1 >> t2) | (t1 << (32 - t2))); break; #endif #if TCG_TARGET_HAS_deposit_i32 case INDEX_op_deposit_i32: t0 = *tb_ptr++; t1 = tci_read_r32(&tb_ptr); t2 = tci_read_r32(&tb_ptr); tmp16 = *tb_ptr++; tmp8 = *tb_ptr++; tmp32 = (((1 << tmp8) - 1) << tmp16); tci_write_reg32(t0, (t1 & ~tmp32) | ((t2 << tmp16) & tmp32)); break; #endif case INDEX_op_brcond_i32: t0 = tci_read_r32(&tb_ptr); t1 = tci_read_ri32(&tb_ptr); condition = *tb_ptr++; label = tci_read_label(&tb_ptr); if (tci_compare32(t0, t1, condition)) { assert(tb_ptr == old_code_ptr + op_size); tb_ptr = (uint8_t *)label; continue; } break; #if TCG_TARGET_REG_BITS == 32 case INDEX_op_add2_i32: t0 = *tb_ptr++; t1 = *tb_ptr++; tmp64 = tci_read_r64(&tb_ptr); tmp64 += tci_read_r64(&tb_ptr); tci_write_reg64(t1, t0, tmp64); break; case INDEX_op_sub2_i32: t0 = *tb_ptr++; t1 = *tb_ptr++; tmp64 = tci_read_r64(&tb_ptr); tmp64 -= tci_read_r64(&tb_ptr); tci_write_reg64(t1, t0, tmp64); break; case INDEX_op_brcond2_i32: tmp64 = tci_read_r64(&tb_ptr); v64 = tci_read_ri64(&tb_ptr); condition = *tb_ptr++; label = tci_read_label(&tb_ptr); if (tci_compare64(tmp64, v64, condition)) { assert(tb_ptr == old_code_ptr + op_size); tb_ptr = (uint8_t *)label; continue; } break; case INDEX_op_mulu2_i32: t0 = *tb_ptr++; t1 = *tb_ptr++; t2 = 
tci_read_r32(&tb_ptr); tmp64 = tci_read_r32(&tb_ptr); tci_write_reg64(t1, t0, t2 * tmp64); break; #endif /* TCG_TARGET_REG_BITS == 32 */ #if TCG_TARGET_HAS_ext8s_i32 case INDEX_op_ext8s_i32: t0 = *tb_ptr++; t1 = tci_read_r8s(&tb_ptr); tci_write_reg32(t0, t1); break; #endif #if TCG_TARGET_HAS_ext16s_i32 case INDEX_op_ext16s_i32: t0 = *tb_ptr++; t1 = tci_read_r16s(&tb_ptr); tci_write_reg32(t0, t1); break; #endif #if TCG_TARGET_HAS_ext8u_i32 case INDEX_op_ext8u_i32: t0 = *tb_ptr++; t1 = tci_read_r8(&tb_ptr); tci_write_reg32(t0, t1); break; #endif #if TCG_TARGET_HAS_ext16u_i32 case INDEX_op_ext16u_i32: t0 = *tb_ptr++; t1 = tci_read_r16(&tb_ptr); tci_write_reg32(t0, t1); break; #endif #if TCG_TARGET_HAS_bswap16_i32 case INDEX_op_bswap16_i32: t0 = *tb_ptr++; t1 = tci_read_r16(&tb_ptr); tci_write_reg32(t0, bswap16(t1)); break; #endif #if TCG_TARGET_HAS_bswap32_i32 case INDEX_op_bswap32_i32: t0 = *tb_ptr++; t1 = tci_read_r32(&tb_ptr); tci_write_reg32(t0, bswap32(t1)); break; #endif #if TCG_TARGET_HAS_not_i32 case INDEX_op_not_i32: t0 = *tb_ptr++; t1 = tci_read_r32(&tb_ptr); tci_write_reg32(t0, ~t1); break; #endif #if TCG_TARGET_HAS_neg_i32 case INDEX_op_neg_i32: t0 = *tb_ptr++; t1 = tci_read_r32(&tb_ptr); tci_write_reg32(t0, -t1); break; #endif #if TCG_TARGET_REG_BITS == 64 case INDEX_op_mov_i64: t0 = *tb_ptr++; t1 = tci_read_r64(&tb_ptr); tci_write_reg64(t0, t1); break; case INDEX_op_movi_i64: t0 = *tb_ptr++; t1 = tci_read_i64(&tb_ptr); tci_write_reg64(t0, t1); break; /* Load/store operations (64 bit). */ case INDEX_op_ld8u_i64: t0 = *tb_ptr++; t1 = tci_read_r(&tb_ptr); t2 = tci_read_i32(&tb_ptr); tci_write_reg8(t0, *(uint8_t *)(t1 + t2)); break; case INDEX_op_ld8s_i64: case INDEX_op_ld16u_i64: case INDEX_op_ld16s_i64: TODO(); break; case INDEX_op_ld32u_i64: t0 = *tb_ptr++; t1 = tci_read_r(&tb_ptr); t2 = tci_read_i32(&tb_ptr); tci_write_reg32(t0, *(uint32_t *)(t1 + t2)); break; case INDEX_op_ld32s_i64: t0 = *tb_ptr++; t1 = tci_read_r(&tb_ptr); t2 = tci_read_i32(&tb_ptr); tci_write_reg32s(t0, *(int32_t *)(t1 + t2)); break; case INDEX_op_ld_i64: t0 = *tb_ptr++; t1 = tci_read_r(&tb_ptr); t2 = tci_read_i32(&tb_ptr); tci_write_reg64(t0, *(uint64_t *)(t1 + t2)); break; case INDEX_op_st8_i64: t0 = tci_read_r8(&tb_ptr); t1 = tci_read_r(&tb_ptr); t2 = tci_read_i32(&tb_ptr); *(uint8_t *)(t1 + t2) = t0; break; case INDEX_op_st16_i64: t0 = tci_read_r16(&tb_ptr); t1 = tci_read_r(&tb_ptr); t2 = tci_read_i32(&tb_ptr); *(uint16_t *)(t1 + t2) = t0; break; case INDEX_op_st32_i64: t0 = tci_read_r32(&tb_ptr); t1 = tci_read_r(&tb_ptr); t2 = tci_read_i32(&tb_ptr); *(uint32_t *)(t1 + t2) = t0; break; case INDEX_op_st_i64: t0 = tci_read_r64(&tb_ptr); t1 = tci_read_r(&tb_ptr); t2 = tci_read_i32(&tb_ptr); *(uint64_t *)(t1 + t2) = t0; break; /* Arithmetic operations (64 bit). 
*/ case INDEX_op_add_i64: t0 = *tb_ptr++; t1 = tci_read_ri64(&tb_ptr); t2 = tci_read_ri64(&tb_ptr); tci_write_reg64(t0, t1 + t2); break; case INDEX_op_sub_i64: t0 = *tb_ptr++; t1 = tci_read_ri64(&tb_ptr); t2 = tci_read_ri64(&tb_ptr); tci_write_reg64(t0, t1 - t2); break; case INDEX_op_mul_i64: t0 = *tb_ptr++; t1 = tci_read_ri64(&tb_ptr); t2 = tci_read_ri64(&tb_ptr); tci_write_reg64(t0, t1 * t2); break; #if TCG_TARGET_HAS_div_i64 case INDEX_op_div_i64: case INDEX_op_divu_i64: case INDEX_op_rem_i64: case INDEX_op_remu_i64: TODO(); break; #elif TCG_TARGET_HAS_div2_i64 case INDEX_op_div2_i64: case INDEX_op_divu2_i64: TODO(); break; #endif case INDEX_op_and_i64: t0 = *tb_ptr++; t1 = tci_read_ri64(&tb_ptr); t2 = tci_read_ri64(&tb_ptr); tci_write_reg64(t0, t1 & t2); break; case INDEX_op_or_i64: t0 = *tb_ptr++; t1 = tci_read_ri64(&tb_ptr); t2 = tci_read_ri64(&tb_ptr); tci_write_reg64(t0, t1 | t2); break; case INDEX_op_xor_i64: t0 = *tb_ptr++; t1 = tci_read_ri64(&tb_ptr); t2 = tci_read_ri64(&tb_ptr); tci_write_reg64(t0, t1 ^ t2); break; /* Shift/rotate operations (64 bit). */ case INDEX_op_shl_i64: t0 = *tb_ptr++; t1 = tci_read_ri64(&tb_ptr); t2 = tci_read_ri64(&tb_ptr); tci_write_reg64(t0, t1 << t2); break; case INDEX_op_shr_i64: t0 = *tb_ptr++; t1 = tci_read_ri64(&tb_ptr); t2 = tci_read_ri64(&tb_ptr); tci_write_reg64(t0, t1 >> t2); break; case INDEX_op_sar_i64: t0 = *tb_ptr++; t1 = tci_read_ri64(&tb_ptr); t2 = tci_read_ri64(&tb_ptr); tci_write_reg64(t0, ((int64_t)t1 >> t2)); break; #if TCG_TARGET_HAS_rot_i64 case INDEX_op_rotl_i64: case INDEX_op_rotr_i64: TODO(); break; #endif #if TCG_TARGET_HAS_deposit_i64 case INDEX_op_deposit_i64: t0 = *tb_ptr++; t1 = tci_read_r64(&tb_ptr); t2 = tci_read_r64(&tb_ptr); tmp16 = *tb_ptr++; tmp8 = *tb_ptr++; tmp64 = (((1ULL << tmp8) - 1) << tmp16); tci_write_reg64(t0, (t1 & ~tmp64) | ((t2 << tmp16) & tmp64)); break; #endif case INDEX_op_brcond_i64: t0 = tci_read_r64(&tb_ptr); t1 = tci_read_ri64(&tb_ptr); condition = *tb_ptr++; label = tci_read_label(&tb_ptr); if (tci_compare64(t0, t1, condition)) { assert(tb_ptr == old_code_ptr + op_size); tb_ptr = (uint8_t *)label; continue; } break; #if TCG_TARGET_HAS_ext8u_i64 case INDEX_op_ext8u_i64: t0 = *tb_ptr++; t1 = tci_read_r8(&tb_ptr); tci_write_reg64(t0, t1); break; #endif #if TCG_TARGET_HAS_ext8s_i64 case INDEX_op_ext8s_i64: t0 = *tb_ptr++; t1 = tci_read_r8s(&tb_ptr); tci_write_reg64(t0, t1); break; #endif #if TCG_TARGET_HAS_ext16s_i64 case INDEX_op_ext16s_i64: t0 = *tb_ptr++; t1 = tci_read_r16s(&tb_ptr); tci_write_reg64(t0, t1); break; #endif #if TCG_TARGET_HAS_ext16u_i64 case INDEX_op_ext16u_i64: t0 = *tb_ptr++; t1 = tci_read_r16(&tb_ptr); tci_write_reg64(t0, t1); break; #endif #if TCG_TARGET_HAS_ext32s_i64 case INDEX_op_ext32s_i64: t0 = *tb_ptr++; t1 = tci_read_r32s(&tb_ptr); tci_write_reg64(t0, t1); break; #endif #if TCG_TARGET_HAS_ext32u_i64 case INDEX_op_ext32u_i64: t0 = *tb_ptr++; t1 = tci_read_r32(&tb_ptr); tci_write_reg64(t0, t1); break; #endif #if TCG_TARGET_HAS_bswap16_i64 case INDEX_op_bswap16_i64: TODO(); t0 = *tb_ptr++; t1 = tci_read_r16(&tb_ptr); tci_write_reg64(t0, bswap16(t1)); break; #endif #if TCG_TARGET_HAS_bswap32_i64 case INDEX_op_bswap32_i64: t0 = *tb_ptr++; t1 = tci_read_r32(&tb_ptr); tci_write_reg64(t0, bswap32(t1)); break; #endif #if TCG_TARGET_HAS_bswap64_i64 case INDEX_op_bswap64_i64: t0 = *tb_ptr++; t1 = tci_read_r64(&tb_ptr); tci_write_reg64(t0, bswap64(t1)); break; #endif #if TCG_TARGET_HAS_not_i64 case INDEX_op_not_i64: t0 = *tb_ptr++; t1 = tci_read_r64(&tb_ptr); tci_write_reg64(t0, 
~t1); break; #endif #if TCG_TARGET_HAS_neg_i64 case INDEX_op_neg_i64: t0 = *tb_ptr++; t1 = tci_read_r64(&tb_ptr); tci_write_reg64(t0, -t1); break; #endif #endif /* TCG_TARGET_REG_BITS == 64 */ /* QEMU specific operations. */ #if TARGET_LONG_BITS > TCG_TARGET_REG_BITS case INDEX_op_debug_insn_start: TODO(); break; #else case INDEX_op_debug_insn_start: TODO(); break; #endif case INDEX_op_exit_tb: next_tb = *(uint64_t *)tb_ptr; goto exit; break; case INDEX_op_goto_tb: t0 = tci_read_i32(&tb_ptr); assert(tb_ptr == old_code_ptr + op_size); tb_ptr += (int32_t)t0; continue; case INDEX_op_qemu_ld8u: t0 = *tb_ptr++; taddr = tci_read_ulong(&tb_ptr); #ifdef CONFIG_SOFTMMU tmp8 = helper_ldb_mmu(env, taddr, tci_read_i(&tb_ptr)); #else host_addr = (tcg_target_ulong)taddr; assert(taddr == host_addr); tmp8 = *(uint8_t *)(host_addr + GUEST_BASE); #endif tci_write_reg8(t0, tmp8); break; case INDEX_op_qemu_ld8s: t0 = *tb_ptr++; taddr = tci_read_ulong(&tb_ptr); #ifdef CONFIG_SOFTMMU tmp8 = helper_ldb_mmu(env, taddr, tci_read_i(&tb_ptr)); #else host_addr = (tcg_target_ulong)taddr; assert(taddr == host_addr); tmp8 = *(uint8_t *)(host_addr + GUEST_BASE); #endif tci_write_reg8s(t0, tmp8); break; case INDEX_op_qemu_ld16u: t0 = *tb_ptr++; taddr = tci_read_ulong(&tb_ptr); #ifdef CONFIG_SOFTMMU tmp16 = helper_ldw_mmu(env, taddr, tci_read_i(&tb_ptr)); #else host_addr = (tcg_target_ulong)taddr; assert(taddr == host_addr); tmp16 = tswap16(*(uint16_t *)(host_addr + GUEST_BASE)); #endif tci_write_reg16(t0, tmp16); break; case INDEX_op_qemu_ld16s: t0 = *tb_ptr++; taddr = tci_read_ulong(&tb_ptr); #ifdef CONFIG_SOFTMMU tmp16 = helper_ldw_mmu(env, taddr, tci_read_i(&tb_ptr)); #else host_addr = (tcg_target_ulong)taddr; assert(taddr == host_addr); tmp16 = tswap16(*(uint16_t *)(host_addr + GUEST_BASE)); #endif tci_write_reg16s(t0, tmp16); break; #if TCG_TARGET_REG_BITS == 64 case INDEX_op_qemu_ld32u: t0 = *tb_ptr++; taddr = tci_read_ulong(&tb_ptr); #ifdef CONFIG_SOFTMMU tmp32 = helper_ldl_mmu(env, taddr, tci_read_i(&tb_ptr)); #else host_addr = (tcg_target_ulong)taddr; assert(taddr == host_addr); tmp32 = tswap32(*(uint32_t *)(host_addr + GUEST_BASE)); #endif tci_write_reg32(t0, tmp32); break; case INDEX_op_qemu_ld32s: t0 = *tb_ptr++; taddr = tci_read_ulong(&tb_ptr); #ifdef CONFIG_SOFTMMU tmp32 = helper_ldl_mmu(env, taddr, tci_read_i(&tb_ptr)); #else host_addr = (tcg_target_ulong)taddr; assert(taddr == host_addr); tmp32 = tswap32(*(uint32_t *)(host_addr + GUEST_BASE)); #endif tci_write_reg32s(t0, tmp32); break; #endif /* TCG_TARGET_REG_BITS == 64 */ case INDEX_op_qemu_ld32: t0 = *tb_ptr++; taddr = tci_read_ulong(&tb_ptr); #ifdef CONFIG_SOFTMMU tmp32 = helper_ldl_mmu(env, taddr, tci_read_i(&tb_ptr)); #else host_addr = (tcg_target_ulong)taddr; assert(taddr == host_addr); tmp32 = tswap32(*(uint32_t *)(host_addr + GUEST_BASE)); #endif tci_write_reg32(t0, tmp32); break; case INDEX_op_qemu_ld64: t0 = *tb_ptr++; #if TCG_TARGET_REG_BITS == 32 t1 = *tb_ptr++; #endif taddr = tci_read_ulong(&tb_ptr); #ifdef CONFIG_SOFTMMU tmp64 = helper_ldq_mmu(env, taddr, tci_read_i(&tb_ptr)); #else host_addr = (tcg_target_ulong)taddr; assert(taddr == host_addr); tmp64 = tswap64(*(uint64_t *)(host_addr + GUEST_BASE)); #endif tci_write_reg(t0, tmp64); #if TCG_TARGET_REG_BITS == 32 tci_write_reg(t1, tmp64 >> 32); #endif break; case INDEX_op_qemu_st8: t0 = tci_read_r8(&tb_ptr); taddr = tci_read_ulong(&tb_ptr); #ifdef CONFIG_SOFTMMU t2 = tci_read_i(&tb_ptr); helper_stb_mmu(env, taddr, t0, t2); #else host_addr = (tcg_target_ulong)taddr; assert(taddr == 
host_addr); *(uint8_t *)(host_addr + GUEST_BASE) = t0; #endif break; case INDEX_op_qemu_st16: t0 = tci_read_r16(&tb_ptr); taddr = tci_read_ulong(&tb_ptr); #ifdef CONFIG_SOFTMMU t2 = tci_read_i(&tb_ptr); helper_stw_mmu(env, taddr, t0, t2); #else host_addr = (tcg_target_ulong)taddr; assert(taddr == host_addr); *(uint16_t *)(host_addr + GUEST_BASE) = tswap16(t0); #endif break; case INDEX_op_qemu_st32: t0 = tci_read_r32(&tb_ptr); taddr = tci_read_ulong(&tb_ptr); #ifdef CONFIG_SOFTMMU t2 = tci_read_i(&tb_ptr); helper_stl_mmu(env, taddr, t0, t2); #else host_addr = (tcg_target_ulong)taddr; assert(taddr == host_addr); *(uint32_t *)(host_addr + GUEST_BASE) = tswap32(t0); #endif break; case INDEX_op_qemu_st64: tmp64 = tci_read_r64(&tb_ptr); taddr = tci_read_ulong(&tb_ptr); #ifdef CONFIG_SOFTMMU t2 = tci_read_i(&tb_ptr); helper_stq_mmu(env, taddr, tmp64, t2); #else host_addr = (tcg_target_ulong)taddr; assert(taddr == host_addr); *(uint64_t *)(host_addr + GUEST_BASE) = tswap64(tmp64); #endif break; default: TODO(); break; } assert(tb_ptr == old_code_ptr + op_size); } exit: return next_tb; }
19,260
qemu
7ad4c7200111d20eb97eed4f46b6026e3f0b0eef
0
void *g_realloc_n(void *ptr, size_t nmemb, size_t size) { size_t sz; __coverity_negative_sink__(nmemb); __coverity_negative_sink__(size); sz = nmemb * size; __coverity_escape__(ptr); ptr = __coverity_alloc__(size); /* * Memory beyond the old size isn't actually initialized. Can't * model that. See Coverity's realloc() model */ __coverity_writeall__(ptr); __coverity_mark_as_afm_allocated__(ptr, AFM_free); return ptr; }
19,261
qemu
6e99c631f116221d169ea53953d91b8aa74d297a
0
static void net_socket_send_dgram(void *opaque) { NetSocketState *s = opaque; int size; size = qemu_recv(s->fd, s->buf, sizeof(s->buf), 0); if (size < 0) return; if (size == 0) { /* end of connection */ net_socket_read_poll(s, false); net_socket_write_poll(s, false); return; } qemu_send_packet(&s->nc, s->buf, size); }
19,262
qemu
245f7b51c0ea04fb2224b1127430a096c91aee70
0
static int tight_fill_palette(VncState *vs, int x, int y, size_t count, uint32_t *bg, uint32_t *fg, struct QDict **palette) { int max; max = count / tight_conf[vs->tight_compression].idx_max_colors_divisor; if (max < 2 && count >= tight_conf[vs->tight_compression].mono_min_rect_size) { max = 2; } if (max >= 256) { max = 256; } switch(vs->clientds.pf.bytes_per_pixel) { case 4: return tight_fill_palette32(vs, x, y, max, count, bg, fg, palette); case 2: return tight_fill_palette16(vs, x, y, max, count, bg, fg, palette); default: max = 2; return tight_fill_palette8(vs, x, y, max, count, bg, fg, palette); } return 0; }
19,263
qemu
7e2515e87c41e2e658aaed466e11cbdf1ea8bcb1
0
static int term_can_read(void *opaque) { return 128; }
19,265
qemu
2ee4aed86ff2ba38a0e1846de18a9aec38d73015
0
void do_tlbwi (void) { /* Discard cached TLB entries. We could avoid doing this if the tlbwi is just upgrading access permissions on the current entry; that might be a further win. */ mips_tlb_flush_extra (env, MIPS_TLB_NB); /* Wildly undefined effects for CP0_index containing a too high value and MIPS_TLB_NB not being a power of two. But so does real silicon. */ invalidate_tlb(env->CP0_index & (MIPS_TLB_NB - 1), 0); fill_tlb(env->CP0_index & (MIPS_TLB_NB - 1)); }
19,266
qemu
eabb7b91b36b202b4dac2df2d59d698e3aff197a
0
static void tcg_out_modrm_sib_offset(TCGContext *s, int opc, int r, int rm, int index, int shift, intptr_t offset) { int mod, len; if (index < 0 && rm < 0) { if (TCG_TARGET_REG_BITS == 64) { /* Try for a rip-relative addressing mode. This has replaced the 32-bit-mode absolute addressing encoding. */ intptr_t pc = (intptr_t)s->code_ptr + 5 + ~rm; intptr_t disp = offset - pc; if (disp == (int32_t)disp) { tcg_out_opc(s, opc, r, 0, 0); tcg_out8(s, (LOWREGMASK(r) << 3) | 5); tcg_out32(s, disp); return; } /* Try for an absolute address encoding. This requires the use of the MODRM+SIB encoding and is therefore larger than rip-relative addressing. */ if (offset == (int32_t)offset) { tcg_out_opc(s, opc, r, 0, 0); tcg_out8(s, (LOWREGMASK(r) << 3) | 4); tcg_out8(s, (4 << 3) | 5); tcg_out32(s, offset); return; } /* ??? The memory isn't directly addressable. */ tcg_abort(); } else { /* Absolute address. */ tcg_out_opc(s, opc, r, 0, 0); tcg_out8(s, (r << 3) | 5); tcg_out32(s, offset); return; } } /* Find the length of the immediate addend. Note that the encoding that would be used for (%ebp) indicates absolute addressing. */ if (rm < 0) { mod = 0, len = 4, rm = 5; } else if (offset == 0 && LOWREGMASK(rm) != TCG_REG_EBP) { mod = 0, len = 0; } else if (offset == (int8_t)offset) { mod = 0x40, len = 1; } else { mod = 0x80, len = 4; } /* Use a single byte MODRM format if possible. Note that the encoding that would be used for %esp is the escape to the two byte form. */ if (index < 0 && LOWREGMASK(rm) != TCG_REG_ESP) { /* Single byte MODRM format. */ tcg_out_opc(s, opc, r, rm, 0); tcg_out8(s, mod | (LOWREGMASK(r) << 3) | LOWREGMASK(rm)); } else { /* Two byte MODRM+SIB format. */ /* Note that the encoding that would place %esp into the index field indicates no index register. In 64-bit mode, the REX.X bit counts, so %r12 can be used as the index. */ if (index < 0) { index = 4; } else { assert(index != TCG_REG_ESP); } tcg_out_opc(s, opc, r, rm, index); tcg_out8(s, mod | (LOWREGMASK(r) << 3) | 4); tcg_out8(s, (shift << 6) | (LOWREGMASK(index) << 3) | LOWREGMASK(rm)); } if (len == 1) { tcg_out8(s, offset); } else if (len == 4) { tcg_out32(s, offset); } }
19,267
qemu
643f59322432d77165329dfabe2d040d7e30dae8
0
static int common_bind(struct common *c) { int mfn; if (xenstore_read_fe_int(&c->xendev, "page-ref", &mfn) == -1) return -1; if (xenstore_read_fe_int(&c->xendev, "event-channel", &c->xendev.remote_port) == -1) return -1; c->page = xc_map_foreign_range(xen_xc, c->xendev.dom, XC_PAGE_SIZE, PROT_READ | PROT_WRITE, mfn); if (c->page == NULL) return -1; xen_be_bind_evtchn(&c->xendev); xen_be_printf(&c->xendev, 1, "ring mfn %d, remote-port %d, local-port %d\n", mfn, c->xendev.remote_port, c->xendev.local_port); return 0; }
19,268
FFmpeg
9d36cab4c0dc5089c023661aef9aeb8b009048fd
0
static int nvenc_get_frame(AVCodecContext *avctx, AVPacket *pkt) { NVENCContext *ctx = avctx->priv_data; NV_ENCODE_API_FUNCTION_LIST *nv = &ctx->nvel.nvenc_funcs; NV_ENC_LOCK_BITSTREAM params = { 0 }; NVENCOutputSurface *out = NULL; int ret; ret = nvenc_dequeue_surface(ctx->pending, &out); if (ret) return ret; params.version = NV_ENC_LOCK_BITSTREAM_VER; params.outputBitstream = out->out; ret = nv->nvEncLockBitstream(ctx->nvenc_ctx, &params); if (ret < 0) return nvenc_print_error(avctx, ret, "Cannot lock the bitstream"); ret = ff_alloc_packet(pkt, params.bitstreamSizeInBytes); if (ret < 0) return ret; memcpy(pkt->data, params.bitstreamBufferPtr, pkt->size); ret = nv->nvEncUnlockBitstream(ctx->nvenc_ctx, out->out); if (ret < 0) return nvenc_print_error(avctx, ret, "Cannot unlock the bitstream"); out->busy = out->in->locked = 0; ret = nvenc_set_timestamp(ctx, &params, pkt); if (ret < 0) return ret; switch (params.pictureType) { case NV_ENC_PIC_TYPE_IDR: pkt->flags |= AV_PKT_FLAG_KEY; #if FF_API_CODED_FRAME FF_DISABLE_DEPRECATION_WARNINGS case NV_ENC_PIC_TYPE_INTRA_REFRESH: case NV_ENC_PIC_TYPE_I: avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I; break; case NV_ENC_PIC_TYPE_P: avctx->coded_frame->pict_type = AV_PICTURE_TYPE_P; break; case NV_ENC_PIC_TYPE_B: avctx->coded_frame->pict_type = AV_PICTURE_TYPE_B; break; case NV_ENC_PIC_TYPE_BI: avctx->coded_frame->pict_type = AV_PICTURE_TYPE_BI; break; FF_ENABLE_DEPRECATION_WARNINGS #endif } return 0; }
19,269
qemu
2c406b8fc8fc09853e74924d7067712d7a75108f
0
static ssize_t rtl8139_do_receive(VLANClientState *nc, const uint8_t *buf, size_t size_, int do_interrupt) { RTL8139State *s = DO_UPCAST(NICState, nc, nc)->opaque; int size = size_; uint32_t packet_header = 0; uint8_t buf1[60]; static const uint8_t broadcast_macaddr[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; DEBUG_PRINT((">>> RTL8139: received len=%d\n", size)); /* test if board clock is stopped */ if (!s->clock_enabled) { DEBUG_PRINT(("RTL8139: stopped ==========================\n")); return -1; } /* first check if receiver is enabled */ if (!rtl8139_receiver_enabled(s)) { DEBUG_PRINT(("RTL8139: receiver disabled ================\n")); return -1; } /* XXX: check this */ if (s->RxConfig & AcceptAllPhys) { /* promiscuous: receive all */ DEBUG_PRINT((">>> RTL8139: packet received in promiscuous mode\n")); } else { if (!memcmp(buf, broadcast_macaddr, 6)) { /* broadcast address */ if (!(s->RxConfig & AcceptBroadcast)) { DEBUG_PRINT((">>> RTL8139: broadcast packet rejected\n")); /* update tally counter */ ++s->tally_counters.RxERR; return size; } packet_header |= RxBroadcast; DEBUG_PRINT((">>> RTL8139: broadcast packet received\n")); /* update tally counter */ ++s->tally_counters.RxOkBrd; } else if (buf[0] & 0x01) { /* multicast */ if (!(s->RxConfig & AcceptMulticast)) { DEBUG_PRINT((">>> RTL8139: multicast packet rejected\n")); /* update tally counter */ ++s->tally_counters.RxERR; return size; } int mcast_idx = compute_mcast_idx(buf); if (!(s->mult[mcast_idx >> 3] & (1 << (mcast_idx & 7)))) { DEBUG_PRINT((">>> RTL8139: multicast address mismatch\n")); /* update tally counter */ ++s->tally_counters.RxERR; return size; } packet_header |= RxMulticast; DEBUG_PRINT((">>> RTL8139: multicast packet received\n")); /* update tally counter */ ++s->tally_counters.RxOkMul; } else if (s->phys[0] == buf[0] && s->phys[1] == buf[1] && s->phys[2] == buf[2] && s->phys[3] == buf[3] && s->phys[4] == buf[4] && s->phys[5] == buf[5]) { /* match */ if (!(s->RxConfig & AcceptMyPhys)) { DEBUG_PRINT((">>> RTL8139: rejecting physical address matching packet\n")); /* update tally counter */ ++s->tally_counters.RxERR; return size; } packet_header |= RxPhysical; DEBUG_PRINT((">>> RTL8139: physical address matching packet received\n")); /* update tally counter */ ++s->tally_counters.RxOkPhy; } else { DEBUG_PRINT((">>> RTL8139: unknown packet\n")); /* update tally counter */ ++s->tally_counters.RxERR; return size; } } /* if too small buffer, then expand it */ if (size < MIN_BUF_SIZE) { memcpy(buf1, buf, size); memset(buf1 + size, 0, MIN_BUF_SIZE - size); buf = buf1; size = MIN_BUF_SIZE; } if (rtl8139_cp_receiver_enabled(s)) { DEBUG_PRINT(("RTL8139: in C+ Rx mode ================\n")); /* begin C+ receiver mode */ /* w0 ownership flag */ #define CP_RX_OWN (1<<31) /* w0 end of ring flag */ #define CP_RX_EOR (1<<30) /* w0 bits 0...12 : buffer size */ #define CP_RX_BUFFER_SIZE_MASK ((1<<13) - 1) /* w1 tag available flag */ #define CP_RX_TAVA (1<<16) /* w1 bits 0...15 : VLAN tag */ #define CP_RX_VLAN_TAG_MASK ((1<<16) - 1) /* w2 low 32bit of Rx buffer ptr */ /* w3 high 32bit of Rx buffer ptr */ int descriptor = s->currCPlusRxDesc; target_phys_addr_t cplus_rx_ring_desc; cplus_rx_ring_desc = rtl8139_addr64(s->RxRingAddrLO, s->RxRingAddrHI); cplus_rx_ring_desc += 16 * descriptor; DEBUG_PRINT(("RTL8139: +++ C+ mode reading RX descriptor %d from host memory at %08x %08x = %016" PRIx64 "\n", descriptor, s->RxRingAddrHI, s->RxRingAddrLO, (uint64_t)cplus_rx_ring_desc)); uint32_t val, rxdw0,rxdw1,rxbufLO,rxbufHI; 
cpu_physical_memory_read(cplus_rx_ring_desc, (uint8_t *)&val, 4); rxdw0 = le32_to_cpu(val); cpu_physical_memory_read(cplus_rx_ring_desc+4, (uint8_t *)&val, 4); rxdw1 = le32_to_cpu(val); cpu_physical_memory_read(cplus_rx_ring_desc+8, (uint8_t *)&val, 4); rxbufLO = le32_to_cpu(val); cpu_physical_memory_read(cplus_rx_ring_desc+12, (uint8_t *)&val, 4); rxbufHI = le32_to_cpu(val); DEBUG_PRINT(("RTL8139: +++ C+ mode RX descriptor %d %08x %08x %08x %08x\n", descriptor, rxdw0, rxdw1, rxbufLO, rxbufHI)); if (!(rxdw0 & CP_RX_OWN)) { DEBUG_PRINT(("RTL8139: C+ Rx mode : descriptor %d is owned by host\n", descriptor)); s->IntrStatus |= RxOverflow; ++s->RxMissed; /* update tally counter */ ++s->tally_counters.RxERR; ++s->tally_counters.MissPkt; rtl8139_update_irq(s); return size_; } uint32_t rx_space = rxdw0 & CP_RX_BUFFER_SIZE_MASK; /* TODO: scatter the packet over available receive ring descriptors space */ if (size+4 > rx_space) { DEBUG_PRINT(("RTL8139: C+ Rx mode : descriptor %d size %d received %d + 4\n", descriptor, rx_space, size)); s->IntrStatus |= RxOverflow; ++s->RxMissed; /* update tally counter */ ++s->tally_counters.RxERR; ++s->tally_counters.MissPkt; rtl8139_update_irq(s); return size_; } target_phys_addr_t rx_addr = rtl8139_addr64(rxbufLO, rxbufHI); /* receive/copy to target memory */ cpu_physical_memory_write( rx_addr, buf, size ); if (s->CpCmd & CPlusRxChkSum) { /* do some packet checksumming */ } /* write checksum */ #if defined (RTL8139_CALCULATE_RXCRC) val = cpu_to_le32(crc32(0, buf, size)); #else val = 0; #endif cpu_physical_memory_write( rx_addr+size, (uint8_t *)&val, 4); /* first segment of received packet flag */ #define CP_RX_STATUS_FS (1<<29) /* last segment of received packet flag */ #define CP_RX_STATUS_LS (1<<28) /* multicast packet flag */ #define CP_RX_STATUS_MAR (1<<26) /* physical-matching packet flag */ #define CP_RX_STATUS_PAM (1<<25) /* broadcast packet flag */ #define CP_RX_STATUS_BAR (1<<24) /* runt packet flag */ #define CP_RX_STATUS_RUNT (1<<19) /* crc error flag */ #define CP_RX_STATUS_CRC (1<<18) /* IP checksum error flag */ #define CP_RX_STATUS_IPF (1<<15) /* UDP checksum error flag */ #define CP_RX_STATUS_UDPF (1<<14) /* TCP checksum error flag */ #define CP_RX_STATUS_TCPF (1<<13) /* transfer ownership to target */ rxdw0 &= ~CP_RX_OWN; /* set first segment bit */ rxdw0 |= CP_RX_STATUS_FS; /* set last segment bit */ rxdw0 |= CP_RX_STATUS_LS; /* set received packet type flags */ if (packet_header & RxBroadcast) rxdw0 |= CP_RX_STATUS_BAR; if (packet_header & RxMulticast) rxdw0 |= CP_RX_STATUS_MAR; if (packet_header & RxPhysical) rxdw0 |= CP_RX_STATUS_PAM; /* set received size */ rxdw0 &= ~CP_RX_BUFFER_SIZE_MASK; rxdw0 |= (size+4); /* reset VLAN tag flag */ rxdw1 &= ~CP_RX_TAVA; /* update ring data */ val = cpu_to_le32(rxdw0); cpu_physical_memory_write(cplus_rx_ring_desc, (uint8_t *)&val, 4); val = cpu_to_le32(rxdw1); cpu_physical_memory_write(cplus_rx_ring_desc+4, (uint8_t *)&val, 4); /* update tally counter */ ++s->tally_counters.RxOk; /* seek to next Rx descriptor */ if (rxdw0 & CP_RX_EOR) { s->currCPlusRxDesc = 0; } else { ++s->currCPlusRxDesc; } DEBUG_PRINT(("RTL8139: done C+ Rx mode ----------------\n")); } else { DEBUG_PRINT(("RTL8139: in ring Rx mode ================\n")); /* begin ring receiver mode */ int avail = MOD2(s->RxBufferSize + s->RxBufPtr - s->RxBufAddr, s->RxBufferSize); /* if receiver buffer is empty then avail == 0 */ if (avail != 0 && size + 8 >= avail) { DEBUG_PRINT(("rx overflow: rx buffer length %d head 0x%04x read 0x%04x === available 
0x%04x need 0x%04x\n", s->RxBufferSize, s->RxBufAddr, s->RxBufPtr, avail, size + 8)); s->IntrStatus |= RxOverflow; ++s->RxMissed; rtl8139_update_irq(s); return size_; } packet_header |= RxStatusOK; packet_header |= (((size+4) << 16) & 0xffff0000); /* write header */ uint32_t val = cpu_to_le32(packet_header); rtl8139_write_buffer(s, (uint8_t *)&val, 4); rtl8139_write_buffer(s, buf, size); /* write checksum */ #if defined (RTL8139_CALCULATE_RXCRC) val = cpu_to_le32(crc32(0, buf, size)); #else val = 0; #endif rtl8139_write_buffer(s, (uint8_t *)&val, 4); /* correct buffer write pointer */ s->RxBufAddr = MOD2((s->RxBufAddr + 3) & ~0x3, s->RxBufferSize); /* now we can signal we have received something */ DEBUG_PRINT((" received: rx buffer length %d head 0x%04x read 0x%04x\n", s->RxBufferSize, s->RxBufAddr, s->RxBufPtr)); } s->IntrStatus |= RxOK; if (do_interrupt) { rtl8139_update_irq(s); } return size_; }
19,270
qemu
b23046abe78f48498a423b802d6d86ba0172d57f
0
static void build_pci_bus_end(PCIBus *bus, void *bus_state) { AcpiBuildPciBusHotplugState *child = bus_state; AcpiBuildPciBusHotplugState *parent = child->parent; GArray *bus_table = build_alloc_array(); DECLARE_BITMAP(slot_hotplug_enable, PCI_SLOT_MAX); DECLARE_BITMAP(slot_device_present, PCI_SLOT_MAX); DECLARE_BITMAP(slot_device_system, PCI_SLOT_MAX); DECLARE_BITMAP(slot_device_vga, PCI_SLOT_MAX); DECLARE_BITMAP(slot_device_qxl, PCI_SLOT_MAX); uint8_t op; int i; QObject *bsel; GArray *method; bool bus_hotplug_support = false; /* * Skip bridge subtree creation if bridge hotplug is disabled * to make acpi tables compatible with legacy machine types. * Skip creation for hotplugged bridges as well. */ if (bus->parent_dev && (!child->pcihp_bridge_en || DEVICE(bus->parent_dev)->hotplugged)) { build_free_array(bus_table); build_pci_bus_state_cleanup(child); g_free(child); return; } if (bus->parent_dev) { op = 0x82; /* DeviceOp */ build_append_namestring(bus_table, "S%.02X", bus->parent_dev->devfn); build_append_byte(bus_table, 0x08); /* NameOp */ build_append_namestring(bus_table, "_SUN"); build_append_int(bus_table, PCI_SLOT(bus->parent_dev->devfn)); build_append_byte(bus_table, 0x08); /* NameOp */ build_append_namestring(bus_table, "_ADR"); build_append_int(bus_table, (PCI_SLOT(bus->parent_dev->devfn) << 16) | PCI_FUNC(bus->parent_dev->devfn)); } else { op = 0x10; /* ScopeOp */; build_append_namestring(bus_table, "PCI0"); } bsel = object_property_get_qobject(OBJECT(bus), ACPI_PCIHP_PROP_BSEL, NULL); if (bsel) { build_append_byte(bus_table, 0x08); /* NameOp */ build_append_namestring(bus_table, "BSEL"); build_append_int(bus_table, qint_get_int(qobject_to_qint(bsel))); memset(slot_hotplug_enable, 0xff, sizeof slot_hotplug_enable); } else { /* No bsel - no slots are hot-pluggable */ memset(slot_hotplug_enable, 0x00, sizeof slot_hotplug_enable); } memset(slot_device_present, 0x00, sizeof slot_device_present); memset(slot_device_system, 0x00, sizeof slot_device_present); memset(slot_device_vga, 0x00, sizeof slot_device_vga); memset(slot_device_qxl, 0x00, sizeof slot_device_qxl); for (i = 0; i < ARRAY_SIZE(bus->devices); i += PCI_FUNC_MAX) { DeviceClass *dc; PCIDeviceClass *pc; PCIDevice *pdev = bus->devices[i]; int slot = PCI_SLOT(i); bool bridge_in_acpi; if (!pdev) { continue; } set_bit(slot, slot_device_present); pc = PCI_DEVICE_GET_CLASS(pdev); dc = DEVICE_GET_CLASS(pdev); /* When hotplug for bridges is enabled, bridges are * described in ACPI separately (see build_pci_bus_end). * In this case they aren't themselves hot-pluggable. * Hotplugged bridges *are* hot-pluggable. 
*/ bridge_in_acpi = pc->is_bridge && child->pcihp_bridge_en && !DEVICE(pdev)->hotplugged; if (pc->class_id == PCI_CLASS_BRIDGE_ISA || bridge_in_acpi) { set_bit(slot, slot_device_system); } if (pc->class_id == PCI_CLASS_DISPLAY_VGA) { set_bit(slot, slot_device_vga); if (object_dynamic_cast(OBJECT(pdev), "qxl-vga")) { set_bit(slot, slot_device_qxl); } } if (!dc->hotpluggable || bridge_in_acpi) { clear_bit(slot, slot_hotplug_enable); } } /* Append Device object for each slot */ for (i = 0; i < PCI_SLOT_MAX; i++) { bool can_eject = test_bit(i, slot_hotplug_enable); bool present = test_bit(i, slot_device_present); bool vga = test_bit(i, slot_device_vga); bool qxl = test_bit(i, slot_device_qxl); bool system = test_bit(i, slot_device_system); if (can_eject) { void *pcihp = acpi_data_push(bus_table, ACPI_PCIHP_SIZEOF); memcpy(pcihp, ACPI_PCIHP_AML, ACPI_PCIHP_SIZEOF); patch_pcihp(i, pcihp); bus_hotplug_support = true; } else if (qxl) { void *pcihp = acpi_data_push(bus_table, ACPI_PCIQXL_SIZEOF); memcpy(pcihp, ACPI_PCIQXL_AML, ACPI_PCIQXL_SIZEOF); patch_pciqxl(i, pcihp); } else if (vga) { void *pcihp = acpi_data_push(bus_table, ACPI_PCIVGA_SIZEOF); memcpy(pcihp, ACPI_PCIVGA_AML, ACPI_PCIVGA_SIZEOF); patch_pcivga(i, pcihp); } else if (system) { /* Nothing to do: system devices are in DSDT or in SSDT above. */ } else if (present) { void *pcihp = acpi_data_push(bus_table, ACPI_PCINOHP_SIZEOF); memcpy(pcihp, ACPI_PCINOHP_AML, ACPI_PCINOHP_SIZEOF); patch_pcinohp(i, pcihp); } } if (bsel) { method = build_alloc_method("DVNT", 2); for (i = 0; i < PCI_SLOT_MAX; i++) { GArray *notify; uint8_t op; if (!test_bit(i, slot_hotplug_enable)) { continue; } notify = build_alloc_array(); op = 0xA0; /* IfOp */ build_append_byte(notify, 0x7B); /* AndOp */ build_append_byte(notify, 0x68); /* Arg0Op */ build_append_int(notify, 0x1U << i); build_append_byte(notify, 0x00); /* NullName */ build_append_byte(notify, 0x86); /* NotifyOp */ build_append_namestring(notify, "S%.02X", PCI_DEVFN(i, 0)); build_append_byte(notify, 0x69); /* Arg1Op */ /* Pack it up */ build_package(notify, op); build_append_array(method, notify); build_free_array(notify); } build_append_and_cleanup_method(bus_table, method); } /* Append PCNT method to notify about events on local and child buses. * Add unconditionally for root since DSDT expects it. */ if (bus_hotplug_support || child->notify_table->len || !bus->parent_dev) { method = build_alloc_method("PCNT", 0); /* If bus supports hotplug select it and notify about local events */ if (bsel) { build_append_byte(method, 0x70); /* StoreOp */ build_append_int(method, qint_get_int(qobject_to_qint(bsel))); build_append_namestring(method, "BNUM"); build_append_namestring(method, "DVNT"); build_append_namestring(method, "PCIU"); build_append_int(method, 1); /* Device Check */ build_append_namestring(method, "DVNT"); build_append_namestring(method, "PCID"); build_append_int(method, 3); /* Eject Request */ } /* Notify about child bus events in any case */ build_append_array(method, child->notify_table); build_append_and_cleanup_method(bus_table, method); /* Append description of child buses */ build_append_array(bus_table, child->device_table); /* Pack it up */ if (bus->parent_dev) { build_extop_package(bus_table, op); } else { build_package(bus_table, op); } /* Append our bus description to parent table */ build_append_array(parent->device_table, bus_table); /* Also tell parent how to notify us, invoking PCNT method. * At the moment this is not needed for root as we have a single root. 
*/ if (bus->parent_dev) { build_append_namestring(parent->notify_table, "^PCNT.S%.02X", bus->parent_dev->devfn); } } qobject_decref(bsel); build_free_array(bus_table); build_pci_bus_state_cleanup(child); g_free(child); }
19,271
qemu
e7d54416cf7a30928a455ddf86ca57d766e9a902
0
static void arm_gicv3_icc_reset(CPUARMState *env, const ARMCPRegInfo *ri) { ARMCPU *cpu; GICv3State *s; GICv3CPUState *c; c = (GICv3CPUState *)env->gicv3state; s = c->gic; cpu = ARM_CPU(c->cpu); /* Initialize to actual HW supported configuration */ kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS, KVM_VGIC_ATTR(ICC_CTLR_EL1, cpu->mp_affinity), &c->icc_ctlr_el1[GICV3_NS], false); c->icc_ctlr_el1[GICV3_S] = c->icc_ctlr_el1[GICV3_NS]; c->icc_pmr_el1 = 0; c->icc_bpr[GICV3_G0] = GIC_MIN_BPR; c->icc_bpr[GICV3_G1] = GIC_MIN_BPR; c->icc_bpr[GICV3_G1NS] = GIC_MIN_BPR; c->icc_sre_el1 = 0x7; memset(c->icc_apr, 0, sizeof(c->icc_apr)); memset(c->icc_igrpen, 0, sizeof(c->icc_igrpen)); }
19,272
qemu
84f1c148da2b35fbb5a436597872765257e8914e
0
static bool tb_cmp(const void *p, const void *d) { const TranslationBlock *tb = p; const struct tb_desc *desc = d; if (tb->pc == desc->pc && tb->page_addr[0] == desc->phys_page1 && tb->cs_base == desc->cs_base && tb->flags == desc->flags && tb->trace_vcpu_dstate == desc->trace_vcpu_dstate && !atomic_read(&tb->invalid)) { /* check next page if needed */ if (tb->page_addr[1] == -1) { return true; } else { tb_page_addr_t phys_page2; target_ulong virt_page2; virt_page2 = (desc->pc & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE; phys_page2 = get_page_addr_code(desc->env, virt_page2); if (tb->page_addr[1] == phys_page2) { return true; } } } return false; }
19,273
qemu
57a6d6d538c596292003d131035dc4f7cb44474d
0
static int vnc_display_listen_addr(VncDisplay *vd, SocketAddress *addr, const char *name, QIOChannelSocket ***lsock, guint **lsock_tag, size_t *nlsock, Error **errp) { *nlsock = 1; *lsock = g_new0(QIOChannelSocket *, 1); *lsock_tag = g_new0(guint, 1); (*lsock)[0] = qio_channel_socket_new(); qio_channel_set_name(QIO_CHANNEL((*lsock)[0]), name); if (qio_channel_socket_listen_sync((*lsock)[0], addr, errp) < 0) { return -1; } (*lsock_tag)[0] = qio_channel_add_watch( QIO_CHANNEL((*lsock)[0]), G_IO_IN, vnc_listen_io, vd, NULL); return 0; }
19,275
qemu
ddf21908961073199f3d186204da4810f2ea150b
0
static SpiceChannelList *qmp_query_spice_channels(void) { SpiceChannelList *cur_item = NULL, *head = NULL; ChannelList *item; QTAILQ_FOREACH(item, &channel_list, link) { SpiceChannelList *chan; char host[NI_MAXHOST], port[NI_MAXSERV]; struct sockaddr *paddr; socklen_t plen; assert(item->info->flags & SPICE_CHANNEL_EVENT_FLAG_ADDR_EXT); chan = g_malloc0(sizeof(*chan)); chan->value = g_malloc0(sizeof(*chan->value)); chan->value->base = g_malloc0(sizeof(*chan->value->base)); paddr = (struct sockaddr *)&item->info->paddr_ext; plen = item->info->plen_ext; getnameinfo(paddr, plen, host, sizeof(host), port, sizeof(port), NI_NUMERICHOST | NI_NUMERICSERV); chan->value->base->host = g_strdup(host); chan->value->base->port = g_strdup(port); chan->value->base->family = inet_netfamily(paddr->sa_family); chan->value->connection_id = item->info->connection_id; chan->value->channel_type = item->info->type; chan->value->channel_id = item->info->id; chan->value->tls = item->info->flags & SPICE_CHANNEL_EVENT_FLAG_TLS; /* XXX: waiting for the qapi to support GSList */ if (!cur_item) { head = cur_item = chan; } else { cur_item->next = chan; cur_item = chan; } } return head; }
19,276
qemu
731de38052b245eab79e417aeac5e1dcebe6437f
0
static int raw_create(const char *filename, QemuOpts *opts, Error **errp) { int fd; int result = 0; int64_t total_size = 0; bool nocow = false; PreallocMode prealloc; char *buf = NULL; Error *local_err = NULL; strstart(filename, "file:", &filename); /* Read out options */ total_size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0), BDRV_SECTOR_SIZE); nocow = qemu_opt_get_bool(opts, BLOCK_OPT_NOCOW, false); buf = qemu_opt_get_del(opts, BLOCK_OPT_PREALLOC); prealloc = qapi_enum_parse(PreallocMode_lookup, buf, PREALLOC_MODE_MAX, PREALLOC_MODE_OFF, &local_err); g_free(buf); if (local_err) { error_propagate(errp, local_err); result = -EINVAL; goto out; } fd = qemu_open(filename, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, 0644); if (fd < 0) { result = -errno; error_setg_errno(errp, -result, "Could not create file"); goto out; } if (nocow) { #ifdef __linux__ /* Set NOCOW flag to solve performance issue on fs like btrfs. * This is an optimisation. The FS_IOC_SETFLAGS ioctl return value * will be ignored since any failure of this operation should not * block the left work. */ int attr; if (ioctl(fd, FS_IOC_GETFLAGS, &attr) == 0) { attr |= FS_NOCOW_FL; ioctl(fd, FS_IOC_SETFLAGS, &attr); } #endif } if (ftruncate(fd, total_size) != 0) { result = -errno; error_setg_errno(errp, -result, "Could not resize file"); goto out_close; } switch (prealloc) { #ifdef CONFIG_POSIX_FALLOCATE case PREALLOC_MODE_FALLOC: /* posix_fallocate() doesn't set errno. */ result = -posix_fallocate(fd, 0, total_size); if (result != 0) { error_setg_errno(errp, -result, "Could not preallocate data for the new file"); } break; #endif case PREALLOC_MODE_FULL: { int64_t num = 0, left = total_size; buf = g_malloc0(65536); while (left > 0) { num = MIN(left, 65536); result = write(fd, buf, num); if (result < 0) { result = -errno; error_setg_errno(errp, -result, "Could not write to the new file"); break; } left -= result; } fsync(fd); g_free(buf); break; } case PREALLOC_MODE_OFF: break; default: result = -EINVAL; error_setg(errp, "Unsupported preallocation mode: %s", PreallocMode_lookup[prealloc]); break; } out_close: if (qemu_close(fd) != 0 && result == 0) { result = -errno; error_setg_errno(errp, -result, "Could not close the new file"); } out: return result; }
19,277
qemu
85b3ed1db5e50b66016ef59ca2afce10e753cbc6
0
main (void) { struct timeval t_m = {0, 0}; struct timezone t_z = {0, 0}; struct timeval t_m1 = {0, 0}; int i; if (gettimeofday (&t_m, &t_z) != 0) err ("gettimeofday"); for (i = 1; i < 10000; i++) if (gettimeofday (&t_m1, NULL) != 0) err ("gettimeofday 1"); else if (t_m1.tv_sec * 1000000 + t_m1.tv_usec != (t_m.tv_sec * 1000000 + t_m.tv_usec + i * 1000)) { fprintf (stderr, "t0 (%ld, %ld), i %d, t1 (%ld, %ld)\n", t_m.tv_sec, t_m.tv_usec, i, t_m1.tv_sec, t_m1.tv_usec); abort (); } if (time (NULL) != t_m1.tv_sec) { fprintf (stderr, "time != gettod\n"); abort (); } printf ("pass\n"); exit (0); }
19,279
qemu
61007b316cd71ee7333ff7a0a749a8949527575f
0
bool bdrv_chain_contains(BlockDriverState *top, BlockDriverState *base) { while (top && top != base) { top = top->backing_hd; } return top != NULL; }
19,280
qemu
a8170e5e97ad17ca169c64ba87ae2f53850dab4c
0
static void bonito_ldma_writel(void *opaque, target_phys_addr_t addr, uint64_t val, unsigned size) { PCIBonitoState *s = opaque; ((uint32_t *)(&s->bonldma))[addr/sizeof(uint32_t)] = val & 0xffffffff; }
19,283
qemu
53cb28cbfea038f8ad50132dc8a684e638c7d48b
0
static uint32_t phys_map_node_alloc(void) { unsigned i; uint32_t ret; ret = next_map.nodes_nb++; assert(ret != PHYS_MAP_NODE_NIL); assert(ret != next_map.nodes_nb_alloc); for (i = 0; i < P_L2_SIZE; ++i) { next_map.nodes[ret][i].skip = 1; next_map.nodes[ret][i].ptr = PHYS_MAP_NODE_NIL; } return ret; }
19,284
qemu
498f21405a286f718a0767c791b7d2db19f4e5bd
0
static bool check_overwrapping_aiocb(BDRVSheepdogState *s, SheepdogAIOCB *aiocb) { SheepdogAIOCB *cb; QLIST_FOREACH(cb, &s->inflight_aiocb_head, aiocb_siblings) { if (AIOCBOverwrapping(aiocb, cb)) { return true; } } QLIST_INSERT_HEAD(&s->inflight_aiocb_head, aiocb, aiocb_siblings); return false; }
19,285
qemu
32951860834f09d1c1a0b81d8d7d5529e2d0e074
0
static MemTxResult gic_cpu_read(GICState *s, int cpu, int offset, uint64_t *data, MemTxAttrs attrs) { switch (offset) { case 0x00: /* Control */ *data = s->cpu_enabled[cpu]; break; case 0x04: /* Priority mask */ *data = s->priority_mask[cpu]; break; case 0x08: /* Binary Point */ if (s->security_extn && !attrs.secure) { /* BPR is banked. Non-secure copy stored in ABPR. */ *data = s->abpr[cpu]; } else { *data = s->bpr[cpu]; } break; case 0x0c: /* Acknowledge */ *data = gic_acknowledge_irq(s, cpu); break; case 0x14: /* Running Priority */ *data = s->running_priority[cpu]; break; case 0x18: /* Highest Pending Interrupt */ *data = s->current_pending[cpu]; break; case 0x1c: /* Aliased Binary Point */ /* GIC v2, no security: ABPR * GIC v1, no security: not implemented (RAZ/WI) * With security extensions, secure access: ABPR (alias of NS BPR) * With security extensions, nonsecure access: RAZ/WI */ if (!gic_has_groups(s) || (s->security_extn && !attrs.secure)) { *data = 0; } else { *data = s->abpr[cpu]; } break; case 0xd0: case 0xd4: case 0xd8: case 0xdc: *data = s->apr[(offset - 0xd0) / 4][cpu]; break; default: qemu_log_mask(LOG_GUEST_ERROR, "gic_cpu_read: Bad offset %x\n", (int)offset); return MEMTX_ERROR; } return MEMTX_OK; }
19,286
qemu
67a0fd2a9bca204d2b39f910a97c7137636a0715
0
static int64_t coroutine_fn vpc_co_get_block_status(BlockDriverState *bs, int64_t sector_num, int nb_sectors, int *pnum) { BDRVVPCState *s = bs->opaque; VHDFooter *footer = (VHDFooter*) s->footer_buf; int64_t start, offset; bool allocated; int n; if (be32_to_cpu(footer->type) == VHD_FIXED) { *pnum = nb_sectors; return BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID | BDRV_BLOCK_DATA | (sector_num << BDRV_SECTOR_BITS); } offset = get_sector_offset(bs, sector_num, 0); start = offset; allocated = (offset != -1); *pnum = 0; do { /* All sectors in a block are contiguous (without using the bitmap) */ n = ROUND_UP(sector_num + 1, s->block_size / BDRV_SECTOR_SIZE) - sector_num; n = MIN(n, nb_sectors); *pnum += n; sector_num += n; nb_sectors -= n; /* *pnum can't be greater than one block for allocated * sectors since there is always a bitmap in between. */ if (allocated) { return BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID | start; } if (nb_sectors == 0) { break; } offset = get_sector_offset(bs, sector_num, 0); } while (offset == -1); return 0; }
19,287
qemu
acf6e5f0962c4be670d4a93ede77423512521876
0
static coroutine_fn int sd_co_writev(BlockDriverState *bs, int64_t sector_num, int nb_sectors, QEMUIOVector *qiov) { SheepdogAIOCB acb; int ret; int64_t offset = (sector_num + nb_sectors) * BDRV_SECTOR_SIZE; BDRVSheepdogState *s = bs->opaque; if (offset > s->inode.vdi_size) { ret = sd_truncate(bs, offset); if (ret < 0) { return ret; } } sd_aio_setup(&acb, s, qiov, sector_num, nb_sectors, AIOCB_WRITE_UDATA); retry: if (check_overlapping_aiocb(s, &acb)) { qemu_co_queue_wait(&s->overlapping_queue); goto retry; } sd_co_rw_vector(&acb); sd_write_done(&acb); QLIST_REMOVE(&acb, aiocb_siblings); qemu_co_queue_restart_all(&s->overlapping_queue); return acb.ret; }
19,288
qemu
1c275925bfbbc2de84a8f0e09d1dd70bbefb6da3
0
long do_rt_sigreturn(CPUS390XState *env) { rt_sigframe *frame; abi_ulong frame_addr = env->regs[15]; qemu_log("%s: frame_addr 0x%llx\n", __FUNCTION__, (unsigned long long)frame_addr); sigset_t set; if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { goto badframe; } target_to_host_sigset(&set, &frame->uc.tuc_sigmask); sigprocmask(SIG_SETMASK, &set, NULL); /* ~_BLOCKABLE? */ if (restore_sigregs(env, &frame->uc.tuc_mcontext)) { goto badframe; } if (do_sigaltstack(frame_addr + offsetof(rt_sigframe, uc.tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT) { goto badframe; } unlock_user_struct(frame, frame_addr, 0); return env->regs[2]; badframe: unlock_user_struct(frame, frame_addr, 0); force_sig(TARGET_SIGSEGV); return 0; }
19,289
qemu
61007b316cd71ee7333ff7a0a749a8949527575f
0
static bool tracked_request_overlaps(BdrvTrackedRequest *req, int64_t offset, unsigned int bytes) { /* aaaa bbbb */ if (offset >= req->overlap_offset + req->overlap_bytes) { return false; } /* bbbb aaaa */ if (req->overlap_offset >= offset + bytes) { return false; } return true; }
19,292
FFmpeg
1c6183233d56fb27a4a154e7e64ecab98bd877f1
0
static av_cold void init_mv_table(MVTable *tab) { int i, x, y; tab->table_mv_index = av_malloc(sizeof(uint16_t) * 4096); /* mark all entries as not used */ for(i=0;i<4096;i++) tab->table_mv_index[i] = tab->n; for(i=0;i<tab->n;i++) { x = tab->table_mvx[i]; y = tab->table_mvy[i]; tab->table_mv_index[(x << 6) | y] = i; } }
19,293
qemu
09e68369a88d7de0f988972bf28eec1b80cc47f9
0
static const QListEntry *qmp_input_push(QmpInputVisitor *qiv, QObject *obj, void *qapi, Error **errp) { GHashTable *h; StackObject *tos = g_new0(StackObject, 1); assert(obj); tos->obj = obj; tos->qapi = qapi; if (qiv->strict && qobject_type(obj) == QTYPE_QDICT) { h = g_hash_table_new(g_str_hash, g_str_equal); qdict_iter(qobject_to_qdict(obj), qdict_add_key, h); tos->h = h; } else if (qobject_type(obj) == QTYPE_QLIST) { tos->entry = qlist_first(qobject_to_qlist(obj)); } QSLIST_INSERT_HEAD(&qiv->stack, tos, node); return tos->entry; }
19,295
qemu
c2b38b277a7882a592f4f2ec955084b2b756daaa
0
int aio_bh_poll(AioContext *ctx) { QEMUBH *bh, **bhp, *next; int ret; bool deleted = false; qemu_lockcnt_inc(&ctx->list_lock); ret = 0; for (bh = atomic_rcu_read(&ctx->first_bh); bh; bh = next) { next = atomic_rcu_read(&bh->next); /* The atomic_xchg is paired with the one in qemu_bh_schedule. The * implicit memory barrier ensures that the callback sees all writes * done by the scheduling thread. It also ensures that the scheduling * thread sees the zero before bh->cb has run, and thus will call * aio_notify again if necessary. */ if (atomic_xchg(&bh->scheduled, 0)) { /* Idle BHs don't count as progress */ if (!bh->idle) { ret = 1; } bh->idle = 0; aio_bh_call(bh); } if (bh->deleted) { deleted = true; } } /* remove deleted bhs */ if (!deleted) { qemu_lockcnt_dec(&ctx->list_lock); return ret; } if (qemu_lockcnt_dec_and_lock(&ctx->list_lock)) { bhp = &ctx->first_bh; while (*bhp) { bh = *bhp; if (bh->deleted && !bh->scheduled) { *bhp = bh->next; g_free(bh); } else { bhp = &bh->next; } } qemu_lockcnt_unlock(&ctx->list_lock); } return ret; }
19,296
qemu
151c5693258f594541fa9ea585547a0a8dd68abc
0
static void test_dispatch_cmd_error(void) { QDict *req = qdict_new(); QObject *resp; qdict_put_obj(req, "execute", QOBJECT(qstring_from_str("user_def_cmd2"))); resp = qmp_dispatch(QOBJECT(req)); assert(resp != NULL); assert(qdict_haskey(qobject_to_qdict(resp), "error")); g_print("\nresp: %s\n", qstring_get_str(qobject_to_json_pretty(resp))); qobject_decref(resp); QDECREF(req); }
19,297
qemu
61007b316cd71ee7333ff7a0a749a8949527575f
0
int bdrv_truncate(BlockDriverState *bs, int64_t offset) { BlockDriver *drv = bs->drv; int ret; if (!drv) return -ENOMEDIUM; if (!drv->bdrv_truncate) return -ENOTSUP; if (bs->read_only) return -EACCES; ret = drv->bdrv_truncate(bs, offset); if (ret == 0) { ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS); bdrv_dirty_bitmap_truncate(bs); if (bs->blk) { blk_dev_resize_cb(bs->blk); } } return ret; }
19,298
qemu
a8170e5e97ad17ca169c64ba87ae2f53850dab4c
0
static uint64_t dbdma_read(void *opaque, target_phys_addr_t addr, unsigned size) { uint32_t value; int channel = addr >> DBDMA_CHANNEL_SHIFT; DBDMAState *s = opaque; DBDMA_channel *ch = &s->channels[channel]; int reg = (addr - (channel << DBDMA_CHANNEL_SHIFT)) >> 2; value = ch->regs[reg]; DBDMA_DPRINTF("readl 0x" TARGET_FMT_plx " => 0x%08x\n", addr, value); DBDMA_DPRINTF("channel 0x%x reg 0x%x\n", (uint32_t)addr >> DBDMA_CHANNEL_SHIFT, reg); switch(reg) { case DBDMA_CONTROL: value = 0; break; case DBDMA_STATUS: case DBDMA_CMDPTR_LO: case DBDMA_INTR_SEL: case DBDMA_BRANCH_SEL: case DBDMA_WAIT_SEL: /* nothing to do */ break; case DBDMA_XFER_MODE: case DBDMA_CMDPTR_HI: case DBDMA_DATA2PTR_HI: case DBDMA_DATA2PTR_LO: case DBDMA_ADDRESS_HI: case DBDMA_BRANCH_ADDR_HI: /* unused */ value = 0; break; case DBDMA_RES1: case DBDMA_RES2: case DBDMA_RES3: case DBDMA_RES4: /* reserved */ break; } return value; }
19,299
qemu
02b07434bed8360715198b4cbfdfebd17f7cac32
0
static int pxb_dev_initfn(PCIDevice *dev) { PXBDev *pxb = PXB_DEV(dev); DeviceState *ds, *bds; PCIBus *bus; const char *dev_name = NULL; if (pxb->numa_node != NUMA_NODE_UNASSIGNED && pxb->numa_node >= nb_numa_nodes) { error_report("Illegal numa node %d.", pxb->numa_node); return -EINVAL; } if (dev->qdev.id && *dev->qdev.id) { dev_name = dev->qdev.id; } ds = qdev_create(NULL, TYPE_PXB_HOST); bus = pci_bus_new(ds, "pxb-internal", NULL, NULL, 0, TYPE_PXB_BUS); bus->parent_dev = dev; bus->address_space_mem = dev->bus->address_space_mem; bus->address_space_io = dev->bus->address_space_io; bus->map_irq = pxb_map_irq_fn; bds = qdev_create(BUS(bus), "pci-bridge"); bds->id = dev_name; qdev_prop_set_uint8(bds, PCI_BRIDGE_DEV_PROP_CHASSIS_NR, pxb->bus_nr); qdev_prop_set_bit(bds, PCI_BRIDGE_DEV_PROP_SHPC, false); PCI_HOST_BRIDGE(ds)->bus = bus; if (pxb_register_bus(dev, bus)) { return -EINVAL; } qdev_init_nofail(ds); qdev_init_nofail(bds); pci_word_test_and_set_mask(dev->config + PCI_STATUS, PCI_STATUS_66MHZ | PCI_STATUS_FAST_BACK); pci_config_set_class(dev->config, PCI_CLASS_BRIDGE_HOST); pxb_dev_list = g_list_insert_sorted(pxb_dev_list, pxb, pxb_compare); return 0; }
19,300
qemu
5858dd1801883309bdd208d72ddb81c4e9fee30c
0
static bool blit_region_is_unsafe(struct CirrusVGAState *s, int32_t pitch, int32_t addr) { if (!pitch) { return true; } if (pitch < 0) { int64_t min = addr + ((int64_t)s->cirrus_blt_height-1) * pitch; int32_t max = addr + s->cirrus_blt_width; if (min < 0 || max > s->vga.vram_size) { return true; } } else { int64_t max = addr + ((int64_t)s->cirrus_blt_height-1) * pitch + s->cirrus_blt_width; if (max > s->vga.vram_size) { return true; } } return false; }
19,302
qemu
d488ddd8352e1e25f13e9c1f644dd1d7ebc0b342
0
int cpu_load(QEMUFile *f, void *opaque, int version_id) { CPUCRISState *env = opaque; int i; int s; int mmu; for (i = 0; i < 16; i++) env->regs[i] = qemu_get_be32(f); for (i = 0; i < 16; i++) env->pregs[i] = qemu_get_be32(f); env->pc = qemu_get_be32(f); env->ksp = qemu_get_be32(f); env->dslot = qemu_get_be32(f); env->btaken = qemu_get_be32(f); env->btarget = qemu_get_be32(f); env->cc_op = qemu_get_be32(f); env->cc_mask = qemu_get_be32(f); env->cc_dest = qemu_get_be32(f); env->cc_src = qemu_get_be32(f); env->cc_result = qemu_get_be32(f); env->cc_size = qemu_get_be32(f); env->cc_x = qemu_get_be32(f); for (s = 0; s < 4; s++) { for (i = 0; i < 16; i++) env->sregs[s][i] = qemu_get_be32(f); } env->mmu_rand_lfsr = qemu_get_be32(f); for (mmu = 0; mmu < 2; mmu++) { for (s = 0; s < 4; s++) { for (i = 0; i < 16; i++) { env->tlbsets[mmu][s][i].lo = qemu_get_be32(f); env->tlbsets[mmu][s][i].hi = qemu_get_be32(f); } } } return 0; }
19,303
FFmpeg
f929ab0569ff31ed5a59b0b0adb7ce09df3fca39
0
static int xvid_strip_vol_header(AVCodecContext *avctx, AVPacket *pkt, unsigned int header_len, unsigned int frame_len) { int vo_len = 0, i; for( i = 0; i < header_len - 3; i++ ) { if( pkt->data[i] == 0x00 && pkt->data[i+1] == 0x00 && pkt->data[i+2] == 0x01 && pkt->data[i+3] == 0xB6 ) { vo_len = i; break; } } if( vo_len > 0 ) { /* We need to store the header, so extract it */ if( avctx->extradata == NULL ) { avctx->extradata = av_malloc(vo_len); memcpy(avctx->extradata, pkt->data, vo_len); avctx->extradata_size = vo_len; } /* Less dangerous now, memmove properly copies the two chunks of overlapping data */ memmove(pkt->data, &pkt->data[vo_len], frame_len - vo_len); pkt->size = frame_len - vo_len; } return 0; }
19,304
qemu
fd56e0612b6454a282fa6a953fdb09281a98c589
0
static int vfio_setup_pcie_cap(VFIOPCIDevice *vdev, int pos, uint8_t size, Error **errp) { uint16_t flags; uint8_t type; flags = pci_get_word(vdev->pdev.config + pos + PCI_CAP_FLAGS); type = (flags & PCI_EXP_FLAGS_TYPE) >> 4; if (type != PCI_EXP_TYPE_ENDPOINT && type != PCI_EXP_TYPE_LEG_END && type != PCI_EXP_TYPE_RC_END) { error_setg(errp, "assignment of PCIe type 0x%x " "devices is not currently supported", type); return -EINVAL; } if (!pci_bus_is_express(vdev->pdev.bus)) { PCIBus *bus = vdev->pdev.bus; PCIDevice *bridge; /* * Traditionally PCI device assignment exposes the PCIe capability * as-is on non-express buses. The reason being that some drivers * simply assume that it's there, for example tg3. However when * we're running on a native PCIe machine type, like Q35, we need * to hide the PCIe capability. The reason for this is twofold; * first Windows guests get a Code 10 error when the PCIe capability * is exposed in this configuration. Therefore express devices won't * work at all unless they're attached to express buses in the VM. * Second, a native PCIe machine introduces the possibility of fine * granularity IOMMUs supporting both translation and isolation. * Guest code to discover the IOMMU visibility of a device, such as * IOMMU grouping code on Linux, is very aware of device types and * valid transitions between bus types. An express device on a non- * express bus is not a valid combination on bare metal systems. * * Drivers that require a PCIe capability to make the device * functional are simply going to need to have their devices placed * on a PCIe bus in the VM. */ while (!pci_bus_is_root(bus)) { bridge = pci_bridge_get_device(bus); bus = bridge->bus; } if (pci_bus_is_express(bus)) { return 0; } } else if (pci_bus_is_root(vdev->pdev.bus)) { /* * On a Root Complex bus Endpoints become Root Complex Integrated * Endpoints, which changes the type and clears the LNK & LNK2 fields. */ if (type == PCI_EXP_TYPE_ENDPOINT) { vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS, PCI_EXP_TYPE_RC_END << 4, PCI_EXP_FLAGS_TYPE); /* Link Capabilities, Status, and Control goes away */ if (size > PCI_EXP_LNKCTL) { vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP, 0, ~0); vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0); vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA, 0, ~0); #ifndef PCI_EXP_LNKCAP2 #define PCI_EXP_LNKCAP2 44 #endif #ifndef PCI_EXP_LNKSTA2 #define PCI_EXP_LNKSTA2 50 #endif /* Link 2 Capabilities, Status, and Control goes away */ if (size > PCI_EXP_LNKCAP2) { vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP2, 0, ~0); vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL2, 0, ~0); vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA2, 0, ~0); } } } else if (type == PCI_EXP_TYPE_LEG_END) { /* * Legacy endpoints don't belong on the root complex. Windows * seems to be happier with devices if we skip the capability. */ return 0; } } else { /* * Convert Root Complex Integrated Endpoints to regular endpoints. * These devices don't support LNK/LNK2 capabilities, so make them up. */ if (type == PCI_EXP_TYPE_RC_END) { vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS, PCI_EXP_TYPE_ENDPOINT << 4, PCI_EXP_FLAGS_TYPE); vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP, PCI_EXP_LNK_MLW_1 | PCI_EXP_LNK_LS_25, ~0); vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0); } /* Mark the Link Status bits as emulated to allow virtual negotiation */ vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA, pci_get_word(vdev->pdev.config + pos + PCI_EXP_LNKSTA), PCI_EXP_LNKCAP_MLW | PCI_EXP_LNKCAP_SLS); } /* * Intel 82599 SR-IOV VFs report an invalid PCIe capability version 0 * (Niantic errata #35) causing Windows to error with a Code 10 for the * device on Q35. Fixup any such devices to report version 1. If we * were to remove the capability entirely the guest would lose extended * config space. */ if ((flags & PCI_EXP_FLAGS_VERS) == 0) { vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS, 1, PCI_EXP_FLAGS_VERS); } pos = pci_add_capability(&vdev->pdev, PCI_CAP_ID_EXP, pos, size, errp); if (pos < 0) { return pos; } vdev->pdev.exp.exp_cap = pos; return pos; }
19,305
qemu
5861a33898bbddfd1a80c2e202cb9352e3b1ba62
0
static void IRQ_local_pipe (openpic_t *opp, int n_CPU, int n_IRQ) { IRQ_dst_t *dst; IRQ_src_t *src; int priority; dst = &opp->dst[n_CPU]; src = &opp->src[n_IRQ]; priority = IPVP_PRIORITY(src->ipvp); if (priority <= dst->pctp) { /* Too low priority */ DPRINTF("%s: IRQ %d has too low priority on CPU %d\n", __func__, n_IRQ, n_CPU); return; } if (IRQ_testbit(&dst->raised, n_IRQ)) { /* Interrupt miss */ DPRINTF("%s: IRQ %d was missed on CPU %d\n", __func__, n_IRQ, n_CPU); return; } set_bit(&src->ipvp, IPVP_ACTIVITY); IRQ_setbit(&dst->raised, n_IRQ); if (priority < dst->raised.priority) { /* A higher priority IRQ is already raised */ DPRINTF("%s: IRQ %d is hidden by raised IRQ %d on CPU %d\n", __func__, n_IRQ, dst->raised.next, n_CPU); return; } IRQ_get_next(opp, &dst->raised); if (IRQ_get_next(opp, &dst->servicing) != -1 && priority <= dst->servicing.priority) { DPRINTF("%s: IRQ %d is hidden by servicing IRQ %d on CPU %d\n", __func__, n_IRQ, dst->servicing.next, n_CPU); /* Already servicing a higher priority IRQ */ return; } DPRINTF("Raise OpenPIC INT output cpu %d irq %d\n", n_CPU, n_IRQ); opp->irq_raise(opp, n_CPU, src); }
19,306
qemu
f9c6a7f1395c6d88a3bb1a0cb48811994709966e
0
void armv7m_nvic_complete_irq(void *opaque, int irq) { nvic_state *s = (nvic_state *)opaque; if (irq >= 16) irq += 16; gic_complete_irq(&s->gic, 0, irq); }
19,308
qemu
a3276f786c84d3410be5bc3a4b3e5036745e5a90
0
static void amdvi_iommu_notify_flag_changed(MemoryRegion *iommu, IOMMUNotifierFlag old, IOMMUNotifierFlag new) { AMDVIAddressSpace *as = container_of(iommu, AMDVIAddressSpace, iommu); hw_error("device %02x.%02x.%x requires iommu notifier which is not " "currently supported", as->bus_num, PCI_SLOT(as->devfn), PCI_FUNC(as->devfn)); }
19,309
qemu
c7fe4b12984a36b87438080e48aff5e8f6d48ac9
0
static int kvm_put_tscdeadline_msr(X86CPU *cpu) { CPUX86State *env = &cpu->env; struct { struct kvm_msrs info; struct kvm_msr_entry entries[1]; } msr_data; struct kvm_msr_entry *msrs = msr_data.entries; if (!has_msr_tsc_deadline) { return 0; } kvm_msr_entry_set(&msrs[0], MSR_IA32_TSCDEADLINE, env->tsc_deadline); msr_data.info.nmsrs = 1; return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, &msr_data); }
19,310
qemu
a8170e5e97ad17ca169c64ba87ae2f53850dab4c
0
static void m5208_sys_write(void *opaque, target_phys_addr_t addr, uint64_t value, unsigned size) { hw_error("m5208_sys_write: Bad offset 0x%x\n", (int)addr); }
19,311
qemu
299b520cd4092be3c53f8380b81315c33927d9d3
1
static int get_physical_address_data(CPUState *env, target_phys_addr_t *physical, int *prot, target_ulong address, int rw, int is_user) { unsigned int i; uint64_t context; int is_nucleus; if ((env->lsu & DMMU_E) == 0) { /* DMMU disabled */ *physical = ultrasparc_truncate_physical(address); *prot = PAGE_READ | PAGE_WRITE; return 0; } context = env->dmmu.mmu_primary_context & 0x1fff; is_nucleus = env->tl > 0; for (i = 0; i < 64; i++) { // ctx match, vaddr match, valid? if (ultrasparc_tag_match(&env->dtlb[i], address, context, physical, is_nucleus)) { // access ok? if (((env->dtlb[i].tte & 0x4) && is_user) || (!(env->dtlb[i].tte & 0x2) && (rw == 1))) { uint8_t fault_type = 0; if ((env->dtlb[i].tte & 0x4) && is_user) { fault_type |= 1; /* privilege violation */ } if (env->dmmu.sfsr & 1) /* Fault status register */ env->dmmu.sfsr = 2; /* overflow (not read before another fault) */ env->dmmu.sfsr |= (is_user << 3) | ((rw == 1) << 2) | 1; env->dmmu.sfsr |= (fault_type << 7); env->dmmu.sfar = address; /* Fault address register */ env->exception_index = TT_DFAULT; #ifdef DEBUG_MMU printf("DFAULT at 0x%" PRIx64 "\n", address); #endif return 1; } *prot = PAGE_READ; if (env->dtlb[i].tte & 0x2) *prot |= PAGE_WRITE; TTE_SET_USED(env->dtlb[i].tte); return 0; } } #ifdef DEBUG_MMU printf("DMISS at 0x%" PRIx64 "\n", address); #endif env->dmmu.tag_access = (address & ~0x1fffULL) | context; env->exception_index = TT_DMISS; return 1; }
19,312
qemu
1d06cb7ab93f879ac25c9f5ef1d1ac8d97a42dfc
1
static void sd_response_r1_make(SDState *sd, uint8_t *response) { uint32_t status = sd->card_status; /* Clear the "clear on read" status bits (except APP_CMD) */ sd->card_status &= ~CARD_STATUS_C | APP_CMD; response[0] = (status >> 24) & 0xff; response[1] = (status >> 16) & 0xff; response[2] = (status >> 8) & 0xff; response[3] = (status >> 0) & 0xff; }
19,313
FFmpeg
f78cd0c243b9149c7f604ecf1006d78e344aa6ca
1
void FUNC(ff_simple_idct_add)(uint8_t *dest_, int line_size, DCTELEM *block) { pixel *dest = (pixel *)dest_; int i; line_size /= sizeof(pixel); for (i = 0; i < 8; i++) FUNC(idctRowCondDC)(block + i*8); for (i = 0; i < 8; i++) FUNC(idctSparseColAdd)(dest + i, line_size, block + i); }
19,314
qemu
d036bb215e0ac1d1fd467239f1d3b7d904cac90a
1
static void pci_set_irq(void *opaque, int irq_num, int level) { PCIDevice *pci_dev = opaque; PCIBus *bus; int change; change = level - pci_dev->irq_state[irq_num]; if (!change) return; pci_dev->irq_state[irq_num] = level; for (;;) { bus = pci_dev->bus; irq_num = bus->map_irq(pci_dev, irq_num); if (bus->set_irq) break; pci_dev = bus->parent_dev; } bus->irq_count[irq_num] += change; bus->set_irq(bus->irq_opaque, irq_num, bus->irq_count[irq_num] != 0); }
19,315
FFmpeg
55d7371fe0c44c025eb0e75215e0685870f31874
1
static int setup_partitions(VP8Context *s, const uint8_t *buf, int buf_size) { const uint8_t *sizes = buf; int i; s->num_coeff_partitions = 1 << vp8_rac_get_uint(&s->c, 2); buf += 3 * (s->num_coeff_partitions - 1); buf_size -= 3 * (s->num_coeff_partitions - 1); if (buf_size < 0) return -1; for (i = 0; i < s->num_coeff_partitions - 1; i++) { int size = AV_RL24(sizes + 3 * i); if (buf_size - size < 0) return -1; ff_vp56_init_range_decoder(&s->coeff_partition[i], buf, size); buf += size; buf_size -= size; } ff_vp56_init_range_decoder(&s->coeff_partition[i], buf, buf_size); return 0; }
19,316
FFmpeg
21234c835d2d003d390d462b6e1b2622e7b02c39
1
static int load_data(AVFilterContext *ctx, int azim, int elev, float radius) { struct SOFAlizerContext *s = ctx->priv; const int n_samples = s->sofa.n_samples; int n_conv = s->n_conv; /* no. channels to convolve */ int n_fft = s->n_fft; int delay_l[16]; /* broadband delay for each IR */ int delay_r[16]; int nb_input_channels = ctx->inputs[0]->channels; /* no. input channels */ float gain_lin = expf((s->gain - 3 * nb_input_channels) / 20 * M_LN10); /* gain - 3dB/channel */ FFTComplex *data_hrtf_l = NULL; FFTComplex *data_hrtf_r = NULL; FFTComplex *fft_in_l = NULL; FFTComplex *fft_in_r = NULL; float *data_ir_l = NULL; float *data_ir_r = NULL; int offset = 0; /* used for faster pointer arithmetics in for-loop */ int m[16]; /* measurement index m of IR closest to required source positions */ int i, j, azim_orig = azim, elev_orig = elev; if (!s->sofa.ncid) { /* if an invalid SOFA file has been selected */ av_log(ctx, AV_LOG_ERROR, "Selected SOFA file is invalid. Please select valid SOFA file.\n"); return AVERROR_INVALIDDATA; } if (s->type == TIME_DOMAIN) { s->temp_src[0] = av_calloc(FFALIGN(n_samples, 16), sizeof(float)); s->temp_src[1] = av_calloc(FFALIGN(n_samples, 16), sizeof(float)); /* get temporary IR for L and R channel */ data_ir_l = av_malloc_array(n_conv * n_samples, sizeof(*data_ir_l)); data_ir_r = av_malloc_array(n_conv * n_samples, sizeof(*data_ir_r)); if (!data_ir_r || !data_ir_l || !s->temp_src[0] || !s->temp_src[1]) { av_free(data_ir_l); av_free(data_ir_r); return AVERROR(ENOMEM); } } else { /* get temporary HRTF memory for L and R channel */ data_hrtf_l = av_malloc_array(n_fft, sizeof(*data_hrtf_l) * n_conv); data_hrtf_r = av_malloc_array(n_fft, sizeof(*data_hrtf_r) * n_conv); if (!data_hrtf_r || !data_hrtf_l) { av_free(data_hrtf_l); av_free(data_hrtf_r); return AVERROR(ENOMEM); } } for (i = 0; i < s->n_conv; i++) { /* load and store IRs and corresponding delays */ azim = (int)(s->speaker_azim[i] + azim_orig) % 360; elev = (int)(s->speaker_elev[i] + elev_orig) % 90; /* get id of IR closest to desired position */ m[i] = find_m(s, azim, elev, radius); /* load the delays associated with the current IRs */ delay_l[i] = *(s->sofa.data_delay + 2 * m[i]); delay_r[i] = *(s->sofa.data_delay + 2 * m[i] + 1); if (s->type == TIME_DOMAIN) { offset = i * n_samples; /* no. samples already written */ for (j = 0; j < n_samples; j++) { /* load reversed IRs of the specified source position * sample-by-sample for left and right ear; and apply gain */ *(data_ir_l + offset + j) = /* left channel */ *(s->sofa.data_ir + 2 * m[i] * n_samples + n_samples - 1 - j) * gain_lin; *(data_ir_r + offset + j) = /* right channel */ *(s->sofa.data_ir + 2 * m[i] * n_samples + n_samples - 1 - j + n_samples) * gain_lin; } } else { fft_in_l = av_calloc(n_fft, sizeof(*fft_in_l)); fft_in_r = av_calloc(n_fft, sizeof(*fft_in_r)); if (!fft_in_l || !fft_in_r) { av_free(data_hrtf_l); av_free(data_hrtf_r); av_free(fft_in_l); av_free(fft_in_r); return AVERROR(ENOMEM); } offset = i * n_fft; /* no. samples already written */ for (j = 0; j < n_samples; j++) { /* load non-reversed IRs of the specified source position * sample-by-sample and apply gain, * L channel is loaded to real part, R channel to imag part, * IRs are shifted by L and R delay */ fft_in_l[delay_l[i] + j].re = /* left channel */ *(s->sofa.data_ir + 2 * m[i] * n_samples + j) * gain_lin; fft_in_r[delay_r[i] + j].re = /* right channel */ *(s->sofa.data_ir + (2 * m[i] + 1) * n_samples + j) * gain_lin; } /* actually transform to frequency domain (IRs -> HRTFs) */ av_fft_permute(s->fft[0], fft_in_l); av_fft_calc(s->fft[0], fft_in_l); memcpy(data_hrtf_l + offset, fft_in_l, n_fft * sizeof(*fft_in_l)); av_fft_permute(s->fft[0], fft_in_r); av_fft_calc(s->fft[0], fft_in_r); memcpy(data_hrtf_r + offset, fft_in_r, n_fft * sizeof(*fft_in_r)); } av_log(ctx, AV_LOG_DEBUG, "Index: %d, Azimuth: %f, Elevation: %f, Radius: %f of SOFA file.\n", m[i], *(s->sofa.sp_a + m[i]), *(s->sofa.sp_e + m[i]), *(s->sofa.sp_r + m[i])); } if (s->type == TIME_DOMAIN) { /* copy IRs and delays to allocated memory in the SOFAlizerContext struct: */ memcpy(s->data_ir[0], data_ir_l, sizeof(float) * n_conv * n_samples); memcpy(s->data_ir[1], data_ir_r, sizeof(float) * n_conv * n_samples); av_freep(&data_ir_l); /* free temporary IR memory */ av_freep(&data_ir_r); } else { s->data_hrtf[0] = av_malloc_array(n_fft * s->n_conv, sizeof(FFTComplex)); s->data_hrtf[1] = av_malloc_array(n_fft * s->n_conv, sizeof(FFTComplex)); if (!s->data_hrtf[0] || !s->data_hrtf[1]) { av_freep(&data_hrtf_l); av_freep(&data_hrtf_r); av_freep(&fft_in_l); av_freep(&fft_in_r); return AVERROR(ENOMEM); /* memory allocation failed */ } memcpy(s->data_hrtf[0], data_hrtf_l, /* copy HRTF data to */ sizeof(FFTComplex) * n_conv * n_fft); /* filter struct */ memcpy(s->data_hrtf[1], data_hrtf_r, sizeof(FFTComplex) * n_conv * n_fft); av_freep(&data_hrtf_l); /* free temporary HRTF memory */ av_freep(&data_hrtf_r); av_freep(&fft_in_l); /* free temporary FFT memory */ av_freep(&fft_in_r); } memcpy(s->delay[0], &delay_l[0], sizeof(int) * s->n_conv); memcpy(s->delay[1], &delay_r[0], sizeof(int) * s->n_conv); return 0; }
19,317
FFmpeg
e273dade78943e22b71d0ddb67cd0d737fc26edf
1
static int decode_555(GetByteContext *gB, uint16_t *dst, int stride, int keyframe, int w, int h) { int last_symbol = 0, repeat = 0, prev_avail = 0; if (!keyframe) { int x, y, endx, endy, t; #define READ_PAIR(a, b) \ a = bytestream2_get_byte(gB) << 4; \ t = bytestream2_get_byte(gB); \ a |= t >> 4; \ b = (t & 0xF) << 8; \ b |= bytestream2_get_byte(gB); \ READ_PAIR(x, endx) READ_PAIR(y, endy) if (endx >= w || endy >= h || x > endx || y > endy) return AVERROR_INVALIDDATA; dst += x + stride * y; w = endx - x + 1; h = endy - y + 1; if (y) prev_avail = 1; } do { uint16_t *p = dst; do { if (repeat-- < 1) { int b = bytestream2_get_byte(gB); if (b < 128) last_symbol = b << 8 | bytestream2_get_byte(gB); else if (b > 129) { repeat = 0; while (b-- > 130) repeat = (repeat << 8) + bytestream2_get_byte(gB) + 1; if (last_symbol == -2) { int skip = FFMIN((unsigned)repeat, dst + w - p); repeat -= skip; p += skip; } } else last_symbol = 127 - b; } if (last_symbol >= 0) *p = last_symbol; else if (last_symbol == -1 && prev_avail) *p = *(p - stride); } while (++p < dst + w); dst += stride; prev_avail = 1; } while (--h); return 0; }
19,318
FFmpeg
e0706e9cc8f30a8242d2b140edace7bf76170506
0
static int get_rice_un(GetBitContext *gb, int k) { unsigned int v = get_unary(gb, 1, 128); return (v << k) | get_bits_long(gb, k); }
19,319
FFmpeg
bf00a73ace9b1aba790b75dcb26d43adfceb769f
0
static void qmf_32_subbands(DCAContext * s, int chans, float samples_in[32][8], float *samples_out, float scale) { const float *prCoeff; int i; int sb_act = s->subband_activity[chans]; int subindex; scale *= sqrt(1/8.0); /* Select filter */ if (!s->multirate_inter) /* Non-perfect reconstruction */ prCoeff = fir_32bands_nonperfect; else /* Perfect reconstruction */ prCoeff = fir_32bands_perfect; /* Reconstructed channel sample index */ for (subindex = 0; subindex < 8; subindex++) { /* Load in one sample from each subband and clear inactive subbands */ for (i = 0; i < sb_act; i++){ uint32_t v = AV_RN32A(&samples_in[i][subindex]) ^ ((i-1)&2)<<30; AV_WN32A(&s->raXin[i], v); } for (; i < 32; i++) s->raXin[i] = 0.0; s->synth.synth_filter_float(&s->imdct, s->subband_fir_hist[chans], &s->hist_index[chans], s->subband_fir_noidea[chans], prCoeff, samples_out, s->raXin, scale); samples_out+= 32; } }
19,320
qemu
bd269ebc82fbaa5fe7ce5bc7c1770ac8acecd884
0
static int vnc_display_connect(VncDisplay *vd, SocketAddressLegacy **saddr, size_t nsaddr, SocketAddressLegacy **wsaddr, size_t nwsaddr, Error **errp) { /* connect to viewer */ QIOChannelSocket *sioc = NULL; if (nwsaddr != 0) { error_setg(errp, "Cannot use websockets in reverse mode"); return -1; } if (nsaddr != 1) { error_setg(errp, "Expected a single address in reverse mode"); return -1; } /* TODO SOCKET_ADDRESS_LEGACY_KIND_FD when fd has AF_UNIX */ vd->is_unix = saddr[0]->type == SOCKET_ADDRESS_LEGACY_KIND_UNIX; sioc = qio_channel_socket_new(); qio_channel_set_name(QIO_CHANNEL(sioc), "vnc-reverse"); if (qio_channel_socket_connect_sync(sioc, saddr[0], errp) < 0) { return -1; } vnc_connect(vd, sioc, false, false); object_unref(OBJECT(sioc)); return 0; }
19,321
FFmpeg
cc276c85d15272df6e44fb3252657a43cbd49555
0
int64_t av_get_channel_layout(const char *name) { int i = 0; do { if (!strcmp(channel_layout_map[i].name, name)) return channel_layout_map[i].layout; i++; } while (channel_layout_map[i].name); return 0; }
19,323
qemu
faabdbb792889bf011a593578d1de51e14616bb7
0
static DevicePropertyInfo *make_device_property_info(ObjectClass *klass, const char *name, const char *default_type, const char *description) { DevicePropertyInfo *info; Property *prop; do { for (prop = DEVICE_CLASS(klass)->props; prop && prop->name; prop++) { if (strcmp(name, prop->name) != 0) { continue; } /* * TODO Properties without a parser are just for dirty hacks. * qdev_prop_ptr is the only such PropertyInfo. It's marked * for removal. This conditional should be removed along with * it. */ if (!prop->info->set) { return NULL; /* no way to set it, don't show */ } info = g_malloc0(sizeof(*info)); info->name = g_strdup(prop->name); info->type = g_strdup(prop->info->name); info->has_description = !!prop->info->description; info->description = g_strdup(prop->info->description); return info; } klass = object_class_get_parent(klass); } while (klass != object_class_by_name(TYPE_DEVICE)); /* Not a qdev property, use the default type */ info = g_malloc0(sizeof(*info)); info->name = g_strdup(name); info->type = g_strdup(default_type); info->has_description = !!description; info->description = g_strdup(description); return info; }
19,324
qemu
c16547326988cc321c9bff43ed91cbe753e52892
0
static void sun4uv_init(MemoryRegion *address_space_mem, QEMUMachineInitArgs *args, const struct hwdef *hwdef) { SPARCCPU *cpu; M48t59State *nvram; unsigned int i; uint64_t initrd_addr, initrd_size, kernel_addr, kernel_size, kernel_entry; PCIBus *pci_bus, *pci_bus2, *pci_bus3; ISABus *isa_bus; qemu_irq *ivec_irqs, *pbm_irqs; DriveInfo *hd[MAX_IDE_BUS * MAX_IDE_DEVS]; DriveInfo *fd[MAX_FD]; FWCfgState *fw_cfg; /* init CPUs */ cpu = cpu_devinit(args->cpu_model, hwdef); /* set up devices */ ram_init(0, args->ram_size); prom_init(hwdef->prom_addr, bios_name); ivec_irqs = qemu_allocate_irqs(cpu_set_ivec_irq, cpu, IVEC_MAX); pci_bus = pci_apb_init(APB_SPECIAL_BASE, APB_MEM_BASE, ivec_irqs, &pci_bus2, &pci_bus3, &pbm_irqs); pci_vga_init(pci_bus); // XXX Should be pci_bus3 isa_bus = pci_ebus_init(pci_bus, -1, pbm_irqs); i = 0; if (hwdef->console_serial_base) { serial_mm_init(address_space_mem, hwdef->console_serial_base, 0, NULL, 115200, serial_hds[i], DEVICE_BIG_ENDIAN); i++; } for(; i < MAX_SERIAL_PORTS; i++) { if (serial_hds[i]) { serial_isa_init(isa_bus, i, serial_hds[i]); } } for(i = 0; i < MAX_PARALLEL_PORTS; i++) { if (parallel_hds[i]) { parallel_init(isa_bus, i, parallel_hds[i]); } } for(i = 0; i < nb_nics; i++) pci_nic_init_nofail(&nd_table[i], pci_bus, "ne2k_pci", NULL); ide_drive_get(hd, MAX_IDE_BUS); pci_cmd646_ide_init(pci_bus, hd, 1); isa_create_simple(isa_bus, "i8042"); for(i = 0; i < MAX_FD; i++) { fd[i] = drive_get(IF_FLOPPY, 0, i); } fdctrl_init_isa(isa_bus, fd); nvram = m48t59_init_isa(isa_bus, 0x0074, NVRAM_SIZE, 59); initrd_size = 0; initrd_addr = 0; kernel_size = sun4u_load_kernel(args->kernel_filename, args->initrd_filename, ram_size, &initrd_size, &initrd_addr, &kernel_addr, &kernel_entry); sun4u_NVRAM_set_params(nvram, NVRAM_SIZE, "Sun4u", args->ram_size, args->boot_device, kernel_addr, kernel_size, args->kernel_cmdline, initrd_addr, initrd_size, /* XXX: need an option to load a NVRAM image */ 0, graphic_width, graphic_height, graphic_depth, (uint8_t *)&nd_table[0].macaddr); fw_cfg = fw_cfg_init(BIOS_CFG_IOPORT, BIOS_CFG_IOPORT + 1, 0, 0); fw_cfg_add_i16(fw_cfg, FW_CFG_MAX_CPUS, (uint16_t)max_cpus); fw_cfg_add_i32(fw_cfg, FW_CFG_ID, 1); fw_cfg_add_i64(fw_cfg, FW_CFG_RAM_SIZE, (uint64_t)ram_size); fw_cfg_add_i16(fw_cfg, FW_CFG_MACHINE_ID, hwdef->machine_id); fw_cfg_add_i64(fw_cfg, FW_CFG_KERNEL_ADDR, kernel_entry); fw_cfg_add_i64(fw_cfg, FW_CFG_KERNEL_SIZE, kernel_size); if (args->kernel_cmdline) { fw_cfg_add_i32(fw_cfg, FW_CFG_CMDLINE_SIZE, strlen(args->kernel_cmdline) + 1); fw_cfg_add_string(fw_cfg, FW_CFG_CMDLINE_DATA, args->kernel_cmdline); } else { fw_cfg_add_i32(fw_cfg, FW_CFG_CMDLINE_SIZE, 0); } fw_cfg_add_i64(fw_cfg, FW_CFG_INITRD_ADDR, initrd_addr); fw_cfg_add_i64(fw_cfg, FW_CFG_INITRD_SIZE, initrd_size); fw_cfg_add_i16(fw_cfg, FW_CFG_BOOT_DEVICE, args->boot_device[0]); fw_cfg_add_i16(fw_cfg, FW_CFG_SPARC64_WIDTH, graphic_width); fw_cfg_add_i16(fw_cfg, FW_CFG_SPARC64_HEIGHT, graphic_height); fw_cfg_add_i16(fw_cfg, FW_CFG_SPARC64_DEPTH, graphic_depth); qemu_register_boot_set(fw_cfg_boot_set, fw_cfg); }
19,326
qemu
7bd427d801e1e3293a634d3c83beadaa90ffb911
0
static void buffered_rate_tick(void *opaque) { QEMUFileBuffered *s = opaque; if (s->has_error) { buffered_close(s); return; } qemu_mod_timer(s->timer, qemu_get_clock(rt_clock) + 100); if (s->freeze_output) return; s->bytes_xfer = 0; buffered_flush(s); /* Add some checks around this */ s->put_ready(s->opaque); }
19,327
qemu
f53a829bb9ef14be800556cbc02d8b20fc1050a7
0
void nbd_client_session_close(NbdClientSession *client) { struct nbd_request request = { .type = NBD_CMD_DISC, .from = 0, .len = 0 }; if (!client->bs) { return; } if (client->sock == -1) { return; } nbd_send_request(client->sock, &request); nbd_teardown_connection(client); client->bs = NULL; }
19,328
qemu
d5e6f437c5508614803d11e59ee16a758dde09ef
0
void blk_insert_bs(BlockBackend *blk, BlockDriverState *bs) { bdrv_ref(bs); blk->root = bdrv_root_attach_child(bs, "root", &child_root, blk); notifier_list_notify(&blk->insert_bs_notifiers, blk); if (blk->public.throttle_state) { throttle_timers_attach_aio_context( &blk->public.throttle_timers, bdrv_get_aio_context(bs)); } }
19,329
qemu
5add35bec1e249bb5345a47008c8f298d4760be4
0
static void test_init(TestData *d) { QPCIBus *bus; QTestState *qs; char *s; s = g_strdup_printf("-machine q35 %s", !d->args ? "" : d->args); qs = qtest_start(s); qtest_irq_intercept_in(qs, "ioapic"); g_free(s); bus = qpci_init_pc(); d->dev = qpci_device_find(bus, QPCI_DEVFN(0x1f, 0x00)); g_assert(d->dev != NULL); /* map PCI-to-LPC bridge interface BAR */ d->lpc_base = qpci_iomap(d->dev, 0, NULL); qpci_device_enable(d->dev); g_assert(d->lpc_base != NULL); /* set ACPI PM I/O space base address */ qpci_config_writel(d->dev, (uintptr_t)d->lpc_base + ICH9_LPC_PMBASE, PM_IO_BASE_ADDR | 0x1); /* enable ACPI I/O */ qpci_config_writeb(d->dev, (uintptr_t)d->lpc_base + ICH9_LPC_ACPI_CTRL, 0x80); /* set Root Complex BAR */ qpci_config_writel(d->dev, (uintptr_t)d->lpc_base + ICH9_LPC_RCBA, RCBA_BASE_ADDR | 0x1); d->tco_io_base = (void *)((uintptr_t)PM_IO_BASE_ADDR + 0x60); }
19,330
qemu
ac531cb6e542b1e61d668604adf9dc5306a948c0
0
START_TEST(qdict_stress_test) { size_t lines; char key[128]; FILE *test_file; QDict *qdict; QString *value; const char *test_file_path = "qdict-test-data.txt"; test_file = fopen(test_file_path, "r"); fail_unless(test_file != NULL); // Create the dict qdict = qdict_new(); fail_unless(qdict != NULL); // Add everything from the test file for (lines = 0;; lines++) { value = read_line(test_file, key); if (!value) break; qdict_put(qdict, key, value); } fail_unless(qdict_size(qdict) == lines); // Check if everything is really in there reset_file(test_file); for (;;) { const char *str1, *str2; value = read_line(test_file, key); if (!value) break; str1 = qstring_get_str(value); str2 = qdict_get_str(qdict, key); fail_unless(str2 != NULL); fail_unless(strcmp(str1, str2) == 0); QDECREF(value); } // Delete everything reset_file(test_file); for (;;) { value = read_line(test_file, key); if (!value) break; qdict_del(qdict, key); QDECREF(value); fail_unless(qdict_haskey(qdict, key) == 0); } fclose(test_file); fail_unless(qdict_size(qdict) == 0); QDECREF(qdict); }
19,331
qemu
4be746345f13e99e468c60acbd3a355e8183e3ce
0
static int sl_nand_init(SysBusDevice *dev) { SLNANDState *s = SL_NAND(dev); DriveInfo *nand; s->ctl = 0; nand = drive_get(IF_MTD, 0, 0); s->nand = nand_init(nand ? blk_bs(blk_by_legacy_dinfo(nand)) : NULL, s->manf_id, s->chip_id); memory_region_init_io(&s->iomem, OBJECT(s), &sl_ops, s, "sl", 0x40); sysbus_init_mmio(dev, &s->iomem); return 0; }
19,332
qemu
ad02b96ad86baf6dd72a43b04876b2d6ea957112
0
void kbd_mouse_event(int dx, int dy, int dz, int buttons_state) { QEMUPutMouseEntry *entry; QEMUPutMouseEvent *mouse_event; void *mouse_event_opaque; int width, height; if (!runstate_is_running()) { return; } if (QTAILQ_EMPTY(&mouse_handlers)) { return; } entry = QTAILQ_FIRST(&mouse_handlers); mouse_event = entry->qemu_put_mouse_event; mouse_event_opaque = entry->qemu_put_mouse_event_opaque; if (mouse_event) { if (entry->qemu_put_mouse_event_absolute) { width = 0x7fff; height = 0x7fff; } else { width = graphic_width - 1; height = graphic_height - 1; } switch (graphic_rotate) { case 0: mouse_event(mouse_event_opaque, dx, dy, dz, buttons_state); break; case 90: mouse_event(mouse_event_opaque, width - dy, dx, dz, buttons_state); break; case 180: mouse_event(mouse_event_opaque, width - dx, height - dy, dz, buttons_state); break; case 270: mouse_event(mouse_event_opaque, dy, height - dx, dz, buttons_state); break; } } }
19,333
FFmpeg
32c3047cac9294bb56d23c89a40a22409db5cc70
0
static int vmdvideo_decode_init(AVCodecContext *avctx) { VmdVideoContext *s = avctx->priv_data; int i; unsigned int *palette32; int palette_index = 0; unsigned char r, g, b; unsigned char *vmd_header; unsigned char *raw_palette; s->avctx = avctx; avctx->pix_fmt = PIX_FMT_PAL8; dsputil_init(&s->dsp, avctx); /* make sure the VMD header made it */ if (s->avctx->extradata_size != VMD_HEADER_SIZE) { av_log(s->avctx, AV_LOG_ERROR, "VMD video: expected extradata size of %d\n", VMD_HEADER_SIZE); return -1; } vmd_header = (unsigned char *)avctx->extradata; s->unpack_buffer_size = AV_RL32(&vmd_header[800]); s->unpack_buffer = av_malloc(s->unpack_buffer_size); if (!s->unpack_buffer) return -1; /* load up the initial palette */ raw_palette = &vmd_header[28]; palette32 = (unsigned int *)s->palette; for (i = 0; i < PALETTE_COUNT; i++) { r = raw_palette[palette_index++] * 4; g = raw_palette[palette_index++] * 4; b = raw_palette[palette_index++] * 4; palette32[i] = (r << 16) | (g << 8) | (b); } s->frame.data[0] = s->prev_frame.data[0] = NULL; return 0; }
19,334
qemu
ee636500d6eab44b83f09cb730b67226b70423b1
0
static int mmap_frag(abi_ulong real_start, abi_ulong start, abi_ulong end, int prot, int flags, int fd, abi_ulong offset) { abi_ulong real_end, addr; void *host_start; int prot1, prot_new; real_end = real_start + qemu_host_page_size; host_start = g2h(real_start); /* get the protection of the target pages outside the mapping */ prot1 = 0; for(addr = real_start; addr < real_end; addr++) { if (addr < start || addr >= end) prot1 |= page_get_flags(addr); } if (prot1 == 0) { /* no page was there, so we allocate one */ void *p = mmap(host_start, qemu_host_page_size, prot, flags | MAP_ANONYMOUS, -1, 0); if (p == MAP_FAILED) return -1; prot1 = prot; } prot1 &= PAGE_BITS; prot_new = prot | prot1; if (!(flags & MAP_ANONYMOUS)) { /* msync() won't work here, so we return an error if write is possible while it is a shared mapping */ if ((flags & MAP_TYPE) == MAP_SHARED && (prot & PROT_WRITE)) return -EINVAL; /* adjust protection to be able to read */ if (!(prot1 & PROT_WRITE)) mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE); /* read the corresponding file data */ if (pread(fd, g2h(start), end - start, offset) == -1) return -1; /* put final protection */ if (prot_new != (prot1 | PROT_WRITE)) mprotect(host_start, qemu_host_page_size, prot_new); } else { /* just update the protection */ if (prot_new != prot1) { mprotect(host_start, qemu_host_page_size, prot_new); } } return 0; }
19,335
qemu
3e3553905cfc814d59de6d1a634c3a991b2a9257
0
int qcow2_check_metadata_overlap(BlockDriverState *bs, int ign, int64_t offset, int64_t size) { BDRVQcowState *s = bs->opaque; int chk = QCOW2_OL_DEFAULT & ~ign; int i, j; if (!size) { return 0; } if (chk & QCOW2_OL_MAIN_HEADER) { if (offset < s->cluster_size) { return QCOW2_OL_MAIN_HEADER; } } /* align range to test to cluster boundaries */ size = align_offset(offset_into_cluster(s, offset) + size, s->cluster_size); offset = start_of_cluster(s, offset); if ((chk & QCOW2_OL_ACTIVE_L1) && s->l1_size) { if (overlaps_with(s->l1_table_offset, s->l1_size * sizeof(uint64_t))) { return QCOW2_OL_ACTIVE_L1; } } if ((chk & QCOW2_OL_REFCOUNT_TABLE) && s->refcount_table_size) { if (overlaps_with(s->refcount_table_offset, s->refcount_table_size * sizeof(uint64_t))) { return QCOW2_OL_REFCOUNT_TABLE; } } if ((chk & QCOW2_OL_SNAPSHOT_TABLE) && s->snapshots_size) { if (overlaps_with(s->snapshots_offset, s->snapshots_size)) { return QCOW2_OL_SNAPSHOT_TABLE; } } if ((chk & QCOW2_OL_INACTIVE_L1) && s->snapshots) { for (i = 0; i < s->nb_snapshots; i++) { if (s->snapshots[i].l1_size && overlaps_with(s->snapshots[i].l1_table_offset, s->snapshots[i].l1_size * sizeof(uint64_t))) { return QCOW2_OL_INACTIVE_L1; } } } if ((chk & QCOW2_OL_ACTIVE_L2) && s->l1_table) { for (i = 0; i < s->l1_size; i++) { if ((s->l1_table[i] & L1E_OFFSET_MASK) && overlaps_with(s->l1_table[i] & L1E_OFFSET_MASK, s->cluster_size)) { return QCOW2_OL_ACTIVE_L2; } } } if ((chk & QCOW2_OL_REFCOUNT_BLOCK) && s->refcount_table) { for (i = 0; i < s->refcount_table_size; i++) { if ((s->refcount_table[i] & REFT_OFFSET_MASK) && overlaps_with(s->refcount_table[i] & REFT_OFFSET_MASK, s->cluster_size)) { return QCOW2_OL_REFCOUNT_BLOCK; } } } if ((chk & QCOW2_OL_INACTIVE_L2) && s->snapshots) { for (i = 0; i < s->nb_snapshots; i++) { uint64_t l1_ofs = s->snapshots[i].l1_table_offset; uint32_t l1_sz = s->snapshots[i].l1_size; uint64_t l1_sz2 = l1_sz * sizeof(uint64_t); uint64_t *l1 = g_malloc(l1_sz2); int ret; ret = bdrv_pread(bs->file, l1_ofs, l1, l1_sz2); if (ret < 0) { g_free(l1); return ret; } for (j = 0; j < l1_sz; j++) { uint64_t l2_ofs = be64_to_cpu(l1[j]) & L1E_OFFSET_MASK; if (l2_ofs && overlaps_with(l2_ofs, s->cluster_size)) { g_free(l1); return QCOW2_OL_INACTIVE_L2; } } g_free(l1); } } return 0; }
19,336
qemu
84f1c148da2b35fbb5a436597872765257e8914e
0
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr) { CPUState *cpu; PageDesc *p; uint32_t h; tb_page_addr_t phys_pc; assert_tb_locked(); atomic_set(&tb->invalid, true); /* remove the TB from the hash list */ phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK); h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->trace_vcpu_dstate); qht_remove(&tcg_ctx.tb_ctx.htable, tb, h); /* remove the TB from the page list */ if (tb->page_addr[0] != page_addr) { p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS); tb_page_remove(&p->first_tb, tb); invalidate_page_bitmap(p); } if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) { p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS); tb_page_remove(&p->first_tb, tb); invalidate_page_bitmap(p); } /* remove the TB from the hash list */ h = tb_jmp_cache_hash_func(tb->pc); CPU_FOREACH(cpu) { if (atomic_read(&cpu->tb_jmp_cache[h]) == tb) { atomic_set(&cpu->tb_jmp_cache[h], NULL); } } /* suppress this TB from the two jump lists */ tb_remove_from_jmp_list(tb, 0); tb_remove_from_jmp_list(tb, 1); /* suppress any remaining jumps to this TB */ tb_jmp_unlink(tb); tcg_ctx.tb_ctx.tb_phys_invalidate_count++; }
19,337
qemu
a89ef0c357abfbf1f76e2d7418fe3c880e0364bd
0
int nbd_client_co_flush(BlockDriverState *bs) { NbdClientSession *client = nbd_get_client_session(bs); struct nbd_request request = { .type = NBD_CMD_FLUSH }; struct nbd_reply reply; ssize_t ret; if (!(client->nbdflags & NBD_FLAG_SEND_FLUSH)) { return 0; } if (client->nbdflags & NBD_FLAG_SEND_FUA) { request.type |= NBD_CMD_FLAG_FUA; } request.from = 0; request.len = 0; nbd_coroutine_start(client, &request); ret = nbd_co_send_request(bs, &request, NULL, 0); if (ret < 0) { reply.error = -ret; } else { nbd_co_receive_reply(client, &request, &reply, NULL, 0); } nbd_coroutine_end(client, &request); return -reply.error; }
19,338
qemu
4f97558e100f66f953ba7576b0ced146e6846997
0
void *colo_process_incoming_thread(void *opaque) { MigrationIncomingState *mis = opaque; migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE, MIGRATION_STATUS_COLO); mis->to_src_file = qemu_file_get_return_path(mis->from_src_file); if (!mis->to_src_file) { error_report("COLO incoming thread: Open QEMUFile to_src_file failed"); goto out; } /* * Note: the communication between Primary side and Secondary side * should be sequential, we set the fd to unblocked in migration incoming * coroutine, and here we are in the COLO incoming thread, so it is ok to * set the fd back to blocked. */ qemu_file_set_blocking(mis->from_src_file, true); /* TODO: COLO checkpoint restore loop */ out: if (mis->to_src_file) { qemu_fclose(mis->to_src_file); } migration_incoming_exit_colo(); return NULL; }
19,339
qemu
494a8ebe713055d3946183f4b395f85a18b43e9e
0
static int v9fs_receive_status(V9fsProxy *proxy, struct iovec *reply, int *status) { int retval; ProxyHeader header; *status = 0; reply->iov_len = 0; retval = socket_read(proxy->sockfd, reply->iov_base, PROXY_HDR_SZ); if (retval < 0) { return retval; } reply->iov_len = PROXY_HDR_SZ; proxy_unmarshal(reply, 0, "dd", &header.type, &header.size); if (header.size != sizeof(int)) { *status = -ENOBUFS; return 0; } retval = socket_read(proxy->sockfd, reply->iov_base + PROXY_HDR_SZ, header.size); if (retval < 0) { return retval; } reply->iov_len += header.size; proxy_unmarshal(reply, PROXY_HDR_SZ, "d", status); return 0; }
19,340
qemu
b9ce1454e14ec918acb90d899ce7724f69682f45
0
static int qemu_peek_byte(QEMUFile *f) { if (f->is_write) abort(); if (f->buf_index >= f->buf_size) { qemu_fill_buffer(f); if (f->buf_index >= f->buf_size) return 0; } return f->buf[f->buf_index]; }
19,341
qemu
08215d8fd8ca15425401adc9e01361cbc6882402
0
static void kvm_fixup_page_sizes(PowerPCCPU *cpu) { static struct kvm_ppc_smmu_info smmu_info; static bool has_smmu_info; CPUPPCState *env = &cpu->env; long rampagesize; int iq, ik, jq, jk; /* We only handle page sizes for 64-bit server guests for now */ if (!(env->mmu_model & POWERPC_MMU_64)) { return; } /* Collect MMU info from kernel if not already */ if (!has_smmu_info) { kvm_get_smmu_info(cpu, &smmu_info); has_smmu_info = true; } rampagesize = getrampagesize(); /* Convert to QEMU form */ memset(&env->sps, 0, sizeof(env->sps)); for (ik = iq = 0; ik < KVM_PPC_PAGE_SIZES_MAX_SZ; ik++) { struct ppc_one_seg_page_size *qsps = &env->sps.sps[iq]; struct kvm_ppc_one_seg_page_size *ksps = &smmu_info.sps[ik]; if (!kvm_valid_page_size(smmu_info.flags, rampagesize, ksps->page_shift)) { continue; } qsps->page_shift = ksps->page_shift; qsps->slb_enc = ksps->slb_enc; for (jk = jq = 0; jk < KVM_PPC_PAGE_SIZES_MAX_SZ; jk++) { if (!kvm_valid_page_size(smmu_info.flags, rampagesize, ksps->enc[jk].page_shift)) { continue; } qsps->enc[jq].page_shift = ksps->enc[jk].page_shift; qsps->enc[jq].pte_enc = ksps->enc[jk].pte_enc; if (++jq >= PPC_PAGE_SIZES_MAX_SZ) { break; } } if (++iq >= PPC_PAGE_SIZES_MAX_SZ) { break; } } env->slb_nr = smmu_info.slb_size; if (smmu_info.flags & KVM_PPC_1T_SEGMENTS) { env->mmu_model |= POWERPC_MMU_1TSEG; } else { env->mmu_model &= ~POWERPC_MMU_1TSEG; } }
19,342
qemu
e1123a3b40a1a9a625a29c8ed4debb7e206ea690
0
static unsigned long *iscsi_allocationmap_init(IscsiLun *iscsilun) { return bitmap_try_new(DIV_ROUND_UP(sector_lun2qemu(iscsilun->num_blocks, iscsilun), iscsilun->cluster_sectors)); }
19,343
qemu
b03c38057b7ac4ffb60fa98a26dd4c8d5fa9c54c
0
static int iscsi_get_info(BlockDriverState *bs, BlockDriverInfo *bdi) { IscsiLun *iscsilun = bs->opaque; bdi->unallocated_blocks_are_zero = !!iscsilun->lbprz; bdi->can_write_zeroes_with_unmap = iscsilun->lbprz && iscsilun->lbp.lbpws; /* Guess the internal cluster (page) size of the iscsi target by the means * of opt_unmap_gran. Transfer the unmap granularity only if it has a * reasonable size for bdi->cluster_size */ if (iscsilun->bl.opt_unmap_gran * iscsilun->block_size >= 64 * 1024 && iscsilun->bl.opt_unmap_gran * iscsilun->block_size <= 16 * 1024 * 1024) { bdi->cluster_size = iscsilun->bl.opt_unmap_gran * iscsilun->block_size; } return 0; }
19,344
FFmpeg
474d858fd9551b45a17e1f3a4322de1a88e749b9
0
static void set_port(struct sockaddr_storage *ss, int port) { sockaddr_union ssu = (sockaddr_union){.storage = *ss}; if (ss->ss_family == AF_INET) ssu.in.sin_port = htons(port); #if HAVE_STRUCT_SOCKADDR_IN6 else if (ss->ss_family == AF_INET6) ssu.in6.sin6_port = htons(port); #endif *ss = ssu.storage; }
19,345
qemu
72cf2d4f0e181d0d3a3122e04129c58a95da713e
0
void unregister_savevm(const char *idstr, void *opaque) { SaveStateEntry *se, *new_se; TAILQ_FOREACH_SAFE(se, &savevm_handlers, entry, new_se) { if (strcmp(se->idstr, idstr) == 0 && se->opaque == opaque) { TAILQ_REMOVE(&savevm_handlers, se, entry); qemu_free(se); } } }
19,346
qemu
ef546f1275f6563e8934dd5e338d29d9f9909ca6
0
static int virtio_net_handle_offloads(VirtIONet *n, uint8_t cmd, struct iovec *iov, unsigned int iov_cnt) { VirtIODevice *vdev = VIRTIO_DEVICE(n); uint64_t offloads; size_t s; if (!((1 << VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) & vdev->guest_features)) { return VIRTIO_NET_ERR; } s = iov_to_buf(iov, iov_cnt, 0, &offloads, sizeof(offloads)); if (s != sizeof(offloads)) { return VIRTIO_NET_ERR; } if (cmd == VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET) { uint64_t supported_offloads; if (!n->has_vnet_hdr) { return VIRTIO_NET_ERR; } supported_offloads = virtio_net_supported_guest_offloads(n); if (offloads & ~supported_offloads) { return VIRTIO_NET_ERR; } n->curr_guest_offloads = offloads; virtio_net_apply_guest_offloads(n); return VIRTIO_NET_OK; } else { return VIRTIO_NET_ERR; } }
19,347
qemu
477830727821e4bc337f4ac1fd222ffe0b900e1a
0
static int coroutine_fn sd_co_flush_to_disk(BlockDriverState *bs) { BDRVSheepdogState *s = bs->opaque; SheepdogObjReq hdr = { 0 }; SheepdogObjRsp *rsp = (SheepdogObjRsp *)&hdr; SheepdogInode *inode = &s->inode; int ret; unsigned int wlen = 0, rlen = 0; if (s->cache_flags != SD_FLAG_CMD_CACHE) { return 0; } hdr.opcode = SD_OP_FLUSH_VDI; hdr.oid = vid_to_vdi_oid(inode->vdi_id); ret = do_req(s->flush_fd, (SheepdogReq *)&hdr, NULL, &wlen, &rlen); if (ret) { error_report("failed to send a request to the sheep"); return ret; } if (rsp->result == SD_RES_INVALID_PARMS) { dprintf("disable write cache since the server doesn't support it\n"); s->cache_flags = SD_FLAG_CMD_DIRECT; closesocket(s->flush_fd); return 0; } if (rsp->result != SD_RES_SUCCESS) { error_report("%s", sd_strerror(rsp->result)); return -EIO; } return 0; }
19,348
qemu
72cf2d4f0e181d0d3a3122e04129c58a95da713e
0
int tlb_set_page_exec(CPUState *env, target_ulong vaddr, target_phys_addr_t paddr, int prot, int mmu_idx, int is_softmmu) { PhysPageDesc *p; unsigned long pd; unsigned int index; target_ulong address; target_ulong code_address; target_phys_addr_t addend; int ret; CPUTLBEntry *te; CPUWatchpoint *wp; target_phys_addr_t iotlb; p = phys_page_find(paddr >> TARGET_PAGE_BITS); if (!p) { pd = IO_MEM_UNASSIGNED; } else { pd = p->phys_offset; } #if defined(DEBUG_TLB) printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n", vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd); #endif ret = 0; address = vaddr; if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) { /* IO memory case (romd handled later) */ address |= TLB_MMIO; } addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK); if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) { /* Normal RAM. */ iotlb = pd & TARGET_PAGE_MASK; if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM) iotlb |= IO_MEM_NOTDIRTY; else iotlb |= IO_MEM_ROM; } else { /* IO handlers are currently passed a physical address. It would be nice to pass an offset from the base address of that region. This would avoid having to special case RAM, and avoid full address decoding in every device. We can't use the high bits of pd for this because IO_MEM_ROMD uses these as a ram address. */ iotlb = (pd & ~TARGET_PAGE_MASK); if (p) { iotlb += p->region_offset; } else { iotlb += paddr; } } code_address = address; /* Make accesses to pages with watchpoints go via the watchpoint trap routines. */ TAILQ_FOREACH(wp, &env->watchpoints, entry) { if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) { iotlb = io_mem_watch + paddr; /* TODO: The memory case can be optimized by not trapping reads of pages with a write breakpoint. */ address |= TLB_MMIO; } } index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); env->iotlb[mmu_idx][index] = iotlb - vaddr; te = &env->tlb_table[mmu_idx][index]; te->addend = addend - vaddr; if (prot & PAGE_READ) { te->addr_read = address; } else { te->addr_read = -1; } if (prot & PAGE_EXEC) { te->addr_code = code_address; } else { te->addr_code = -1; } if (prot & PAGE_WRITE) { if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM || (pd & IO_MEM_ROMD)) { /* Write access calls the I/O callback. */ te->addr_write = address | TLB_MMIO; } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM && !cpu_physical_memory_is_dirty(pd)) { te->addr_write = address | TLB_NOTDIRTY; } else { te->addr_write = address; } } else { te->addr_write = -1; } return ret; }
19,349
qemu
62112d181ca33fea976100c4335dfc3e2f727e6c
0
int net_init_tap(QemuOpts *opts, Monitor *mon, const char *name, VLANState *vlan) { TAPState *s; int fd, vnet_hdr = 0; if (qemu_opt_get(opts, "fd")) { if (qemu_opt_get(opts, "ifname") || qemu_opt_get(opts, "script") || qemu_opt_get(opts, "downscript") || qemu_opt_get(opts, "vnet_hdr")) { qemu_error("ifname=, script=, downscript= and vnet_hdr= is invalid with fd=\n"); return -1; } fd = net_handle_fd_param(mon, qemu_opt_get(opts, "fd")); if (fd == -1) { return -1; } fcntl(fd, F_SETFL, O_NONBLOCK); vnet_hdr = tap_probe_vnet_hdr(fd); } else { if (!qemu_opt_get(opts, "script")) { qemu_opt_set(opts, "script", DEFAULT_NETWORK_SCRIPT); } if (!qemu_opt_get(opts, "downscript")) { qemu_opt_set(opts, "downscript", DEFAULT_NETWORK_DOWN_SCRIPT); } fd = net_tap_init(opts, &vnet_hdr); if (fd == -1) { return -1; } } s = net_tap_fd_init(vlan, "tap", name, fd, vnet_hdr); if (!s) { close(fd); return -1; } if (tap_set_sndbuf(s->fd, opts) < 0) { return -1; } if (qemu_opt_get(opts, "fd")) { snprintf(s->nc.info_str, sizeof(s->nc.info_str), "fd=%d", fd); } else { const char *ifname, *script, *downscript; ifname = qemu_opt_get(opts, "ifname"); script = qemu_opt_get(opts, "script"); downscript = qemu_opt_get(opts, "downscript"); snprintf(s->nc.info_str, sizeof(s->nc.info_str), "ifname=%s,script=%s,downscript=%s", ifname, script, downscript); if (strcmp(downscript, "no") != 0) { snprintf(s->down_script, sizeof(s->down_script), "%s", downscript); snprintf(s->down_script_arg, sizeof(s->down_script_arg), "%s", ifname); } } if (vlan) { vlan->nb_host_devs++; } return 0; }
19,350
qemu
3ef0eab178e5120a0e1c079d163d5c71689d9b71
1
uint16_t acpi_pm1_evt_get_sts(ACPIREGS *ar) { int64_t d = acpi_pm_tmr_get_clock(); if (d >= ar->tmr.overflow_time) { ar->pm1.evt.sts |= ACPI_BITMASK_TIMER_STATUS; } return ar->pm1.evt.sts; }
19,351
FFmpeg
1ac5abb1d062b6ca983d494068bb9fd30390a941
1
static void store_slice16_c(uint16_t *dst, const uint16_t *src, int dst_linesize, int src_linesize, int width, int height, int log2_scale, const uint8_t dither[8][8]) { int y, x; #define STORE16(pos) do { \ temp = ((src[x + y*src_linesize + pos] << log2_scale) + (d[pos]>>1)) >> 5; \ if (temp & 0x400) \ temp = ~(temp >> 31); \ dst[x + y*dst_linesize + pos] = temp; \ } while (0) for (y = 0; y < height; y++) { const uint8_t *d = dither[y]; for (x = 0; x < width; x += 8) { int temp; STORE16(0); STORE16(1); STORE16(2); STORE16(3); STORE16(4); STORE16(5); STORE16(6); STORE16(7); } } }
19,352
qemu
eddedd546a68f6ac864b71d50dd8d39b939b724b
1
void mips_cpu_unassigned_access(CPUState *cs, hwaddr addr, bool is_write, bool is_exec, int unused, unsigned size) { MIPSCPU *cpu = MIPS_CPU(cs); CPUMIPSState *env = &cpu->env; if (is_exec) { helper_raise_exception(env, EXCP_IBE); } else { helper_raise_exception(env, EXCP_DBE); } }
19,354
qemu
4f8eb8daebd72bdc214c63a3b2577f95bbadb27d
1
void pxa27x_register_keypad(struct pxa2xx_keypad_s *kp, struct keymap *map, int size) { kp->map = (struct keymap *) qemu_mallocz(sizeof(struct keymap) * size); if(!map || size < 0x80) { fprintf(stderr, "%s - No PXA keypad map defined\n", __FUNCTION__); exit(-1); } kp->map = map; qemu_add_kbd_event_handler((QEMUPutKBDEvent *) pxa27x_keyboard_event, kp); }
19,355
qemu
9561fda8d90e176bef598ba87c42a1bd6ad03ef7
1
static void virtio_rng_initfn(Object *obj) { VirtIORNG *vrng = VIRTIO_RNG(obj); object_property_add_link(obj, "rng", TYPE_RNG_BACKEND, (Object **)&vrng->conf.rng, NULL); }
19,356
qemu
18c9a9c3c2698d71575d49c308db88f295ddffed
1
print_execve(const struct syscallname *name, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5, abi_long arg6) { abi_ulong arg_ptr_addr; char *s; if (!(s = lock_user_string(arg1))) return; gemu_log("%s(\"%s\",{", name->name, s); unlock_user(s, arg1, 0); for (arg_ptr_addr = arg2; ; arg_ptr_addr += sizeof(abi_ulong)) { abi_ulong *arg_ptr, arg_addr, s_addr; arg_ptr = lock_user(VERIFY_READ, arg_ptr_addr, sizeof(abi_ulong), 1); if (!arg_ptr) return; arg_addr = tswapl(*arg_ptr); unlock_user(arg_ptr, arg_ptr_addr, 0); if (!arg_addr) break; if ((s = lock_user_string(arg_addr))) { gemu_log("\"%s\",", s); unlock_user(s, s_addr, 0); } } gemu_log("NULL})"); }
19,358
FFmpeg
003243c3c2bdfa485eedbed593a0bb2feae66ab9
0
DECLARE_LOOP_FILTER(mmxext) DECLARE_LOOP_FILTER(sse2) DECLARE_LOOP_FILTER(ssse3) #endif #define VP8_LUMA_MC_FUNC(IDX, SIZE, OPT) \ c->put_vp8_epel_pixels_tab[IDX][0][2] = ff_put_vp8_epel ## SIZE ## _h6_ ## OPT; \ c->put_vp8_epel_pixels_tab[IDX][2][0] = ff_put_vp8_epel ## SIZE ## _v6_ ## OPT; \ c->put_vp8_epel_pixels_tab[IDX][2][2] = ff_put_vp8_epel ## SIZE ## _h6v6_ ## OPT #define VP8_MC_FUNC(IDX, SIZE, OPT) \ c->put_vp8_epel_pixels_tab[IDX][0][1] = ff_put_vp8_epel ## SIZE ## _h4_ ## OPT; \ c->put_vp8_epel_pixels_tab[IDX][1][0] = ff_put_vp8_epel ## SIZE ## _v4_ ## OPT; \ c->put_vp8_epel_pixels_tab[IDX][1][1] = ff_put_vp8_epel ## SIZE ## _h4v4_ ## OPT; \ c->put_vp8_epel_pixels_tab[IDX][1][2] = ff_put_vp8_epel ## SIZE ## _h6v4_ ## OPT; \ c->put_vp8_epel_pixels_tab[IDX][2][1] = ff_put_vp8_epel ## SIZE ## _h4v6_ ## OPT; \ VP8_LUMA_MC_FUNC(IDX, SIZE, OPT) #define VP8_BILINEAR_MC_FUNC(IDX, SIZE, OPT) \ c->put_vp8_bilinear_pixels_tab[IDX][0][1] = ff_put_vp8_bilinear ## SIZE ## _h_ ## OPT; \ c->put_vp8_bilinear_pixels_tab[IDX][0][2] = ff_put_vp8_bilinear ## SIZE ## _h_ ## OPT; \ c->put_vp8_bilinear_pixels_tab[IDX][1][0] = ff_put_vp8_bilinear ## SIZE ## _v_ ## OPT; \ c->put_vp8_bilinear_pixels_tab[IDX][1][1] = ff_put_vp8_bilinear ## SIZE ## _hv_ ## OPT; \ c->put_vp8_bilinear_pixels_tab[IDX][1][2] = ff_put_vp8_bilinear ## SIZE ## _hv_ ## OPT; \ c->put_vp8_bilinear_pixels_tab[IDX][2][0] = ff_put_vp8_bilinear ## SIZE ## _v_ ## OPT; \ c->put_vp8_bilinear_pixels_tab[IDX][2][1] = ff_put_vp8_bilinear ## SIZE ## _hv_ ## OPT; \ c->put_vp8_bilinear_pixels_tab[IDX][2][2] = ff_put_vp8_bilinear ## SIZE ## _hv_ ## OPT av_cold void ff_vp8dsp_init_x86(VP8DSPContext* c) { mm_flags = mm_support(); #if HAVE_YASM if (mm_flags & FF_MM_MMX) { c->vp8_idct_dc_add = ff_vp8_idct_dc_add_mmx; c->vp8_idct_add = ff_vp8_idct_add_mmx; c->vp8_luma_dc_wht = ff_vp8_luma_dc_wht_mmx; c->put_vp8_epel_pixels_tab[0][0][0] = c->put_vp8_bilinear_pixels_tab[0][0][0] = ff_put_vp8_pixels16_mmx; c->put_vp8_epel_pixels_tab[1][0][0] = c->put_vp8_bilinear_pixels_tab[1][0][0] = ff_put_vp8_pixels8_mmx; c->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter_simple_mmx; c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_mmx; c->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16y_inner_mmx; c->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16y_inner_mmx; c->vp8_v_loop_filter8uv_inner = ff_vp8_v_loop_filter8uv_inner_mmx; c->vp8_h_loop_filter8uv_inner = ff_vp8_h_loop_filter8uv_inner_mmx; c->vp8_v_loop_filter16y = ff_vp8_v_loop_filter16y_mbedge_mmx; c->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16y_mbedge_mmx; c->vp8_v_loop_filter8uv = ff_vp8_v_loop_filter8uv_mbedge_mmx; c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_mmx; } /* note that 4-tap width=16 functions are missing because w=16 * is only used for luma, and luma is always a copy or sixtap. */ if (mm_flags & FF_MM_MMX2) { VP8_LUMA_MC_FUNC(0, 16, mmxext); VP8_MC_FUNC(1, 8, mmxext); VP8_MC_FUNC(2, 4, mmxext); VP8_BILINEAR_MC_FUNC(0, 16, mmxext); VP8_BILINEAR_MC_FUNC(1, 8, mmxext); VP8_BILINEAR_MC_FUNC(2, 4, mmxext); c->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter_simple_mmxext; c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_mmxext; c->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16y_inner_mmxext; c->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16y_inner_mmxext; c->vp8_v_loop_filter8uv_inner = ff_vp8_v_loop_filter8uv_inner_mmxext; c->vp8_h_loop_filter8uv_inner = ff_vp8_h_loop_filter8uv_inner_mmxext; c->vp8_v_loop_filter16y = ff_vp8_v_loop_filter16y_mbedge_mmxext; c->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16y_mbedge_mmxext; c->vp8_v_loop_filter8uv = ff_vp8_v_loop_filter8uv_mbedge_mmxext; c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_mmxext; } if (mm_flags & FF_MM_SSE) { c->put_vp8_epel_pixels_tab[0][0][0] = c->put_vp8_bilinear_pixels_tab[0][0][0] = ff_put_vp8_pixels16_sse; } if (mm_flags & (FF_MM_SSE2|FF_MM_SSE2SLOW)) { VP8_LUMA_MC_FUNC(0, 16, sse2); VP8_MC_FUNC(1, 8, sse2); VP8_BILINEAR_MC_FUNC(0, 16, sse2); VP8_BILINEAR_MC_FUNC(1, 8, sse2); c->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter_simple_sse2; c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_sse2; c->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16y_inner_sse2; c->vp8_v_loop_filter8uv_inner = ff_vp8_v_loop_filter8uv_inner_sse2; c->vp8_v_loop_filter16y = ff_vp8_v_loop_filter16y_mbedge_mmxext; c->vp8_v_loop_filter8uv = ff_vp8_v_loop_filter8uv_mbedge_mmxext; } if (mm_flags & FF_MM_SSE2) { c->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16y_inner_sse2; c->vp8_h_loop_filter8uv_inner = ff_vp8_h_loop_filter8uv_inner_sse2; //c->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16y_mbedge_sse2; //c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_sse2; } if (mm_flags & FF_MM_SSSE3) { VP8_LUMA_MC_FUNC(0, 16, ssse3); VP8_MC_FUNC(1, 8, ssse3); VP8_MC_FUNC(2, 4, ssse3); VP8_BILINEAR_MC_FUNC(0, 16, ssse3); VP8_BILINEAR_MC_FUNC(1, 8, ssse3); VP8_BILINEAR_MC_FUNC(2, 4, ssse3); c->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter_simple_ssse3; c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_ssse3; c->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16y_inner_ssse3; c->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16y_inner_ssse3; c->vp8_v_loop_filter8uv_inner = ff_vp8_v_loop_filter8uv_inner_ssse3; c->vp8_h_loop_filter8uv_inner = ff_vp8_h_loop_filter8uv_inner_ssse3; c->vp8_v_loop_filter16y = ff_vp8_v_loop_filter16y_mbedge_ssse3; //c->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16y_mbedge_ssse3; c->vp8_v_loop_filter8uv = ff_vp8_v_loop_filter8uv_mbedge_ssse3; //c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_ssse3; } if (mm_flags & FF_MM_SSE4) { c->vp8_idct_dc_add = ff_vp8_idct_dc_add_sse4; } #endif }
19,359
FFmpeg
e0c6cce44729d94e2a5507a4b6d031f23e8bd7b6
0
void ff_pngdsp_init_x86(PNGDSPContext *dsp)
{
#if HAVE_YASM
    int flags = av_get_cpu_flags();

#if ARCH_X86_32
    if (flags & AV_CPU_FLAG_MMX)
        dsp->add_bytes_l2 = ff_add_bytes_l2_mmx;
#endif
    if (flags & AV_CPU_FLAG_MMXEXT)
        dsp->add_paeth_prediction = ff_add_png_paeth_prediction_mmx2;
    if (flags & AV_CPU_FLAG_SSE2)
        dsp->add_bytes_l2 = ff_add_bytes_l2_sse2;
    if (flags & AV_CPU_FLAG_SSSE3)
        dsp->add_paeth_prediction = ff_add_png_paeth_prediction_ssse3;
#endif
}
19,360
FFmpeg
464ccb01447b91717cf580b870e636514701ce4f
1
static int decode_plane(Indeo3DecodeContext *ctx, AVCodecContext *avctx,
                        Plane *plane, const uint8_t *data, int32_t data_size,
                        int32_t strip_width)
{
    Cell    curr_cell;
    int     num_vectors;

    /* each plane data starts with mc_vector_count field, */
    /* an optional array of motion vectors followed by the vq data */
    num_vectors = bytestream_get_le32(&data);
    ctx->mc_vectors = num_vectors ? data : 0;

    /* init the bitreader */
    init_get_bits(&ctx->gb, &data[num_vectors * 2], data_size << 3);
    ctx->skip_bits   = 0;
    ctx->need_resync = 0;

    ctx->last_byte = data + data_size - 1;

    /* initialize the 1st cell and set its dimensions to whole plane */
    curr_cell.xpos   = curr_cell.ypos = 0;
    curr_cell.width  = plane->width  >> 2;
    curr_cell.height = plane->height >> 2;
    curr_cell.tree   = 0; // we are in the MC tree now
    curr_cell.mv_ptr = 0; // no motion vector = INTRA cell

    return parse_bintree(ctx, avctx, plane, INTRA_NULL, &curr_cell,
                         CELL_STACK_MAX, strip_width);
}
19,362
qemu
a879125b47c3ae554c01824f996a64a45a86556e
1
static void qpci_pc_config_writel(QPCIBus *bus, int devfn, uint8_t offset,
                                  uint32_t value)
{
    outl(0xcf8, (1 << 31) | (devfn << 8) | offset);
    outl(0xcfc, value);
}
19,363
FFmpeg
f28043d0a34aaf4ac7cf25bd0dddd868811c0ab2
1
static char *get_geokey_val(int key, int val)
{
    char *ap;

    if (val == TIFF_GEO_KEY_UNDEFINED)
        return av_strdup("undefined");
    if (val == TIFF_GEO_KEY_USER_DEFINED)
        return av_strdup("User-Defined");

#define RET_GEOKEY_VAL(TYPE, array)\
    if (val >= TIFF_##TYPE##_OFFSET &&\
        val - TIFF_##TYPE##_OFFSET < FF_ARRAY_ELEMS(ff_tiff_##array##_codes))\
        return av_strdup(ff_tiff_##array##_codes[val - TIFF_##TYPE##_OFFSET]);

    switch (key) {
    case TIFF_GT_MODEL_TYPE_GEOKEY:
        RET_GEOKEY_VAL(GT_MODEL_TYPE, gt_model_type);
        break;
    case TIFF_GT_RASTER_TYPE_GEOKEY:
        RET_GEOKEY_VAL(GT_RASTER_TYPE, gt_raster_type);
        break;
    case TIFF_GEOG_LINEAR_UNITS_GEOKEY:
    case TIFF_PROJ_LINEAR_UNITS_GEOKEY:
    case TIFF_VERTICAL_UNITS_GEOKEY:
        RET_GEOKEY_VAL(LINEAR_UNIT, linear_unit);
        break;
    case TIFF_GEOG_ANGULAR_UNITS_GEOKEY:
    case TIFF_GEOG_AZIMUTH_UNITS_GEOKEY:
        RET_GEOKEY_VAL(ANGULAR_UNIT, angular_unit);
        break;
    case TIFF_GEOGRAPHIC_TYPE_GEOKEY:
        RET_GEOKEY_VAL(GCS_TYPE, gcs_type);
        RET_GEOKEY_VAL(GCSE_TYPE, gcse_type);
        break;
    case TIFF_GEOG_GEODETIC_DATUM_GEOKEY:
        RET_GEOKEY_VAL(GEODETIC_DATUM, geodetic_datum);
        RET_GEOKEY_VAL(GEODETIC_DATUM_E, geodetic_datum_e);
        break;
    case TIFF_GEOG_ELLIPSOID_GEOKEY:
        RET_GEOKEY_VAL(ELLIPSOID, ellipsoid);
        break;
    case TIFF_GEOG_PRIME_MERIDIAN_GEOKEY:
        RET_GEOKEY_VAL(PRIME_MERIDIAN, prime_meridian);
        break;
    case TIFF_PROJECTED_CS_TYPE_GEOKEY:
        return av_strdup(search_keyval(ff_tiff_proj_cs_type_codes,
                                       FF_ARRAY_ELEMS(ff_tiff_proj_cs_type_codes), val));
    case TIFF_PROJECTION_GEOKEY:
        return av_strdup(search_keyval(ff_tiff_projection_codes,
                                       FF_ARRAY_ELEMS(ff_tiff_projection_codes), val));
    case TIFF_PROJ_COORD_TRANS_GEOKEY:
        RET_GEOKEY_VAL(COORD_TRANS, coord_trans);
        break;
    case TIFF_VERTICAL_CS_TYPE_GEOKEY:
        RET_GEOKEY_VAL(VERT_CS, vert_cs);
        RET_GEOKEY_VAL(ORTHO_VERT_CS, ortho_vert_cs);
        break;
    }

    ap = av_malloc(14);
    if (ap)
        snprintf(ap, 14, "Unknown-%d", val);
    return ap;
}
19,364
FFmpeg
28215b3700723da0c0beb93945702b6fb2b3596d
1
void ff_float_init_arm_vfp(DSPContext* c, AVCodecContext *avctx)
{
    c->vector_fmul = vector_fmul_vfp;
    c->vector_fmul_reverse = vector_fmul_reverse_vfp;
#ifdef HAVE_ARMV6
    c->float_to_int16 = float_to_int16_vfp;
#endif
}
19,365
qemu
078a458e077d6b0db262c4b05fee51d01de2d1d2
1
static inline int64_t get_sector_offset(BlockDriverState *bs,
                                        int64_t sector_num, int write)
{
    BDRVVPCState *s = bs->opaque;
    uint64_t offset = sector_num * 512;
    uint64_t bitmap_offset, block_offset;
    uint32_t pagetable_index, pageentry_index;

    pagetable_index = offset / s->block_size;
    pageentry_index = (offset % s->block_size) / 512;

    if (pagetable_index >= s->max_table_entries || s->pagetable[pagetable_index] == 0xffffffff)
        return -1; // not allocated

    bitmap_offset = 512 * (uint64_t) s->pagetable[pagetable_index];
    block_offset = bitmap_offset + s->bitmap_size + (512 * pageentry_index);

    // We must ensure that we don't write to any sectors which are marked as
    // unused in the bitmap. We get away with setting all bits in the block
    // bitmap each time we write to a new block. This might cause Virtual PC to
    // miss sparse read optimization, but it's not a problem in terms of
    // correctness.
    if (write && (s->last_bitmap_offset != bitmap_offset)) {
        uint8_t bitmap[s->bitmap_size];

        s->last_bitmap_offset = bitmap_offset;
        memset(bitmap, 0xff, s->bitmap_size);
        bdrv_pwrite(bs->file, bitmap_offset, bitmap, s->bitmap_size);
    }

//    printf("sector: %" PRIx64 ", index: %x, offset: %x, bioff: %" PRIx64 ", bloff: %" PRIx64 "\n",
//        sector_num, pagetable_index, pageentry_index,
//        bitmap_offset, block_offset);

// disabled by reason
#if 0
#ifdef CACHE
    if (bitmap_offset != s->last_bitmap) {
        lseek(s->fd, bitmap_offset, SEEK_SET);

        s->last_bitmap = bitmap_offset;

        // Scary! Bitmap is stored as big endian 32bit entries,
        // while we used to look it up byte by byte
        read(s->fd, s->pageentry_u8, 512);
        for (i = 0; i < 128; i++)
            be32_to_cpus(&s->pageentry_u32[i]);
    }

    if ((s->pageentry_u8[pageentry_index / 8] >> (pageentry_index % 8)) & 1)
        return -1;
#else
    lseek(s->fd, bitmap_offset + (pageentry_index / 8), SEEK_SET);

    read(s->fd, &bitmap_entry, 1);

    if ((bitmap_entry >> (pageentry_index % 8)) & 1)
        return -1; // not allocated
#endif
#endif

    return block_offset;
}
19,367
qemu
2dbafdc012d3ea81a97fec6226ca82d644539c9a
0
static void coroutine_fn wait_for_overlapping_requests(BlockDriverState *bs,
        BdrvTrackedRequest *self, int64_t offset, unsigned int bytes)
{
    BdrvTrackedRequest *req;
    int64_t cluster_offset;
    unsigned int cluster_bytes;
    bool retry;

    /* If we touch the same cluster it counts as an overlap. This guarantees
     * that allocating writes will be serialized and not race with each other
     * for the same cluster. For example, in copy-on-read it ensures that the
     * CoR read and write operations are atomic and guest writes cannot
     * interleave between them.
     */
    round_bytes_to_clusters(bs, offset, bytes, &cluster_offset, &cluster_bytes);

    do {
        retry = false;
        QLIST_FOREACH(req, &bs->tracked_requests, list) {
            if (req == self) {
                continue;
            }
            if (tracked_request_overlaps(req, cluster_offset, cluster_bytes)) {
                /* Hitting this means there was a reentrant request, for
                 * example, a block driver issuing nested requests. This must
                 * never happen since it means deadlock.
                 */
                assert(qemu_coroutine_self() != req->co);
                qemu_co_queue_wait(&req->wait_queue);
                retry = true;
                break;
            }
        }
    } while (retry);
}
19,368
qemu
42a268c241183877192c376d03bd9b6d527407c7
0
static void disas_comp_b_imm(DisasContext *s, uint32_t insn)
{
    unsigned int sf, op, rt;
    uint64_t addr;
    int label_match;
    TCGv_i64 tcg_cmp;

    sf = extract32(insn, 31, 1);
    op = extract32(insn, 24, 1); /* 0: CBZ; 1: CBNZ */
    rt = extract32(insn, 0, 5);
    addr = s->pc + sextract32(insn, 5, 19) * 4 - 4;

    tcg_cmp = read_cpu_reg(s, rt, sf);
    label_match = gen_new_label();

    tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
                        tcg_cmp, 0, label_match);

    gen_goto_tb(s, 0, s->pc);
    gen_set_label(label_match);
    gen_goto_tb(s, 1, addr);
}
19,369
qemu
a8170e5e97ad17ca169c64ba87ae2f53850dab4c
0
static uint64_t integratorcm_read(void *opaque, target_phys_addr_t offset,
                                  unsigned size)
{
    integratorcm_state *s = (integratorcm_state *)opaque;
    if (offset >= 0x100 && offset < 0x200) {
        /* CM_SPD */
        if (offset >= 0x180)
            return 0;
        return integrator_spd[offset >> 2];
    }
    switch (offset >> 2) {
    case 0: /* CM_ID */
        return 0x411a3001;
    case 1: /* CM_PROC */
        return 0;
    case 2: /* CM_OSC */
        return s->cm_osc;
    case 3: /* CM_CTRL */
        return s->cm_ctrl;
    case 4: /* CM_STAT */
        return 0x00100000;
    case 5: /* CM_LOCK */
        if (s->cm_lock == 0xa05f) {
            return 0x1a05f;
        } else {
            return s->cm_lock;
        }
    case 6: /* CM_LMBUSCNT */
        /* ??? High frequency timer.  */
        hw_error("integratorcm_read: CM_LMBUSCNT");
    case 7: /* CM_AUXOSC */
        return s->cm_auxosc;
    case 8: /* CM_SDRAM */
        return s->cm_sdram;
    case 9: /* CM_INIT */
        return s->cm_init;
    case 10: /* CM_REFCT */
        /* ??? High frequency timer.  */
        hw_error("integratorcm_read: CM_REFCT");
    case 12: /* CM_FLAGS */
        return s->cm_flags;
    case 14: /* CM_NVFLAGS */
        return s->cm_nvflags;
    case 16: /* CM_IRQ_STAT */
        return s->int_level & s->irq_enabled;
    case 17: /* CM_IRQ_RSTAT */
        return s->int_level;
    case 18: /* CM_IRQ_ENSET */
        return s->irq_enabled;
    case 20: /* CM_SOFT_INTSET */
        return s->int_level & 1;
    case 24: /* CM_FIQ_STAT */
        return s->int_level & s->fiq_enabled;
    case 25: /* CM_FIQ_RSTAT */
        return s->int_level;
    case 26: /* CM_FIQ_ENSET */
        return s->fiq_enabled;
    case 32: /* CM_VOLTAGE_CTL0 */
    case 33: /* CM_VOLTAGE_CTL1 */
    case 34: /* CM_VOLTAGE_CTL2 */
    case 35: /* CM_VOLTAGE_CTL3 */
        /* ??? Voltage control unimplemented.  */
        return 0;
    default:
        hw_error("integratorcm_read: Unimplemented offset 0x%x\n",
                 (int)offset);
        return 0;
    }
}
19,370