Dataset columns:
label: int64 (values 0–1)
func1: string (lengths 23–97k)
id: int64 (values 0–27.3k)

label: 0
static always_inline void gen_arith3 (void *helper, int ra, int rb, int rc, int islit, uint8_t lit) { if (unlikely(rc == 31)) return; if (ra != 31) { if (islit) { TCGv tmp = tcg_const_i64(lit); tcg_gen_helper_1_2(helper, cpu_ir[rc], cpu_ir[ra], tmp); tcg_temp_free(tmp); } else tcg_gen_helper_1_2(helper, cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); } else { TCGv tmp1 = tcg_const_i64(0); if (islit) { TCGv tmp2 = tcg_const_i64(lit); tcg_gen_helper_1_2(helper, cpu_ir[rc], tmp1, tmp2); tcg_temp_free(tmp2); } else tcg_gen_helper_1_2(helper, cpu_ir[rc], tmp1, cpu_ir[rb]); tcg_temp_free(tmp1); } }
id: 3300

label: 0
int cpu_get_dump_info(ArchDumpInfo *info, const struct GuestPhysBlockList *guest_phys_blocks) { PowerPCCPU *cpu = POWERPC_CPU(first_cpu); PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); info->d_machine = PPC_ELF_MACHINE; info->d_class = ELFCLASS; if ((*pcc->interrupts_big_endian)(cpu)) { info->d_endian = ELFDATA2MSB; } else { info->d_endian = ELFDATA2LSB; } /* 64KB is the max page size for pseries kernel */ if (strncmp(object_get_typename(qdev_get_machine()), "pseries-", 8) == 0) { info->page_size = (1U << 16); } return 0; }
id: 3301

label: 0
static CharDriverState *qemu_chr_open_pty(QemuOpts *opts) { CharDriverState *chr; PtyCharDriver *s; struct termios tty; const char *label; int master_fd, slave_fd, len; #if defined(__OpenBSD__) || defined(__DragonFly__) char pty_name[PATH_MAX]; #define q_ptsname(x) pty_name #else char *pty_name = NULL; #define q_ptsname(x) ptsname(x) #endif if (openpty(&master_fd, &slave_fd, pty_name, NULL, NULL) < 0) { return NULL; } /* Set raw attributes on the pty. */ tcgetattr(slave_fd, &tty); cfmakeraw(&tty); tcsetattr(slave_fd, TCSAFLUSH, &tty); close(slave_fd); chr = g_malloc0(sizeof(CharDriverState)); len = strlen(q_ptsname(master_fd)) + 5; chr->filename = g_malloc(len); snprintf(chr->filename, len, "pty:%s", q_ptsname(master_fd)); qemu_opt_set(opts, "path", q_ptsname(master_fd)); label = qemu_opts_id(opts); fprintf(stderr, "char device redirected to %s%s%s%s\n", q_ptsname(master_fd), label ? " (label " : "", label ? label : "", label ? ")" : ""); s = g_malloc0(sizeof(PtyCharDriver)); chr->opaque = s; chr->chr_write = pty_chr_write; chr->chr_update_read_handler = pty_chr_update_read_handler; chr->chr_close = pty_chr_close; chr->chr_add_watch = pty_chr_add_watch; s->fd = io_channel_from_fd(master_fd); s->timer_tag = 0; return chr; }
id: 3303

label: 0
static void put_frame( AVFormatContext *s, ASFStream *stream, int timestamp, const uint8_t *buf, int m_obj_size ) { ASFContext *asf = s->priv_data; int m_obj_offset, payload_len, frag_len1; m_obj_offset = 0; while (m_obj_offset < m_obj_size) { payload_len = m_obj_size - m_obj_offset; if (asf->packet_timestamp_start == -1) { asf->multi_payloads_present = (payload_len < MULTI_PAYLOAD_CONSTANT); if (asf->multi_payloads_present){ asf->packet_size_left = PACKET_SIZE; //For debug asf->packet_size_left = PACKET_SIZE - PACKET_HEADER_MIN_SIZE - 1; frag_len1 = MULTI_PAYLOAD_CONSTANT - 1; } else { asf->packet_size_left = PACKET_SIZE - PACKET_HEADER_MIN_SIZE; frag_len1 = SINGLE_PAYLOAD_DATA_LENGTH; } if (asf->prev_packet_sent_time > timestamp) asf->packet_timestamp_start = asf->prev_packet_sent_time; else asf->packet_timestamp_start = timestamp; } else { // multi payloads frag_len1 = asf->packet_size_left - PAYLOAD_HEADER_SIZE_MULTIPLE_PAYLOADS; if (asf->prev_packet_sent_time > timestamp) asf->packet_timestamp_start = asf->prev_packet_sent_time; else if (asf->packet_timestamp_start >= timestamp) asf->packet_timestamp_start = timestamp; } if (frag_len1 > 0) { if (payload_len > frag_len1) payload_len = frag_len1; else if (payload_len == (frag_len1 - 1)) payload_len = frag_len1 - 2; //additional byte need to put padding length put_payload_header(s, stream, timestamp+preroll_time, m_obj_size, m_obj_offset, payload_len); put_buffer(&asf->pb, buf, payload_len); if (asf->multi_payloads_present) asf->packet_size_left -= (payload_len + PAYLOAD_HEADER_SIZE_MULTIPLE_PAYLOADS); else asf->packet_size_left -= (payload_len + PAYLOAD_HEADER_SIZE_SINGLE_PAYLOAD); asf->packet_timestamp_end = timestamp; asf->packet_nb_payloads++; } else { payload_len = 0; } m_obj_offset += payload_len; buf += payload_len; if (!asf->multi_payloads_present) flush_packet(s); else if (asf->packet_size_left <= (PAYLOAD_HEADER_SIZE_MULTIPLE_PAYLOADS + 1)) flush_packet(s); } stream->seq++; }
id: 3304

label: 0
static char *print_drive(void *ptr) { return g_strdup(bdrv_get_device_name(ptr)); }
id: 3305

label: 0
static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn) { int op; int q; int rd, rn, rm; int size; int shift; int pass; int count; int pairwise; int u; uint32_t imm, mask; TCGv tmp, tmp2, tmp3, tmp4, tmp5; TCGv_i64 tmp64; if (!s->vfp_enabled) return 1; q = (insn & (1 << 6)) != 0; u = (insn >> 24) & 1; VFP_DREG_D(rd, insn); VFP_DREG_N(rn, insn); VFP_DREG_M(rm, insn); size = (insn >> 20) & 3; if ((insn & (1 << 23)) == 0) { /* Three register same length. */ op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1); /* Catch invalid op and bad size combinations: UNDEF */ if ((neon_3r_sizes[op] & (1 << size)) == 0) { return 1; } /* All insns of this form UNDEF for either this condition or the * superset of cases "Q==1"; we catch the latter later. */ if (q && ((rd | rn | rm) & 1)) { return 1; } if (size == 3 && op != NEON_3R_LOGIC) { /* 64-bit element instructions. */ for (pass = 0; pass < (q ? 2 : 1); pass++) { neon_load_reg64(cpu_V0, rn + pass); neon_load_reg64(cpu_V1, rm + pass); switch (op) { case NEON_3R_VQADD: if (u) { gen_helper_neon_qadd_u64(cpu_V0, cpu_V0, cpu_V1); } else { gen_helper_neon_qadd_s64(cpu_V0, cpu_V0, cpu_V1); } break; case NEON_3R_VQSUB: if (u) { gen_helper_neon_qsub_u64(cpu_V0, cpu_V0, cpu_V1); } else { gen_helper_neon_qsub_s64(cpu_V0, cpu_V0, cpu_V1); } break; case NEON_3R_VSHL: if (u) { gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0); } else { gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0); } break; case NEON_3R_VQSHL: if (u) { gen_helper_neon_qshl_u64(cpu_V0, cpu_V1, cpu_V0); } else { gen_helper_neon_qshl_s64(cpu_V0, cpu_V1, cpu_V0); } break; case NEON_3R_VRSHL: if (u) { gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0); } else { gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0); } break; case NEON_3R_VQRSHL: if (u) { gen_helper_neon_qrshl_u64(cpu_V0, cpu_V1, cpu_V0); } else { gen_helper_neon_qrshl_s64(cpu_V0, cpu_V1, cpu_V0); } break; case NEON_3R_VADD_VSUB: if (u) { tcg_gen_sub_i64(CPU_V001); } else { tcg_gen_add_i64(CPU_V001); } break; default: abort(); } neon_store_reg64(cpu_V0, rd + pass); } return 0; } pairwise = 0; switch (op) { case NEON_3R_VSHL: case NEON_3R_VQSHL: case NEON_3R_VRSHL: case NEON_3R_VQRSHL: { int rtmp; /* Shift instruction operands are reversed. */ rtmp = rn; rn = rm; rm = rtmp; } break; case NEON_3R_VPADD: if (u) { return 1; } /* Fall through */ case NEON_3R_VPMAX: case NEON_3R_VPMIN: pairwise = 1; break; case NEON_3R_FLOAT_ARITH: pairwise = (u && size < 2); /* if VPADD (float) */ break; case NEON_3R_FLOAT_MINMAX: pairwise = u; /* if VPMIN/VPMAX (float) */ break; case NEON_3R_FLOAT_CMP: if (!u && size) { /* no encoding for U=0 C=1x */ return 1; } break; case NEON_3R_FLOAT_ACMP: if (!u) { return 1; } break; case NEON_3R_VRECPS_VRSQRTS: if (u) { return 1; } break; case NEON_3R_VMUL: if (u && (size != 0)) { /* UNDEF on invalid size for polynomial subcase */ return 1; } break; default: break; } if (pairwise && q) { /* All the pairwise insns UNDEF if Q is set */ return 1; } for (pass = 0; pass < (q ? 4 : 2); pass++) { if (pairwise) { /* Pairwise. */ if (pass < 1) { tmp = neon_load_reg(rn, 0); tmp2 = neon_load_reg(rn, 1); } else { tmp = neon_load_reg(rm, 0); tmp2 = neon_load_reg(rm, 1); } } else { /* Elementwise. */ tmp = neon_load_reg(rn, pass); tmp2 = neon_load_reg(rm, pass); } switch (op) { case NEON_3R_VHADD: GEN_NEON_INTEGER_OP(hadd); break; case NEON_3R_VQADD: GEN_NEON_INTEGER_OP(qadd); break; case NEON_3R_VRHADD: GEN_NEON_INTEGER_OP(rhadd); break; case NEON_3R_LOGIC: /* Logic ops. 
*/ switch ((u << 2) | size) { case 0: /* VAND */ tcg_gen_and_i32(tmp, tmp, tmp2); break; case 1: /* BIC */ tcg_gen_andc_i32(tmp, tmp, tmp2); break; case 2: /* VORR */ tcg_gen_or_i32(tmp, tmp, tmp2); break; case 3: /* VORN */ tcg_gen_orc_i32(tmp, tmp, tmp2); break; case 4: /* VEOR */ tcg_gen_xor_i32(tmp, tmp, tmp2); break; case 5: /* VBSL */ tmp3 = neon_load_reg(rd, pass); gen_neon_bsl(tmp, tmp, tmp2, tmp3); tcg_temp_free_i32(tmp3); break; case 6: /* VBIT */ tmp3 = neon_load_reg(rd, pass); gen_neon_bsl(tmp, tmp, tmp3, tmp2); tcg_temp_free_i32(tmp3); break; case 7: /* VBIF */ tmp3 = neon_load_reg(rd, pass); gen_neon_bsl(tmp, tmp3, tmp, tmp2); tcg_temp_free_i32(tmp3); break; } break; case NEON_3R_VHSUB: GEN_NEON_INTEGER_OP(hsub); break; case NEON_3R_VQSUB: GEN_NEON_INTEGER_OP(qsub); break; case NEON_3R_VCGT: GEN_NEON_INTEGER_OP(cgt); break; case NEON_3R_VCGE: GEN_NEON_INTEGER_OP(cge); break; case NEON_3R_VSHL: GEN_NEON_INTEGER_OP(shl); break; case NEON_3R_VQSHL: GEN_NEON_INTEGER_OP(qshl); break; case NEON_3R_VRSHL: GEN_NEON_INTEGER_OP(rshl); break; case NEON_3R_VQRSHL: GEN_NEON_INTEGER_OP(qrshl); break; case NEON_3R_VMAX: GEN_NEON_INTEGER_OP(max); break; case NEON_3R_VMIN: GEN_NEON_INTEGER_OP(min); break; case NEON_3R_VABD: GEN_NEON_INTEGER_OP(abd); break; case NEON_3R_VABA: GEN_NEON_INTEGER_OP(abd); tcg_temp_free_i32(tmp2); tmp2 = neon_load_reg(rd, pass); gen_neon_add(size, tmp, tmp2); break; case NEON_3R_VADD_VSUB: if (!u) { /* VADD */ gen_neon_add(size, tmp, tmp2); } else { /* VSUB */ switch (size) { case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break; case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break; case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break; default: abort(); } } break; case NEON_3R_VTST_VCEQ: if (!u) { /* VTST */ switch (size) { case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break; case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break; case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break; default: abort(); } } else { /* VCEQ */ switch (size) { case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break; case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break; case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break; default: abort(); } } break; case NEON_3R_VML: /* VMLA, VMLAL, VMLS,VMLSL */ switch (size) { case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break; case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break; case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break; default: abort(); } tcg_temp_free_i32(tmp2); tmp2 = neon_load_reg(rd, pass); if (u) { /* VMLS */ gen_neon_rsb(size, tmp, tmp2); } else { /* VMLA */ gen_neon_add(size, tmp, tmp2); } break; case NEON_3R_VMUL: if (u) { /* polynomial */ gen_helper_neon_mul_p8(tmp, tmp, tmp2); } else { /* Integer */ switch (size) { case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break; case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break; case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break; default: abort(); } } break; case NEON_3R_VPMAX: GEN_NEON_INTEGER_OP(pmax); break; case NEON_3R_VPMIN: GEN_NEON_INTEGER_OP(pmin); break; case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. 
*/ if (!u) { /* VQDMULH */ switch (size) { case 1: gen_helper_neon_qdmulh_s16(tmp, tmp, tmp2); break; case 2: gen_helper_neon_qdmulh_s32(tmp, tmp, tmp2); break; default: abort(); } } else { /* VQRDMULH */ switch (size) { case 1: gen_helper_neon_qrdmulh_s16(tmp, tmp, tmp2); break; case 2: gen_helper_neon_qrdmulh_s32(tmp, tmp, tmp2); break; default: abort(); } } break; case NEON_3R_VPADD: switch (size) { case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break; case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break; case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break; default: abort(); } break; case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */ switch ((u << 2) | size) { case 0: /* VADD */ gen_helper_neon_add_f32(tmp, tmp, tmp2); break; case 2: /* VSUB */ gen_helper_neon_sub_f32(tmp, tmp, tmp2); break; case 4: /* VPADD */ gen_helper_neon_add_f32(tmp, tmp, tmp2); break; case 6: /* VABD */ gen_helper_neon_abd_f32(tmp, tmp, tmp2); break; default: abort(); } break; case NEON_3R_FLOAT_MULTIPLY: gen_helper_neon_mul_f32(tmp, tmp, tmp2); if (!u) { tcg_temp_free_i32(tmp2); tmp2 = neon_load_reg(rd, pass); if (size == 0) { gen_helper_neon_add_f32(tmp, tmp, tmp2); } else { gen_helper_neon_sub_f32(tmp, tmp2, tmp); } } break; case NEON_3R_FLOAT_CMP: if (!u) { gen_helper_neon_ceq_f32(tmp, tmp, tmp2); } else { if (size == 0) gen_helper_neon_cge_f32(tmp, tmp, tmp2); else gen_helper_neon_cgt_f32(tmp, tmp, tmp2); } break; case NEON_3R_FLOAT_ACMP: if (size == 0) gen_helper_neon_acge_f32(tmp, tmp, tmp2); else gen_helper_neon_acgt_f32(tmp, tmp, tmp2); break; case NEON_3R_FLOAT_MINMAX: if (size == 0) gen_helper_neon_max_f32(tmp, tmp, tmp2); else gen_helper_neon_min_f32(tmp, tmp, tmp2); break; case NEON_3R_VRECPS_VRSQRTS: if (size == 0) gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env); else gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env); break; default: abort(); } tcg_temp_free_i32(tmp2); /* Save the result. For elementwise operations we can put it straight into the destination register. For pairwise operations we have to be careful to avoid clobbering the source operands. */ if (pairwise && rd == rm) { neon_store_scratch(pass, tmp); } else { neon_store_reg(rd, pass, tmp); } } /* for pass */ if (pairwise && rd == rm) { for (pass = 0; pass < (q ? 4 : 2); pass++) { tmp = neon_load_scratch(pass); neon_store_reg(rd, pass, tmp); } } /* End of 3 register same size operations. */ } else if (insn & (1 << 4)) { if ((insn & 0x00380080) != 0) { /* Two registers and shift. */ op = (insn >> 8) & 0xf; if (insn & (1 << 7)) { /* 64-bit shift. */ if (op > 7) { return 1; } size = 3; } else { size = 2; while ((insn & (1 << (size + 19))) == 0) size--; } shift = (insn >> 16) & ((1 << (3 + size)) - 1); /* To avoid excessive dumplication of ops we implement shift by immediate using the variable shift operations. */ if (op < 8) { /* Shift by immediate: VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */ if (q && ((rd | rm) & 1)) { return 1; } if (!u && (op == 4 || op == 6)) { return 1; } /* Right shifts are encoded as N - shift, where N is the element size in bits. */ if (op <= 4) shift = shift - (1 << (size + 3)); if (size == 3) { count = q + 1; } else { count = q ? 
4: 2; } switch (size) { case 0: imm = (uint8_t) shift; imm |= imm << 8; imm |= imm << 16; break; case 1: imm = (uint16_t) shift; imm |= imm << 16; break; case 2: case 3: imm = shift; break; default: abort(); } for (pass = 0; pass < count; pass++) { if (size == 3) { neon_load_reg64(cpu_V0, rm + pass); tcg_gen_movi_i64(cpu_V1, imm); switch (op) { case 0: /* VSHR */ case 1: /* VSRA */ if (u) gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1); else gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1); break; case 2: /* VRSHR */ case 3: /* VRSRA */ if (u) gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1); else gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1); break; case 4: /* VSRI */ case 5: /* VSHL, VSLI */ gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1); break; case 6: /* VQSHLU */ gen_helper_neon_qshlu_s64(cpu_V0, cpu_V0, cpu_V1); break; case 7: /* VQSHL */ if (u) { gen_helper_neon_qshl_u64(cpu_V0, cpu_V0, cpu_V1); } else { gen_helper_neon_qshl_s64(cpu_V0, cpu_V0, cpu_V1); } break; } if (op == 1 || op == 3) { /* Accumulate. */ neon_load_reg64(cpu_V1, rd + pass); tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1); } else if (op == 4 || (op == 5 && u)) { /* Insert */ neon_load_reg64(cpu_V1, rd + pass); uint64_t mask; if (shift < -63 || shift > 63) { mask = 0; } else { if (op == 4) { mask = 0xffffffffffffffffull >> -shift; } else { mask = 0xffffffffffffffffull << shift; } } tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask); tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1); } neon_store_reg64(cpu_V0, rd + pass); } else { /* size < 3 */ /* Operands in T0 and T1. */ tmp = neon_load_reg(rm, pass); tmp2 = tcg_temp_new_i32(); tcg_gen_movi_i32(tmp2, imm); switch (op) { case 0: /* VSHR */ case 1: /* VSRA */ GEN_NEON_INTEGER_OP(shl); break; case 2: /* VRSHR */ case 3: /* VRSRA */ GEN_NEON_INTEGER_OP(rshl); break; case 4: /* VSRI */ case 5: /* VSHL, VSLI */ switch (size) { case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break; case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break; case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break; default: abort(); } break; case 6: /* VQSHLU */ switch (size) { case 0: gen_helper_neon_qshlu_s8(tmp, tmp, tmp2); break; case 1: gen_helper_neon_qshlu_s16(tmp, tmp, tmp2); break; case 2: gen_helper_neon_qshlu_s32(tmp, tmp, tmp2); break; default: abort(); } break; case 7: /* VQSHL */ GEN_NEON_INTEGER_OP(qshl); break; } tcg_temp_free_i32(tmp2); if (op == 1 || op == 3) { /* Accumulate. */ tmp2 = neon_load_reg(rd, pass); gen_neon_add(size, tmp, tmp2); tcg_temp_free_i32(tmp2); } else if (op == 4 || (op == 5 && u)) { /* Insert */ switch (size) { case 0: if (op == 4) mask = 0xff >> -shift; else mask = (uint8_t)(0xff << shift); mask |= mask << 8; mask |= mask << 16; break; case 1: if (op == 4) mask = 0xffff >> -shift; else mask = (uint16_t)(0xffff << shift); mask |= mask << 16; break; case 2: if (shift < -31 || shift > 31) { mask = 0; } else { if (op == 4) mask = 0xffffffffu >> -shift; else mask = 0xffffffffu << shift; } break; default: abort(); } tmp2 = neon_load_reg(rd, pass); tcg_gen_andi_i32(tmp, tmp, mask); tcg_gen_andi_i32(tmp2, tmp2, ~mask); tcg_gen_or_i32(tmp, tmp, tmp2); tcg_temp_free_i32(tmp2); } neon_store_reg(rd, pass, tmp); } } /* for pass */ } else if (op < 10) { /* Shift by immediate and narrow: VSHRN, VRSHRN, VQSHRN, VQRSHRN. */ int input_unsigned = (op == 8) ? 
!u : u; if (rm & 1) { return 1; } shift = shift - (1 << (size + 3)); size++; if (size == 3) { tmp64 = tcg_const_i64(shift); neon_load_reg64(cpu_V0, rm); neon_load_reg64(cpu_V1, rm + 1); for (pass = 0; pass < 2; pass++) { TCGv_i64 in; if (pass == 0) { in = cpu_V0; } else { in = cpu_V1; } if (q) { if (input_unsigned) { gen_helper_neon_rshl_u64(cpu_V0, in, tmp64); } else { gen_helper_neon_rshl_s64(cpu_V0, in, tmp64); } } else { if (input_unsigned) { gen_helper_neon_shl_u64(cpu_V0, in, tmp64); } else { gen_helper_neon_shl_s64(cpu_V0, in, tmp64); } } tmp = tcg_temp_new_i32(); gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0); neon_store_reg(rd, pass, tmp); } /* for pass */ tcg_temp_free_i64(tmp64); } else { if (size == 1) { imm = (uint16_t)shift; imm |= imm << 16; } else { /* size == 2 */ imm = (uint32_t)shift; } tmp2 = tcg_const_i32(imm); tmp4 = neon_load_reg(rm + 1, 0); tmp5 = neon_load_reg(rm + 1, 1); for (pass = 0; pass < 2; pass++) { if (pass == 0) { tmp = neon_load_reg(rm, 0); } else { tmp = tmp4; } gen_neon_shift_narrow(size, tmp, tmp2, q, input_unsigned); if (pass == 0) { tmp3 = neon_load_reg(rm, 1); } else { tmp3 = tmp5; } gen_neon_shift_narrow(size, tmp3, tmp2, q, input_unsigned); tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3); tcg_temp_free_i32(tmp); tcg_temp_free_i32(tmp3); tmp = tcg_temp_new_i32(); gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0); neon_store_reg(rd, pass, tmp); } /* for pass */ tcg_temp_free_i32(tmp2); } } else if (op == 10) { /* VSHLL, VMOVL */ if (q || (rd & 1)) { return 1; } tmp = neon_load_reg(rm, 0); tmp2 = neon_load_reg(rm, 1); for (pass = 0; pass < 2; pass++) { if (pass == 1) tmp = tmp2; gen_neon_widen(cpu_V0, tmp, size, u); if (shift != 0) { /* The shift is less than the width of the source type, so we can just shift the whole register. */ tcg_gen_shli_i64(cpu_V0, cpu_V0, shift); /* Widen the result of shift: we need to clear * the potential overflow bits resulting from * left bits of the narrow input appearing as * right bits of left the neighbour narrow * input. */ if (size < 2 || !u) { uint64_t imm64; if (size == 0) { imm = (0xffu >> (8 - shift)); imm |= imm << 16; } else if (size == 1) { imm = 0xffff >> (16 - shift); } else { /* size == 2 */ imm = 0xffffffff >> (32 - shift); } if (size < 2) { imm64 = imm | (((uint64_t)imm) << 32); } else { imm64 = imm; } tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64); } } neon_store_reg64(cpu_V0, rd + pass); } } else if (op >= 14) { /* VCVT fixed-point. */ if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) { return 1; } /* We have already masked out the must-be-1 top bit of imm6, * hence this 32-shift where the ARM ARM has 64-imm6. */ shift = 32 - shift; for (pass = 0; pass < (q ? 4 : 2); pass++) { tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass)); if (!(op & 1)) { if (u) gen_vfp_ulto(0, shift); else gen_vfp_slto(0, shift); } else { if (u) gen_vfp_toul(0, shift); else gen_vfp_tosl(0, shift); } tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass)); } } else { return 1; } } else { /* (insn & 0x00380080) == 0 */ int invert; if (q && (rd & 1)) { return 1; } op = (insn >> 8) & 0xf; /* One register and immediate. */ imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf); invert = (insn & (1 << 5)) != 0; /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE. * We choose to not special-case this and will behave as if a * valid constant encoding of 0 had been given. 
*/ switch (op) { case 0: case 1: /* no-op */ break; case 2: case 3: imm <<= 8; break; case 4: case 5: imm <<= 16; break; case 6: case 7: imm <<= 24; break; case 8: case 9: imm |= imm << 16; break; case 10: case 11: imm = (imm << 8) | (imm << 24); break; case 12: imm = (imm << 8) | 0xff; break; case 13: imm = (imm << 16) | 0xffff; break; case 14: imm |= (imm << 8) | (imm << 16) | (imm << 24); if (invert) imm = ~imm; break; case 15: if (invert) { return 1; } imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19) | ((imm & 0x40) ? (0x1f << 25) : (1 << 30)); break; } if (invert) imm = ~imm; for (pass = 0; pass < (q ? 4 : 2); pass++) { if (op & 1 && op < 12) { tmp = neon_load_reg(rd, pass); if (invert) { /* The immediate value has already been inverted, so BIC becomes AND. */ tcg_gen_andi_i32(tmp, tmp, imm); } else { tcg_gen_ori_i32(tmp, tmp, imm); } } else { /* VMOV, VMVN. */ tmp = tcg_temp_new_i32(); if (op == 14 && invert) { int n; uint32_t val; val = 0; for (n = 0; n < 4; n++) { if (imm & (1 << (n + (pass & 1) * 4))) val |= 0xff << (n * 8); } tcg_gen_movi_i32(tmp, val); } else { tcg_gen_movi_i32(tmp, imm); } } neon_store_reg(rd, pass, tmp); } } } else { /* (insn & 0x00800010 == 0x00800000) */ if (size != 3) { op = (insn >> 8) & 0xf; if ((insn & (1 << 6)) == 0) { /* Three registers of different lengths. */ int src1_wide; int src2_wide; int prewiden; /* undefreq: bit 0 : UNDEF if size != 0 * bit 1 : UNDEF if size == 0 * bit 2 : UNDEF if U == 1 * Note that [1:0] set implies 'always UNDEF' */ int undefreq; /* prewiden, src1_wide, src2_wide, undefreq */ static const int neon_3reg_wide[16][4] = { {1, 0, 0, 0}, /* VADDL */ {1, 1, 0, 0}, /* VADDW */ {1, 0, 0, 0}, /* VSUBL */ {1, 1, 0, 0}, /* VSUBW */ {0, 1, 1, 0}, /* VADDHN */ {0, 0, 0, 0}, /* VABAL */ {0, 1, 1, 0}, /* VSUBHN */ {0, 0, 0, 0}, /* VABDL */ {0, 0, 0, 0}, /* VMLAL */ {0, 0, 0, 6}, /* VQDMLAL */ {0, 0, 0, 0}, /* VMLSL */ {0, 0, 0, 6}, /* VQDMLSL */ {0, 0, 0, 0}, /* Integer VMULL */ {0, 0, 0, 2}, /* VQDMULL */ {0, 0, 0, 5}, /* Polynomial VMULL */ {0, 0, 0, 3}, /* Reserved: always UNDEF */ }; prewiden = neon_3reg_wide[op][0]; src1_wide = neon_3reg_wide[op][1]; src2_wide = neon_3reg_wide[op][2]; undefreq = neon_3reg_wide[op][3]; if (((undefreq & 1) && (size != 0)) || ((undefreq & 2) && (size == 0)) || ((undefreq & 4) && u)) { return 1; } if ((src1_wide && (rn & 1)) || (src2_wide && (rm & 1)) || (!src2_wide && (rd & 1))) { return 1; } /* Avoid overlapping operands. Wide source operands are always aligned so will never overlap with wide destinations in problematic ways. 
*/ if (rd == rm && !src2_wide) { tmp = neon_load_reg(rm, 1); neon_store_scratch(2, tmp); } else if (rd == rn && !src1_wide) { tmp = neon_load_reg(rn, 1); neon_store_scratch(2, tmp); } TCGV_UNUSED(tmp3); for (pass = 0; pass < 2; pass++) { if (src1_wide) { neon_load_reg64(cpu_V0, rn + pass); TCGV_UNUSED(tmp); } else { if (pass == 1 && rd == rn) { tmp = neon_load_scratch(2); } else { tmp = neon_load_reg(rn, pass); } if (prewiden) { gen_neon_widen(cpu_V0, tmp, size, u); } } if (src2_wide) { neon_load_reg64(cpu_V1, rm + pass); TCGV_UNUSED(tmp2); } else { if (pass == 1 && rd == rm) { tmp2 = neon_load_scratch(2); } else { tmp2 = neon_load_reg(rm, pass); } if (prewiden) { gen_neon_widen(cpu_V1, tmp2, size, u); } } switch (op) { case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */ gen_neon_addl(size); break; case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */ gen_neon_subl(size); break; case 5: case 7: /* VABAL, VABDL */ switch ((size << 1) | u) { case 0: gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2); break; case 1: gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2); break; case 2: gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2); break; case 3: gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2); break; case 4: gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2); break; case 5: gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2); break; default: abort(); } tcg_temp_free_i32(tmp2); tcg_temp_free_i32(tmp); break; case 8: case 9: case 10: case 11: case 12: case 13: /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */ gen_neon_mull(cpu_V0, tmp, tmp2, size, u); break; case 14: /* Polynomial VMULL */ gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2); tcg_temp_free_i32(tmp2); tcg_temp_free_i32(tmp); break; default: /* 15 is RESERVED: caught earlier */ abort(); } if (op == 13) { /* VQDMULL */ gen_neon_addl_saturate(cpu_V0, cpu_V0, size); neon_store_reg64(cpu_V0, rd + pass); } else if (op == 5 || (op >= 8 && op <= 11)) { /* Accumulate. */ neon_load_reg64(cpu_V1, rd + pass); switch (op) { case 10: /* VMLSL */ gen_neon_negl(cpu_V0, size); /* Fall through */ case 5: case 8: /* VABAL, VMLAL */ gen_neon_addl(size); break; case 9: case 11: /* VQDMLAL, VQDMLSL */ gen_neon_addl_saturate(cpu_V0, cpu_V0, size); if (op == 11) { gen_neon_negl(cpu_V0, size); } gen_neon_addl_saturate(cpu_V0, cpu_V1, size); break; default: abort(); } neon_store_reg64(cpu_V0, rd + pass); } else if (op == 4 || op == 6) { /* Narrowing operation. */ tmp = tcg_temp_new_i32(); if (!u) { switch (size) { case 0: gen_helper_neon_narrow_high_u8(tmp, cpu_V0); break; case 1: gen_helper_neon_narrow_high_u16(tmp, cpu_V0); break; case 2: tcg_gen_shri_i64(cpu_V0, cpu_V0, 32); tcg_gen_trunc_i64_i32(tmp, cpu_V0); break; default: abort(); } } else { switch (size) { case 0: gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0); break; case 1: gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0); break; case 2: tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31); tcg_gen_shri_i64(cpu_V0, cpu_V0, 32); tcg_gen_trunc_i64_i32(tmp, cpu_V0); break; default: abort(); } } if (pass == 0) { tmp3 = tmp; } else { neon_store_reg(rd, 0, tmp3); neon_store_reg(rd, 1, tmp); } } else { /* Write back the result. */ neon_store_reg64(cpu_V0, rd + pass); } } } else { /* Two registers and a scalar. NB that for ops of this form * the ARM ARM labels bit 24 as Q, but it is in our variable * 'u', not 'q'. 
*/ if (size == 0) { return 1; } switch (op) { case 1: /* Float VMLA scalar */ case 5: /* Floating point VMLS scalar */ case 9: /* Floating point VMUL scalar */ if (size == 1) { return 1; } /* fall through */ case 0: /* Integer VMLA scalar */ case 4: /* Integer VMLS scalar */ case 8: /* Integer VMUL scalar */ case 12: /* VQDMULH scalar */ case 13: /* VQRDMULH scalar */ if (u && ((rd | rn) & 1)) { return 1; } tmp = neon_get_scalar(size, rm); neon_store_scratch(0, tmp); for (pass = 0; pass < (u ? 4 : 2); pass++) { tmp = neon_load_scratch(0); tmp2 = neon_load_reg(rn, pass); if (op == 12) { if (size == 1) { gen_helper_neon_qdmulh_s16(tmp, tmp, tmp2); } else { gen_helper_neon_qdmulh_s32(tmp, tmp, tmp2); } } else if (op == 13) { if (size == 1) { gen_helper_neon_qrdmulh_s16(tmp, tmp, tmp2); } else { gen_helper_neon_qrdmulh_s32(tmp, tmp, tmp2); } } else if (op & 1) { gen_helper_neon_mul_f32(tmp, tmp, tmp2); } else { switch (size) { case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break; case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break; case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break; default: abort(); } } tcg_temp_free_i32(tmp2); if (op < 8) { /* Accumulate. */ tmp2 = neon_load_reg(rd, pass); switch (op) { case 0: gen_neon_add(size, tmp, tmp2); break; case 1: gen_helper_neon_add_f32(tmp, tmp, tmp2); break; case 4: gen_neon_rsb(size, tmp, tmp2); break; case 5: gen_helper_neon_sub_f32(tmp, tmp2, tmp); break; default: abort(); } tcg_temp_free_i32(tmp2); } neon_store_reg(rd, pass, tmp); } break; case 3: /* VQDMLAL scalar */ case 7: /* VQDMLSL scalar */ case 11: /* VQDMULL scalar */ if (u == 1) { return 1; } /* fall through */ case 2: /* VMLAL sclar */ case 6: /* VMLSL scalar */ case 10: /* VMULL scalar */ if (rd & 1) { return 1; } tmp2 = neon_get_scalar(size, rm); /* We need a copy of tmp2 because gen_neon_mull * deletes it during pass 0. */ tmp4 = tcg_temp_new_i32(); tcg_gen_mov_i32(tmp4, tmp2); tmp3 = neon_load_reg(rn, 1); for (pass = 0; pass < 2; pass++) { if (pass == 0) { tmp = neon_load_reg(rn, 0); } else { tmp = tmp3; tmp2 = tmp4; } gen_neon_mull(cpu_V0, tmp, tmp2, size, u); if (op != 11) { neon_load_reg64(cpu_V1, rd + pass); } switch (op) { case 6: gen_neon_negl(cpu_V0, size); /* Fall through */ case 2: gen_neon_addl(size); break; case 3: case 7: gen_neon_addl_saturate(cpu_V0, cpu_V0, size); if (op == 7) { gen_neon_negl(cpu_V0, size); } gen_neon_addl_saturate(cpu_V0, cpu_V1, size); break; case 10: /* no-op */ break; case 11: gen_neon_addl_saturate(cpu_V0, cpu_V0, size); break; default: abort(); } neon_store_reg64(cpu_V0, rd + pass); } break; default: /* 14 and 15 are RESERVED */ return 1; } } } else { /* size == 3 */ if (!u) { /* Extract. 
*/ imm = (insn >> 8) & 0xf; if (imm > 7 && !q) return 1; if (q && ((rd | rn | rm) & 1)) { return 1; } if (imm == 0) { neon_load_reg64(cpu_V0, rn); if (q) { neon_load_reg64(cpu_V1, rn + 1); } } else if (imm == 8) { neon_load_reg64(cpu_V0, rn + 1); if (q) { neon_load_reg64(cpu_V1, rm); } } else if (q) { tmp64 = tcg_temp_new_i64(); if (imm < 8) { neon_load_reg64(cpu_V0, rn); neon_load_reg64(tmp64, rn + 1); } else { neon_load_reg64(cpu_V0, rn + 1); neon_load_reg64(tmp64, rm); } tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8); tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8)); tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1); if (imm < 8) { neon_load_reg64(cpu_V1, rm); } else { neon_load_reg64(cpu_V1, rm + 1); imm -= 8; } tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8)); tcg_gen_shri_i64(tmp64, tmp64, imm * 8); tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64); tcg_temp_free_i64(tmp64); } else { /* BUGFIX */ neon_load_reg64(cpu_V0, rn); tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8); neon_load_reg64(cpu_V1, rm); tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8)); tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1); } neon_store_reg64(cpu_V0, rd); if (q) { neon_store_reg64(cpu_V1, rd + 1); } } else if ((insn & (1 << 11)) == 0) { /* Two register misc. */ op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf); size = (insn >> 18) & 3; /* UNDEF for unknown op values and bad op-size combinations */ if ((neon_2rm_sizes[op] & (1 << size)) == 0) { return 1; } switch (op) { case NEON_2RM_VREV64: for (pass = 0; pass < (q ? 2 : 1); pass++) { tmp = neon_load_reg(rm, pass * 2); tmp2 = neon_load_reg(rm, pass * 2 + 1); switch (size) { case 0: tcg_gen_bswap32_i32(tmp, tmp); break; case 1: gen_swap_half(tmp); break; case 2: /* no-op */ break; default: abort(); } neon_store_reg(rd, pass * 2 + 1, tmp); if (size == 2) { neon_store_reg(rd, pass * 2, tmp2); } else { switch (size) { case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break; case 1: gen_swap_half(tmp2); break; default: abort(); } neon_store_reg(rd, pass * 2, tmp2); } } break; case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U: case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U: for (pass = 0; pass < q + 1; pass++) { tmp = neon_load_reg(rm, pass * 2); gen_neon_widen(cpu_V0, tmp, size, op & 1); tmp = neon_load_reg(rm, pass * 2 + 1); gen_neon_widen(cpu_V1, tmp, size, op & 1); switch (size) { case 0: gen_helper_neon_paddl_u16(CPU_V001); break; case 1: gen_helper_neon_paddl_u32(CPU_V001); break; case 2: tcg_gen_add_i64(CPU_V001); break; default: abort(); } if (op >= NEON_2RM_VPADAL) { /* Accumulate. */ neon_load_reg64(cpu_V1, rd + pass); gen_neon_addl(size); } neon_store_reg64(cpu_V0, rd + pass); } break; case NEON_2RM_VTRN: if (size == 2) { int n; for (n = 0; n < (q ? 
4 : 2); n += 2) { tmp = neon_load_reg(rm, n); tmp2 = neon_load_reg(rd, n + 1); neon_store_reg(rm, n, tmp2); neon_store_reg(rd, n + 1, tmp); } } else { goto elementwise; } break; case NEON_2RM_VUZP: if (gen_neon_unzip(rd, rm, size, q)) { return 1; } break; case NEON_2RM_VZIP: if (gen_neon_zip(rd, rm, size, q)) { return 1; } break; case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN: /* also VQMOVUN; op field and mnemonics don't line up */ TCGV_UNUSED(tmp2); for (pass = 0; pass < 2; pass++) { neon_load_reg64(cpu_V0, rm + pass); tmp = tcg_temp_new_i32(); gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size, tmp, cpu_V0); if (pass == 0) { tmp2 = tmp; } else { neon_store_reg(rd, 0, tmp2); neon_store_reg(rd, 1, tmp); } } break; case NEON_2RM_VSHLL: if (q) { return 1; } tmp = neon_load_reg(rm, 0); tmp2 = neon_load_reg(rm, 1); for (pass = 0; pass < 2; pass++) { if (pass == 1) tmp = tmp2; gen_neon_widen(cpu_V0, tmp, size, 1); tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size); neon_store_reg64(cpu_V0, rd + pass); } break; case NEON_2RM_VCVT_F16_F32: if (!arm_feature(env, ARM_FEATURE_VFP_FP16)) return 1; tmp = tcg_temp_new_i32(); tmp2 = tcg_temp_new_i32(); tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0)); gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env); tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1)); gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env); tcg_gen_shli_i32(tmp2, tmp2, 16); tcg_gen_or_i32(tmp2, tmp2, tmp); tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2)); gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env); tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3)); neon_store_reg(rd, 0, tmp2); tmp2 = tcg_temp_new_i32(); gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env); tcg_gen_shli_i32(tmp2, tmp2, 16); tcg_gen_or_i32(tmp2, tmp2, tmp); neon_store_reg(rd, 1, tmp2); tcg_temp_free_i32(tmp); break; case NEON_2RM_VCVT_F32_F16: if (!arm_feature(env, ARM_FEATURE_VFP_FP16)) return 1; tmp3 = tcg_temp_new_i32(); tmp = neon_load_reg(rm, 0); tmp2 = neon_load_reg(rm, 1); tcg_gen_ext16u_i32(tmp3, tmp); gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env); tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0)); tcg_gen_shri_i32(tmp3, tmp, 16); gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env); tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1)); tcg_temp_free_i32(tmp); tcg_gen_ext16u_i32(tmp3, tmp2); gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env); tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2)); tcg_gen_shri_i32(tmp3, tmp2, 16); gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env); tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3)); tcg_temp_free_i32(tmp2); tcg_temp_free_i32(tmp3); break; default: elementwise: for (pass = 0; pass < (q ? 
4 : 2); pass++) { if (neon_2rm_is_float_op(op)) { tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass)); TCGV_UNUSED(tmp); } else { tmp = neon_load_reg(rm, pass); } switch (op) { case NEON_2RM_VREV32: switch (size) { case 0: tcg_gen_bswap32_i32(tmp, tmp); break; case 1: gen_swap_half(tmp); break; default: abort(); } break; case NEON_2RM_VREV16: gen_rev16(tmp); break; case NEON_2RM_VCLS: switch (size) { case 0: gen_helper_neon_cls_s8(tmp, tmp); break; case 1: gen_helper_neon_cls_s16(tmp, tmp); break; case 2: gen_helper_neon_cls_s32(tmp, tmp); break; default: abort(); } break; case NEON_2RM_VCLZ: switch (size) { case 0: gen_helper_neon_clz_u8(tmp, tmp); break; case 1: gen_helper_neon_clz_u16(tmp, tmp); break; case 2: gen_helper_clz(tmp, tmp); break; default: abort(); } break; case NEON_2RM_VCNT: gen_helper_neon_cnt_u8(tmp, tmp); break; case NEON_2RM_VMVN: tcg_gen_not_i32(tmp, tmp); break; case NEON_2RM_VQABS: switch (size) { case 0: gen_helper_neon_qabs_s8(tmp, tmp); break; case 1: gen_helper_neon_qabs_s16(tmp, tmp); break; case 2: gen_helper_neon_qabs_s32(tmp, tmp); break; default: abort(); } break; case NEON_2RM_VQNEG: switch (size) { case 0: gen_helper_neon_qneg_s8(tmp, tmp); break; case 1: gen_helper_neon_qneg_s16(tmp, tmp); break; case 2: gen_helper_neon_qneg_s32(tmp, tmp); break; default: abort(); } break; case NEON_2RM_VCGT0: case NEON_2RM_VCLE0: tmp2 = tcg_const_i32(0); switch(size) { case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break; case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break; case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break; default: abort(); } tcg_temp_free(tmp2); if (op == NEON_2RM_VCLE0) { tcg_gen_not_i32(tmp, tmp); } break; case NEON_2RM_VCGE0: case NEON_2RM_VCLT0: tmp2 = tcg_const_i32(0); switch(size) { case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break; case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break; case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break; default: abort(); } tcg_temp_free(tmp2); if (op == NEON_2RM_VCLT0) { tcg_gen_not_i32(tmp, tmp); } break; case NEON_2RM_VCEQ0: tmp2 = tcg_const_i32(0); switch(size) { case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break; case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break; case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break; default: abort(); } tcg_temp_free(tmp2); break; case NEON_2RM_VABS: switch(size) { case 0: gen_helper_neon_abs_s8(tmp, tmp); break; case 1: gen_helper_neon_abs_s16(tmp, tmp); break; case 2: tcg_gen_abs_i32(tmp, tmp); break; default: abort(); } break; case NEON_2RM_VNEG: tmp2 = tcg_const_i32(0); gen_neon_rsb(size, tmp, tmp2); tcg_temp_free(tmp2); break; case NEON_2RM_VCGT0_F: tmp2 = tcg_const_i32(0); gen_helper_neon_cgt_f32(tmp, tmp, tmp2); tcg_temp_free(tmp2); break; case NEON_2RM_VCGE0_F: tmp2 = tcg_const_i32(0); gen_helper_neon_cge_f32(tmp, tmp, tmp2); tcg_temp_free(tmp2); break; case NEON_2RM_VCEQ0_F: tmp2 = tcg_const_i32(0); gen_helper_neon_ceq_f32(tmp, tmp, tmp2); tcg_temp_free(tmp2); break; case NEON_2RM_VCLE0_F: tmp2 = tcg_const_i32(0); gen_helper_neon_cge_f32(tmp, tmp2, tmp); tcg_temp_free(tmp2); break; case NEON_2RM_VCLT0_F: tmp2 = tcg_const_i32(0); gen_helper_neon_cgt_f32(tmp, tmp2, tmp); tcg_temp_free(tmp2); break; case NEON_2RM_VABS_F: gen_vfp_abs(0); break; case NEON_2RM_VNEG_F: gen_vfp_neg(0); break; case NEON_2RM_VSWP: tmp2 = neon_load_reg(rd, pass); neon_store_reg(rm, pass, tmp2); break; case NEON_2RM_VTRN: tmp2 = neon_load_reg(rd, pass); switch (size) { case 0: gen_neon_trn_u8(tmp, tmp2); break; case 1: gen_neon_trn_u16(tmp, tmp2); break; 
default: abort(); } neon_store_reg(rm, pass, tmp2); break; case NEON_2RM_VRECPE: gen_helper_recpe_u32(tmp, tmp, cpu_env); break; case NEON_2RM_VRSQRTE: gen_helper_rsqrte_u32(tmp, tmp, cpu_env); break; case NEON_2RM_VRECPE_F: gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env); break; case NEON_2RM_VRSQRTE_F: gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env); break; case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */ gen_vfp_sito(0); break; case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */ gen_vfp_uito(0); break; case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */ gen_vfp_tosiz(0); break; case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */ gen_vfp_touiz(0); break; default: /* Reserved op values were caught by the * neon_2rm_sizes[] check earlier. */ abort(); } if (neon_2rm_is_float_op(op)) { tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass)); } else { neon_store_reg(rd, pass, tmp); } } break; } } else if ((insn & (1 << 10)) == 0) { /* VTBL, VTBX. */ int n = ((insn >> 5) & 0x18) + 8; if (insn & (1 << 6)) { tmp = neon_load_reg(rd, 0); } else { tmp = tcg_temp_new_i32(); tcg_gen_movi_i32(tmp, 0); } tmp2 = neon_load_reg(rm, 0); tmp4 = tcg_const_i32(rn); tmp5 = tcg_const_i32(n); gen_helper_neon_tbl(tmp2, tmp2, tmp, tmp4, tmp5); tcg_temp_free_i32(tmp); if (insn & (1 << 6)) { tmp = neon_load_reg(rd, 1); } else { tmp = tcg_temp_new_i32(); tcg_gen_movi_i32(tmp, 0); } tmp3 = neon_load_reg(rm, 1); gen_helper_neon_tbl(tmp3, tmp3, tmp, tmp4, tmp5); tcg_temp_free_i32(tmp5); tcg_temp_free_i32(tmp4); neon_store_reg(rd, 0, tmp2); neon_store_reg(rd, 1, tmp3); tcg_temp_free_i32(tmp); } else if ((insn & 0x380) == 0) { /* VDUP */ if (insn & (1 << 19)) { tmp = neon_load_reg(rm, 1); } else { tmp = neon_load_reg(rm, 0); } if (insn & (1 << 16)) { gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8); } else if (insn & (1 << 17)) { if ((insn >> 18) & 1) gen_neon_dup_high16(tmp); else gen_neon_dup_low16(tmp); } for (pass = 0; pass < (q ? 4 : 2); pass++) { tmp2 = tcg_temp_new_i32(); tcg_gen_mov_i32(tmp2, tmp); neon_store_reg(rd, pass, tmp2); } tcg_temp_free_i32(tmp); } else { return 1; } } } return 0; }
id: 3306

label: 0
static void rtas_nvram_store(PowerPCCPU *cpu, sPAPREnvironment *spapr, uint32_t token, uint32_t nargs, target_ulong args, uint32_t nret, target_ulong rets) { sPAPRNVRAM *nvram = spapr->nvram; hwaddr offset, buffer, len; int alen; void *membuf; if ((nargs != 3) || (nret != 2)) { rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); return; } if (!nvram) { rtas_st(rets, 0, RTAS_OUT_HW_ERROR); return; } offset = rtas_ld(args, 0); buffer = rtas_ld(args, 1); len = rtas_ld(args, 2); if (((offset + len) < offset) || ((offset + len) > nvram->size)) { rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); return; } membuf = cpu_physical_memory_map(buffer, &len, 0); if (nvram->drive) { alen = bdrv_pwrite(nvram->drive, offset, membuf, len); } else { assert(nvram->buf); memcpy(nvram->buf + offset, membuf, len); alen = len; } cpu_physical_memory_unmap(membuf, len, 0, len); rtas_st(rets, 0, (alen < len) ? RTAS_OUT_HW_ERROR : RTAS_OUT_SUCCESS); rtas_st(rets, 1, (alen < 0) ? 0 : alen); }
id: 3307

label: 0
int coroutine_fn qed_find_cluster(BDRVQEDState *s, QEDRequest *request, uint64_t pos, size_t *len, uint64_t *img_offset) { uint64_t l2_offset; uint64_t offset = 0; unsigned int index; unsigned int n; int ret; /* Limit length to L2 boundary. Requests are broken up at the L2 boundary * so that a request acts on one L2 table at a time. */ *len = MIN(*len, (((pos >> s->l1_shift) + 1) << s->l1_shift) - pos); l2_offset = s->l1_table->offsets[qed_l1_index(s, pos)]; if (qed_offset_is_unalloc_cluster(l2_offset)) { *img_offset = 0; return QED_CLUSTER_L1; } if (!qed_check_table_offset(s, l2_offset)) { *img_offset = *len = 0; return -EINVAL; } ret = qed_read_l2_table(s, request, l2_offset); qed_acquire(s); if (ret) { goto out; } index = qed_l2_index(s, pos); n = qed_bytes_to_clusters(s, qed_offset_into_cluster(s, pos) + *len); n = qed_count_contiguous_clusters(s, request->l2_table->table, index, n, &offset); if (qed_offset_is_unalloc_cluster(offset)) { ret = QED_CLUSTER_L2; } else if (qed_offset_is_zero_cluster(offset)) { ret = QED_CLUSTER_ZERO; } else if (qed_check_cluster_offset(s, offset)) { ret = QED_CLUSTER_FOUND; } else { ret = -EINVAL; } *len = MIN(*len, n * s->header.cluster_size - qed_offset_into_cluster(s, pos)); out: *img_offset = offset; qed_release(s); return ret; }
id: 3308

label: 0
static int send_solid_rect(VncState *vs) { size_t bytes; vnc_write_u8(vs, VNC_TIGHT_FILL << 4); /* no flushing, no filter */ if (vs->tight_pixel24) { tight_pack24(vs, vs->tight.buffer, 1, &vs->tight.offset); bytes = 3; } else { bytes = vs->clientds.pf.bytes_per_pixel; } vnc_write(vs, vs->tight.buffer, bytes); return 1; }
id: 3310

label: 0
int DMA_read_memory (int nchan, void *buf, int pos, int len) { struct dma_regs *r = &dma_controllers[nchan > 3].regs[nchan & 3]; target_phys_addr_t addr = ((r->pageh & 0x7f) << 24) | (r->page << 16) | r->now[ADDR]; if (r->mode & 0x20) { int i; uint8_t *p = buf; cpu_physical_memory_read (addr - pos - len, buf, len); /* What about 16bit transfers? */ for (i = 0; i < len >> 1; i++) { uint8_t b = p[len - i - 1]; p[i] = b; } } else cpu_physical_memory_read (addr + pos, buf, len); return len; }
id: 3311

label: 0
static ssize_t nc_sendv_compat(NetClientState *nc, const struct iovec *iov, int iovcnt) { uint8_t buffer[4096]; size_t offset; offset = iov_to_buf(iov, iovcnt, 0, buffer, sizeof(buffer)); return nc->info->receive(nc, buffer, offset); }
id: 3313

label: 0
static uint32_t gic_dist_readl(void *opaque, target_phys_addr_t offset) { uint32_t val; val = gic_dist_readw(opaque, offset); val |= gic_dist_readw(opaque, offset + 2) << 16; return val; }
id: 3314

label: 0
static int ftp_flush_control_input(FTPContext *s) { char buf[CONTROL_BUFFER_SIZE]; int err, ori_block_flag = s->conn_control_block_flag; s->conn_control_block_flag = 1; do { err = ftp_get_line(s, buf, sizeof(buf)); } while (!err); s->conn_control_block_flag = ori_block_flag; if (err < 0 && err != AVERROR_EXIT) return err; return 0; }
id: 3315

label: 0
static void virtio_rng_device_realize(DeviceState *dev, Error **errp) { VirtIODevice *vdev = VIRTIO_DEVICE(dev); VirtIORNG *vrng = VIRTIO_RNG(dev); Error *local_err = NULL; if (!vrng->conf.period_ms > 0) { error_setg(errp, "'period' parameter expects a positive integer"); return; } /* Workaround: Property parsing does not enforce unsigned integers, * So this is a hack to reject such numbers. */ if (vrng->conf.max_bytes > INT64_MAX) { error_setg(errp, "'max-bytes' parameter must be non-negative, " "and less than 2^63"); return; } if (vrng->conf.rng == NULL) { vrng->conf.default_backend = RNG_RANDOM(object_new(TYPE_RNG_RANDOM)); user_creatable_complete(OBJECT(vrng->conf.default_backend), &local_err); if (local_err) { error_propagate(errp, local_err); object_unref(OBJECT(vrng->conf.default_backend)); return; } object_property_add_child(OBJECT(dev), "default-backend", OBJECT(vrng->conf.default_backend), NULL); /* The child property took a reference, we can safely drop ours now */ object_unref(OBJECT(vrng->conf.default_backend)); object_property_set_link(OBJECT(dev), OBJECT(vrng->conf.default_backend), "rng", NULL); } vrng->rng = vrng->conf.rng; if (vrng->rng == NULL) { error_setg(errp, "'rng' parameter expects a valid object"); return; } virtio_init(vdev, "virtio-rng", VIRTIO_ID_RNG, 0); vrng->vq = virtio_add_queue(vdev, 8, handle_input); vrng->quota_remaining = vrng->conf.max_bytes; vrng->rate_limit_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, check_rate_limit, vrng); timer_mod(vrng->rate_limit_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vrng->conf.period_ms); register_savevm(dev, "virtio-rng", -1, 1, virtio_rng_save, virtio_rng_load, vrng); }
id: 3316

label: 0
void arm_sysctl_init(uint32_t base, uint32_t sys_id) { DeviceState *dev; dev = qdev_create(NULL, "realview_sysctl"); qdev_prop_set_uint32(dev, "sys_id", sys_id); qdev_init_nofail(dev); sysbus_mmio_map(sysbus_from_qdev(dev), 0, base); }
id: 3317

label: 0
fdctrl_t *sun4m_fdctrl_init (qemu_irq irq, target_phys_addr_t io_base, BlockDriverState **fds) { fdctrl_t *fdctrl; fdctrl = fdctrl_init(irq, 0, 1, io_base, fds); fdctrl->sun4m = 1; return fdctrl; }
id: 3318

label: 0
void cpu_loop(CPUUniCore32State *env) { CPUState *cs = CPU(uc32_env_get_cpu(env)); int trapnr; unsigned int n, insn; target_siginfo_t info; for (;;) { cpu_exec_start(cs); trapnr = uc32_cpu_exec(cs); cpu_exec_end(cs); switch (trapnr) { case UC32_EXCP_PRIV: { /* system call */ get_user_u32(insn, env->regs[31] - 4); n = insn & 0xffffff; if (n >= UC32_SYSCALL_BASE) { /* linux syscall */ n -= UC32_SYSCALL_BASE; if (n == UC32_SYSCALL_NR_set_tls) { cpu_set_tls(env, env->regs[0]); env->regs[0] = 0; } else { env->regs[0] = do_syscall(env, n, env->regs[0], env->regs[1], env->regs[2], env->regs[3], env->regs[4], env->regs[5], 0, 0); } } else { goto error; } } break; case UC32_EXCP_DTRAP: case UC32_EXCP_ITRAP: info.si_signo = TARGET_SIGSEGV; info.si_errno = 0; /* XXX: check env->error_code */ info.si_code = TARGET_SEGV_MAPERR; info._sifields._sigfault._addr = env->cp0.c4_faultaddr; queue_signal(env, info.si_signo, &info); break; case EXCP_INTERRUPT: /* just indicate that signals should be handled asap */ break; case EXCP_DEBUG: { int sig; sig = gdb_handlesig(cs, TARGET_SIGTRAP); if (sig) { info.si_signo = sig; info.si_errno = 0; info.si_code = TARGET_TRAP_BRKPT; queue_signal(env, info.si_signo, &info); } } break; default: goto error; } process_pending_signals(env); } error: fprintf(stderr, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr); cpu_dump_state(cs, stderr, fprintf, 0); abort(); }
id: 3319

label: 1
static int tm2_read_stream(TM2Context *ctx, const uint8_t *buf, int stream_id, int buf_size) { int i; int cur = 0; int skip = 0; int len, toks; TM2Codes codes; /* get stream length in dwords */ len = AV_RB32(buf); buf += 4; cur += 4; skip = len * 4 + 4; if(len == 0) return 4; if (len >= INT_MAX/4-1 || len < 0 || len > buf_size) { av_log(ctx->avctx, AV_LOG_ERROR, "Error, invalid stream size.\n"); } toks = AV_RB32(buf); buf += 4; cur += 4; if(toks & 1) { len = AV_RB32(buf); buf += 4; cur += 4; if(len == TM2_ESCAPE) { len = AV_RB32(buf); buf += 4; cur += 4; } if(len > 0) { init_get_bits(&ctx->gb, buf, (skip - cur) * 8); if(tm2_read_deltas(ctx, stream_id) == -1) buf += ((get_bits_count(&ctx->gb) + 31) >> 5) << 2; cur += ((get_bits_count(&ctx->gb) + 31) >> 5) << 2; } } /* skip unused fields */ if(AV_RB32(buf) == TM2_ESCAPE) { buf += 4; cur += 4; /* some unknown length - could be escaped too */ } buf += 4; cur += 4; buf += 4; cur += 4; /* unused by decoder */ init_get_bits(&ctx->gb, buf, (skip - cur) * 8); if(tm2_build_huff_table(ctx, &codes) == -1) buf += ((get_bits_count(&ctx->gb) + 31) >> 5) << 2; cur += ((get_bits_count(&ctx->gb) + 31) >> 5) << 2; toks >>= 1; /* check if we have sane number of tokens */ if((toks < 0) || (toks > 0xFFFFFF)){ av_log(ctx->avctx, AV_LOG_ERROR, "Incorrect number of tokens: %i\n", toks); tm2_free_codes(&codes); } ctx->tokens[stream_id] = av_realloc(ctx->tokens[stream_id], toks * sizeof(int)); ctx->tok_lens[stream_id] = toks; len = AV_RB32(buf); buf += 4; cur += 4; if(len > 0) { init_get_bits(&ctx->gb, buf, (skip - cur) * 8); for(i = 0; i < toks; i++) { if (get_bits_left(&ctx->gb) <= 0) { av_log(ctx->avctx, AV_LOG_ERROR, "Incorrect number of tokens: %i\n", toks); } ctx->tokens[stream_id][i] = tm2_get_token(&ctx->gb, &codes); } } else { for(i = 0; i < toks; i++) ctx->tokens[stream_id][i] = codes.recode[0]; } tm2_free_codes(&codes); return skip; }
id: 3321

label: 1
int64_t av_gcd(int64_t a, int64_t b) { if (b) return av_gcd(b, a % b); else return a; }
id: 3323

label: 1
static inline int ape_decode_value_3900(APEContext *ctx, APERice *rice) { unsigned int x, overflow; int tmpk; overflow = range_get_symbol(ctx, counts_3970, counts_diff_3970); if (overflow == (MODEL_ELEMENTS - 1)) { tmpk = range_decode_bits(ctx, 5); overflow = 0; } else tmpk = (rice->k < 1) ? 0 : rice->k - 1; if (tmpk <= 16 || ctx->fileversion < 3910) { if (tmpk > 23) { av_log(ctx->avctx, AV_LOG_ERROR, "Too many bits: %d\n", tmpk); return AVERROR_INVALIDDATA; } x = range_decode_bits(ctx, tmpk); } else if (tmpk <= 32) { x = range_decode_bits(ctx, 16); x |= (range_decode_bits(ctx, tmpk - 16) << 16); } else { av_log(ctx->avctx, AV_LOG_ERROR, "Too many bits: %d\n", tmpk); return AVERROR_INVALIDDATA; } x += overflow << tmpk; update_rice(rice, x); /* Convert to signed */ if (x & 1) return (x >> 1) + 1; else return -(x >> 1); }
id: 3324

label: 1
int avcodec_decode_video(AVCodecContext *avctx, AVPicture *picture, int *got_picture_ptr, UINT8 *buf, int buf_size) { int ret; ret = avctx->codec->decode(avctx, picture, got_picture_ptr, buf, buf_size); avctx->frame_number++; return ret; }
id: 3325

label: 1
static void nbd_teardown_connection(NbdClientSession *client) { struct nbd_request request = { .type = NBD_CMD_DISC, .from = 0, .len = 0 }; nbd_send_request(client->sock, &request); /* finish any pending coroutines */ shutdown(client->sock, 2); nbd_recv_coroutines_enter_all(client); qemu_aio_set_fd_handler(client->sock, NULL, NULL, NULL); closesocket(client->sock); client->sock = -1; }
id: 3326

label: 0
static void do_bit_allocation(AC3DecodeContext *ctx, int flags) { ac3_audio_block *ab = &ctx->audio_block; int i, snroffst = 0; if (!flags) /* bit allocation is not required */ return; if (ab->flags & AC3_AB_SNROFFSTE) { /* check whether snroffsts are zero */ snroffst += ab->csnroffst; if (ab->flags & AC3_AB_CPLINU) snroffst += ab->cplfsnroffst; for (i = 0; i < ctx->bsi.nfchans; i++) snroffst += ab->fsnroffst[i]; if (ctx->bsi.flags & AC3_BSI_LFEON) snroffst += ab->lfefsnroffst; if (!snroffst) { memset(ab->cplbap, 0, sizeof (ab->cplbap)); for (i = 0; i < ctx->bsi.nfchans; i++) memset(ab->bap[i], 0, sizeof (ab->bap[i])); memset(ab->lfebap, 0, sizeof (ab->lfebap)); return; } } /* perform bit allocation */ if ((ab->flags & AC3_AB_CPLINU) && (flags & 64)) do_bit_allocation1(ctx, 5); for (i = 0; i < ctx->bsi.nfchans; i++) if (flags & (1 << i)) do_bit_allocation1(ctx, i); if ((ctx->bsi.flags & AC3_BSI_LFEON) && (flags & 32)) do_bit_allocation1(ctx, 6); }
id: 3329

label: 0
static int adx_decode_header(AVCodecContext *avctx, const uint8_t *buf, int bufsize) { int offset; if (buf[0] != 0x80) return 0; offset = (AV_RB32(buf) ^ 0x80000000) + 4; if (bufsize < offset || memcmp(buf + offset - 6, "(c)CRI", 6)) return 0; avctx->channels = buf[7]; avctx->sample_rate = AV_RB32(buf + 8); avctx->bit_rate = avctx->sample_rate * avctx->channels * 18 * 8 / 32; return offset; }
id: 3331

label: 0
static void scsi_write_complete(void * opaque, int ret) { SCSIDiskReq *r = (SCSIDiskReq *)opaque; SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); uint32_t n; if (r->req.aiocb != NULL) { r->req.aiocb = NULL; block_acct_done(bdrv_get_stats(s->qdev.conf.bs), &r->acct); } if (r->req.io_canceled) { goto done; } if (ret < 0) { if (scsi_handle_rw_error(r, -ret)) { goto done; } } n = r->qiov.size / 512; r->sector += n; r->sector_count -= n; if (r->sector_count == 0) { scsi_write_do_fua(r); return; } else { scsi_init_iovec(r, SCSI_DMA_BUF_SIZE); DPRINTF("Write complete tag=0x%x more=%zd\n", r->req.tag, r->qiov.size); scsi_req_data(&r->req, r->qiov.size); } done: if (!r->req.io_canceled) { scsi_req_unref(&r->req); } }
id: 3332

label: 0
static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align) { int64_t overlap_offset = req->offset & ~(align - 1); unsigned int overlap_bytes = ROUND_UP(req->offset + req->bytes, align) - overlap_offset; if (!req->serialising) { req->bs->serialising_in_flight++; req->serialising = true; } req->overlap_offset = MIN(req->overlap_offset, overlap_offset); req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes); }
id: 3334

label: 0
STATIC void DEF(avg, pixels8_xy2)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h) { MOVQ_ZERO(mm7); SET_RND(mm6); // =2 for rnd and =1 for no_rnd version __asm__ volatile( "movq (%1), %%mm0 \n\t" "movq 1(%1), %%mm4 \n\t" "movq %%mm0, %%mm1 \n\t" "movq %%mm4, %%mm5 \n\t" "punpcklbw %%mm7, %%mm0 \n\t" "punpcklbw %%mm7, %%mm4 \n\t" "punpckhbw %%mm7, %%mm1 \n\t" "punpckhbw %%mm7, %%mm5 \n\t" "paddusw %%mm0, %%mm4 \n\t" "paddusw %%mm1, %%mm5 \n\t" "xor %%"REG_a", %%"REG_a" \n\t" "add %3, %1 \n\t" ".p2align 3 \n\t" "1: \n\t" "movq (%1, %%"REG_a"), %%mm0 \n\t" "movq 1(%1, %%"REG_a"), %%mm2 \n\t" "movq %%mm0, %%mm1 \n\t" "movq %%mm2, %%mm3 \n\t" "punpcklbw %%mm7, %%mm0 \n\t" "punpcklbw %%mm7, %%mm2 \n\t" "punpckhbw %%mm7, %%mm1 \n\t" "punpckhbw %%mm7, %%mm3 \n\t" "paddusw %%mm2, %%mm0 \n\t" "paddusw %%mm3, %%mm1 \n\t" "paddusw %%mm6, %%mm4 \n\t" "paddusw %%mm6, %%mm5 \n\t" "paddusw %%mm0, %%mm4 \n\t" "paddusw %%mm1, %%mm5 \n\t" "psrlw $2, %%mm4 \n\t" "psrlw $2, %%mm5 \n\t" "movq (%2, %%"REG_a"), %%mm3 \n\t" "packuswb %%mm5, %%mm4 \n\t" "pcmpeqd %%mm2, %%mm2 \n\t" "paddb %%mm2, %%mm2 \n\t" PAVGB_MMX(%%mm3, %%mm4, %%mm5, %%mm2) "movq %%mm5, (%2, %%"REG_a") \n\t" "add %3, %%"REG_a" \n\t" "movq (%1, %%"REG_a"), %%mm2 \n\t" // 0 <-> 2 1 <-> 3 "movq 1(%1, %%"REG_a"), %%mm4 \n\t" "movq %%mm2, %%mm3 \n\t" "movq %%mm4, %%mm5 \n\t" "punpcklbw %%mm7, %%mm2 \n\t" "punpcklbw %%mm7, %%mm4 \n\t" "punpckhbw %%mm7, %%mm3 \n\t" "punpckhbw %%mm7, %%mm5 \n\t" "paddusw %%mm2, %%mm4 \n\t" "paddusw %%mm3, %%mm5 \n\t" "paddusw %%mm6, %%mm0 \n\t" "paddusw %%mm6, %%mm1 \n\t" "paddusw %%mm4, %%mm0 \n\t" "paddusw %%mm5, %%mm1 \n\t" "psrlw $2, %%mm0 \n\t" "psrlw $2, %%mm1 \n\t" "movq (%2, %%"REG_a"), %%mm3 \n\t" "packuswb %%mm1, %%mm0 \n\t" "pcmpeqd %%mm2, %%mm2 \n\t" "paddb %%mm2, %%mm2 \n\t" PAVGB_MMX(%%mm3, %%mm0, %%mm1, %%mm2) "movq %%mm1, (%2, %%"REG_a") \n\t" "add %3, %%"REG_a" \n\t" "subl $2, %0 \n\t" "jnz 1b \n\t" :"+g"(h), "+S"(pixels) :"D"(block), "r"((x86_reg)line_size) :REG_a, "memory"); }
id: 3335

label: 0
start_list(Visitor *v, const char *name, Error **errp) { StringInputVisitor *siv = to_siv(v); if (parse_str(siv, name, errp) < 0) { return; } siv->cur_range = g_list_first(siv->ranges); if (siv->cur_range) { Range *r = siv->cur_range->data; if (r) { siv->cur = r->begin; } } }
id: 3336

label: 0
static int check_arg(const CmdArgs *cmd_args, QDict *args) { QObject *value; const char *name; name = qstring_get_str(cmd_args->name); if (!args) { return check_opt(cmd_args, name, args); } value = qdict_get(args, name); if (!value) { return check_opt(cmd_args, name, args); } switch (cmd_args->type) { case 'F': case 'B': case 's': if (qobject_type(value) != QTYPE_QSTRING) { qerror_report(QERR_INVALID_PARAMETER_TYPE, name, "string"); return -1; } break; case '/': { int i; const char *keys[] = { "count", "format", "size", NULL }; for (i = 0; keys[i]; i++) { QObject *obj = qdict_get(args, keys[i]); if (!obj) { qerror_report(QERR_MISSING_PARAMETER, name); return -1; } if (qobject_type(obj) != QTYPE_QINT) { qerror_report(QERR_INVALID_PARAMETER_TYPE, name, "int"); return -1; } } break; } case 'i': case 'l': case 'M': if (qobject_type(value) != QTYPE_QINT) { qerror_report(QERR_INVALID_PARAMETER_TYPE, name, "int"); return -1; } break; case 'f': case 'T': if (qobject_type(value) != QTYPE_QINT && qobject_type(value) != QTYPE_QFLOAT) { qerror_report(QERR_INVALID_PARAMETER_TYPE, name, "number"); return -1; } break; case 'b': if (qobject_type(value) != QTYPE_QBOOL) { qerror_report(QERR_INVALID_PARAMETER_TYPE, name, "bool"); return -1; } break; case '-': if (qobject_type(value) != QTYPE_QINT && qobject_type(value) != QTYPE_QBOOL) { qerror_report(QERR_INVALID_PARAMETER_TYPE, name, "bool"); return -1; } break; case 'O': default: /* impossible */ abort(); } return 0; }
3,337
0
static void blkdebug_refresh_filename(BlockDriverState *bs) { QDict *opts; const QDictEntry *e; bool force_json = false; for (e = qdict_first(bs->options); e; e = qdict_next(bs->options, e)) { if (strcmp(qdict_entry_key(e), "config") && strcmp(qdict_entry_key(e), "x-image") && strcmp(qdict_entry_key(e), "image") && strncmp(qdict_entry_key(e), "image.", strlen("image."))) { force_json = true; break; } } if (force_json && !bs->file->bs->full_open_options) { /* The config file cannot be recreated, so creating a plain filename * is impossible */ return; } if (!force_json && bs->file->bs->exact_filename[0]) { snprintf(bs->exact_filename, sizeof(bs->exact_filename), "blkdebug:%s:%s", qdict_get_try_str(bs->options, "config") ?: "", bs->file->bs->exact_filename); } opts = qdict_new(); qdict_put_obj(opts, "driver", QOBJECT(qstring_from_str("blkdebug"))); QINCREF(bs->file->bs->full_open_options); qdict_put_obj(opts, "image", QOBJECT(bs->file->bs->full_open_options)); for (e = qdict_first(bs->options); e; e = qdict_next(bs->options, e)) { if (strcmp(qdict_entry_key(e), "x-image") && strcmp(qdict_entry_key(e), "image") && strncmp(qdict_entry_key(e), "image.", strlen("image."))) { qobject_incref(qdict_entry_value(e)); qdict_put_obj(opts, qdict_entry_key(e), qdict_entry_value(e)); } } bs->full_open_options = opts; }
3,338
0
static void memory_region_initfn(Object *obj) { MemoryRegion *mr = MEMORY_REGION(obj); ObjectProperty *op; mr->ops = &unassigned_mem_ops; mr->enabled = true; mr->romd_mode = true; mr->global_locking = true; mr->destructor = memory_region_destructor_none; QTAILQ_INIT(&mr->subregions); QTAILQ_INIT(&mr->coalesced); op = object_property_add(OBJECT(mr), "container", "link<" TYPE_MEMORY_REGION ">", memory_region_get_container, NULL, /* memory_region_set_container */ NULL, NULL, &error_abort); op->resolve = memory_region_resolve_container; object_property_add(OBJECT(mr), "addr", "uint64", memory_region_get_addr, NULL, /* memory_region_set_addr */ NULL, NULL, &error_abort); object_property_add(OBJECT(mr), "priority", "uint32", memory_region_get_priority, NULL, /* memory_region_set_priority */ NULL, NULL, &error_abort); object_property_add_bool(OBJECT(mr), "may-overlap", memory_region_get_may_overlap, NULL, /* memory_region_set_may_overlap */ &error_abort); object_property_add(OBJECT(mr), "size", "uint64", memory_region_get_size, NULL, /* memory_region_set_size, */ NULL, NULL, &error_abort); }
3,340
0
static void curl_setup_preadv(BlockDriverState *bs, CURLAIOCB *acb) { CURLState *state; int running; BDRVCURLState *s = bs->opaque; uint64_t start = acb->offset; uint64_t end; qemu_mutex_lock(&s->mutex); // In case we have the requested data already (e.g. read-ahead), // we can just call the callback and be done. if (curl_find_buf(s, start, acb->bytes, acb)) { goto out; } // No cache found, so let's start a new request for (;;) { state = curl_find_state(s); if (state) { break; } qemu_mutex_unlock(&s->mutex); aio_poll(bdrv_get_aio_context(bs), true); qemu_mutex_lock(&s->mutex); } if (curl_init_state(s, state) < 0) { curl_clean_state(state); acb->ret = -EIO; goto out; } acb->start = 0; acb->end = MIN(acb->bytes, s->len - start); state->buf_off = 0; g_free(state->orig_buf); state->buf_start = start; state->buf_len = MIN(acb->end + s->readahead_size, s->len - start); end = start + state->buf_len - 1; state->orig_buf = g_try_malloc(state->buf_len); if (state->buf_len && state->orig_buf == NULL) { curl_clean_state(state); acb->ret = -ENOMEM; goto out; } state->acb[0] = acb; snprintf(state->range, 127, "%" PRIu64 "-%" PRIu64, start, end); DPRINTF("CURL (AIO): Reading %" PRIu64 " at %" PRIu64 " (%s)\n", acb->bytes, start, state->range); curl_easy_setopt(state->curl, CURLOPT_RANGE, state->range); curl_multi_add_handle(s->multi, state->curl); /* Tell curl it needs to kick things off */ curl_multi_socket_action(s->multi, CURL_SOCKET_TIMEOUT, 0, &running); out: qemu_mutex_unlock(&s->mutex); }
3,341
0
static int local_rename(FsContext *ctx, const char *oldpath, const char *newpath) { char *tmp; int err; tmp = qemu_strdup(rpath(ctx, oldpath)); if (tmp == NULL) { return -1; } err = rename(tmp, rpath(ctx, newpath)); if (err == -1) { int serrno = errno; qemu_free(tmp); errno = serrno; } else { qemu_free(tmp); } return err; }
3,342
0
static Visitor *validate_test_init_raw(TestInputVisitorData *data, const char *json_string) { return validate_test_init_internal(data, json_string, NULL); }
3,343
0
static void gen_compute_branch (DisasContext *ctx, uint32_t opc, int insn_bytes, int rs, int rt, int32_t offset) { target_ulong btgt = -1; int blink = 0; int bcond_compute = 0; TCGv t0 = tcg_temp_new(); TCGv t1 = tcg_temp_new(); if (ctx->hflags & MIPS_HFLAG_BMASK) { #ifdef MIPS_DEBUG_DISAS LOG_DISAS("Branch in delay slot at PC 0x" TARGET_FMT_lx "\n", ctx->pc); #endif generate_exception(ctx, EXCP_RI); goto out; } /* Load needed operands */ switch (opc) { case OPC_BEQ: case OPC_BEQL: case OPC_BNE: case OPC_BNEL: /* Compare two registers */ if (rs != rt) { gen_load_gpr(t0, rs); gen_load_gpr(t1, rt); bcond_compute = 1; } btgt = ctx->pc + insn_bytes + offset; break; case OPC_BGEZ: case OPC_BGEZAL: case OPC_BGEZALL: case OPC_BGEZL: case OPC_BGTZ: case OPC_BGTZL: case OPC_BLEZ: case OPC_BLEZL: case OPC_BLTZ: case OPC_BLTZAL: case OPC_BLTZALL: case OPC_BLTZL: /* Compare to zero */ if (rs != 0) { gen_load_gpr(t0, rs); bcond_compute = 1; } btgt = ctx->pc + insn_bytes + offset; break; case OPC_J: case OPC_JAL: case OPC_JALX: /* Jump to immediate */ btgt = ((ctx->pc + insn_bytes) & (int32_t)0xF0000000) | (uint32_t)offset; break; case OPC_JR: case OPC_JALR: case OPC_JALRC: /* Jump to register */ if (offset != 0 && offset != 16) { /* Hint = 0 is JR/JALR, hint 16 is JR.HB/JALR.HB, the others are reserved. */ MIPS_INVAL("jump hint"); generate_exception(ctx, EXCP_RI); goto out; } gen_load_gpr(btarget, rs); break; default: MIPS_INVAL("branch/jump"); generate_exception(ctx, EXCP_RI); goto out; } if (bcond_compute == 0) { /* No condition to be computed */ switch (opc) { case OPC_BEQ: /* rx == rx */ case OPC_BEQL: /* rx == rx likely */ case OPC_BGEZ: /* 0 >= 0 */ case OPC_BGEZL: /* 0 >= 0 likely */ case OPC_BLEZ: /* 0 <= 0 */ case OPC_BLEZL: /* 0 <= 0 likely */ /* Always take */ ctx->hflags |= MIPS_HFLAG_B; MIPS_DEBUG("balways"); break; case OPC_BGEZAL: /* 0 >= 0 */ case OPC_BGEZALL: /* 0 >= 0 likely */ /* Always take and link */ blink = 31; ctx->hflags |= MIPS_HFLAG_B; MIPS_DEBUG("balways and link"); break; case OPC_BNE: /* rx != rx */ case OPC_BGTZ: /* 0 > 0 */ case OPC_BLTZ: /* 0 < 0 */ /* Treat as NOP. */ MIPS_DEBUG("bnever (NOP)"); goto out; case OPC_BLTZAL: /* 0 < 0 */ tcg_gen_movi_tl(cpu_gpr[31], ctx->pc + 8); MIPS_DEBUG("bnever and link"); goto out; case OPC_BLTZALL: /* 0 < 0 likely */ tcg_gen_movi_tl(cpu_gpr[31], ctx->pc + 8); /* Skip the instruction in the delay slot */ MIPS_DEBUG("bnever, link and skip"); ctx->pc += 4; goto out; case OPC_BNEL: /* rx != rx likely */ case OPC_BGTZL: /* 0 > 0 likely */ case OPC_BLTZL: /* 0 < 0 likely */ /* Skip the instruction in the delay slot */ MIPS_DEBUG("bnever and skip"); ctx->pc += 4; goto out; case OPC_J: ctx->hflags |= MIPS_HFLAG_B; MIPS_DEBUG("j " TARGET_FMT_lx, btgt); break; case OPC_JALX: ctx->hflags |= MIPS_HFLAG_BX; /* Fallthrough */ case OPC_JAL: blink = 31; ctx->hflags |= MIPS_HFLAG_B; ctx->hflags |= (ctx->hflags & MIPS_HFLAG_M16 ? MIPS_HFLAG_BDS16 : MIPS_HFLAG_BDS32); MIPS_DEBUG("jal " TARGET_FMT_lx, btgt); break; case OPC_JR: ctx->hflags |= MIPS_HFLAG_BR; if (ctx->hflags & MIPS_HFLAG_M16) ctx->hflags |= MIPS_HFLAG_BDS16; MIPS_DEBUG("jr %s", regnames[rs]); break; case OPC_JALR: case OPC_JALRC: blink = rt; ctx->hflags |= MIPS_HFLAG_BR; if (ctx->hflags & MIPS_HFLAG_M16) ctx->hflags |= MIPS_HFLAG_BDS16; MIPS_DEBUG("jalr %s, %s", regnames[rt], regnames[rs]); break; default: MIPS_INVAL("branch/jump"); generate_exception(ctx, EXCP_RI); goto out; } } else { switch (opc) { case OPC_BEQ: tcg_gen_setcond_tl(TCG_COND_EQ, bcond, t0, t1); MIPS_DEBUG("beq %s, %s, " TARGET_FMT_lx, regnames[rs], regnames[rt], btgt); goto not_likely; case OPC_BEQL: tcg_gen_setcond_tl(TCG_COND_EQ, bcond, t0, t1); MIPS_DEBUG("beql %s, %s, " TARGET_FMT_lx, regnames[rs], regnames[rt], btgt); goto likely; case OPC_BNE: tcg_gen_setcond_tl(TCG_COND_NE, bcond, t0, t1); MIPS_DEBUG("bne %s, %s, " TARGET_FMT_lx, regnames[rs], regnames[rt], btgt); goto not_likely; case OPC_BNEL: tcg_gen_setcond_tl(TCG_COND_NE, bcond, t0, t1); MIPS_DEBUG("bnel %s, %s, " TARGET_FMT_lx, regnames[rs], regnames[rt], btgt); goto likely; case OPC_BGEZ: tcg_gen_setcondi_tl(TCG_COND_GE, bcond, t0, 0); MIPS_DEBUG("bgez %s, " TARGET_FMT_lx, regnames[rs], btgt); goto not_likely; case OPC_BGEZL: tcg_gen_setcondi_tl(TCG_COND_GE, bcond, t0, 0); MIPS_DEBUG("bgezl %s, " TARGET_FMT_lx, regnames[rs], btgt); goto likely; case OPC_BGEZAL: tcg_gen_setcondi_tl(TCG_COND_GE, bcond, t0, 0); MIPS_DEBUG("bgezal %s, " TARGET_FMT_lx, regnames[rs], btgt); blink = 31; goto not_likely; case OPC_BGEZALL: tcg_gen_setcondi_tl(TCG_COND_GE, bcond, t0, 0); blink = 31; MIPS_DEBUG("bgezall %s, " TARGET_FMT_lx, regnames[rs], btgt); goto likely; case OPC_BGTZ: tcg_gen_setcondi_tl(TCG_COND_GT, bcond, t0, 0); MIPS_DEBUG("bgtz %s, " TARGET_FMT_lx, regnames[rs], btgt); goto not_likely; case OPC_BGTZL: tcg_gen_setcondi_tl(TCG_COND_GT, bcond, t0, 0); MIPS_DEBUG("bgtzl %s, " TARGET_FMT_lx, regnames[rs], btgt); goto likely; case OPC_BLEZ: tcg_gen_setcondi_tl(TCG_COND_LE, bcond, t0, 0); MIPS_DEBUG("blez %s, " TARGET_FMT_lx, regnames[rs], btgt); goto not_likely; case OPC_BLEZL: tcg_gen_setcondi_tl(TCG_COND_LE, bcond, t0, 0); MIPS_DEBUG("blezl %s, " TARGET_FMT_lx, regnames[rs], btgt); goto likely; case OPC_BLTZ: tcg_gen_setcondi_tl(TCG_COND_LT, bcond, t0, 0); MIPS_DEBUG("bltz %s, " TARGET_FMT_lx, regnames[rs], btgt); goto not_likely; case OPC_BLTZL: tcg_gen_setcondi_tl(TCG_COND_LT, bcond, t0, 0); MIPS_DEBUG("bltzl %s, " TARGET_FMT_lx, regnames[rs], btgt); goto likely; case OPC_BLTZAL: tcg_gen_setcondi_tl(TCG_COND_LT, bcond, t0, 0); blink = 31; MIPS_DEBUG("bltzal %s, " TARGET_FMT_lx, regnames[rs], btgt); not_likely: ctx->hflags |= MIPS_HFLAG_BC; break; case OPC_BLTZALL: tcg_gen_setcondi_tl(TCG_COND_LT, bcond, t0, 0); blink = 31; MIPS_DEBUG("bltzall %s, " TARGET_FMT_lx, regnames[rs], btgt); likely: ctx->hflags |= MIPS_HFLAG_BL; break; default: MIPS_INVAL("conditional branch/jump"); generate_exception(ctx, EXCP_RI); goto out; } } MIPS_DEBUG("enter ds: link %d cond %02x target " TARGET_FMT_lx, blink, ctx->hflags, btgt); ctx->btarget = btgt; if (blink > 0) { int post_delay = insn_bytes; int lowbit = !!(ctx->hflags & MIPS_HFLAG_M16); if (opc != OPC_JALRC) post_delay += ((ctx->hflags & MIPS_HFLAG_BDS16) ? 2 : 4); tcg_gen_movi_tl(cpu_gpr[blink], ctx->pc + post_delay + lowbit); } out: if (insn_bytes == 2) ctx->hflags |= MIPS_HFLAG_B16; tcg_temp_free(t0); tcg_temp_free(t1); }
3,345
0
static int tak_read_header(AVFormatContext *s) { TAKDemuxContext *tc = s->priv_data; AVIOContext *pb = s->pb; GetBitContext gb; AVStream *st; uint8_t *buffer = NULL; int ret; st = avformat_new_stream(s, 0); if (!st) return AVERROR(ENOMEM); st->codec->codec_type = AVMEDIA_TYPE_AUDIO; st->codec->codec_id = AV_CODEC_ID_TAK; st->need_parsing = AVSTREAM_PARSE_FULL_RAW; tc->mlast_frame = 0; if (avio_rl32(pb) != MKTAG('t', 'B', 'a', 'K')) { avio_seek(pb, -4, SEEK_CUR); return 0; } while (!url_feof(pb)) { enum TAKMetaDataType type; int size; type = avio_r8(pb) & 0x7f; size = avio_rl24(pb); switch (type) { case TAK_METADATA_STREAMINFO: case TAK_METADATA_LAST_FRAME: case TAK_METADATA_ENCODER: buffer = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE); if (!buffer) return AVERROR(ENOMEM); if (avio_read(pb, buffer, size) != size) { av_freep(&buffer); return AVERROR(EIO); } init_get_bits(&gb, buffer, size * 8); break; case TAK_METADATA_MD5: { uint8_t md5[16]; int i; if (size != 19) return AVERROR_INVALIDDATA; avio_read(pb, md5, 16); avio_skip(pb, 3); av_log(s, AV_LOG_VERBOSE, "MD5="); for (i = 0; i < 16; i++) av_log(s, AV_LOG_VERBOSE, "%02x", md5[i]); av_log(s, AV_LOG_VERBOSE, "\n"); break; } case TAK_METADATA_END: { int64_t curpos = avio_tell(pb); if (pb->seekable) { ff_ape_parse_tag(s); avio_seek(pb, curpos, SEEK_SET); } tc->data_end += curpos; return 0; } default: ret = avio_skip(pb, size); if (ret < 0) return ret; } if (type == TAK_METADATA_STREAMINFO) { TAKStreamInfo ti; avpriv_tak_parse_streaminfo(&gb, &ti); if (ti.samples > 0) st->duration = ti.samples; st->codec->bits_per_coded_sample = ti.bps; if (ti.ch_layout) st->codec->channel_layout = ti.ch_layout; st->codec->sample_rate = ti.sample_rate; st->codec->channels = ti.channels; st->start_time = 0; avpriv_set_pts_info(st, 64, 1, st->codec->sample_rate); st->codec->extradata = buffer; st->codec->extradata_size = size; buffer = NULL; } else if (type == TAK_METADATA_LAST_FRAME) { if (size != 11) return AVERROR_INVALIDDATA; tc->mlast_frame = 1; tc->data_end = get_bits64(&gb, TAK_LAST_FRAME_POS_BITS) + get_bits(&gb, TAK_LAST_FRAME_SIZE_BITS); av_freep(&buffer); } else if (type == TAK_METADATA_ENCODER) { av_log(s, AV_LOG_VERBOSE, "encoder version: %0X\n", get_bits_long(&gb, TAK_ENCODER_VERSION_BITS)); av_freep(&buffer); } } return AVERROR_EOF; }
3,346
0
int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags) { int64_t target_size, ret, bytes, offset = 0; BlockDriverState *bs = child->bs; int n; /* sectors */ target_size = bdrv_getlength(bs); if (target_size < 0) { return target_size; } for (;;) { bytes = MIN(target_size - offset, BDRV_REQUEST_MAX_BYTES); if (bytes <= 0) { return 0; } ret = bdrv_get_block_status(bs, offset >> BDRV_SECTOR_BITS, bytes >> BDRV_SECTOR_BITS, &n, NULL); if (ret < 0) { error_report("error getting block status at offset %" PRId64 ": %s", offset, strerror(-ret)); return ret; } if (ret & BDRV_BLOCK_ZERO) { offset += n * BDRV_SECTOR_BITS; continue; } ret = bdrv_pwrite_zeroes(child, offset, n * BDRV_SECTOR_SIZE, flags); if (ret < 0) { error_report("error writing zeroes at offset %" PRId64 ": %s", offset, strerror(-ret)); return ret; } offset += n * BDRV_SECTOR_SIZE; } }
3,347
0
void HELPER(wsr_ibreaka)(uint32_t i, uint32_t v) { if (env->sregs[IBREAKENABLE] & (1 << i) && env->sregs[IBREAKA + i] != v) { tb_invalidate_phys_page_range( env->sregs[IBREAKA + i], env->sregs[IBREAKA + i] + 1, 0); tb_invalidate_phys_page_range(v, v + 1, 0); } env->sregs[IBREAKA + i] = v; }
3,349
0
static unsigned syborg_virtio_get_features(void *opaque) { unsigned ret = 0; ret |= (1 << VIRTIO_F_NOTIFY_ON_EMPTY); return ret; }
3,350
0
static int check_strtox_error(const char *p, char *endptr, const char **next, int err) { if (err == 0 && endptr == p) { err = EINVAL; } if (!next && *endptr) { return -EINVAL; } if (next) { *next = endptr; } return -err; }
3,351
0
void spapr_setup_hpt_and_vrma(sPAPRMachineState *spapr) { int hpt_shift; if ((spapr->resize_hpt == SPAPR_RESIZE_HPT_DISABLED) || (spapr->cas_reboot && !spapr_ovec_test(spapr->ov5_cas, OV5_HPT_RESIZE))) { hpt_shift = spapr_hpt_shift_for_ramsize(MACHINE(spapr)->maxram_size); } else { hpt_shift = spapr_hpt_shift_for_ramsize(MACHINE(spapr)->ram_size); } spapr_reallocate_hpt(spapr, hpt_shift, &error_fatal); if (spapr->vrma_adjust) { spapr->rma_size = kvmppc_rma_size(spapr_node0_size(MACHINE(spapr)), spapr->htab_shift); } /* We're setting up a hash table, so that means we're not radix */ spapr->patb_entry = 0; }
3,352
0
static CharDriverState *qmp_chardev_open_udp(const char *id, ChardevBackend *backend, ChardevReturn *ret, Error **errp) { ChardevUdp *udp = backend->u.udp; ChardevCommon *common = qapi_ChardevUdp_base(udp); QIOChannelSocket *sioc = qio_channel_socket_new(); if (qio_channel_socket_dgram_sync(sioc, udp->local, udp->remote, errp) < 0) { object_unref(OBJECT(sioc)); return NULL; } return qemu_chr_open_udp(sioc, common, errp); }
3,353
0
static inline int vec_reg_offset(int regno, int element, TCGMemOp size) { int offs = offsetof(CPUARMState, vfp.regs[regno * 2]); #ifdef HOST_WORDS_BIGENDIAN /* This is complicated slightly because vfp.regs[2n] is * still the low half and vfp.regs[2n+1] the high half * of the 128 bit vector, even on big endian systems. * Calculate the offset assuming a fully bigendian 128 bits, * then XOR to account for the order of the two 64 bit halves. */ offs += (16 - ((element + 1) * (1 << size))); offs ^= 8; #else offs += element * (1 << size); #endif return offs; }
3,354
0
static void gen_doz(DisasContext *ctx) { int l1 = gen_new_label(); int l2 = gen_new_label(); tcg_gen_brcond_tl(TCG_COND_GE, cpu_gpr[rB(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], l1); tcg_gen_sub_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); tcg_gen_br(l2); gen_set_label(l1); tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], 0); gen_set_label(l2); if (unlikely(Rc(ctx->opcode) != 0)) gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); }
3,355
0
static av_cold int nvenc_alloc_surface(AVCodecContext *avctx, int idx) { NvencContext *ctx = avctx->priv_data; NvencDynLoadFunctions *dl_fn = &ctx->nvenc_dload_funcs; NV_ENCODE_API_FUNCTION_LIST *p_nvenc = &dl_fn->nvenc_funcs; NVENCSTATUS nv_status; NV_ENC_CREATE_BITSTREAM_BUFFER allocOut = { 0 }; allocOut.version = NV_ENC_CREATE_BITSTREAM_BUFFER_VER; if (avctx->pix_fmt == AV_PIX_FMT_CUDA) { ctx->surfaces[idx].in_ref = av_frame_alloc(); if (!ctx->surfaces[idx].in_ref) return AVERROR(ENOMEM); } else { NV_ENC_CREATE_INPUT_BUFFER allocSurf = { 0 }; ctx->surfaces[idx].format = nvenc_map_buffer_format(ctx->data_pix_fmt); if (ctx->surfaces[idx].format == NV_ENC_BUFFER_FORMAT_UNDEFINED) { av_log(avctx, AV_LOG_FATAL, "Invalid input pixel format: %s\n", av_get_pix_fmt_name(ctx->data_pix_fmt)); return AVERROR(EINVAL); } allocSurf.version = NV_ENC_CREATE_INPUT_BUFFER_VER; allocSurf.width = (avctx->width + 31) & ~31; allocSurf.height = (avctx->height + 31) & ~31; allocSurf.memoryHeap = NV_ENC_MEMORY_HEAP_SYSMEM_CACHED; allocSurf.bufferFmt = ctx->surfaces[idx].format; nv_status = p_nvenc->nvEncCreateInputBuffer(ctx->nvencoder, &allocSurf); if (nv_status != NV_ENC_SUCCESS) { return nvenc_print_error(avctx, nv_status, "CreateInputBuffer failed"); } ctx->surfaces[idx].input_surface = allocSurf.inputBuffer; ctx->surfaces[idx].width = allocSurf.width; ctx->surfaces[idx].height = allocSurf.height; } ctx->surfaces[idx].lockCount = 0; /* 1MB is large enough to hold most output frames. * NVENC increases this automaticaly if it is not enough. */ allocOut.size = 1024 * 1024; allocOut.memoryHeap = NV_ENC_MEMORY_HEAP_SYSMEM_CACHED; nv_status = p_nvenc->nvEncCreateBitstreamBuffer(ctx->nvencoder, &allocOut); if (nv_status != NV_ENC_SUCCESS) { int err = nvenc_print_error(avctx, nv_status, "CreateBitstreamBuffer failed"); if (avctx->pix_fmt != AV_PIX_FMT_CUDA) p_nvenc->nvEncDestroyInputBuffer(ctx->nvencoder, ctx->surfaces[idx].input_surface); av_frame_free(&ctx->surfaces[idx].in_ref); return err; } ctx->surfaces[idx].output_surface = allocOut.bitstreamBuffer; ctx->surfaces[idx].size = allocOut.size; return 0; }
3,357
0
void host_cpuid(uint32_t function, uint32_t count, uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx) { #if defined(CONFIG_KVM) uint32_t vec[4]; #ifdef __x86_64__ asm volatile("cpuid" : "=a"(vec[0]), "=b"(vec[1]), "=c"(vec[2]), "=d"(vec[3]) : "0"(function), "c"(count) : "cc"); #else asm volatile("pusha \n\t" "cpuid \n\t" "mov %%eax, 0(%2) \n\t" "mov %%ebx, 4(%2) \n\t" "mov %%ecx, 8(%2) \n\t" "mov %%edx, 12(%2) \n\t" "popa" : : "a"(function), "c"(count), "S"(vec) : "memory", "cc"); #endif if (eax) *eax = vec[0]; if (ebx) *ebx = vec[1]; if (ecx) *ecx = vec[2]; if (edx) *edx = vec[3]; #endif }
3,358
0
int qemu_add_child_watch(pid_t pid) { ChildProcessRecord *rec; if (!sigchld_bh) { qemu_init_child_watch(); } QLIST_FOREACH(rec, &child_watches, next) { if (rec->pid == pid) { return 1; } } rec = g_malloc0(sizeof(ChildProcessRecord)); rec->pid = pid; QLIST_INSERT_HEAD(&child_watches, rec, next); return 0; }
3,359
0
static void bitband_writew(void *opaque, target_phys_addr_t offset, uint32_t value) { uint32_t addr; uint16_t mask; uint16_t v; addr = bitband_addr(opaque, offset) & ~1; mask = (1 << ((offset >> 2) & 15)); mask = tswap16(mask); cpu_physical_memory_read(addr, (uint8_t *)&v, 2); if (value & 1) v |= mask; else v &= ~mask; cpu_physical_memory_write(addr, (uint8_t *)&v, 2); }
3,360
0
static int write_console_data(SCLPEvent *event, const uint8_t *buf, int len) { int ret = 0; const uint8_t *buf_offset; SCLPConsoleLM *scon = SCLPLM_CONSOLE(event); if (!scon->chr) { /* If there's no backend, we can just say we consumed all data. */ return len; } buf_offset = buf; while (len > 0) { ret = qemu_chr_fe_write(scon->chr, buf, len); if (ret == 0) { /* a pty doesn't seem to be connected - no error */ len = 0; } else if (ret == -EAGAIN || (ret > 0 && ret < len)) { len -= ret; buf_offset += ret; } else { len = 0; } } return ret; }
3,361
0
static int mirror_do_read(MirrorBlockJob *s, int64_t sector_num, int nb_sectors) { BlockBackend *source = s->common.blk; int sectors_per_chunk, nb_chunks; int ret; MirrorOp *op; int max_sectors; sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS; max_sectors = sectors_per_chunk * s->max_iov; /* We can only handle as much as buf_size at a time. */ nb_sectors = MIN(s->buf_size >> BDRV_SECTOR_BITS, nb_sectors); nb_sectors = MIN(max_sectors, nb_sectors); assert(nb_sectors); ret = nb_sectors; if (s->cow_bitmap) { ret += mirror_cow_align(s, &sector_num, &nb_sectors); } assert(nb_sectors << BDRV_SECTOR_BITS <= s->buf_size); /* The sector range must meet granularity because: * 1) Caller passes in aligned values; * 2) mirror_cow_align is used only when target cluster is larger. */ assert(!(sector_num % sectors_per_chunk)); nb_chunks = DIV_ROUND_UP(nb_sectors, sectors_per_chunk); while (s->buf_free_count < nb_chunks) { trace_mirror_yield_in_flight(s, sector_num * BDRV_SECTOR_SIZE, s->in_flight); mirror_wait_for_io(s); } /* Allocate a MirrorOp that is used as an AIO callback. */ op = g_new(MirrorOp, 1); op->s = s; op->sector_num = sector_num; op->nb_sectors = nb_sectors; /* Now make a QEMUIOVector taking enough granularity-sized chunks * from s->buf_free. */ qemu_iovec_init(&op->qiov, nb_chunks); while (nb_chunks-- > 0) { MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free); size_t remaining = nb_sectors * BDRV_SECTOR_SIZE - op->qiov.size; QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next); s->buf_free_count--; qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining)); } /* Copy the dirty cluster. */ s->in_flight++; s->sectors_in_flight += nb_sectors; trace_mirror_one_iteration(s, sector_num * BDRV_SECTOR_SIZE, nb_sectors * BDRV_SECTOR_SIZE); blk_aio_preadv(source, sector_num * BDRV_SECTOR_SIZE, &op->qiov, 0, mirror_read_complete, op); return ret; }
3,362
0
static void tcg_target_init(TCGContext *s) { #ifdef CONFIG_GETAUXVAL unsigned long hwcap = getauxval(AT_HWCAP); if (hwcap & PPC_FEATURE_ARCH_2_06) { have_isa_2_06 = true; } #endif tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff); tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffffffff); tcg_regset_set32(tcg_target_call_clobber_regs, 0, (1 << TCG_REG_R0) | #ifdef __APPLE__ (1 << TCG_REG_R2) | #endif (1 << TCG_REG_R3) | (1 << TCG_REG_R4) | (1 << TCG_REG_R5) | (1 << TCG_REG_R6) | (1 << TCG_REG_R7) | (1 << TCG_REG_R8) | (1 << TCG_REG_R9) | (1 << TCG_REG_R10) | (1 << TCG_REG_R11) | (1 << TCG_REG_R12) ); tcg_regset_clear(s->reserved_regs); tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0); tcg_regset_set_reg(s->reserved_regs, TCG_REG_R1); #ifndef __APPLE__ tcg_regset_set_reg(s->reserved_regs, TCG_REG_R2); #endif tcg_regset_set_reg(s->reserved_regs, TCG_REG_R13); tcg_add_target_add_op_defs(ppc_op_defs); }
3,363
0
static void test_validate_alternate(TestInputVisitorData *data, const void *unused) { UserDefAlternate *tmp = NULL; Visitor *v; v = validate_test_init(data, "42"); visit_type_UserDefAlternate(v, NULL, &tmp, &error_abort); qapi_free_UserDefAlternate(tmp); }
3,364
0
static void parse_chap(struct iscsi_context *iscsi, const char *target, Error **errp) { QemuOptsList *list; QemuOpts *opts; const char *user = NULL; const char *password = NULL; const char *secretid; char *secret = NULL; list = qemu_find_opts("iscsi"); if (!list) { return; } opts = qemu_opts_find(list, target); if (opts == NULL) { opts = QTAILQ_FIRST(&list->head); if (!opts) { return; } } user = qemu_opt_get(opts, "user"); if (!user) { return; } secretid = qemu_opt_get(opts, "password-secret"); password = qemu_opt_get(opts, "password"); if (secretid && password) { error_setg(errp, "'password' and 'password-secret' properties are " "mutually exclusive"); return; } if (secretid) { secret = qcrypto_secret_lookup_as_utf8(secretid, errp); if (!secret) { return; } password = secret; } else if (!password) { error_setg(errp, "CHAP username specified but no password was given"); return; } if (iscsi_set_initiator_username_pwd(iscsi, user, password)) { error_setg(errp, "Failed to set initiator username and password"); } g_free(secret); }
3,366
0
static void mips_fulong2e_init(MachineState *machine) { ram_addr_t ram_size = machine->ram_size; const char *cpu_model = machine->cpu_model; const char *kernel_filename = machine->kernel_filename; const char *kernel_cmdline = machine->kernel_cmdline; const char *initrd_filename = machine->initrd_filename; char *filename; MemoryRegion *address_space_mem = get_system_memory(); MemoryRegion *ram = g_new(MemoryRegion, 1); MemoryRegion *bios = g_new(MemoryRegion, 1); long bios_size; int64_t kernel_entry; qemu_irq *i8259; qemu_irq *cpu_exit_irq; PCIBus *pci_bus; ISABus *isa_bus; I2CBus *smbus; DriveInfo *hd[MAX_IDE_BUS * MAX_IDE_DEVS]; MIPSCPU *cpu; CPUMIPSState *env; /* init CPUs */ if (cpu_model == NULL) { cpu_model = "Loongson-2E"; } cpu = cpu_mips_init(cpu_model); if (cpu == NULL) { fprintf(stderr, "Unable to find CPU definition\n"); exit(1); } env = &cpu->env; qemu_register_reset(main_cpu_reset, cpu); /* fulong 2e has 256M ram. */ ram_size = 256 * 1024 * 1024; /* fulong 2e has a 1M flash.Winbond W39L040AP70Z */ bios_size = 1024 * 1024; /* allocate RAM */ memory_region_allocate_system_memory(ram, NULL, "fulong2e.ram", ram_size); memory_region_init_ram(bios, NULL, "fulong2e.bios", bios_size, &error_abort); vmstate_register_ram_global(bios); memory_region_set_readonly(bios, true); memory_region_add_subregion(address_space_mem, 0, ram); memory_region_add_subregion(address_space_mem, 0x1fc00000LL, bios); /* We do not support flash operation, just loading pmon.bin as raw BIOS. * Please use -L to set the BIOS path and -bios to set bios name. */ if (kernel_filename) { loaderparams.ram_size = ram_size; loaderparams.kernel_filename = kernel_filename; loaderparams.kernel_cmdline = kernel_cmdline; loaderparams.initrd_filename = initrd_filename; kernel_entry = load_kernel (env); write_bootloader(env, memory_region_get_ram_ptr(bios), kernel_entry); } else { if (bios_name == NULL) { bios_name = FULONG_BIOSNAME; } filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); if (filename) { bios_size = load_image_targphys(filename, 0x1fc00000LL, BIOS_SIZE); g_free(filename); } else { bios_size = -1; } if ((bios_size < 0 || bios_size > BIOS_SIZE) && !kernel_filename && !qtest_enabled()) { error_report("Could not load MIPS bios '%s'", bios_name); exit(1); } } /* Init internal devices */ cpu_mips_irq_init_cpu(env); cpu_mips_clock_init(env); /* North bridge, Bonito --> IP2 */ pci_bus = bonito_init((qemu_irq *)&(env->irq[2])); /* South bridge */ ide_drive_get(hd, ARRAY_SIZE(hd)); isa_bus = vt82c686b_init(pci_bus, PCI_DEVFN(FULONG2E_VIA_SLOT, 0)); if (!isa_bus) { fprintf(stderr, "vt82c686b_init error\n"); exit(1); } /* Interrupt controller */ /* The 8259 -> IP5 */ i8259 = i8259_init(isa_bus, env->irq[5]); isa_bus_irqs(isa_bus, i8259); vt82c686b_ide_init(pci_bus, hd, PCI_DEVFN(FULONG2E_VIA_SLOT, 1)); pci_create_simple(pci_bus, PCI_DEVFN(FULONG2E_VIA_SLOT, 2), "vt82c686b-usb-uhci"); pci_create_simple(pci_bus, PCI_DEVFN(FULONG2E_VIA_SLOT, 3), "vt82c686b-usb-uhci"); smbus = vt82c686b_pm_init(pci_bus, PCI_DEVFN(FULONG2E_VIA_SLOT, 4), 0xeee1, NULL); /* TODO: Populate SPD eeprom data. */ smbus_eeprom_init(smbus, 1, eeprom_spd, sizeof(eeprom_spd)); /* init other devices */ pit = pit_init(isa_bus, 0x40, 0, NULL); cpu_exit_irq = qemu_allocate_irqs(cpu_request_exit, NULL, 1); DMA_init(0, cpu_exit_irq); /* Super I/O */ isa_create_simple(isa_bus, "i8042"); rtc_init(isa_bus, 2000, NULL); serial_hds_isa_init(isa_bus, MAX_SERIAL_PORTS); parallel_hds_isa_init(isa_bus, 1); /* Sound card */ audio_init(pci_bus); /* Network card */ network_init(pci_bus); }
3,367
0
static inline void downmix_dualmono_to_stereo(float *samples) { int i; float tmp; for (i = 0; i < 256; i++) { tmp = samples[i] + samples[i + 256]; samples[i] = samples[i + 256] = tmp; } }
3,369
0
static int64_t mmsh_read_seek(URLContext *h, int stream_index, int64_t timestamp, int flags) { MMSHContext *mmsh = h->priv_data; MMSContext *mms = &mmsh->mms; int ret; ret= mmsh_open_internal(h, mmsh->location, 0, timestamp, 0); if(ret>=0){ if (mms->mms_hd) ffurl_close(mms->mms_hd); av_freep(&mms->streams); av_freep(&mms->asf_header); av_free(mmsh); mmsh = h->priv_data; mms = &mmsh->mms; mms->asf_header_read_size= mms->asf_header_size; }else h->priv_data= mmsh; return ret; }
3,370
1
static int get_current_cpu(void) { return cpu_single_env->cpu_index; }
3,371
1
static void bonito_cop_writel(void *opaque, hwaddr addr, uint64_t val, unsigned size) { PCIBonitoState *s = opaque; ((uint32_t *)(&s->boncop))[addr/sizeof(uint32_t)] = val & 0xffffffff; }
3,372
1
static int mxf_read_local_tags(MXFContext *mxf, KLVPacket *klv, MXFMetadataReadFunc *read_child, int ctx_size, enum MXFMetadataSetType type) { AVIOContext *pb = mxf->fc->pb; MXFMetadataSet *ctx = ctx_size ? av_mallocz(ctx_size) : mxf; uint64_t klv_end = avio_tell(pb) + klv->length; if (!ctx) return AVERROR(ENOMEM); mxf_metadataset_init(ctx, type); while (avio_tell(pb) + 4 < klv_end && !avio_feof(pb)) { int ret; int tag = avio_rb16(pb); int size = avio_rb16(pb); /* KLV specified by 0x53 */ uint64_t next = avio_tell(pb) + size; UID uid = {0}; av_dlog(mxf->fc, "local tag %#04x size %d\n", tag, size); if (!size) { /* ignore empty tag, needed for some files with empty UMID tag */ av_log(mxf->fc, AV_LOG_ERROR, "local tag %#04x with 0 size\n", tag); continue; } if (tag > 0x7FFF) { /* dynamic tag */ int i; for (i = 0; i < mxf->local_tags_count; i++) { int local_tag = AV_RB16(mxf->local_tags+i*18); if (local_tag == tag) { memcpy(uid, mxf->local_tags+i*18+2, 16); av_dlog(mxf->fc, "local tag %#04x\n", local_tag); PRINT_KEY(mxf->fc, "uid", uid); } } } if (ctx_size && tag == 0x3C0A) avio_read(pb, ctx->uid, 16); else if ((ret = read_child(ctx, pb, tag, size, uid, -1)) < 0) return ret; /* Accept the 64k local set limit being exceeded (Avid). Don't accept * it extending past the end of the KLV though (zzuf5.mxf). */ if (avio_tell(pb) > klv_end) { if (ctx_size) av_free(ctx); av_log(mxf->fc, AV_LOG_ERROR, "local tag %#04x extends past end of local set @ %#"PRIx64"\n", tag, klv->offset); return AVERROR_INVALIDDATA; } else if (avio_tell(pb) <= next) /* only seek forward, else this can loop for a long time */ avio_seek(pb, next, SEEK_SET); } if (ctx_size) ctx->type = type; return ctx_size ? mxf_add_metadata_set(mxf, ctx) : 0; }
3,373
1
static int vhost_client_migration_log(CPUPhysMemoryClient *client, int enable) { struct vhost_dev *dev = container_of(client, struct vhost_dev, client); int r; if (!!enable == dev->log_enabled) { return 0; } if (!dev->started) { dev->log_enabled = enable; return 0; } if (!enable) { r = vhost_dev_set_log(dev, false); if (r < 0) { return r; } if (dev->log) { g_free(dev->log); } dev->log = NULL; dev->log_size = 0; } else { vhost_dev_log_resize(dev, vhost_get_log_size(dev)); r = vhost_dev_set_log(dev, true); if (r < 0) { return r; } } dev->log_enabled = enable; return 0; }
3,374
1
long do_rt_sigreturn(CPUS390XState *env) { rt_sigframe *frame; abi_ulong frame_addr = env->regs[15]; sigset_t set; trace_user_do_rt_sigreturn(env, frame_addr); if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { goto badframe; } target_to_host_sigset(&set, &frame->uc.tuc_sigmask); set_sigmask(&set); /* ~_BLOCKABLE? */ if (restore_sigregs(env, &frame->uc.tuc_mcontext)) { goto badframe; } if (do_sigaltstack(frame_addr + offsetof(rt_sigframe, uc.tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT) { goto badframe; } unlock_user_struct(frame, frame_addr, 0); return -TARGET_QEMU_ESIGRETURN; badframe: unlock_user_struct(frame, frame_addr, 0); force_sig(TARGET_SIGSEGV); return 0; }
3,376
1
static int no_init_out (HWVoiceOut *hw, struct audsettings *as) { audio_pcm_init_info (&hw->info, as); hw->samples = 1024; return 0; }
3,377
1
static int protocol_client_vencrypt_init(VncState *vs, uint8_t *data, size_t len) { if (data[0] != 0 || data[1] != 2) { VNC_DEBUG("Unsupported VeNCrypt protocol %d.%d\n", (int)data[0], (int)data[1]); vnc_write_u8(vs, 1); /* Reject version */ vnc_flush(vs); vnc_client_error(vs); } else { VNC_DEBUG("Sending allowed auth %d\n", vs->subauth); vnc_write_u8(vs, 0); /* Accept version */ vnc_write_u8(vs, 1); /* Number of sub-auths */ vnc_write_u32(vs, vs->subauth); /* The supported auth */ vnc_flush(vs); vnc_read_when(vs, protocol_client_vencrypt_auth, 4); } return 0; }
3,378
1
static always_inline int _pte_check (mmu_ctx_t *ctx, int is_64b, target_ulong pte0, target_ulong pte1, int h, int rw, int type) { target_ulong ptem, mmask; int access, ret, pteh, ptev, pp; access = 0; ret = -1; /* Check validity and table match */ #if defined(TARGET_PPC64) if (is_64b) { ptev = pte64_is_valid(pte0); pteh = (pte0 >> 1) & 1; } else #endif { ptev = pte_is_valid(pte0); pteh = (pte0 >> 6) & 1; } if (ptev && h == pteh) { /* Check vsid & api */ #if defined(TARGET_PPC64) if (is_64b) { ptem = pte0 & PTE64_PTEM_MASK; mmask = PTE64_CHECK_MASK; pp = (pte1 & 0x00000003) | ((pte1 >> 61) & 0x00000004); ctx->nx |= (pte1 >> 2) & 1; /* No execute bit */ ctx->nx |= (pte1 >> 3) & 1; /* Guarded bit */ } else #endif { ptem = pte0 & PTE_PTEM_MASK; mmask = PTE_CHECK_MASK; pp = pte1 & 0x00000003; } if (ptem == ctx->ptem) { if (ctx->raddr != (target_ulong)-1) { /* all matches should have equal RPN, WIMG & PP */ if ((ctx->raddr & mmask) != (pte1 & mmask)) { if (loglevel != 0) fprintf(logfile, "Bad RPN/WIMG/PP\n"); return -3; } } /* Compute access rights */ access = pp_check(ctx->key, pp, ctx->nx); /* Keep the matching PTE informations */ ctx->raddr = pte1; ctx->prot = access; ret = check_prot(ctx->prot, rw, type); if (ret == 0) { /* Access granted */ #if defined (DEBUG_MMU) if (loglevel != 0) fprintf(logfile, "PTE access granted !\n"); #endif } else { /* Access right violation */ #if defined (DEBUG_MMU) if (loglevel != 0) fprintf(logfile, "PTE access rejected\n"); #endif } } } return ret; }
3,379
0
static int h264_slice_header_parse(const H264Context *h, H264SliceContext *sl, const H2645NAL *nal) { const SPS *sps; const PPS *pps; int ret; unsigned int slice_type, tmp, i; int field_pic_flag, bottom_field_flag; int droppable, picture_structure; sl->first_mb_addr = get_ue_golomb(&sl->gb); slice_type = get_ue_golomb_31(&sl->gb); if (slice_type > 9) { av_log(h->avctx, AV_LOG_ERROR, "slice type %d too large at %d\n", slice_type, sl->first_mb_addr); return AVERROR_INVALIDDATA; } if (slice_type > 4) { slice_type -= 5; sl->slice_type_fixed = 1; } else sl->slice_type_fixed = 0; slice_type = ff_h264_golomb_to_pict_type[slice_type]; sl->slice_type = slice_type; sl->slice_type_nos = slice_type & 3; if (nal->type == NAL_IDR_SLICE && sl->slice_type_nos != AV_PICTURE_TYPE_I) { av_log(h->avctx, AV_LOG_ERROR, "A non-intra slice in an IDR NAL unit.\n"); return AVERROR_INVALIDDATA; } sl->pps_id = get_ue_golomb(&sl->gb); if (sl->pps_id >= MAX_PPS_COUNT) { av_log(h->avctx, AV_LOG_ERROR, "pps_id %u out of range\n", sl->pps_id); return AVERROR_INVALIDDATA; } if (!h->ps.pps_list[sl->pps_id]) { av_log(h->avctx, AV_LOG_ERROR, "non-existing PPS %u referenced\n", sl->pps_id); return AVERROR_INVALIDDATA; } pps = (const PPS*)h->ps.pps_list[sl->pps_id]->data; if (!h->ps.sps_list[pps->sps_id]) { av_log(h->avctx, AV_LOG_ERROR, "non-existing SPS %u referenced\n", pps->sps_id); return AVERROR_INVALIDDATA; } sps = (const SPS*)h->ps.sps_list[pps->sps_id]->data; sl->frame_num = get_bits(&sl->gb, sps->log2_max_frame_num); sl->mb_mbaff = 0; droppable = nal->ref_idc == 0; if (sps->frame_mbs_only_flag) { picture_structure = PICT_FRAME; } else { field_pic_flag = get_bits1(&sl->gb); if (field_pic_flag) { bottom_field_flag = get_bits1(&sl->gb); picture_structure = PICT_TOP_FIELD + bottom_field_flag; } else { picture_structure = PICT_FRAME; } } sl->picture_structure = picture_structure; sl->mb_field_decoding_flag = picture_structure != PICT_FRAME; if (picture_structure == PICT_FRAME) { sl->curr_pic_num = sl->frame_num; sl->max_pic_num = 1 << sps->log2_max_frame_num; } else { sl->curr_pic_num = 2 * sl->frame_num + 1; sl->max_pic_num = 1 << (sps->log2_max_frame_num + 1); } if (nal->type == NAL_IDR_SLICE) get_ue_golomb(&sl->gb); /* idr_pic_id */ if (sps->poc_type == 0) { sl->poc_lsb = get_bits(&sl->gb, sps->log2_max_poc_lsb); if (pps->pic_order_present == 1 && picture_structure == PICT_FRAME) sl->delta_poc_bottom = get_se_golomb(&sl->gb); } if (sps->poc_type == 1 && !sps->delta_pic_order_always_zero_flag) { sl->delta_poc[0] = get_se_golomb(&sl->gb); if (pps->pic_order_present == 1 && picture_structure == PICT_FRAME) sl->delta_poc[1] = get_se_golomb(&sl->gb); } if (pps->redundant_pic_cnt_present) sl->redundant_pic_count = get_ue_golomb(&sl->gb); if (sl->slice_type_nos == AV_PICTURE_TYPE_B) sl->direct_spatial_mv_pred = get_bits1(&sl->gb); ret = ff_h264_parse_ref_count(&sl->list_count, sl->ref_count, &sl->gb, pps, sl->slice_type_nos, picture_structure); if (ret < 0) return ret; if (sl->slice_type_nos != AV_PICTURE_TYPE_I) { ret = ff_h264_decode_ref_pic_list_reordering(h, sl); if (ret < 0) { sl->ref_count[1] = sl->ref_count[0] = 0; return ret; } } sl->pwt.use_weight = 0; for (i = 0; i < 2; i++) { sl->pwt.luma_weight_flag[i] = 0; sl->pwt.chroma_weight_flag[i] = 0; } if ((pps->weighted_pred && sl->slice_type_nos == AV_PICTURE_TYPE_P) || (pps->weighted_bipred_idc == 1 && sl->slice_type_nos == AV_PICTURE_TYPE_B)) ff_h264_pred_weight_table(&sl->gb, sps, sl->ref_count, sl->slice_type_nos, &sl->pwt); sl->explicit_ref_marking = 0; if (nal->ref_idc) { ret = ff_h264_decode_ref_pic_marking(h, sl, &sl->gb); if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE)) return AVERROR_INVALIDDATA; } if (sl->slice_type_nos != AV_PICTURE_TYPE_I && pps->cabac) { tmp = get_ue_golomb_31(&sl->gb); if (tmp > 2) { av_log(h->avctx, AV_LOG_ERROR, "cabac_init_idc %u overflow\n", tmp); return AVERROR_INVALIDDATA; } sl->cabac_init_idc = tmp; } sl->last_qscale_diff = 0; tmp = pps->init_qp + get_se_golomb(&sl->gb); if (tmp > 51 + 6 * (sps->bit_depth_luma - 8)) { av_log(h->avctx, AV_LOG_ERROR, "QP %u out of range\n", tmp); return AVERROR_INVALIDDATA; } sl->qscale = tmp; sl->chroma_qp[0] = get_chroma_qp(pps, 0, sl->qscale); sl->chroma_qp[1] = get_chroma_qp(pps, 1, sl->qscale); // FIXME qscale / qp ... stuff if (sl->slice_type == AV_PICTURE_TYPE_SP) get_bits1(&sl->gb); /* sp_for_switch_flag */ if (sl->slice_type == AV_PICTURE_TYPE_SP || sl->slice_type == AV_PICTURE_TYPE_SI) get_se_golomb(&sl->gb); /* slice_qs_delta */ sl->deblocking_filter = 1; sl->slice_alpha_c0_offset = 0; sl->slice_beta_offset = 0; if (pps->deblocking_filter_parameters_present) { tmp = get_ue_golomb_31(&sl->gb); if (tmp > 2) { av_log(h->avctx, AV_LOG_ERROR, "deblocking_filter_idc %u out of range\n", tmp); return AVERROR_INVALIDDATA; } sl->deblocking_filter = tmp; if (sl->deblocking_filter < 2) sl->deblocking_filter ^= 1; // 1<->0 if (sl->deblocking_filter) { sl->slice_alpha_c0_offset = get_se_golomb(&sl->gb) * 2; sl->slice_beta_offset = get_se_golomb(&sl->gb) * 2; if (sl->slice_alpha_c0_offset > 12 || sl->slice_alpha_c0_offset < -12 || sl->slice_beta_offset > 12 || sl->slice_beta_offset < -12) { av_log(h->avctx, AV_LOG_ERROR, "deblocking filter parameters %d %d out of range\n", sl->slice_alpha_c0_offset, sl->slice_beta_offset); return AVERROR_INVALIDDATA; } } } return 0; }
3,380
0
static int nut_read_header(AVFormatContext * avf, AVFormatParameters * ap) { NUTContext * priv = avf->priv_data; ByteIOContext * bc = &avf->pb; nut_demuxer_opts_t dopts = { .input = { .priv = bc, .seek = av_seek, .read = av_read, .eof = NULL, .file_pos = 0, }, .alloc = { av_malloc, av_realloc, av_free }, .read_index = 1, .cache_syncpoints = 1, }; nut_context_t * nut = priv->nut = nut_demuxer_init(&dopts); nut_stream_header_t * s; int ret, i; if ((ret = nut_read_headers(nut, &s, NULL))) { if (ret < 0) av_log(avf, AV_LOG_ERROR, " NUT error: %s\n", nut_error(-ret)); nut_demuxer_uninit(nut); return -1; } priv->s = s; for (i = 0; s[i].type != -1 && i < 2; i++) { AVStream * st = av_new_stream(avf, i); int j; for (j = 0; j < s[i].fourcc_len && j < 8; j++) st->codec->codec_tag |= s[i].fourcc[j]<<(j*8); st->codec->has_b_frames = s[i].decode_delay; st->codec->extradata_size = s[i].codec_specific_len; if (st->codec->extradata_size) { st->codec->extradata = av_mallocz(st->codec->extradata_size); memcpy(st->codec->extradata, s[i].codec_specific, st->codec->extradata_size); } av_set_pts_info(avf->streams[i], 60, s[i].time_base.nom, s[i].time_base.den); st->start_time = 0; st->duration = s[i].max_pts; st->codec->codec_id = codec_get_id(nut_tags, st->codec->codec_tag); switch(s[i].type) { case NUT_AUDIO_CLASS: st->codec->codec_type = CODEC_TYPE_AUDIO; if (st->codec->codec_id == CODEC_ID_NONE) st->codec->codec_id = codec_get_wav_id(st->codec->codec_tag); st->codec->channels = s[i].channel_count; st->codec->sample_rate = s[i].samplerate_nom / s[i].samplerate_denom; break; case NUT_VIDEO_CLASS: st->codec->codec_type = CODEC_TYPE_VIDEO; if (st->codec->codec_id == CODEC_ID_NONE) st->codec->codec_id = codec_get_bmp_id(st->codec->codec_tag); st->codec->width = s[i].width; st->codec->height = s[i].height; st->codec->sample_aspect_ratio.num = s[i].sample_width; st->codec->sample_aspect_ratio.den = s[i].sample_height; break; } if (st->codec->codec_id == CODEC_ID_NONE) av_log(avf, AV_LOG_ERROR, "Unknown codec?!\n"); } return 0; }
3,381
0
static int read_huffman_tables(HYuvContext *s, const uint8_t *src, int length) { GetBitContext gb; int i; init_get_bits(&gb, src, length * 8); for (i = 0; i < 3; i++) { if (read_len_table(s->len[i], &gb) < 0) return -1; if (ff_huffyuv_generate_bits_table(s->bits[i], s->len[i]) < 0) { return -1; } ff_free_vlc(&s->vlc[i]); init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0); } generate_joint_tables(s); return (get_bits_count(&gb) + 7) / 8; }
3,382
0
static int h264_slice_header_init(H264Context *h, int reinit) { int nb_slices = (HAVE_THREADS && h->avctx->active_thread_type & FF_THREAD_SLICE) ? h->avctx->thread_count : 1; int i, ret; h->avctx->sample_aspect_ratio = h->sps.sar; av_assert0(h->avctx->sample_aspect_ratio.den); av_pix_fmt_get_chroma_sub_sample(h->avctx->pix_fmt, &h->chroma_x_shift, &h->chroma_y_shift); if (h->sps.timing_info_present_flag) { int64_t den = h->sps.time_scale; if (h->x264_build < 44U) den *= 2; av_reduce(&h->avctx->time_base.num, &h->avctx->time_base.den, h->sps.num_units_in_tick, den, 1 << 30); } if (reinit) ff_h264_free_tables(h, 0); h->first_field = 0; h->prev_interlaced_frame = 1; init_scan_tables(h); ret = ff_h264_alloc_tables(h); if (ret < 0) { av_log(h->avctx, AV_LOG_ERROR, "Could not allocate memory\n"); return ret; } if (nb_slices > H264_MAX_THREADS || (nb_slices > h->mb_height && h->mb_height)) { int max_slices; if (h->mb_height) max_slices = FFMIN(H264_MAX_THREADS, h->mb_height); else max_slices = H264_MAX_THREADS; av_log(h->avctx, AV_LOG_WARNING, "too many threads/slices %d," " reducing to %d\n", nb_slices, max_slices); nb_slices = max_slices; } h->slice_context_count = nb_slices; if (!HAVE_THREADS || !(h->avctx->active_thread_type & FF_THREAD_SLICE)) { ret = ff_h264_context_init(h); if (ret < 0) { av_log(h->avctx, AV_LOG_ERROR, "context_init() failed.\n"); return ret; } } else { for (i = 1; i < h->slice_context_count; i++) { H264Context *c; c = h->thread_context[i] = av_mallocz(sizeof(H264Context)); if (!c) return AVERROR(ENOMEM); c->avctx = h->avctx; c->dsp = h->dsp; c->vdsp = h->vdsp; c->h264dsp = h->h264dsp; c->h264qpel = h->h264qpel; c->h264chroma = h->h264chroma; c->sps = h->sps; c->pps = h->pps; c->pixel_shift = h->pixel_shift; c->width = h->width; c->height = h->height; c->linesize = h->linesize; c->uvlinesize = h->uvlinesize; c->chroma_x_shift = h->chroma_x_shift; c->chroma_y_shift = h->chroma_y_shift; c->qscale = h->qscale; c->droppable = h->droppable; c->data_partitioning = h->data_partitioning; c->low_delay = h->low_delay; c->mb_width = h->mb_width; c->mb_height = h->mb_height; c->mb_stride = h->mb_stride; c->mb_num = h->mb_num; c->flags = h->flags; c->workaround_bugs = h->workaround_bugs; c->pict_type = h->pict_type; init_scan_tables(c); clone_tables(c, h, i); c->context_initialized = 1; } for (i = 0; i < h->slice_context_count; i++) if ((ret = ff_h264_context_init(h->thread_context[i])) < 0) { av_log(h->avctx, AV_LOG_ERROR, "context_init() failed.\n"); return ret; } } h->context_initialized = 1; return 0; }
3,384
0
static unsigned tget_long(GetByteContext *gb, int le) { unsigned v = le ? bytestream2_get_le32u(gb) : bytestream2_get_be32u(gb); return v; }
3,385
1
static int ir2_decode_plane(Ir2Context *ctx, int width, int height, uint8_t *dst, int pitch, const uint8_t *table) { int i; int j; int out = 0; if (width & 1) return AVERROR_INVALIDDATA; /* first line contain absolute values, other lines contain deltas */ while (out < width) { int c = ir2_get_code(&ctx->gb); if (c >= 0x80) { /* we have a run */ c -= 0x7F; if (out + c*2 > width) return AVERROR_INVALIDDATA; for (i = 0; i < c * 2; i++) dst[out++] = 0x80; } else { /* copy two values from table */ dst[out++] = table[c * 2]; dst[out++] = table[(c * 2) + 1]; } } dst += pitch; for (j = 1; j < height; j++) { out = 0; if (get_bits_left(&ctx->gb) <= 0) return AVERROR_INVALIDDATA; while (out < width) { int c = ir2_get_code(&ctx->gb); if (c >= 0x80) { /* we have a skip */ c -= 0x7F; if (out + c*2 > width) return AVERROR_INVALIDDATA; for (i = 0; i < c * 2; i++) { dst[out] = dst[out - pitch]; out++; } } else { /* add two deltas from table */ int t = dst[out - pitch] + (table[c * 2] - 128); t = av_clip_uint8(t); dst[out] = t; out++; t = dst[out - pitch] + (table[(c * 2) + 1] - 128); t = av_clip_uint8(t); dst[out] = t; out++; } } dst += pitch; } return 0; }
3,386
1
void hmp_change(Monitor *mon, const QDict *qdict) { const char *device = qdict_get_str(qdict, "device"); const char *target = qdict_get_str(qdict, "target"); const char *arg = qdict_get_try_str(qdict, "arg"); const char *read_only = qdict_get_try_str(qdict, "read-only-mode"); BlockdevChangeReadOnlyMode read_only_mode = 0; Error *err = NULL; if (strcmp(device, "vnc") == 0) { if (read_only) { monitor_printf(mon, "Parameter 'read-only-mode' is invalid for VNC\n"); return; } if (strcmp(target, "passwd") == 0 || strcmp(target, "password") == 0) { if (!arg) { monitor_read_password(mon, hmp_change_read_arg, NULL); return; } } qmp_change("vnc", target, !!arg, arg, &err); } else { if (read_only) { read_only_mode = qapi_enum_parse(BlockdevChangeReadOnlyMode_lookup, read_only, BLOCKDEV_CHANGE_READ_ONLY_MODE__MAX, BLOCKDEV_CHANGE_READ_ONLY_MODE_RETAIN, &err); if (err) { hmp_handle_error(mon, &err); return; } } qmp_blockdev_change_medium(true, device, false, NULL, target, !!arg, arg, !!read_only, read_only_mode, &err); if (err && error_get_class(err) == ERROR_CLASS_DEVICE_ENCRYPTED) { error_free(err); monitor_read_block_device_key(mon, device, NULL, NULL); return; } } hmp_handle_error(mon, &err); }
3,387
1
static CharDriverState *text_console_init(ChardevVC *vc) { CharDriverState *chr; QemuConsole *s; unsigned width = 0; unsigned height = 0; chr = g_malloc0(sizeof(CharDriverState)); if (vc->has_width) { width = vc->width; } else if (vc->has_cols) { width = vc->cols * FONT_WIDTH; } if (vc->has_height) { height = vc->height; } else if (vc->has_rows) { height = vc->rows * FONT_HEIGHT; } trace_console_txt_new(width, height); if (width == 0 || height == 0) { s = new_console(NULL, TEXT_CONSOLE); } else { s = new_console(NULL, TEXT_CONSOLE_FIXED_SIZE); s->surface = qemu_create_displaysurface(width, height); } if (!s) { g_free(chr); return NULL; } s->chr = chr; chr->opaque = s; chr->chr_set_echo = text_console_set_echo; /* console/chardev init sometimes completes elsewhere in a 2nd * stage, so defer OPENED events until they are fully initialized */ chr->explicit_be_open = true; if (display_state) { text_console_do_init(chr, display_state); } return chr; }
3,388
1
static int codec_reinit(AVCodecContext *avctx, int width, int height, int quality) { NuvContext *c = avctx->priv_data; width = (width + 1) & ~1; height = (height + 1) & ~1; if (quality >= 0) get_quant_quality(c, quality); if (width != c->width || height != c->height) { if (av_image_check_size(height, width, 0, avctx) < 0) return 0; avctx->width = c->width = width; avctx->height = c->height = height; c->decomp_size = c->height * c->width * 3 / 2; c->decomp_buf = av_realloc(c->decomp_buf, c->decomp_size + AV_LZO_OUTPUT_PADDING); if (!c->decomp_buf) { av_log(avctx, AV_LOG_ERROR, "Can't allocate decompression buffer.\n"); return 0; } rtjpeg_decode_init(&c->rtj, &c->dsp, c->width, c->height, c->lq, c->cq); } else if (quality != c->quality) rtjpeg_decode_init(&c->rtj, &c->dsp, c->width, c->height, c->lq, c->cq); return 1; }
3,389
1
int av_open_input_stream(AVFormatContext **ic_ptr, ByteIOContext *pb, const char *filename, AVInputFormat *fmt, AVFormatParameters *ap) { int err; AVFormatContext *ic; AVFormatParameters default_ap; if(!ap){ ap=&default_ap; memset(ap, 0, sizeof(default_ap)); } if(!ap->prealloced_context) ic = avformat_alloc_context(); else ic = *ic_ptr; if (!ic) { err = AVERROR(ENOMEM); goto fail; } ic->iformat = fmt; ic->pb = pb; ic->duration = AV_NOPTS_VALUE; ic->start_time = AV_NOPTS_VALUE; av_strlcpy(ic->filename, filename, sizeof(ic->filename)); /* allocate private data */ if (fmt->priv_data_size > 0) { ic->priv_data = av_mallocz(fmt->priv_data_size); if (!ic->priv_data) { err = AVERROR(ENOMEM); goto fail; } } else { ic->priv_data = NULL; } if (ic->iformat->read_header) { err = ic->iformat->read_header(ic, ap); if (err < 0) goto fail; } if (pb && !ic->data_offset) ic->data_offset = url_ftell(ic->pb); #if LIBAVFORMAT_VERSION_MAJOR < 53 ff_metadata_demux_compat(ic); #endif ic->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE; *ic_ptr = ic; return 0; fail: if (ic) { int i; av_freep(&ic->priv_data); for(i=0;i<ic->nb_streams;i++) { AVStream *st = ic->streams[i]; if (st) { av_free(st->priv_data); av_free(st->codec->extradata); } av_free(st); } } av_free(ic); *ic_ptr = NULL; return err; }
3,390
1
static void network_to_control(RDMAControlHeader *control) { control->type = ntohl(control->type); control->len = ntohl(control->len); control->repeat = ntohl(control->repeat); }
3,391
1
void ff_get_wav_header(AVIOContext *pb, AVCodecContext *codec, int size) { int id; id = avio_rl16(pb); codec->codec_type = AVMEDIA_TYPE_AUDIO; codec->codec_tag = id; codec->channels = avio_rl16(pb); codec->sample_rate = avio_rl32(pb); codec->bit_rate = avio_rl32(pb) * 8; codec->block_align = avio_rl16(pb); if (size == 14) { /* We're dealing with plain vanilla WAVEFORMAT */ codec->bits_per_coded_sample = 8; }else codec->bits_per_coded_sample = avio_rl16(pb); if (size >= 18) { /* We're obviously dealing with WAVEFORMATEX */ int cbSize = avio_rl16(pb); /* cbSize */ size -= 18; cbSize = FFMIN(size, cbSize); if (cbSize >= 22 && id == 0xfffe) { /* WAVEFORMATEXTENSIBLE */ codec->bits_per_coded_sample = avio_rl16(pb); codec->channel_layout = avio_rl32(pb); /* dwChannelMask */ id = avio_rl32(pb); /* 4 first bytes of GUID */ avio_skip(pb, 12); /* skip end of GUID */ cbSize -= 22; size -= 22; } codec->extradata_size = cbSize; if (cbSize > 0) { codec->extradata = av_mallocz(codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE); avio_read(pb, codec->extradata, codec->extradata_size); size -= cbSize; } /* It is possible for the chunk to contain garbage at the end */ if (size > 0) avio_skip(pb, size); } codec->codec_id = ff_wav_codec_get_id(id, codec->bits_per_coded_sample); if (codec->codec_id == CODEC_ID_AAC_LATM) { /* channels and sample_rate values are those prior to applying SBR and/or PS */ codec->channels = 0; codec->sample_rate = 0; } }
3,393
0
static int read_restart_header(MLPDecodeContext *m, GetBitContext *gbp, const uint8_t *buf, unsigned int substr) { SubStream *s = &m->substream[substr]; unsigned int ch; int sync_word, tmp; uint8_t checksum; uint8_t lossless_check; int start_count = get_bits_count(gbp); const int max_matrix_channel = m->avctx->codec_id == CODEC_ID_MLP ? MAX_MATRIX_CHANNEL_MLP : MAX_MATRIX_CHANNEL_TRUEHD; sync_word = get_bits(gbp, 13); if (sync_word != 0x31ea >> 1) { av_log(m->avctx, AV_LOG_ERROR, "restart header sync incorrect (got 0x%04x)\n", sync_word); return AVERROR_INVALIDDATA; } s->noise_type = get_bits1(gbp); if (m->avctx->codec_id == CODEC_ID_MLP && s->noise_type) { av_log(m->avctx, AV_LOG_ERROR, "MLP must have 0x31ea sync word.\n"); return AVERROR_INVALIDDATA; } skip_bits(gbp, 16); /* Output timestamp */ s->min_channel = get_bits(gbp, 4); s->max_channel = get_bits(gbp, 4); s->max_matrix_channel = get_bits(gbp, 4); if (s->max_matrix_channel > max_matrix_channel) { av_log(m->avctx, AV_LOG_ERROR, "Max matrix channel cannot be greater than %d.\n", max_matrix_channel); return AVERROR_INVALIDDATA; } if (s->max_channel != s->max_matrix_channel) { av_log(m->avctx, AV_LOG_ERROR, "Max channel must be equal max matrix channel.\n"); return AVERROR_INVALIDDATA; } /* This should happen for TrueHD streams with >6 channels and MLP's noise * type. It is not yet known if this is allowed. */ if (s->max_channel > MAX_MATRIX_CHANNEL_MLP && !s->noise_type) { av_log_ask_for_sample(m->avctx, "Number of channels %d is larger than the maximum supported " "by the decoder.\n", s->max_channel + 2); return AVERROR_PATCHWELCOME; } if (s->min_channel > s->max_channel) { av_log(m->avctx, AV_LOG_ERROR, "Substream min channel cannot be greater than max channel.\n"); return AVERROR_INVALIDDATA; } if (m->avctx->request_channels > 0 && s->max_channel + 1 >= m->avctx->request_channels && substr < m->max_decoded_substream) { av_log(m->avctx, AV_LOG_DEBUG, "Extracting %d channel downmix from substream %d. " "Further substreams will be skipped.\n", s->max_channel + 1, substr); m->max_decoded_substream = substr; } s->noise_shift = get_bits(gbp, 4); s->noisegen_seed = get_bits(gbp, 23); skip_bits(gbp, 19); s->data_check_present = get_bits1(gbp); lossless_check = get_bits(gbp, 8); if (substr == m->max_decoded_substream && s->lossless_check_data != 0xffffffff) { tmp = xor_32_to_8(s->lossless_check_data); if (tmp != lossless_check) av_log(m->avctx, AV_LOG_WARNING, "Lossless check failed - expected %02x, calculated %02x.\n", lossless_check, tmp); } skip_bits(gbp, 16); memset(s->ch_assign, 0, sizeof(s->ch_assign)); for (ch = 0; ch <= s->max_matrix_channel; ch++) { int ch_assign = get_bits(gbp, 6); if (ch_assign > s->max_matrix_channel) { av_log_ask_for_sample(m->avctx, "Assignment of matrix channel %d to invalid output channel %d.\n", ch, ch_assign); return AVERROR_PATCHWELCOME; } s->ch_assign[ch_assign] = ch; } if (m->avctx->codec_id == CODEC_ID_MLP && m->needs_reordering) { if (m->avctx->channel_layout == (AV_CH_LAYOUT_QUAD|AV_CH_LOW_FREQUENCY) || m->avctx->channel_layout == AV_CH_LAYOUT_5POINT0_BACK) { int i = s->ch_assign[4]; s->ch_assign[4] = s->ch_assign[3]; s->ch_assign[3] = s->ch_assign[2]; s->ch_assign[2] = i; } else if (m->avctx->channel_layout == AV_CH_LAYOUT_5POINT1_BACK) { FFSWAP(int, s->ch_assign[2], s->ch_assign[4]); FFSWAP(int, s->ch_assign[3], s->ch_assign[5]); } } if (m->avctx->codec_id == CODEC_ID_TRUEHD && (m->avctx->channel_layout == AV_CH_LAYOUT_7POINT1 || m->avctx->channel_layout == AV_CH_LAYOUT_7POINT1_WIDE)) { FFSWAP(int, s->ch_assign[4], s->ch_assign[6]); FFSWAP(int, s->ch_assign[5], s->ch_assign[7]); } else if (m->avctx->codec_id == CODEC_ID_TRUEHD && (m->avctx->channel_layout == AV_CH_LAYOUT_6POINT1 || m->avctx->channel_layout == (AV_CH_LAYOUT_6POINT1 | AV_CH_TOP_CENTER) || m->avctx->channel_layout == (AV_CH_LAYOUT_6POINT1 | AV_CH_TOP_FRONT_CENTER))) { int i = s->ch_assign[6]; s->ch_assign[6] = s->ch_assign[5]; s->ch_assign[5] = s->ch_assign[4]; s->ch_assign[4] = i; } checksum = ff_mlp_restart_checksum(buf, get_bits_count(gbp) - start_count); if (checksum != get_bits(gbp, 8)) av_log(m->avctx, AV_LOG_ERROR, "restart header checksum error\n"); /* Set default decoding parameters. */ s->param_presence_flags = 0xff; s->num_primitive_matrices = 0; s->blocksize = 8; s->lossless_check_data = 0; memset(s->output_shift , 0, sizeof(s->output_shift )); memset(s->quant_step_size, 0, sizeof(s->quant_step_size)); for (ch = s->min_channel; ch <= s->max_channel; ch++) { ChannelParams *cp = &s->channel_params[ch]; cp->filter_params[FIR].order = 0; cp->filter_params[IIR].order = 0; cp->filter_params[FIR].shift = 0; cp->filter_params[IIR].shift = 0; /* Default audio coding is 24-bit raw PCM. */ cp->huff_offset = 0; cp->sign_huff_offset = (-1) << 23; cp->codebook = 0; cp->huff_lsbs = 24; } if (substr == m->max_decoded_substream) m->avctx->channels = s->max_matrix_channel + 1; return 0; }
3,395
1
static void init_blk_migration(Monitor *mon, QEMUFile *f) { BlkMigDevState *bmds; BlockDriverState *bs; block_mig_state.submitted = 0; block_mig_state.read_done = 0; block_mig_state.transferred = 0; block_mig_state.total_sector_sum = 0; block_mig_state.prev_progress = -1; for (bs = bdrv_first; bs != NULL; bs = bs->next) { if (bs->type == BDRV_TYPE_HD) { bmds = qemu_mallocz(sizeof(BlkMigDevState)); bmds->bs = bs; bmds->bulk_completed = 0; bmds->total_sectors = bdrv_getlength(bs) >> BDRV_SECTOR_BITS; bmds->completed_sectors = 0; bmds->shared_base = block_mig_state.shared_base; block_mig_state.total_sector_sum += bmds->total_sectors; if (bmds->shared_base) { monitor_printf(mon, "Start migration for %s with shared base " "image\n", bs->device_name); } else { monitor_printf(mon, "Start full migration for %s\n", bs->device_name); } QSIMPLEQ_INSERT_TAIL(&block_mig_state.bmds_list, bmds, entry); } } }
3,396
1
static void vmgenid_device_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->vmsd = &vmstate_vmgenid; dc->realize = vmgenid_realize; dc->hotpluggable = false; dc->props = vmgenid_properties; set_bit(DEVICE_CATEGORY_MISC, dc->categories); object_class_property_add_str(klass, VMGENID_GUID, NULL, vmgenid_set_guid, NULL); object_class_property_set_description(klass, VMGENID_GUID, "Set Global Unique Identifier " "(big-endian) or auto for random value", NULL); }
3,400
1
static void run_block_job(BlockJob *job, Error **errp) { AioContext *aio_context = blk_get_aio_context(job->blk); /* FIXME In error cases, the job simply goes away and we access a dangling * pointer below. */ aio_context_acquire(aio_context); do { aio_poll(aio_context, true); qemu_progress_print(job->len ? ((float)job->offset / job->len * 100.f) : 0.0f, 0); } while (!job->ready); block_job_complete_sync(job, errp); aio_context_release(aio_context); /* A block job may finish instantaneously without publishing any progress, * so just signal completion here */ qemu_progress_print(100.f, 0); }
3,402
1
static int dxtory_decode_v1_420(AVCodecContext *avctx, AVFrame *pic, const uint8_t *src, int src_size) { int h, w; uint8_t *Y1, *Y2, *U, *V; int ret; if (src_size < avctx->width * avctx->height * 3 / 2) { av_log(avctx, AV_LOG_ERROR, "packet too small\n"); return AVERROR_INVALIDDATA; } avctx->pix_fmt = AV_PIX_FMT_YUV420P; if ((ret = ff_get_buffer(avctx, pic, 0)) < 0) return ret; Y1 = pic->data[0]; Y2 = pic->data[0] + pic->linesize[0]; U = pic->data[1]; V = pic->data[2]; for (h = 0; h < avctx->height; h += 2) { for (w = 0; w < avctx->width; w += 2) { AV_COPY16(Y1 + w, src); AV_COPY16(Y2 + w, src + 2); U[w >> 1] = src[4] + 0x80; V[w >> 1] = src[5] + 0x80; src += 6; } Y1 += pic->linesize[0] << 1; Y2 += pic->linesize[0] << 1; U += pic->linesize[1]; V += pic->linesize[2]; } return 0; }
3,403
1
int attribute_align_arg avcodec_encode_video2(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr) { int ret; AVPacket user_pkt = *avpkt; int needs_realloc = !user_pkt.data; *got_packet_ptr = 0; if(CONFIG_FRAME_THREAD_ENCODER && avctx->internal->frame_thread_encoder && (avctx->active_thread_type&FF_THREAD_FRAME)) return ff_thread_video_encode_frame(avctx, avpkt, frame, got_packet_ptr); if ((avctx->flags&CODEC_FLAG_PASS1) && avctx->stats_out) avctx->stats_out[0] = '\0'; if (!(avctx->codec->capabilities & CODEC_CAP_DELAY) && !frame) { av_free_packet(avpkt); av_init_packet(avpkt); avpkt->size = 0; return 0; } if (av_image_check_size(avctx->width, avctx->height, 0, avctx)) return AVERROR(EINVAL); av_assert0(avctx->codec->encode2); ret = avctx->codec->encode2(avctx, avpkt, frame, got_packet_ptr); av_assert0(ret <= 0); if (avpkt->data && avpkt->data == avctx->internal->byte_buffer) { needs_realloc = 0; if (user_pkt.data) { if (user_pkt.size >= avpkt->size) { memcpy(user_pkt.data, avpkt->data, avpkt->size); } else { av_log(avctx, AV_LOG_ERROR, "Provided packet is too small, needs to be %d\n", avpkt->size); avpkt->size = user_pkt.size; ret = -1; } avpkt->buf = user_pkt.buf; avpkt->data = user_pkt.data; #if FF_API_DESTRUCT_PACKET FF_DISABLE_DEPRECATION_WARNINGS avpkt->destruct = user_pkt.destruct; FF_ENABLE_DEPRECATION_WARNINGS #endif } else { if (av_dup_packet(avpkt) < 0) { ret = AVERROR(ENOMEM); } } } if (!ret) { if (!*got_packet_ptr) avpkt->size = 0; else if (!(avctx->codec->capabilities & CODEC_CAP_DELAY)) avpkt->pts = avpkt->dts = frame->pts; if (needs_realloc && avpkt->data) { ret = av_buffer_realloc(&avpkt->buf, avpkt->size + FF_INPUT_BUFFER_PADDING_SIZE); if (ret >= 0) avpkt->data = avpkt->buf->data; } avctx->frame_number++; } if (ret < 0 || !*got_packet_ptr) av_free_packet(avpkt); else av_packet_merge_side_data(avpkt); emms_c(); return ret; }
3,404
0
static int dxva2_hevc_start_frame(AVCodecContext *avctx, av_unused const uint8_t *buffer, av_unused uint32_t size) { const HEVCContext *h = avctx->priv_data; AVDXVAContext *ctx = avctx->hwaccel_context; struct hevc_dxva2_picture_context *ctx_pic = h->ref->hwaccel_picture_private; if (!DXVA_CONTEXT_VALID(avctx, ctx)) return -1; av_assert0(ctx_pic); /* Fill up DXVA_PicParams_HEVC */ fill_picture_parameters(avctx, ctx, h, &ctx_pic->pp); /* Fill up DXVA_Qmatrix_HEVC */ fill_scaling_lists(ctx, h, &ctx_pic->qm); ctx_pic->slice_count = 0; ctx_pic->bitstream_size = 0; ctx_pic->bitstream = NULL; return 0; }
3,405
0
static uint8_t *ogg_write_vorbiscomment(int offset, int bitexact, int *header_len, AVDictionary **m, int framing_bit) { const char *vendor = bitexact ? "ffmpeg" : LIBAVFORMAT_IDENT; int size; uint8_t *p, *p0; ff_metadata_conv(m, ff_vorbiscomment_metadata_conv, NULL); size = offset + ff_vorbiscomment_length(*m, vendor) + framing_bit; p = av_mallocz(size); if (!p) return NULL; p0 = p; p += offset; ff_vorbiscomment_write(&p, m, vendor); if (framing_bit) bytestream_put_byte(&p, 1); *header_len = size; return p0; }
3,406
0
static void envelope_peak16(WaveformContext *s, AVFrame *out, int plane, int component) { const int dst_linesize = out->linesize[component] / 2; const int bg = s->bg_color[component] * (s->size / 256); const int limit = s->size - 1; const int is_chroma = (component == 1 || component == 2); const int shift_w = (is_chroma ? s->desc->log2_chroma_w : 0); const int shift_h = (is_chroma ? s->desc->log2_chroma_h : 0); const int dst_h = FF_CEIL_RSHIFT(out->height, shift_h); const int dst_w = FF_CEIL_RSHIFT(out->width, shift_w); const int start = s->estart[plane]; const int end = s->eend[plane]; int *emax = s->emax[plane][component]; int *emin = s->emin[plane][component]; uint16_t *dst; int x, y; if (s->mode) { for (x = 0; x < dst_w; x++) { for (y = start; y < end && y < emin[x]; y++) { dst = (uint16_t *)out->data[component] + y * dst_linesize + x; if (dst[0] != bg) { emin[x] = y; break; } } for (y = end - 1; y >= start && y >= emax[x]; y--) { dst = (uint16_t *)out->data[component] + y * dst_linesize + x; if (dst[0] != bg) { emax[x] = y; break; } } } if (s->envelope == 3) envelope_instant16(s, out, plane, component); for (x = 0; x < dst_w; x++) { dst = (uint16_t *)out->data[component] + emin[x] * dst_linesize + x; dst[0] = limit; dst = (uint16_t *)out->data[component] + emax[x] * dst_linesize + x; dst[0] = limit; } } else { for (y = 0; y < dst_h; y++) { dst = (uint16_t *)out->data[component] + y * dst_linesize; for (x = start; x < end && x < emin[y]; x++) { if (dst[x] != bg) { emin[y] = x; break; } } for (x = end - 1; x >= start && x >= emax[y]; x--) { if (dst[x] != bg) { emax[y] = x; break; } } } if (s->envelope == 3) envelope_instant16(s, out, plane, component); for (y = 0; y < dst_h; y++) { dst = (uint16_t *)out->data[component] + y * dst_linesize + emin[y]; dst[0] = limit; dst = (uint16_t *)out->data[component] + y * dst_linesize + emax[y]; dst[0] = limit; } } }
3,407
0
int sws_setColorspaceDetails(SwsContext *c, const int inv_table[4], int srcRange, const int table[4], int dstRange, int brightness, int contrast, int saturation) { int64_t crv = inv_table[0]; int64_t cbu = inv_table[1]; int64_t cgu = -inv_table[2]; int64_t cgv = -inv_table[3]; int64_t cy = 1<<16; int64_t oy = 0; memcpy(c->srcColorspaceTable, inv_table, sizeof(int)*4); memcpy(c->dstColorspaceTable, table, sizeof(int)*4); c->brightness= brightness; c->contrast = contrast; c->saturation= saturation; c->srcRange = srcRange; c->dstRange = dstRange; if (isYUV(c->dstFormat) || isGray(c->dstFormat)) return -1; c->uOffset= 0x0400040004000400LL; c->vOffset= 0x0400040004000400LL; if (!srcRange) { cy= (cy*255) / 219; oy= 16<<16; } else { crv= (crv*224) / 255; cbu= (cbu*224) / 255; cgu= (cgu*224) / 255; cgv= (cgv*224) / 255; } cy = (cy *contrast )>>16; crv= (crv*contrast * saturation)>>32; cbu= (cbu*contrast * saturation)>>32; cgu= (cgu*contrast * saturation)>>32; cgv= (cgv*contrast * saturation)>>32; oy -= 256*brightness; c->yCoeff= roundToInt16(cy *8192) * 0x0001000100010001ULL; c->vrCoeff= roundToInt16(crv*8192) * 0x0001000100010001ULL; c->ubCoeff= roundToInt16(cbu*8192) * 0x0001000100010001ULL; c->vgCoeff= roundToInt16(cgv*8192) * 0x0001000100010001ULL; c->ugCoeff= roundToInt16(cgu*8192) * 0x0001000100010001ULL; c->yOffset= roundToInt16(oy * 8) * 0x0001000100010001ULL; c->yuv2rgb_y_coeff = (int16_t)roundToInt16(cy <<13); c->yuv2rgb_y_offset = (int16_t)roundToInt16(oy << 9); c->yuv2rgb_v2r_coeff= (int16_t)roundToInt16(crv<<13); c->yuv2rgb_v2g_coeff= (int16_t)roundToInt16(cgv<<13); c->yuv2rgb_u2g_coeff= (int16_t)roundToInt16(cgu<<13); c->yuv2rgb_u2b_coeff= (int16_t)roundToInt16(cbu<<13); ff_yuv2rgb_c_init_tables(c, inv_table, srcRange, brightness, contrast, saturation); //FIXME factorize #if ARCH_PPC && HAVE_ALTIVEC if (c->flags & SWS_CPU_CAPS_ALTIVEC) ff_yuv2rgb_init_tables_altivec(c, inv_table, brightness, contrast, saturation); #endif return 0; }
3,408
0
static int decode_mb_info(IVI5DecContext *ctx, IVIBandDesc *band, IVITile *tile, AVCodecContext *avctx) { int x, y, mv_x, mv_y, mv_delta, offs, mb_offset, mv_scale, blks_per_mb; IVIMbInfo *mb, *ref_mb; int row_offset = band->mb_size * band->pitch; mb = tile->mbs; ref_mb = tile->ref_mbs; offs = tile->ypos * band->pitch + tile->xpos; if (!ref_mb && ((band->qdelta_present && band->inherit_qdelta) || band->inherit_mv)) return AVERROR_INVALIDDATA; /* scale factor for motion vectors */ mv_scale = (ctx->planes[0].bands[0].mb_size >> 3) - (band->mb_size >> 3); mv_x = mv_y = 0; for (y = tile->ypos; y < (tile->ypos + tile->height); y += band->mb_size) { mb_offset = offs; for (x = tile->xpos; x < (tile->xpos + tile->width); x += band->mb_size) { mb->xpos = x; mb->ypos = y; mb->buf_offs = mb_offset; if (get_bits1(&ctx->gb)) { if (ctx->frame_type == FRAMETYPE_INTRA) { av_log(avctx, AV_LOG_ERROR, "Empty macroblock in an INTRA picture!\n"); return -1; } mb->type = 1; /* empty macroblocks are always INTER */ mb->cbp = 0; /* all blocks are empty */ mb->q_delta = 0; if (!band->plane && !band->band_num && (ctx->frame_flags & 8)) { mb->q_delta = get_vlc2(&ctx->gb, ctx->mb_vlc.tab->table, IVI_VLC_BITS, 1); mb->q_delta = IVI_TOSIGNED(mb->q_delta); } mb->mv_x = mb->mv_y = 0; /* no motion vector coded */ if (band->inherit_mv && ref_mb){ /* motion vector inheritance */ if (mv_scale) { mb->mv_x = ivi_scale_mv(ref_mb->mv_x, mv_scale); mb->mv_y = ivi_scale_mv(ref_mb->mv_y, mv_scale); } else { mb->mv_x = ref_mb->mv_x; mb->mv_y = ref_mb->mv_y; } } } else { if (band->inherit_mv && ref_mb) { mb->type = ref_mb->type; /* copy mb_type from corresponding reference mb */ } else if (ctx->frame_type == FRAMETYPE_INTRA) { mb->type = 0; /* mb_type is always INTRA for intra-frames */ } else { mb->type = get_bits1(&ctx->gb); } blks_per_mb = band->mb_size != band->blk_size ? 4 : 1; mb->cbp = get_bits(&ctx->gb, blks_per_mb); mb->q_delta = 0; if (band->qdelta_present) { if (band->inherit_qdelta) { if (ref_mb) mb->q_delta = ref_mb->q_delta; } else if (mb->cbp || (!band->plane && !band->band_num && (ctx->frame_flags & 8))) { mb->q_delta = get_vlc2(&ctx->gb, ctx->mb_vlc.tab->table, IVI_VLC_BITS, 1); mb->q_delta = IVI_TOSIGNED(mb->q_delta); } } if (!mb->type) { mb->mv_x = mb->mv_y = 0; /* there is no motion vector in intra-macroblocks */ } else { if (band->inherit_mv && ref_mb){ /* motion vector inheritance */ if (mv_scale) { mb->mv_x = ivi_scale_mv(ref_mb->mv_x, mv_scale); mb->mv_y = ivi_scale_mv(ref_mb->mv_y, mv_scale); } else { mb->mv_x = ref_mb->mv_x; mb->mv_y = ref_mb->mv_y; } } else { /* decode motion vector deltas */ mv_delta = get_vlc2(&ctx->gb, ctx->mb_vlc.tab->table, IVI_VLC_BITS, 1); mv_y += IVI_TOSIGNED(mv_delta); mv_delta = get_vlc2(&ctx->gb, ctx->mb_vlc.tab->table, IVI_VLC_BITS, 1); mv_x += IVI_TOSIGNED(mv_delta); mb->mv_x = mv_x; mb->mv_y = mv_y; } } } mb++; if (ref_mb) ref_mb++; mb_offset += band->mb_size; } offs += row_offset; } align_get_bits(&ctx->gb); return 0; }
3,409
1
void ff_imdct_calc(MDCTContext *s, FFTSample *output, const FFTSample *input, FFTSample *tmp) { int k, n8, n4, n2, n, j; const uint16_t *revtab = s->fft.revtab; const FFTSample *tcos = s->tcos; const FFTSample *tsin = s->tsin; const FFTSample *in1, *in2; FFTComplex *z = (FFTComplex *)tmp; n = 1 << s->nbits; n2 = n >> 1; n4 = n >> 2; n8 = n >> 3; /* pre rotation */ in1 = input; in2 = input + n2 - 1; for(k = 0; k < n4; k++) { j=revtab[k]; CMUL(z[j].re, z[j].im, *in2, *in1, tcos[k], tsin[k]); in1 += 2; in2 -= 2; } ff_fft_calc(&s->fft, z); /* post rotation + reordering */ /* XXX: optimize */ for(k = 0; k < n4; k++) { CMUL(z[k].re, z[k].im, z[k].re, z[k].im, tcos[k], tsin[k]); } for(k = 0; k < n8; k++) { output[2*k] = -z[n8 + k].im; output[n2-1-2*k] = z[n8 + k].im; output[2*k+1] = z[n8-1-k].re; output[n2-1-2*k-1] = -z[n8-1-k].re; output[n2 + 2*k]=-z[k+n8].re; output[n-1- 2*k]=-z[k+n8].re; output[n2 + 2*k+1]=z[n8-k-1].im; output[n-2 - 2 * k] = z[n8-k-1].im; } }
3,410
1
ISADevice *isa_create(const char *name) { DeviceState *dev; if (!isabus) { fprintf(stderr, "Tried to create isa device %s with no isa bus present.\n", name); return NULL; } dev = qdev_create(&isabus->qbus, name); return DO_UPCAST(ISADevice, qdev, dev); }
3,411
1
static int applehttp_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags) { AppleHTTPContext *c = s->priv_data; int i, j, ret; if ((flags & AVSEEK_FLAG_BYTE) || !c->variants[0]->finished) return AVERROR(ENOSYS); timestamp = av_rescale_rnd(timestamp, 1, stream_index >= 0 ? s->streams[stream_index]->time_base.den : AV_TIME_BASE, flags & AVSEEK_FLAG_BACKWARD ? AV_ROUND_DOWN : AV_ROUND_UP); ret = AVERROR(EIO); for (i = 0; i < c->n_variants; i++) { /* Reset reading */ struct variant *var = c->variants[i]; int64_t pos = 0; if (var->input) { ffurl_close(var->input); var->input = NULL; } av_free_packet(&var->pkt); reset_packet(&var->pkt); var->pb.eof_reached = 0; /* Locate the segment that contains the target timestamp */ for (j = 0; j < var->n_segments; j++) { if (timestamp >= pos && timestamp < pos + var->segments[j]->duration) { var->cur_seq_no = var->start_seq_no + j; ret = 0; break; } pos += var->segments[j]->duration; } } return ret; }
3,412
1
int coroutine_fn qemu_co_recvv(int sockfd, struct iovec *iov, int len, int iov_offset) { int total = 0; int ret; while (len) { ret = qemu_recvv(sockfd, iov, len, iov_offset + total); if (ret < 0) { if (errno == EAGAIN) { qemu_coroutine_yield(); continue; } if (total == 0) { total = -1; } break; } if (ret == 0) { break; } total += ret, len -= ret; } return total; }
3,413
1
static void vc1_decode_p_blocks(VC1Context *v) { MpegEncContext *s = &v->s; int apply_loop_filter; /* select codingmode used for VLC tables selection */ switch (v->c_ac_table_index) { case 0: v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA; break; case 1: v->codingset = CS_HIGH_MOT_INTRA; break; case 2: v->codingset = CS_MID_RATE_INTRA; break; } switch (v->c_ac_table_index) { case 0: v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER; break; case 1: v->codingset2 = CS_HIGH_MOT_INTER; break; case 2: v->codingset2 = CS_MID_RATE_INTER; break; } apply_loop_filter = s->loop_filter && !(s->avctx->skip_loop_filter >= AVDISCARD_NONKEY); s->first_slice_line = 1; memset(v->cbp_base, 0, sizeof(v->cbp_base[0])*2*s->mb_stride); for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) { s->mb_x = 0; ff_init_block_index(s); for (; s->mb_x < s->mb_width; s->mb_x++) { ff_update_block_index(s); if (v->fcm == ILACE_FIELD) vc1_decode_p_mb_intfi(v); else if (v->fcm == ILACE_FRAME) vc1_decode_p_mb_intfr(v); else vc1_decode_p_mb(v); if (s->mb_y != s->start_mb_y && apply_loop_filter && v->fcm == PROGRESSIVE) vc1_apply_p_loop_filter(v); if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) { // TODO: may need modification to handle slice coding ff_er_add_slice(s, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR); av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n", get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y); return; } } memmove(v->cbp_base, v->cbp, sizeof(v->cbp_base[0]) * s->mb_stride); memmove(v->ttblk_base, v->ttblk, sizeof(v->ttblk_base[0]) * s->mb_stride); memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride); memmove(v->luma_mv_base, v->luma_mv, sizeof(v->luma_mv_base[0]) * s->mb_stride); if (s->mb_y != s->start_mb_y) ff_draw_horiz_band(s, (s->mb_y - 1) * 16, 16); s->first_slice_line = 0; } if (apply_loop_filter) { s->mb_x = 0; ff_init_block_index(s); for (; s->mb_x < s->mb_width; s->mb_x++) { ff_update_block_index(s); vc1_apply_p_loop_filter(v); } } if (s->end_mb_y >= s->start_mb_y) ff_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16); ff_er_add_slice(s, 0, s->start_mb_y << v->field_mode, s->mb_width - 1, (s->end_mb_y << v->field_mode) - 1, ER_MB_END); }
3,414
1
static int get_metadata(AVFormatContext *s, const char *const tag, const unsigned data_size) { uint8_t *buf = ((data_size + 1) == 0) ? NULL : av_malloc(data_size + 1); if (!buf) return AVERROR(ENOMEM); if (avio_read(s->pb, buf, data_size) < 0) { av_free(buf); return AVERROR(EIO); } buf[data_size] = 0; av_dict_set(&s->metadata, tag, buf, AV_DICT_DONT_STRDUP_VAL); return 0; }
3,415
0
static int set_sps(HEVCContext *s, const HEVCSPS *sps) { int ret; int num = 0, den = 0; pic_arrays_free(s); ret = pic_arrays_init(s, sps); if (ret < 0) goto fail; s->avctx->coded_width = sps->width; s->avctx->coded_height = sps->height; s->avctx->width = sps->output_width; s->avctx->height = sps->output_height; s->avctx->pix_fmt = sps->pix_fmt; s->avctx->sample_aspect_ratio = sps->vui.sar; s->avctx->has_b_frames = sps->temporal_layer[sps->max_sub_layers - 1].num_reorder_pics; if (sps->vui.video_signal_type_present_flag) s->avctx->color_range = sps->vui.video_full_range_flag ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG; else s->avctx->color_range = AVCOL_RANGE_MPEG; if (sps->vui.colour_description_present_flag) { s->avctx->color_primaries = sps->vui.colour_primaries; s->avctx->color_trc = sps->vui.transfer_characteristic; s->avctx->colorspace = sps->vui.matrix_coeffs; } else { s->avctx->color_primaries = AVCOL_PRI_UNSPECIFIED; s->avctx->color_trc = AVCOL_TRC_UNSPECIFIED; s->avctx->colorspace = AVCOL_SPC_UNSPECIFIED; } ff_hevc_pred_init(&s->hpc, sps->bit_depth); ff_hevc_dsp_init (&s->hevcdsp, sps->bit_depth); ff_videodsp_init (&s->vdsp, sps->bit_depth); if (sps->sao_enabled) { av_frame_unref(s->tmp_frame); ret = ff_get_buffer(s->avctx, s->tmp_frame, AV_GET_BUFFER_FLAG_REF); if (ret < 0) goto fail; s->frame = s->tmp_frame; } s->sps = sps; s->vps = (HEVCVPS*) s->vps_list[s->sps->vps_id]->data; if (s->vps->vps_timing_info_present_flag) { num = s->vps->vps_num_units_in_tick; den = s->vps->vps_time_scale; } else if (sps->vui.vui_timing_info_present_flag) { num = sps->vui.vui_num_units_in_tick; den = sps->vui.vui_time_scale; } if (num != 0 && den != 0) av_reduce(&s->avctx->time_base.num, &s->avctx->time_base.den, num, den, 1 << 30); return 0; fail: pic_arrays_free(s); s->sps = NULL; return ret; }
3,417
0
yuv2mono_2_c_template(SwsContext *c, const int16_t *buf[2], const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf[2], uint8_t *dest, int dstW, int yalpha, int uvalpha, int y, enum AVPixelFormat target) { const int16_t *buf0 = buf[0], *buf1 = buf[1]; const uint8_t * const d128 = dither_8x8_220[y & 7]; int yalpha1 = 4096 - yalpha; int i; if (c->flags & SWS_ERROR_DIFFUSION) { int err = 0; int acc = 0; for (i = 0; i < dstW; i +=2) { int Y; Y = (buf0[i + 0] * yalpha1 + buf1[i + 0] * yalpha) >> 19; Y += (7*err + 1*c->dither_error[0][i] + 5*c->dither_error[0][i+1] + 3*c->dither_error[0][i+2] + 8 - 256)>>4; c->dither_error[0][i] = err; acc = 2*acc + (Y >= 128); Y -= 220*(acc&1); err = (buf0[i + 1] * yalpha1 + buf1[i + 1] * yalpha) >> 19; err += (7*Y + 1*c->dither_error[0][i+1] + 5*c->dither_error[0][i+2] + 3*c->dither_error[0][i+3] + 8 - 256)>>4; c->dither_error[0][i+1] = Y; acc = 2*acc + (err >= 128); err -= 220*(acc&1); if ((i & 7) == 6) output_pixel(*dest++, acc); } c->dither_error[0][i] = err; } else { for (i = 0; i < dstW; i += 8) { int Y, acc = 0; Y = (buf0[i + 0] * yalpha1 + buf1[i + 0] * yalpha) >> 19; accumulate_bit(acc, Y + d128[0]); Y = (buf0[i + 1] * yalpha1 + buf1[i + 1] * yalpha) >> 19; accumulate_bit(acc, Y + d128[1]); Y = (buf0[i + 2] * yalpha1 + buf1[i + 2] * yalpha) >> 19; accumulate_bit(acc, Y + d128[2]); Y = (buf0[i + 3] * yalpha1 + buf1[i + 3] * yalpha) >> 19; accumulate_bit(acc, Y + d128[3]); Y = (buf0[i + 4] * yalpha1 + buf1[i + 4] * yalpha) >> 19; accumulate_bit(acc, Y + d128[4]); Y = (buf0[i + 5] * yalpha1 + buf1[i + 5] * yalpha) >> 19; accumulate_bit(acc, Y + d128[5]); Y = (buf0[i + 6] * yalpha1 + buf1[i + 6] * yalpha) >> 19; accumulate_bit(acc, Y + d128[6]); Y = (buf0[i + 7] * yalpha1 + buf1[i + 7] * yalpha) >> 19; accumulate_bit(acc, Y + d128[7]); output_pixel(*dest++, acc); } } }
3,418
0
static double eval_expr(Parser * p, AVEvalExpr * e) { switch (e->type) { case e_value: return e->value; case e_const: return e->value * p->const_value[e->a.const_index]; case e_func0: return e->value * e->a.func0(eval_expr(p, e->param[0])); case e_func1: return e->value * e->a.func1(p->opaque, eval_expr(p, e->param[0])); case e_func2: return e->value * e->a.func2(p->opaque, eval_expr(p, e->param[0]), eval_expr(p, e->param[1])); case e_squish: return 1/(1+exp(4*eval_expr(p, e->param[0]))); case e_gauss: { double d = eval_expr(p, e->param[0]); return exp(-d*d/2)/sqrt(2*M_PI); } case e_ld: return e->value * p->var[clip(eval_expr(p, e->param[0]), 0, VARS-1)]; case e_while: { double d; while(eval_expr(p, e->param[0])) d=eval_expr(p, e->param[1]); return d; } default: { double d = eval_expr(p, e->param[0]); double d2 = eval_expr(p, e->param[1]); switch (e->type) { case e_mod: return e->value * (d - floor(d/d2)*d2); case e_max: return e->value * (d > d2 ? d : d2); case e_min: return e->value * (d < d2 ? d : d2); case e_eq: return e->value * (d == d2 ? 1.0 : 0.0); case e_gt: return e->value * (d > d2 ? 1.0 : 0.0); case e_gte: return e->value * (d >= d2 ? 1.0 : 0.0); case e_pow: return e->value * pow(d, d2); case e_mul: return e->value * (d * d2); case e_div: return e->value * (d / d2); case e_add: return e->value * (d + d2); case e_last:return d2; case e_st : return e->value * (p->var[clip(d, 0, VARS-1)]= d2); } } } return NAN; }
3,419
1
static int build_filter(ResampleContext *c, void *filter, double factor, int tap_count, int alloc, int phase_count, int scale, int filter_type, int kaiser_beta){ int ph, i; double x, y, w; double *tab = av_malloc_array(tap_count, sizeof(*tab)); const int center= (tap_count-1)/2; if (!tab) return AVERROR(ENOMEM); /* if upsampling, only need to interpolate, no filter */ if (factor > 1.0) factor = 1.0; for(ph=0;ph<phase_count;ph++) { double norm = 0; for(i=0;i<tap_count;i++) { x = M_PI * ((double)(i - center) - (double)ph / phase_count) * factor; if (x == 0) y = 1.0; else y = sin(x) / x; switch(filter_type){ case SWR_FILTER_TYPE_CUBIC:{ const float d= -0.5; //first order derivative = -0.5 x = fabs(((double)(i - center) - (double)ph / phase_count) * factor); if(x<1.0) y= 1 - 3*x*x + 2*x*x*x + d*( -x*x + x*x*x); else y= d*(-4 + 8*x - 5*x*x + x*x*x); break;} case SWR_FILTER_TYPE_BLACKMAN_NUTTALL: w = 2.0*x / (factor*tap_count) + M_PI; y *= 0.3635819 - 0.4891775 * cos(w) + 0.1365995 * cos(2*w) - 0.0106411 * cos(3*w); break; case SWR_FILTER_TYPE_KAISER: w = 2.0*x / (factor*tap_count*M_PI); y *= bessel(kaiser_beta*sqrt(FFMAX(1-w*w, 0))); break; default: av_assert0(0); } tab[i] = y; norm += y; } /* normalize so that an uniform color remains the same */ switch(c->format){ case AV_SAMPLE_FMT_S16P: for(i=0;i<tap_count;i++) ((int16_t*)filter)[ph * alloc + i] = av_clip(lrintf(tab[i] * scale / norm), INT16_MIN, INT16_MAX); break; case AV_SAMPLE_FMT_S32P: for(i=0;i<tap_count;i++) ((int32_t*)filter)[ph * alloc + i] = av_clipl_int32(llrint(tab[i] * scale / norm)); break; case AV_SAMPLE_FMT_FLTP: for(i=0;i<tap_count;i++) ((float*)filter)[ph * alloc + i] = tab[i] * scale / norm; break; case AV_SAMPLE_FMT_DBLP: for(i=0;i<tap_count;i++) ((double*)filter)[ph * alloc + i] = tab[i] * scale / norm; break; } } #if 0 { #define LEN 1024 int j,k; double sine[LEN + tap_count]; double filtered[LEN]; double maxff=-2, minff=2, maxsf=-2, minsf=2; for(i=0; i<LEN; i++){ double ss=0, sf=0, ff=0; for(j=0; j<LEN+tap_count; j++) sine[j]= cos(i*j*M_PI/LEN); for(j=0; j<LEN; j++){ double sum=0; ph=0; for(k=0; k<tap_count; k++) sum += filter[ph * tap_count + k] * sine[k+j]; filtered[j]= sum / (1<<FILTER_SHIFT); ss+= sine[j + center] * sine[j + center]; ff+= filtered[j] * filtered[j]; sf+= sine[j + center] * filtered[j]; } ss= sqrt(2*ss/LEN); ff= sqrt(2*ff/LEN); sf= 2*sf/LEN; maxff= FFMAX(maxff, ff); minff= FFMIN(minff, ff); maxsf= FFMAX(maxsf, sf); minsf= FFMIN(minsf, sf); if(i%11==0){ av_log(NULL, AV_LOG_ERROR, "i:%4d ss:%f ff:%13.6e-%13.6e sf:%13.6e-%13.6e\n", i, ss, maxff, minff, maxsf, minsf); minff=minsf= 2; maxff=maxsf= -2; } } } #endif av_free(tab); return 0; }
3,420
1
void avcodec_get_context_defaults2(AVCodecContext *s, enum CodecType codec_type){ int flags=0; memset(s, 0, sizeof(AVCodecContext)); s->av_class= &av_codec_context_class; s->codec_type = codec_type; if(codec_type == CODEC_TYPE_AUDIO) flags= AV_OPT_FLAG_AUDIO_PARAM; else if(codec_type == CODEC_TYPE_VIDEO) flags= AV_OPT_FLAG_VIDEO_PARAM; else if(codec_type == CODEC_TYPE_SUBTITLE) flags= AV_OPT_FLAG_SUBTITLE_PARAM; av_opt_set_defaults2(s, flags, flags); s->rc_eq= av_strdup("tex^qComp"); s->time_base= (AVRational){0,1}; s->get_buffer= avcodec_default_get_buffer; s->release_buffer= avcodec_default_release_buffer; s->get_format= avcodec_default_get_format; s->execute= avcodec_default_execute; s->sample_aspect_ratio= (AVRational){0,1}; s->pix_fmt= PIX_FMT_NONE; s->sample_fmt= SAMPLE_FMT_S16; // FIXME: set to NONE s->palctrl = NULL; s->reget_buffer= avcodec_default_reget_buffer; }
3,421
1
static void dec_float(DisasContext *dc, uint32_t insn) { uint32_t op0; uint32_t ra, rb, rd; op0 = extract32(insn, 0, 8); ra = extract32(insn, 16, 5); rb = extract32(insn, 11, 5); rd = extract32(insn, 21, 5); switch (op0) { case 0x00: /* lf.add.s */ LOG_DIS("lf.add.s r%d, r%d, r%d\n", rd, ra, rb); gen_helper_float_add_s(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]); break; case 0x01: /* lf.sub.s */ LOG_DIS("lf.sub.s r%d, r%d, r%d\n", rd, ra, rb); gen_helper_float_sub_s(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]); break; case 0x02: /* lf.mul.s */ LOG_DIS("lf.mul.s r%d, r%d, r%d\n", rd, ra, rb); if (ra != 0 && rb != 0) { gen_helper_float_mul_s(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]); } else { tcg_gen_ori_tl(fpcsr, fpcsr, FPCSR_ZF); tcg_gen_movi_i32(cpu_R[rd], 0x0); } break; case 0x03: /* lf.div.s */ LOG_DIS("lf.div.s r%d, r%d, r%d\n", rd, ra, rb); gen_helper_float_div_s(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]); break; case 0x04: /* lf.itof.s */ LOG_DIS("lf.itof r%d, r%d\n", rd, ra); gen_helper_itofs(cpu_R[rd], cpu_env, cpu_R[ra]); break; case 0x05: /* lf.ftoi.s */ LOG_DIS("lf.ftoi r%d, r%d\n", rd, ra); gen_helper_ftois(cpu_R[rd], cpu_env, cpu_R[ra]); break; case 0x06: /* lf.rem.s */ LOG_DIS("lf.rem.s r%d, r%d, r%d\n", rd, ra, rb); gen_helper_float_rem_s(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]); break; case 0x07: /* lf.madd.s */ LOG_DIS("lf.madd.s r%d, r%d, r%d\n", rd, ra, rb); gen_helper_float_madd_s(cpu_R[rd], cpu_env, cpu_R[rd], cpu_R[ra], cpu_R[rb]); break; case 0x08: /* lf.sfeq.s */ LOG_DIS("lf.sfeq.s r%d, r%d\n", ra, rb); gen_helper_float_eq_s(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]); break; case 0x09: /* lf.sfne.s */ LOG_DIS("lf.sfne.s r%d, r%d\n", ra, rb); gen_helper_float_ne_s(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]); break; case 0x0a: /* lf.sfgt.s */ LOG_DIS("lf.sfgt.s r%d, r%d\n", ra, rb); gen_helper_float_gt_s(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]); break; case 0x0b: /* lf.sfge.s */ LOG_DIS("lf.sfge.s r%d, r%d\n", ra, rb); gen_helper_float_ge_s(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]); break; case 0x0c: /* lf.sflt.s */ LOG_DIS("lf.sflt.s r%d, r%d\n", ra, rb); gen_helper_float_lt_s(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]); break; case 0x0d: /* lf.sfle.s */ LOG_DIS("lf.sfle.s r%d, r%d\n", ra, rb); gen_helper_float_le_s(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]); break; /* not used yet, open it when we need or64. */ /*#ifdef TARGET_OPENRISC64 case 0x10: lf.add.d LOG_DIS("lf.add.d r%d, r%d, r%d\n", rd, ra, rb); check_of64s(dc); gen_helper_float_add_d(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]); break; case 0x11: lf.sub.d LOG_DIS("lf.sub.d r%d, r%d, r%d\n", rd, ra, rb); check_of64s(dc); gen_helper_float_sub_d(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]); break; case 0x12: lf.mul.d LOG_DIS("lf.mul.d r%d, r%d, r%d\n", rd, ra, rb); check_of64s(dc); if (ra != 0 && rb != 0) { gen_helper_float_mul_d(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]); } else { tcg_gen_ori_tl(fpcsr, fpcsr, FPCSR_ZF); tcg_gen_movi_i64(cpu_R[rd], 0x0); } break; case 0x13: lf.div.d LOG_DIS("lf.div.d r%d, r%d, r%d\n", rd, ra, rb); check_of64s(dc); gen_helper_float_div_d(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]); break; case 0x14: lf.itof.d LOG_DIS("lf.itof r%d, r%d\n", rd, ra); check_of64s(dc); gen_helper_itofd(cpu_R[rd], cpu_env, cpu_R[ra]); break; case 0x15: lf.ftoi.d LOG_DIS("lf.ftoi r%d, r%d\n", rd, ra); check_of64s(dc); gen_helper_ftoid(cpu_R[rd], cpu_env, cpu_R[ra]); break; case 0x16: lf.rem.d LOG_DIS("lf.rem.d r%d, r%d, r%d\n", rd, ra, rb); check_of64s(dc); gen_helper_float_rem_d(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]); break; case 0x17: lf.madd.d LOG_DIS("lf.madd.d r%d, r%d, r%d\n", rd, ra, rb); check_of64s(dc); gen_helper_float_madd_d(cpu_R[rd], cpu_env, cpu_R[rd], cpu_R[ra], cpu_R[rb]); break; case 0x18: lf.sfeq.d LOG_DIS("lf.sfeq.d r%d, r%d\n", ra, rb); check_of64s(dc); gen_helper_float_eq_d(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]); break; case 0x1a: lf.sfgt.d LOG_DIS("lf.sfgt.d r%d, r%d\n", ra, rb); check_of64s(dc); gen_helper_float_gt_d(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]); break; case 0x1b: lf.sfge.d LOG_DIS("lf.sfge.d r%d, r%d\n", ra, rb); check_of64s(dc); gen_helper_float_ge_d(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]); break; case 0x19: lf.sfne.d LOG_DIS("lf.sfne.d r%d, r%d\n", ra, rb); check_of64s(dc); gen_helper_float_ne_d(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]); break; case 0x1c: lf.sflt.d LOG_DIS("lf.sflt.d r%d, r%d\n", ra, rb); check_of64s(dc); gen_helper_float_lt_d(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]); break; case 0x1d: lf.sfle.d LOG_DIS("lf.sfle.d r%d, r%d\n", ra, rb); check_of64s(dc); gen_helper_float_le_d(cpu_sr_f, cpu_env, cpu_R[ra], cpu_R[rb]); break; #endif*/ default: gen_illegal_exception(dc); break; } }
3,422
1
void rng_backend_open(RngBackend *s, Error **errp) { object_property_set_bool(OBJECT(s), true, "opened", errp); }
3,423