label: int64, values 0–1
func1: string, lengths 23–97k
id: int64, values 0–27.3k
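The three columns above suggest records of the form (label, func1, id), and each record below appears to list those fields in that order: a label line, the func1 source string, then the id. As a minimal, hypothetical C sketch of that layout (the struct and helper names are illustrative, and the 97k / 27.3k maxima are treated as approximate upper bounds):

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <stdbool.h>

/* Hypothetical record matching the schema above:
 * label (int64, 0 or 1), func1 (string, ~23..97k chars), id (int64, 0..~27.3k). */
typedef struct {
    int64_t label;
    const char *func1;
    int64_t id;
} Record;

/* Check one record against the ranges stated in the schema header.
 * The numeric bounds are read as approximate, inclusive limits. */
static bool record_is_valid(const Record *r)
{
    size_t len = strlen(r->func1);
    return (r->label == 0 || r->label == 1) &&
           len >= 23 && len <= 97000 &&
           r->id >= 0 && r->id <= 27300;
}

int main(void)
{
    Record r = { 0, "static void example(void) { }", 20493 };
    printf("valid: %d\n", record_is_valid(&r));
    return 0;
}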
0
static inline void skip_hdr_extension(GetBitContext *gb) { int i, len; do { len = get_bits(gb, 8); for (i = 0; i < len; i++) skip_bits(gb, 8); } while(len); }
20,493
0
static int nbd_negotiate_options(NBDClient *client) { uint32_t flags; /* Client sends: [ 0 .. 3] client flags [ 0 .. 7] NBD_OPTS_MAGIC [ 8 .. 11] NBD option [12 .. 15] Data length ... Rest of request [ 0 .. 7] NBD_OPTS_MAGIC [ 8 .. 11] Second NBD option [12 .. 15] Data length ... Rest of request */ if (nbd_negotiate_read(client->ioc, &flags, sizeof(flags)) != sizeof(flags)) { LOG("read failed"); return -EIO; } TRACE("Checking client flags"); be32_to_cpus(&flags); if (flags != 0 && flags != NBD_FLAG_C_FIXED_NEWSTYLE) { LOG("Bad client flags received"); return -EIO; } while (1) { int ret; uint32_t tmp, length; uint64_t magic; if (nbd_negotiate_read(client->ioc, &magic, sizeof(magic)) != sizeof(magic)) { LOG("read failed"); return -EINVAL; } TRACE("Checking opts magic"); if (magic != be64_to_cpu(NBD_OPTS_MAGIC)) { LOG("Bad magic received"); return -EINVAL; } if (nbd_negotiate_read(client->ioc, &tmp, sizeof(tmp)) != sizeof(tmp)) { LOG("read failed"); return -EINVAL; } if (nbd_negotiate_read(client->ioc, &length, sizeof(length)) != sizeof(length)) { LOG("read failed"); return -EINVAL; } length = be32_to_cpu(length); TRACE("Checking option"); switch (be32_to_cpu(tmp)) { case NBD_OPT_LIST: ret = nbd_negotiate_handle_list(client, length); if (ret < 0) { return ret; } break; case NBD_OPT_ABORT: return -EINVAL; case NBD_OPT_EXPORT_NAME: return nbd_negotiate_handle_export_name(client, length); default: tmp = be32_to_cpu(tmp); LOG("Unsupported option 0x%x", tmp); nbd_negotiate_send_rep(client->ioc, NBD_REP_ERR_UNSUP, tmp); return -EINVAL; } } }
20,494
0
static void pxb_dev_realize(PCIDevice *dev, Error **errp) { if (pci_bus_is_express(dev->bus)) { error_setg(errp, "pxb devices cannot reside on a PCIe bus"); return; } pxb_dev_realize_common(dev, false, errp); }
20,496
0
static void gen_arith_imm(DisasContext *ctx, uint32_t opc, int rt, int rs, int16_t imm) { target_ulong uimm = (target_long)imm; /* Sign extend to 32/64 bits */ const char *opn = "imm arith"; if (rt == 0 && opc != OPC_ADDI && opc != OPC_DADDI) { /* If no destination, treat it as a NOP. For addi, we must generate the overflow exception when needed. */ MIPS_DEBUG("NOP"); return; } switch (opc) { case OPC_ADDI: { TCGv t0 = tcg_temp_local_new(); TCGv t1 = tcg_temp_new(); TCGv t2 = tcg_temp_new(); int l1 = gen_new_label(); gen_load_gpr(t1, rs); tcg_gen_addi_tl(t0, t1, uimm); tcg_gen_ext32s_tl(t0, t0); tcg_gen_xori_tl(t1, t1, ~uimm); tcg_gen_xori_tl(t2, t0, uimm); tcg_gen_and_tl(t1, t1, t2); tcg_temp_free(t2); tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1); tcg_temp_free(t1); /* operands of same sign, result different sign */ generate_exception(ctx, EXCP_OVERFLOW); gen_set_label(l1); tcg_gen_ext32s_tl(t0, t0); gen_store_gpr(t0, rt); tcg_temp_free(t0); } opn = "addi"; break; case OPC_ADDIU: if (rs != 0) { tcg_gen_addi_tl(cpu_gpr[rt], cpu_gpr[rs], uimm); tcg_gen_ext32s_tl(cpu_gpr[rt], cpu_gpr[rt]); } else { tcg_gen_movi_tl(cpu_gpr[rt], uimm); } opn = "addiu"; break; #if defined(TARGET_MIPS64) case OPC_DADDI: { TCGv t0 = tcg_temp_local_new(); TCGv t1 = tcg_temp_new(); TCGv t2 = tcg_temp_new(); int l1 = gen_new_label(); gen_load_gpr(t1, rs); tcg_gen_addi_tl(t0, t1, uimm); tcg_gen_xori_tl(t1, t1, ~uimm); tcg_gen_xori_tl(t2, t0, uimm); tcg_gen_and_tl(t1, t1, t2); tcg_temp_free(t2); tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1); tcg_temp_free(t1); /* operands of same sign, result different sign */ generate_exception(ctx, EXCP_OVERFLOW); gen_set_label(l1); gen_store_gpr(t0, rt); tcg_temp_free(t0); } opn = "daddi"; break; case OPC_DADDIU: if (rs != 0) { tcg_gen_addi_tl(cpu_gpr[rt], cpu_gpr[rs], uimm); } else { tcg_gen_movi_tl(cpu_gpr[rt], uimm); } opn = "daddiu"; break; #endif } (void)opn; /* avoid a compiler warning */ MIPS_DEBUG("%s %s, %s, " TARGET_FMT_lx, opn, regnames[rt], regnames[rs], uimm); }
20,498
0
CPUCRISState *cpu_cris_init (const char *cpu_model) { CPUCRISState *env; static int tcg_initialized = 0; int i; env = qemu_mallocz(sizeof(CPUCRISState)); if (!env) return NULL; cpu_exec_init(env); cpu_reset(env); if (tcg_initialized) return env; tcg_initialized = 1; cpu_env = tcg_global_reg_new(TCG_TYPE_PTR, TCG_AREG0, "env"); cc_x = tcg_global_mem_new(TCG_TYPE_TL, TCG_AREG0, offsetof(CPUState, cc_x), "cc_x"); cc_src = tcg_global_mem_new(TCG_TYPE_TL, TCG_AREG0, offsetof(CPUState, cc_src), "cc_src"); cc_dest = tcg_global_mem_new(TCG_TYPE_TL, TCG_AREG0, offsetof(CPUState, cc_dest), "cc_dest"); cc_result = tcg_global_mem_new(TCG_TYPE_TL, TCG_AREG0, offsetof(CPUState, cc_result), "cc_result"); cc_op = tcg_global_mem_new(TCG_TYPE_TL, TCG_AREG0, offsetof(CPUState, cc_op), "cc_op"); cc_size = tcg_global_mem_new(TCG_TYPE_TL, TCG_AREG0, offsetof(CPUState, cc_size), "cc_size"); cc_mask = tcg_global_mem_new(TCG_TYPE_TL, TCG_AREG0, offsetof(CPUState, cc_mask), "cc_mask"); env_pc = tcg_global_mem_new(TCG_TYPE_TL, TCG_AREG0, offsetof(CPUState, pc), "pc"); env_btarget = tcg_global_mem_new(TCG_TYPE_TL, TCG_AREG0, offsetof(CPUState, btarget), "btarget"); env_btaken = tcg_global_mem_new(TCG_TYPE_TL, TCG_AREG0, offsetof(CPUState, btaken), "btaken"); for (i = 0; i < 16; i++) { cpu_R[i] = tcg_global_mem_new(TCG_TYPE_TL, TCG_AREG0, offsetof(CPUState, regs[i]), regnames[i]); } for (i = 0; i < 16; i++) { cpu_PR[i] = tcg_global_mem_new(TCG_TYPE_TL, TCG_AREG0, offsetof(CPUState, pregs[i]), pregnames[i]); } TCG_HELPER(helper_raise_exception); TCG_HELPER(helper_dump); TCG_HELPER(helper_tlb_flush_pid); TCG_HELPER(helper_movl_sreg_reg); TCG_HELPER(helper_movl_reg_sreg); TCG_HELPER(helper_rfe); TCG_HELPER(helper_rfn); TCG_HELPER(helper_evaluate_flags_muls); TCG_HELPER(helper_evaluate_flags_mulu); TCG_HELPER(helper_evaluate_flags_mcp); TCG_HELPER(helper_evaluate_flags_alu_4); TCG_HELPER(helper_evaluate_flags_move_4); TCG_HELPER(helper_evaluate_flags_move_2); TCG_HELPER(helper_evaluate_flags); TCG_HELPER(helper_top_evaluate_flags); return env; }
20,499
0
static void pxa2xx_lcdc_dma0_redraw_rot180(PXA2xxLCDState *s, hwaddr addr, int *miny, int *maxy) { DisplaySurface *surface = qemu_console_surface(s->con); int src_width, dest_width; drawfn fn = NULL; if (s->dest_width) { fn = s->line_fn[s->transp][s->bpp]; } if (!fn) { return; } src_width = (s->xres + 3) & ~3; /* Pad to a 4 pixels multiple */ if (s->bpp == pxa_lcdc_19pbpp || s->bpp == pxa_lcdc_18pbpp) { src_width *= 3; } else if (s->bpp > pxa_lcdc_16bpp) { src_width *= 4; } else if (s->bpp > pxa_lcdc_8bpp) { src_width *= 2; } dest_width = s->xres * s->dest_width; *miny = 0; framebuffer_update_display(surface, s->sysmem, addr, s->xres, s->yres, src_width, -dest_width, -s->dest_width, s->invalidated, fn, s->dma_ch[0].palette, miny, maxy); }
20,500
0
static void gen_compute_branch(DisasContext *ctx, uint32_t opc, int r1, int r2 , int32_t constant , int32_t offset) { TCGv temp, temp2; int n; switch (opc) { /* SB-format jumps */ case OPC1_16_SB_J: case OPC1_32_B_J: gen_goto_tb(ctx, 0, ctx->pc + offset * 2); break; case OPC1_32_B_CALL: case OPC1_16_SB_CALL: gen_helper_1arg(call, ctx->next_pc); gen_goto_tb(ctx, 0, ctx->pc + offset * 2); break; case OPC1_16_SB_JZ: gen_branch_condi(ctx, TCG_COND_EQ, cpu_gpr_d[15], 0, offset); break; case OPC1_16_SB_JNZ: gen_branch_condi(ctx, TCG_COND_NE, cpu_gpr_d[15], 0, offset); break; /* SBC-format jumps */ case OPC1_16_SBC_JEQ: gen_branch_condi(ctx, TCG_COND_EQ, cpu_gpr_d[15], constant, offset); break; case OPC1_16_SBC_JNE: gen_branch_condi(ctx, TCG_COND_NE, cpu_gpr_d[15], constant, offset); break; /* SBRN-format jumps */ case OPC1_16_SBRN_JZ_T: temp = tcg_temp_new(); tcg_gen_andi_tl(temp, cpu_gpr_d[15], 0x1u << constant); gen_branch_condi(ctx, TCG_COND_EQ, temp, 0, offset); tcg_temp_free(temp); break; case OPC1_16_SBRN_JNZ_T: temp = tcg_temp_new(); tcg_gen_andi_tl(temp, cpu_gpr_d[15], 0x1u << constant); gen_branch_condi(ctx, TCG_COND_NE, temp, 0, offset); tcg_temp_free(temp); break; /* SBR-format jumps */ case OPC1_16_SBR_JEQ: gen_branch_cond(ctx, TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[15], offset); break; case OPC1_16_SBR_JNE: gen_branch_cond(ctx, TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[15], offset); break; case OPC1_16_SBR_JNZ: gen_branch_condi(ctx, TCG_COND_NE, cpu_gpr_d[r1], 0, offset); break; case OPC1_16_SBR_JNZ_A: gen_branch_condi(ctx, TCG_COND_NE, cpu_gpr_a[r1], 0, offset); break; case OPC1_16_SBR_JGEZ: gen_branch_condi(ctx, TCG_COND_GE, cpu_gpr_d[r1], 0, offset); break; case OPC1_16_SBR_JGTZ: gen_branch_condi(ctx, TCG_COND_GT, cpu_gpr_d[r1], 0, offset); break; case OPC1_16_SBR_JLEZ: gen_branch_condi(ctx, TCG_COND_LE, cpu_gpr_d[r1], 0, offset); break; case OPC1_16_SBR_JLTZ: gen_branch_condi(ctx, TCG_COND_LT, cpu_gpr_d[r1], 0, offset); break; case OPC1_16_SBR_JZ: gen_branch_condi(ctx, TCG_COND_EQ, cpu_gpr_d[r1], 0, offset); break; case OPC1_16_SBR_JZ_A: gen_branch_condi(ctx, TCG_COND_EQ, cpu_gpr_a[r1], 0, offset); break; case OPC1_16_SBR_LOOP: gen_loop(ctx, r1, offset * 2 - 32); break; /* SR-format jumps */ case OPC1_16_SR_JI: tcg_gen_andi_tl(cpu_PC, cpu_gpr_a[r1], 0xfffffffe); tcg_gen_exit_tb(0); break; case OPC2_32_SYS_RET: case OPC2_16_SR_RET: gen_helper_ret(cpu_env); tcg_gen_exit_tb(0); break; /* B-format */ case OPC1_32_B_CALLA: gen_helper_1arg(call, ctx->next_pc); gen_goto_tb(ctx, 0, EA_B_ABSOLUT(offset)); break; case OPC1_32_B_JLA: tcg_gen_movi_tl(cpu_gpr_a[11], ctx->next_pc); /* fall through */ case OPC1_32_B_JA: gen_goto_tb(ctx, 0, EA_B_ABSOLUT(offset)); break; case OPC1_32_B_JL: tcg_gen_movi_tl(cpu_gpr_a[11], ctx->next_pc); gen_goto_tb(ctx, 0, ctx->pc + offset * 2); break; /* BOL format */ case OPCM_32_BRC_EQ_NEQ: if (MASK_OP_BRC_OP2(ctx->opcode) == OPC2_32_BRC_JEQ) { gen_branch_condi(ctx, TCG_COND_EQ, cpu_gpr_d[r1], constant, offset); } else { gen_branch_condi(ctx, TCG_COND_NE, cpu_gpr_d[r1], constant, offset); } break; case OPCM_32_BRC_GE: if (MASK_OP_BRC_OP2(ctx->opcode) == OP2_32_BRC_JGE) { gen_branch_condi(ctx, TCG_COND_GE, cpu_gpr_d[r1], constant, offset); } else { constant = MASK_OP_BRC_CONST4(ctx->opcode); gen_branch_condi(ctx, TCG_COND_GEU, cpu_gpr_d[r1], constant, offset); } break; case OPCM_32_BRC_JLT: if (MASK_OP_BRC_OP2(ctx->opcode) == OPC2_32_BRC_JLT) { gen_branch_condi(ctx, TCG_COND_LT, cpu_gpr_d[r1], constant, offset); } else { constant = MASK_OP_BRC_CONST4(ctx->opcode); 
gen_branch_condi(ctx, TCG_COND_LTU, cpu_gpr_d[r1], constant, offset); } break; case OPCM_32_BRC_JNE: temp = tcg_temp_new(); if (MASK_OP_BRC_OP2(ctx->opcode) == OPC2_32_BRC_JNED) { tcg_gen_mov_tl(temp, cpu_gpr_d[r1]); /* subi is unconditional */ tcg_gen_subi_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 1); gen_branch_condi(ctx, TCG_COND_NE, temp, constant, offset); } else { tcg_gen_mov_tl(temp, cpu_gpr_d[r1]); /* addi is unconditional */ tcg_gen_addi_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 1); gen_branch_condi(ctx, TCG_COND_NE, temp, constant, offset); } tcg_temp_free(temp); break; /* BRN format */ case OPCM_32_BRN_JTT: n = MASK_OP_BRN_N(ctx->opcode); temp = tcg_temp_new(); tcg_gen_andi_tl(temp, cpu_gpr_d[r1], (1 << n)); if (MASK_OP_BRN_OP2(ctx->opcode) == OPC2_32_BRN_JNZ_T) { gen_branch_condi(ctx, TCG_COND_NE, temp, 0, offset); } else { gen_branch_condi(ctx, TCG_COND_EQ, temp, 0, offset); } tcg_temp_free(temp); break; /* BRR Format */ case OPCM_32_BRR_EQ_NEQ: if (MASK_OP_BRR_OP2(ctx->opcode) == OPC2_32_BRR_JEQ) { gen_branch_cond(ctx, TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[r2], offset); } else { gen_branch_cond(ctx, TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[r2], offset); } break; case OPCM_32_BRR_ADDR_EQ_NEQ: if (MASK_OP_BRR_OP2(ctx->opcode) == OPC2_32_BRR_JEQ_A) { gen_branch_cond(ctx, TCG_COND_EQ, cpu_gpr_a[r1], cpu_gpr_a[r2], offset); } else { gen_branch_cond(ctx, TCG_COND_NE, cpu_gpr_a[r1], cpu_gpr_a[r2], offset); } break; case OPCM_32_BRR_GE: if (MASK_OP_BRR_OP2(ctx->opcode) == OPC2_32_BRR_JGE) { gen_branch_cond(ctx, TCG_COND_GE, cpu_gpr_d[r1], cpu_gpr_d[r2], offset); } else { gen_branch_cond(ctx, TCG_COND_GEU, cpu_gpr_d[r1], cpu_gpr_d[r2], offset); } break; case OPCM_32_BRR_JLT: if (MASK_OP_BRR_OP2(ctx->opcode) == OPC2_32_BRR_JLT) { gen_branch_cond(ctx, TCG_COND_LT, cpu_gpr_d[r1], cpu_gpr_d[r2], offset); } else { gen_branch_cond(ctx, TCG_COND_LTU, cpu_gpr_d[r1], cpu_gpr_d[r2], offset); } break; case OPCM_32_BRR_LOOP: if (MASK_OP_BRR_OP2(ctx->opcode) == OPC2_32_BRR_LOOP) { gen_loop(ctx, r1, offset * 2); } else { /* OPC2_32_BRR_LOOPU */ gen_goto_tb(ctx, 0, ctx->pc + offset * 2); } break; case OPCM_32_BRR_JNE: temp = tcg_temp_new(); temp2 = tcg_temp_new(); if (MASK_OP_BRC_OP2(ctx->opcode) == OPC2_32_BRR_JNED) { tcg_gen_mov_tl(temp, cpu_gpr_d[r1]); /* also save r2, in case of r1 == r2, so r2 is not decremented */ tcg_gen_mov_tl(temp2, cpu_gpr_d[r2]); /* subi is unconditional */ tcg_gen_subi_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 1); gen_branch_cond(ctx, TCG_COND_NE, temp, temp2, offset); } else { tcg_gen_mov_tl(temp, cpu_gpr_d[r1]); /* also save r2, in case of r1 == r2, so r2 is not decremented */ tcg_gen_mov_tl(temp2, cpu_gpr_d[r2]); /* addi is unconditional */ tcg_gen_addi_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 1); gen_branch_cond(ctx, TCG_COND_NE, temp, temp2, offset); } tcg_temp_free(temp); tcg_temp_free(temp2); break; case OPCM_32_BRR_JNZ: if (MASK_OP_BRR_OP2(ctx->opcode) == OPC2_32_BRR_JNZ_A) { gen_branch_condi(ctx, TCG_COND_NE, cpu_gpr_a[r1], 0, offset); } else { gen_branch_condi(ctx, TCG_COND_EQ, cpu_gpr_a[r1], 0, offset); } break; default: printf("Branch Error at %x\n", ctx->pc); } ctx->bstate = BS_BRANCH; }
20,501
0
static void ide_init1(IDEBus *bus, int unit, DriveInfo *dinfo) { static int drive_serial = 1; IDEState *s = &bus->ifs[unit]; s->bus = bus; s->unit = unit; s->drive_serial = drive_serial++; s->io_buffer = qemu_blockalign(s->bs, IDE_DMA_BUF_SECTORS*512 + 4); s->io_buffer_total_len = IDE_DMA_BUF_SECTORS*512 + 4; s->smart_selftest_data = qemu_blockalign(s->bs, 512); s->sector_write_timer = qemu_new_timer(vm_clock, ide_sector_write_timer_cb, s); ide_init_drive(s, dinfo, NULL); }
20,502
0
static void scsi_disk_apply_mode_select(SCSIDiskState *s, int page, uint8_t *p) { switch (page) { case MODE_PAGE_CACHING: bdrv_set_enable_write_cache(s->qdev.conf.bs, (p[0] & 4) != 0); break; default: break; } }
20,503
0
int av_opt_set_dict2(void *obj, AVDictionary **options, int search_flags) { AVDictionaryEntry *t = NULL; AVDictionary *tmp = NULL; int ret = 0; if (!options) return 0; while ((t = av_dict_get(*options, "", t, AV_DICT_IGNORE_SUFFIX))) { ret = av_opt_set(obj, t->key, t->value, search_flags); if (ret == AVERROR_OPTION_NOT_FOUND) av_dict_set(&tmp, t->key, t->value, 0); else if (ret < 0) { av_log(obj, AV_LOG_ERROR, "Error setting option %s to value %s.\n", t->key, t->value); break; } ret = 0; } av_dict_free(options); *options = tmp; return ret; }
20,504
0
START_TEST(empty_input) { const char *empty = ""; QObject *obj = qobject_from_json(empty); fail_unless(obj == NULL); }
20,505
0
int net_client_init(const char *device, const char *p) { static const char * const fd_params[] = { "vlan", "name", "fd", NULL }; char buf[1024]; int vlan_id, ret; VLANState *vlan; char *name = NULL; vlan_id = 0; if (get_param_value(buf, sizeof(buf), "vlan", p)) { vlan_id = strtol(buf, NULL, 0); } vlan = qemu_find_vlan(vlan_id); if (get_param_value(buf, sizeof(buf), "name", p)) { name = strdup(buf); } if (!strcmp(device, "nic")) { static const char * const nic_params[] = { "vlan", "name", "macaddr", "model", NULL }; NICInfo *nd; uint8_t *macaddr; int idx = nic_get_free_idx(); if (check_params(nic_params, p) < 0) { fprintf(stderr, "qemu: invalid parameter '%s' in '%s'\n", buf, p); return -1; } if (idx == -1 || nb_nics >= MAX_NICS) { fprintf(stderr, "Too Many NICs\n"); ret = -1; goto out; } nd = &nd_table[idx]; macaddr = nd->macaddr; macaddr[0] = 0x52; macaddr[1] = 0x54; macaddr[2] = 0x00; macaddr[3] = 0x12; macaddr[4] = 0x34; macaddr[5] = 0x56 + idx; if (get_param_value(buf, sizeof(buf), "macaddr", p)) { if (parse_macaddr(macaddr, buf) < 0) { fprintf(stderr, "invalid syntax for ethernet address\n"); ret = -1; goto out; } } if (get_param_value(buf, sizeof(buf), "model", p)) { nd->model = strdup(buf); } nd->vlan = vlan; nd->name = name; nd->used = 1; name = NULL; nb_nics++; vlan->nb_guest_devs++; ret = idx; } else if (!strcmp(device, "none")) { if (*p != '\0') { fprintf(stderr, "qemu: 'none' takes no parameters\n"); return -1; } /* does nothing. It is needed to signal that no network cards are wanted */ ret = 0; } else #ifdef CONFIG_SLIRP if (!strcmp(device, "user")) { static const char * const slirp_params[] = { "vlan", "name", "hostname", "restrict", "ip", NULL }; if (check_params(slirp_params, p) < 0) { fprintf(stderr, "qemu: invalid parameter '%s' in '%s'\n", buf, p); return -1; } if (get_param_value(buf, sizeof(buf), "hostname", p)) { pstrcpy(slirp_hostname, sizeof(slirp_hostname), buf); } if (get_param_value(buf, sizeof(buf), "restrict", p)) { slirp_restrict = (buf[0] == 'y') ? 
1 : 0; } if (get_param_value(buf, sizeof(buf), "ip", p)) { slirp_ip = strdup(buf); } vlan->nb_host_devs++; ret = net_slirp_init(vlan, device, name); } else if (!strcmp(device, "channel")) { long port; char name[20], *devname; struct VMChannel *vmc; port = strtol(p, &devname, 10); devname++; if (port < 1 || port > 65535) { fprintf(stderr, "vmchannel wrong port number\n"); ret = -1; goto out; } vmc = malloc(sizeof(struct VMChannel)); snprintf(name, 20, "vmchannel%ld", port); vmc->hd = qemu_chr_open(name, devname, NULL); if (!vmc->hd) { fprintf(stderr, "qemu: could not open vmchannel device" "'%s'\n", devname); ret = -1; goto out; } vmc->port = port; slirp_add_exec(3, vmc->hd, 4, port); qemu_chr_add_handlers(vmc->hd, vmchannel_can_read, vmchannel_read, NULL, vmc); ret = 0; } else #endif #ifdef _WIN32 if (!strcmp(device, "tap")) { static const char * const tap_params[] = { "vlan", "name", "ifname", NULL }; char ifname[64]; if (check_params(tap_params, p) < 0) { fprintf(stderr, "qemu: invalid parameter '%s' in '%s'\n", buf, p); return -1; } if (get_param_value(ifname, sizeof(ifname), "ifname", p) <= 0) { fprintf(stderr, "tap: no interface name\n"); ret = -1; goto out; } vlan->nb_host_devs++; ret = tap_win32_init(vlan, device, name, ifname); } else #elif defined (_AIX) #else if (!strcmp(device, "tap")) { char ifname[64]; char setup_script[1024], down_script[1024]; int fd; vlan->nb_host_devs++; if (get_param_value(buf, sizeof(buf), "fd", p) > 0) { if (check_params(fd_params, p) < 0) { fprintf(stderr, "qemu: invalid parameter '%s' in '%s'\n", buf, p); return -1; } fd = strtol(buf, NULL, 0); fcntl(fd, F_SETFL, O_NONBLOCK); net_tap_fd_init(vlan, device, name, fd); ret = 0; } else { static const char * const tap_params[] = { "vlan", "name", "ifname", "script", "downscript", NULL }; if (check_params(tap_params, p) < 0) { fprintf(stderr, "qemu: invalid parameter '%s' in '%s'\n", buf, p); return -1; } if (get_param_value(ifname, sizeof(ifname), "ifname", p) <= 0) { ifname[0] = '\0'; } if (get_param_value(setup_script, sizeof(setup_script), "script", p) == 0) { pstrcpy(setup_script, sizeof(setup_script), DEFAULT_NETWORK_SCRIPT); } if (get_param_value(down_script, sizeof(down_script), "downscript", p) == 0) { pstrcpy(down_script, sizeof(down_script), DEFAULT_NETWORK_DOWN_SCRIPT); } ret = net_tap_init(vlan, device, name, ifname, setup_script, down_script); } } else #endif if (!strcmp(device, "socket")) { if (get_param_value(buf, sizeof(buf), "fd", p) > 0) { int fd; if (check_params(fd_params, p) < 0) { fprintf(stderr, "qemu: invalid parameter '%s' in '%s'\n", buf, p); return -1; } fd = strtol(buf, NULL, 0); ret = -1; if (net_socket_fd_init(vlan, device, name, fd, 1)) ret = 0; } else if (get_param_value(buf, sizeof(buf), "listen", p) > 0) { static const char * const listen_params[] = { "vlan", "name", "listen", NULL }; if (check_params(listen_params, p) < 0) { fprintf(stderr, "qemu: invalid parameter '%s' in '%s'\n", buf, p); return -1; } ret = net_socket_listen_init(vlan, device, name, buf); } else if (get_param_value(buf, sizeof(buf), "connect", p) > 0) { static const char * const connect_params[] = { "vlan", "name", "connect", NULL }; if (check_params(connect_params, p) < 0) { fprintf(stderr, "qemu: invalid parameter '%s' in '%s'\n", buf, p); return -1; } ret = net_socket_connect_init(vlan, device, name, buf); } else if (get_param_value(buf, sizeof(buf), "mcast", p) > 0) { static const char * const mcast_params[] = { "vlan", "name", "mcast", NULL }; if (check_params(mcast_params, p) < 0) { 
fprintf(stderr, "qemu: invalid parameter '%s' in '%s'\n", buf, p); return -1; } ret = net_socket_mcast_init(vlan, device, name, buf); } else { fprintf(stderr, "Unknown socket options: %s\n", p); ret = -1; goto out; } vlan->nb_host_devs++; } else #ifdef CONFIG_VDE if (!strcmp(device, "vde")) { static const char * const vde_params[] = { "vlan", "name", "sock", "port", "group", "mode", NULL }; char vde_sock[1024], vde_group[512]; int vde_port, vde_mode; if (check_params(vde_params, p) < 0) { fprintf(stderr, "qemu: invalid parameter '%s' in '%s'\n", buf, p); return -1; } vlan->nb_host_devs++; if (get_param_value(vde_sock, sizeof(vde_sock), "sock", p) <= 0) { vde_sock[0] = '\0'; } if (get_param_value(buf, sizeof(buf), "port", p) > 0) { vde_port = strtol(buf, NULL, 10); } else { vde_port = 0; } if (get_param_value(vde_group, sizeof(vde_group), "group", p) <= 0) { vde_group[0] = '\0'; } if (get_param_value(buf, sizeof(buf), "mode", p) > 0) { vde_mode = strtol(buf, NULL, 8); } else { vde_mode = 0700; } ret = net_vde_init(vlan, device, name, vde_sock, vde_port, vde_group, vde_mode); } else #endif if (!strcmp(device, "dump")) { int len = 65536; if (get_param_value(buf, sizeof(buf), "len", p) > 0) { len = strtol(buf, NULL, 0); } if (!get_param_value(buf, sizeof(buf), "file", p)) { snprintf(buf, sizeof(buf), "qemu-vlan%d.pcap", vlan_id); } ret = net_dump_init(vlan, device, name, buf, len); } else { fprintf(stderr, "Unknown network device: %s\n", device); ret = -1; goto out; } if (ret < 0) { fprintf(stderr, "Could not initialize device '%s'\n", device); } out: if (name) free(name); return ret; }
20,506
0
static int virtio_ccw_device_init(VirtioCcwDevice *dev, VirtIODevice *vdev) { unsigned int cssid = 0; unsigned int ssid = 0; unsigned int schid; unsigned int devno; bool have_devno = false; bool found = false; SubchDev *sch; int ret; int num; DeviceState *parent = DEVICE(dev); sch = g_malloc0(sizeof(SubchDev)); sch->driver_data = dev; dev->sch = sch; dev->indicators = NULL; /* Initialize subchannel structure. */ sch->channel_prog = 0x0; sch->last_cmd_valid = false; sch->thinint_active = false; /* * Use a device number if provided. Otherwise, fall back to subchannel * number. */ if (dev->bus_id) { num = sscanf(dev->bus_id, "%x.%x.%04x", &cssid, &ssid, &devno); if (num == 3) { if ((cssid > MAX_CSSID) || (ssid > MAX_SSID)) { ret = -EINVAL; error_report("Invalid cssid or ssid: cssid %x, ssid %x", cssid, ssid); goto out_err; } /* Enforce use of virtual cssid. */ if (cssid != VIRTUAL_CSSID) { ret = -EINVAL; error_report("cssid %x not valid for virtio devices", cssid); goto out_err; } if (css_devno_used(cssid, ssid, devno)) { ret = -EEXIST; error_report("Device %x.%x.%04x already exists", cssid, ssid, devno); goto out_err; } sch->cssid = cssid; sch->ssid = ssid; sch->devno = devno; have_devno = true; } else { ret = -EINVAL; error_report("Malformed devno parameter '%s'", dev->bus_id); goto out_err; } } /* Find the next free id. */ if (have_devno) { for (schid = 0; schid <= MAX_SCHID; schid++) { if (!css_find_subch(1, cssid, ssid, schid)) { sch->schid = schid; css_subch_assign(cssid, ssid, schid, devno, sch); found = true; break; } } if (!found) { ret = -ENODEV; error_report("No free subchannel found for %x.%x.%04x", cssid, ssid, devno); goto out_err; } trace_virtio_ccw_new_device(cssid, ssid, schid, devno, "user-configured"); } else { cssid = VIRTUAL_CSSID; for (ssid = 0; ssid <= MAX_SSID; ssid++) { for (schid = 0; schid <= MAX_SCHID; schid++) { if (!css_find_subch(1, cssid, ssid, schid)) { sch->cssid = cssid; sch->ssid = ssid; sch->schid = schid; devno = schid; /* * If the devno is already taken, look further in this * subchannel set. */ while (css_devno_used(cssid, ssid, devno)) { if (devno == MAX_SCHID) { devno = 0; } else if (devno == schid - 1) { ret = -ENODEV; error_report("No free devno found"); goto out_err; } else { devno++; } } sch->devno = devno; css_subch_assign(cssid, ssid, schid, devno, sch); found = true; break; } } if (found) { break; } } if (!found) { ret = -ENODEV; error_report("Virtual channel subsystem is full!"); goto out_err; } trace_virtio_ccw_new_device(cssid, ssid, schid, devno, "auto-configured"); } /* Build initial schib. */ css_sch_build_virtual_schib(sch, 0, VIRTIO_CCW_CHPID_TYPE); sch->ccw_cb = virtio_ccw_cb; /* Build senseid data. */ memset(&sch->id, 0, sizeof(SenseId)); sch->id.reserved = 0xff; sch->id.cu_type = VIRTIO_CCW_CU_TYPE; sch->id.cu_model = vdev->device_id; /* Only the first 32 feature bits are used. */ dev->host_features[0] = virtio_bus_get_vdev_features(&dev->bus, dev->host_features[0]); dev->host_features[0] |= 0x1 << VIRTIO_F_NOTIFY_ON_EMPTY; dev->host_features[0] |= 0x1 << VIRTIO_F_BAD_FEATURE; css_generate_sch_crws(sch->cssid, sch->ssid, sch->schid, parent->hotplugged, 1); return 0; out_err: dev->sch = NULL; g_free(sch); return ret; }
20,508
0
static void pc_init_isa(MachineState *machine) { pci_enabled = false; has_acpi_build = false; smbios_defaults = false; gigabyte_align = false; smbios_legacy_mode = true; has_reserved_memory = false; option_rom_has_mr = true; rom_file_has_mr = false; if (!machine->cpu_model) { machine->cpu_model = "486"; } x86_cpu_change_kvm_default("kvm-pv-eoi", NULL); enable_compat_apic_id_mode(); pc_init1(machine, TYPE_I440FX_PCI_HOST_BRIDGE, TYPE_I440FX_PCI_DEVICE); }
20,509
0
void qemu_aio_ref(void *p) { BlockAIOCB *acb = p; acb->refcnt++; }
20,510
0
matroska_read_header (AVFormatContext *s, AVFormatParameters *ap) { MatroskaDemuxContext *matroska = s->priv_data; char *doctype; int version, last_level, res = 0; uint32_t id; matroska->ctx = s; /* First read the EBML header. */ doctype = NULL; if ((res = ebml_read_header(matroska, &doctype, &version)) < 0) return res; if ((doctype == NULL) || strcmp(doctype, "matroska")) { av_log(matroska->ctx, AV_LOG_ERROR, "Wrong EBML doctype ('%s' != 'matroska').\n", doctype ? doctype : "(none)"); if (doctype) av_free(doctype); return AVERROR_NOFMT; } av_free(doctype); if (version > 2) { av_log(matroska->ctx, AV_LOG_ERROR, "Matroska demuxer version 2 too old for file version %d\n", version); return AVERROR_NOFMT; } /* The next thing is a segment. */ while (1) { if (!(id = ebml_peek_id(matroska, &last_level))) return AVERROR_IO; if (id == MATROSKA_ID_SEGMENT) break; /* oi! */ av_log(matroska->ctx, AV_LOG_INFO, "Expected a Segment ID (0x%x), but received 0x%x!\n", MATROSKA_ID_SEGMENT, id); if ((res = ebml_read_skip(matroska)) < 0) return res; } /* We now have a Matroska segment. * Seeks are from the beginning of the segment, * after the segment ID/length. */ if ((res = ebml_read_master(matroska, &id)) < 0) return res; matroska->segment_start = url_ftell(&s->pb); matroska->time_scale = 1000000; /* we've found our segment, start reading the different contents in here */ while (res == 0) { if (!(id = ebml_peek_id(matroska, &matroska->level_up))) { res = AVERROR_IO; break; } else if (matroska->level_up) { matroska->level_up--; break; } switch (id) { /* stream info */ case MATROSKA_ID_INFO: { if ((res = ebml_read_master(matroska, &id)) < 0) break; res = matroska_parse_info(matroska); break; } /* track info headers */ case MATROSKA_ID_TRACKS: { if ((res = ebml_read_master(matroska, &id)) < 0) break; res = matroska_parse_tracks(matroska); break; } /* stream index */ case MATROSKA_ID_CUES: { if (!matroska->index_parsed) { if ((res = ebml_read_master(matroska, &id)) < 0) break; res = matroska_parse_index(matroska); } else res = ebml_read_skip(matroska); break; } /* metadata */ case MATROSKA_ID_TAGS: { if (!matroska->metadata_parsed) { if ((res = ebml_read_master(matroska, &id)) < 0) break; res = matroska_parse_metadata(matroska); } else res = ebml_read_skip(matroska); break; } /* file index (if seekable, seek to Cues/Tags to parse it) */ case MATROSKA_ID_SEEKHEAD: { if ((res = ebml_read_master(matroska, &id)) < 0) break; res = matroska_parse_seekhead(matroska); break; } case MATROSKA_ID_CLUSTER: { /* Do not read the master - this will be done in the next * call to matroska_read_packet. */ res = 1; break; } default: av_log(matroska->ctx, AV_LOG_INFO, "Unknown matroska file header ID 0x%x\n", id); /* fall-through */ case EBML_ID_VOID: res = ebml_read_skip(matroska); break; } if (matroska->level_up) { matroska->level_up--; break; } } /* Have we found a cluster? */ if (ebml_peek_id(matroska, NULL) == MATROSKA_ID_CLUSTER) { int i, j; MatroskaTrack *track; AVStream *st; for (i = 0; i < matroska->num_tracks; i++) { enum CodecID codec_id = CODEC_ID_NONE; uint8_t *extradata = NULL; int extradata_size = 0; int extradata_offset = 0; track = matroska->tracks[i]; /* libavformat does not really support subtitles. * Also apply some sanity checks. */ if ((track->type == MATROSKA_TRACK_TYPE_SUBTITLE) || (track->codec_id == NULL)) continue; for(j=0; codec_tags[j].str; j++){ if(!strncmp(codec_tags[j].str, track->codec_id, strlen(codec_tags[j].str))){ codec_id= codec_tags[j].id; break; } } /* Set the FourCC from the CodecID. 
*/ /* This is the MS compatibility mode which stores a * BITMAPINFOHEADER in the CodecPrivate. */ if (!strcmp(track->codec_id, MATROSKA_CODEC_ID_VIDEO_VFW_FOURCC) && (track->codec_priv_size >= 40) && (track->codec_priv != NULL)) { unsigned char *p; /* Offset of biCompression. Stored in LE. */ p = (unsigned char *)track->codec_priv + 16; ((MatroskaVideoTrack *)track)->fourcc = (p[3] << 24) | (p[2] << 16) | (p[1] << 8) | p[0]; codec_id = codec_get_id(codec_bmp_tags, ((MatroskaVideoTrack *)track)->fourcc); } /* This is the MS compatibility mode which stores a * WAVEFORMATEX in the CodecPrivate. */ else if (!strcmp(track->codec_id, MATROSKA_CODEC_ID_AUDIO_ACM) && (track->codec_priv_size >= 18) && (track->codec_priv != NULL)) { unsigned char *p; uint16_t tag; /* Offset of wFormatTag. Stored in LE. */ p = (unsigned char *)track->codec_priv; tag = (p[1] << 8) | p[0]; codec_id = codec_get_id(codec_wav_tags, tag); } else if (codec_id == CODEC_ID_MPEG1VIDEO || codec_id == CODEC_ID_MPEG2VIDEO || codec_id == CODEC_ID_MPEG4 || codec_id == CODEC_ID_MSMPEG4V3 || codec_id == CODEC_ID_H264) { track->flags |= MATROSKA_TRACK_REORDER; } else if (codec_id == CODEC_ID_AAC && !track->codec_priv_size) { MatroskaAudioTrack *audiotrack = (MatroskaAudioTrack *) track; int profile = matroska_aac_profile(track->codec_id); int sri = matroska_aac_sri(audiotrack->internal_samplerate); extradata = av_malloc(5); if (extradata == NULL) return AVERROR_NOMEM; extradata[0] = (profile << 3) | ((sri&0x0E) >> 1); extradata[1] = ((sri&0x01) << 7) | (audiotrack->channels<<3); if (strstr(track->codec_id, "SBR")) { sri = matroska_aac_sri(audiotrack->samplerate); extradata[2] = 0x56; extradata[3] = 0xE5; extradata[4] = 0x80 | (sri<<3); extradata_size = 5; } else { extradata_size = 2; } track->default_duration = 1024*1000 / audiotrack->internal_samplerate; } else if (codec_id == CODEC_ID_TTA) { MatroskaAudioTrack *audiotrack = (MatroskaAudioTrack *) track; ByteIOContext b; extradata_size = 30; extradata = av_mallocz(extradata_size); if (extradata == NULL) return AVERROR_NOMEM; init_put_byte(&b, extradata, extradata_size, 1, NULL, NULL, NULL, NULL); put_buffer(&b, (uint8_t *) "TTA1", 4); put_le16(&b, 1); put_le16(&b, audiotrack->channels); put_le16(&b, audiotrack->bitdepth); put_le32(&b, audiotrack->samplerate); put_le32(&b, matroska->ctx->duration * audiotrack->samplerate); } else if (codec_id == CODEC_ID_RV10 || codec_id == CODEC_ID_RV20 || codec_id == CODEC_ID_RV30 || codec_id == CODEC_ID_RV40) { extradata_offset = 26; track->codec_priv_size -= extradata_offset; track->flags |= MATROSKA_TRACK_REAL_V; } if (codec_id == CODEC_ID_NONE) { av_log(matroska->ctx, AV_LOG_INFO, "Unknown/unsupported CodecID %s.\n", track->codec_id); } track->stream_index = matroska->num_streams; matroska->num_streams++; st = av_new_stream(s, track->stream_index); if (st == NULL) return AVERROR_NOMEM; av_set_pts_info(st, 64, matroska->time_scale, 1000*1000*1000); /* 64 bit pts in ns */ st->codec->codec_id = codec_id; if (track->default_duration) av_reduce(&st->codec->time_base.num, &st->codec->time_base.den, track->default_duration, 1000, 30000); if(extradata){ st->codec->extradata = extradata; st->codec->extradata_size = extradata_size; } else if(track->codec_priv && track->codec_priv_size > 0){ st->codec->extradata = av_malloc(track->codec_priv_size); if(st->codec->extradata == NULL) return AVERROR_NOMEM; st->codec->extradata_size = track->codec_priv_size; memcpy(st->codec->extradata,track->codec_priv+extradata_offset, track->codec_priv_size); } if 
(track->type == MATROSKA_TRACK_TYPE_VIDEO) { MatroskaVideoTrack *videotrack = (MatroskaVideoTrack *)track; st->codec->codec_type = CODEC_TYPE_VIDEO; st->codec->codec_tag = videotrack->fourcc; st->codec->width = videotrack->pixel_width; st->codec->height = videotrack->pixel_height; if (videotrack->display_width == 0) videotrack->display_width= videotrack->pixel_width; if (videotrack->display_height == 0) videotrack->display_height= videotrack->pixel_height; av_reduce(&st->codec->sample_aspect_ratio.num, &st->codec->sample_aspect_ratio.den, st->codec->height * videotrack->display_width, st->codec-> width * videotrack->display_height, 255); } else if (track->type == MATROSKA_TRACK_TYPE_AUDIO) { MatroskaAudioTrack *audiotrack = (MatroskaAudioTrack *)track; st->codec->codec_type = CODEC_TYPE_AUDIO; st->codec->sample_rate = audiotrack->samplerate; st->codec->channels = audiotrack->channels; } else if (track->type == MATROSKA_TRACK_TYPE_SUBTITLE) { st->codec->codec_type = CODEC_TYPE_SUBTITLE; } /* What do we do with private data? E.g. for Vorbis. */ } res = 0; } return res; }
20,511
0
void spapr_drc_detach(sPAPRDRConnector *drc) { trace_spapr_drc_detach(spapr_drc_index(drc)); drc->unplug_requested = true; if (drc->isolation_state != SPAPR_DR_ISOLATION_STATE_ISOLATED) { trace_spapr_drc_awaiting_isolated(spapr_drc_index(drc)); return; } if (spapr_drc_type(drc) != SPAPR_DR_CONNECTOR_TYPE_PCI && drc->allocation_state != SPAPR_DR_ALLOCATION_STATE_UNUSABLE) { trace_spapr_drc_awaiting_unusable(spapr_drc_index(drc)); return; } spapr_drc_release(drc); }
20,512
0
void cpu_outb(CPUState *env, pio_addr_t addr, uint8_t val) { LOG_IOPORT("outb: %04"FMT_pioaddr" %02"PRIx8"\n", addr, val); ioport_write(0, addr, val); #ifdef CONFIG_KQEMU if (env) env->last_io_time = cpu_get_time_fast(); #endif }
20,513
0
void ff_id3v2_read_dict(AVIOContext *pb, AVDictionary **metadata, const char *magic, ID3v2ExtraMeta **extra_meta) { id3v2_read_internal(pb, metadata, NULL, magic, extra_meta); }
20,514
0
int ff_lpc_calc_coefs(DSPContext *s, const int32_t *samples, int blocksize, int min_order, int max_order, int precision, int32_t coefs[][MAX_LPC_ORDER], int *shift, int use_lpc, int omethod, int max_shift, int zero_shift) { double autoc[MAX_LPC_ORDER+1]; double ref[MAX_LPC_ORDER]; double lpc[MAX_LPC_ORDER][MAX_LPC_ORDER]; int i, j, pass; int opt_order; assert(max_order >= MIN_LPC_ORDER && max_order <= MAX_LPC_ORDER); if(use_lpc == 1){ s->flac_compute_autocorr(samples, blocksize, max_order, autoc); compute_lpc_coefs(autoc, max_order, &lpc[0][0], MAX_LPC_ORDER, 0, 1); for(i=0; i<max_order; i++) ref[i] = fabs(lpc[i][i]); }else{ LLSModel m[2]; double var[MAX_LPC_ORDER+1], weight; for(pass=0; pass<use_lpc-1; pass++){ av_init_lls(&m[pass&1], max_order); weight=0; for(i=max_order; i<blocksize; i++){ for(j=0; j<=max_order; j++) var[j]= samples[i-j]; if(pass){ double eval, inv, rinv; eval= av_evaluate_lls(&m[(pass-1)&1], var+1, max_order-1); eval= (512>>pass) + fabs(eval - var[0]); inv = 1/eval; rinv = sqrt(inv); for(j=0; j<=max_order; j++) var[j] *= rinv; weight += inv; }else weight++; av_update_lls(&m[pass&1], var, 1.0); } av_solve_lls(&m[pass&1], 0.001, 0); } for(i=0; i<max_order; i++){ for(j=0; j<max_order; j++) lpc[i][j]=-m[(pass-1)&1].coeff[i][j]; ref[i]= sqrt(m[(pass-1)&1].variance[i] / weight) * (blocksize - max_order) / 4000; } for(i=max_order-1; i>0; i--) ref[i] = ref[i-1] - ref[i]; } opt_order = max_order; if(omethod == ORDER_METHOD_EST) { opt_order = estimate_best_order(ref, min_order, max_order); i = opt_order-1; quantize_lpc_coefs(lpc[i], i+1, precision, coefs[i], &shift[i], max_shift, zero_shift); } else { for(i=min_order-1; i<max_order; i++) { quantize_lpc_coefs(lpc[i], i+1, precision, coefs[i], &shift[i], max_shift, zero_shift); } } return opt_order; }
20,515
0
void xtensa_cpu_list(FILE *f, fprintf_function cpu_fprintf) { cpu_fprintf(f, "Available CPUs:\n" " Xtensa core\n"); }
20,516
0
static inline void gen_jcc1_noeob(DisasContext *s, int b, int l1) { CCPrepare cc = gen_prepare_cc(s, b, cpu_T[0]); if (cc.mask != -1) { tcg_gen_andi_tl(cpu_T[0], cc.reg, cc.mask); cc.reg = cpu_T[0]; } if (cc.use_reg2) { tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1); } else { tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1); } }
20,517
0
static void *rcu_q_reader(void *arg) { long long j, n_reads_local = 0; struct list_element *el; *(struct rcu_reader_data **)arg = &rcu_reader; atomic_inc(&nthreadsrunning); while (goflag == GOFLAG_INIT) { g_usleep(1000); } while (goflag == GOFLAG_RUN) { rcu_read_lock(); QLIST_FOREACH_RCU(el, &Q_list_head, entry) { j = atomic_read(&el->val); (void)j; n_reads_local++; if (goflag == GOFLAG_STOP) { break; } } rcu_read_unlock(); g_usleep(100); } atomic_add(&n_reads, n_reads_local); return NULL; }
20,519
0
static ssize_t local_llistxattr(FsContext *ctx, const char *path, void *value, size_t size) { ssize_t retval; ssize_t actual_len = 0; char *orig_value, *orig_value_start; char *temp_value, *temp_value_start; ssize_t xattr_len, parsed_len = 0, attr_len; if (ctx->fs_sm != SM_MAPPED) { return llistxattr(rpath(ctx, path), value, size); } /* Get the actual len */ xattr_len = llistxattr(rpath(ctx, path), value, 0); /* Now fetch the xattr and find the actual size */ orig_value = qemu_malloc(xattr_len); xattr_len = llistxattr(rpath(ctx, path), orig_value, xattr_len); /* * For mapped security model drop user.virtfs namespace * from the list */ temp_value = qemu_mallocz(xattr_len); temp_value_start = temp_value; orig_value_start = orig_value; while (xattr_len > parsed_len) { attr_len = strlen(orig_value) + 1; if (strncmp(orig_value, "user.virtfs.", 12) != 0) { /* Copy this entry */ strcat(temp_value, orig_value); temp_value += attr_len; actual_len += attr_len; } parsed_len += attr_len; orig_value += attr_len; } if (!size) { retval = actual_len; goto out; } else if (size >= actual_len) { /* now copy the parsed attribute list back */ memset(value, 0, size); memcpy(value, temp_value_start, actual_len); retval = actual_len; goto out; } errno = ERANGE; retval = -1; out: qemu_free(orig_value_start); qemu_free(temp_value_start); return retval; }
20,521
0
static void idct(uint8_t *dst, int dst_linesize, int src[64]) { int i, j, k; double tmp[64]; for (i = 0; i < 8; i++) { for (j = 0; j < 8; j++) { double sum = 0.0; for (k = 0; k < 8; k++) sum += c[k*8+j] * src[8*i+k]; tmp[8*i+j] = sum; } } for (j = 0; j < 8; j++) { for (i = 0; i < 8; i++) { double sum = 0.0; for (k = 0; k < 8; k++) sum += c[k*8+i]*tmp[8*k+j]; dst[dst_linesize*i + j] = av_clip_uint8((int)floor(sum+0.5)); } } }
20,522
0
static int usb_hid_handle_control(USBDevice *dev, int request, int value, int index, int length, uint8_t *data) { USBHIDState *s = (USBHIDState *)dev; int ret; ret = usb_desc_handle_control(dev, request, value, index, length, data); if (ret >= 0) { return ret; } ret = 0; switch(request) { case DeviceRequest | USB_REQ_GET_STATUS: data[0] = (1 << USB_DEVICE_SELF_POWERED) | (dev->remote_wakeup << USB_DEVICE_REMOTE_WAKEUP); data[1] = 0x00; ret = 2; break; case DeviceOutRequest | USB_REQ_CLEAR_FEATURE: if (value == USB_DEVICE_REMOTE_WAKEUP) { dev->remote_wakeup = 0; } else { goto fail; } ret = 0; break; case DeviceOutRequest | USB_REQ_SET_FEATURE: if (value == USB_DEVICE_REMOTE_WAKEUP) { dev->remote_wakeup = 1; } else { goto fail; } ret = 0; break; case DeviceRequest | USB_REQ_GET_CONFIGURATION: data[0] = 1; ret = 1; break; case DeviceOutRequest | USB_REQ_SET_CONFIGURATION: ret = 0; break; case DeviceRequest | USB_REQ_GET_INTERFACE: data[0] = 0; ret = 1; break; case DeviceOutRequest | USB_REQ_SET_INTERFACE: ret = 0; break; /* hid specific requests */ case InterfaceRequest | USB_REQ_GET_DESCRIPTOR: switch(value >> 8) { case 0x22: if (s->kind == USB_MOUSE) { memcpy(data, qemu_mouse_hid_report_descriptor, sizeof(qemu_mouse_hid_report_descriptor)); ret = sizeof(qemu_mouse_hid_report_descriptor); } else if (s->kind == USB_TABLET) { memcpy(data, qemu_tablet_hid_report_descriptor, sizeof(qemu_tablet_hid_report_descriptor)); ret = sizeof(qemu_tablet_hid_report_descriptor); } else if (s->kind == USB_KEYBOARD) { memcpy(data, qemu_keyboard_hid_report_descriptor, sizeof(qemu_keyboard_hid_report_descriptor)); ret = sizeof(qemu_keyboard_hid_report_descriptor); } break; default: goto fail; } break; case GET_REPORT: if (s->kind == USB_MOUSE) ret = usb_mouse_poll(s, data, length); else if (s->kind == USB_TABLET) ret = usb_tablet_poll(s, data, length); else if (s->kind == USB_KEYBOARD) ret = usb_keyboard_poll(&s->kbd, data, length); break; case SET_REPORT: if (s->kind == USB_KEYBOARD) ret = usb_keyboard_write(&s->kbd, data, length); else goto fail; break; case GET_PROTOCOL: if (s->kind != USB_KEYBOARD) goto fail; ret = 1; data[0] = s->protocol; break; case SET_PROTOCOL: if (s->kind != USB_KEYBOARD) goto fail; ret = 0; s->protocol = value; break; case GET_IDLE: ret = 1; data[0] = s->idle; break; case SET_IDLE: s->idle = (uint8_t) (value >> 8); usb_hid_set_next_idle(s, qemu_get_clock(vm_clock)); ret = 0; break; default: fail: ret = USB_RET_STALL; break; } return ret; }
20,523
0
static int protocol_client_auth(VncState *vs, char *data, size_t len) { /* We only advertise 1 auth scheme at a time, so client * must pick the one we sent. Verify this */ if (data[0] != vs->auth) { /* Reject auth */ VNC_DEBUG("Reject auth %d\n", (int)data[0]); vnc_write_u32(vs, 1); if (vs->minor >= 8) { static const char err[] = "Authentication failed"; vnc_write_u32(vs, sizeof(err)); vnc_write(vs, err, sizeof(err)); } vnc_client_error(vs); } else { /* Accept requested auth */ VNC_DEBUG("Client requested auth %d\n", (int)data[0]); switch (vs->auth) { case VNC_AUTH_NONE: VNC_DEBUG("Accept auth none\n"); vnc_write_u32(vs, 0); /* Accept auth completion */ vnc_read_when(vs, protocol_client_init, 1); break; case VNC_AUTH_VNC: VNC_DEBUG("Start VNC auth\n"); return start_auth_vnc(vs); #if CONFIG_VNC_TLS case VNC_AUTH_VENCRYPT: VNC_DEBUG("Accept VeNCrypt auth\n");; return start_auth_vencrypt(vs); #endif /* CONFIG_VNC_TLS */ default: /* Should not be possible, but just in case */ VNC_DEBUG("Reject auth %d\n", vs->auth); vnc_write_u8(vs, 1); if (vs->minor >= 8) { static const char err[] = "Authentication failed"; vnc_write_u32(vs, sizeof(err)); vnc_write(vs, err, sizeof(err)); } vnc_client_error(vs); } } return 0; }
20,524
0
static int write_elf32_load(DumpState *s, MemoryMapping *memory_mapping, int phdr_index, target_phys_addr_t offset) { Elf32_Phdr phdr; int ret; int endian = s->dump_info.d_endian; memset(&phdr, 0, sizeof(Elf32_Phdr)); phdr.p_type = cpu_convert_to_target32(PT_LOAD, endian); phdr.p_offset = cpu_convert_to_target32(offset, endian); phdr.p_paddr = cpu_convert_to_target32(memory_mapping->phys_addr, endian); if (offset == -1) { /* When the memory is not stored into vmcore, offset will be -1 */ phdr.p_filesz = 0; } else { phdr.p_filesz = cpu_convert_to_target32(memory_mapping->length, endian); } phdr.p_memsz = cpu_convert_to_target32(memory_mapping->length, endian); phdr.p_vaddr = cpu_convert_to_target32(memory_mapping->virt_addr, endian); ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s); if (ret < 0) { dump_error(s, "dump: failed to write program header table.\n"); return -1; } return 0; }
20,525
0
static void rtas_nvram_fetch(PowerPCCPU *cpu, sPAPREnvironment *spapr, uint32_t token, uint32_t nargs, target_ulong args, uint32_t nret, target_ulong rets) { sPAPRNVRAM *nvram = spapr->nvram; hwaddr offset, buffer, len; int alen; void *membuf; if ((nargs != 3) || (nret != 2)) { rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); return; } if (!nvram) { rtas_st(rets, 0, RTAS_OUT_HW_ERROR); rtas_st(rets, 1, 0); return; } offset = rtas_ld(args, 0); buffer = rtas_ld(args, 1); len = rtas_ld(args, 2); if (((offset + len) < offset) || ((offset + len) > nvram->size)) { rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); rtas_st(rets, 1, 0); return; } membuf = cpu_physical_memory_map(buffer, &len, 1); if (nvram->drive) { alen = bdrv_pread(nvram->drive, offset, membuf, len); } else { assert(nvram->buf); memcpy(membuf, nvram->buf + offset, len); alen = len; } cpu_physical_memory_unmap(membuf, len, 1, len); rtas_st(rets, 0, (alen < len) ? RTAS_OUT_HW_ERROR : RTAS_OUT_SUCCESS); rtas_st(rets, 1, (alen < 0) ? 0 : alen); }
20,526
0
void address_space_init(AddressSpace *as, MemoryRegion *root) { memory_region_transaction_begin(); as->root = root; as->current_map = g_new(FlatView, 1); flatview_init(as->current_map); as->ioeventfd_nb = 0; as->ioeventfds = NULL; QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link); as->name = NULL; memory_region_transaction_commit(); address_space_init_dispatch(as); }
20,527
0
static int virtio_net_init_pci(PCIDevice *pci_dev) { VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev); VirtIODevice *vdev; vdev = virtio_net_init(&pci_dev->qdev, &proxy->nic, &proxy->net); vdev->nvectors = proxy->nvectors; virtio_init_pci(proxy, vdev, PCI_VENDOR_ID_REDHAT_QUMRANET, PCI_DEVICE_ID_VIRTIO_NET, PCI_CLASS_NETWORK_ETHERNET, 0x00); /* make the actual value visible */ proxy->nvectors = vdev->nvectors; return 0; }
20,528
0
static void z2_init(MachineState *machine) { const char *cpu_model = machine->cpu_model; const char *kernel_filename = machine->kernel_filename; const char *kernel_cmdline = machine->kernel_cmdline; const char *initrd_filename = machine->initrd_filename; MemoryRegion *address_space_mem = get_system_memory(); uint32_t sector_len = 0x10000; PXA2xxState *mpu; DriveInfo *dinfo; int be; void *z2_lcd; I2CBus *bus; DeviceState *wm; if (!cpu_model) { cpu_model = "pxa270-c5"; } /* Setup CPU & memory */ mpu = pxa270_init(address_space_mem, z2_binfo.ram_size, cpu_model); #ifdef TARGET_WORDS_BIGENDIAN be = 1; #else be = 0; #endif dinfo = drive_get(IF_PFLASH, 0, 0); if (!dinfo && !qtest_enabled()) { fprintf(stderr, "Flash image must be given with the " "'pflash' parameter\n"); exit(1); } if (!pflash_cfi01_register(Z2_FLASH_BASE, NULL, "z2.flash0", Z2_FLASH_SIZE, dinfo ? blk_bs(blk_by_legacy_dinfo(dinfo)) : NULL, sector_len, Z2_FLASH_SIZE / sector_len, 4, 0, 0, 0, 0, be)) { fprintf(stderr, "qemu: Error registering flash memory.\n"); exit(1); } /* setup keypad */ pxa27x_register_keypad(mpu->kp, map, 0x100); /* MMC/SD host */ pxa2xx_mmci_handlers(mpu->mmc, NULL, qdev_get_gpio_in(mpu->gpio, Z2_GPIO_SD_DETECT)); type_register_static(&zipit_lcd_info); type_register_static(&aer915_info); z2_lcd = ssi_create_slave(mpu->ssp[1], "zipit-lcd"); bus = pxa2xx_i2c_bus(mpu->i2c[0]); i2c_create_slave(bus, TYPE_AER915, 0x55); wm = i2c_create_slave(bus, "wm8750", 0x1b); mpu->i2s->opaque = wm; mpu->i2s->codec_out = wm8750_dac_dat; mpu->i2s->codec_in = wm8750_adc_dat; wm8750_data_req_set(wm, mpu->i2s->data_req, mpu->i2s); qdev_connect_gpio_out(mpu->gpio, Z2_GPIO_LCD_CS, qemu_allocate_irq(z2_lcd_cs, z2_lcd, 0)); z2_binfo.kernel_filename = kernel_filename; z2_binfo.kernel_cmdline = kernel_cmdline; z2_binfo.initrd_filename = initrd_filename; z2_binfo.board_id = 0x6dd; arm_load_kernel(mpu->cpu, &z2_binfo); }
20,529
0
static int stdio_fclose(void *opaque) { QEMUFileStdio *s = opaque; int ret = 0; if (s->file->ops->put_buffer || s->file->ops->writev_buffer) { int fd = fileno(s->stdio_file); struct stat st; ret = fstat(fd, &st); if (ret == 0 && S_ISREG(st.st_mode)) { /* * If the file handle is a regular file make sure the * data is flushed to disk before signaling success. */ ret = fsync(fd); if (ret != 0) { ret = -errno; return ret; } } } if (fclose(s->stdio_file) == EOF) { ret = -errno; } g_free(s); return ret; }
20,533
0
static int decode_cce(AACContext * ac, GetBitContext * gb, ChannelElement * che) { int num_gain = 0; int c, g, sfb, ret, idx = 0; int sign; float scale; SingleChannelElement * sce = &che->ch[0]; ChannelCoupling * coup = &che->coup; coup->coupling_point = 2*get_bits1(gb); coup->num_coupled = get_bits(gb, 3); for (c = 0; c <= coup->num_coupled; c++) { num_gain++; coup->type[c] = get_bits1(gb) ? TYPE_CPE : TYPE_SCE; coup->id_select[c] = get_bits(gb, 4); if (coup->type[c] == TYPE_CPE) { coup->ch_select[c] = get_bits(gb, 2); if (coup->ch_select[c] == 3) num_gain++; } else coup->ch_select[c] = 1; } coup->coupling_point += get_bits1(gb); if (coup->coupling_point == 2) { av_log(ac->avccontext, AV_LOG_ERROR, "Independently switched CCE with 'invalid' domain signalled.\n"); memset(coup, 0, sizeof(ChannelCoupling)); return -1; } sign = get_bits(gb, 1); scale = pow(2., pow(2., (int)get_bits(gb, 2) - 3)); if ((ret = decode_ics(ac, sce, gb, 0, 0))) return ret; for (c = 0; c < num_gain; c++) { int cge = 1; int gain = 0; float gain_cache = 1.; if (c) { cge = coup->coupling_point == AFTER_IMDCT ? 1 : get_bits1(gb); gain = cge ? get_vlc2(gb, vlc_scalefactors.table, 7, 3) - 60: 0; gain_cache = pow(scale, gain); } for (g = 0; g < sce->ics.num_window_groups; g++) { for (sfb = 0; sfb < sce->ics.max_sfb; sfb++, idx++) { if (sce->band_type[idx] != ZERO_BT) { if (!cge) { int t = get_vlc2(gb, vlc_scalefactors.table, 7, 3) - 60; if (t) { int s = 1; if (sign) { s -= 2 * (t & 0x1); t >>= 1; } gain += t; gain_cache = pow(scale, gain) * s; } } coup->gain[c][idx] = gain_cache; } } } } return 0; }
20,535
0
void ff_avg_dirac_pixels32_sse2(uint8_t *dst, const uint8_t *src[5], int stride, int h) { if (h&3) { ff_avg_dirac_pixels32_c(dst, src, stride, h); } else { ff_avg_pixels16_sse2(dst , src[0] , stride, h); ff_avg_pixels16_sse2(dst+16, src[0]+16, stride, h); } }
20,537
0
static int ra288_decode_frame(AVCodecContext * avctx, void *data, int *data_size, const uint8_t * buf, int buf_size) { int16_t *out = data; int i, j; RA288Context *ractx = avctx->priv_data; GetBitContext gb; if (buf_size < avctx->block_align) { av_log(avctx, AV_LOG_ERROR, "Error! Input buffer is too small [%d<%d]\n", buf_size, avctx->block_align); return 0; } init_get_bits(&gb, buf, avctx->block_align * 8); for (i=0; i < 32; i++) { float gain = amptable[get_bits(&gb, 3)]; int cb_coef = get_bits(&gb, 6 + (i&1)); decode(ractx, gain, cb_coef); for (j=0; j < 5; j++) *(out++) = 8 * ractx->sp_block[4 - j]; if ((i & 7) == 3) backward_filter(ractx); } *data_size = (char *)out - (char *)data; return avctx->block_align; }
20,538
0
void ff_avg_h264_qpel8_mc31_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride) { avc_luma_hv_qrt_and_aver_dst_8x8_msa(src - 2, src - (stride * 2) + sizeof(uint8_t), stride, dst, stride); }
20,539
0
static void output_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost) { int ret = 0; /* apply the output bitstream filters, if any */ if (ost->nb_bitstream_filters) { int idx; ret = av_bsf_send_packet(ost->bsf_ctx[0], pkt); if (ret < 0) goto finish; idx = 1; while (idx) { /* get a packet from the previous filter up the chain */ ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt); if (ret == AVERROR(EAGAIN)) { ret = 0; idx--; continue; } else if (ret < 0) goto finish; /* HACK! - aac_adtstoasc updates extradata after filtering the first frame when * the api states this shouldn't happen after init(). Propagate it here to the * muxer and to the next filters in the chain to workaround this. * TODO/FIXME - Make aac_adtstoasc use new packet side data instead of changing * par_out->extradata and adapt muxers accordingly to get rid of this. */ if (!(ost->bsf_extradata_updated[idx - 1] & 1)) { ret = avcodec_parameters_copy(ost->st->codecpar, ost->bsf_ctx[idx - 1]->par_out); if (ret < 0) goto finish; ost->bsf_extradata_updated[idx - 1] |= 1; } /* send it to the next filter down the chain or to the muxer */ if (idx < ost->nb_bitstream_filters) { /* HACK/FIXME! - See above */ if (!(ost->bsf_extradata_updated[idx] & 2)) { ret = avcodec_parameters_copy(ost->bsf_ctx[idx]->par_out, ost->bsf_ctx[idx - 1]->par_out); if (ret < 0) goto finish; ost->bsf_extradata_updated[idx] |= 2; } ret = av_bsf_send_packet(ost->bsf_ctx[idx], pkt); if (ret < 0) goto finish; idx++; } else write_packet(of, pkt, ost); } } else write_packet(of, pkt, ost); finish: if (ret < 0 && ret != AVERROR_EOF) { av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output " "packet for stream #%d:%d.\n", ost->file_index, ost->index); if(exit_on_error) exit_program(1); } }
20,540
0
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, uint8_t *buf, int buf_size) { H264Context *h = avctx->priv_data; MpegEncContext *s = &h->s; AVFrame *pict = data; float new_aspect; int buf_index; s->flags= avctx->flags; *data_size = 0; /* no supplementary picture */ if (buf_size == 0) { return 0; } if(s->flags&CODEC_FLAG_TRUNCATED){ int next= find_frame_end(s, buf, buf_size); if( ff_combine_frame(s, next, &buf, &buf_size) < 0 ) return buf_size; //printf("next:%d buf_size:%d last_index:%d\n", next, buf_size, s->parse_context.last_index); } if(s->avctx->extradata_size && s->picture_number==0){ if(0 < decode_nal_units(h, s->avctx->extradata, s->avctx->extradata_size) ) return -1; } buf_index=decode_nal_units(h, buf, buf_size); if(buf_index < 0) return -1; //FIXME do something with unavailable reference frames // if(ret==FRAME_SKIPED) return get_consumed_bytes(s, buf_index, buf_size); #if 0 if(s->pict_type==B_TYPE || s->low_delay){ *pict= *(AVFrame*)&s->current_picture; } else { *pict= *(AVFrame*)&s->last_picture; } #endif *pict= *(AVFrame*)&s->current_picture; //FIXME assert(pict->data[0]); //printf("out %d\n", (int)pict->data[0]); if(avctx->debug&FF_DEBUG_QP){ int8_t *qtab= pict->qscale_table; int x,y; for(y=0; y<s->mb_height; y++){ for(x=0; x<s->mb_width; x++){ printf("%2d ", qtab[x + y*s->mb_width]); } printf("\n"); } printf("\n"); } #if 0 //? /* Return the Picture timestamp as the frame number */ /* we substract 1 because it is added on utils.c */ avctx->frame_number = s->picture_number - 1; #endif #if 0 /* dont output the last pic after seeking */ if(s->last_picture_ptr || s->low_delay) //Note this isnt a issue as a IDR pic should flush teh buffers #endif *data_size = sizeof(AVFrame); return get_consumed_bytes(s, buf_index, buf_size); }
20,542
0
static int slice_decode_thread(AVCodecContext *c, void *arg){ MpegEncContext *s= *(void**)arg; const uint8_t *buf= s->gb.buffer; int mb_y= s->start_mb_y; const int field_pic= s->picture_structure != PICT_FRAME; s->error_count= (3*(s->end_mb_y - s->start_mb_y)*s->mb_width) >> field_pic; for(;;){ uint32_t start_code; int ret; ret= mpeg_decode_slice((Mpeg1Context*)s, mb_y, &buf, s->gb.buffer_end - buf); emms_c(); //av_log(c, AV_LOG_DEBUG, "ret:%d resync:%d/%d mb:%d/%d ts:%d/%d ec:%d\n", //ret, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, s->start_mb_y, s->end_mb_y, s->error_count); if(ret < 0){ if(s->resync_mb_x>=0 && s->resync_mb_y>=0) ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, AC_ERROR|DC_ERROR|MV_ERROR); }else{ ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, AC_END|DC_END|MV_END); } if(s->mb_y == s->end_mb_y) return 0; start_code= -1; buf = ff_find_start_code(buf, s->gb.buffer_end, &start_code); mb_y= (start_code - SLICE_MIN_START_CODE) << field_pic; if (s->picture_structure == PICT_BOTTOM_FIELD) mb_y++; if(mb_y < 0 || mb_y >= s->end_mb_y) return -1; } return 0; //not reached }
20,543
0
void ff_put_h264_qpel16_mc12_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride) { avc_luma_midh_qrt_16w_msa(src - (2 * stride) - 2, stride, dst, stride, 16, 0); }
20,544
0
static void filter_mb_edgech( H264Context *h, uint8_t *pix, int stride, int16_t bS[4], int qp ) { int i; const int index_a = qp + h->slice_alpha_c0_offset; const int alpha = (alpha_table+52)[index_a]; const int beta = (beta_table+52)[qp + h->slice_beta_offset]; if( bS[0] < 4 ) { int8_t tc[4]; for(i=0; i<4; i++) tc[i] = bS[i] ? (tc0_table+52)[index_a][bS[i] - 1] + 1 : 0; h->s.dsp.h264_v_loop_filter_chroma(pix, stride, alpha, beta, tc); } else { h->s.dsp.h264_v_loop_filter_chroma_intra(pix, stride, alpha, beta); } }
20,545
0
static int vmdk_open_vmdk4(BlockDriverState *bs, BlockDriverState *file, int flags, Error **errp) { int ret; uint32_t magic; uint32_t l1_size, l1_entry_sectors; VMDK4Header header; VmdkExtent *extent; BDRVVmdkState *s = bs->opaque; int64_t l1_backup_offset = 0; ret = bdrv_pread(file, sizeof(magic), &header, sizeof(header)); if (ret < 0) { error_setg_errno(errp, -ret, "Could not read header from file '%s'", file->filename); return -EINVAL; } if (header.capacity == 0) { uint64_t desc_offset = le64_to_cpu(header.desc_offset); if (desc_offset) { char *buf = vmdk_read_desc(file, desc_offset << 9, errp); if (!buf) { return -EINVAL; } ret = vmdk_open_desc_file(bs, flags, buf, errp); g_free(buf); return ret; } } if (!s->create_type) { s->create_type = g_strdup("monolithicSparse"); } if (le64_to_cpu(header.gd_offset) == VMDK4_GD_AT_END) { /* * The footer takes precedence over the header, so read it in. The * footer starts at offset -1024 from the end: One sector for the * footer, and another one for the end-of-stream marker. */ struct { struct { uint64_t val; uint32_t size; uint32_t type; uint8_t pad[512 - 16]; } QEMU_PACKED footer_marker; uint32_t magic; VMDK4Header header; uint8_t pad[512 - 4 - sizeof(VMDK4Header)]; struct { uint64_t val; uint32_t size; uint32_t type; uint8_t pad[512 - 16]; } QEMU_PACKED eos_marker; } QEMU_PACKED footer; ret = bdrv_pread(file, bs->file->total_sectors * 512 - 1536, &footer, sizeof(footer)); if (ret < 0) { error_setg_errno(errp, -ret, "Failed to read footer"); return ret; } /* Some sanity checks for the footer */ if (be32_to_cpu(footer.magic) != VMDK4_MAGIC || le32_to_cpu(footer.footer_marker.size) != 0 || le32_to_cpu(footer.footer_marker.type) != MARKER_FOOTER || le64_to_cpu(footer.eos_marker.val) != 0 || le32_to_cpu(footer.eos_marker.size) != 0 || le32_to_cpu(footer.eos_marker.type) != MARKER_END_OF_STREAM) { error_setg(errp, "Invalid footer"); return -EINVAL; } header = footer.header; } if (le32_to_cpu(header.version) > 3) { char buf[64]; snprintf(buf, sizeof(buf), "VMDK version %" PRId32, le32_to_cpu(header.version)); error_set(errp, QERR_UNKNOWN_BLOCK_FORMAT_FEATURE, bdrv_get_device_or_node_name(bs), "vmdk", buf); return -ENOTSUP; } else if (le32_to_cpu(header.version) == 3 && (flags & BDRV_O_RDWR)) { /* VMware KB 2064959 explains that version 3 added support for * persistent changed block tracking (CBT), and backup software can * read it as version=1 if it doesn't care about the changed area * information. So we are safe to enable read only. 
*/ error_setg(errp, "VMDK version 3 must be read only"); return -EINVAL; } if (le32_to_cpu(header.num_gtes_per_gt) > 512) { error_setg(errp, "L2 table size too big"); return -EINVAL; } l1_entry_sectors = le32_to_cpu(header.num_gtes_per_gt) * le64_to_cpu(header.granularity); if (l1_entry_sectors == 0) { error_setg(errp, "L1 entry size is invalid"); return -EINVAL; } l1_size = (le64_to_cpu(header.capacity) + l1_entry_sectors - 1) / l1_entry_sectors; if (le32_to_cpu(header.flags) & VMDK4_FLAG_RGD) { l1_backup_offset = le64_to_cpu(header.rgd_offset) << 9; } if (bdrv_nb_sectors(file) < le64_to_cpu(header.grain_offset)) { error_setg(errp, "File truncated, expecting at least %" PRId64 " bytes", (int64_t)(le64_to_cpu(header.grain_offset) * BDRV_SECTOR_SIZE)); return -EINVAL; } ret = vmdk_add_extent(bs, file, false, le64_to_cpu(header.capacity), le64_to_cpu(header.gd_offset) << 9, l1_backup_offset, l1_size, le32_to_cpu(header.num_gtes_per_gt), le64_to_cpu(header.granularity), &extent, errp); if (ret < 0) { return ret; } extent->compressed = le16_to_cpu(header.compressAlgorithm) == VMDK4_COMPRESSION_DEFLATE; if (extent->compressed) { g_free(s->create_type); s->create_type = g_strdup("streamOptimized"); } extent->has_marker = le32_to_cpu(header.flags) & VMDK4_FLAG_MARKER; extent->version = le32_to_cpu(header.version); extent->has_zero_grain = le32_to_cpu(header.flags) & VMDK4_FLAG_ZERO_GRAIN; ret = vmdk_init_tables(bs, extent, errp); if (ret) { /* free extent allocated by vmdk_add_extent */ vmdk_free_last_extent(bs); } return ret; }
20,546
0
dma_write(void *opaque, target_phys_addr_t addr, uint64_t val64, unsigned int size) { struct fs_dma_ctrl *ctrl = opaque; uint32_t value = val64; int c; if (size != 4) { dma_winvalid(opaque, addr, value); } /* Make addr relative to this channel and bounded to nr regs. */ c = fs_channel(addr); addr &= 0xff; addr >>= 2; switch (addr) { case RW_DATA: ctrl->channels[c].regs[addr] = value; break; case RW_CFG: ctrl->channels[c].regs[addr] = value; dma_update_state(ctrl, c); break; case RW_CMD: /* continue. */ if (value & ~1) printf("Invalid store to ch=%d RW_CMD %x\n", c, value); ctrl->channels[c].regs[addr] = value; channel_continue(ctrl, c); break; case RW_SAVED_DATA: case RW_SAVED_DATA_BUF: case RW_GROUP: case RW_GROUP_DOWN: ctrl->channels[c].regs[addr] = value; break; case RW_ACK_INTR: case RW_INTR_MASK: ctrl->channels[c].regs[addr] = value; channel_update_irq(ctrl, c); if (addr == RW_ACK_INTR) ctrl->channels[c].regs[RW_ACK_INTR] = 0; break; case RW_STREAM_CMD: if (value & ~1023) printf("Invalid store to ch=%d " "RW_STREAMCMD %x\n", c, value); ctrl->channels[c].regs[addr] = value; D(printf("stream_cmd ch=%d\n", c)); channel_stream_cmd(ctrl, c, value); break; default: D(printf ("%s c=%d " TARGET_FMT_plx "\n", __func__, c, addr)); break; } }
20,547
0
static int scsi_generic_initfn(SCSIDevice *dev) { SCSIGenericState *s = DO_UPCAST(SCSIGenericState, qdev, dev); int sg_version; struct sg_scsi_id scsiid; if (!s->qdev.conf.dinfo || !s->qdev.conf.dinfo->bdrv) { error_report("scsi-generic: drive property not set"); return -1; } s->bs = s->qdev.conf.dinfo->bdrv; /* check we are really using a /dev/sg* file */ if (!bdrv_is_sg(s->bs)) { error_report("scsi-generic: not /dev/sg*"); return -1; } /* check we are using a driver managing SG_IO (version 3 and after */ if (bdrv_ioctl(s->bs, SG_GET_VERSION_NUM, &sg_version) < 0 || sg_version < 30000) { error_report("scsi-generic: scsi generic interface too old"); return -1; } /* get LUN of the /dev/sg? */ if (bdrv_ioctl(s->bs, SG_GET_SCSI_ID, &scsiid)) { error_report("scsi-generic: SG_GET_SCSI_ID ioctl failed"); return -1; } /* define device state */ s->lun = scsiid.lun; DPRINTF("LUN %d\n", s->lun); s->qdev.type = scsiid.scsi_type; DPRINTF("device type %d\n", s->qdev.type); if (s->qdev.type == TYPE_TAPE) { s->qdev.blocksize = get_stream_blocksize(s->bs); if (s->qdev.blocksize == -1) s->qdev.blocksize = 0; } else { s->qdev.blocksize = get_blocksize(s->bs); /* removable media returns 0 if not present */ if (s->qdev.blocksize <= 0) { if (s->qdev.type == TYPE_ROM || s->qdev.type == TYPE_WORM) s->qdev.blocksize = 2048; else s->qdev.blocksize = 512; } } DPRINTF("block size %d\n", s->qdev.blocksize); s->driver_status = 0; memset(s->sensebuf, 0, sizeof(s->sensebuf)); return 0; }
20,551
0
void memory_region_add_subregion_overlap(MemoryRegion *mr, hwaddr offset, MemoryRegion *subregion, unsigned priority) { subregion->may_overlap = true; subregion->priority = priority; memory_region_add_subregion_common(mr, offset, subregion); }
20,552
0
static int mpeg_decode_frame(AVCodecContext *avctx, void *data, int *data_size, const uint8_t *buf, int buf_size) { Mpeg1Context *s = avctx->priv_data; AVFrame *picture = data; MpegEncContext *s2 = &s->mpeg_enc_ctx; dprintf(avctx, "fill_buffer\n"); if (buf_size == 0 || (buf_size == 4 && AV_RB32(buf) == SEQ_END_CODE)) { /* special case for last picture */ if (s2->low_delay==0 && s2->next_picture_ptr) { *picture= *(AVFrame*)s2->next_picture_ptr; s2->next_picture_ptr= NULL; *data_size = sizeof(AVFrame); } return buf_size; } if(s2->flags&CODEC_FLAG_TRUNCATED){ int next= ff_mpeg1_find_frame_end(&s2->parse_context, buf, buf_size); if( ff_combine_frame(&s2->parse_context, next, (const uint8_t **)&buf, &buf_size) < 0 ) return buf_size; } #if 0 if (s->repeat_field % 2 == 1) { s->repeat_field++; //fprintf(stderr,"\nRepeating last frame: %d -> %d! pict: %d %d", avctx->frame_number-1, avctx->frame_number, // s2->picture_number, s->repeat_field); if (avctx->flags & CODEC_FLAG_REPEAT_FIELD) { *data_size = sizeof(AVPicture); goto the_end; } } #endif if(s->mpeg_enc_ctx_allocated==0 && avctx->codec_tag == AV_RL32("VCR2")) vcr2_init_sequence(avctx); s->slice_count= 0; if(avctx->extradata && !avctx->frame_number) decode_chunks(avctx, picture, data_size, avctx->extradata, avctx->extradata_size); return decode_chunks(avctx, picture, data_size, buf, buf_size); }
20,554
0
void bdrv_parent_drained_begin(BlockDriverState *bs) { BdrvChild *c; QLIST_FOREACH(c, &bs->parents, next_parent) { if (c->role->drained_begin) { c->role->drained_begin(c); } } }
20,555
0
static int qemu_event_init(void) { int err; int fds[2]; err = qemu_eventfd(fds); if (err == -1) { return -errno; } err = fcntl_setfl(fds[0], O_NONBLOCK); if (err < 0) { goto fail; } err = fcntl_setfl(fds[1], O_NONBLOCK); if (err < 0) { goto fail; } qemu_set_fd_handler2(fds[0], NULL, qemu_event_read, NULL, (void *)(intptr_t)fds[0]); io_thread_fd = fds[1]; return 0; fail: close(fds[0]); close(fds[1]); return err; }
20,556
0
static void machine_set_kernel_irqchip(Object *obj, bool value, Error **errp) { MachineState *ms = MACHINE(obj); ms->kernel_irqchip = value; }
20,558
0
static char *pcibus_get_dev_path(DeviceState *dev) { PCIDevice *d = container_of(dev, PCIDevice, qdev); PCIDevice *t; int slot_depth; /* Path format: Domain:00:Slot.Function:Slot.Function....:Slot.Function. * 00 is added here to make this format compatible with * domain:Bus:Slot.Func for systems without nested PCI bridges. * Slot.Function list specifies the slot and function numbers for all * devices on the path from root to the specific device. */ int domain_len = strlen("DDDD:00"); int slot_len = strlen(":SS.F"); int path_len; char *path, *p; /* Calculate # of slots on path between device and root. */; slot_depth = 0; for (t = d; t; t = t->bus->parent_dev) { ++slot_depth; } path_len = domain_len + slot_len * slot_depth; /* Allocate memory, fill in the terminating null byte. */ path = malloc(path_len + 1 /* For '\0' */); path[path_len] = '\0'; /* First field is the domain. */ snprintf(path, domain_len, "%04x:00", pci_find_domain(d->bus)); /* Fill in slot numbers. We walk up from device to root, so need to print * them in the reverse order, last to first. */ p = path + path_len; for (t = d; t; t = t->bus->parent_dev) { p -= slot_len; snprintf(p, slot_len, ":%02x.%x", PCI_SLOT(t->devfn), PCI_FUNC(d->devfn)); } return path; }
20,559
0
int virtio_scsi_read_many(VDev *vdev, ulong sector, void *load_addr, int sec_num) { int sector_count; int f = vdev->blk_factor; unsigned int data_size; do { sector_count = MIN_NON_ZERO(sec_num, vdev->config.scsi.max_sectors); data_size = sector_count * virtio_get_block_size() * f; if (!scsi_read_10(vdev, sector * f, sector_count * f, load_addr, data_size)) { virtio_scsi_verify_response(&resp, "virtio-scsi:read_many"); } load_addr += data_size; sector += sector_count; sec_num -= sector_count; } while (sec_num > 0); return 0; }
20,560
0
static int nbd_negotiate_send_rep_len(QIOChannel *ioc, uint32_t type, uint32_t opt, uint32_t len, Error **errp) { uint64_t magic; trace_nbd_negotiate_send_rep_len(opt, type, len); magic = cpu_to_be64(NBD_REP_MAGIC); if (nbd_write(ioc, &magic, sizeof(magic), errp) < 0) { error_prepend(errp, "write failed (rep magic): "); return -EINVAL; } opt = cpu_to_be32(opt); if (nbd_write(ioc, &opt, sizeof(opt), errp) < 0) { error_prepend(errp, "write failed (rep opt): "); return -EINVAL; } type = cpu_to_be32(type); if (nbd_write(ioc, &type, sizeof(type), errp) < 0) { error_prepend(errp, "write failed (rep type): "); return -EINVAL; } len = cpu_to_be32(len); if (nbd_write(ioc, &len, sizeof(len), errp) < 0) { error_prepend(errp, "write failed (rep data length): "); return -EINVAL; } return 0; }
20,561
0
uint16_t cpu_inw(pio_addr_t addr) { uint16_t val; val = ioport_read(1, addr); trace_cpu_in(addr, val); LOG_IOPORT("inw : %04"FMT_pioaddr" %04"PRIx16"\n", addr, val); return val; }
20,563
0
static int local_chmod(FsContext *ctx, const char *path, mode_t mode) { return chmod(rpath(ctx, path), mode); }
20,564
0
int ff_wmv2_decode_picture_header(MpegEncContext * s) { Wmv2Context * const w= (Wmv2Context*)s; int code; #if 0 { int i; for(i=0; i<s->gb.size*8; i++) printf("%d", get_bits1(&s->gb)); // get_bits1(&s->gb); printf("END\n"); return -1; } #endif if(s->picture_number==0) decode_ext_header(w); s->pict_type = get_bits1(&s->gb) + 1; if(s->pict_type == I_TYPE){ code = get_bits(&s->gb, 7); av_log(s->avctx, AV_LOG_DEBUG, "I7:%X/\n", code); } s->chroma_qscale= s->qscale = get_bits(&s->gb, 5); if(s->qscale < 0) return -1; return 0; }
20,565
0
static int vhdx_update_header(BlockDriverState *bs, BDRVVHDXState *s, bool generate_data_write_guid, MSGUID *log_guid) { int ret = 0; int hdr_idx = 0; uint64_t header_offset = VHDX_HEADER1_OFFSET; VHDXHeader *active_header; VHDXHeader *inactive_header; /* operate on the non-current header */ if (s->curr_header == 0) { hdr_idx = 1; header_offset = VHDX_HEADER2_OFFSET; } active_header = s->headers[s->curr_header]; inactive_header = s->headers[hdr_idx]; inactive_header->sequence_number = active_header->sequence_number + 1; /* a new file guid must be generated before any file write, including * headers */ inactive_header->file_write_guid = s->session_guid; /* a new data guid only needs to be generated before any guest-visible * writes (i.e. something observable via virtual disk read) */ if (generate_data_write_guid) { vhdx_guid_generate(&inactive_header->data_write_guid); } /* update the log guid if present */ if (log_guid) { inactive_header->log_guid = *log_guid; } vhdx_write_header(bs->file, inactive_header, header_offset, true); if (ret < 0) { goto exit; } s->curr_header = hdr_idx; exit: return ret; }
20,566
0
static void cirrus_invalidate_region(CirrusVGAState * s, int off_begin, int off_pitch, int bytesperline, int lines) { int y; int off_cur; int off_cur_end; if (off_pitch < 0) { off_begin -= bytesperline - 1; } for (y = 0; y < lines; y++) { off_cur = off_begin; off_cur_end = (off_cur + bytesperline) & s->cirrus_addr_mask; assert(off_cur_end >= off_cur); memory_region_set_dirty(&s->vga.vram, off_cur, off_cur_end - off_cur); off_begin += off_pitch; } }
20,567
0
static void msmouse_event(void *opaque, int dx, int dy, int dz, int buttons_state) { CharDriverState *chr = (CharDriverState *)opaque; unsigned char bytes[4] = { 0x40, 0x00, 0x00, 0x00 }; /* Movement deltas */ bytes[0] |= (MSMOUSE_HI2(dy) << 2) | MSMOUSE_HI2(dx); bytes[1] |= MSMOUSE_LO6(dx); bytes[2] |= MSMOUSE_LO6(dy); /* Buttons */ bytes[0] |= (buttons_state & 0x01 ? 0x20 : 0x00); bytes[0] |= (buttons_state & 0x02 ? 0x10 : 0x00); bytes[3] |= (buttons_state & 0x04 ? 0x20 : 0x00); /* We always send the packet of, so that we do not have to keep track of previous state of the middle button. This can potentially confuse some very old drivers for two button mice though. */ qemu_chr_be_write(chr, bytes, 4); }
20,568
0
static void gen_load(DisasContext *dc, TCGv dst, TCGv addr, unsigned int size, int sign) { int mem_index = cpu_mmu_index(dc->env); /* If we get a fault on a delayslot we must keep the jmp state in the cpu-state to be able to re-execute the jmp. */ if (dc->delayed_branch == 1) cris_store_direct_jmp(dc); if (size == 1) { if (sign) tcg_gen_qemu_ld8s(dst, addr, mem_index); else tcg_gen_qemu_ld8u(dst, addr, mem_index); } else if (size == 2) { if (sign) tcg_gen_qemu_ld16s(dst, addr, mem_index); else tcg_gen_qemu_ld16u(dst, addr, mem_index); } else if (size == 4) { tcg_gen_qemu_ld32u(dst, addr, mem_index); } else if (size == 8) { tcg_gen_qemu_ld64(dst, addr, mem_index); } }
20,569
0
static void pci_apb_iowritew (void *opaque, target_phys_addr_t addr, uint32_t val) { cpu_outw(addr & IOPORTS_MASK, bswap16(val)); }
20,570
0
static struct ioreq *ioreq_start(struct XenBlkDev *blkdev) { struct ioreq *ioreq = NULL; if (LIST_EMPTY(&blkdev->freelist)) { if (blkdev->requests_total >= max_requests) goto out; /* allocate new struct */ ioreq = qemu_mallocz(sizeof(*ioreq)); ioreq->blkdev = blkdev; blkdev->requests_total++; qemu_iovec_init(&ioreq->v, BLKIF_MAX_SEGMENTS_PER_REQUEST); } else { /* get one from freelist */ ioreq = LIST_FIRST(&blkdev->freelist); LIST_REMOVE(ioreq, list); qemu_iovec_reset(&ioreq->v); } LIST_INSERT_HEAD(&blkdev->inflight, ioreq, list); blkdev->requests_inflight++; out: return ioreq; }
20,571
0
static void pollfds_poll(GArray *pollfds, int nfds, fd_set *rfds, fd_set *wfds, fd_set *xfds) { int i; for (i = 0; i < pollfds->len; i++) { GPollFD *pfd = &g_array_index(pollfds, GPollFD, i); int fd = pfd->fd; int revents = 0; if (FD_ISSET(fd, rfds)) { revents |= G_IO_IN; } if (FD_ISSET(fd, wfds)) { revents |= G_IO_OUT; } if (FD_ISSET(fd, xfds)) { revents |= G_IO_PRI; } pfd->revents = revents & pfd->events; } }
20,572
0
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr) { return addr; }
20,573
0
static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs) { lhs->selector = rhs->selector; lhs->base = rhs->base; lhs->limit = rhs->limit; lhs->flags = (rhs->type << DESC_TYPE_SHIFT) | (rhs->present * DESC_P_MASK) | (rhs->dpl << DESC_DPL_SHIFT) | (rhs->db << DESC_B_SHIFT) | (rhs->s * DESC_S_MASK) | (rhs->l << DESC_L_SHIFT) | (rhs->g * DESC_G_MASK) | (rhs->avl * DESC_AVL_MASK); }
20,574
1
static always_inline int get_bat (CPUState *env, mmu_ctx_t *ctx, target_ulong virtual, int rw, int type) { target_ulong *BATlt, *BATut, *BATu, *BATl; target_ulong base, BEPIl, BEPIu, bl; int i, pp, pr; int ret = -1; #if defined (DEBUG_BATS) if (loglevel != 0) { fprintf(logfile, "%s: %cBAT v 0x" ADDRX "\n", __func__, type == ACCESS_CODE ? 'I' : 'D', virtual); } #endif pr = msr_pr; switch (type) { case ACCESS_CODE: BATlt = env->IBAT[1]; BATut = env->IBAT[0]; break; default: BATlt = env->DBAT[1]; BATut = env->DBAT[0]; break; } #if defined (DEBUG_BATS) if (loglevel != 0) { fprintf(logfile, "%s...: %cBAT v 0x" ADDRX "\n", __func__, type == ACCESS_CODE ? 'I' : 'D', virtual); } #endif base = virtual & 0xFFFC0000; for (i = 0; i < 4; i++) { BATu = &BATut[i]; BATl = &BATlt[i]; BEPIu = *BATu & 0xF0000000; BEPIl = *BATu & 0x0FFE0000; bl = (*BATu & 0x00001FFC) << 15; #if defined (DEBUG_BATS) if (loglevel != 0) { fprintf(logfile, "%s: %cBAT%d v 0x" ADDRX " BATu 0x" ADDRX " BATl 0x" ADDRX "\n", __func__, type == ACCESS_CODE ? 'I' : 'D', i, virtual, *BATu, *BATl); } #endif if ((virtual & 0xF0000000) == BEPIu && ((virtual & 0x0FFE0000) & ~bl) == BEPIl) { /* BAT matches */ if (((pr == 0) && (*BATu & 0x00000002)) || ((pr != 0) && (*BATu & 0x00000001))) { /* Get physical address */ ctx->raddr = (*BATl & 0xF0000000) | ((virtual & 0x0FFE0000 & bl) | (*BATl & 0x0FFE0000)) | (virtual & 0x0001F000); /* Compute access rights */ pp = *BATl & 0x00000003; ctx->prot = 0; if (pp != 0) { ctx->prot = PAGE_READ | PAGE_EXEC; if (pp == 0x2) ctx->prot |= PAGE_WRITE; } ret = check_prot(ctx->prot, rw, type); #if defined (DEBUG_BATS) if (ret == 0 && loglevel != 0) { fprintf(logfile, "BAT %d match: r 0x" PADDRX " prot=%c%c\n", i, ctx->raddr, ctx->prot & PAGE_READ ? 'R' : '-', ctx->prot & PAGE_WRITE ? 'W' : '-'); } #endif break; } } } if (ret < 0) { #if defined (DEBUG_BATS) if (loglevel != 0) { fprintf(logfile, "no BAT match for 0x" ADDRX ":\n", virtual); for (i = 0; i < 4; i++) { BATu = &BATut[i]; BATl = &BATlt[i]; BEPIu = *BATu & 0xF0000000; BEPIl = *BATu & 0x0FFE0000; bl = (*BATu & 0x00001FFC) << 15; fprintf(logfile, "%s: %cBAT%d v 0x" ADDRX " BATu 0x" ADDRX " BATl 0x" ADDRX " \n\t" "0x" ADDRX " 0x" ADDRX " 0x" ADDRX "\n", __func__, type == ACCESS_CODE ? 'I' : 'D', i, virtual, *BATu, *BATl, BEPIu, BEPIl, bl); } } #endif } /* No hit */ return ret; }
20,575
1
void decode_mb_coeffs(VP8Context *s, VP8ThreadData *td, VP56RangeCoder *c, VP8Macroblock *mb, uint8_t t_nnz[9], uint8_t l_nnz[9]) { int i, x, y, luma_start = 0, luma_ctx = 3; int nnz_pred, nnz, nnz_total = 0; int segment = mb->segment; int block_dc = 0; if (mb->mode != MODE_I4x4 && mb->mode != VP8_MVMODE_SPLIT) { nnz_pred = t_nnz[8] + l_nnz[8]; // decode DC values and do hadamard nnz = decode_block_coeffs(c, td->block_dc, s->prob->token[1], 0, nnz_pred, s->qmat[segment].luma_dc_qmul); l_nnz[8] = t_nnz[8] = !!nnz; if (nnz) { nnz_total += nnz; block_dc = 1; if (nnz == 1) s->vp8dsp.vp8_luma_dc_wht_dc(td->block, td->block_dc); else s->vp8dsp.vp8_luma_dc_wht(td->block, td->block_dc); } luma_start = 1; luma_ctx = 0; } // luma blocks for (y = 0; y < 4; y++) for (x = 0; x < 4; x++) { nnz_pred = l_nnz[y] + t_nnz[x]; nnz = decode_block_coeffs(c, td->block[y][x], s->prob->token[luma_ctx], luma_start, nnz_pred, s->qmat[segment].luma_qmul); /* nnz+block_dc may be one more than the actual last index, * but we don't care */ td->non_zero_count_cache[y][x] = nnz + block_dc; t_nnz[x] = l_nnz[y] = !!nnz; nnz_total += nnz; } // chroma blocks // TODO: what to do about dimensions? 2nd dim for luma is x, // but for chroma it's (y<<1)|x for (i = 4; i < 6; i++) for (y = 0; y < 2; y++) for (x = 0; x < 2; x++) { nnz_pred = l_nnz[i + 2 * y] + t_nnz[i + 2 * x]; nnz = decode_block_coeffs(c, td->block[i][(y << 1) + x], s->prob->token[2], 0, nnz_pred, s->qmat[segment].chroma_qmul); td->non_zero_count_cache[i][(y << 1) + x] = nnz; t_nnz[i + 2 * x] = l_nnz[i + 2 * y] = !!nnz; nnz_total += nnz; } // if there were no coded coeffs despite the macroblock not being marked skip, // we MUST not do the inner loop filter and should not do IDCT // Since skip isn't used for bitstream prediction, just manually set it. if (!nnz_total) mb->skip = 1; }
20,577
1
static inline short adpcm_ms_expand_nibble(ADPCMChannelStatus *c, int nibble) { int predictor; predictor = (((c->sample1) * (c->coeff1)) + ((c->sample2) * (c->coeff2))) / 64; predictor += ((nibble & 0x08)?(nibble - 0x10):(nibble)) * c->idelta; c->sample2 = c->sample1; c->sample1 = av_clip_int16(predictor); c->idelta = (ff_adpcm_AdaptationTable[(int)nibble] * c->idelta) >> 8; if (c->idelta < 16) c->idelta = 16; return c->sample1; }
20,578
1
static int __qemu_rdma_add_block(RDMAContext *rdma, void *host_addr, ram_addr_t block_offset, uint64_t length) { RDMALocalBlocks *local = &rdma->local_ram_blocks; RDMALocalBlock *block = g_hash_table_lookup(rdma->blockmap, (void *) block_offset); RDMALocalBlock *old = local->block; assert(block == NULL); local->block = g_malloc0(sizeof(RDMALocalBlock) * (local->nb_blocks + 1)); if (local->nb_blocks) { int x; for (x = 0; x < local->nb_blocks; x++) { g_hash_table_remove(rdma->blockmap, (void *)old[x].offset); g_hash_table_insert(rdma->blockmap, (void *)old[x].offset, &local->block[x]); } memcpy(local->block, old, sizeof(RDMALocalBlock) * local->nb_blocks); g_free(old); } block = &local->block[local->nb_blocks]; block->local_host_addr = host_addr; block->offset = block_offset; block->length = length; block->index = local->nb_blocks; block->nb_chunks = ram_chunk_index(host_addr, host_addr + length) + 1UL; block->transit_bitmap = bitmap_new(block->nb_chunks); bitmap_clear(block->transit_bitmap, 0, block->nb_chunks); block->unregister_bitmap = bitmap_new(block->nb_chunks); bitmap_clear(block->unregister_bitmap, 0, block->nb_chunks); block->remote_keys = g_malloc0(block->nb_chunks * sizeof(uint32_t)); block->is_ram_block = local->init ? false : true; g_hash_table_insert(rdma->blockmap, (void *) block_offset, block); DDPRINTF("Added Block: %d, addr: %" PRIu64 ", offset: %" PRIu64 " length: %" PRIu64 " end: %" PRIu64 " bits %" PRIu64 " chunks %d\n", local->nb_blocks, (uint64_t) block->local_host_addr, block->offset, block->length, (uint64_t) (block->local_host_addr + block->length), BITS_TO_LONGS(block->nb_chunks) * sizeof(unsigned long) * 8, block->nb_chunks); local->nb_blocks++; return 0; }
20,579
1
void ff_lzw_decode_tail(LZWState *p) { struct LZWState *s = (struct LZWState *)p; while(!s->eob_reached) lzw_get_code(s); }
20,580
1
static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp) { X86CPU *cpu = X86_CPU(dev); X86CPUClass *xcc = X86_CPU_GET_CLASS(dev); Error *local_err = NULL; #ifndef CONFIG_USER_ONLY cpu_remove_sync(CPU(dev)); qemu_unregister_reset(x86_cpu_machine_reset_cb, dev); #endif if (cpu->apic_state) { object_unparent(OBJECT(cpu->apic_state)); cpu->apic_state = NULL; } xcc->parent_unrealize(dev, &local_err); if (local_err != NULL) { error_propagate(errp, local_err); return; } }
20,582
1
static int net_tap_init(VLANState *vlan, const char *model, const char *name, const char *ifname1, const char *setup_script, const char *down_script) { TAPState *s; int fd; char ifname[128]; if (ifname1 != NULL) pstrcpy(ifname, sizeof(ifname), ifname1); else ifname[0] = '\0'; TFR(fd = tap_open(ifname, sizeof(ifname))); if (fd < 0) return -1; if (!setup_script || !strcmp(setup_script, "no")) setup_script = ""; if (setup_script[0] != '\0') { if (launch_script(setup_script, ifname, fd)) return -1; } s = net_tap_fd_init(vlan, model, name, fd); if (!s) return -1; snprintf(s->vc->info_str, sizeof(s->vc->info_str), "ifname=%s,script=%s,downscript=%s", ifname, setup_script, down_script); if (down_script && strcmp(down_script, "no")) { snprintf(s->down_script, sizeof(s->down_script), "%s", down_script); snprintf(s->down_script_arg, sizeof(s->down_script_arg), "%s", ifname); } return 0; }
20,583
1
static void ide_test_start(const char *cmdline_fmt, ...) { va_list ap; char *cmdline; va_start(ap, cmdline_fmt); cmdline = g_strdup_vprintf(cmdline_fmt, ap); va_end(ap); qtest_start(cmdline); qtest_irq_intercept_in(global_qtest, "ioapic"); guest_malloc = pc_alloc_init(); }
20,584
1
static int parse_cookie(HTTPContext *s, const char *p, AVDictionary **cookies) { AVDictionary *new_params = NULL; AVDictionaryEntry *e, *cookie_entry; char *eql, *name; // ensure the cookie is parsable if (parse_set_cookie(p, &new_params)) return -1; // if there is no cookie value there is nothing to parse cookie_entry = av_dict_get(new_params, "", NULL, AV_DICT_IGNORE_SUFFIX); if (!cookie_entry || !cookie_entry->value) { return -1; } // ensure the cookie is not expired or older than an existing value if ((e = av_dict_get(new_params, "expires", NULL, 0)) && e->value) { struct tm new_tm = {0}; if (!parse_set_cookie_expiry_time(e->value, &new_tm)) { AVDictionaryEntry *e2; // if the cookie has already expired ignore it if (av_timegm(&new_tm) < av_gettime() / 1000000) { return -1; } // only replace an older cookie with the same name e2 = av_dict_get(*cookies, cookie_entry->key, NULL, 0); if (e2 && e2->value) { AVDictionary *old_params = NULL; if (!parse_set_cookie(p, &old_params)) { e2 = av_dict_get(old_params, "expires", NULL, 0); if (e2 && e2->value) { struct tm old_tm = {0}; if (!parse_set_cookie_expiry_time(e->value, &old_tm)) { if (av_timegm(&new_tm) < av_timegm(&old_tm)) { av_dict_free(&old_params); return -1; } } } } av_dict_free(&old_params); } } } // duplicate the cookie name (dict will dupe the value) if (!(eql = strchr(p, '='))) return AVERROR(EINVAL); if (!(name = av_strndup(p, eql - p))) return AVERROR(ENOMEM); // add the cookie to the dictionary av_dict_set(cookies, name, eql, AV_DICT_DONT_STRDUP_KEY); return 0; }
20,585
1
ISADevice *isa_ide_init(ISABus *bus, int iobase, int iobase2, int isairq, DriveInfo *hd0, DriveInfo *hd1) { DeviceState *dev; ISADevice *isadev; ISAIDEState *s; isadev = isa_create(bus, TYPE_ISA_IDE); dev = DEVICE(isadev); qdev_prop_set_uint32(dev, "iobase", iobase); qdev_prop_set_uint32(dev, "iobase2", iobase2); qdev_prop_set_uint32(dev, "irq", isairq); if (qdev_init(dev) < 0) { return NULL; } s = ISA_IDE(dev); if (hd0) { ide_create_drive(&s->bus, 0, hd0); } if (hd1) { ide_create_drive(&s->bus, 1, hd1); } return isadev; }
20,586
1
int rx_produce(World *world, uint32_t pport, const struct iovec *iov, int iovcnt, uint8_t copy_to_cpu) { Rocker *r = world_rocker(world); PCIDevice *dev = (PCIDevice *)r; DescRing *ring = rocker_get_rx_ring_by_pport(r, pport); DescInfo *info = desc_ring_fetch_desc(ring); char *data; size_t data_size = iov_size(iov, iovcnt); char *buf; uint16_t rx_flags = 0; uint16_t rx_csum = 0; size_t tlv_size; RockerTlv *tlvs[ROCKER_TLV_RX_MAX + 1]; hwaddr frag_addr; uint16_t frag_max_len; int pos; int err; if (!info) { return -ROCKER_ENOBUFS; } buf = desc_get_buf(info, false); if (!buf) { err = -ROCKER_ENXIO; goto out; } rocker_tlv_parse(tlvs, ROCKER_TLV_RX_MAX, buf, desc_tlv_size(info)); if (!tlvs[ROCKER_TLV_RX_FRAG_ADDR] || !tlvs[ROCKER_TLV_RX_FRAG_MAX_LEN]) { err = -ROCKER_EINVAL; goto out; } frag_addr = rocker_tlv_get_le64(tlvs[ROCKER_TLV_RX_FRAG_ADDR]); frag_max_len = rocker_tlv_get_le16(tlvs[ROCKER_TLV_RX_FRAG_MAX_LEN]); if (data_size > frag_max_len) { err = -ROCKER_EMSGSIZE; goto out; } if (copy_to_cpu) { rx_flags |= ROCKER_RX_FLAGS_FWD_OFFLOAD; } /* XXX calc rx flags/csum */ tlv_size = rocker_tlv_total_size(sizeof(uint16_t)) + /* flags */ rocker_tlv_total_size(sizeof(uint16_t)) + /* scum */ rocker_tlv_total_size(sizeof(uint64_t)) + /* frag addr */ rocker_tlv_total_size(sizeof(uint16_t)) + /* frag max len */ rocker_tlv_total_size(sizeof(uint16_t)); /* frag len */ if (tlv_size > desc_buf_size(info)) { err = -ROCKER_EMSGSIZE; goto out; } /* TODO: * iov dma write can be optimized in similar way e1000 does it in * e1000_receive_iov. But maybe if would make sense to introduce * generic helper iov_dma_write. */ data = g_malloc(data_size); if (!data) { err = -ROCKER_ENOMEM; goto out; } iov_to_buf(iov, iovcnt, 0, data, data_size); pci_dma_write(dev, frag_addr, data, data_size); g_free(data); pos = 0; rocker_tlv_put_le16(buf, &pos, ROCKER_TLV_RX_FLAGS, rx_flags); rocker_tlv_put_le16(buf, &pos, ROCKER_TLV_RX_CSUM, rx_csum); rocker_tlv_put_le64(buf, &pos, ROCKER_TLV_RX_FRAG_ADDR, frag_addr); rocker_tlv_put_le16(buf, &pos, ROCKER_TLV_RX_FRAG_MAX_LEN, frag_max_len); rocker_tlv_put_le16(buf, &pos, ROCKER_TLV_RX_FRAG_LEN, data_size); err = desc_set_buf(info, tlv_size); out: if (desc_ring_post_desc(ring, err)) { rocker_msix_irq(r, ROCKER_MSIX_VEC_RX(pport - 1)); } return err; }
20,587
1
dshow_cycle_pins(AVFormatContext *avctx, enum dshowDeviceType devtype, IBaseFilter *device_filter, IPin **ppin) { struct dshow_ctx *ctx = avctx->priv_data; IEnumPins *pins = 0; IPin *device_pin = NULL; IPin *pin; int r; const GUID *mediatype[2] = { &MEDIATYPE_Video, &MEDIATYPE_Audio }; const char *devtypename = (devtype == VideoDevice) ? "video" : "audio"; int set_format = (devtype == VideoDevice && (ctx->video_size || ctx->framerate)) || (devtype == AudioDevice && (ctx->channels || ctx->sample_rate)); int format_set = 0; r = IBaseFilter_EnumPins(device_filter, &pins); if (r != S_OK) { av_log(avctx, AV_LOG_ERROR, "Could not enumerate pins.\n"); return AVERROR(EIO); } if (!ppin) { av_log(avctx, AV_LOG_INFO, "DirectShow %s device options\n", devtypename); } while (IEnumPins_Next(pins, 1, &pin, NULL) == S_OK && !device_pin) { IKsPropertySet *p = NULL; IEnumMediaTypes *types = NULL; PIN_INFO info = {0}; AM_MEDIA_TYPE *type; GUID category; DWORD r2; IPin_QueryPinInfo(pin, &info); IBaseFilter_Release(info.pFilter); if (info.dir != PINDIR_OUTPUT) goto next; if (IPin_QueryInterface(pin, &IID_IKsPropertySet, (void **) &p) != S_OK) goto next; if (IKsPropertySet_Get(p, &AMPROPSETID_Pin, AMPROPERTY_PIN_CATEGORY, NULL, 0, &category, sizeof(GUID), &r2) != S_OK) goto next; if (!IsEqualGUID(&category, &PIN_CATEGORY_CAPTURE)) goto next; if (!ppin) { char *buf = dup_wchar_to_utf8(info.achName); av_log(avctx, AV_LOG_INFO, " Pin \"%s\"\n", buf); av_free(buf); dshow_cycle_formats(avctx, devtype, pin, NULL); goto next; } if (set_format) { dshow_cycle_formats(avctx, devtype, pin, &format_set); if (!format_set) { goto next; } } if (IPin_EnumMediaTypes(pin, &types) != S_OK) goto next; IEnumMediaTypes_Reset(types); while (IEnumMediaTypes_Next(types, 1, &type, NULL) == S_OK && !device_pin) { if (IsEqualGUID(&type->majortype, mediatype[devtype])) { device_pin = pin; goto next; } CoTaskMemFree(type); } next: if (types) IEnumMediaTypes_Release(types); if (p) IKsPropertySet_Release(p); if (device_pin != pin) IPin_Release(pin); } IEnumPins_Release(pins); if (ppin) { if (set_format && !format_set) { av_log(avctx, AV_LOG_ERROR, "Could not set %s options\n", devtypename); return AVERROR(EIO); } if (!device_pin) { av_log(avctx, AV_LOG_ERROR, "Could not find output pin from %s capture device.\n", devtypename); return AVERROR(EIO); } *ppin = device_pin; } return 0; }
20,588
1
static inline int yv12touyvy_unscaled_altivec(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY, int srcSliceH, uint8_t* dstParam[], int dstStride_a[]) { uint8_t *dst=dstParam[0] + dstStride_a[0]*srcSliceY; // yv12toyuy2( src[0],src[1],src[2],dst,c->srcW,srcSliceH,srcStride[0],srcStride[1],dstStride[0] ); uint8_t *ysrc = src[0]; uint8_t *usrc = src[1]; uint8_t *vsrc = src[2]; const int width = c->srcW; const int height = srcSliceH; const int lumStride = srcStride[0]; const int chromStride = srcStride[1]; const int dstStride = dstStride_a[0]; const int vertLumPerChroma = 2; const vector unsigned char yperm = vec_lvsl(0, ysrc); register unsigned int y; if(width&15){ yv12touyvy( ysrc, usrc, vsrc, dst,c->srcW,srcSliceH, lumStride, chromStride, dstStride); return srcSliceH; } /* this code assume: 1) dst is 16 bytes-aligned 2) dstStride is a multiple of 16 3) width is a multiple of 16 4) lum&chrom stride are multiple of 8 */ for(y=0; y<height; y++) { int i; for (i = 0; i < width - 31; i+= 32) { const unsigned int j = i >> 1; vector unsigned char v_yA = vec_ld(i, ysrc); vector unsigned char v_yB = vec_ld(i + 16, ysrc); vector unsigned char v_yC = vec_ld(i + 32, ysrc); vector unsigned char v_y1 = vec_perm(v_yA, v_yB, yperm); vector unsigned char v_y2 = vec_perm(v_yB, v_yC, yperm); vector unsigned char v_uA = vec_ld(j, usrc); vector unsigned char v_uB = vec_ld(j + 16, usrc); vector unsigned char v_u = vec_perm(v_uA, v_uB, vec_lvsl(j, usrc)); vector unsigned char v_vA = vec_ld(j, vsrc); vector unsigned char v_vB = vec_ld(j + 16, vsrc); vector unsigned char v_v = vec_perm(v_vA, v_vB, vec_lvsl(j, vsrc)); vector unsigned char v_uv_a = vec_mergeh(v_u, v_v); vector unsigned char v_uv_b = vec_mergel(v_u, v_v); vector unsigned char v_uyvy_0 = vec_mergeh(v_uv_a, v_y1); vector unsigned char v_uyvy_1 = vec_mergel(v_uv_a, v_y1); vector unsigned char v_uyvy_2 = vec_mergeh(v_uv_b, v_y2); vector unsigned char v_uyvy_3 = vec_mergel(v_uv_b, v_y2); vec_st(v_uyvy_0, (i << 1), dst); vec_st(v_uyvy_1, (i << 1) + 16, dst); vec_st(v_uyvy_2, (i << 1) + 32, dst); vec_st(v_uyvy_3, (i << 1) + 48, dst); } if (i < width) { const unsigned int j = i >> 1; vector unsigned char v_y1 = vec_ld(i, ysrc); vector unsigned char v_u = vec_ld(j, usrc); vector unsigned char v_v = vec_ld(j, vsrc); vector unsigned char v_uv_a = vec_mergeh(v_u, v_v); vector unsigned char v_uyvy_0 = vec_mergeh(v_uv_a, v_y1); vector unsigned char v_uyvy_1 = vec_mergel(v_uv_a, v_y1); vec_st(v_uyvy_0, (i << 1), dst); vec_st(v_uyvy_1, (i << 1) + 16, dst); } if((y&(vertLumPerChroma-1))==(vertLumPerChroma-1) ) { usrc += chromStride; vsrc += chromStride; } ysrc += lumStride; dst += dstStride; } return srcSliceH; }
20,589
1
static void serial_ioport_write(void *opaque, hwaddr addr, uint64_t val, unsigned size) { SerialState *s = opaque; addr &= 7; DPRINTF("write addr=0x%" HWADDR_PRIx " val=0x%" PRIx64 "\n", addr, val); switch(addr) { default: case 0: if (s->lcr & UART_LCR_DLAB) { s->divider = (s->divider & 0xff00) | val; serial_update_parameters(s); } else { s->thr = (uint8_t) val; if(s->fcr & UART_FCR_FE) { /* xmit overruns overwrite data, so make space if needed */ if (fifo8_is_full(&s->xmit_fifo)) { fifo8_pop(&s->xmit_fifo); } fifo8_push(&s->xmit_fifo, s->thr); s->lsr &= ~UART_LSR_TEMT; } s->thr_ipending = 0; s->lsr &= ~UART_LSR_THRE; serial_update_irq(s); serial_xmit(NULL, G_IO_OUT, s); } break; case 1: if (s->lcr & UART_LCR_DLAB) { s->divider = (s->divider & 0x00ff) | (val << 8); serial_update_parameters(s); } else { s->ier = val & 0x0f; /* If the backend device is a real serial port, turn polling of the modem status lines on physical port on or off depending on UART_IER_MSI state */ if (s->poll_msl >= 0) { if (s->ier & UART_IER_MSI) { s->poll_msl = 1; serial_update_msl(s); } else { timer_del(s->modem_status_poll); s->poll_msl = 0; } } if (s->lsr & UART_LSR_THRE) { s->thr_ipending = 1; serial_update_irq(s); } } break; case 2: val = val & 0xFF; if (s->fcr == val) break; /* Did the enable/disable flag change? If so, make sure FIFOs get flushed */ if ((val ^ s->fcr) & UART_FCR_FE) val |= UART_FCR_XFR | UART_FCR_RFR; /* FIFO clear */ if (val & UART_FCR_RFR) { timer_del(s->fifo_timeout_timer); s->timeout_ipending=0; fifo8_reset(&s->recv_fifo); } if (val & UART_FCR_XFR) { fifo8_reset(&s->xmit_fifo); } if (val & UART_FCR_FE) { s->iir |= UART_IIR_FE; /* Set recv_fifo trigger Level */ switch (val & 0xC0) { case UART_FCR_ITL_1: s->recv_fifo_itl = 1; break; case UART_FCR_ITL_2: s->recv_fifo_itl = 4; break; case UART_FCR_ITL_3: s->recv_fifo_itl = 8; break; case UART_FCR_ITL_4: s->recv_fifo_itl = 14; break; } } else s->iir &= ~UART_IIR_FE; /* Set fcr - or at least the bits in it that are supposed to "stick" */ s->fcr = val & 0xC9; serial_update_irq(s); break; case 3: { int break_enable; s->lcr = val; serial_update_parameters(s); break_enable = (val >> 6) & 1; if (break_enable != s->last_break_enable) { s->last_break_enable = break_enable; qemu_chr_fe_ioctl(s->chr, CHR_IOCTL_SERIAL_SET_BREAK, &break_enable); } } break; case 4: { int flags; int old_mcr = s->mcr; s->mcr = val & 0x1f; if (val & UART_MCR_LOOP) break; if (s->poll_msl >= 0 && old_mcr != s->mcr) { qemu_chr_fe_ioctl(s->chr,CHR_IOCTL_SERIAL_GET_TIOCM, &flags); flags &= ~(CHR_TIOCM_RTS | CHR_TIOCM_DTR); if (val & UART_MCR_RTS) flags |= CHR_TIOCM_RTS; if (val & UART_MCR_DTR) flags |= CHR_TIOCM_DTR; qemu_chr_fe_ioctl(s->chr,CHR_IOCTL_SERIAL_SET_TIOCM, &flags); /* Update the modem status after a one-character-send wait-time, since there may be a response from the device/computer at the other end of the serial line */ timer_mod(s->modem_status_poll, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + s->char_transmit_time); } } break; case 5: break; case 6: break; case 7: s->scr = val; break; } }
20,590
1
static void FUNC(put_hevc_qpel_bi_w_v)(uint8_t *_dst, ptrdiff_t _dststride, uint8_t *_src, ptrdiff_t _srcstride, int16_t *src2, int height, int denom, int wx0, int wx1, int ox0, int ox1, intptr_t mx, intptr_t my, int width) { int x, y; pixel *src = (pixel*)_src; ptrdiff_t srcstride = _srcstride / sizeof(pixel); pixel *dst = (pixel *)_dst; ptrdiff_t dststride = _dststride / sizeof(pixel); const int8_t *filter = ff_hevc_qpel_filters[my - 1]; int shift = 14 + 1 - BIT_DEPTH; int log2Wd = denom + shift - 1; ox0 = ox0 * (1 << (BIT_DEPTH - 8)); ox1 = ox1 * (1 << (BIT_DEPTH - 8)); for (y = 0; y < height; y++) { for (x = 0; x < width; x++) dst[x] = av_clip_pixel(((QPEL_FILTER(src, srcstride) >> (BIT_DEPTH - 8)) * wx1 + src2[x] * wx0 + ((ox0 + ox1 + 1) << log2Wd)) >> (log2Wd + 1)); src += srcstride; dst += dststride; src2 += MAX_PB_SIZE; } }
20,591
1
int spapr_ovec_populate_dt(void *fdt, int fdt_offset, sPAPROptionVector *ov, const char *name) { uint8_t vec[OV_MAXBYTES + 1]; uint16_t vec_len; unsigned long lastbit; int i; g_assert(ov); lastbit = find_last_bit(ov->bitmap, OV_MAXBITS); /* if no bits are set, include at least 1 byte of the vector so we can * still encoded this in the device tree while abiding by the same * encoding/sizing expected in ibm,client-architecture-support */ vec_len = (lastbit == OV_MAXBITS) ? 1 : lastbit / BITS_PER_BYTE + 1; g_assert_cmpint(vec_len, <=, OV_MAXBYTES); /* guest expects vector len encoded as vec_len - 1, since the length byte * is assumed and not included, and the first byte of the vector * is assumed as well */ vec[0] = vec_len - 1; for (i = 1; i < vec_len + 1; i++) { vec[i] = guest_byte_from_bitmap(ov->bitmap, (i - 1) * BITS_PER_BYTE); if (vec[i]) { DPRINTFN("encoding guest vector byte %3d / %3d: 0x%.2x", i, vec_len, vec[i]); } } return fdt_setprop(fdt, fdt_offset, name, vec, vec_len); }
20,592
1
static void ff_id3v2_parse(AVFormatContext *s, int len, uint8_t version, uint8_t flags, ID3v2ExtraMeta **extra_meta) { int isv34, tlen, unsync; char tag[5]; int64_t next, end = avio_tell(s->pb) + len; int taghdrlen; const char *reason = NULL; AVIOContext pb; AVIOContext *pbx; unsigned char *buffer = NULL; int buffer_size = 0; void (*extra_func)(AVFormatContext*, AVIOContext*, int, char*, ID3v2ExtraMeta**) = NULL; switch (version) { case 2: if (flags & 0x40) { reason = "compression"; goto error; } isv34 = 0; taghdrlen = 6; break; case 3: case 4: isv34 = 1; taghdrlen = 10; break; default: reason = "version"; goto error; } unsync = flags & 0x80; if (isv34 && flags & 0x40) /* Extended header present, just skip over it */ avio_skip(s->pb, get_size(s->pb, 4)); while (len >= taghdrlen) { unsigned int tflags = 0; int tunsync = 0; if (isv34) { avio_read(s->pb, tag, 4); tag[4] = 0; if(version==3){ tlen = avio_rb32(s->pb); }else tlen = get_size(s->pb, 4); tflags = avio_rb16(s->pb); tunsync = tflags & ID3v2_FLAG_UNSYNCH; } else { avio_read(s->pb, tag, 3); tag[3] = 0; tlen = avio_rb24(s->pb); } if (tlen < 0 || tlen > len - taghdrlen) { av_log(s, AV_LOG_WARNING, "Invalid size in frame %s, skipping the rest of tag.\n", tag); break; } len -= taghdrlen + tlen; next = avio_tell(s->pb) + tlen; if (!tlen) { if (tag[0]) av_log(s, AV_LOG_DEBUG, "Invalid empty frame %s, skipping.\n", tag); continue; } if (tflags & ID3v2_FLAG_DATALEN) { avio_rb32(s->pb); tlen -= 4; } if (tflags & (ID3v2_FLAG_ENCRYPTION | ID3v2_FLAG_COMPRESSION)) { av_log(s, AV_LOG_WARNING, "Skipping encrypted/compressed ID3v2 frame %s.\n", tag); avio_skip(s->pb, tlen); /* check for text tag or supported special meta tag */ } else if (tag[0] == 'T' || (extra_meta && (extra_func = get_extra_meta_func(tag, isv34)->read))) { if (unsync || tunsync) { int i, j; av_fast_malloc(&buffer, &buffer_size, tlen); if (!buffer) { av_log(s, AV_LOG_ERROR, "Failed to alloc %d bytes\n", tlen); goto seek; } for (i = 0, j = 0; i < tlen; i++, j++) { buffer[j] = avio_r8(s->pb); if (j > 0 && !buffer[j] && buffer[j - 1] == 0xff) { /* Unsynchronised byte, skip it */ j--; } } ffio_init_context(&pb, buffer, j, 0, NULL, NULL, NULL, NULL); tlen = j; pbx = &pb; // read from sync buffer } else { pbx = s->pb; // read straight from input } if (tag[0] == 'T') /* parse text tag */ read_ttag(s, pbx, tlen, tag); else /* parse special meta tag */ extra_func(s, pbx, tlen, tag, extra_meta); } else if (!tag[0]) { if (tag[1]) av_log(s, AV_LOG_WARNING, "invalid frame id, assuming padding"); avio_skip(s->pb, tlen); break; } /* Skip to end of tag */ seek: avio_seek(s->pb, next, SEEK_SET); } if (version == 4 && flags & 0x10) /* Footer preset, always 10 bytes, skip over it */ end += 10; error: if (reason) av_log(s, AV_LOG_INFO, "ID3v2.%d tag skipped, cannot handle %s\n", version, reason); avio_seek(s->pb, end, SEEK_SET); av_free(buffer); return; }
20,593
1
static int shorten_decode_frame(AVCodecContext *avctx, void *data, int *data_size, uint8_t *buf, int buf_size) { ShortenContext *s = avctx->priv_data; int i, input_buf_size = 0; int16_t *samples = data; if(s->max_framesize == 0){ s->max_framesize= 1024; // should hopefully be enough for the first header s->bitstream= av_fast_realloc(s->bitstream, &s->allocated_bitstream_size, s->max_framesize); } if(1 && s->max_framesize){//FIXME truncated buf_size= FFMIN(buf_size, s->max_framesize - s->bitstream_size); input_buf_size= buf_size; if(s->bitstream_index + s->bitstream_size + buf_size > s->allocated_bitstream_size){ // printf("memmove\n"); memmove(s->bitstream, &s->bitstream[s->bitstream_index], s->bitstream_size); s->bitstream_index=0; } memcpy(&s->bitstream[s->bitstream_index + s->bitstream_size], buf, buf_size); buf= &s->bitstream[s->bitstream_index]; buf_size += s->bitstream_size; s->bitstream_size= buf_size; if(buf_size < s->max_framesize){ //dprintf("wanna more data ... %d\n", buf_size); return input_buf_size; } } init_get_bits(&s->gb, buf, buf_size*8); get_bits(&s->gb, s->bitindex); if (!s->blocksize) { int maxnlpc = 0; /* shorten signature */ if (get_bits_long(&s->gb, 32) != bswap_32(ff_get_fourcc("ajkg"))) { av_log(s->avctx, AV_LOG_ERROR, "missing shorten magic 'ajkg'\n"); return -1; } s->lpcqoffset = 0; s->blocksize = DEFAULT_BLOCK_SIZE; s->channels = 1; s->nmean = -1; s->version = get_bits(&s->gb, 8); s->internal_ftype = get_uint(s, TYPESIZE); s->channels = get_uint(s, CHANSIZE); if (s->channels > MAX_CHANNELS) { av_log(s->avctx, AV_LOG_ERROR, "too many channels: %d\n", s->channels); return -1; } /* get blocksize if version > 0 */ if (s->version > 0) { int skip_bytes; s->blocksize = get_uint(s, av_log2(DEFAULT_BLOCK_SIZE)); maxnlpc = get_uint(s, LPCQSIZE); s->nmean = get_uint(s, 0); skip_bytes = get_uint(s, NSKIPSIZE); for (i=0; i<skip_bytes; i++) { skip_bits(&s->gb, 8); } } s->nwrap = FFMAX(NWRAP, maxnlpc); allocate_buffers(s); init_offset(s); if (s->version > 1) s->lpcqoffset = V2LPCQOFFSET; if (get_ur_golomb_shorten(&s->gb, FNSIZE) != FN_VERBATIM) { av_log(s->avctx, AV_LOG_ERROR, "missing verbatim section at begining of stream\n"); return -1; } s->header_size = get_ur_golomb_shorten(&s->gb, VERBATIM_CKSIZE_SIZE); if (s->header_size >= OUT_BUFFER_SIZE || s->header_size < CANONICAL_HEADER_SIZE) { av_log(s->avctx, AV_LOG_ERROR, "header is wrong size: %d\n", s->header_size); return -1; } for (i=0; i<s->header_size; i++) s->header[i] = (char)get_ur_golomb_shorten(&s->gb, VERBATIM_BYTE_SIZE); if (decode_wave_header(avctx, s->header, s->header_size) < 0) return -1; s->cur_chan = 0; s->bitshift = 0; } else { int cmd; int len; cmd = get_ur_golomb_shorten(&s->gb, FNSIZE); switch (cmd) { case FN_ZERO: case FN_DIFF0: case FN_DIFF1: case FN_DIFF2: case FN_DIFF3: case FN_QLPC: { int residual_size = 0; int channel = s->cur_chan; int32_t coffset; if (cmd != FN_ZERO) { residual_size = get_ur_golomb_shorten(&s->gb, ENERGYSIZE); /* this is a hack as version 0 differed in defintion of get_sr_golomb_shorten */ if (s->version == 0) residual_size--; } if (s->nmean == 0) coffset = s->offset[channel][0]; else { int32_t sum = (s->version < 2) ? 
0 : s->nmean / 2; for (i=0; i<s->nmean; i++) sum += s->offset[channel][i]; coffset = sum / s->nmean; if (s->version >= 2) coffset >>= FFMIN(1, s->bitshift); } switch (cmd) { case FN_ZERO: for (i=0; i<s->blocksize; i++) s->decoded[channel][i] = 0; break; case FN_DIFF0: for (i=0; i<s->blocksize; i++) s->decoded[channel][i] = get_sr_golomb_shorten(&s->gb, residual_size) + coffset; break; case FN_DIFF1: for (i=0; i<s->blocksize; i++) s->decoded[channel][i] = get_sr_golomb_shorten(&s->gb, residual_size) + s->decoded[channel][i - 1]; break; case FN_DIFF2: for (i=0; i<s->blocksize; i++) s->decoded[channel][i] = get_sr_golomb_shorten(&s->gb, residual_size) + 2*s->decoded[channel][i-1] - s->decoded[channel][i-2]; break; case FN_DIFF3: for (i=0; i<s->blocksize; i++) s->decoded[channel][i] = get_sr_golomb_shorten(&s->gb, residual_size) + 3*s->decoded[channel][i-1] - 3*s->decoded[channel][i-2] + s->decoded[channel][i-3]; break; case FN_QLPC: { int pred_order = get_ur_golomb_shorten(&s->gb, LPCQSIZE); for (i=0; i<pred_order; i++) s->decoded[channel][i - pred_order] -= coffset; decode_subframe_lpc(s, channel, residual_size, pred_order); if (coffset != 0) for (i=0; i < s->blocksize; i++) s->decoded[channel][i] += coffset; } } if (s->nmean > 0) { int32_t sum = (s->version < 2) ? 0 : s->blocksize / 2; for (i=0; i<s->blocksize; i++) sum += s->decoded[channel][i]; for (i=1; i<s->nmean; i++) s->offset[channel][i-1] = s->offset[channel][i]; if (s->version < 2) s->offset[channel][s->nmean - 1] = sum / s->blocksize; else s->offset[channel][s->nmean - 1] = (sum / s->blocksize) << s->bitshift; } for (i=-s->nwrap; i<0; i++) s->decoded[channel][i] = s->decoded[channel][i + s->blocksize]; fix_bitshift(s, s->decoded[channel]); s->cur_chan++; if (s->cur_chan == s->channels) { samples = interleave_buffer(samples, s->channels, s->blocksize, s->decoded); s->cur_chan = 0; goto frame_done; } break; } break; case FN_VERBATIM: len = get_ur_golomb_shorten(&s->gb, VERBATIM_CKSIZE_SIZE); while (len--) { get_ur_golomb_shorten(&s->gb, VERBATIM_BYTE_SIZE); } break; case FN_BITSHIFT: s->bitshift = get_ur_golomb_shorten(&s->gb, BITSHIFTSIZE); break; case FN_BLOCKSIZE: s->blocksize = get_uint(s, av_log2(s->blocksize)); break; case FN_QUIT: return buf_size; break; default: av_log(avctx, AV_LOG_ERROR, "unknown shorten function %d\n", cmd); return -1; break; } } frame_done: *data_size = (int8_t *)samples - (int8_t *)data; // s->last_blocksize = s->blocksize; s->bitindex = get_bits_count(&s->gb) - 8*((get_bits_count(&s->gb))/8); i= (get_bits_count(&s->gb))/8; if (i > buf_size) { av_log(s->avctx, AV_LOG_ERROR, "overread: %d\n", i - buf_size); s->bitstream_size=0; s->bitstream_index=0; return -1; } if (s->bitstream_size) { s->bitstream_index += i; s->bitstream_size -= i; return input_buf_size; } else return i; }
20,594
0
static void apply_param_change(AVCodecContext *avctx, AVPacket *avpkt) { int size = 0; const uint8_t *data; uint32_t flags; if (!(avctx->codec->capabilities & CODEC_CAP_PARAM_CHANGE)) return; data = av_packet_get_side_data(avpkt, AV_PKT_DATA_PARAM_CHANGE, &size); if (!data || size < 4) return; flags = bytestream_get_le32(&data); size -= 4; if (size < 4) /* Required for any of the changes */ return; if (flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT) { avctx->channels = bytestream_get_le32(&data); size -= 4; } if (flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT) { if (size < 8) return; avctx->channel_layout = bytestream_get_le64(&data); size -= 8; } if (size < 4) return; if (flags & AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE) { avctx->sample_rate = bytestream_get_le32(&data); size -= 4; } if (flags & AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS) { if (size < 8) return; avctx->width = bytestream_get_le32(&data); avctx->height = bytestream_get_le32(&data); avcodec_set_dimensions(avctx, avctx->width, avctx->height); size -= 8; } }
20,595
1
static int net_client_init1(const void *object, int is_netdev, Error **errp) { union { const Netdev *netdev; const NetLegacy *net; } u; const NetClientOptions *opts; const char *name; if (is_netdev) { u.netdev = object; opts = u.netdev->opts; name = u.netdev->id; switch (opts->kind) { #ifdef CONFIG_SLIRP case NET_CLIENT_OPTIONS_KIND_USER: #endif case NET_CLIENT_OPTIONS_KIND_TAP: case NET_CLIENT_OPTIONS_KIND_SOCKET: #ifdef CONFIG_VDE case NET_CLIENT_OPTIONS_KIND_VDE: #endif #ifdef CONFIG_NETMAP case NET_CLIENT_OPTIONS_KIND_NETMAP: #endif #ifdef CONFIG_NET_BRIDGE case NET_CLIENT_OPTIONS_KIND_BRIDGE: #endif case NET_CLIENT_OPTIONS_KIND_HUBPORT: #ifdef CONFIG_VHOST_NET_USED case NET_CLIENT_OPTIONS_KIND_VHOST_USER: #endif #ifdef CONFIG_LINUX case NET_CLIENT_OPTIONS_KIND_L2TPV3: #endif break; default: error_set(errp, QERR_INVALID_PARAMETER_VALUE, "type", "a netdev backend type"); return -1; } } else { u.net = object; opts = u.net->opts; /* missing optional values have been initialized to "all bits zero" */ name = u.net->has_id ? u.net->id : u.net->name; } if (net_client_init_fun[opts->kind]) { NetClientState *peer = NULL; /* Do not add to a vlan if it's a -netdev or a nic with a netdev= * parameter. */ if (!is_netdev && (opts->kind != NET_CLIENT_OPTIONS_KIND_NIC || !opts->nic->has_netdev)) { peer = net_hub_add_port(u.net->has_vlan ? u.net->vlan : 0, NULL); } if (net_client_init_fun[opts->kind](opts, name, peer) < 0) { /* TODO push error reporting into init() methods */ error_set(errp, QERR_DEVICE_INIT_FAILED, NetClientOptionsKind_lookup[opts->kind]); return -1; } } return 0; }
20,597
1
bdrv_driver_pwritev_compressed(BlockDriverState *bs, uint64_t offset, uint64_t bytes, QEMUIOVector *qiov) { BlockDriver *drv = bs->drv; if (!drv->bdrv_co_pwritev_compressed) { return -ENOTSUP; } return drv->bdrv_co_pwritev_compressed(bs, offset, bytes, qiov); }
20,598
1
static void cirrus_init_common(CirrusVGAState * s, int device_id, int is_pci) { int i; static int inited; if (!inited) { inited = 1; for(i = 0;i < 256; i++) rop_to_index[i] = CIRRUS_ROP_NOP_INDEX; /* nop rop */ rop_to_index[CIRRUS_ROP_0] = 0; rop_to_index[CIRRUS_ROP_SRC_AND_DST] = 1; rop_to_index[CIRRUS_ROP_NOP] = 2; rop_to_index[CIRRUS_ROP_SRC_AND_NOTDST] = 3; rop_to_index[CIRRUS_ROP_NOTDST] = 4; rop_to_index[CIRRUS_ROP_SRC] = 5; rop_to_index[CIRRUS_ROP_1] = 6; rop_to_index[CIRRUS_ROP_NOTSRC_AND_DST] = 7; rop_to_index[CIRRUS_ROP_SRC_XOR_DST] = 8; rop_to_index[CIRRUS_ROP_SRC_OR_DST] = 9; rop_to_index[CIRRUS_ROP_NOTSRC_OR_NOTDST] = 10; rop_to_index[CIRRUS_ROP_SRC_NOTXOR_DST] = 11; rop_to_index[CIRRUS_ROP_SRC_OR_NOTDST] = 12; rop_to_index[CIRRUS_ROP_NOTSRC] = 13; rop_to_index[CIRRUS_ROP_NOTSRC_OR_DST] = 14; rop_to_index[CIRRUS_ROP_NOTSRC_AND_NOTDST] = 15; } register_ioport_write(0x3c0, 16, 1, vga_ioport_write, s); register_ioport_write(0x3b4, 2, 1, vga_ioport_write, s); register_ioport_write(0x3d4, 2, 1, vga_ioport_write, s); register_ioport_write(0x3ba, 1, 1, vga_ioport_write, s); register_ioport_write(0x3da, 1, 1, vga_ioport_write, s); register_ioport_read(0x3c0, 16, 1, vga_ioport_read, s); register_ioport_read(0x3b4, 2, 1, vga_ioport_read, s); register_ioport_read(0x3d4, 2, 1, vga_ioport_read, s); register_ioport_read(0x3ba, 1, 1, vga_ioport_read, s); register_ioport_read(0x3da, 1, 1, vga_ioport_read, s); s->vga_io_memory = cpu_register_io_memory(0, cirrus_vga_mem_read, cirrus_vga_mem_write, s); cpu_register_physical_memory(isa_mem_base + 0x000a0000, 0x20000, s->vga_io_memory); s->sr[0x06] = 0x0f; if (device_id == CIRRUS_ID_CLGD5446) { /* 4MB 64 bit memory config, always PCI */ s->sr[0x1F] = 0x2d; // MemClock s->gr[0x18] = 0x0f; // fastest memory configuration #if 1 s->sr[0x0f] = 0x98; s->sr[0x17] = 0x20; s->sr[0x15] = 0x04; /* memory size, 3=2MB, 4=4MB */ s->real_vram_size = 4096 * 1024; #else s->sr[0x0f] = 0x18; s->sr[0x17] = 0x20; s->sr[0x15] = 0x03; /* memory size, 3=2MB, 4=4MB */ s->real_vram_size = 2048 * 1024; #endif } else { s->sr[0x1F] = 0x22; // MemClock s->sr[0x0F] = CIRRUS_MEMSIZE_2M; if (is_pci) s->sr[0x17] = CIRRUS_BUSTYPE_PCI; else s->sr[0x17] = CIRRUS_BUSTYPE_ISA; s->real_vram_size = 2048 * 1024; s->sr[0x15] = 0x03; /* memory size, 3=2MB, 4=4MB */ } s->cr[0x27] = device_id; /* Win2K seems to assume that the pattern buffer is at 0xff initially ! */ memset(s->vram_ptr, 0xff, s->real_vram_size); s->cirrus_hidden_dac_lockindex = 5; s->cirrus_hidden_dac_data = 0; /* I/O handler for LFB */ s->cirrus_linear_io_addr = cpu_register_io_memory(0, cirrus_linear_read, cirrus_linear_write, s); s->cirrus_linear_write = cpu_get_io_memory_write(s->cirrus_linear_io_addr); /* I/O handler for LFB */ s->cirrus_linear_bitblt_io_addr = cpu_register_io_memory(0, cirrus_linear_bitblt_read, cirrus_linear_bitblt_write, s); /* I/O handler for memory-mapped I/O */ s->cirrus_mmio_io_addr = cpu_register_io_memory(0, cirrus_mmio_read, cirrus_mmio_write, s); /* XXX: s->vram_size must be a power of two */ s->cirrus_addr_mask = s->real_vram_size - 1; s->linear_mmio_mask = s->real_vram_size - 256; s->get_bpp = cirrus_get_bpp; s->get_offsets = cirrus_get_offsets; s->get_resolution = cirrus_get_resolution; s->cursor_invalidate = cirrus_cursor_invalidate; s->cursor_draw_line = cirrus_cursor_draw_line; register_savevm("cirrus_vga", 0, 2, cirrus_vga_save, cirrus_vga_load, s); }
20,599
1
static int omap2_gpio_init(SysBusDevice *sbd) { DeviceState *dev = DEVICE(sbd); struct omap2_gpif_s *s = OMAP2_GPIO(dev); int i; if (!s->iclk) { hw_error("omap2-gpio: iclk not connected\n"); } if (s->mpu_model < omap3430) { s->modulecount = (s->mpu_model < omap2430) ? 4 : 5; memory_region_init_io(&s->iomem, OBJECT(s), &omap2_gpif_top_ops, s, "omap2.gpio", 0x1000); sysbus_init_mmio(sbd, &s->iomem); } else { s->modulecount = 6; } s->modules = g_new0(struct omap2_gpio_s, s->modulecount); s->handler = g_new0(qemu_irq, s->modulecount * 32); qdev_init_gpio_in(dev, omap2_gpio_set, s->modulecount * 32); qdev_init_gpio_out(dev, s->handler, s->modulecount * 32); for (i = 0; i < s->modulecount; i++) { struct omap2_gpio_s *m = &s->modules[i]; if (!s->fclk[i]) { hw_error("omap2-gpio: fclk%d not connected\n", i); } m->revision = (s->mpu_model < omap3430) ? 0x18 : 0x25; m->handler = &s->handler[i * 32]; sysbus_init_irq(sbd, &m->irq[0]); /* mpu irq */ sysbus_init_irq(sbd, &m->irq[1]); /* dsp irq */ sysbus_init_irq(sbd, &m->wkup); memory_region_init_io(&m->iomem, OBJECT(s), &omap2_gpio_module_ops, m, "omap.gpio-module", 0x1000); sysbus_init_mmio(sbd, &m->iomem); } return 0; }
20,601
1
void ff_frame_thread_free(AVCodecContext *avctx, int thread_count) { FrameThreadContext *fctx = avctx->internal->thread_ctx; const AVCodec *codec = avctx->codec; int i; park_frame_worker_threads(fctx, thread_count); if (fctx->prev_thread && fctx->prev_thread != fctx->threads) if (update_context_from_thread(fctx->threads->avctx, fctx->prev_thread->avctx, 0) < 0) { av_log(avctx, AV_LOG_ERROR, "Final thread update failed\n"); fctx->prev_thread->avctx->internal->is_copy = fctx->threads->avctx->internal->is_copy; fctx->threads->avctx->internal->is_copy = 1; } fctx->die = 1; for (i = 0; i < thread_count; i++) { PerThreadContext *p = &fctx->threads[i]; pthread_mutex_lock(&p->mutex); pthread_cond_signal(&p->input_cond); pthread_mutex_unlock(&p->mutex); if (p->thread_init) pthread_join(p->thread, NULL); p->thread_init=0; if (codec->close) codec->close(p->avctx); release_delayed_buffers(p); av_frame_free(&p->frame); } for (i = 0; i < thread_count; i++) { PerThreadContext *p = &fctx->threads[i]; pthread_mutex_destroy(&p->mutex); pthread_mutex_destroy(&p->progress_mutex); pthread_cond_destroy(&p->input_cond); pthread_cond_destroy(&p->progress_cond); pthread_cond_destroy(&p->output_cond); av_packet_unref(&p->avpkt); av_freep(&p->released_buffers); if (i) { av_freep(&p->avctx->priv_data); av_freep(&p->avctx->slice_offset); } av_freep(&p->avctx->internal); av_freep(&p->avctx); } av_freep(&fctx->threads); pthread_mutex_destroy(&fctx->buffer_mutex); av_freep(&avctx->internal->thread_ctx); if (avctx->priv_data && avctx->codec && avctx->codec->priv_class) av_opt_free(avctx->priv_data); avctx->codec = NULL; }
20,602
1
static bool ranges_can_merge(Range *range1, Range *range2) { return !(range1->end < range2->begin || range2->end < range1->begin); }
20,604
1
static int nbd_open(BlockDriverState *bs, QDict *options, int flags, Error **errp) { BDRVNBDState *s = bs->opaque; char *export = NULL; int result, sock; Error *local_err = NULL; /* Pop the config into our state object. Exit if invalid. */ nbd_config(s, options, &export, &local_err); if (local_err) { error_propagate(errp, local_err); return -EINVAL; } /* establish TCP connection, return error if it fails * TODO: Configurable retry-until-timeout behaviour. */ sock = nbd_establish_connection(bs, errp); if (sock < 0) { return sock; } /* NBD handshake */ result = nbd_client_init(bs, sock, export, errp); return result; }
20,605
1
static int convert_coeffs(AVFilterContext *ctx, AVFilterLink *inlink)
{
    struct HeadphoneContext *s = ctx->priv;
    const int ir_len = s->ir_len;
    int nb_irs = s->nb_irs;
    int nb_input_channels = ctx->inputs[0]->channels;
    float gain_lin = expf((s->gain - 3 * nb_input_channels) / 20 * M_LN10);
    FFTComplex *data_hrtf_l = NULL;
    FFTComplex *data_hrtf_r = NULL;
    FFTComplex *fft_in_l = NULL;
    FFTComplex *fft_in_r = NULL;
    float *data_ir_l = NULL;
    float *data_ir_r = NULL;
    int offset = 0;
    int n_fft;
    int i, j;

    s->buffer_length = 1 << (32 - ff_clz(s->ir_len));
    s->n_fft = n_fft = 1 << (32 - ff_clz(s->ir_len + inlink->sample_rate));

    if (s->type == FREQUENCY_DOMAIN) {
        fft_in_l = av_calloc(n_fft, sizeof(*fft_in_l));
        fft_in_r = av_calloc(n_fft, sizeof(*fft_in_r));
        if (!fft_in_l || !fft_in_r) {
            return AVERROR(ENOMEM);
        }

        av_fft_end(s->fft[0]);
        av_fft_end(s->fft[1]);
        s->fft[0] = av_fft_init(log2(s->n_fft), 0);
        s->fft[1] = av_fft_init(log2(s->n_fft), 0);
        av_fft_end(s->ifft[0]);
        av_fft_end(s->ifft[1]);
        s->ifft[0] = av_fft_init(log2(s->n_fft), 1);
        s->ifft[1] = av_fft_init(log2(s->n_fft), 1);

        if (!s->fft[0] || !s->fft[1] || !s->ifft[0] || !s->ifft[1]) {
            av_log(ctx, AV_LOG_ERROR, "Unable to create FFT contexts of size %d.\n", s->n_fft);
            return AVERROR(ENOMEM);
        }
    }

    s->data_ir[0] = av_calloc(FFALIGN(s->ir_len, 16), sizeof(float) * s->nb_irs);
    s->data_ir[1] = av_calloc(FFALIGN(s->ir_len, 16), sizeof(float) * s->nb_irs);
    s->delay[0] = av_malloc_array(s->nb_irs, sizeof(float));
    s->delay[1] = av_malloc_array(s->nb_irs, sizeof(float));

    if (s->type == TIME_DOMAIN) {
        s->ringbuffer[0] = av_calloc(s->buffer_length, sizeof(float) * nb_input_channels);
        s->ringbuffer[1] = av_calloc(s->buffer_length, sizeof(float) * nb_input_channels);
    } else {
        s->ringbuffer[0] = av_calloc(s->buffer_length, sizeof(float));
        s->ringbuffer[1] = av_calloc(s->buffer_length, sizeof(float));
        s->temp_fft[0] = av_malloc_array(s->n_fft, sizeof(FFTComplex));
        s->temp_fft[1] = av_malloc_array(s->n_fft, sizeof(FFTComplex));
        if (!s->temp_fft[0] || !s->temp_fft[1])
            return AVERROR(ENOMEM);
    }

    if (!s->data_ir[0] || !s->data_ir[1] || !s->ringbuffer[0] || !s->ringbuffer[1])
        return AVERROR(ENOMEM);

    s->in[0].frame = ff_get_audio_buffer(ctx->inputs[0], s->size);
    if (!s->in[0].frame)
        return AVERROR(ENOMEM);
    for (i = 0; i < s->nb_irs; i++) {
        s->in[i + 1].frame = ff_get_audio_buffer(ctx->inputs[i + 1], s->ir_len);
        if (!s->in[i + 1].frame)
            return AVERROR(ENOMEM);
    }

    if (s->type == TIME_DOMAIN) {
        s->temp_src[0] = av_calloc(FFALIGN(ir_len, 16), sizeof(float));
        s->temp_src[1] = av_calloc(FFALIGN(ir_len, 16), sizeof(float));

        data_ir_l = av_calloc(nb_irs * FFALIGN(ir_len, 16), sizeof(*data_ir_l));
        data_ir_r = av_calloc(nb_irs * FFALIGN(ir_len, 16), sizeof(*data_ir_r));
        if (!data_ir_r || !data_ir_l || !s->temp_src[0] || !s->temp_src[1]) {
            av_free(data_ir_l);
            av_free(data_ir_r);
            return AVERROR(ENOMEM);
        }
    } else {
        data_hrtf_l = av_malloc_array(n_fft, sizeof(*data_hrtf_l) * nb_irs);
        data_hrtf_r = av_malloc_array(n_fft, sizeof(*data_hrtf_r) * nb_irs);
        if (!data_hrtf_r || !data_hrtf_l) {
            av_free(data_hrtf_l);
            av_free(data_hrtf_r);
            return AVERROR(ENOMEM);
        }
    }

    for (i = 0; i < s->nb_irs; i++) {
        int len = s->in[i + 1].ir_len;
        int delay_l = s->in[i + 1].delay_l;
        int delay_r = s->in[i + 1].delay_r;
        int idx = -1;
        float *ptr;

        for (j = 0; j < inlink->channels; j++) {
            if (s->mapping[i] < 0) {
                continue;
            }

            if ((av_channel_layout_extract_channel(inlink->channel_layout, j)) == (1LL << s->mapping[i])) {
                idx = j;
                break;
            }
        }
        if (idx == -1)
            continue;

        av_audio_fifo_read(s->in[i + 1].fifo, (void **)s->in[i + 1].frame->extended_data, len);
        ptr = (float *)s->in[i + 1].frame->extended_data[0];

        if (s->type == TIME_DOMAIN) {
            offset = idx * FFALIGN(len, 16);
            for (j = 0; j < len; j++) {
                data_ir_l[offset + j] = ptr[len * 2 - j * 2 - 2] * gain_lin;
                data_ir_r[offset + j] = ptr[len * 2 - j * 2 - 1] * gain_lin;
            }
        } else {
            memset(fft_in_l, 0, n_fft * sizeof(*fft_in_l));
            memset(fft_in_r, 0, n_fft * sizeof(*fft_in_r));

            offset = idx * n_fft;
            for (j = 0; j < len; j++) {
                fft_in_l[delay_l + j].re = ptr[j * 2    ] * gain_lin;
                fft_in_r[delay_r + j].re = ptr[j * 2 + 1] * gain_lin;
            }

            av_fft_permute(s->fft[0], fft_in_l);
            av_fft_calc(s->fft[0], fft_in_l);
            memcpy(data_hrtf_l + offset, fft_in_l, n_fft * sizeof(*fft_in_l));
            av_fft_permute(s->fft[0], fft_in_r);
            av_fft_calc(s->fft[0], fft_in_r);
            memcpy(data_hrtf_r + offset, fft_in_r, n_fft * sizeof(*fft_in_r));
        }
    }

    if (s->type == TIME_DOMAIN) {
        memcpy(s->data_ir[0], data_ir_l, sizeof(float) * nb_irs * FFALIGN(ir_len, 16));
        memcpy(s->data_ir[1], data_ir_r, sizeof(float) * nb_irs * FFALIGN(ir_len, 16));

        av_freep(&data_ir_l);
        av_freep(&data_ir_r);
    } else {
        s->data_hrtf[0] = av_malloc_array(n_fft * s->nb_irs, sizeof(FFTComplex));
        s->data_hrtf[1] = av_malloc_array(n_fft * s->nb_irs, sizeof(FFTComplex));
        if (!s->data_hrtf[0] || !s->data_hrtf[1]) {
            av_freep(&data_hrtf_l);
            av_freep(&data_hrtf_r);
            av_freep(&fft_in_l);
            av_freep(&fft_in_r);
            return AVERROR(ENOMEM);
        }

        memcpy(s->data_hrtf[0], data_hrtf_l, sizeof(FFTComplex) * nb_irs * n_fft);
        memcpy(s->data_hrtf[1], data_hrtf_r, sizeof(FFTComplex) * nb_irs * n_fft);

        av_freep(&data_hrtf_l);
        av_freep(&data_hrtf_r);
        av_freep(&fft_in_l);
        av_freep(&fft_in_r);
    }

    s->have_hrirs = 1;

    return 0;
}
20,607
1
static void nic_cleanup(VLANClientState *nc)
{
    dp8393xState *s = DO_UPCAST(NICState, nc, nc)->opaque;

    cpu_unregister_io_memory(s->mmio_index);

    qemu_del_timer(s->watchdog);
    qemu_free_timer(s->watchdog);

    g_free(s);
}
20,608
1
static void ide_init1(IDEBus *bus, int unit)
{
    static int drive_serial = 1;
    IDEState *s = &bus->ifs[unit];

    s->bus = bus;
    s->unit = unit;
    s->drive_serial = drive_serial++;
    /* we need at least 2k alignment for accessing CDROMs using O_DIRECT */
    s->io_buffer = qemu_memalign(2048, IDE_DMA_BUF_SECTORS*512 + 4);
    s->io_buffer_total_len = IDE_DMA_BUF_SECTORS*512 + 4;
    s->smart_selftest_data = qemu_blockalign(s->bs, 512);
    s->sector_write_timer = qemu_new_timer_ns(vm_clock,
                                              ide_sector_write_timer_cb, s);
}
20,609
1
static int iscsi_create(const char *filename, QEMUOptionParameter *options)
{
    int ret = 0;
    int64_t total_size = 0;
    BlockDriverState bs;
    IscsiLun *iscsilun = NULL;
    QDict *bs_options;

    memset(&bs, 0, sizeof(BlockDriverState));

    /* Read out options */
    while (options && options->name) {
        if (!strcmp(options->name, "size")) {
            total_size = options->value.n / BDRV_SECTOR_SIZE;
        }
        options++;
    }

    bs.opaque = g_malloc0(sizeof(struct IscsiLun));
    iscsilun = bs.opaque;

    bs_options = qdict_new();
    qdict_put(bs_options, "filename", qstring_from_str(filename));
    ret = iscsi_open(&bs, bs_options, 0);
    QDECREF(bs_options);

    if (ret != 0) {
        goto out;
    }
    if (iscsilun->nop_timer) {
        timer_del(iscsilun->nop_timer);
        timer_free(iscsilun->nop_timer);
    }
    if (iscsilun->type != TYPE_DISK) {
        ret = -ENODEV;
        goto out;
    }
    if (bs.total_sectors < total_size) {
        ret = -ENOSPC;
        goto out;
    }

    ret = 0;
out:
    if (iscsilun->iscsi != NULL) {
        iscsi_destroy_context(iscsilun->iscsi);
    }
    g_free(bs.opaque);
    return ret;
}
20,611
1
Exynos4210State *exynos4210_init(MemoryRegion *system_mem,
                                 unsigned long ram_size)
{
    int i, n;
    Exynos4210State *s = g_new(Exynos4210State, 1);
    qemu_irq gate_irq[EXYNOS4210_NCPUS][EXYNOS4210_IRQ_GATE_NINPUTS];
    unsigned long mem_size;
    DeviceState *dev;
    SysBusDevice *busdev;
    ObjectClass *cpu_oc;

    cpu_oc = cpu_class_by_name(TYPE_ARM_CPU, "cortex-a9");
    assert(cpu_oc);

    for (n = 0; n < EXYNOS4210_NCPUS; n++) {
        Object *cpuobj = object_new(object_class_get_name(cpu_oc));
        Error *err = NULL;

        /* By default A9 CPUs have EL3 enabled. This board does not currently
         * support EL3 so the CPU EL3 property is disabled before realization.
         */
        if (object_property_find(cpuobj, "has_el3", NULL)) {
            object_property_set_bool(cpuobj, false, "has_el3", &err);
            if (err) {
                error_report_err(err);
                exit(1);
            }
        }

        s->cpu[n] = ARM_CPU(cpuobj);
        object_property_set_int(cpuobj, EXYNOS4210_SMP_PRIVATE_BASE_ADDR,
                                "reset-cbar", &error_abort);
        object_property_set_bool(cpuobj, true, "realized", &err);
        if (err) {
            error_report_err(err);
            exit(1);
        }
    }

    /*** IRQs ***/

    s->irq_table = exynos4210_init_irq(&s->irqs);

    /* IRQ Gate */
    for (i = 0; i < EXYNOS4210_NCPUS; i++) {
        dev = qdev_create(NULL, "exynos4210.irq_gate");
        qdev_prop_set_uint32(dev, "n_in", EXYNOS4210_IRQ_GATE_NINPUTS);
        qdev_init_nofail(dev);
        /* Get IRQ Gate input in gate_irq */
        for (n = 0; n < EXYNOS4210_IRQ_GATE_NINPUTS; n++) {
            gate_irq[i][n] = qdev_get_gpio_in(dev, n);
        }
        busdev = SYS_BUS_DEVICE(dev);

        /* Connect IRQ Gate output to CPU's IRQ line */
        sysbus_connect_irq(busdev, 0,
                           qdev_get_gpio_in(DEVICE(s->cpu[i]), ARM_CPU_IRQ));
    }

    /* Private memory region and Internal GIC */
    dev = qdev_create(NULL, "a9mpcore_priv");
    qdev_prop_set_uint32(dev, "num-cpu", EXYNOS4210_NCPUS);
    qdev_init_nofail(dev);
    busdev = SYS_BUS_DEVICE(dev);
    sysbus_mmio_map(busdev, 0, EXYNOS4210_SMP_PRIVATE_BASE_ADDR);
    for (n = 0; n < EXYNOS4210_NCPUS; n++) {
        sysbus_connect_irq(busdev, n, gate_irq[n][0]);
    }
    for (n = 0; n < EXYNOS4210_INT_GIC_NIRQ; n++) {
        s->irqs.int_gic_irq[n] = qdev_get_gpio_in(dev, n);
    }

    /* Cache controller */
    sysbus_create_simple("l2x0", EXYNOS4210_L2X0_BASE_ADDR, NULL);

    /* External GIC */
    dev = qdev_create(NULL, "exynos4210.gic");
    qdev_prop_set_uint32(dev, "num-cpu", EXYNOS4210_NCPUS);
    qdev_init_nofail(dev);
    busdev = SYS_BUS_DEVICE(dev);
    /* Map CPU interface */
    sysbus_mmio_map(busdev, 0, EXYNOS4210_EXT_GIC_CPU_BASE_ADDR);
    /* Map Distributer interface */
    sysbus_mmio_map(busdev, 1, EXYNOS4210_EXT_GIC_DIST_BASE_ADDR);
    for (n = 0; n < EXYNOS4210_NCPUS; n++) {
        sysbus_connect_irq(busdev, n, gate_irq[n][1]);
    }
    for (n = 0; n < EXYNOS4210_EXT_GIC_NIRQ; n++) {
        s->irqs.ext_gic_irq[n] = qdev_get_gpio_in(dev, n);
    }

    /* Internal Interrupt Combiner */
    dev = qdev_create(NULL, "exynos4210.combiner");
    qdev_init_nofail(dev);
    busdev = SYS_BUS_DEVICE(dev);
    for (n = 0; n < EXYNOS4210_MAX_INT_COMBINER_OUT_IRQ; n++) {
        sysbus_connect_irq(busdev, n, s->irqs.int_gic_irq[n]);
    }
    exynos4210_combiner_get_gpioin(&s->irqs, dev, 0);
    sysbus_mmio_map(busdev, 0, EXYNOS4210_INT_COMBINER_BASE_ADDR);

    /* External Interrupt Combiner */
    dev = qdev_create(NULL, "exynos4210.combiner");
    qdev_prop_set_uint32(dev, "external", 1);
    qdev_init_nofail(dev);
    busdev = SYS_BUS_DEVICE(dev);
    for (n = 0; n < EXYNOS4210_MAX_INT_COMBINER_OUT_IRQ; n++) {
        sysbus_connect_irq(busdev, n, s->irqs.ext_gic_irq[n]);
    }
    exynos4210_combiner_get_gpioin(&s->irqs, dev, 1);
    sysbus_mmio_map(busdev, 0, EXYNOS4210_EXT_COMBINER_BASE_ADDR);

    /* Initialize board IRQs.
     */
    exynos4210_init_board_irqs(&s->irqs);

    /*** Memory ***/

    /* Chip-ID and OMR */
    memory_region_init_io(&s->chipid_mem, NULL, &exynos4210_chipid_and_omr_ops,
                          NULL, "exynos4210.chipid", sizeof(chipid_and_omr));
    memory_region_add_subregion(system_mem, EXYNOS4210_CHIPID_ADDR,
                                &s->chipid_mem);

    /* Internal ROM */
    memory_region_init_ram(&s->irom_mem, NULL, "exynos4210.irom",
                           EXYNOS4210_IROM_SIZE, &error_abort);
    vmstate_register_ram_global(&s->irom_mem);
    memory_region_set_readonly(&s->irom_mem, true);
    memory_region_add_subregion(system_mem, EXYNOS4210_IROM_BASE_ADDR,
                                &s->irom_mem);
    /* mirror of iROM */
    memory_region_init_alias(&s->irom_alias_mem, NULL, "exynos4210.irom_alias",
                             &s->irom_mem, 0, EXYNOS4210_IROM_SIZE);
    memory_region_set_readonly(&s->irom_alias_mem, true);
    memory_region_add_subregion(system_mem, EXYNOS4210_IROM_MIRROR_BASE_ADDR,
                                &s->irom_alias_mem);

    /* Internal RAM */
    memory_region_init_ram(&s->iram_mem, NULL, "exynos4210.iram",
                           EXYNOS4210_IRAM_SIZE, &error_abort);
    vmstate_register_ram_global(&s->iram_mem);
    memory_region_add_subregion(system_mem, EXYNOS4210_IRAM_BASE_ADDR,
                                &s->iram_mem);

    /* DRAM */
    mem_size = ram_size;
    if (mem_size > EXYNOS4210_DRAM_MAX_SIZE) {
        memory_region_init_ram(&s->dram1_mem, NULL, "exynos4210.dram1",
                               mem_size - EXYNOS4210_DRAM_MAX_SIZE,
                               &error_abort);
        vmstate_register_ram_global(&s->dram1_mem);
        memory_region_add_subregion(system_mem, EXYNOS4210_DRAM1_BASE_ADDR,
                                    &s->dram1_mem);
        mem_size = EXYNOS4210_DRAM_MAX_SIZE;
    }
    memory_region_init_ram(&s->dram0_mem, NULL, "exynos4210.dram0", mem_size,
                           &error_abort);
    vmstate_register_ram_global(&s->dram0_mem);
    memory_region_add_subregion(system_mem, EXYNOS4210_DRAM0_BASE_ADDR,
                                &s->dram0_mem);

    /* PMU.
     * The only reason of existence at the moment is that secondary CPU boot
     * loader uses PMU INFORM5 register as a holding pen.
     */
    sysbus_create_simple("exynos4210.pmu", EXYNOS4210_PMU_BASE_ADDR, NULL);

    /* PWM */
    sysbus_create_varargs("exynos4210.pwm", EXYNOS4210_PWM_BASE_ADDR,
                          s->irq_table[exynos4210_get_irq(22, 0)],
                          s->irq_table[exynos4210_get_irq(22, 1)],
                          s->irq_table[exynos4210_get_irq(22, 2)],
                          s->irq_table[exynos4210_get_irq(22, 3)],
                          s->irq_table[exynos4210_get_irq(22, 4)],
                          NULL);
    /* RTC */
    sysbus_create_varargs("exynos4210.rtc", EXYNOS4210_RTC_BASE_ADDR,
                          s->irq_table[exynos4210_get_irq(23, 0)],
                          s->irq_table[exynos4210_get_irq(23, 1)],
                          NULL);

    /* Multi Core Timer */
    dev = qdev_create(NULL, "exynos4210.mct");
    qdev_init_nofail(dev);
    busdev = SYS_BUS_DEVICE(dev);
    for (n = 0; n < 4; n++) {
        /* Connect global timer interrupts to Combiner gpio_in */
        sysbus_connect_irq(busdev, n,
                           s->irq_table[exynos4210_get_irq(1, 4 + n)]);
    }
    /* Connect local timer interrupts to Combiner gpio_in */
    sysbus_connect_irq(busdev, 4,
                       s->irq_table[exynos4210_get_irq(51, 0)]);
    sysbus_connect_irq(busdev, 5,
                       s->irq_table[exynos4210_get_irq(35, 3)]);
    sysbus_mmio_map(busdev, 0, EXYNOS4210_MCT_BASE_ADDR);

    /*** I2C ***/
    for (n = 0; n < EXYNOS4210_I2C_NUMBER; n++) {
        uint32_t addr = EXYNOS4210_I2C_BASE_ADDR + EXYNOS4210_I2C_SHIFT * n;
        qemu_irq i2c_irq;

        if (n < 8) {
            i2c_irq = s->irq_table[exynos4210_get_irq(EXYNOS4210_I2C_INTG, n)];
        } else {
            i2c_irq = s->irq_table[exynos4210_get_irq(EXYNOS4210_HDMI_INTG, 1)];
        }

        dev = qdev_create(NULL, "exynos4210.i2c");
        qdev_init_nofail(dev);
        busdev = SYS_BUS_DEVICE(dev);
        sysbus_connect_irq(busdev, 0, i2c_irq);
        sysbus_mmio_map(busdev, 0, addr);
        s->i2c_if[n] = (I2CBus *)qdev_get_child_bus(dev, "i2c");
    }

    /*** UARTs ***/
    exynos4210_uart_create(EXYNOS4210_UART0_BASE_ADDR,
                           EXYNOS4210_UART0_FIFO_SIZE, 0, NULL,
                           s->irq_table[exynos4210_get_irq(EXYNOS4210_UART_INT_GRP, 0)]);
    exynos4210_uart_create(EXYNOS4210_UART1_BASE_ADDR,
                           EXYNOS4210_UART1_FIFO_SIZE, 1, NULL,
                           s->irq_table[exynos4210_get_irq(EXYNOS4210_UART_INT_GRP, 1)]);
    exynos4210_uart_create(EXYNOS4210_UART2_BASE_ADDR,
                           EXYNOS4210_UART2_FIFO_SIZE, 2, NULL,
                           s->irq_table[exynos4210_get_irq(EXYNOS4210_UART_INT_GRP, 2)]);
    exynos4210_uart_create(EXYNOS4210_UART3_BASE_ADDR,
                           EXYNOS4210_UART3_FIFO_SIZE, 3, NULL,
                           s->irq_table[exynos4210_get_irq(EXYNOS4210_UART_INT_GRP, 3)]);

    /*** Display controller (FIMD) ***/
    sysbus_create_varargs("exynos4210.fimd", EXYNOS4210_FIMD0_BASE_ADDR,
                          s->irq_table[exynos4210_get_irq(11, 0)],
                          s->irq_table[exynos4210_get_irq(11, 1)],
                          s->irq_table[exynos4210_get_irq(11, 2)],
                          NULL);

    sysbus_create_simple(TYPE_EXYNOS4210_EHCI, EXYNOS4210_EHCI_BASE_ADDR,
                         s->irq_table[exynos4210_get_irq(28, 3)]);

    return s;
}
20,612
0
void ff_put_h264_qpel8_mc22_msa(uint8_t *dst, const uint8_t *src,
                                ptrdiff_t stride)
{
    avc_luma_mid_8w_msa(src - (2 * stride) - 2, stride, dst, stride, 8);
}
20,615
0
static int fbdev_write_packet(AVFormatContext *h, AVPacket *pkt)
{
    FBDevContext *fbdev = h->priv_data;
    uint8_t *pin, *pout;
    enum AVPixelFormat fb_pix_fmt;
    int disp_height;
    int bytes_to_copy;
    AVCodecContext *codec_ctx = h->streams[fbdev->index]->codec;
    enum AVPixelFormat video_pix_fmt = codec_ctx->pix_fmt;
    int video_width = codec_ctx->width;
    int video_height = codec_ctx->height;
    int bytes_per_pixel = ((codec_ctx->bits_per_coded_sample + 7) >> 3);
    int src_line_size = video_width * bytes_per_pixel;
    int i;

    if (fbdev->index != pkt->stream_index)
        return 0;

    if (ioctl(fbdev->fd, FBIOGET_VSCREENINFO, &fbdev->varinfo) < 0)
        av_log(h, AV_LOG_WARNING,
               "Error refreshing variable info: %s\n", av_err2str(AVERROR(errno)));

    fb_pix_fmt = ff_get_pixfmt_from_fb_varinfo(&fbdev->varinfo);

    if (fb_pix_fmt != video_pix_fmt) {
        av_log(h, AV_LOG_ERROR, "Pixel format %s is not supported, use %s\n",
               av_get_pix_fmt_name(video_pix_fmt), av_get_pix_fmt_name(fb_pix_fmt));
        return AVERROR(EINVAL);
    }

    disp_height = FFMIN(fbdev->varinfo.yres, video_height);
    bytes_to_copy = FFMIN(fbdev->varinfo.xres, video_width) * bytes_per_pixel;

    pin  = pkt->data;
    pout = fbdev->data +
           bytes_per_pixel * fbdev->varinfo.xoffset +
           fbdev->varinfo.yoffset * fbdev->fixinfo.line_length;

    if (fbdev->xoffset) {
        if (fbdev->xoffset < 0) {
            if (-fbdev->xoffset >= video_width) //nothing to display
                return 0;
            bytes_to_copy += fbdev->xoffset * bytes_per_pixel;
            pin -= fbdev->xoffset * bytes_per_pixel;
        } else {
            int diff = (video_width + fbdev->xoffset) - fbdev->varinfo.xres;
            if (diff > 0) {
                if (diff >= video_width) //nothing to display
                    return 0;
                bytes_to_copy -= diff * bytes_per_pixel;
            }
            pout += bytes_per_pixel * fbdev->xoffset;
        }
    }

    if (fbdev->yoffset) {
        if (fbdev->yoffset < 0) {
            if (-fbdev->yoffset >= video_height) //nothing to display
                return 0;
            disp_height += fbdev->yoffset;
            pin -= fbdev->yoffset * src_line_size;
        } else {
            int diff = (video_height + fbdev->yoffset) - fbdev->varinfo.yres;
            if (diff > 0) {
                if (diff >= video_height) //nothing to display
                    return 0;
                disp_height -= diff;
            }
            pout += fbdev->yoffset * fbdev->fixinfo.line_length;
        }
    }

    for (i = 0; i < disp_height; i++) {
        memcpy(pout, pin, bytes_to_copy);
        pout += fbdev->fixinfo.line_length;
        pin  += src_line_size;
    }

    return 0;
}
20,616
0
static void mxf_read_pixel_layout(ByteIOContext *pb, MXFDescriptor *descriptor)
{
    int code, value, ofs = 0;
    char layout[16] = {};

    do {
        code = get_byte(pb);
        value = get_byte(pb);
        dprintf(NULL, "pixel layout: code %#x\n", code);
        if (ofs < 16) {
            layout[ofs++] = code;
            layout[ofs++] = value;
        }
    } while (code != 0); /* SMPTE 377M E.2.46 */

    ff_mxf_decode_pixel_layout(layout, &descriptor->pix_fmt);
}
20,619
1
static int xmv_read_header(AVFormatContext *s)
{
    XMVDemuxContext *xmv = s->priv_data;
    AVIOContext *pb = s->pb;
    AVStream *vst = NULL;
    uint32_t file_version;
    uint32_t this_packet_size;
    uint16_t audio_track;
    int ret;

    avio_skip(pb, 4); /* Next packet size */

    this_packet_size = avio_rl32(pb);

    avio_skip(pb, 4); /* Max packet size */
    avio_skip(pb, 4); /* "xobX" */

    file_version = avio_rl32(pb);
    if ((file_version != 4) && (file_version != 2))
        avpriv_request_sample(s, "Uncommon version %d", file_version);

    /* Video track */

    vst = avformat_new_stream(s, NULL);
    if (!vst)
        return AVERROR(ENOMEM);

    avpriv_set_pts_info(vst, 32, 1, 1000);

    vst->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    vst->codec->codec_id   = AV_CODEC_ID_WMV2;
    vst->codec->codec_tag  = MKBETAG('W', 'M', 'V', '2');
    vst->codec->width      = avio_rl32(pb);
    vst->codec->height     = avio_rl32(pb);

    vst->duration = avio_rl32(pb);

    xmv->video.stream_index = vst->index;

    /* Audio tracks */

    xmv->audio_track_count = avio_rl16(pb);

    avio_skip(pb, 2); /* Unknown (padding?) */

    xmv->audio_tracks = av_malloc(xmv->audio_track_count * sizeof(XMVAudioTrack));
    if (!xmv->audio_tracks)
        return AVERROR(ENOMEM);

    xmv->audio = av_malloc(xmv->audio_track_count * sizeof(XMVAudioPacket));
    if (!xmv->audio)
        return AVERROR(ENOMEM);

    for (audio_track = 0; audio_track < xmv->audio_track_count; audio_track++) {
        XMVAudioTrack  *track  = &xmv->audio_tracks[audio_track];
        XMVAudioPacket *packet = &xmv->audio       [audio_track];
        AVStream *ast = NULL;

        track->compression     = avio_rl16(pb);
        track->channels        = avio_rl16(pb);
        track->sample_rate     = avio_rl32(pb);
        track->bits_per_sample = avio_rl16(pb);
        track->flags           = avio_rl16(pb);

        track->bit_rate      = track->bits_per_sample *
                               track->sample_rate *
                               track->channels;
        track->block_align   = 36 * track->channels;
        track->block_samples = 64;

        track->codec_id = ff_wav_codec_get_id(track->compression,
                                              track->bits_per_sample);

        packet->track        = track;
        packet->stream_index = -1;

        packet->frame_size  = 0;
        packet->block_count = 0;

        /* TODO: ADPCM'd 5.1 sound is encoded in three separate streams.
         * Those need to be interleaved to a proper 5.1 stream. */
        if (track->flags & XMV_AUDIO_ADPCM51)
            av_log(s, AV_LOG_WARNING, "Unsupported 5.1 ADPCM audio stream "
                                      "(0x%04X)\n", track->flags);

        if (!track->channels || !track->sample_rate) {
            av_log(s, AV_LOG_ERROR, "Invalid parameters for audio track %d.\n",
                   audio_track);
            ret = AVERROR_INVALIDDATA;
            goto fail;
        }

        ast = avformat_new_stream(s, NULL);
        if (!ast)
            return AVERROR(ENOMEM);

        ast->codec->codec_type            = AVMEDIA_TYPE_AUDIO;
        ast->codec->codec_id              = track->codec_id;
        ast->codec->codec_tag             = track->compression;
        ast->codec->channels              = track->channels;
        ast->codec->sample_rate           = track->sample_rate;
        ast->codec->bits_per_coded_sample = track->bits_per_sample;
        ast->codec->bit_rate              = track->bit_rate;
        ast->codec->block_align           = 36 * track->channels;

        avpriv_set_pts_info(ast, 32, track->block_samples, track->sample_rate);

        packet->stream_index = ast->index;

        ast->duration = vst->duration;
    }

    /** Initialize the packet context */

    xmv->next_packet_offset = avio_tell(pb);
    xmv->next_packet_size = this_packet_size - xmv->next_packet_offset;
    xmv->stream_count = xmv->audio_track_count + 1;

    return 0;

fail:
    xmv_read_close(s);
    return ret;
}
20,621