| project | commit_id | target | func | idx |
|---|---|---|---|---|
| qemu | bec1631100323fac0900aea71043d5c4e22fc2fa | 0 |
	static void tcg_out_br(TCGContext *s, int label_index)
{
    TCGLabel *l = &s->labels[label_index];
    uint64_t imm;
    /* We pay attention here to not modify the branch target by reading
       the existing value and using it again. This ensures that caches and
       memory are kept coherent during retranslation. */
    if (l->has_value) {
        imm = l->u.value_ptr -  s->code_ptr;
    } else {
        imm = get_reloc_pcrel21b_slot2(s->code_ptr);
        tcg_out_reloc(s, s->code_ptr, R_IA64_PCREL21B, label_index, 0);
    }
    tcg_out_bundle(s, mmB,
                   INSN_NOP_M,
                   INSN_NOP_M,
                   tcg_opc_b1(TCG_REG_P0, OPC_BR_SPTK_MANY_B1, imm));
}
 | 18,104 | 
| qemu | ea776abca628d855e03c4929da3864985afd8aae | 0 |
	static uint32_t virtio_blk_get_features(VirtIODevice *vdev, uint32_t features)
{
    VirtIOBlock *s = to_virtio_blk(vdev);
    features |= (1 << VIRTIO_BLK_F_SEG_MAX);
    features |= (1 << VIRTIO_BLK_F_GEOMETRY);
    features |= (1 << VIRTIO_BLK_F_TOPOLOGY);
    features |= (1 << VIRTIO_BLK_F_BLK_SIZE);
    features |= (1 << VIRTIO_BLK_F_SCSI);
    features |= (1 << VIRTIO_BLK_F_CONFIG_WCE);
    if (bdrv_enable_write_cache(s->bs))
        features |= (1 << VIRTIO_BLK_F_WCE);
    if (bdrv_is_read_only(s->bs))
        features |= 1 << VIRTIO_BLK_F_RO;
    return features;
}
 | 18,105 | 
| FFmpeg | 53c05b1eacd5f7dbfa3651b45e797adaea0a5ff8 | 0 |
	static inline void write_back_non_zero_count(H264Context *h){
    MpegEncContext * const s = &h->s;
    const int mb_xy= s->mb_x + s->mb_y*s->mb_stride;
    int n;
    for( n = 0; n < 16+4+4; n++ )
        h->non_zero_count[mb_xy][n] = h->non_zero_count_cache[scan8[n]];
}
 | 18,106 | 
| qemu | 4be746345f13e99e468c60acbd3a355e8183e3ce | 0 |
	static void lm32_evr_init(MachineState *machine)
{
    const char *cpu_model = machine->cpu_model;
    const char *kernel_filename = machine->kernel_filename;
    LM32CPU *cpu;
    CPULM32State *env;
    DriveInfo *dinfo;
    MemoryRegion *address_space_mem =  get_system_memory();
    MemoryRegion *phys_ram = g_new(MemoryRegion, 1);
    qemu_irq *cpu_irq, irq[32];
    ResetInfo *reset_info;
    int i;
    /* memory map */
    hwaddr flash_base  = 0x04000000;
    size_t flash_sector_size       = 256 * 1024;
    size_t flash_size              = 32 * 1024 * 1024;
    hwaddr ram_base    = 0x08000000;
    size_t ram_size                = 64 * 1024 * 1024;
    hwaddr timer0_base = 0x80002000;
    hwaddr uart0_base  = 0x80006000;
    hwaddr timer1_base = 0x8000a000;
    int uart0_irq                  = 0;
    int timer0_irq                 = 1;
    int timer1_irq                 = 3;
    reset_info = g_malloc0(sizeof(ResetInfo));
    if (cpu_model == NULL) {
        cpu_model = "lm32-full";
    }
    cpu = cpu_lm32_init(cpu_model);
    if (cpu == NULL) {
        fprintf(stderr, "qemu: unable to find CPU '%s'\n", cpu_model);
        exit(1);
    }
    env = &cpu->env;
    reset_info->cpu = cpu;
    reset_info->flash_base = flash_base;
    memory_region_init_ram(phys_ram, NULL, "lm32_evr.sdram", ram_size,
                           &error_abort);
    vmstate_register_ram_global(phys_ram);
    memory_region_add_subregion(address_space_mem, ram_base, phys_ram);
    dinfo = drive_get(IF_PFLASH, 0, 0);
    /* Spansion S29NS128P */
    pflash_cfi02_register(flash_base, NULL, "lm32_evr.flash", flash_size,
                          dinfo ? blk_bs(blk_by_legacy_dinfo(dinfo)) : NULL,
                          flash_sector_size, flash_size / flash_sector_size,
                          1, 2, 0x01, 0x7e, 0x43, 0x00, 0x555, 0x2aa, 1);
    /* create irq lines */
    cpu_irq = qemu_allocate_irqs(cpu_irq_handler, cpu, 1);
    env->pic_state = lm32_pic_init(*cpu_irq);
    for (i = 0; i < 32; i++) {
        irq[i] = qdev_get_gpio_in(env->pic_state, i);
    }
    sysbus_create_simple("lm32-uart", uart0_base, irq[uart0_irq]);
    sysbus_create_simple("lm32-timer", timer0_base, irq[timer0_irq]);
    sysbus_create_simple("lm32-timer", timer1_base, irq[timer1_irq]);
    /* make sure juart isn't the first chardev */
    env->juart_state = lm32_juart_init();
    reset_info->bootstrap_pc = flash_base;
    if (kernel_filename) {
        uint64_t entry;
        int kernel_size;
        kernel_size = load_elf(kernel_filename, NULL, NULL, &entry, NULL, NULL,
                               1, ELF_MACHINE, 0);
        reset_info->bootstrap_pc = entry;
        if (kernel_size < 0) {
            kernel_size = load_image_targphys(kernel_filename, ram_base,
                                              ram_size);
            reset_info->bootstrap_pc = ram_base;
        }
        if (kernel_size < 0) {
            fprintf(stderr, "qemu: could not load kernel '%s'\n",
                    kernel_filename);
            exit(1);
        }
    }
    qemu_register_reset(main_cpu_reset, reset_info);
}
 | 18,107 | 
| qemu | f17fd4fdf0df3d2f3444399d04c38d22b9a3e1b7 | 0 |
	static void test_qemu_strtosz_trailing(void)
{
    const char *str;
    char *endptr = NULL;
    int64_t res;
    str = "123xxx";
    res = qemu_strtosz_MiB(str, &endptr);
    g_assert_cmpint(res, ==, 123 * M_BYTE);
    g_assert(endptr == str + 3);
    res = qemu_strtosz(str, NULL);
    g_assert_cmpint(res, ==, -EINVAL);
    str = "1kiB";
    res = qemu_strtosz(str, &endptr);
    g_assert_cmpint(res, ==, 1024);
    g_assert(endptr == str + 2);
    res = qemu_strtosz(str, NULL);
    g_assert_cmpint(res, ==, -EINVAL);
}
 | 18,108 | 
| qemu | 28213cb6a61a724e2cb1e3a76d2bb17aa0ce9b36 | 0 |
	build_header(BIOSLinker *linker, GArray *table_data,
             AcpiTableHeader *h, const char *sig, int len, uint8_t rev,
             const char *oem_id, const char *oem_table_id)
{
    memcpy(&h->signature, sig, 4);
    h->length = cpu_to_le32(len);
    h->revision = rev;
    if (oem_id) {
        strncpy((char *)h->oem_id, oem_id, sizeof h->oem_id);
    } else {
        memcpy(h->oem_id, ACPI_BUILD_APPNAME6, 6);
    }
    if (oem_table_id) {
        strncpy((char *)h->oem_table_id, oem_table_id, sizeof(h->oem_table_id));
    } else {
        memcpy(h->oem_table_id, ACPI_BUILD_APPNAME4, 4);
        memcpy(h->oem_table_id + 4, sig, 4);
    }
    h->oem_revision = cpu_to_le32(1);
    memcpy(h->asl_compiler_id, ACPI_BUILD_APPNAME4, 4);
    h->asl_compiler_revision = cpu_to_le32(1);
    h->checksum = 0;
    /* Checksum to be filled in by Guest linker */
    bios_linker_loader_add_checksum(linker, ACPI_BUILD_TABLE_FILE,
                                    h, len, &h->checksum);
}
 | 18,109 | 
| qemu | 03fc0548b70393b0c8d43703591a9e34fb8e3123 | 0 |
	static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
                       const int *const_args)
{
    uint8_t *old_code_ptr = s->code_ptr;
    tcg_out_op_t(s, opc);
    switch (opc) {
    case INDEX_op_exit_tb:
        tcg_out64(s, args[0]);
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_offset) {
            /* Direct jump method. */
            assert(args[0] < ARRAY_SIZE(s->tb_jmp_offset));
            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
            tcg_out32(s, 0);
        } else {
            /* Indirect jump method. */
            TODO();
        }
        assert(args[0] < ARRAY_SIZE(s->tb_next_offset));
        s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
        break;
    case INDEX_op_br:
        tci_out_label(s, args[0]);
        break;
    case INDEX_op_call:
        tcg_out_ri(s, const_args[0], args[0]);
        break;
    case INDEX_op_setcond_i32:
        tcg_out_r(s, args[0]);
        tcg_out_r(s, args[1]);
        tcg_out_ri32(s, const_args[2], args[2]);
        tcg_out8(s, args[3]);   /* condition */
        break;
#if TCG_TARGET_REG_BITS == 32
    case INDEX_op_setcond2_i32:
        /* setcond2_i32 cond, t0, t1_low, t1_high, t2_low, t2_high */
        tcg_out_r(s, args[0]);
        tcg_out_r(s, args[1]);
        tcg_out_r(s, args[2]);
        tcg_out_ri32(s, const_args[3], args[3]);
        tcg_out_ri32(s, const_args[4], args[4]);
        tcg_out8(s, args[5]);   /* condition */
        break;
#elif TCG_TARGET_REG_BITS == 64
    case INDEX_op_setcond_i64:
        tcg_out_r(s, args[0]);
        tcg_out_r(s, args[1]);
        tcg_out_ri64(s, const_args[2], args[2]);
        tcg_out8(s, args[3]);   /* condition */
        break;
#endif
    case INDEX_op_movi_i32:
        TODO(); /* Handled by tcg_out_movi? */
        break;
    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
        tcg_out_r(s, args[0]);
        tcg_out_r(s, args[1]);
        assert(args[2] == (uint32_t)args[2]);
        tcg_out32(s, args[2]);
        break;
    case INDEX_op_add_i32:
    case INDEX_op_sub_i32:
    case INDEX_op_mul_i32:
    case INDEX_op_and_i32:
    case INDEX_op_andc_i32:     /* Optional (TCG_TARGET_HAS_andc_i32). */
    case INDEX_op_eqv_i32:      /* Optional (TCG_TARGET_HAS_eqv_i32). */
    case INDEX_op_nand_i32:     /* Optional (TCG_TARGET_HAS_nand_i32). */
    case INDEX_op_nor_i32:      /* Optional (TCG_TARGET_HAS_nor_i32). */
    case INDEX_op_or_i32:
    case INDEX_op_orc_i32:      /* Optional (TCG_TARGET_HAS_orc_i32). */
    case INDEX_op_xor_i32:
    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
    case INDEX_op_rotl_i32:     /* Optional (TCG_TARGET_HAS_rot_i32). */
    case INDEX_op_rotr_i32:     /* Optional (TCG_TARGET_HAS_rot_i32). */
        tcg_out_r(s, args[0]);
        tcg_out_ri32(s, const_args[1], args[1]);
        tcg_out_ri32(s, const_args[2], args[2]);
        break;
    case INDEX_op_deposit_i32:  /* Optional (TCG_TARGET_HAS_deposit_i32). */
        tcg_out_r(s, args[0]);
        tcg_out_r(s, args[1]);
        tcg_out_r(s, args[2]);
        assert(args[3] <= UINT8_MAX);
        tcg_out8(s, args[3]);
        assert(args[4] <= UINT8_MAX);
        tcg_out8(s, args[4]);
        break;
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_mov_i64:
    case INDEX_op_movi_i64:
        TODO();
        break;
    case INDEX_op_add_i64:
    case INDEX_op_sub_i64:
    case INDEX_op_mul_i64:
    case INDEX_op_and_i64:
    case INDEX_op_andc_i64:     /* Optional (TCG_TARGET_HAS_andc_i64). */
    case INDEX_op_eqv_i64:      /* Optional (TCG_TARGET_HAS_eqv_i64). */
    case INDEX_op_nand_i64:     /* Optional (TCG_TARGET_HAS_nand_i64). */
    case INDEX_op_nor_i64:      /* Optional (TCG_TARGET_HAS_nor_i64). */
    case INDEX_op_or_i64:
    case INDEX_op_orc_i64:      /* Optional (TCG_TARGET_HAS_orc_i64). */
    case INDEX_op_xor_i64:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i64:
        /* TODO: Implementation of rotl_i64, rotr_i64 missing in tci.c. */
    case INDEX_op_rotl_i64:     /* Optional (TCG_TARGET_HAS_rot_i64). */
    case INDEX_op_rotr_i64:     /* Optional (TCG_TARGET_HAS_rot_i64). */
        tcg_out_r(s, args[0]);
        tcg_out_ri64(s, const_args[1], args[1]);
        tcg_out_ri64(s, const_args[2], args[2]);
        break;
    case INDEX_op_deposit_i64:  /* Optional (TCG_TARGET_HAS_deposit_i64). */
        tcg_out_r(s, args[0]);
        tcg_out_r(s, args[1]);
        tcg_out_r(s, args[2]);
        assert(args[3] <= UINT8_MAX);
        tcg_out8(s, args[3]);
        assert(args[4] <= UINT8_MAX);
        tcg_out8(s, args[4]);
        break;
    case INDEX_op_div_i64:      /* Optional (TCG_TARGET_HAS_div_i64). */
    case INDEX_op_divu_i64:     /* Optional (TCG_TARGET_HAS_div_i64). */
    case INDEX_op_rem_i64:      /* Optional (TCG_TARGET_HAS_div_i64). */
    case INDEX_op_remu_i64:     /* Optional (TCG_TARGET_HAS_div_i64). */
        TODO();
        break;
    case INDEX_op_div2_i64:     /* Optional (TCG_TARGET_HAS_div2_i64). */
    case INDEX_op_divu2_i64:    /* Optional (TCG_TARGET_HAS_div2_i64). */
        TODO();
        break;
    case INDEX_op_brcond_i64:
        tcg_out_r(s, args[0]);
        tcg_out_ri64(s, const_args[1], args[1]);
        tcg_out8(s, args[2]);           /* condition */
        tci_out_label(s, args[3]);
        break;
    case INDEX_op_bswap16_i64:  /* Optional (TCG_TARGET_HAS_bswap16_i64). */
    case INDEX_op_bswap32_i64:  /* Optional (TCG_TARGET_HAS_bswap32_i64). */
    case INDEX_op_bswap64_i64:  /* Optional (TCG_TARGET_HAS_bswap64_i64). */
    case INDEX_op_not_i64:      /* Optional (TCG_TARGET_HAS_not_i64). */
    case INDEX_op_neg_i64:      /* Optional (TCG_TARGET_HAS_neg_i64). */
    case INDEX_op_ext8s_i64:    /* Optional (TCG_TARGET_HAS_ext8s_i64). */
    case INDEX_op_ext8u_i64:    /* Optional (TCG_TARGET_HAS_ext8u_i64). */
    case INDEX_op_ext16s_i64:   /* Optional (TCG_TARGET_HAS_ext16s_i64). */
    case INDEX_op_ext16u_i64:   /* Optional (TCG_TARGET_HAS_ext16u_i64). */
    case INDEX_op_ext32s_i64:   /* Optional (TCG_TARGET_HAS_ext32s_i64). */
    case INDEX_op_ext32u_i64:   /* Optional (TCG_TARGET_HAS_ext32u_i64). */
#endif /* TCG_TARGET_REG_BITS == 64 */
    case INDEX_op_neg_i32:      /* Optional (TCG_TARGET_HAS_neg_i32). */
    case INDEX_op_not_i32:      /* Optional (TCG_TARGET_HAS_not_i32). */
    case INDEX_op_ext8s_i32:    /* Optional (TCG_TARGET_HAS_ext8s_i32). */
    case INDEX_op_ext16s_i32:   /* Optional (TCG_TARGET_HAS_ext16s_i32). */
    case INDEX_op_ext8u_i32:    /* Optional (TCG_TARGET_HAS_ext8u_i32). */
    case INDEX_op_ext16u_i32:   /* Optional (TCG_TARGET_HAS_ext16u_i32). */
    case INDEX_op_bswap16_i32:  /* Optional (TCG_TARGET_HAS_bswap16_i32). */
    case INDEX_op_bswap32_i32:  /* Optional (TCG_TARGET_HAS_bswap32_i32). */
        tcg_out_r(s, args[0]);
        tcg_out_r(s, args[1]);
        break;
    case INDEX_op_div_i32:      /* Optional (TCG_TARGET_HAS_div_i32). */
    case INDEX_op_divu_i32:     /* Optional (TCG_TARGET_HAS_div_i32). */
    case INDEX_op_rem_i32:      /* Optional (TCG_TARGET_HAS_div_i32). */
    case INDEX_op_remu_i32:     /* Optional (TCG_TARGET_HAS_div_i32). */
        tcg_out_r(s, args[0]);
        tcg_out_ri32(s, const_args[1], args[1]);
        tcg_out_ri32(s, const_args[2], args[2]);
        break;
    case INDEX_op_div2_i32:     /* Optional (TCG_TARGET_HAS_div2_i32). */
    case INDEX_op_divu2_i32:    /* Optional (TCG_TARGET_HAS_div2_i32). */
        TODO();
        break;
#if TCG_TARGET_REG_BITS == 32
    case INDEX_op_add2_i32:
    case INDEX_op_sub2_i32:
        tcg_out_r(s, args[0]);
        tcg_out_r(s, args[1]);
        tcg_out_r(s, args[2]);
        tcg_out_r(s, args[3]);
        tcg_out_r(s, args[4]);
        tcg_out_r(s, args[5]);
        break;
    case INDEX_op_brcond2_i32:
        tcg_out_r(s, args[0]);
        tcg_out_r(s, args[1]);
        tcg_out_ri32(s, const_args[2], args[2]);
        tcg_out_ri32(s, const_args[3], args[3]);
        tcg_out8(s, args[4]);           /* condition */
        tci_out_label(s, args[5]);
        break;
    case INDEX_op_mulu2_i32:
        tcg_out_r(s, args[0]);
        tcg_out_r(s, args[1]);
        tcg_out_r(s, args[2]);
        tcg_out_r(s, args[3]);
        break;
#endif
    case INDEX_op_brcond_i32:
        tcg_out_r(s, args[0]);
        tcg_out_ri32(s, const_args[1], args[1]);
        tcg_out8(s, args[2]);           /* condition */
        tci_out_label(s, args[3]);
        break;
    case INDEX_op_qemu_ld8u:
    case INDEX_op_qemu_ld8s:
    case INDEX_op_qemu_ld16u:
    case INDEX_op_qemu_ld16s:
    case INDEX_op_qemu_ld32:
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_qemu_ld32s:
    case INDEX_op_qemu_ld32u:
#endif
        tcg_out_r(s, *args++);
        tcg_out_r(s, *args++);
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
        tcg_out_r(s, *args++);
#endif
#ifdef CONFIG_SOFTMMU
        tcg_out_i(s, *args);
#endif
        break;
    case INDEX_op_qemu_ld64:
        tcg_out_r(s, *args++);
#if TCG_TARGET_REG_BITS == 32
        tcg_out_r(s, *args++);
#endif
        tcg_out_r(s, *args++);
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
        tcg_out_r(s, *args++);
#endif
#ifdef CONFIG_SOFTMMU
        tcg_out_i(s, *args);
#endif
        break;
    case INDEX_op_qemu_st8:
    case INDEX_op_qemu_st16:
    case INDEX_op_qemu_st32:
        tcg_out_r(s, *args++);
        tcg_out_r(s, *args++);
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
        tcg_out_r(s, *args++);
#endif
#ifdef CONFIG_SOFTMMU
        tcg_out_i(s, *args);
#endif
        break;
    case INDEX_op_qemu_st64:
        tcg_out_r(s, *args++);
#if TCG_TARGET_REG_BITS == 32
        tcg_out_r(s, *args++);
#endif
        tcg_out_r(s, *args++);
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
        tcg_out_r(s, *args++);
#endif
#ifdef CONFIG_SOFTMMU
        tcg_out_i(s, *args);
#endif
        break;
    case INDEX_op_end:
        TODO();
        break;
    default:
        fprintf(stderr, "Missing: %s\n", tcg_op_defs[opc].name);
        tcg_abort();
    }
    old_code_ptr[1] = s->code_ptr - old_code_ptr;
}
 | 18,110 | 
| qemu | 1510168e273a12a56e3bd4488b4b2904f5138e09 | 0 |
	static int usbredir_check_filter(USBRedirDevice *dev)
{
    if (dev->interface_info.interface_count == 0) {
        ERROR("No interface info for device\n");
        goto error;
    }
    if (dev->filter_rules) {
        if (!usbredirparser_peer_has_cap(dev->parser,
                                    usb_redir_cap_connect_device_version)) {
            ERROR("Device filter specified and peer does not have the "
                  "connect_device_version capability\n");
            goto error;
        }
        if (usbredirfilter_check(
                dev->filter_rules,
                dev->filter_rules_count,
                dev->device_info.device_class,
                dev->device_info.device_subclass,
                dev->device_info.device_protocol,
                dev->interface_info.interface_class,
                dev->interface_info.interface_subclass,
                dev->interface_info.interface_protocol,
                dev->interface_info.interface_count,
                dev->device_info.vendor_id,
                dev->device_info.product_id,
                dev->device_info.device_version_bcd,
                0) != 0) {
            goto error;
        }
    }
    return 0;
error:
    usbredir_device_disconnect(dev);
    if (usbredirparser_peer_has_cap(dev->parser, usb_redir_cap_filter)) {
        usbredirparser_send_filter_reject(dev->parser);
        usbredirparser_do_write(dev->parser);
    }
    return -1;
}
 | 18,112 | 
| qemu | f1c2dc7c866a939c39c14729290a21309a1c8a38 | 0 |
	int spapr_allocate_irq_block(int num, bool lsi)
{
    int first = -1;
    int i;
    for (i = 0; i < num; ++i) {
        int irq;
        irq = spapr_allocate_irq(0, lsi);
        if (!irq) {
            return -1;
        }
        if (0 == i) {
            first = irq;
        }
        /* If the above doesn't create a consecutive block then that's
         * an internal bug */
        assert(irq == (first + i));
    }
    return first;
}
 | 18,113 | 
| qemu | 6e4e6f0d403b1fb25f9dfdbe17754c643997753d | 0 |
	static void vfio_realize(PCIDevice *pdev, Error **errp)
{
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
    VFIODevice *vbasedev_iter;
    VFIOGroup *group;
    char *tmp, group_path[PATH_MAX], *group_name;
    Error *err = NULL;
    ssize_t len;
    struct stat st;
    int groupid;
    int i, ret;
    if (!vdev->vbasedev.sysfsdev) {
        if (!(~vdev->host.domain || ~vdev->host.bus ||
              ~vdev->host.slot || ~vdev->host.function)) {
            error_setg(errp, "No provided host device");
            error_append_hint(errp, "Use -vfio-pci,host=DDDD:BB:DD.F "
                              "or -vfio-pci,sysfsdev=PATH_TO_DEVICE\n");
            return;
        }
        vdev->vbasedev.sysfsdev =
            g_strdup_printf("/sys/bus/pci/devices/%04x:%02x:%02x.%01x",
                            vdev->host.domain, vdev->host.bus,
                            vdev->host.slot, vdev->host.function);
    }
    if (stat(vdev->vbasedev.sysfsdev, &st) < 0) {
        error_setg_errno(errp, errno, "no such host device");
        error_prepend(errp, ERR_PREFIX, vdev->vbasedev.sysfsdev);
        return;
    }
    vdev->vbasedev.name = g_strdup(basename(vdev->vbasedev.sysfsdev));
    vdev->vbasedev.ops = &vfio_pci_ops;
    vdev->vbasedev.type = VFIO_DEVICE_TYPE_PCI;
    tmp = g_strdup_printf("%s/iommu_group", vdev->vbasedev.sysfsdev);
    len = readlink(tmp, group_path, sizeof(group_path));
    g_free(tmp);
    if (len <= 0 || len >= sizeof(group_path)) {
        error_setg_errno(errp, len < 0 ? errno : ENAMETOOLONG,
                         "no iommu_group found");
        goto error;
    }
    group_path[len] = 0;
    group_name = basename(group_path);
    if (sscanf(group_name, "%d", &groupid) != 1) {
        error_setg_errno(errp, errno, "failed to read %s", group_path);
        goto error;
    }
    trace_vfio_realize(vdev->vbasedev.name, groupid);
    group = vfio_get_group(groupid, pci_device_iommu_address_space(pdev), errp);
    if (!group) {
        goto error;
    }
    QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
        if (strcmp(vbasedev_iter->name, vdev->vbasedev.name) == 0) {
            error_setg(errp, "device is already attached");
            vfio_put_group(group);
            goto error;
        }
    }
    ret = vfio_get_device(group, vdev->vbasedev.name, &vdev->vbasedev, errp);
    if (ret) {
        vfio_put_group(group);
        goto error;
    }
    vfio_populate_device(vdev, &err);
    if (err) {
        error_propagate(errp, err);
        goto error;
    }
    /* Get a copy of config space */
    ret = pread(vdev->vbasedev.fd, vdev->pdev.config,
                MIN(pci_config_size(&vdev->pdev), vdev->config_size),
                vdev->config_offset);
    if (ret < (int)MIN(pci_config_size(&vdev->pdev), vdev->config_size)) {
        ret = ret < 0 ? -errno : -EFAULT;
        error_setg_errno(errp, -ret, "failed to read device config space");
        goto error;
    }
    /* vfio emulates a lot for us, but some bits need extra love */
    vdev->emulated_config_bits = g_malloc0(vdev->config_size);
    /* QEMU can choose to expose the ROM or not */
    memset(vdev->emulated_config_bits + PCI_ROM_ADDRESS, 0xff, 4);
    /*
     * The PCI spec reserves vendor ID 0xffff as an invalid value.  The
     * device ID is managed by the vendor and need only be a 16-bit value.
     * Allow any 16-bit value for subsystem so they can be hidden or changed.
     */
    if (vdev->vendor_id != PCI_ANY_ID) {
        if (vdev->vendor_id >= 0xffff) {
            error_setg(errp, "invalid PCI vendor ID provided");
            goto error;
        }
        vfio_add_emulated_word(vdev, PCI_VENDOR_ID, vdev->vendor_id, ~0);
        trace_vfio_pci_emulated_vendor_id(vdev->vbasedev.name, vdev->vendor_id);
    } else {
        vdev->vendor_id = pci_get_word(pdev->config + PCI_VENDOR_ID);
    }
    if (vdev->device_id != PCI_ANY_ID) {
        if (vdev->device_id > 0xffff) {
            error_setg(errp, "invalid PCI device ID provided");
            goto error;
        }
        vfio_add_emulated_word(vdev, PCI_DEVICE_ID, vdev->device_id, ~0);
        trace_vfio_pci_emulated_device_id(vdev->vbasedev.name, vdev->device_id);
    } else {
        vdev->device_id = pci_get_word(pdev->config + PCI_DEVICE_ID);
    }
    if (vdev->sub_vendor_id != PCI_ANY_ID) {
        if (vdev->sub_vendor_id > 0xffff) {
            error_setg(errp, "invalid PCI subsystem vendor ID provided");
            goto error;
        }
        vfio_add_emulated_word(vdev, PCI_SUBSYSTEM_VENDOR_ID,
                               vdev->sub_vendor_id, ~0);
        trace_vfio_pci_emulated_sub_vendor_id(vdev->vbasedev.name,
                                              vdev->sub_vendor_id);
    }
    if (vdev->sub_device_id != PCI_ANY_ID) {
        if (vdev->sub_device_id > 0xffff) {
            error_setg(errp, "invalid PCI subsystem device ID provided");
            goto error;
        }
        vfio_add_emulated_word(vdev, PCI_SUBSYSTEM_ID, vdev->sub_device_id, ~0);
        trace_vfio_pci_emulated_sub_device_id(vdev->vbasedev.name,
                                              vdev->sub_device_id);
    }
    /* QEMU can change multi-function devices to single function, or reverse */
    vdev->emulated_config_bits[PCI_HEADER_TYPE] =
                                              PCI_HEADER_TYPE_MULTI_FUNCTION;
    /* Restore or clear multifunction, this is always controlled by QEMU */
    if (vdev->pdev.cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
        vdev->pdev.config[PCI_HEADER_TYPE] |= PCI_HEADER_TYPE_MULTI_FUNCTION;
    } else {
        vdev->pdev.config[PCI_HEADER_TYPE] &= ~PCI_HEADER_TYPE_MULTI_FUNCTION;
    }
    /*
     * Clear host resource mapping info.  If we choose not to register a
     * BAR, such as might be the case with the option ROM, we can get
     * confusing, unwritable, residual addresses from the host here.
     */
    memset(&vdev->pdev.config[PCI_BASE_ADDRESS_0], 0, 24);
    memset(&vdev->pdev.config[PCI_ROM_ADDRESS], 0, 4);
    vfio_pci_size_rom(vdev);
    vfio_msix_early_setup(vdev, &err);
    if (err) {
        error_propagate(errp, err);
        goto error;
    }
    vfio_bars_setup(vdev);
    ret = vfio_add_capabilities(vdev, errp);
    if (ret) {
        goto out_teardown;
    }
    if (vdev->vga) {
        vfio_vga_quirk_setup(vdev);
    }
    for (i = 0; i < PCI_ROM_SLOT; i++) {
        vfio_bar_quirk_setup(vdev, i);
    }
    if (!vdev->igd_opregion &&
        vdev->features & VFIO_FEATURE_ENABLE_IGD_OPREGION) {
        struct vfio_region_info *opregion;
        if (vdev->pdev.qdev.hotplugged) {
            error_setg(errp,
                       "cannot support IGD OpRegion feature on hotplugged "
                       "device");
            goto out_teardown;
        }
        ret = vfio_get_dev_region_info(&vdev->vbasedev,
                        VFIO_REGION_TYPE_PCI_VENDOR_TYPE | PCI_VENDOR_ID_INTEL,
                        VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION, &opregion);
        if (ret) {
            error_setg_errno(errp, -ret,
                             "does not support requested IGD OpRegion feature");
            goto out_teardown;
        }
        ret = vfio_pci_igd_opregion_init(vdev, opregion, errp);
        g_free(opregion);
        if (ret) {
            goto out_teardown;
        }
    }
    /* QEMU emulates all of MSI & MSIX */
    if (pdev->cap_present & QEMU_PCI_CAP_MSIX) {
        memset(vdev->emulated_config_bits + pdev->msix_cap, 0xff,
               MSIX_CAP_LENGTH);
    }
    if (pdev->cap_present & QEMU_PCI_CAP_MSI) {
        memset(vdev->emulated_config_bits + pdev->msi_cap, 0xff,
               vdev->msi_cap_size);
    }
    if (vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1)) {
        vdev->intx.mmap_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                                  vfio_intx_mmap_enable, vdev);
        pci_device_set_intx_routing_notifier(&vdev->pdev, vfio_intx_update);
        ret = vfio_intx_enable(vdev, errp);
        if (ret) {
            goto out_teardown;
        }
    }
    vfio_register_err_notifier(vdev);
    vfio_register_req_notifier(vdev);
    vfio_setup_resetfn_quirk(vdev);
    return;
out_teardown:
    pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
    vfio_teardown_msi(vdev);
    vfio_bars_exit(vdev);
error:
    error_prepend(errp, ERR_PREFIX, vdev->vbasedev.name);
}
 | 18,115 | 
| qemu | d9f62dde1303286b24ac8ce88be27e2b9b9c5f46 | 0 |
	static void qmp_input_push(QmpInputVisitor *qiv, QObject *obj, Error **errp)
{
    GHashTable *h;
    StackObject *tos = &qiv->stack[qiv->nb_stack];
    assert(obj);
    if (qiv->nb_stack >= QIV_STACK_SIZE) {
        error_setg(errp, "An internal buffer overran");
        return;
    }
    tos->obj = obj;
    assert(!tos->h);
    assert(!tos->entry);
    if (qiv->strict && qobject_type(obj) == QTYPE_QDICT) {
        h = g_hash_table_new(g_str_hash, g_str_equal);
        qdict_iter(qobject_to_qdict(obj), qdict_add_key, h);
        tos->h = h;
    } else if (qobject_type(obj) == QTYPE_QLIST) {
        tos->entry = qlist_first(qobject_to_qlist(obj));
        tos->first = true;
    }
    qiv->nb_stack++;
}
 | 18,116 | 
| FFmpeg | bf606334ad5ba9180d9a13682504bb1d7cb6ba3a | 0 |
	static int hls_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    HLSContext *c = s->priv_data;
    int ret, i, minvariant = -1;
    if (c->first_packet) {
        recheck_discard_flags(s, 1);
        c->first_packet = 0;
    }
start:
    c->end_of_segment = 0;
    for (i = 0; i < c->n_variants; i++) {
        struct variant *var = c->variants[i];
        /* Make sure we've got one buffered packet from each open variant
         * stream */
        if (var->needed && !var->pkt.data) {
            while (1) {
                int64_t ts_diff;
                AVStream *st;
                ret = av_read_frame(var->ctx, &var->pkt);
                if (ret < 0) {
                    if (!url_feof(&var->pb))
                        return ret;
                    reset_packet(&var->pkt);
                    break;
                } else {
                    if (c->first_timestamp == AV_NOPTS_VALUE)
                        c->first_timestamp = var->pkt.dts;
                }
                if (c->seek_timestamp == AV_NOPTS_VALUE)
                    break;
                if (var->pkt.dts == AV_NOPTS_VALUE) {
                    c->seek_timestamp = AV_NOPTS_VALUE;
                    break;
                }
                st = var->ctx->streams[var->pkt.stream_index];
                ts_diff = av_rescale_rnd(var->pkt.dts, AV_TIME_BASE,
                                         st->time_base.den, AV_ROUND_DOWN) -
                          c->seek_timestamp;
                if (ts_diff >= 0 && (c->seek_flags  & AVSEEK_FLAG_ANY ||
                                     var->pkt.flags & AV_PKT_FLAG_KEY)) {
                    c->seek_timestamp = AV_NOPTS_VALUE;
                    break;
                }
            }
        }
        /* Check if this stream has the packet with the lowest dts */
        if (var->pkt.data) {
            struct variant *minvar = c->variants[minvariant];
            if (minvariant < 0 ||
                av_compare_ts(var->pkt.dts, var->ctx->streams[var->pkt.stream_index]->time_base,
                              minvar->pkt.dts, minvar->ctx->streams[minvar->pkt.stream_index]->time_base) > 0)
                minvariant = i;
        }
    }
    if (c->end_of_segment) {
        if (recheck_discard_flags(s, 0))
            goto start;
    }
    /* If we got a packet, return it */
    if (minvariant >= 0) {
        *pkt = c->variants[minvariant]->pkt;
        pkt->stream_index += c->variants[minvariant]->stream_offset;
        reset_packet(&c->variants[minvariant]->pkt);
        return 0;
    }
    return AVERROR_EOF;
}
 | 18,117 | 
| qemu | bd269ebc82fbaa5fe7ce5bc7c1770ac8acecd884 | 0 |
	static int qio_dns_resolver_lookup_sync_inet(QIODNSResolver *resolver,
                                             SocketAddressLegacy *addr,
                                             size_t *naddrs,
                                             SocketAddressLegacy ***addrs,
                                             Error **errp)
{
    struct addrinfo ai, *res, *e;
    InetSocketAddress *iaddr = addr->u.inet.data;
    char port[33];
    char uaddr[INET6_ADDRSTRLEN + 1];
    char uport[33];
    int rc;
    Error *err = NULL;
    size_t i;
    *naddrs = 0;
    *addrs = NULL;
    memset(&ai, 0, sizeof(ai));
    ai.ai_flags = AI_PASSIVE;
    if (iaddr->has_numeric && iaddr->numeric) {
        ai.ai_flags |= AI_NUMERICHOST | AI_NUMERICSERV;
    }
    ai.ai_family = inet_ai_family_from_address(iaddr, &err);
    ai.ai_socktype = SOCK_STREAM;
    if (err) {
        error_propagate(errp, err);
        return -1;
    }
    if (iaddr->host == NULL) {
        error_setg(errp, "host not specified");
        return -1;
    }
    if (iaddr->port != NULL) {
        pstrcpy(port, sizeof(port), iaddr->port);
    } else {
        port[0] = '\0';
    }
    rc = getaddrinfo(strlen(iaddr->host) ? iaddr->host : NULL,
                     strlen(port) ? port : NULL, &ai, &res);
    if (rc != 0) {
        error_setg(errp, "address resolution failed for %s:%s: %s",
                   iaddr->host, port, gai_strerror(rc));
        return -1;
    }
    for (e = res; e != NULL; e = e->ai_next) {
        (*naddrs)++;
    }
    *addrs = g_new0(SocketAddressLegacy *, *naddrs);
    /* create socket + bind */
    for (i = 0, e = res; e != NULL; i++, e = e->ai_next) {
        SocketAddressLegacy *newaddr = g_new0(SocketAddressLegacy, 1);
        InetSocketAddress *newiaddr = g_new0(InetSocketAddress, 1);
        newaddr->u.inet.data = newiaddr;
        newaddr->type = SOCKET_ADDRESS_LEGACY_KIND_INET;
        getnameinfo((struct sockaddr *)e->ai_addr, e->ai_addrlen,
                    uaddr, INET6_ADDRSTRLEN, uport, 32,
                    NI_NUMERICHOST | NI_NUMERICSERV);
        *newiaddr = (InetSocketAddress){
            .host = g_strdup(uaddr),
            .port = g_strdup(uport),
            .has_numeric = true,
            .numeric = true,
            .has_to = iaddr->has_to,
            .to = iaddr->to,
            .has_ipv4 = false,
            .has_ipv6 = false,
        };
        (*addrs)[i] = newaddr;
    }
    freeaddrinfo(res);
    return 0;
}
 | 18,118 | 
| qemu | 7241f532c3adbebfc8689b878aec3f244043d147 | 0 |
	void helper_vmrun(target_ulong addr)
{
    uint32_t event_inj;
    uint32_t int_ctl;
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmrun! " TARGET_FMT_lx "\n", addr);
    env->vm_vmcb = addr;
    regs_to_env();
    /* save the current CPU state in the hsave page */
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr8), env->cr[8]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
    SVM_SAVE_SEG(env->vm_hsave, segs[R_ES], es);
    SVM_SAVE_SEG(env->vm_hsave, segs[R_CS], cs);
    SVM_SAVE_SEG(env->vm_hsave, segs[R_SS], ss);
    SVM_SAVE_SEG(env->vm_hsave, segs[R_DS], ds);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip), EIP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    /* We shift all the intercept bits so we can OR them with the TB
       flags later on */
    env->intercept            = (ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept)) << INTERCEPT_INTR) | INTERCEPT_SVM_MASK;
    env->intercept_cr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
    env->intercept_cr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
    env->intercept_dr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
    env->intercept_dr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
    env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
    env->gdt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
    env->idt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
    /* clear exit_info_2 so we behave like the real hardware */
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->cr[8] = int_ctl & V_TPR_MASK;
        cpu_set_apic_tpr(env, env->cr[8]);
        if (env->eflags & IF_MASK)
            env->hflags |= HF_HIF_MASK;
    }
#ifdef TARGET_X86_64
    env->efer = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer));
    env->hflags &= ~HF_LMA_MASK;
    if (env->efer & MSR_EFER_LMA)
       env->hflags |= HF_LMA_MASK;
#endif
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;
    CC_DST = 0xffffffff;
    SVM_LOAD_SEG(env->vm_vmcb, ES, es);
    SVM_LOAD_SEG(env->vm_vmcb, CS, cs);
    SVM_LOAD_SEG(env->vm_vmcb, SS, ss);
    SVM_LOAD_SEG(env->vm_vmcb, DS, ds);
    EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
    env->eip = EIP;
    ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
    /* FIXME: guest state consistency checks */
    switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
        case TLB_CONTROL_DO_NOTHING:
            break;
        case TLB_CONTROL_FLUSH_ALL_ASID:
            /* FIXME: this is not 100% correct but should work for now */
            tlb_flush(env, 1);
        break;
    }
    helper_stgi();
    regs_to_env();
    /* maybe we need to inject an event */
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
        if (loglevel & CPU_LOG_TB_IN_ASM)
            fprintf(logfile, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 1;
                env->exception_next_eip = -1;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "INTR");
                break;
        case SVM_EVTINJ_TYPE_NMI:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 1;
                env->exception_next_eip = EIP;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "NMI");
                break;
        case SVM_EVTINJ_TYPE_EXEPT:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = -1;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "EXEPT");
                break;
        case SVM_EVTINJ_TYPE_SOFT:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 1;
                env->exception_next_eip = EIP;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "SOFT");
                break;
        }
        if (loglevel & CPU_LOG_TB_IN_ASM)
            fprintf(logfile, " %#x %#x\n", env->exception_index, env->error_code);
    }
    if ((int_ctl & V_IRQ_MASK) || (env->intercept & INTERCEPT_VINTR)) {
        env->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }
    cpu_loop_exit();
}
 | 18,119 | 
| qemu | 4295e15aa730a95003a3639d6dad2eb1e65a59e2 | 0 |
	void qxl_spice_update_area(PCIQXLDevice *qxl, uint32_t surface_id,
                           struct QXLRect *area, struct QXLRect *dirty_rects,
                           uint32_t num_dirty_rects,
                           uint32_t clear_dirty_region,
                           qxl_async_io async)
{
    if (async == QXL_SYNC) {
        qxl->ssd.worker->update_area(qxl->ssd.worker, surface_id, area,
                        dirty_rects, num_dirty_rects, clear_dirty_region);
    } else {
#if SPICE_INTERFACE_QXL_MINOR >= 1
        spice_qxl_update_area_async(&qxl->ssd.qxl, surface_id, area,
                                    clear_dirty_region, 0);
#else
        abort();
#endif
    }
}
 | 18,120 | 
| qemu | 1ea879e5580f63414693655fcf0328559cdce138 | 0 |
	static int audio_validate_settings (audsettings_t *as)
{
    int invalid;
    invalid = as->nchannels != 1 && as->nchannels != 2;
    invalid |= as->endianness != 0 && as->endianness != 1;
    switch (as->fmt) {
    case AUD_FMT_S8:
    case AUD_FMT_U8:
    case AUD_FMT_S16:
    case AUD_FMT_U16:
    case AUD_FMT_S32:
    case AUD_FMT_U32:
        break;
    default:
        invalid = 1;
        break;
    }
    invalid |= as->freq <= 0;
    return invalid ? -1 : 0;
}
 | 18,121 | 
| qemu | 70bca53ffb811ea59dc090b3ca7825cf0bf346a7 | 0 |
	const ppc_def_t *kvmppc_host_cpu_def(void)
{
    uint32_t host_pvr = mfpvr();
    const ppc_def_t *base_spec;
    ppc_def_t *spec;
    uint32_t vmx = kvmppc_get_vmx();
    uint32_t dfp = kvmppc_get_dfp();
    base_spec = ppc_find_by_pvr(host_pvr);
    spec = g_malloc0(sizeof(*spec));
    memcpy(spec, base_spec, sizeof(*spec));
    /* Now fix up the spec with information we can query from the host */
    alter_insns(&spec->insns_flags, PPC_ALTIVEC, vmx > 0);
    alter_insns(&spec->insns_flags2, PPC2_VSX, vmx > 1);
    alter_insns(&spec->insns_flags2, PPC2_DFP, dfp);
    return spec;
}
 | 18,122 | 
| qemu | e3845e7c47cc3eaf35305c9c0f9d55ca3840b49b | 0 |
	I2CBus *piix4_pm_init(PCIBus *bus, int devfn, uint32_t smb_io_base,
                      qemu_irq sci_irq, qemu_irq smi_irq,
                      int kvm_enabled, FWCfgState *fw_cfg,
                      DeviceState **piix4_pm)
{
    DeviceState *dev;
    PIIX4PMState *s;
    dev = DEVICE(pci_create(bus, devfn, TYPE_PIIX4_PM));
    qdev_prop_set_uint32(dev, "smb_io_base", smb_io_base);
    if (piix4_pm) {
        *piix4_pm = dev;
    }
    s = PIIX4_PM(dev);
    s->irq = sci_irq;
    s->smi_irq = smi_irq;
    s->kvm_enabled = kvm_enabled;
    if (xen_enabled()) {
        s->use_acpi_pci_hotplug = false;
    }
    qdev_init_nofail(dev);
    if (fw_cfg) {
        uint8_t suspend[6] = {128, 0, 0, 129, 128, 128};
        suspend[3] = 1 | ((!s->disable_s3) << 7);
        suspend[4] = s->s4_val | ((!s->disable_s4) << 7);
        fw_cfg_add_file(fw_cfg, "etc/system-states", g_memdup(suspend, 6), 6);
    }
    return s->smb.smbus;
}
 | 18,123 | 
| qemu | b2bedb214469af55179d907a60cd67fed6b0779e | 0 |
	static void init_timetables( FM_OPL *OPL , int ARRATE , int DRRATE )
{
	int i;
	double rate;
	/* make attack rate & decay rate tables */
	for (i = 0;i < 4;i++) OPL->AR_TABLE[i] = OPL->DR_TABLE[i] = 0;
	for (i = 4;i <= 60;i++){
		rate  = OPL->freqbase;						/* frequency rate */
		if( i < 60 ) rate *= 1.0+(i&3)*0.25;		/* b0-1 : x1 , x1.25 , x1.5 , x1.75 */
		rate *= 1<<((i>>2)-1);						/* b2-5 : shift bit */
		rate *= (double)(EG_ENT<<ENV_BITS);
		OPL->AR_TABLE[i] = rate / ARRATE;
		OPL->DR_TABLE[i] = rate / DRRATE;
	}
	for (i = 60; i < ARRAY_SIZE(OPL->AR_TABLE); i++)
	{
		OPL->AR_TABLE[i] = EG_AED-1;
		OPL->DR_TABLE[i] = OPL->DR_TABLE[60];
	}
#if 0
	for (i = 0;i < 64 ;i++){	/* make for overflow area */
		LOG(LOG_WAR,("rate %2d , ar %f ms , dr %f ms \n",i,
			((double)(EG_ENT<<ENV_BITS) / OPL->AR_TABLE[i]) * (1000.0 / OPL->rate),
			((double)(EG_ENT<<ENV_BITS) / OPL->DR_TABLE[i]) * (1000.0 / OPL->rate) ));
	}
#endif
}
 | 18,124 | 
| qemu | f4b618360e5a81b097e2e35d52011bec3c63af68 | 0 |
	static int colo_packet_compare_all(Packet *spkt, Packet *ppkt)
{
    trace_colo_compare_main("compare all");
    return colo_packet_compare(ppkt, spkt);
}
 | 18,125 | 
| qemu | a8170e5e97ad17ca169c64ba87ae2f53850dab4c | 0 |
	static uint64_t fw_cfg_data_mem_read(void *opaque, target_phys_addr_t addr,
                                     unsigned size)
{
    return fw_cfg_read(opaque);
}
 | 18,126 | 
| FFmpeg | 87b041e0f099e5ae2d0ba2d2ebb7c9963b26ac54 | 0 |
	static void mov_write_uuidprof_tag(ByteIOContext *pb, AVFormatContext *s)
{
    AVCodecContext *VideoCodec = s->streams[0]->codec;
    AVCodecContext *AudioCodec = s->streams[1]->codec;
    int AudioRate = AudioCodec->sample_rate;
    int FrameRate = ((VideoCodec->time_base.den) * (0x10000))/ (VideoCodec->time_base.num);
    int audio_kbitrate= AudioCodec->bit_rate / 1000;
    int video_kbitrate= FFMIN(VideoCodec->bit_rate / 1000, 800 - audio_kbitrate);
    put_be32(pb, 0x94 ); /* size */
    put_tag(pb, "uuid");
    put_tag(pb, "PROF");
    put_be32(pb, 0x21d24fce ); /* 96 bit UUID */
    put_be32(pb, 0xbb88695c );
    put_be32(pb, 0xfac9c740 );
    put_be32(pb, 0x0 );  /* ? */
    put_be32(pb, 0x3 );  /* 3 sections ? */
    put_be32(pb, 0x14 ); /* size */
    put_tag(pb, "FPRF");
    put_be32(pb, 0x0 );  /* ? */
    put_be32(pb, 0x0 );  /* ? */
    put_be32(pb, 0x0 );  /* ? */
    put_be32(pb, 0x2c );  /* size */
    put_tag(pb, "APRF");   /* audio */
    put_be32(pb, 0x0 );
    put_be32(pb, 0x2 );   /* TrackID */
    put_tag(pb, "mp4a");
    put_be32(pb, 0x20f );
    put_be32(pb, 0x0 );
    put_be32(pb, audio_kbitrate);
    put_be32(pb, audio_kbitrate);
    put_be32(pb, AudioRate );
    put_be32(pb, AudioCodec->channels );
    put_be32(pb, 0x34 );  /* size */
    put_tag(pb, "VPRF");   /* video */
    put_be32(pb, 0x0 );
    put_be32(pb, 0x1 );    /* TrackID */
    put_tag(pb, "mp4v");
    put_be32(pb, 0x103 );
    put_be32(pb, 0x0 );
    put_be32(pb, video_kbitrate);
    put_be32(pb, video_kbitrate);
    put_be32(pb, FrameRate);
    put_be32(pb, FrameRate);
    put_be16(pb, VideoCodec->width);
    put_be16(pb, VideoCodec->height);
    put_be32(pb, 0x010001); /* ? */
}
 | 18,128 | 
| qemu | d3c348b6e3af3598bfcb755d59f8f4de80a2228a | 0 |
	static void xilinx_spips_reset(DeviceState *d)
{
    XilinxSPIPS *s = XILINX_SPIPS(d);
    int i;
    for (i = 0; i < XLNX_SPIPS_R_MAX; i++) {
        s->regs[i] = 0;
    }
    fifo8_reset(&s->rx_fifo);
    fifo8_reset(&s->rx_fifo);
    /* non zero resets */
    s->regs[R_CONFIG] |= MODEFAIL_GEN_EN;
    s->regs[R_SLAVE_IDLE_COUNT] = 0xFF;
    s->regs[R_TX_THRES] = 1;
    s->regs[R_RX_THRES] = 1;
    /* FIXME: move magic number definition somewhere sensible */
    s->regs[R_MOD_ID] = 0x01090106;
    s->regs[R_LQSPI_CFG] = R_LQSPI_CFG_RESET;
    s->link_state = 1;
    s->link_state_next = 1;
    s->link_state_next_when = 0;
    s->snoop_state = SNOOP_CHECKING;
    s->cmd_dummies = 0;
    s->man_start_com = false;
    xilinx_spips_update_ixr(s);
    xilinx_spips_update_cs_lines(s);
}
 | 18,131 | 
| FFmpeg | e87190f5d20d380608f792ceb14d0def1d80e24b | 0 |
	static void show_chapters(WriterContext *w, AVFormatContext *fmt_ctx)
{
    int i;
    writer_print_section_header(w, SECTION_ID_CHAPTERS);
    for (i = 0; i < fmt_ctx->nb_chapters; i++) {
        AVChapter *chapter = fmt_ctx->chapters[i];
        writer_print_section_header(w, SECTION_ID_CHAPTER);
        print_int("id", chapter->id);
        print_q  ("time_base", chapter->time_base, '/');
        print_int("start", chapter->start);
        print_time("start_time", chapter->start, &chapter->time_base);
        print_int("end", chapter->end);
        print_time("end_time", chapter->end, &chapter->time_base);
        show_tags(w, chapter->metadata, SECTION_ID_CHAPTER_TAGS);
        writer_print_section_footer(w);
    }
    writer_print_section_footer(w);
}
 | 18,132 | 
| qemu | e1833e1f96456fd8fc17463246fe0b2050e68efb | 0 |
	static void spr_write_403_pbr (void *opaque, int sprn)
{
    DisasContext *ctx = opaque;
    gen_op_store_403_pb(sprn - SPR_403_PBL1);
    RET_STOP(ctx);
}
 | 18,134 | 
| qemu | db1e80ee2ed6fc9eb6b203873b39752144f5577f | 0 |
	static int vhdx_create_new_metadata(BlockDriverState *bs,
                                    uint64_t image_size,
                                    uint32_t block_size,
                                    uint32_t sector_size,
                                    uint64_t metadata_offset,
                                    VHDXImageType type)
{
    int ret = 0;
    uint32_t offset = 0;
    void *buffer = NULL;
    void *entry_buffer;
    VHDXMetadataTableHeader *md_table;
    VHDXMetadataTableEntry  *md_table_entry;
    /* Metadata entries */
    VHDXFileParameters     *mt_file_params;
    VHDXVirtualDiskSize    *mt_virtual_size;
    VHDXPage83Data         *mt_page83;
    VHDXVirtualDiskLogicalSectorSize  *mt_log_sector_size;
    VHDXVirtualDiskPhysicalSectorSize *mt_phys_sector_size;
    entry_buffer = g_malloc0(VHDX_METADATA_ENTRY_BUFFER_SIZE);
    mt_file_params = entry_buffer;
    offset += sizeof(VHDXFileParameters);
    mt_virtual_size = entry_buffer + offset;
    offset += sizeof(VHDXVirtualDiskSize);
    mt_page83 = entry_buffer + offset;
    offset += sizeof(VHDXPage83Data);
    mt_log_sector_size = entry_buffer + offset;
    offset += sizeof(VHDXVirtualDiskLogicalSectorSize);
    mt_phys_sector_size = entry_buffer + offset;
    mt_file_params->block_size = cpu_to_le32(block_size);
    if (type == VHDX_TYPE_FIXED) {
        mt_file_params->data_bits |= VHDX_PARAMS_LEAVE_BLOCKS_ALLOCED;
        cpu_to_le32s(&mt_file_params->data_bits);
    }
    vhdx_guid_generate(&mt_page83->page_83_data);
    cpu_to_leguids(&mt_page83->page_83_data);
    mt_virtual_size->virtual_disk_size        = cpu_to_le64(image_size);
    mt_log_sector_size->logical_sector_size   = cpu_to_le32(sector_size);
    mt_phys_sector_size->physical_sector_size = cpu_to_le32(sector_size);
    buffer = g_malloc0(VHDX_HEADER_BLOCK_SIZE);
    md_table = buffer;
    md_table->signature   = VHDX_METADATA_SIGNATURE;
    md_table->entry_count = 5;
    vhdx_metadata_header_le_export(md_table);
    /* This will reference beyond the reserved table portion */
    offset = 64 * KiB;
    md_table_entry = buffer + sizeof(VHDXMetadataTableHeader);
    md_table_entry[0].item_id = file_param_guid;
    md_table_entry[0].offset  = offset;
    md_table_entry[0].length  = sizeof(VHDXFileParameters);
    md_table_entry[0].data_bits |= VHDX_META_FLAGS_IS_REQUIRED;
    offset += md_table_entry[0].length;
    vhdx_metadata_entry_le_export(&md_table_entry[0]);
    md_table_entry[1].item_id = virtual_size_guid;
    md_table_entry[1].offset  = offset;
    md_table_entry[1].length  = sizeof(VHDXVirtualDiskSize);
    md_table_entry[1].data_bits |= VHDX_META_FLAGS_IS_REQUIRED |
                                   VHDX_META_FLAGS_IS_VIRTUAL_DISK;
    offset += md_table_entry[1].length;
    vhdx_metadata_entry_le_export(&md_table_entry[1]);
    md_table_entry[2].item_id = page83_guid;
    md_table_entry[2].offset  = offset;
    md_table_entry[2].length  = sizeof(VHDXPage83Data);
    md_table_entry[2].data_bits |= VHDX_META_FLAGS_IS_REQUIRED |
                                   VHDX_META_FLAGS_IS_VIRTUAL_DISK;
    offset += md_table_entry[2].length;
    vhdx_metadata_entry_le_export(&md_table_entry[2]);
    md_table_entry[3].item_id = logical_sector_guid;
    md_table_entry[3].offset  = offset;
    md_table_entry[3].length  = sizeof(VHDXVirtualDiskLogicalSectorSize);
    md_table_entry[3].data_bits |= VHDX_META_FLAGS_IS_REQUIRED |
                                   VHDX_META_FLAGS_IS_VIRTUAL_DISK;
    offset += md_table_entry[3].length;
    vhdx_metadata_entry_le_export(&md_table_entry[3]);
    md_table_entry[4].item_id = phys_sector_guid;
    md_table_entry[4].offset  = offset;
    md_table_entry[4].length  = sizeof(VHDXVirtualDiskPhysicalSectorSize);
    md_table_entry[4].data_bits |= VHDX_META_FLAGS_IS_REQUIRED |
                                   VHDX_META_FLAGS_IS_VIRTUAL_DISK;
    vhdx_metadata_entry_le_export(&md_table_entry[4]);
    ret = bdrv_pwrite(bs, metadata_offset, buffer, VHDX_HEADER_BLOCK_SIZE);
    if (ret < 0) {
        goto exit;
    }
    ret = bdrv_pwrite(bs, metadata_offset + (64 * KiB), entry_buffer,
                      VHDX_METADATA_ENTRY_BUFFER_SIZE);
    if (ret < 0) {
        goto exit;
    }
exit:
    g_free(buffer);
    g_free(entry_buffer);
    return ret;
}
 | 18,135 | 
| qemu | 031380d8770d2df6c386e4aeabd412007d3ebd54 | 0 |
	create_iovec(QEMUIOVector *qiov, char **argv, int nr_iov, int pattern)
{
    size_t *sizes = calloc(nr_iov, sizeof(size_t));
    size_t count = 0;
    void *buf = NULL;
    void *p;
    int i;
    for (i = 0; i < nr_iov; i++) {
        char *arg = argv[i];
        int64_t len;
        len = cvtnum(arg);
        if (len < 0) {
            printf("non-numeric length argument -- %s\n", arg);
            goto fail;
        }
        /* should be SIZE_T_MAX, but that doesn't exist */
        if (len > INT_MAX) {
            printf("too large length argument -- %s\n", arg);
            goto fail;
        }
        if (len & 0x1ff) {
            printf("length argument %" PRId64
                   " is not sector aligned\n", len);
            goto fail;
        }
        sizes[i] = len;
        count += len;
    }
    qemu_iovec_init(qiov, nr_iov);
    buf = p = qemu_io_alloc(count, pattern);
    for (i = 0; i < nr_iov; i++) {
        qemu_iovec_add(qiov, p, sizes[i]);
        p += sizes[i];
    }
fail:
    free(sizes);
    return buf;
}
 | 18,136 | 
| qemu | eb700029c7836798046191d62d595363d92c84d4 | 0 |
	void net_tx_pkt_reset(struct NetTxPkt *pkt)
{
    int i;
    /* no assert, as reset can be called before tx_pkt_init */
    if (!pkt) {
        return;
    }
    memset(&pkt->virt_hdr, 0, sizeof(pkt->virt_hdr));
    g_free(pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_base);
    pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_base = NULL;
    assert(pkt->vec);
    for (i = NET_TX_PKT_L2HDR_FRAG;
         i < pkt->payload_frags + NET_TX_PKT_PL_START_FRAG; i++) {
        pkt->vec[i].iov_len = 0;
    }
    pkt->payload_len = 0;
    pkt->payload_frags = 0;
    assert(pkt->raw);
    for (i = 0; i < pkt->raw_frags; i++) {
        assert(pkt->raw[i].iov_base);
        cpu_physical_memory_unmap(pkt->raw[i].iov_base, pkt->raw[i].iov_len,
                                  false, pkt->raw[i].iov_len);
        pkt->raw[i].iov_len = 0;
    }
    pkt->raw_frags = 0;
    pkt->hdr_len = 0;
    pkt->packet_type = 0;
    pkt->l4proto = 0;
}
 | 18,137 | 
| qemu | 51dcdbd319f8d46834d8155defc8d384a9958a73 | 0 |
	static void tcg_s390_program_interrupt(CPUS390XState *env, uint32_t code,
                                       int ilen)
{
#ifdef CONFIG_TCG
    trigger_pgm_exception(env, code, ilen);
    cpu_loop_exit(CPU(s390_env_get_cpu(env)));
#else
    g_assert_not_reached();
#endif
}
 | 18,139 | 
| qemu | 76abe4071d111a9ca6dcc9b9689a831c39ffa718 | 0 |
	static int vpc_open(BlockDriverState *bs, QDict *options, int flags,
                    Error **errp)
{
    BDRVVPCState *s = bs->opaque;
    int i;
    VHDFooter *footer;
    VHDDynDiskHeader *dyndisk_header;
    uint8_t buf[HEADER_SIZE];
    uint32_t checksum;
    int disk_type = VHD_DYNAMIC;
    int ret;
    ret = bdrv_pread(bs->file, 0, s->footer_buf, HEADER_SIZE);
    if (ret < 0) {
        goto fail;
    }
    footer = (VHDFooter *) s->footer_buf;
    if (strncmp(footer->creator, "conectix", 8)) {
        int64_t offset = bdrv_getlength(bs->file);
        if (offset < 0) {
            ret = offset;
            goto fail;
        } else if (offset < HEADER_SIZE) {
            ret = -EINVAL;
            goto fail;
        }
        /* If a fixed disk, the footer is found only at the end of the file */
        ret = bdrv_pread(bs->file, offset-HEADER_SIZE, s->footer_buf,
                         HEADER_SIZE);
        if (ret < 0) {
            goto fail;
        }
        if (strncmp(footer->creator, "conectix", 8)) {
            ret = -EMEDIUMTYPE;
            goto fail;
        }
        disk_type = VHD_FIXED;
    }
    checksum = be32_to_cpu(footer->checksum);
    footer->checksum = 0;
    if (vpc_checksum(s->footer_buf, HEADER_SIZE) != checksum)
        fprintf(stderr, "block-vpc: The header checksum of '%s' is "
            "incorrect.\n", bs->filename);
    /* Write 'checksum' back to footer, or else will leave it with zero. */
    footer->checksum = be32_to_cpu(checksum);
    // The visible size of an image in Virtual PC depends on the geometry
    // rather than on the size stored in the footer (the size in the footer
    // is too large usually)
    bs->total_sectors = (int64_t)
        be16_to_cpu(footer->cyls) * footer->heads * footer->secs_per_cyl;
    /* images created with disk2vhd report a far higher virtual size
     * than expected with the cyls * heads * sectors_per_cyl formula.
     * use the footer->size instead if the image was created with
     * disk2vhd.
     */
    if (!strncmp(footer->creator_app, "d2v", 4)) {
        bs->total_sectors = be64_to_cpu(footer->size) / BDRV_SECTOR_SIZE;
    }
    /* Allow a maximum disk size of approximately 2 TB */
    if (bs->total_sectors >= 65535LL * 255 * 255) {
        ret = -EFBIG;
        goto fail;
    }
    if (disk_type == VHD_DYNAMIC) {
        ret = bdrv_pread(bs->file, be64_to_cpu(footer->data_offset), buf,
                         HEADER_SIZE);
        if (ret < 0) {
            goto fail;
        }
        dyndisk_header = (VHDDynDiskHeader *) buf;
        if (strncmp(dyndisk_header->magic, "cxsparse", 8)) {
            ret = -EINVAL;
            goto fail;
        }
        s->block_size = be32_to_cpu(dyndisk_header->block_size);
        s->bitmap_size = ((s->block_size / (8 * 512)) + 511) & ~511;
        s->max_table_entries = be32_to_cpu(dyndisk_header->max_table_entries);
        s->pagetable = g_malloc(s->max_table_entries * 4);
        s->bat_offset = be64_to_cpu(dyndisk_header->table_offset);
        ret = bdrv_pread(bs->file, s->bat_offset, s->pagetable,
                         s->max_table_entries * 4);
        if (ret < 0) {
            goto fail;
        }
        s->free_data_block_offset =
            (s->bat_offset + (s->max_table_entries * 4) + 511) & ~511;
        for (i = 0; i < s->max_table_entries; i++) {
            be32_to_cpus(&s->pagetable[i]);
            if (s->pagetable[i] != 0xFFFFFFFF) {
                int64_t next = (512 * (int64_t) s->pagetable[i]) +
                    s->bitmap_size + s->block_size;
                if (next > s->free_data_block_offset) {
                    s->free_data_block_offset = next;
                }
            }
        }
        if (s->free_data_block_offset > bdrv_getlength(bs->file)) {
            error_setg(errp, "block-vpc: free_data_block_offset points after "
                             "the end of file. The image has been truncated.");
            ret = -EINVAL;
            goto fail;
        }
        s->last_bitmap_offset = (int64_t) -1;
#ifdef CACHE
        s->pageentry_u8 = g_malloc(512);
        s->pageentry_u32 = s->pageentry_u8;
        s->pageentry_u16 = s->pageentry_u8;
        s->last_pagetable = -1;
#endif
    }
    qemu_co_mutex_init(&s->lock);
    /* Disable migration when VHD images are used */
    error_set(&s->migration_blocker,
              QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED,
              "vpc", bs->device_name, "live migration");
    migrate_add_blocker(s->migration_blocker);
    return 0;
fail:
    g_free(s->pagetable);
#ifdef CACHE
    g_free(s->pageentry_u8);
#endif
    return ret;
}
 | 18,140 | 
| 
	qemu | 
	7385ac0ba2456159a52b9b2cbb5f6c71921d0c23 | 0 | 
	static always_inline void check_mips_mt(CPUState *env, DisasContext *ctx)
{
    if (unlikely(!(env->CP0_Config3 & (1 << CP0C3_MT))))
        generate_exception(ctx, EXCP_RI);
}
 | 18,141 | 
| 
	FFmpeg | 
	b46a77f19ddc4b2b5fa3187835ceb602a5244e24 | 0 | 
	static void dxva_adjust_hwframes(AVCodecContext *avctx, AVHWFramesContext *frames_ctx)
{
    FFDXVASharedContext *sctx = DXVA_SHARED_CONTEXT(avctx);
    int surface_alignment, num_surfaces;
    frames_ctx->format = sctx->pix_fmt;
    /* decoding MPEG-2 requires additional alignment on some Intel GPUs,
    but it causes issues for H.264 on certain AMD GPUs..... */
    if (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO)
        surface_alignment = 32;
    /* the HEVC DXVA2 spec asks for 128 pixel aligned surfaces to ensure
    all coding features have enough room to work with */
    else if (avctx->codec_id == AV_CODEC_ID_HEVC)
        surface_alignment = 128;
    else
        surface_alignment = 16;
    /* 4 base work surfaces */
    num_surfaces = 4;
    /* add surfaces based on number of possible refs */
    if (avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_HEVC)
        num_surfaces += 16;
    else
        num_surfaces += 2;
    /* add extra surfaces for frame threading */
    if (avctx->active_thread_type & FF_THREAD_FRAME)
        num_surfaces += avctx->thread_count;
    frames_ctx->sw_format = avctx->sw_pix_fmt == AV_PIX_FMT_YUV420P10 ?
                            AV_PIX_FMT_P010 : AV_PIX_FMT_NV12;
    frames_ctx->width = FFALIGN(avctx->coded_width, surface_alignment);
    frames_ctx->height = FFALIGN(avctx->coded_height, surface_alignment);
    frames_ctx->initial_pool_size = num_surfaces;
#if CONFIG_DXVA2
    if (frames_ctx->format == AV_PIX_FMT_DXVA2_VLD) {
        AVDXVA2FramesContext *frames_hwctx = frames_ctx->hwctx;
        frames_hwctx->surface_type = DXVA2_VideoDecoderRenderTarget;
    }
#endif
#if CONFIG_D3D11VA
    if (frames_ctx->format == AV_PIX_FMT_D3D11) {
        AVD3D11VAFramesContext *frames_hwctx = frames_ctx->hwctx;
        frames_hwctx->BindFlags |= D3D11_BIND_DECODER;
    }
#endif
}
 | 18,143 | 
| 
	qemu | 
	c0f4ce7751f0b9a9a7815f931a09a6c3de127cee | 0 | 
	USBDevice *usb_msd_init(const char *filename)
{
    MSDState *s;
    BlockDriverState *bdrv;
    BlockDriver *drv = NULL;
    const char *p1;
    char fmt[32];
    p1 = strchr(filename, ':');
    if (p1++) {
        const char *p2;
        if (strstart(filename, "format=", &p2)) {
            int len = MIN(p1 - p2, sizeof(fmt));
            pstrcpy(fmt, len, p2);
            drv = bdrv_find_format(fmt);
            if (!drv) {
                printf("invalid format %s\n", fmt);
                return NULL;
            }
        } else if (*filename != ':') {
            printf("unrecognized USB mass-storage option %s\n", filename);
            return NULL;
        }
        filename = p1;
    }
    if (!*filename) {
        printf("block device specification needed\n");
        return NULL;
    }
    s = qemu_mallocz(sizeof(MSDState));
    bdrv = bdrv_new("usb");
    if (bdrv_open2(bdrv, filename, 0, drv) < 0)
        goto fail;
    if (qemu_key_check(bdrv, filename))
        goto fail;
    s->bs = bdrv;
    s->dev.speed = USB_SPEED_FULL;
    s->dev.handle_packet = usb_generic_handle_packet;
    s->dev.handle_reset = usb_msd_handle_reset;
    s->dev.handle_control = usb_msd_handle_control;
    s->dev.handle_data = usb_msd_handle_data;
    s->dev.handle_destroy = usb_msd_handle_destroy;
    snprintf(s->dev.devname, sizeof(s->dev.devname), "QEMU USB MSD(%.16s)",
             filename);
    s->scsi_dev = scsi_disk_init(bdrv, 0, usb_msd_command_complete, s);
    usb_msd_handle_reset((USBDevice *)s);
    return (USBDevice *)s;
 fail:
    qemu_free(s);
    return NULL;
}
 | 18,144 | 
| 
	qemu | 
	260fecf13b0d30621dc88da03dc1b502b7358c6b | 0 | 
	static bool append_open_options(QDict *d, BlockDriverState *bs)
{
    const QDictEntry *entry;
    QemuOptDesc *desc;
    bool found_any = false;
    for (entry = qdict_first(bs->options); entry;
         entry = qdict_next(bs->options, entry))
    {
        /* Only take options for this level */
        if (strchr(qdict_entry_key(entry), '.')) {
            continue;
        }
        /* And exclude all non-driver-specific options */
        for (desc = bdrv_runtime_opts.desc; desc->name; desc++) {
            if (!strcmp(qdict_entry_key(entry), desc->name)) {
                break;
            }
        }
        if (desc->name) {
            continue;
        }
        qobject_incref(qdict_entry_value(entry));
        qdict_put_obj(d, qdict_entry_key(entry), qdict_entry_value(entry));
        found_any = true;
    }
    return found_any;
}
 | 18,145 | 
| 
	qemu | 
	c52ab08aee6f7d4717fc6b517174043126bd302f | 0 | 
	static void gen_eob(DisasContext *s)
{
    gen_eob_inhibit_irq(s, false);
}
 | 18,147 | 
| 
	qemu | 
	e3e09d87c6e69c2da684d5aacabe3124ebcb6f8e | 0 | 
	hwaddr s390_cpu_get_phys_page_debug(CPUState *cs, vaddr vaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    target_ulong raddr;
    int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    int old_exc = cs->exception_index;
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;
    /* 31-Bit mode */
    if (!(env->psw.mask & PSW_MASK_64)) {
        vaddr &= 0x7fffffff;
    }
    mmu_translate(env, vaddr, 2, asc, &raddr, &prot);
    cs->exception_index = old_exc;
    return raddr;
}
 | 18,148 | 
| 
	qemu | 
	307b7715d0256c95444cada36a02882e46bada2f | 0 | 
	void spapr_drc_detach(sPAPRDRConnector *drc, DeviceState *d, Error **errp)
{
    trace_spapr_drc_detach(spapr_drc_index(drc));
    /* if we've signalled device presence to the guest, or if the guest
     * has gone ahead and configured the device (via manually-executed
     * device add via drmgr in guest, namely), we need to wait
     * for the guest to quiesce the device before completing detach.
     * Otherwise, we can assume the guest hasn't seen it and complete the
     * detach immediately. Note that there is a small race window
     * just before, or during, configuration, which in this context
     * refers mainly to fetching the device tree via RTAS.
     * During this window the device access will be arbitrated by
     * associated DRC, which will simply fail the RTAS calls as invalid.
     * This is recoverable within guest and current implementations of
     * drmgr should be able to cope.
     */
    if (!drc->signalled && !drc->configured) {
        /* if the guest hasn't seen the device we can't rely on it to
         * set it back to an isolated state via RTAS, so do it here manually
         */
        drc->isolation_state = SPAPR_DR_ISOLATION_STATE_ISOLATED;
    }
    if (drc->isolation_state != SPAPR_DR_ISOLATION_STATE_ISOLATED) {
        trace_spapr_drc_awaiting_isolated(spapr_drc_index(drc));
        drc->awaiting_release = true;
        return;
    }
    if (spapr_drc_type(drc) != SPAPR_DR_CONNECTOR_TYPE_PCI &&
        drc->allocation_state != SPAPR_DR_ALLOCATION_STATE_UNUSABLE) {
        trace_spapr_drc_awaiting_unusable(spapr_drc_index(drc));
        drc->awaiting_release = true;
        return;
    }
    if (drc->awaiting_allocation) {
        drc->awaiting_release = true;
        trace_spapr_drc_awaiting_allocation(spapr_drc_index(drc));
        return;
    }
    drc->dr_indicator = SPAPR_DR_INDICATOR_INACTIVE;
    /* Calling release callbacks based on spapr_drc_type(drc). */
    switch (spapr_drc_type(drc)) {
    case SPAPR_DR_CONNECTOR_TYPE_CPU:
        spapr_core_release(drc->dev);
        break;
    case SPAPR_DR_CONNECTOR_TYPE_PCI:
        spapr_phb_remove_pci_device_cb(drc->dev);
        break;
    case SPAPR_DR_CONNECTOR_TYPE_LMB:
        spapr_lmb_release(drc->dev);
        break;
    case SPAPR_DR_CONNECTOR_TYPE_PHB:
    case SPAPR_DR_CONNECTOR_TYPE_VIO:
    default:
        g_assert(false);
    }
    drc->awaiting_release = false;
    g_free(drc->fdt);
    drc->fdt = NULL;
    drc->fdt_start_offset = 0;
    object_property_del(OBJECT(drc), "device", NULL);
    drc->dev = NULL;
}
 | 18,151 | 
| 
	qemu | 
	5a3d7b23ba41b4884b43b6bc936ea18f999d5c6b | 0 | 
	static XICSState *try_create_xics(const char *type, int nr_servers,
                                  int nr_irqs)
{
    DeviceState *dev;
    dev = qdev_create(NULL, type);
    qdev_prop_set_uint32(dev, "nr_servers", nr_servers);
    qdev_prop_set_uint32(dev, "nr_irqs", nr_irqs);
    if (qdev_init(dev) < 0) {
        return NULL;
    }
    return XICS(dev);
}
 | 18,152 | 
| 
	qemu | 
	b931bfbf042983f311b3b09894d8030b2755a638 | 0 | 
	static void vhost_user_stop(VhostUserState *s)
{
    if (vhost_user_running(s)) {
        vhost_net_cleanup(s->vhost_net);
    }
    s->vhost_net = 0;
}
 | 18,153 | 
| 
	qemu | 
	c10c9d96158ce4d05f4325e64c0ce6a5fcd64b8b | 0 | 
	static int blk_check_byte_request(BlockBackend *blk, int64_t offset,
                                  size_t size)
{
    int64_t len;
    if (size > INT_MAX) {
        return -EIO;
    }
    if (!blk_is_available(blk)) {
        return -ENOMEDIUM;
    }
    len = blk_getlength(blk);
    if (len < 0) {
        return len;
    }
    if (offset < 0) {
        return -EIO;
    }
    if (offset > len || len - offset < size) {
        return -EIO;
    }
    return 0;
}
 | 18,155 | 
| 
	qemu | 
	e1833e1f96456fd8fc17463246fe0b2050e68efb | 0 | 
	static void spr_write_ibatl_h (void *opaque, int sprn)
{
    DisasContext *ctx = opaque;
    gen_op_store_ibatl((sprn - SPR_IBAT4L) / 2);
    RET_STOP(ctx);
}
 | 18,156 | 
| 
	qemu | 
	c39ce112b60ffafbaf700853e32bea74cbb2c148 | 0 | 
	static int vscsi_queue_cmd(VSCSIState *s, vscsi_req *req)
{
    union srp_iu *srp = &req->iu.srp;
    SCSIDevice *sdev;
    int n, id, lun;
    vscsi_decode_id_lun(be64_to_cpu(srp->cmd.lun), &id, &lun);
    /* Qemu vs. linux issue with LUNs to be sorted out ... */
    sdev = (id < 8 && lun < 16) ? s->bus.devs[id] : NULL;
    if (!sdev) {
        dprintf("VSCSI: Command for id %d with no drive\n", id);
        if (srp->cmd.cdb[0] == INQUIRY) {
            vscsi_inquiry_no_target(s, req);
        } else {
            vscsi_makeup_sense(s, req, ILLEGAL_REQUEST, 0x24, 0x00);
            vscsi_send_rsp(s, req, CHECK_CONDITION, 0, 0);
        }
        return 1;
    }
    req->lun = lun;
    req->sreq = scsi_req_new(sdev, req->qtag, lun, req);
    n = scsi_req_enqueue(req->sreq, srp->cmd.cdb);
    dprintf("VSCSI: Queued command tag 0x%x CMD 0x%x ID %d LUN %d ret: %d\n",
            req->qtag, srp->cmd.cdb[0], id, lun, n);
    if (n) {
        /* Transfer direction must be set before preprocessing the
         * descriptors
         */
        req->writing = (n < 1);
        /* Preprocess RDMA descriptors */
        vscsi_preprocess_desc(req);
        /* Get transfer direction and initiate transfer */
        if (n > 0) {
            req->data_len = n;
        } else if (n < 0) {
            req->data_len = -n;
        }
        scsi_req_continue(req->sreq);
    }
    /* Don't touch req here, it may have been recycled already */
    return 0;
}
 | 18,157 | 
| 
	qemu | 
	0ed93d84edabc7656f5c998ae1a346fe8b94ca54 | 0 | 
	int coroutine_fn laio_co_submit(BlockDriverState *bs, LinuxAioState *s, int fd,
                                uint64_t offset, QEMUIOVector *qiov, int type)
{
    int ret;
    struct qemu_laiocb laiocb = {
        .co         = qemu_coroutine_self(),
        .nbytes     = qiov->size,
        .ctx        = s,
        .is_read    = (type == QEMU_AIO_READ),
        .qiov       = qiov,
    };
    ret = laio_do_submit(fd, &laiocb, offset, type);
    if (ret < 0) {
        return ret;
    }
    qemu_coroutine_yield();
    return laiocb.ret;
}
 | 18,158 | 
| 
	qemu | 
	51b19ebe4320f3dcd93cea71235c1219318ddfd2 | 0 | 
	static void balloon_stats_poll_cb(void *opaque)
{
    VirtIOBalloon *s = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    if (!balloon_stats_supported(s)) {
        /* re-schedule */
        balloon_stats_change_timer(s, s->stats_poll_interval);
        return;
    }
    virtqueue_push(s->svq, &s->stats_vq_elem, s->stats_vq_offset);
    virtio_notify(vdev, s->svq);
}
 | 18,160 | 
| 
	qemu | 
	a89f364ae8740dfc31b321eed9ee454e996dc3c1 | 0 | 
	static void pxa2xx_mm_write(void *opaque, hwaddr addr,
                            uint64_t value, unsigned size)
{
    PXA2xxState *s = (PXA2xxState *) opaque;
    switch (addr) {
    case MDCNFG ... SA1110:
        if ((addr & 3) == 0) {
            s->mm_regs[addr >> 2] = value;
            break;
        }
    default:
        printf("%s: Bad register " REG_FMT "\n", __FUNCTION__, addr);
        break;
    }
}
 | 18,162 | 
| 
	qemu | 
	92c0bba9a95739c92e959fe478cb1acb92fa5446 | 0 | 
	static void omap_l4_io_writeh(void *opaque, target_phys_addr_t addr,
                uint32_t value)
{
    unsigned int i = (addr - OMAP2_L4_BASE) >> TARGET_PAGE_BITS;
    return omap_l4_io_writeh_fn[i](omap_l4_io_opaque[i], addr, value);
}
 | 18,163 | 
| 
	qemu | 
	1f4ad7d3b8f7162ec0471506d86f57a5d77b8f76 | 0 | 
	BlockBackend *blk_new_open(const char *filename, const char *reference,
                           QDict *options, int flags, Error **errp)
{
    BlockBackend *blk;
    BlockDriverState *bs;
    uint64_t perm;
    /* blk_new_open() is mainly used in .bdrv_create implementations and the
     * tools where sharing isn't a concern because the BDS stays private, so we
     * just request permission according to the flags.
     *
     * The exceptions are xen_disk and blockdev_init(); in these cases, the
     * caller of blk_new_open() doesn't make use of the permissions, but they
     * shouldn't hurt either. We can still share everything here because the
     * guest devices will add their own blockers if they can't share. */
    perm = BLK_PERM_CONSISTENT_READ;
    if (flags & BDRV_O_RDWR) {
        perm |= BLK_PERM_WRITE;
    }
    if (flags & BDRV_O_RESIZE) {
        perm |= BLK_PERM_RESIZE;
    }
    blk = blk_new(perm, BLK_PERM_ALL);
    bs = bdrv_open(filename, reference, options, flags, errp);
    if (!bs) {
        blk_unref(blk);
        return NULL;
    }
    blk->root = bdrv_root_attach_child(bs, "root", &child_root,
                                       perm, BLK_PERM_ALL, blk, errp);
    if (!blk->root) {
        bdrv_unref(bs);
        blk_unref(blk);
        return NULL;
    }
    return blk;
}
 | 18,164 | 
| 
	FFmpeg | 
	87e8788680e16c51f6048af26f3f7830c35207a5 | 0 | 
	static int mpc_probe(AVProbeData *p)
{
    const uint8_t *d = p->buf;
    if (p->buf_size < 32)
        return 0;
    if (d[0] == 'M' && d[1] == 'P' && d[2] == '+' && (d[3] == 0x17 || d[3] == 0x7))
        return AVPROBE_SCORE_MAX;
    if (d[0] == 'I' && d[1] == 'D' && d[2] == '3')
        return AVPROBE_SCORE_MAX / 2;
    return 0;
}
 | 18,165 | 
| 
	FFmpeg | 
	0a39c9ac0bfd7345fe676b4e2707d9cec3cbb553 | 0 | 
	av_cold void ff_hpeldsp_init_x86(HpelDSPContext *c, int flags)
{
    int cpu_flags = av_get_cpu_flags();
    if (INLINE_MMX(cpu_flags))
        hpeldsp_init_mmx(c, flags);
    if (EXTERNAL_AMD3DNOW(cpu_flags))
        hpeldsp_init_3dnow(c, flags);
    if (EXTERNAL_MMXEXT(cpu_flags))
        hpeldsp_init_mmxext(c, flags);
    if (EXTERNAL_SSE2_FAST(cpu_flags))
        hpeldsp_init_sse2_fast(c, flags);
    if (CONFIG_VP3_DECODER)
        ff_hpeldsp_vp3_init_x86(c, cpu_flags, flags);
}
 | 18,166 | 
| 
	FFmpeg | 
	5d75730c58f72918a41bb5abda4b448ecdd4273c | 0 | 
	static int guess_ni_flag(AVFormatContext *s)
{
    int i;
    int64_t last_start = 0;
    int64_t first_end  = INT64_MAX;
    int64_t oldpos     = avio_tell(s->pb);
    int *idx;
    int64_t min_pos, pos;
    for (i = 0; i < s->nb_streams; i++) {
        AVStream *st = s->streams[i];
        int n        = st->nb_index_entries;
        unsigned int size;
        if (n <= 0)
            continue;
        if (n >= 2) {
            int64_t pos = st->index_entries[0].pos;
            avio_seek(s->pb, pos + 4, SEEK_SET);
            size = avio_rl32(s->pb);
            if (pos + size > st->index_entries[1].pos)
                last_start = INT64_MAX;
        }
        if (st->index_entries[0].pos > last_start)
            last_start = st->index_entries[0].pos;
        if (st->index_entries[n - 1].pos < first_end)
            first_end = st->index_entries[n - 1].pos;
    }
    avio_seek(s->pb, oldpos, SEEK_SET);
    if (last_start > first_end)
        return 1;
    idx= av_calloc(s->nb_streams, sizeof(*idx));
    if (!idx)
        return 0;
    for (min_pos=pos=0; min_pos!=INT64_MAX; pos= min_pos+1LU) {
        int64_t max_dts = INT64_MIN/2, min_dts= INT64_MAX/2;
        min_pos = INT64_MAX;
        for (i=0; i<s->nb_streams; i++) {
            AVStream *st = s->streams[i];
            AVIStream *ast = st->priv_data;
            int n= st->nb_index_entries;
            while (idx[i]<n && st->index_entries[idx[i]].pos < pos)
                idx[i]++;
            if (idx[i] < n) {
                min_dts = FFMIN(min_dts, av_rescale_q(st->index_entries[idx[i]].timestamp/FFMAX(ast->sample_size, 1), st->time_base, AV_TIME_BASE_Q));
                min_pos = FFMIN(min_pos, st->index_entries[idx[i]].pos);
            }
            if (idx[i])
                max_dts = FFMAX(max_dts, av_rescale_q(st->index_entries[idx[i]-1].timestamp/FFMAX(ast->sample_size, 1), st->time_base, AV_TIME_BASE_Q));
        }
        if (max_dts - min_dts > 2*AV_TIME_BASE) {
            av_free(idx);
            return 1;
        }
    }
    av_free(idx);
    return 0;
}
 | 18,167 | 
| 
	FFmpeg | 
	57d77b3963ce1023eaf5ada8cba58b9379405cc8 | 0 | 
	int av_opencl_buffer_read_image(uint8_t **dst_data, int *plane_size, int plane_num,
                                       cl_mem src_cl_buf, size_t cl_buffer_size)
{
    int i,buffer_size = 0,ret = 0;
    uint8_t *temp;
    void *mapped;
    cl_int status;
    if ((unsigned int)plane_num > 8) {
        return AVERROR(EINVAL);
    }
    for (i = 0;i < plane_num;i++) {
        buffer_size += plane_size[i];
    }
    if (buffer_size > cl_buffer_size) {
        av_log(&openclutils, AV_LOG_ERROR, "Cannot write image to CPU buffer: OpenCL buffer too small\n");
        return AVERROR(EINVAL);
    }
    mapped = clEnqueueMapBuffer(gpu_env.command_queue, src_cl_buf,
                                      CL_TRUE,CL_MAP_READ, 0, buffer_size,
                                      0, NULL, NULL, &status);
    if (status != CL_SUCCESS) {
        av_log(&openclutils, AV_LOG_ERROR, "Could not map OpenCL buffer: %s\n", opencl_errstr(status));
        return AVERROR_EXTERNAL;
    }
    temp = mapped;
    if (ret >= 0) {
        for (i = 0;i < plane_num;i++) {
            memcpy(dst_data[i], temp, plane_size[i]);
            temp += plane_size[i];
        }
    }
    status = clEnqueueUnmapMemObject(gpu_env.command_queue, src_cl_buf, mapped, 0, NULL, NULL);
    if (status != CL_SUCCESS) {
        av_log(&openclutils, AV_LOG_ERROR, "Could not unmap OpenCL buffer: %s\n", opencl_errstr(status));
        return AVERROR_EXTERNAL;
    }
    return 0;
}
 | 18,168 | 
| 
	FFmpeg | 
	6ba5cbc699e77cae66bb719354fa142114b64eab | 0 | 
	static void rtsp_cmd_describe(HTTPContext *c, const char *url)
{
    FFStream *stream;
    char path1[1024];
    const char *path;
    uint8_t *content;
    int content_length, len;
    struct sockaddr_in my_addr;
    
    /* find which url is asked */
    url_split(NULL, 0, NULL, 0, NULL, path1, sizeof(path1), url);
    path = path1;
    if (*path == '/')
        path++;
    for(stream = first_stream; stream != NULL; stream = stream->next) {
        if (!stream->is_feed && stream->fmt == &rtp_mux &&
            !strcmp(path, stream->filename)) {
            goto found;
        }
    }
    /* no stream found */
    rtsp_reply_error(c, RTSP_STATUS_SERVICE); /* XXX: right error ? */
    return;
 found:
    /* prepare the media description in sdp format */
    /* get the host IP */
    len = sizeof(my_addr);
    getsockname(c->fd, (struct sockaddr *)&my_addr, &len);
    content_length = prepare_sdp_description(stream, &content, my_addr.sin_addr);
    if (content_length < 0) {
        rtsp_reply_error(c, RTSP_STATUS_INTERNAL);
        return;
    }
    rtsp_reply_header(c, RTSP_STATUS_OK);
    url_fprintf(c->pb, "Content-Type: application/sdp\r\n");
    url_fprintf(c->pb, "Content-Length: %d\r\n", content_length);
    url_fprintf(c->pb, "\r\n");
    put_buffer(c->pb, content, content_length);
}
 | 18,169 | 
| 
	FFmpeg | 
	1ec83d9a9e472f485897ac92bad9631d551a8c5b | 0 | 
	static char *shorts2str(int *sp, int count, const char *sep)
{
    int i;
    char *ap, *ap0;
    if (!sep) sep = ", ";
    ap = av_malloc((5 + strlen(sep)) * count);
    if (!ap)
        return NULL;
    ap0   = ap;
    ap[0] = '\0';
    for (i = 0; i < count; i++) {
        int l = snprintf(ap, 5 + strlen(sep), "%d%s", sp[i], sep);
        ap += l;
    }
    ap0[strlen(ap0) - strlen(sep)] = '\0';
    return ap0;
}
 | 18,170 | 
| 
	qemu | 
	60fe637bf0e4d7989e21e50f52526444765c63b4 | 1 | 
	uint64_t blk_mig_bytes_transferred(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;
    blk_mig_lock();
    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->completed_sectors;
    }
    blk_mig_unlock();
    return sum << BDRV_SECTOR_BITS;
}
 | 18,173 | 
| 
	FFmpeg | 
	6e42e6c4b410dbef8b593c2d796a5dad95f89ee4 | 1 | 
	void palette8torgb16(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette)
{
	long i;
	for(i=0; i<num_pixels; i++)
		((uint16_t *)dst)[i] = ((uint16_t *)palette)[ src[i] ];
}
 | 18,174 | 
| 
	qemu | 
	e23a1b33b53d25510320b26d9f154e19c6c99725 | 1 | 
	RTCState *rtc_init(int base_year)
{
    ISADevice *dev;
    dev = isa_create("mc146818rtc");
    qdev_prop_set_int32(&dev->qdev, "base_year", base_year);
    qdev_init(&dev->qdev);
    return DO_UPCAST(RTCState, dev, dev);
}
 | 18,176 | 
| 
	FFmpeg | 
	0953736b7e97f6e121a0587a95434bf1857a27da | 1 | 
	static int headroom(int *la)
{
    int l;
    if (*la == 0) {
        return 31;
    }
    l = 30 - av_log2(FFABS(*la));
    *la <<= l;
    return l;
}
 | 18,177 | 
| 
	FFmpeg | 
	b25e84b7399bd91605596b67d761d3464dbe8a6e | 1 | 
	static int decode_nal_unit(HEVCContext *s, const uint8_t *nal, int length)
{
    HEVCLocalContext *lc = &s->HEVClc;
    GetBitContext *gb    = &lc->gb;
    int ctb_addr_ts, ret;
    ret = init_get_bits8(gb, nal, length);
    if (ret < 0)
        return ret;
    ret = hls_nal_unit(s);
    if (ret < 0) {
        av_log(s->avctx, AV_LOG_ERROR, "Invalid NAL unit %d, skipping.\n",
               s->nal_unit_type);
        if (s->avctx->err_recognition & AV_EF_EXPLODE)
            return ret;
        return 0;
    } else if (!ret)
        return 0;
    switch (s->nal_unit_type) {
    case NAL_VPS:
        ret = ff_hevc_decode_nal_vps(s);
        if (ret < 0)
            return ret;
        break;
    case NAL_SPS:
        ret = ff_hevc_decode_nal_sps(s);
        if (ret < 0)
            return ret;
        break;
    case NAL_PPS:
        ret = ff_hevc_decode_nal_pps(s);
        if (ret < 0)
            return ret;
        break;
    case NAL_SEI_PREFIX:
    case NAL_SEI_SUFFIX:
        ret = ff_hevc_decode_nal_sei(s);
        if (ret < 0)
            return ret;
        break;
    case NAL_TRAIL_R:
    case NAL_TRAIL_N:
    case NAL_TSA_N:
    case NAL_TSA_R:
    case NAL_STSA_N:
    case NAL_STSA_R:
    case NAL_BLA_W_LP:
    case NAL_BLA_W_RADL:
    case NAL_BLA_N_LP:
    case NAL_IDR_W_RADL:
    case NAL_IDR_N_LP:
    case NAL_CRA_NUT:
    case NAL_RADL_N:
    case NAL_RADL_R:
    case NAL_RASL_N:
    case NAL_RASL_R:
        ret = hls_slice_header(s);
        if (ret < 0)
            return ret;
        if (s->max_ra == INT_MAX) {
            if (s->nal_unit_type == NAL_CRA_NUT || IS_BLA(s)) {
                s->max_ra = s->poc;
            } else {
                if (IS_IDR(s))
                    s->max_ra = INT_MIN;
            }
        }
        if ((s->nal_unit_type == NAL_RASL_R || s->nal_unit_type == NAL_RASL_N) &&
            s->poc <= s->max_ra) {
            s->is_decoded = 0;
            break;
        } else {
            if (s->nal_unit_type == NAL_RASL_R && s->poc > s->max_ra)
                s->max_ra = INT_MIN;
        }
        if (s->sh.first_slice_in_pic_flag) {
            ret = hevc_frame_start(s);
            if (ret < 0)
                return ret;
        } else if (!s->ref) {
            av_log(s->avctx, AV_LOG_ERROR, "First slice in a frame missing.\n");
        }
        if (!s->sh.dependent_slice_segment_flag &&
            s->sh.slice_type != I_SLICE) {
            ret = ff_hevc_slice_rpl(s);
            if (ret < 0) {
                av_log(s->avctx, AV_LOG_WARNING,
                       "Error constructing the reference lists for the current slice.\n");
                if (s->avctx->err_recognition & AV_EF_EXPLODE)
                    return ret;
            }
        }
        ctb_addr_ts = hls_slice_data(s);
        if (ctb_addr_ts >= (s->sps->ctb_width * s->sps->ctb_height)) {
            s->is_decoded = 1;
            if ((s->pps->transquant_bypass_enable_flag ||
                 (s->sps->pcm.loop_filter_disable_flag && s->sps->pcm_enabled_flag)) &&
                s->sps->sao_enabled)
                restore_tqb_pixels(s);
        }
        if (ctb_addr_ts < 0)
            return ctb_addr_ts;
        break;
    case NAL_EOS_NUT:
    case NAL_EOB_NUT:
        s->seq_decode = (s->seq_decode + 1) & 0xff;
        s->max_ra     = INT_MAX;
        break;
    case NAL_AUD:
    case NAL_FD_NUT:
        break;
    default:
        av_log(s->avctx, AV_LOG_INFO,
               "Skipping NAL unit %d\n", s->nal_unit_type);
    }
    return 0;
}
 | 18,178 | 
| 
	qemu | 
	c5633d998a27502ad8cc10c2d46f91b02555ae7a | 1 | 
	static void xen_read_physmap(XenIOState *state)
{
    XenPhysmap *physmap = NULL;
    unsigned int len, num, i;
    char path[80], *value = NULL;
    char **entries = NULL;
    snprintf(path, sizeof(path),
            "/local/domain/0/device-model/%d/physmap", xen_domid);
    entries = xs_directory(state->xenstore, 0, path, &num);
    if (entries == NULL)
        return;
    for (i = 0; i < num; i++) {
        physmap = g_malloc(sizeof (XenPhysmap));
        physmap->phys_offset = strtoull(entries[i], NULL, 16);
        snprintf(path, sizeof(path),
                "/local/domain/0/device-model/%d/physmap/%s/start_addr",
                xen_domid, entries[i]);
        value = xs_read(state->xenstore, 0, path, &len);
        if (value == NULL) {
            free(physmap);
            continue;
        }
        physmap->start_addr = strtoull(value, NULL, 16);
        free(value);
        snprintf(path, sizeof(path),
                "/local/domain/0/device-model/%d/physmap/%s/size",
                xen_domid, entries[i]);
        value = xs_read(state->xenstore, 0, path, &len);
        if (value == NULL) {
            free(physmap);
            continue;
        }
        physmap->size = strtoull(value, NULL, 16);
        free(value);
        snprintf(path, sizeof(path),
                "/local/domain/0/device-model/%d/physmap/%s/name",
                xen_domid, entries[i]);
        physmap->name = xs_read(state->xenstore, 0, path, &len);
        QLIST_INSERT_HEAD(&state->physmap, physmap, list);
    }
    free(entries);
}
 | 18,180 | 
| 
	qemu | 
	c919297379e9980c2bcc4d2053addbc1fd6d762b | 1 | 
	static int img_map(int argc, char **argv)
{
    int c;
    OutputFormat output_format = OFORMAT_HUMAN;
    BlockBackend *blk;
    BlockDriverState *bs;
    const char *filename, *fmt, *output;
    int64_t length;
    MapEntry curr = { .length = 0 }, next;
    int ret = 0;
    bool image_opts = false;
    fmt = NULL;
    output = NULL;
    for (;;) {
        int option_index = 0;
        static const struct option long_options[] = {
            {"help", no_argument, 0, 'h'},
            {"format", required_argument, 0, 'f'},
            {"output", required_argument, 0, OPTION_OUTPUT},
            {"object", required_argument, 0, OPTION_OBJECT},
            {"image-opts", no_argument, 0, OPTION_IMAGE_OPTS},
            {0, 0, 0, 0}
        };
        c = getopt_long(argc, argv, "f:h",
                        long_options, &option_index);
        if (c == -1) {
            break;
        }
        switch (c) {
        case '?':
        case 'h':
            help();
            break;
        case 'f':
            fmt = optarg;
            break;
        case OPTION_OUTPUT:
            output = optarg;
            break;
        case OPTION_OBJECT: {
            QemuOpts *opts;
            opts = qemu_opts_parse_noisily(&qemu_object_opts,
                                           optarg, true);
            if (!opts) {
                return 1;
            }
        }   break;
        case OPTION_IMAGE_OPTS:
            image_opts = true;
            break;
        }
    }
    if (optind != argc - 1) {
        error_exit("Expecting one image file name");
    }
    filename = argv[optind];
    if (output && !strcmp(output, "json")) {
        output_format = OFORMAT_JSON;
    } else if (output && !strcmp(output, "human")) {
        output_format = OFORMAT_HUMAN;
    } else if (output) {
        error_report("--output must be used with human or json as argument.");
        return 1;
    }
    if (qemu_opts_foreach(&qemu_object_opts,
                          user_creatable_add_opts_foreach,
                          NULL, NULL)) {
        return 1;
    }
    blk = img_open(image_opts, filename, fmt, 0, false, false);
    if (!blk) {
        return 1;
    }
    bs = blk_bs(blk);
    if (output_format == OFORMAT_HUMAN) {
        printf("%-16s%-16s%-16s%s\n", "Offset", "Length", "Mapped to", "File");
    }
    length = blk_getlength(blk);
    while (curr.start + curr.length < length) {
        int64_t nsectors_left;
        int64_t sector_num;
        int n;
        sector_num = (curr.start + curr.length) >> BDRV_SECTOR_BITS;
        /* Probe up to 1 GiB at a time.  */
        nsectors_left = DIV_ROUND_UP(length, BDRV_SECTOR_SIZE) - sector_num;
        n = MIN(1 << (30 - BDRV_SECTOR_BITS), nsectors_left);
        ret = get_block_status(bs, sector_num, n, &next);
        if (ret < 0) {
            error_report("Could not read file metadata: %s", strerror(-ret));
            goto out;
        }
        if (entry_mergeable(&curr, &next)) {
            curr.length += next.length;
            continue;
        }
        if (curr.length > 0) {
            dump_map_entry(output_format, &curr, &next);
        }
        curr = next;
    }
    dump_map_entry(output_format, &curr, NULL);
out:
    blk_unref(blk);
    return ret < 0;
}
 | 18,181 | 
| 
	FFmpeg | 
	0726b2d1ea4343698ff603cc32b824f5bce952c5 | 1 | 
	static int jpeg_read_close(AVFormatContext *s1)
{
    JpegContext *s = s1->priv_data;
    av_free(s);
    return 0;
}
 | 18,182 | 
| 
	qemu | 
	c61e684e44272f2acb2bef34cf2aa234582a73a9 | 1 | 
	static int64_t coroutine_fn bdrv_co_get_block_status_above(BlockDriverState *bs,
        BlockDriverState *base,
        int64_t sector_num,
        int nb_sectors,
        int *pnum,
        BlockDriverState **file)
{
    BlockDriverState *p;
    int64_t ret = 0;
    assert(bs != base);
    for (p = bs; p != base; p = backing_bs(p)) {
        ret = bdrv_co_get_block_status(p, sector_num, nb_sectors, pnum, file);
        if (ret < 0 || ret & BDRV_BLOCK_ALLOCATED) {
            break;
        }
        /* [sector_num, pnum] unallocated on this layer, which could be only
         * the first part of [sector_num, nb_sectors].  */
        nb_sectors = MIN(nb_sectors, *pnum);
    }
    return ret;
}
 | 18,184 | 
| 
	FFmpeg | 
	ca203e9985cd2dcf42a0c0853940850d3a8edf3a | 1 | 
	static av_cold int psy_3gpp_init(FFPsyContext *ctx) {
    AacPsyContext *pctx;
    float bark;
    int i, j, g, start;
    float prev, minscale, minath, minsnr, pe_min;
    int chan_bitrate = ctx->avctx->bit_rate / ((ctx->avctx->flags & CODEC_FLAG_QSCALE) ? 2.0f : ctx->avctx->channels);
    const int bandwidth    = ctx->avctx->cutoff ? ctx->avctx->cutoff : AAC_CUTOFF(ctx->avctx);
    const float num_bark   = calc_bark((float)bandwidth);
    ctx->model_priv_data = av_mallocz(sizeof(AacPsyContext));
    if (!ctx->model_priv_data)
        return AVERROR(ENOMEM);
    pctx = (AacPsyContext*) ctx->model_priv_data;
    pctx->global_quality = (ctx->avctx->global_quality ? ctx->avctx->global_quality : 120) * 0.01f;
    if (ctx->avctx->flags & CODEC_FLAG_QSCALE) {
        /* Use the target average bitrate to compute spread parameters */
        chan_bitrate = (int)(chan_bitrate / 120.0 * (ctx->avctx->global_quality ? ctx->avctx->global_quality : 120));
    }
    pctx->chan_bitrate = chan_bitrate;
    pctx->frame_bits   = FFMIN(2560, chan_bitrate * AAC_BLOCK_SIZE_LONG / ctx->avctx->sample_rate);
    pctx->pe.min       =  8.0f * AAC_BLOCK_SIZE_LONG * bandwidth / (ctx->avctx->sample_rate * 2.0f);
    pctx->pe.max       = 12.0f * AAC_BLOCK_SIZE_LONG * bandwidth / (ctx->avctx->sample_rate * 2.0f);
    ctx->bitres.size   = 6144 - pctx->frame_bits;
    ctx->bitres.size  -= ctx->bitres.size % 8;
    pctx->fill_level   = ctx->bitres.size;
    minath = ath(3410 - 0.733 * ATH_ADD, ATH_ADD);
    for (j = 0; j < 2; j++) {
        AacPsyCoeffs *coeffs = pctx->psy_coef[j];
        const uint8_t *band_sizes = ctx->bands[j];
        float line_to_frequency = ctx->avctx->sample_rate / (j ? 256.f : 2048.0f);
        float avg_chan_bits = chan_bitrate * (j ? 128.0f : 1024.0f) / ctx->avctx->sample_rate;
        /* reference encoder uses 2.4% here instead of 60% like the spec says */
        float bark_pe = 0.024f * PSY_3GPP_BITS_TO_PE(avg_chan_bits) / num_bark;
        float en_spread_low = j ? PSY_3GPP_EN_SPREAD_LOW_S : PSY_3GPP_EN_SPREAD_LOW_L;
        /* High energy spreading for long blocks <= 22kbps/channel and short blocks are the same. */
        float en_spread_hi  = (j || (chan_bitrate <= 22.0f)) ? PSY_3GPP_EN_SPREAD_HI_S : PSY_3GPP_EN_SPREAD_HI_L1;
        i = 0;
        prev = 0.0;
        for (g = 0; g < ctx->num_bands[j]; g++) {
            i += band_sizes[g];
            bark = calc_bark((i-1) * line_to_frequency);
            coeffs[g].barks = (bark + prev) / 2.0;
            prev = bark;
        }
        for (g = 0; g < ctx->num_bands[j] - 1; g++) {
            AacPsyCoeffs *coeff = &coeffs[g];
            float bark_width = coeffs[g+1].barks - coeffs->barks;
            coeff->spread_low[0] = pow(10.0, -bark_width * PSY_3GPP_THR_SPREAD_LOW);
            coeff->spread_hi [0] = pow(10.0, -bark_width * PSY_3GPP_THR_SPREAD_HI);
            coeff->spread_low[1] = pow(10.0, -bark_width * en_spread_low);
            coeff->spread_hi [1] = pow(10.0, -bark_width * en_spread_hi);
            pe_min = bark_pe * bark_width;
            minsnr = exp2(pe_min / band_sizes[g]) - 1.5f;
            coeff->min_snr = av_clipf(1.0f / minsnr, PSY_SNR_25DB, PSY_SNR_1DB);
        }
        start = 0;
        for (g = 0; g < ctx->num_bands[j]; g++) {
            minscale = ath(start * line_to_frequency, ATH_ADD);
            for (i = 1; i < band_sizes[g]; i++)
                minscale = FFMIN(minscale, ath((start + i) * line_to_frequency, ATH_ADD));
            coeffs[g].ath = minscale - minath;
            start += band_sizes[g];
        }
    }
    pctx->ch = av_mallocz_array(ctx->avctx->channels, sizeof(AacPsyChannel));
    if (!pctx->ch) {
        av_freep(&ctx->model_priv_data);
        return AVERROR(ENOMEM);
    }
    lame_window_init(pctx, ctx->avctx);
    return 0;
}
 | 18,185 | 
| 
	FFmpeg | 
	82d705e245050c1040321022e200969f9c3ff9c3 | 1 | 
	static av_cold int nvenc_encode_init(AVCodecContext *avctx)
{
    NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS encode_session_params = { 0 };
    NV_ENC_PRESET_CONFIG preset_config = { 0 };
    CUcontext cu_context_curr;
    CUresult cu_res;
    GUID encoder_preset = NV_ENC_PRESET_HQ_GUID;
    GUID codec;
    NVENCSTATUS nv_status = NV_ENC_SUCCESS;
    AVCPBProperties *cpb_props;
    int surfaceCount = 0;
    int i, num_mbs;
    int isLL = 0;
    int lossless = 0;
    int res = 0;
    int dw, dh;
    int qp_inter_p;
    NvencContext *ctx = avctx->priv_data;
    NvencDynLoadFunctions *dl_fn = &ctx->nvenc_dload_funcs;
    NV_ENCODE_API_FUNCTION_LIST *p_nvenc = &dl_fn->nvenc_funcs;
    if (!nvenc_dyload_nvenc(avctx))
        return AVERROR_EXTERNAL;
    ctx->last_dts = AV_NOPTS_VALUE;
    ctx->encode_config.version = NV_ENC_CONFIG_VER;
    ctx->init_encode_params.version = NV_ENC_INITIALIZE_PARAMS_VER;
    preset_config.version = NV_ENC_PRESET_CONFIG_VER;
    preset_config.presetCfg.version = NV_ENC_CONFIG_VER;
    encode_session_params.version = NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS_VER;
    encode_session_params.apiVersion = NVENCAPI_VERSION;
    if (ctx->gpu >= dl_fn->nvenc_device_count) {
        av_log(avctx, AV_LOG_FATAL, "Requested GPU %d, but only %d GPUs are available!\n", ctx->gpu, dl_fn->nvenc_device_count);
        res = AVERROR(EINVAL);
        goto error;
    }
    ctx->cu_context = NULL;
    cu_res = dl_fn->cu_ctx_create(&ctx->cu_context, 4, dl_fn->nvenc_devices[ctx->gpu]); // CU_CTX_SCHED_BLOCKING_SYNC=4, avoid CPU spins
    if (cu_res != CUDA_SUCCESS) {
        av_log(avctx, AV_LOG_FATAL, "Failed creating CUDA context for NVENC: 0x%x\n", (int)cu_res);
        res = AVERROR_EXTERNAL;
        goto error;
    }
    cu_res = dl_fn->cu_ctx_pop_current(&cu_context_curr);
    if (cu_res != CUDA_SUCCESS) {
        av_log(avctx, AV_LOG_FATAL, "Failed popping CUDA context: 0x%x\n", (int)cu_res);
        res = AVERROR_EXTERNAL;
        goto error;
    }
    encode_session_params.device = ctx->cu_context;
    encode_session_params.deviceType = NV_ENC_DEVICE_TYPE_CUDA;
    nv_status = p_nvenc->nvEncOpenEncodeSessionEx(&encode_session_params, &ctx->nvencoder);
    if (nv_status != NV_ENC_SUCCESS) {
        ctx->nvencoder = NULL;
        av_log(avctx, AV_LOG_FATAL, "OpenEncodeSessionEx failed: 0x%x\n", (int)nv_status);
        res = AVERROR_EXTERNAL;
        goto error;
    }
    if (ctx->preset) {
        if (!strcmp(ctx->preset, "slow")) {
            encoder_preset = NV_ENC_PRESET_HQ_GUID;
            ctx->twopass = 1;
        } else if (!strcmp(ctx->preset, "medium")) {
            encoder_preset = NV_ENC_PRESET_HQ_GUID;
            ctx->twopass = 0;
        } else if (!strcmp(ctx->preset, "fast")) {
            encoder_preset = NV_ENC_PRESET_HP_GUID;
            ctx->twopass = 0;
        } else if (!strcmp(ctx->preset, "hq")) {
            encoder_preset = NV_ENC_PRESET_HQ_GUID;
        } else if (!strcmp(ctx->preset, "hp")) {
            encoder_preset = NV_ENC_PRESET_HP_GUID;
        } else if (!strcmp(ctx->preset, "bd")) {
            encoder_preset = NV_ENC_PRESET_BD_GUID;
        } else if (!strcmp(ctx->preset, "ll")) {
            encoder_preset = NV_ENC_PRESET_LOW_LATENCY_DEFAULT_GUID;
            isLL = 1;
        } else if (!strcmp(ctx->preset, "llhp")) {
            encoder_preset = NV_ENC_PRESET_LOW_LATENCY_HP_GUID;
            isLL = 1;
        } else if (!strcmp(ctx->preset, "llhq")) {
            encoder_preset = NV_ENC_PRESET_LOW_LATENCY_HQ_GUID;
            isLL = 1;
        } else if (!strcmp(ctx->preset, "lossless")) {
            encoder_preset = NV_ENC_PRESET_LOSSLESS_DEFAULT_GUID;
            lossless = 1;
        } else if (!strcmp(ctx->preset, "losslesshp")) {
            encoder_preset = NV_ENC_PRESET_LOSSLESS_HP_GUID;
            lossless = 1;
        } else if (!strcmp(ctx->preset, "default")) {
            encoder_preset = NV_ENC_PRESET_DEFAULT_GUID;
        } else {
            av_log(avctx, AV_LOG_FATAL, "Preset \"%s\" is unknown! Supported presets: slow, medium, fast, hp, hq, bd, ll, llhp, llhq, lossless, losslesshp, default\n", ctx->preset);
            res = AVERROR(EINVAL);
            goto error;
        }
    }
    if (ctx->twopass < 0) {
        ctx->twopass = isLL;
    }
    switch (avctx->codec->id) {
    case AV_CODEC_ID_H264:
        codec = NV_ENC_CODEC_H264_GUID;
        break;
    case AV_CODEC_ID_H265:
        codec = NV_ENC_CODEC_HEVC_GUID;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "Unknown codec name\n");
        res = AVERROR(EINVAL);
        goto error;
    }
    nv_status = p_nvenc->nvEncGetEncodePresetConfig(ctx->nvencoder, codec, encoder_preset, &preset_config);
    if (nv_status != NV_ENC_SUCCESS) {
        av_log(avctx, AV_LOG_FATAL, "GetEncodePresetConfig failed: 0x%x\n", (int)nv_status);
        res = AVERROR_EXTERNAL;
        goto error;
    }
    ctx->init_encode_params.encodeGUID = codec;
    ctx->init_encode_params.encodeHeight = avctx->height;
    ctx->init_encode_params.encodeWidth = avctx->width;
    if (avctx->sample_aspect_ratio.num && avctx->sample_aspect_ratio.den &&
        (avctx->sample_aspect_ratio.num != 1 || avctx->sample_aspect_ratio.num != 1)) {
        av_reduce(&dw, &dh,
                  avctx->width * avctx->sample_aspect_ratio.num,
                  avctx->height * avctx->sample_aspect_ratio.den,
                  1024 * 1024);
        ctx->init_encode_params.darHeight = dh;
        ctx->init_encode_params.darWidth = dw;
    } else {
        ctx->init_encode_params.darHeight = avctx->height;
        ctx->init_encode_params.darWidth = avctx->width;
    }
    // De-compensate for hardware, dubiously, trying to compensate for
    // playback at 704 pixel width.
    if (avctx->width == 720 &&
        (avctx->height == 480 || avctx->height == 576)) {
        av_reduce(&dw, &dh,
                  ctx->init_encode_params.darWidth * 44,
                  ctx->init_encode_params.darHeight * 45,
                  1024 * 1024);
        ctx->init_encode_params.darHeight = dh;
        ctx->init_encode_params.darWidth = dw;
    }
    ctx->init_encode_params.frameRateNum = avctx->time_base.den;
    ctx->init_encode_params.frameRateDen = avctx->time_base.num * avctx->ticks_per_frame;
    num_mbs = ((avctx->width + 15) >> 4) * ((avctx->height + 15) >> 4);
    ctx->max_surface_count = (num_mbs >= 8160) ? 32 : 48;
    if (ctx->buffer_delay >= ctx->max_surface_count)
        ctx->buffer_delay = ctx->max_surface_count - 1;
    ctx->init_encode_params.enableEncodeAsync = 0;
    ctx->init_encode_params.enablePTD = 1;
    ctx->init_encode_params.presetGUID = encoder_preset;
    ctx->init_encode_params.encodeConfig = &ctx->encode_config;
    memcpy(&ctx->encode_config, &preset_config.presetCfg, sizeof(ctx->encode_config));
    ctx->encode_config.version = NV_ENC_CONFIG_VER;
    if (avctx->refs >= 0) {
        /* 0 means "let the hardware decide" */
        switch (avctx->codec->id) {
        case AV_CODEC_ID_H264:
            ctx->encode_config.encodeCodecConfig.h264Config.maxNumRefFrames = avctx->refs;
            break;
        case AV_CODEC_ID_H265:
            ctx->encode_config.encodeCodecConfig.hevcConfig.maxNumRefFramesInDPB = avctx->refs;
            break;
        /* Earlier switch/case will return if unknown codec is passed. */
        }
    }
    if (avctx->gop_size > 0) {
        if (avctx->max_b_frames >= 0) {
            /* 0 is intra-only, 1 is I/P only, 2 is one B Frame, 3 two B frames, and so on. */
            ctx->encode_config.frameIntervalP = avctx->max_b_frames + 1;
        }
        ctx->encode_config.gopLength = avctx->gop_size;
        switch (avctx->codec->id) {
        case AV_CODEC_ID_H264:
            ctx->encode_config.encodeCodecConfig.h264Config.idrPeriod = avctx->gop_size;
            break;
        case AV_CODEC_ID_H265:
            ctx->encode_config.encodeCodecConfig.hevcConfig.idrPeriod = avctx->gop_size;
            break;
        /* Earlier switch/case will return if unknown codec is passed. */
        }
    } else if (avctx->gop_size == 0) {
        ctx->encode_config.frameIntervalP = 0;
        ctx->encode_config.gopLength = 1;
        switch (avctx->codec->id) {
        case AV_CODEC_ID_H264:
            ctx->encode_config.encodeCodecConfig.h264Config.idrPeriod = 1;
            break;
        case AV_CODEC_ID_H265:
            ctx->encode_config.encodeCodecConfig.hevcConfig.idrPeriod = 1;
            break;
        /* Earlier switch/case will return if unknown codec is passed. */
        }
    }
    /* when there're b frames, set dts offset */
    if (ctx->encode_config.frameIntervalP >= 2)
        ctx->last_dts = -2;
    if (avctx->bit_rate > 0) {
        ctx->encode_config.rcParams.averageBitRate = avctx->bit_rate;
    } else if (ctx->encode_config.rcParams.averageBitRate > 0) {
        ctx->encode_config.rcParams.maxBitRate = ctx->encode_config.rcParams.averageBitRate;
    }
    if (avctx->rc_max_rate > 0)
        ctx->encode_config.rcParams.maxBitRate = avctx->rc_max_rate;
    if (lossless) {
        if (avctx->codec->id == AV_CODEC_ID_H264)
            ctx->encode_config.encodeCodecConfig.h264Config.qpPrimeYZeroTransformBypassFlag = 1;
        ctx->encode_config.rcParams.rateControlMode = NV_ENC_PARAMS_RC_CONSTQP;
        ctx->encode_config.rcParams.constQP.qpInterB = 0;
        ctx->encode_config.rcParams.constQP.qpInterP = 0;
        ctx->encode_config.rcParams.constQP.qpIntra = 0;
        avctx->qmin = -1;
        avctx->qmax = -1;
    } else if (ctx->cbr) {
        if (!ctx->twopass) {
            ctx->encode_config.rcParams.rateControlMode = NV_ENC_PARAMS_RC_CBR;
        } else {
            ctx->encode_config.rcParams.rateControlMode = NV_ENC_PARAMS_RC_2_PASS_QUALITY;
            if (avctx->codec->id == AV_CODEC_ID_H264) {
                ctx->encode_config.encodeCodecConfig.h264Config.adaptiveTransformMode = NV_ENC_H264_ADAPTIVE_TRANSFORM_ENABLE;
                ctx->encode_config.encodeCodecConfig.h264Config.fmoMode = NV_ENC_H264_FMO_DISABLE;
            }
        }
        if (avctx->codec->id == AV_CODEC_ID_H264) {
            ctx->encode_config.encodeCodecConfig.h264Config.outputBufferingPeriodSEI = 1;
            ctx->encode_config.encodeCodecConfig.h264Config.outputPictureTimingSEI = 1;
        } else if(avctx->codec->id == AV_CODEC_ID_H265) {
            ctx->encode_config.encodeCodecConfig.hevcConfig.outputBufferingPeriodSEI = 1;
            ctx->encode_config.encodeCodecConfig.hevcConfig.outputPictureTimingSEI = 1;
        }
    } else if (avctx->global_quality > 0) {
        ctx->encode_config.rcParams.rateControlMode = NV_ENC_PARAMS_RC_CONSTQP;
        ctx->encode_config.rcParams.constQP.qpInterB = avctx->global_quality;
        ctx->encode_config.rcParams.constQP.qpInterP = avctx->global_quality;
        ctx->encode_config.rcParams.constQP.qpIntra = avctx->global_quality;
        avctx->qmin = -1;
        avctx->qmax = -1;
    } else {
        if (avctx->qmin >= 0 && avctx->qmax >= 0) {
            ctx->encode_config.rcParams.enableMinQP = 1;
            ctx->encode_config.rcParams.enableMaxQP = 1;
            ctx->encode_config.rcParams.minQP.qpInterB = avctx->qmin;
            ctx->encode_config.rcParams.minQP.qpInterP = avctx->qmin;
            ctx->encode_config.rcParams.minQP.qpIntra = avctx->qmin;
            ctx->encode_config.rcParams.maxQP.qpInterB = avctx->qmax;
            ctx->encode_config.rcParams.maxQP.qpInterP = avctx->qmax;
            ctx->encode_config.rcParams.maxQP.qpIntra = avctx->qmax;
            qp_inter_p = (avctx->qmax + 3 * avctx->qmin) / 4; // biased towards Qmin
            if (ctx->twopass) {
                ctx->encode_config.rcParams.rateControlMode = NV_ENC_PARAMS_RC_2_PASS_VBR;
                if (avctx->codec->id == AV_CODEC_ID_H264) {
                    ctx->encode_config.encodeCodecConfig.h264Config.adaptiveTransformMode = NV_ENC_H264_ADAPTIVE_TRANSFORM_ENABLE;
                    ctx->encode_config.encodeCodecConfig.h264Config.fmoMode = NV_ENC_H264_FMO_DISABLE;
                }
            } else {
                ctx->encode_config.rcParams.rateControlMode = NV_ENC_PARAMS_RC_VBR_MINQP;
            }
        } else {
            qp_inter_p = 26; // default to 26
            if (ctx->twopass) {
                ctx->encode_config.rcParams.rateControlMode = NV_ENC_PARAMS_RC_2_PASS_VBR;
            } else {
                ctx->encode_config.rcParams.rateControlMode = NV_ENC_PARAMS_RC_VBR;
            }
        }
        ctx->encode_config.rcParams.enableInitialRCQP = 1;
        ctx->encode_config.rcParams.initialRCQP.qpInterP  = qp_inter_p;
        if(avctx->i_quant_factor != 0.0 && avctx->b_quant_factor != 0.0) {
            ctx->encode_config.rcParams.initialRCQP.qpIntra = av_clip(
                qp_inter_p * fabs(avctx->i_quant_factor) + avctx->i_quant_offset, 0, 51);
            ctx->encode_config.rcParams.initialRCQP.qpInterB = av_clip(
                qp_inter_p * fabs(avctx->b_quant_factor) + avctx->b_quant_offset, 0, 51);
        } else {
            ctx->encode_config.rcParams.initialRCQP.qpIntra = qp_inter_p;
            ctx->encode_config.rcParams.initialRCQP.qpInterB = qp_inter_p;
        }
    }
    if (avctx->rc_buffer_size > 0) {
        ctx->encode_config.rcParams.vbvBufferSize = avctx->rc_buffer_size;
    } else if (ctx->encode_config.rcParams.averageBitRate > 0) {
        ctx->encode_config.rcParams.vbvBufferSize = 2 * ctx->encode_config.rcParams.averageBitRate;
    }
    if (avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
        ctx->encode_config.frameFieldMode = NV_ENC_PARAMS_FRAME_FIELD_MODE_FIELD;
    } else {
        ctx->encode_config.frameFieldMode = NV_ENC_PARAMS_FRAME_FIELD_MODE_FRAME;
    }
    switch (avctx->codec->id) {
    case AV_CODEC_ID_H264:
        ctx->encode_config.encodeCodecConfig.h264Config.h264VUIParameters.colourMatrix = avctx->colorspace;
        ctx->encode_config.encodeCodecConfig.h264Config.h264VUIParameters.colourPrimaries = avctx->color_primaries;
        ctx->encode_config.encodeCodecConfig.h264Config.h264VUIParameters.transferCharacteristics = avctx->color_trc;
        ctx->encode_config.encodeCodecConfig.h264Config.h264VUIParameters.videoFullRangeFlag = (avctx->color_range == AVCOL_RANGE_JPEG
            || avctx->pix_fmt == AV_PIX_FMT_YUVJ420P || avctx->pix_fmt == AV_PIX_FMT_YUVJ422P || avctx->pix_fmt == AV_PIX_FMT_YUVJ444P);
        ctx->encode_config.encodeCodecConfig.h264Config.h264VUIParameters.colourDescriptionPresentFlag =
            (avctx->colorspace != 2 || avctx->color_primaries != 2 || avctx->color_trc != 2);
        ctx->encode_config.encodeCodecConfig.h264Config.h264VUIParameters.videoSignalTypePresentFlag =
            (ctx->encode_config.encodeCodecConfig.h264Config.h264VUIParameters.colourDescriptionPresentFlag
            || ctx->encode_config.encodeCodecConfig.h264Config.h264VUIParameters.videoFormat != 5
            || ctx->encode_config.encodeCodecConfig.h264Config.h264VUIParameters.videoFullRangeFlag != 0);
        ctx->encode_config.encodeCodecConfig.h264Config.sliceMode = 3;
        ctx->encode_config.encodeCodecConfig.h264Config.sliceModeData = 1;
        ctx->encode_config.encodeCodecConfig.h264Config.disableSPSPPS = (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) ? 1 : 0;
        ctx->encode_config.encodeCodecConfig.h264Config.repeatSPSPPS = (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) ? 0 : 1;
        ctx->encode_config.encodeCodecConfig.h264Config.outputAUD = 1;
        if (!ctx->profile && !lossless) {
            switch (avctx->profile) {
            case FF_PROFILE_H264_HIGH_444_PREDICTIVE:
                ctx->encode_config.profileGUID = NV_ENC_H264_PROFILE_HIGH_444_GUID;
                break;
            case FF_PROFILE_H264_BASELINE:
                ctx->encode_config.profileGUID = NV_ENC_H264_PROFILE_BASELINE_GUID;
                break;
            case FF_PROFILE_H264_MAIN:
                ctx->encode_config.profileGUID = NV_ENC_H264_PROFILE_MAIN_GUID;
                break;
            case FF_PROFILE_H264_HIGH:
            case FF_PROFILE_UNKNOWN:
                ctx->encode_config.profileGUID = NV_ENC_H264_PROFILE_HIGH_GUID;
                break;
            default:
                av_log(avctx, AV_LOG_WARNING, "Unsupported profile requested, falling back to high\n");
                ctx->encode_config.profileGUID = NV_ENC_H264_PROFILE_HIGH_GUID;
                break;
            }
        } else if(!lossless) {
            if (!strcmp(ctx->profile, "high")) {
                ctx->encode_config.profileGUID = NV_ENC_H264_PROFILE_HIGH_GUID;
                avctx->profile = FF_PROFILE_H264_HIGH;
            } else if (!strcmp(ctx->profile, "main")) {
                ctx->encode_config.profileGUID = NV_ENC_H264_PROFILE_MAIN_GUID;
                avctx->profile = FF_PROFILE_H264_MAIN;
            } else if (!strcmp(ctx->profile, "baseline")) {
                ctx->encode_config.profileGUID = NV_ENC_H264_PROFILE_BASELINE_GUID;
                avctx->profile = FF_PROFILE_H264_BASELINE;
            } else if (!strcmp(ctx->profile, "high444p")) {
                ctx->encode_config.profileGUID = NV_ENC_H264_PROFILE_HIGH_444_GUID;
                avctx->profile = FF_PROFILE_H264_HIGH_444_PREDICTIVE;
            } else {
                av_log(avctx, AV_LOG_FATAL, "Profile \"%s\" is unknown! Supported profiles: high, main, baseline\n", ctx->profile);
                res = AVERROR(EINVAL);
                goto error;
            }
        }
        // force the profile to high444p if the input is AV_PIX_FMT_YUV444P
        if (avctx->pix_fmt == AV_PIX_FMT_YUV444P) {
            ctx->encode_config.profileGUID = NV_ENC_H264_PROFILE_HIGH_444_GUID;
            avctx->profile = FF_PROFILE_H264_HIGH_444_PREDICTIVE;
        }
        ctx->encode_config.encodeCodecConfig.h264Config.chromaFormatIDC = avctx->profile == FF_PROFILE_H264_HIGH_444_PREDICTIVE ? 3 : 1;
        if (ctx->level) {
            res = input_string_to_uint32(avctx, nvenc_h264_level_pairs, ctx->level, &ctx->encode_config.encodeCodecConfig.h264Config.level);
            if (res) {
                av_log(avctx, AV_LOG_FATAL, "Level \"%s\" is unknown! Supported levels: auto, 1, 1b, 1.1, 1.2, 1.3, 2, 2.1, 2.2, 3, 3.1, 3.2, 4, 4.1, 4.2, 5, 5.1\n", ctx->level);
                goto error;
            }
        } else {
            ctx->encode_config.encodeCodecConfig.h264Config.level = NV_ENC_LEVEL_AUTOSELECT;
        }
        break;
    case AV_CODEC_ID_H265:
        ctx->encode_config.encodeCodecConfig.hevcConfig.hevcVUIParameters.colourMatrix = avctx->colorspace;
        ctx->encode_config.encodeCodecConfig.hevcConfig.hevcVUIParameters.colourPrimaries = avctx->color_primaries;
        ctx->encode_config.encodeCodecConfig.hevcConfig.hevcVUIParameters.transferCharacteristics = avctx->color_trc;
        ctx->encode_config.encodeCodecConfig.hevcConfig.hevcVUIParameters.videoFullRangeFlag = (avctx->color_range == AVCOL_RANGE_JPEG
            || avctx->pix_fmt == AV_PIX_FMT_YUVJ420P || avctx->pix_fmt == AV_PIX_FMT_YUVJ422P || avctx->pix_fmt == AV_PIX_FMT_YUVJ444P);
        ctx->encode_config.encodeCodecConfig.hevcConfig.hevcVUIParameters.colourDescriptionPresentFlag =
            (avctx->colorspace != 2 || avctx->color_primaries != 2 || avctx->color_trc != 2);
        ctx->encode_config.encodeCodecConfig.hevcConfig.hevcVUIParameters.videoSignalTypePresentFlag =
            (ctx->encode_config.encodeCodecConfig.hevcConfig.hevcVUIParameters.colourDescriptionPresentFlag
            || ctx->encode_config.encodeCodecConfig.hevcConfig.hevcVUIParameters.videoFormat != 5
            || ctx->encode_config.encodeCodecConfig.hevcConfig.hevcVUIParameters.videoFullRangeFlag != 0);
        ctx->encode_config.encodeCodecConfig.hevcConfig.sliceMode = 3;
        ctx->encode_config.encodeCodecConfig.hevcConfig.sliceModeData = 1;
        ctx->encode_config.encodeCodecConfig.hevcConfig.disableSPSPPS = (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) ? 1 : 0;
        ctx->encode_config.encodeCodecConfig.hevcConfig.repeatSPSPPS = (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) ? 0 : 1;
        ctx->encode_config.encodeCodecConfig.hevcConfig.outputAUD = 1;
        /* Main is the only profile supported by the current SDK (version 5) */
        ctx->encode_config.profileGUID = NV_ENC_HEVC_PROFILE_MAIN_GUID;
        avctx->profile = FF_PROFILE_HEVC_MAIN;
        if (ctx->level) {
            res = input_string_to_uint32(avctx, nvenc_hevc_level_pairs, ctx->level, &ctx->encode_config.encodeCodecConfig.hevcConfig.level);
            if (res) {
                av_log(avctx, AV_LOG_FATAL, "Level \"%s\" is unknown! Supported levels: auto, 1, 2, 2.1, 3, 3.1, 4, 4.1, 5, 5.1, 5.2, 6, 6.1, 6.2\n", ctx->level);
                goto error;
            }
        } else {
            ctx->encode_config.encodeCodecConfig.hevcConfig.level = NV_ENC_LEVEL_AUTOSELECT;
        }
        if (ctx->tier) {
            if (!strcmp(ctx->tier, "main")) {
                ctx->encode_config.encodeCodecConfig.hevcConfig.tier = NV_ENC_TIER_HEVC_MAIN;
            } else if (!strcmp(ctx->tier, "high")) {
                ctx->encode_config.encodeCodecConfig.hevcConfig.tier = NV_ENC_TIER_HEVC_HIGH;
            } else {
                av_log(avctx, AV_LOG_FATAL, "Tier \"%s\" is unknown! Supported tiers: main, high\n", ctx->tier);
                res = AVERROR(EINVAL);
                goto error;
            }
        }
        break;
    /* An earlier switch on the codec id has already returned if an unknown codec was passed. */
    }
    nv_status = p_nvenc->nvEncInitializeEncoder(ctx->nvencoder, &ctx->init_encode_params);
    if (nv_status != NV_ENC_SUCCESS) {
        av_log(avctx, AV_LOG_FATAL, "InitializeEncoder failed: 0x%x\n", (int)nv_status);
        res = AVERROR_EXTERNAL;
        goto error;
    }
    ctx->input_surfaces = av_malloc(ctx->max_surface_count * sizeof(*ctx->input_surfaces));
    if (!ctx->input_surfaces) {
        res = AVERROR(ENOMEM);
        goto error;
    }
    ctx->output_surfaces = av_malloc(ctx->max_surface_count * sizeof(*ctx->output_surfaces));
    if (!ctx->output_surfaces) {
        res = AVERROR(ENOMEM);
        goto error;
    }
    for (surfaceCount = 0; surfaceCount < ctx->max_surface_count; ++surfaceCount) {
        NV_ENC_CREATE_INPUT_BUFFER allocSurf = { 0 };
        NV_ENC_CREATE_BITSTREAM_BUFFER allocOut = { 0 };
        allocSurf.version = NV_ENC_CREATE_INPUT_BUFFER_VER;
        allocOut.version = NV_ENC_CREATE_BITSTREAM_BUFFER_VER;
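        /* Pad the input surface to a 32-pixel alignment in both dimensions,
         * matching the rounding applied below. */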
        allocSurf.width = (avctx->width + 31) & ~31;
        allocSurf.height = (avctx->height + 31) & ~31;
        allocSurf.memoryHeap = NV_ENC_MEMORY_HEAP_SYSMEM_CACHED;
        switch (avctx->pix_fmt) {
        case AV_PIX_FMT_YUV420P:
            allocSurf.bufferFmt = NV_ENC_BUFFER_FORMAT_YV12_PL;
            break;
        case AV_PIX_FMT_NV12:
            allocSurf.bufferFmt = NV_ENC_BUFFER_FORMAT_NV12_PL;
            break;
        case AV_PIX_FMT_YUV444P:
            allocSurf.bufferFmt = NV_ENC_BUFFER_FORMAT_YUV444_PL;
            break;
        default:
            av_log(avctx, AV_LOG_FATAL, "Invalid input pixel format\n");
            res = AVERROR(EINVAL);
            goto error;
        }
        nv_status = p_nvenc->nvEncCreateInputBuffer(ctx->nvencoder, &allocSurf);
        if (nv_status != NV_ENC_SUCCESS) {
            av_log(avctx, AV_LOG_FATAL, "CreateInputBuffer failed\n");
            res = AVERROR_EXTERNAL;
            goto error;
        }
        ctx->input_surfaces[surfaceCount].lockCount = 0;
        ctx->input_surfaces[surfaceCount].input_surface = allocSurf.inputBuffer;
        ctx->input_surfaces[surfaceCount].format = allocSurf.bufferFmt;
        ctx->input_surfaces[surfaceCount].width = allocSurf.width;
        ctx->input_surfaces[surfaceCount].height = allocSurf.height;
        /* 1MB is large enough to hold most output frames. NVENC increases this automatically if it's not enough. */
        allocOut.size = 1024 * 1024;
        allocOut.memoryHeap = NV_ENC_MEMORY_HEAP_SYSMEM_CACHED;
        nv_status = p_nvenc->nvEncCreateBitstreamBuffer(ctx->nvencoder, &allocOut);
        if (nv_status != NV_ENC_SUCCESS) {
            av_log(avctx, AV_LOG_FATAL, "CreateBitstreamBuffer failed\n");
            ctx->output_surfaces[surfaceCount++].output_surface = NULL;
            res = AVERROR_EXTERNAL;
            goto error;
        }
        ctx->output_surfaces[surfaceCount].output_surface = allocOut.bitstreamBuffer;
        ctx->output_surfaces[surfaceCount].size = allocOut.size;
        ctx->output_surfaces[surfaceCount].busy = 0;
    }
    if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
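        /* Global headers requested: fetch the encoder's SPS/PPS once and
         * expose it as extradata instead of repeating it in-band. */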
        uint32_t outSize = 0;
        char tmpHeader[256];
        NV_ENC_SEQUENCE_PARAM_PAYLOAD payload = { 0 };
        payload.version = NV_ENC_SEQUENCE_PARAM_PAYLOAD_VER;
        payload.spsppsBuffer = tmpHeader;
        payload.inBufferSize = sizeof(tmpHeader);
        payload.outSPSPPSPayloadSize = &outSize;
        nv_status = p_nvenc->nvEncGetSequenceParams(ctx->nvencoder, &payload);
        if (nv_status != NV_ENC_SUCCESS) {
            av_log(avctx, AV_LOG_FATAL, "GetSequenceParams failed\n");
            res = AVERROR_EXTERNAL;
            goto error;
        }
        avctx->extradata_size = outSize;
        avctx->extradata = av_mallocz(outSize + AV_INPUT_BUFFER_PADDING_SIZE);
        if (!avctx->extradata) {
            res = AVERROR(ENOMEM);
            goto error;
        }
        memcpy(avctx->extradata, tmpHeader, outSize);
    }
    if (ctx->encode_config.frameIntervalP > 1)
        avctx->has_b_frames = 2;
    if (ctx->encode_config.rcParams.averageBitRate > 0)
        avctx->bit_rate = ctx->encode_config.rcParams.averageBitRate;
    cpb_props = ff_add_cpb_side_data(avctx);
    if (!cpb_props)
        return AVERROR(ENOMEM);
    cpb_props->max_bitrate = ctx->encode_config.rcParams.maxBitRate;
    cpb_props->avg_bitrate = avctx->bit_rate;
    cpb_props->buffer_size = ctx->encode_config.rcParams.vbvBufferSize;
    return 0;
error:
    for (i = 0; i < surfaceCount; ++i) {
        p_nvenc->nvEncDestroyInputBuffer(ctx->nvencoder, ctx->input_surfaces[i].input_surface);
        if (ctx->output_surfaces[i].output_surface)
            p_nvenc->nvEncDestroyBitstreamBuffer(ctx->nvencoder, ctx->output_surfaces[i].output_surface);
    }
    if (ctx->nvencoder)
        p_nvenc->nvEncDestroyEncoder(ctx->nvencoder);
    if (ctx->cu_context)
        dl_fn->cu_ctx_destroy(ctx->cu_context);
    nvenc_unload_nvenc(avctx);
    ctx->nvencoder = NULL;
    ctx->cu_context = NULL;
    return res;
}
 | 18,186 | 
| 
	FFmpeg | 
	7f526efd17973ec6d2204f7a47b6923e2be31363 | 1 | 
	static inline void RENAME(yuv422ptoyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
	unsigned int width, unsigned int height,
	int lumStride, int chromStride, int dstStride)
{
	RENAME(yuvPlanartoyuy2)(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride, 1);
}
 | 18,188 | 
| 
	qemu | 
	4c315c27661502a0813b129e41c0bf640c34a8d6 | 1 | 
	static void digic_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    dc->realize = digic_realize;
} | 18,189 | 
| 
	qemu | 
	ee83d41466ab393d82d9abf57b9ec24d4e6633be | 1 | 
	vcard_emul_mirror_card(VReader *vreader)
{
    /*
     * lookup certs using the C_FindObjects. The Stan Cert handle won't give
     * us the real certs until we log in.
     */
    PK11GenericObject *firstObj, *thisObj;
    int cert_count;
    unsigned char **certs;
    int *cert_len;
    VCardKey **keys;
    PK11SlotInfo *slot;
    PRBool ret;
    slot = vcard_emul_reader_get_slot(vreader);
    if (slot == NULL) {
        return NULL;
    }
    firstObj = PK11_FindGenericObjects(slot, CKO_CERTIFICATE);
    if (firstObj == NULL) {
        return NULL;
    }
    /* count the certs */
    cert_count = 0;
    for (thisObj = firstObj; thisObj;
                             thisObj = PK11_GetNextGenericObject(thisObj)) {
        cert_count++;
    }
    if (cert_count == 0) {
        PK11_DestroyGenericObjects(firstObj);
        return NULL;
    }
    /* allocate the arrays */
    ret = vcard_emul_alloc_arrays(&certs, &cert_len, &keys, cert_count);
    if (ret == PR_FALSE) {
        return NULL;
    }
    /* fill in the arrays */
    cert_count = 0;
    for (thisObj = firstObj; thisObj;
                             thisObj = PK11_GetNextGenericObject(thisObj)) {
        SECItem derCert;
        CERTCertificate *cert;
        SECStatus rv;
        rv = PK11_ReadRawAttribute(PK11_TypeGeneric, thisObj,
                                   CKA_VALUE, &derCert);
        if (rv != SECSuccess) {
            continue;
        }
        /* create floating temp cert. This gives us a cert structure even if
         * the token isn't logged in */
        cert = CERT_NewTempCertificate(CERT_GetDefaultCertDB(), &derCert,
                                       NULL, PR_FALSE, PR_TRUE);
        SECITEM_FreeItem(&derCert, PR_FALSE);
        if (cert == NULL) {
            continue;
        }
        certs[cert_count] = cert->derCert.data;
        cert_len[cert_count] = cert->derCert.len;
        keys[cert_count] = vcard_emul_make_key(slot, cert);
        cert_count++;
        CERT_DestroyCertificate(cert); /* key obj still has a reference */
    }
    /* now create the card */
    return vcard_emul_make_card(vreader, certs, cert_len, keys, cert_count);
}
 | 18,190 | 
| 
	qemu | 
	7d1b0095bff7157e856d1d0e6c4295641ced2752 | 1 | 
	static void gen_neon_trn_u8(TCGv t0, TCGv t1)
{
    TCGv rd, tmp;
    rd = new_tmp();
    tmp = new_tmp();
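    /* Byte-wise transpose (Neon VTRN.8 semantics): the four bytes of t0 and
     * t1 are treated as 2x2 pairs and exchanged between the two registers
     * using shift/mask/or on the packed 32-bit values. */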
    tcg_gen_shli_i32(rd, t0, 8);
    tcg_gen_andi_i32(rd, rd, 0xff00ff00);
    tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
    tcg_gen_or_i32(rd, rd, tmp);
    tcg_gen_shri_i32(t1, t1, 8);
    tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
    tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);
    dead_tmp(tmp);
    dead_tmp(rd);
}
 | 18,192 | 
| 
	FFmpeg | 
	7f526efd17973ec6d2204f7a47b6923e2be31363 | 1 | 
	static inline void RENAME(rgb15to32)(const uint8_t *src, uint8_t *dst, unsigned src_size)
{
	const uint16_t *end;
#ifdef HAVE_MMX
	const uint16_t *mm_end;
#endif
	uint8_t *d = (uint8_t *)dst;
	const uint16_t *s = (const uint16_t *)src;
	end = s + src_size/2;
#ifdef HAVE_MMX
	__asm __volatile(PREFETCH"	%0"::"m"(*s):"memory");
	__asm __volatile("pxor	%%mm7,%%mm7\n\t":::"memory");
	mm_end = end - 3;
	while(s < mm_end)
	{
	    __asm __volatile(
		PREFETCH" 32%1\n\t"
		"movq	%1, %%mm0\n\t"
		"movq	%1, %%mm1\n\t"
		"movq	%1, %%mm2\n\t"
		"pand	%2, %%mm0\n\t"
		"pand	%3, %%mm1\n\t"
		"pand	%4, %%mm2\n\t"
		"psllq	$3, %%mm0\n\t"
		"psrlq	$2, %%mm1\n\t"
		"psrlq	$7, %%mm2\n\t"
		"movq	%%mm0, %%mm3\n\t"
		"movq	%%mm1, %%mm4\n\t"
		"movq	%%mm2, %%mm5\n\t"
		"punpcklwd %%mm7, %%mm0\n\t"
		"punpcklwd %%mm7, %%mm1\n\t"
		"punpcklwd %%mm7, %%mm2\n\t"
		"punpckhwd %%mm7, %%mm3\n\t"
		"punpckhwd %%mm7, %%mm4\n\t"
		"punpckhwd %%mm7, %%mm5\n\t"
		"psllq	$8, %%mm1\n\t"
		"psllq	$16, %%mm2\n\t"
		"por	%%mm1, %%mm0\n\t"
		"por	%%mm2, %%mm0\n\t"
		"psllq	$8, %%mm4\n\t"
		"psllq	$16, %%mm5\n\t"
		"por	%%mm4, %%mm3\n\t"
		"por	%%mm5, %%mm3\n\t"
		MOVNTQ"	%%mm0, %0\n\t"
		MOVNTQ"	%%mm3, 8%0\n\t"
		:"=m"(*d)
		:"m"(*s),"m"(mask15b),"m"(mask15g),"m"(mask15r)
		:"memory");
		d += 16;
		s += 4;
	}
	__asm __volatile(SFENCE:::"memory");
	__asm __volatile(EMMS:::"memory");
#endif
	while(s < end)
	{
#if 0 //slightly slower on athlon
		int bgr= *s++;
		*((uint32_t*)d)++ = ((bgr&0x1F)<<3) + ((bgr&0x3E0)<<6) + ((bgr&0x7C00)<<9);
#else
//FIXME this is very likely wrong for bigendian (and the following converters too)
		register uint16_t bgr;
		bgr = *s++;
#ifdef WORDS_BIGENDIAN
		*d++ = 0;
		*d++ = (bgr&0x1F)<<3;
		*d++ = (bgr&0x3E0)>>2;
		*d++ = (bgr&0x7C00)>>7;
#else
		*d++ = (bgr&0x1F)<<3;
		*d++ = (bgr&0x3E0)>>2;
		*d++ = (bgr&0x7C00)>>7;
		*d++ = 0;
#endif
#endif
	}
}
 | 18,193 | 
| 
	qemu | 
	33848ceed79679b5c9e558b768447af2614b8db2 | 1 | 
	static int xio3130_upstream_initfn(PCIDevice *d)
{
    PCIEPort *p = PCIE_PORT(d);
    int rc;
    Error *err = NULL;
    pci_bridge_initfn(d, TYPE_PCIE_BUS);
    pcie_port_init_reg(d);
    rc = msi_init(d, XIO3130_MSI_OFFSET, XIO3130_MSI_NR_VECTOR,
                  XIO3130_MSI_SUPPORTED_FLAGS & PCI_MSI_FLAGS_64BIT,
                  XIO3130_MSI_SUPPORTED_FLAGS & PCI_MSI_FLAGS_MASKBIT, &err);
    if (rc < 0) {
        assert(rc == -ENOTSUP);
        error_report_err(err);
        goto err_bridge;
    }
    rc = pci_bridge_ssvid_init(d, XIO3130_SSVID_OFFSET,
                               XIO3130_SSVID_SVID, XIO3130_SSVID_SSID);
    if (rc < 0) {
        goto err_bridge;
    }
    rc = pcie_cap_init(d, XIO3130_EXP_OFFSET, PCI_EXP_TYPE_UPSTREAM,
                       p->port);
    if (rc < 0) {
        goto err_msi;
    }
    pcie_cap_flr_init(d);
    pcie_cap_deverr_init(d);
    rc = pcie_aer_init(d, XIO3130_AER_OFFSET, PCI_ERR_SIZEOF);
    if (rc < 0) {
        goto err;
    }
    return 0;
err:
    pcie_cap_exit(d);
err_msi:
    msi_uninit(d);
err_bridge:
    pci_bridge_exitfn(d);
    return rc;
}
 | 18,196 | 
| 
	qemu | 
	f5946dbab388050da6d9343978a38c81cce0508d | 1 | 
	int qemu_register_machine(QEMUMachine *m)
{
    TypeInfo ti = {
        .name       = g_strconcat(m->name, TYPE_MACHINE_SUFFIX, NULL),
        .parent     = TYPE_MACHINE,
        .class_init = machine_class_init,
        .class_data = (void *)m,
    };
    type_register(&ti);
    return 0;
}
 | 18,197 | 
| 
	qemu | 
	60fe637bf0e4d7989e21e50f52526444765c63b4 | 1 | 
	int64_t migrate_xbzrle_cache_size(void)
{
    MigrationState *s;
    s = migrate_get_current();
    return s->xbzrle_cache_size;
}
 | 18,198 | 
| 
	qemu | 
	5706db1deb061ee9affdcea81e59c4c2cad7c41e | 1 | 
	static int line_out_init (HWVoiceOut *hw, struct audsettings *as)
{
    SpiceVoiceOut *out = container_of (hw, SpiceVoiceOut, hw);
    struct audsettings settings;
#if SPICE_INTERFACE_PLAYBACK_MAJOR > 1 || SPICE_INTERFACE_PLAYBACK_MINOR >= 3
    settings.freq       = spice_server_get_best_playback_rate(NULL);
#else
    settings.freq       = SPICE_INTERFACE_PLAYBACK_FREQ;
#endif
    settings.nchannels  = SPICE_INTERFACE_PLAYBACK_CHAN;
    settings.fmt        = AUD_FMT_S16;
    settings.endianness = AUDIO_HOST_ENDIANNESS;
    audio_pcm_init_info (&hw->info, &settings);
    hw->samples = LINE_OUT_SAMPLES;
    out->active = 0;
    out->sin.base.sif = &playback_sif.base;
    qemu_spice_add_interface (&out->sin.base);
#if SPICE_INTERFACE_PLAYBACK_MAJOR > 1 || SPICE_INTERFACE_PLAYBACK_MINOR >= 3
    spice_server_set_playback_rate(&out->sin, settings.freq);
#endif
    return 0;
}
 | 18,199 | 
| 
	FFmpeg | 
	80ca19f766aea8f4724aac1b3faa772d25163c8a | 0 | 
	static int ipvideo_decode_block_opcode_0xD(IpvideoContext *s)
{
    int y;
    unsigned char P[2];
    /* 4-color block encoding: each 4x4 block is a different color */
    CHECK_STREAM_PTR(4);
    for (y = 0; y < 8; y++) {
        if (!(y & 3)) {
            P[0] = *s->stream_ptr++;
            P[1] = *s->stream_ptr++;
        }
        memset(s->pixel_ptr,     P[0], 4);
        memset(s->pixel_ptr + 4, P[1], 4);
        s->pixel_ptr += s->stride;
    }
    /* report success */
    return 0;
}
 | 18,201 | 
| 
	qemu | 
	2d7ad7c05e762d5b10a57eba9af1bb6b41700854 | 1 | 
	void socket_listen_cleanup(int fd, Error **errp)
{
    SocketAddress *addr;
    addr = socket_local_address(fd, errp);
    if (addr->type == SOCKET_ADDRESS_TYPE_UNIX
        && addr->u.q_unix.path) {
        if (unlink(addr->u.q_unix.path) < 0 && errno != ENOENT) {
            error_setg_errno(errp, errno,
                             "Failed to unlink socket %s",
                             addr->u.q_unix.path);
        }
    }
    qapi_free_SocketAddress(addr);
}
 | 18,204 | 
| 
	qemu | 
	3d3efba020da1de57a715e2087cf761ed0ad0904 | 1 | 
	int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
{
    int ret;
    sigset_t val;
    sigset_t *temp = NULL;
    CPUState *cpu = thread_cpu;
    TaskState *ts = (TaskState *)cpu->opaque;
    bool segv_was_blocked = ts->sigsegv_blocked;
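    /* SIGSEGV is never blocked at the host level, so that QEMU can keep
     * receiving it for its own purposes (e.g. faults on guest memory); the
     * guest-requested blocking is tracked in ts->sigsegv_blocked and folded
     * back into oldset below. */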
    if (set) {
        bool has_sigsegv = sigismember(set, SIGSEGV);
        val = *set;
        temp = &val;
        sigdelset(temp, SIGSEGV);
        switch (how) {
        case SIG_BLOCK:
            if (has_sigsegv) {
                ts->sigsegv_blocked = true;
            }
            break;
        case SIG_UNBLOCK:
            if (has_sigsegv) {
                ts->sigsegv_blocked = false;
            }
            break;
        case SIG_SETMASK:
            ts->sigsegv_blocked = has_sigsegv;
            break;
        default:
            g_assert_not_reached();
        }
    }
    ret = sigprocmask(how, temp, oldset);
    if (oldset && segv_was_blocked) {
        sigaddset(oldset, SIGSEGV);
    }
    return ret;
}
 | 18,205 | 
| 
	FFmpeg | 
	2cbe6bac0337939f023bd1c37a9c455e6d535f3a | 1 | 
	static int process_work_frame(AVFilterContext *ctx)
{
    FrameRateContext *s = ctx->priv;
    int64_t work_pts;
    int interpolate;
    int ret;
    if (!s->f1)
        return 0;
    if (!s->f0 && !s->flush)
        return 0;
    work_pts = s->start_pts + av_rescale_q(s->n, av_inv_q(s->dest_frame_rate), s->dest_time_base);
    if (work_pts >= s->pts1 && !s->flush)
        return 0;
    if (!s->f0) {
        s->work = av_frame_clone(s->f1);
    } else {
        if (work_pts >= s->pts1 + s->delta && s->flush)
            return 0;
        interpolate = av_rescale(work_pts - s->pts0, s->max, s->delta);
        ff_dlog(ctx, "process_work_frame() interpolate:%d/%d\n", interpolate, s->max);
        if (interpolate > s->interp_end) {
            s->work = av_frame_clone(s->f1);
        } else if (interpolate < s->interp_start) {
            s->work = av_frame_clone(s->f0);
        } else {
            ret = blend_frames(ctx, interpolate);
            if (ret < 0)
                return ret;
            if (ret == 0)
                s->work = av_frame_clone(interpolate > (s->max >> 1) ? s->f1 : s->f0);
        }
    }
    if (!s->work)
        return AVERROR(ENOMEM);
    s->work->pts = work_pts;
    s->n++;
    return 1;
}
 | 18,206 | 
| 
	FFmpeg | 
	83b2b34d06e74cc8775ba3d833f9782505e17539 | 1 | 
	int ff_h2645_packet_split(H2645Packet *pkt, const uint8_t *buf, int length,
                          void *logctx, int is_nalff, int nal_length_size,
                          enum AVCodecID codec_id)
{
    int consumed, ret = 0;
    const uint8_t *next_avc = buf + (is_nalff ? 0 : length);
    pkt->nb_nals = 0;
    while (length >= 4) {
        H2645NAL *nal;
        int extract_length = 0;
        int skip_trailing_zeros = 1;
        /*
         * Only parse an AVC1 length field if one is expected at the current
         * buffer position. There are unfortunately streams with multiple
         * NAL units covered by the length field. Those NAL units are delimited
         * by Annex B start code prefixes. ff_h2645_extract_rbsp() detects it
         * correctly and consumes only the first NAL unit. The additional NAL
         * units are handled here in the Annex B parsing code.
         */
        if (buf == next_avc) {
            int i;
            for (i = 0; i < nal_length_size; i++)
                extract_length = (extract_length << 8) | buf[i];
            if (extract_length > length) {
                av_log(logctx, AV_LOG_ERROR,
                       "Invalid NAL unit size (%d > %d).\n",
                       extract_length, length);
                return AVERROR_INVALIDDATA;
            }
            buf     += nal_length_size;
            length  -= nal_length_size;
            // keep track of the next AVC1 length field
            next_avc = buf + extract_length;
        } else {
            /*
             * expected to return immediately except for streams with mixed
             * NAL unit coding
             */
            int buf_index = find_next_start_code(buf, next_avc);
            buf    += buf_index;
            length -= buf_index;
            /*
             * break if an AVC1 length field is expected at the current buffer
             * position
             */
            if (buf == next_avc)
                continue;
            if (length > 0) {
                extract_length = length;
            } else if (pkt->nb_nals == 0) {
                av_log(logctx, AV_LOG_ERROR, "No NAL unit found\n");
                return AVERROR_INVALIDDATA;
            } else {
                break;
            }
        }
        if (pkt->nals_allocated < pkt->nb_nals + 1) {
            int new_size = pkt->nals_allocated + 1;
            H2645NAL *tmp = av_realloc_array(pkt->nals, new_size, sizeof(*tmp));
            if (!tmp)
                return AVERROR(ENOMEM);
            pkt->nals = tmp;
            memset(pkt->nals + pkt->nals_allocated, 0,
                   (new_size - pkt->nals_allocated) * sizeof(*tmp));
            pkt->nals_allocated = new_size;
        }
        nal = &pkt->nals[pkt->nb_nals++];
        consumed = ff_h2645_extract_rbsp(buf, extract_length, nal);
        if (consumed < 0)
            return consumed;
        /* see commit 3566042a0 */
        if (consumed < length - 3 &&
            buf[consumed]     == 0x00 && buf[consumed + 1] == 0x00 &&
            buf[consumed + 2] == 0x01 && buf[consumed + 3] == 0xE0)
            skip_trailing_zeros = 0;
        nal->size_bits = get_bit_length(nal, skip_trailing_zeros);
        ret = init_get_bits(&nal->gb, nal->data, nal->size_bits);
        if (ret < 0)
            return ret;
        if (codec_id == AV_CODEC_ID_HEVC)
            ret = hevc_parse_nal_header(nal, logctx);
        else
            ret = h264_parse_nal_header(nal, logctx);
        if (ret <= 0) {
            if (ret < 0) {
                av_log(logctx, AV_LOG_ERROR, "Invalid NAL unit %d, skipping.\n",
                       nal->type);
            }
            pkt->nb_nals--;
        }
        buf    += consumed;
        length -= consumed;
    }
    return 0;
}
 | 18,207 | 
| 
	qemu | 
	c5a49c63fa26e8825ad101dfe86339ae4c216539 | 1 | 
	static void gen_check_interrupts(DisasContext *dc)
{
    if (dc->tb->cflags & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_check_interrupts(cpu_env);
    if (dc->tb->cflags & CF_USE_ICOUNT) {
        gen_io_end();
    }
}
 | 18,208 | 
| 
	qemu | 
	60fe637bf0e4d7989e21e50f52526444765c63b4 | 1 | 
	static void put_buffer(QEMUFile *f, void *pv, size_t size)
{
    uint8_t *v = pv;
    qemu_put_buffer(f, v, size);
}
 | 18,209 | 
| 
	qemu | 
	2d896b454a0e19ec4c1ddbb0e0b65b7e54fcedf3 | 1 | 
	static void boston_lcd_write(void *opaque, hwaddr addr,
                             uint64_t val, unsigned size)
{
    BostonState *s = opaque;
    switch (size) {
    case 8:
        s->lcd_content[(addr + 7) & 0x7] = val >> 56;
        s->lcd_content[(addr + 6) & 0x7] = val >> 48;
        s->lcd_content[(addr + 5) & 0x7] = val >> 40;
        s->lcd_content[(addr + 4) & 0x7] = val >> 32;
        /* fall through */
    case 4:
        s->lcd_content[(addr + 3) & 0x7] = val >> 24;
        s->lcd_content[(addr + 2) & 0x7] = val >> 16;
        /* fall through */
    case 2:
        s->lcd_content[(addr + 1) & 0x7] = val >> 8;
        /* fall through */
    case 1:
        s->lcd_content[(addr + 0) & 0x7] = val;
        break;
    }
    qemu_chr_fe_printf(&s->lcd_display,
                       "\r%-8.8s", s->lcd_content);
}
 | 18,210 | 
| 
	qemu | 
	b3af1bc9d21e6bec7dfd283d91b465c9f815b6d6 | 1 | 
	int qemu_loadvm_state(QEMUFile *f)
{
    QLIST_HEAD(, LoadStateEntry) loadvm_handlers =
        QLIST_HEAD_INITIALIZER(loadvm_handlers);
    LoadStateEntry *le, *new_le;
    Error *local_err = NULL;
    uint8_t section_type;
    unsigned int v;
    int ret;
    int file_error_after_eof = -1;
    if (qemu_savevm_state_blocked(&local_err)) {
        error_report_err(local_err);
        return -EINVAL;
    }
    v = qemu_get_be32(f);
    if (v != QEMU_VM_FILE_MAGIC) {
        error_report("Not a migration stream");
        return -EINVAL;
    }
    v = qemu_get_be32(f);
    if (v == QEMU_VM_FILE_VERSION_COMPAT) {
        error_report("SaveVM v2 format is obsolete and don't work anymore");
        return -ENOTSUP;
    }
    if (v != QEMU_VM_FILE_VERSION) {
        error_report("Unsupported migration stream version");
        return -ENOTSUP;
    }
    while ((section_type = qemu_get_byte(f)) != QEMU_VM_EOF) {
        uint32_t instance_id, version_id, section_id;
        SaveStateEntry *se;
        char idstr[257];
        int len;
        trace_qemu_loadvm_state_section(section_type);
        switch (section_type) {
        case QEMU_VM_SECTION_START:
        case QEMU_VM_SECTION_FULL:
            /* Read section start */
            section_id = qemu_get_be32(f);
            len = qemu_get_byte(f);
            qemu_get_buffer(f, (uint8_t *)idstr, len);
            idstr[len] = 0;
            instance_id = qemu_get_be32(f);
            version_id = qemu_get_be32(f);
            trace_qemu_loadvm_state_section_startfull(section_id, idstr,
                                                      instance_id, version_id);
            /* Find savevm section */
            se = find_se(idstr, instance_id);
            if (se == NULL) {
                error_report("Unknown savevm section or instance '%s' %d",
                             idstr, instance_id);
                ret = -EINVAL;
                goto out;
            }
            /* Validate version */
            if (version_id > se->version_id) {
                error_report("savevm: unsupported version %d for '%s' v%d",
                             version_id, idstr, se->version_id);
                ret = -EINVAL;
                goto out;
            }
            /* Add entry */
            le = g_malloc0(sizeof(*le));
            le->se = se;
            le->section_id = section_id;
            le->version_id = version_id;
            QLIST_INSERT_HEAD(&loadvm_handlers, le, entry);
            ret = vmstate_load(f, le->se, le->version_id);
            if (ret < 0) {
                error_report("error while loading state for instance 0x%x of"
                             " device '%s'", instance_id, idstr);
                goto out;
            }
            break;
        case QEMU_VM_SECTION_PART:
        case QEMU_VM_SECTION_END:
            section_id = qemu_get_be32(f);
            trace_qemu_loadvm_state_section_partend(section_id);
            QLIST_FOREACH(le, &loadvm_handlers, entry) {
                if (le->section_id == section_id) {
                    break;
                }
            }
            if (le == NULL) {
                error_report("Unknown savevm section %d", section_id);
                ret = -EINVAL;
                goto out;
            }
            ret = vmstate_load(f, le->se, le->version_id);
            if (ret < 0) {
                error_report("error while loading state section id %d(%s)",
                             section_id, le->se->idstr);
                goto out;
            }
            break;
        default:
            error_report("Unknown savevm section type %d", section_type);
            ret = -EINVAL;
            goto out;
        }
    }
    file_error_after_eof = qemu_file_get_error(f);
    /*
     * Try to read in the VMDESC section as well, so that dumping tools that
     * intercept our migration stream have the chance to see it.
     */
    if (qemu_get_byte(f) == QEMU_VM_VMDESCRIPTION) {
        uint32_t size = qemu_get_be32(f);
        uint8_t *buf = g_malloc(0x1000);
        while (size > 0) {
            uint32_t read_chunk = MIN(size, 0x1000);
            qemu_get_buffer(f, buf, read_chunk);
            size -= read_chunk;
        }
        g_free(buf);
    }
    cpu_synchronize_all_post_init();
    ret = 0;
out:
    QLIST_FOREACH_SAFE(le, &loadvm_handlers, entry, new_le) {
        QLIST_REMOVE(le, entry);
        g_free(le);
    }
    if (ret == 0) {
        /* We may not have a VMDESC section, so ignore relative errors */
        ret = file_error_after_eof;
    }
    return ret;
}
 | 18,212 | 
| 
	qemu | 
	8be7e7e4c72c048b90e3482557954a24bba43ba7 | 1 | 
	int inet_connect(const char *str, bool block, Error **errp)
{
    QemuOpts *opts;
    int sock = -1;
    opts = qemu_opts_create(&dummy_opts, NULL, 0);
    if (inet_parse(opts, str) == 0) {
        if (block) {
            qemu_opt_set(opts, "block", "on");
        }
        sock = inet_connect_opts(opts, errp);
    } else {
        error_set(errp, QERR_SOCKET_CREATE_FAILED);
    }
    qemu_opts_del(opts);
    return sock;
}
 | 18,213 | 
| 
	qemu | 
	44b6789299a8acca3f25331bc411055cafc7bb06 | 1 | 
	static void blkverify_verify_readv(BlkverifyAIOCB *acb)
{
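    /* qemu_iovec_compare() returns the byte offset of the first mismatch
     * between the two reads, or -1 if their contents are identical. */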
    ssize_t offset = qemu_iovec_compare(acb->qiov, &acb->raw_qiov);
    if (offset != -1) {
        blkverify_err(acb, "contents mismatch in sector %" PRId64,
                      acb->sector_num + (int64_t)(offset / BDRV_SECTOR_SIZE));
    }
}
 | 18,214 | 
| 
	qemu | 
	d9bce9d99f4656ae0b0127f7472db9067b8f84ab | 1 | 
	void do_subfo (void)
{
    T2 = T0;
    T0 = T1 - T0;
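    /* T2 holds the original subtrahend; signed overflow occurred iff the
     * operands have different signs and the result has the same sign as the
     * subtrahend. */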
    if (likely(!(((~T2) ^ T1 ^ (-1)) & ((~T2) ^ T0) & (1 << 31)))) {
        xer_ov = 0;
    } else {
        xer_so = 1;
        xer_ov = 1;
    }
    RETURN();
}
 | 18,215 | 
| 
	FFmpeg | 
	57ec555e8ef3c5ef1d77d48dc7cc868e56ddadc9 | 1 | 
	static void png_filter_row(DSPContext *dsp, uint8_t *dst, int filter_type,
                           uint8_t *src, uint8_t *top, int size, int bpp)
{
    int i;
    switch(filter_type) {
    case PNG_FILTER_VALUE_NONE:
        memcpy(dst, src, size);
        break;
    case PNG_FILTER_VALUE_SUB:
        dsp->diff_bytes(dst, src, src-bpp, size);
        memcpy(dst, src, bpp);
        break;
    case PNG_FILTER_VALUE_UP:
        dsp->diff_bytes(dst, src, top, size);
        break;
    case PNG_FILTER_VALUE_AVG:
        for(i = 0; i < bpp; i++)
            dst[i] = src[i] - (top[i] >> 1);
        for(; i < size; i++)
            dst[i] = src[i] - ((src[i-bpp] + top[i]) >> 1);
        break;
    case PNG_FILTER_VALUE_PAETH:
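        /* With no left or top-left neighbour available, the Paeth predictor
         * for the first bpp bytes reduces to the pixel directly above. */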
        for(i = 0; i < bpp; i++)
            dst[i] = src[i] - top[i];
        sub_png_paeth_prediction(dst+i, src+i, top+i, size-i, bpp);
        break;
    }
}
 | 18,216 | 
| 
	qemu | 
	d3d74d6fe095e2e49d030e0c163cecfb9c20f1d4 | 1 | 
	static void handle_9p_output(VirtIODevice *vdev, VirtQueue *vq)
{
    V9fsVirtioState *v = (V9fsVirtioState *)vdev;
    V9fsState *s = &v->state;
    V9fsPDU *pdu;
    ssize_t len;
    while ((pdu = pdu_alloc(s))) {
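        /* Every 9P request starts with a 7-byte header: size[4] type[1]
         * tag[2], little-endian; it is copied out of the out sg list below. */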
        struct {
            uint32_t size_le;
            uint8_t id;
            uint16_t tag_le;
        } QEMU_PACKED out;
        VirtQueueElement *elem;
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            pdu_free(pdu);
            break;
        }
        BUG_ON(elem->out_num == 0 || elem->in_num == 0);
        QEMU_BUILD_BUG_ON(sizeof(out) != 7);
        v->elems[pdu->idx] = elem;
        len = iov_to_buf(elem->out_sg, elem->out_num, 0,
                         &out, sizeof(out));
        BUG_ON(len != sizeof(out));
        pdu->size = le32_to_cpu(out.size_le);
        pdu->id = out.id;
        pdu->tag = le16_to_cpu(out.tag_le);
        qemu_co_queue_init(&pdu->complete);
        pdu_submit(pdu);
    }
}
 | 18,218 | 
| 
	qemu | 
	8297be80f7cf71e09617669a8bd8b2836dcfd4c3 | 0 | 
	static void add_keysym(char *line, int keysym, int keycode, kbd_layout_t *k) {
    if (keysym < MAX_NORMAL_KEYCODE) {
        trace_keymap_add("normal", keysym, keycode, line);
        k->keysym2keycode[keysym] = keycode;
    } else {
        if (k->extra_count >= MAX_EXTRA_COUNT) {
            fprintf(stderr, "Warning: Could not assign keysym %s (0x%x)"
                    " because of memory constraints.\n", line, keysym);
        } else {
            trace_keymap_add("extra", keysym, keycode, line);
            k->keysym2keycode_extra[k->extra_count].keysym = keysym;
            k->keysym2keycode_extra[k->extra_count].keycode = keycode;
            k->extra_count++;
        }
    }
}
 | 18,219 | 
| 
	qemu | 
	4fa4ce7107c6ec432f185307158c5df91ce54308 | 0 | 
	static int local_unlinkat(FsContext *ctx, V9fsPath *dir,
                          const char *name, int flags)
{
    int ret;
    V9fsString fullname;
    char buffer[PATH_MAX];
    v9fs_string_init(&fullname);
    v9fs_string_sprintf(&fullname, "%s/%s", dir->data, name);
    if (ctx->export_flags & V9FS_SM_MAPPED_FILE) {
        if (flags == AT_REMOVEDIR) {
            /*
             * If removing a directory, also remove the .virtfs_metadata
             * directory contained inside it.
             */
            snprintf(buffer, ARRAY_SIZE(buffer), "%s/%s/%s", ctx->fs_root,
                     fullname.data, VIRTFS_META_DIR);
            ret = remove(buffer);
            if (ret < 0 && errno != ENOENT) {
                /*
                 * We didn't have the .virtfs_metadata file. Maybe the file was
                 * created in non-mapped mode? Ignore ENOENT.
                 */
                goto err_out;
            }
        }
        /*
         * Now remove the entry for this name from the parent
         * directory's .virtfs_metadata directory.
         */
        ret = remove(local_mapped_attr_path(ctx, fullname.data, buffer));
        if (ret < 0 && errno != ENOENT) {
            /*
             * We didn't have the .virtfs_metadata file. Maybe the file was
             * created in non-mapped mode? Ignore ENOENT.
             */
            goto err_out;
        }
    }
    /* Remove the name finally */
    ret = remove(rpath(ctx, fullname.data, buffer));
err_out:
    v9fs_string_free(&fullname);
    return ret;
}
 | 18,222 | 
| 
	FFmpeg | 
	9dfd89b831f7c5a11b6406164e0d6d65c0392d24 | 0 | 
	static int dnxhd_init_vlc(DNXHDEncContext *ctx)
{
    int i, j, level, run;
    int max_level = 1<<(ctx->cid_table->bit_depth+2);
    FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->vlc_codes, max_level*4*sizeof(*ctx->vlc_codes), fail);
    FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->vlc_bits,  max_level*4*sizeof(*ctx->vlc_bits) , fail);
    FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->run_codes, 63*2,                                fail);
    FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->run_bits,  63,                                  fail);
    ctx->vlc_codes += max_level*2;
    ctx->vlc_bits  += max_level*2;
    for (level = -max_level; level < max_level; level++) {
        for (run = 0; run < 2; run++) {
            int index = (level<<1)|run;
            int sign, offset = 0, alevel = level;
            MASK_ABS(sign, alevel);
            if (alevel > 64) {
                offset = (alevel-1)>>6;
                alevel -= offset<<6;
            }
            for (j = 0; j < 257; j++) {
                if (ctx->cid_table->ac_level[j] >> 1 == alevel &&
                    (!offset || (ctx->cid_table->ac_index_flag[j] && offset)) &&
                    (!run    || (ctx->cid_table->ac_run_flag  [j] && run))) {
                    assert(!ctx->vlc_codes[index]);
                    if (alevel) {
                        ctx->vlc_codes[index] = (ctx->cid_table->ac_codes[j]<<1)|(sign&1);
                        ctx->vlc_bits [index] = ctx->cid_table->ac_bits[j]+1;
                    } else {
                        ctx->vlc_codes[index] = ctx->cid_table->ac_codes[j];
                        ctx->vlc_bits [index] = ctx->cid_table->ac_bits [j];
                    }
                    break;
                }
            }
            assert(!alevel || j < 257);
            if (offset) {
                ctx->vlc_codes[index] = (ctx->vlc_codes[index]<<ctx->cid_table->index_bits)|offset;
                ctx->vlc_bits [index]+= ctx->cid_table->index_bits;
            }
        }
    }
    for (i = 0; i < 62; i++) {
        int run = ctx->cid_table->run[i];
        assert(run < 63);
        ctx->run_codes[run] = ctx->cid_table->run_codes[i];
        ctx->run_bits [run] = ctx->cid_table->run_bits[i];
    }
    return 0;
 fail:
    return -1;
}
 | 18,223 | 
| 
	qemu | 
	46c5874e9cd752ed8ded31af03472edd8fc3efc1 | 0 | 
	static void rtas_ibm_query_interrupt_source_number(PowerPCCPU *cpu,
                                                   sPAPREnvironment *spapr,
                                                   uint32_t token,
                                                   uint32_t nargs,
                                                   target_ulong args,
                                                   uint32_t nret,
                                                   target_ulong rets)
{
    uint32_t config_addr = rtas_ld(args, 0);
    uint64_t buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2);
    unsigned int intr_src_num = -1, ioa_intr_num = rtas_ld(args, 3);
    sPAPRPHBState *phb = NULL;
    PCIDevice *pdev = NULL;
    spapr_pci_msi *msi;
    /* Find sPAPRPHBState */
    phb = find_phb(spapr, buid);
    if (phb) {
        pdev = find_dev(spapr, buid, config_addr);
    }
    if (!phb || !pdev) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }
    /* Find device descriptor and start IRQ */
    msi = (spapr_pci_msi *) g_hash_table_lookup(phb->msi, &config_addr);
    if (!msi || !msi->first_irq || !msi->num || (ioa_intr_num >= msi->num)) {
        trace_spapr_pci_msi("Failed to return vector", config_addr);
        rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
        return;
    }
    intr_src_num = msi->first_irq + ioa_intr_num;
    trace_spapr_pci_rtas_ibm_query_interrupt_source_number(ioa_intr_num,
                                                           intr_src_num);
    rtas_st(rets, 0, RTAS_OUT_SUCCESS);
    rtas_st(rets, 1, intr_src_num);
    rtas_st(rets, 2, 1); /* 0 == level; 1 == edge */
}
 | 18,224 | 
| 
	qemu | 
	36778660d7fd0748a6129916e47ecedd67bdb758 | 0 | 
	void ppc_hash64_set_sdr1(PowerPCCPU *cpu, target_ulong value,
                         Error **errp)
{
    CPUPPCState *env = &cpu->env;
    target_ulong htabsize = value & SDR_64_HTABSIZE;
    env->spr[SPR_SDR1] = value;
    if (htabsize > 28) {
        error_setg(errp,
                   "Invalid HTABSIZE 0x" TARGET_FMT_lx" stored in SDR1",
                   htabsize);
        htabsize = 28;
    }
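    /* The hashed page table holds 2^(18 + HTABSIZE) bytes of 128-byte PTEGs,
     * so the PTEG index mask is (1 << (htabsize + 18 - 7)) - 1. */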
    env->htab_mask = (1ULL << (htabsize + 18 - 7)) - 1;
    env->htab_base = value & SDR_64_HTABORG;
}
 | 18,225 | 
| 
	qemu | 
	550830f9351291c585c963204ad9127998b1c1ce | 0 | 
	static coroutine_fn int cow_co_write(BlockDriverState *bs, int64_t sector_num,
                                     const uint8_t *buf, int nb_sectors)
{
    int ret;
    BDRVCowState *s = bs->opaque;
    qemu_co_mutex_lock(&s->lock);
    ret = cow_write(bs, sector_num, buf, nb_sectors);
    qemu_co_mutex_unlock(&s->lock);
    return ret;
}
 | 18,226 | 
| 
	qemu | 
	9646f4927faf68e8690588c2fd6dc9834c440b58 | 0 | 
	static void x86_cpu_reset(CPUState *s)
{
    X86CPU *cpu = X86_CPU(s);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
    CPUX86State *env = &cpu->env;
    target_ulong cr4;
    uint64_t xcr0;
    int i;
    xcc->parent_reset(s);
    memset(env, 0, offsetof(CPUX86State, end_reset_fields));
    tlb_flush(s, 1);
    env->old_exception = -1;
    /* init to reset state */
    env->hflags2 |= HF2_GIF_MASK;
    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;
    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;
    env->eflags = 0x2;
    /* FPU init */
    for (i = 0; i < 8; i++) {
        env->fptags[i] = 1;
    }
    cpu_set_fpuc(env, 0x37f);
    env->mxcsr = 0x1f80;
    /* All units are in INIT state.  */
    env->xstate_bv = 0;
    env->pat = 0x0007040600070406ULL;
    env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(s, BP_CPU);
    cpu_watchpoint_remove_all(s, BP_CPU);
    cr4 = 0;
    xcr0 = XSTATE_FP_MASK;
#ifdef CONFIG_USER_ONLY
    /* Enable all the features for user-mode.  */
    if (env->features[FEAT_1_EDX] & CPUID_SSE) {
        xcr0 |= XSTATE_SSE_MASK;
    }
    for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if ((env->features[esa->feature] & esa->bits) == esa->bits) {
            xcr0 |= 1ull << i;
        }
    }
    if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
        cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
    }
    if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
        cr4 |= CR4_FSGSBASE_MASK;
    }
#endif
    env->xcr0 = xcr0;
    cpu_x86_update_cr4(env, cr4);
    /*
     * SDM 11.11.5 requires:
     *  - IA32_MTRR_DEF_TYPE MSR.E = 0
     *  - IA32_MTRR_PHYSMASKn.V = 0
     * All other bits are undefined.  For simplification, zero it all.
     */
    env->mtrr_deftype = 0;
    memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
    memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
#if !defined(CONFIG_USER_ONLY)
    /* We hard-wire the BSP to the first CPU. */
    apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
    s->halted = !cpu_is_bsp(cpu);
    if (kvm_enabled()) {
        kvm_arch_reset_vcpu(cpu);
    }
#endif
}
 | 18,227 | 
| 
	qemu | 
	bec1631100323fac0900aea71043d5c4e22fc2fa | 0 | 
	static void tcg_out_brcond2 (TCGContext *s, const TCGArg *args,
                             const int *const_args)
{
    tcg_out_cmp2(s, args, const_args);
    tcg_out_bc(s, BC | BI(7, CR_EQ) | BO_COND_TRUE, args[5]);
}
 | 18,228 | 
| 
	qemu | 
	7bba83bf80eae9c9e323319ff40d0ca477b0a77a | 0 | 
	static int net_rx_ok(NetClientState *nc)
{
    struct XenNetDev *netdev = qemu_get_nic_opaque(nc);
    RING_IDX rc, rp;
    if (netdev->xendev.be_state != XenbusStateConnected) {
        return 0;
    }
    rc = netdev->rx_ring.req_cons;
    rp = netdev->rx_ring.sring->req_prod;
    xen_rmb();
    if (rc == rp || RING_REQUEST_CONS_OVERFLOW(&netdev->rx_ring, rc)) {
        xen_be_printf(&netdev->xendev, 2, "%s: no rx buffers (%d/%d)\n",
                      __FUNCTION__, rc, rp);
        return 0;
    }
    return 1;
}
 | 18,229 | 
| 
	qemu | 
	03f4995781a64e106e6f73864a1e9c4163dac53b | 0 | 
	static void page_flush_tb_1(int level, void **lp)
{
    int i;
    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}
 | 18,230 | 
| 
	qemu | 
	8f577d3d29996ad5c60ac6419881557183806d8b | 0 | 
	static void disas_sparc_insn(DisasContext * dc)
{
    unsigned int insn, opc, rs1, rs2, rd;
    insn = ldl_code(dc->pc);
    opc = GET_FIELD(insn, 0, 1);
    rd = GET_FIELD(insn, 2, 6);
    switch (opc) {
    case 0:                     /* branches/sethi */
        {
            unsigned int xop = GET_FIELD(insn, 7, 9);
            int32_t target;
            switch (xop) {
#ifdef TARGET_SPARC64
            case 0x1:           /* V9 BPcc */
                {
                    int cc;
                    target = GET_FIELD_SP(insn, 0, 18);
                    target = sign_extend(target, 18);
                    target <<= 2;
                    cc = GET_FIELD_SP(insn, 20, 21);
                    if (cc == 0)
                        do_branch(dc, target, insn, 0);
                    else if (cc == 2)
                        do_branch(dc, target, insn, 1);
                    else
                        goto illegal_insn;
                    goto jmp_insn;
                }
            case 0x3:           /* V9 BPr */
                {
                    target = GET_FIELD_SP(insn, 0, 13) |
                        (GET_FIELD_SP(insn, 20, 21) << 14);
                    target = sign_extend(target, 16);
                    target <<= 2;
                    rs1 = GET_FIELD(insn, 13, 17);
                    gen_movl_reg_T0(rs1);
                    do_branch_reg(dc, target, insn);
                    goto jmp_insn;
                }
            case 0x5:           /* V9 FBPcc */
                {
                    int cc = GET_FIELD_SP(insn, 20, 21);
                    if (gen_trap_ifnofpu(dc))
                        goto jmp_insn;
                    target = GET_FIELD_SP(insn, 0, 18);
                    target = sign_extend(target, 19);
                    target <<= 2;
                    do_fbranch(dc, target, insn, cc);
                    goto jmp_insn;
                }
#else
            case 0x7:           /* CBN+x */
                {
                    goto ncp_insn;
                }
#endif
            case 0x2:           /* BN+x */
                {
                    target = GET_FIELD(insn, 10, 31);
                    target = sign_extend(target, 22);
                    target <<= 2;
                    do_branch(dc, target, insn, 0);
                    goto jmp_insn;
                }
            case 0x6:           /* FBN+x */
                {
                    if (gen_trap_ifnofpu(dc))
                        goto jmp_insn;
                    target = GET_FIELD(insn, 10, 31);
                    target = sign_extend(target, 22);
                    target <<= 2;
                    do_fbranch(dc, target, insn, 0);
                    goto jmp_insn;
                }
            case 0x4:           /* SETHI */
#define OPTIM
#if defined(OPTIM)
                if (rd) { // nop
#endif
                    uint32_t value = GET_FIELD(insn, 10, 31);
                    gen_movl_imm_T0(value << 10);
                    gen_movl_T0_reg(rd);
#if defined(OPTIM)
                }
#endif
                break;
            case 0x0:           /* UNIMPL */
            default:
                goto illegal_insn;
            }
            break;
        }
        break;
    case 1:
        /*CALL*/ {
            target_long target = GET_FIELDs(insn, 2, 31) << 2;
#ifdef TARGET_SPARC64
            if (dc->pc == (uint32_t)dc->pc) {
                gen_op_movl_T0_im(dc->pc);
            } else {
                gen_op_movq_T0_im64(dc->pc >> 32, dc->pc);
            }
#else
            gen_op_movl_T0_im(dc->pc);
#endif
            gen_movl_T0_reg(15);
            target += dc->pc;
            gen_mov_pc_npc(dc);
            dc->npc = target;
        }
        goto jmp_insn;
    case 2:                     /* FPU & Logical Operations */
        {
            unsigned int xop = GET_FIELD(insn, 7, 12);
            if (xop == 0x3a) {  /* generate trap */
                int cond;
                rs1 = GET_FIELD(insn, 13, 17);
                gen_movl_reg_T0(rs1);
                if (IS_IMM) {
                    rs2 = GET_FIELD(insn, 25, 31);
#if defined(OPTIM)
                    if (rs2 != 0) {
#endif
                        gen_movl_simm_T1(rs2);
                        gen_op_add_T1_T0();
#if defined(OPTIM)
                    }
#endif
                } else {
                    rs2 = GET_FIELD(insn, 27, 31);
#if defined(OPTIM)
                    if (rs2 != 0) {
#endif
                        gen_movl_reg_T1(rs2);
                        gen_op_add_T1_T0();
#if defined(OPTIM)
                    }
#endif
                }
                cond = GET_FIELD(insn, 3, 6);
                if (cond == 0x8) {
                    save_state(dc);
                    gen_op_trap_T0();
                } else if (cond != 0) {
#ifdef TARGET_SPARC64
                    /* V9 icc/xcc */
                    int cc = GET_FIELD_SP(insn, 11, 12);
                    flush_T2(dc);
                    save_state(dc);
                    if (cc == 0)
                        gen_cond[0][cond]();
                    else if (cc == 2)
                        gen_cond[1][cond]();
                    else
                        goto illegal_insn;
#else
                    flush_T2(dc);
                    save_state(dc);
                    gen_cond[0][cond]();
#endif
                    gen_op_trapcc_T0();
                }
                gen_op_next_insn();
                gen_op_movl_T0_0();
                gen_op_exit_tb();
                dc->is_br = 1;
                goto jmp_insn;
            } else if (xop == 0x28) {
                rs1 = GET_FIELD(insn, 13, 17);
                switch(rs1) {
                case 0: /* rdy */
#ifndef TARGET_SPARC64
                case 0x01 ... 0x0e: /* undefined in the SPARCv8
                                       manual, rdy on the microSPARC
                                       II */
                case 0x0f:          /* stbar in the SPARCv8 manual,
                                       rdy on the microSPARC II */
                case 0x10 ... 0x1f: /* implementation-dependent in the
                                       SPARCv8 manual, rdy on the
                                       microSPARC II */
#endif
                    gen_op_movtl_T0_env(offsetof(CPUSPARCState, y));
                    gen_movl_T0_reg(rd);
                    break;
#ifdef TARGET_SPARC64
                case 0x2: /* V9 rdccr */
                    gen_op_rdccr();
                    gen_movl_T0_reg(rd);
                    break;
                case 0x3: /* V9 rdasi */
                    gen_op_movl_T0_env(offsetof(CPUSPARCState, asi));
                    gen_movl_T0_reg(rd);
                    break;
                case 0x4: /* V9 rdtick */
                    gen_op_rdtick();
                    gen_movl_T0_reg(rd);
                    break;
                case 0x5: /* V9 rdpc */
                    if (dc->pc == (uint32_t)dc->pc) {
                        gen_op_movl_T0_im(dc->pc);
                    } else {
                        gen_op_movq_T0_im64(dc->pc >> 32, dc->pc);
                    }
                    gen_movl_T0_reg(rd);
                    break;
                case 0x6: /* V9 rdfprs */
                    gen_op_movl_T0_env(offsetof(CPUSPARCState, fprs));
                    gen_movl_T0_reg(rd);
                    break;
                case 0xf: /* V9 membar */
                    break; /* no effect */
                case 0x13: /* Graphics Status */
                    if (gen_trap_ifnofpu(dc))
                        goto jmp_insn;
                    gen_op_movtl_T0_env(offsetof(CPUSPARCState, gsr));
                    gen_movl_T0_reg(rd);
                    break;
                case 0x17: /* Tick compare */
                    gen_op_movtl_T0_env(offsetof(CPUSPARCState, tick_cmpr));
                    gen_movl_T0_reg(rd);
                    break;
                case 0x18: /* System tick */
                    gen_op_rdstick();
                    gen_movl_T0_reg(rd);
                    break;
                case 0x19: /* System tick compare */
                    gen_op_movtl_T0_env(offsetof(CPUSPARCState, stick_cmpr));
                    gen_movl_T0_reg(rd);
                    break;
                case 0x10: /* Performance Control */
                case 0x11: /* Performance Instrumentation Counter */
                case 0x12: /* Dispatch Control */
                case 0x14: /* Softint set, WO */
                case 0x15: /* Softint clear, WO */
                case 0x16: /* Softint write */
#endif
                default:
                    goto illegal_insn;
                }
#if !defined(CONFIG_USER_ONLY)
            } else if (xop == 0x29) { /* rdpsr / UA2005 rdhpr */
#ifndef TARGET_SPARC64
                if (!supervisor(dc))
                    goto priv_insn;
                gen_op_rdpsr();
#else
                if (!hypervisor(dc))
                    goto priv_insn;
                rs1 = GET_FIELD(insn, 13, 17);
                switch (rs1) {
                case 0: // hpstate
                    // gen_op_rdhpstate();
                    break;
                case 1: // htstate
                    // gen_op_rdhtstate();
                    break;
                case 3: // hintp
                    gen_op_movl_T0_env(offsetof(CPUSPARCState, hintp));
                    break;
                case 5: // htba
                    gen_op_movl_T0_env(offsetof(CPUSPARCState, htba));
                    break;
                case 6: // hver
                    gen_op_movl_T0_env(offsetof(CPUSPARCState, hver));
                    break;
                case 31: // hstick_cmpr
                    gen_op_movl_env_T0(offsetof(CPUSPARCState, hstick_cmpr));
                    break;
                default:
                    goto illegal_insn;
                }
#endif
                gen_movl_T0_reg(rd);
                break;
            } else if (xop == 0x2a) { /* rdwim / V9 rdpr */
                if (!supervisor(dc))
                    goto priv_insn;
#ifdef TARGET_SPARC64
                rs1 = GET_FIELD(insn, 13, 17);
                switch (rs1) {
                case 0: // tpc
                    gen_op_rdtpc();
                    break;
                case 1: // tnpc
                    gen_op_rdtnpc();
                    break;
                case 2: // tstate
                    gen_op_rdtstate();
                    break;
                case 3: // tt
                    gen_op_rdtt();
                    break;
                case 4: // tick
                    gen_op_rdtick();
                    break;
                case 5: // tba
                    gen_op_movtl_T0_env(offsetof(CPUSPARCState, tbr));
                    break;
                case 6: // pstate
                    gen_op_rdpstate();
                    break;
                case 7: // tl
                    gen_op_movl_T0_env(offsetof(CPUSPARCState, tl));
                    break;
                case 8: // pil
                    gen_op_movl_T0_env(offsetof(CPUSPARCState, psrpil));
                    break;
                case 9: // cwp
                    gen_op_rdcwp();
                    break;
                case 10: // cansave
                    gen_op_movl_T0_env(offsetof(CPUSPARCState, cansave));
                    break;
                case 11: // canrestore
                    gen_op_movl_T0_env(offsetof(CPUSPARCState, canrestore));
                    break;
                case 12: // cleanwin
                    gen_op_movl_T0_env(offsetof(CPUSPARCState, cleanwin));
                    break;
                case 13: // otherwin
                    gen_op_movl_T0_env(offsetof(CPUSPARCState, otherwin));
                    break;
                case 14: // wstate
                    gen_op_movl_T0_env(offsetof(CPUSPARCState, wstate));
                    break;
                case 16: // UA2005 gl
                    gen_op_movl_T0_env(offsetof(CPUSPARCState, gl));
                    break;
                case 26: // UA2005 strand status
                    if (!hypervisor(dc))
                        goto priv_insn;
                    gen_op_movl_T0_env(offsetof(CPUSPARCState, ssr));
                    break;
                case 31: // ver
                    gen_op_movtl_T0_env(offsetof(CPUSPARCState, version));
                    break;
                case 15: // fq
                default:
                    goto illegal_insn;
                }
#else
                gen_op_movl_T0_env(offsetof(CPUSPARCState, wim));
#endif
                gen_movl_T0_reg(rd);
                break;
            } else if (xop == 0x2b) { /* rdtbr / V9 flushw */
#ifdef TARGET_SPARC64
                gen_op_flushw();
#else
                if (!supervisor(dc))
                    goto priv_insn;
                gen_op_movtl_T0_env(offsetof(CPUSPARCState, tbr));
                gen_movl_T0_reg(rd);
#endif
                break;
#endif
            } else if (xop == 0x34) {   /* FPU Operations */
                if (gen_trap_ifnofpu(dc))
                    goto jmp_insn;
                gen_op_clear_ieee_excp_and_FTT();
                rs1 = GET_FIELD(insn, 13, 17);
                rs2 = GET_FIELD(insn, 27, 31);
                xop = GET_FIELD(insn, 18, 26);
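                /* FPop1: xop carries the opf field. Single-precision operands
                   stage through FT0/FT1, double-precision through DT0/DT1;
                   quad-precision ops are unimplemented and go to nfpu_insn. */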
                switch (xop) {
                    case 0x1: /* fmovs */
                        gen_op_load_fpr_FT0(rs2);
                        gen_op_store_FT0_fpr(rd);
                        break;
                    case 0x5: /* fnegs */
                        gen_op_load_fpr_FT1(rs2);
                        gen_op_fnegs();
                        gen_op_store_FT0_fpr(rd);
                        break;
                    case 0x9: /* fabss */
                        gen_op_load_fpr_FT1(rs2);
                        gen_op_fabss();
                        gen_op_store_FT0_fpr(rd);
                        break;
                    case 0x29: /* fsqrts */
                        gen_op_load_fpr_FT1(rs2);
                        gen_op_fsqrts();
                        gen_op_store_FT0_fpr(rd);
                        break;
                    case 0x2a: /* fsqrtd */
                        gen_op_load_fpr_DT1(DFPREG(rs2));
                        gen_op_fsqrtd();
                        gen_op_store_DT0_fpr(DFPREG(rd));
                        break;
                    case 0x2b: /* fsqrtq */
                        goto nfpu_insn;
                    case 0x41: /* fadds */
                        gen_op_load_fpr_FT0(rs1);
                        gen_op_load_fpr_FT1(rs2);
                        gen_op_fadds();
                        gen_op_store_FT0_fpr(rd);
                        break;
                    case 0x42: /* faddd */
                        gen_op_load_fpr_DT0(DFPREG(rs1));
                        gen_op_load_fpr_DT1(DFPREG(rs2));
                        gen_op_faddd();
                        gen_op_store_DT0_fpr(DFPREG(rd));
                        break;
                    case 0x43: /* faddq */
                        goto nfpu_insn;
                    case 0x45: /* fsubs */
                        gen_op_load_fpr_FT0(rs1);
                        gen_op_load_fpr_FT1(rs2);
                        gen_op_fsubs();
                        gen_op_store_FT0_fpr(rd);
                        break;
                    case 0x46: /* fsubd */
                        gen_op_load_fpr_DT0(DFPREG(rs1));
                        gen_op_load_fpr_DT1(DFPREG(rs2));
                        gen_op_fsubd();
                        gen_op_store_DT0_fpr(DFPREG(rd));
                        break;
                    case 0x47: /* fsubq */
                        goto nfpu_insn;
                    case 0x49: /* fmuls */
                        gen_op_load_fpr_FT0(rs1);
                        gen_op_load_fpr_FT1(rs2);
                        gen_op_fmuls();
                        gen_op_store_FT0_fpr(rd);
                        break;
                    case 0x4a: /* fmuld */
                        gen_op_load_fpr_DT0(DFPREG(rs1));
                        gen_op_load_fpr_DT1(DFPREG(rs2));
                        gen_op_fmuld();
                        gen_op_store_DT0_fpr(DFPREG(rd));
                        break;
                    case 0x4b: /* fmulq */
                        goto nfpu_insn;
                    case 0x4d: /* fdivs */
                        gen_op_load_fpr_FT0(rs1);
                        gen_op_load_fpr_FT1(rs2);
                        gen_op_fdivs();
                        gen_op_store_FT0_fpr(rd);
                        break;
                    case 0x4e: /* fdivd */
                        gen_op_load_fpr_DT0(DFPREG(rs1));
                        gen_op_load_fpr_DT1(DFPREG(rs2));
                        gen_op_fdivd();
                        gen_op_store_DT0_fpr(DFPREG(rd));
                        break;
                    case 0x4f: /* fdivq */
                        goto nfpu_insn;
                    case 0x69: /* fsmuld */
                        gen_op_load_fpr_FT0(rs1);
                        gen_op_load_fpr_FT1(rs2);
                        gen_op_fsmuld();
                        gen_op_store_DT0_fpr(DFPREG(rd));
                        break;
                    case 0x6e: /* fdmulq */
                        goto nfpu_insn;
                    case 0xc4: /* fitos */
                        gen_op_load_fpr_FT1(rs2);
                        gen_op_fitos();
                        gen_op_store_FT0_fpr(rd);
                        break;
                    case 0xc6: /* fdtos */
                        gen_op_load_fpr_DT1(DFPREG(rs2));
                        gen_op_fdtos();
                        gen_op_store_FT0_fpr(rd);
                        break;
                    case 0xc7: /* fqtos */
                        goto nfpu_insn;
                    case 0xc8: /* fitod */
                        gen_op_load_fpr_FT1(rs2);
                        gen_op_fitod();
                        gen_op_store_DT0_fpr(DFPREG(rd));
                        break;
                    case 0xc9: /* fstod */
                        gen_op_load_fpr_FT1(rs2);
                        gen_op_fstod();
                        gen_op_store_DT0_fpr(DFPREG(rd));
                        break;
                    case 0xcb: /* fqtod */
                        goto nfpu_insn;
                    case 0xcc: /* fitoq */
                        goto nfpu_insn;
                    case 0xcd: /* fstoq */
                        goto nfpu_insn;
                    case 0xce: /* fdtoq */
                        goto nfpu_insn;
                    case 0xd1: /* fstoi */
                        gen_op_load_fpr_FT1(rs2);
                        gen_op_fstoi();
                        gen_op_store_FT0_fpr(rd);
                        break;
                    case 0xd2: /* fdtoi */
                        gen_op_load_fpr_DT1(DFPREG(rs2));
                        gen_op_fdtoi();
                        gen_op_store_FT0_fpr(rd);
                        break;
                    case 0xd3: /* fqtoi */
                        goto nfpu_insn;
#ifdef TARGET_SPARC64
                    case 0x2: /* V9 fmovd */
                        gen_op_load_fpr_DT0(DFPREG(rs2));
                        gen_op_store_DT0_fpr(DFPREG(rd));
                        break;
                    case 0x6: /* V9 fnegd */
                        gen_op_load_fpr_DT1(DFPREG(rs2));
                        gen_op_fnegd();
                        gen_op_store_DT0_fpr(DFPREG(rd));
                        break;
                    case 0xa: /* V9 fabsd */
                        gen_op_load_fpr_DT1(DFPREG(rs2));
                        gen_op_fabsd();
                        gen_op_store_DT0_fpr(DFPREG(rd));
                        break;
                    case 0x81: /* V9 fstox */
                        gen_op_load_fpr_FT1(rs2);
                        gen_op_fstox();
                        gen_op_store_DT0_fpr(DFPREG(rd));
                        break;
                    case 0x82: /* V9 fdtox */
                        gen_op_load_fpr_DT1(DFPREG(rs2));
                        gen_op_fdtox();
                        gen_op_store_DT0_fpr(DFPREG(rd));
                        break;
                    case 0x84: /* V9 fxtos */
                        gen_op_load_fpr_DT1(DFPREG(rs2));
                        gen_op_fxtos();
                        gen_op_store_FT0_fpr(rd);
                        break;
                    case 0x88: /* V9 fxtod */
                        gen_op_load_fpr_DT1(DFPREG(rs2));
                        gen_op_fxtod();
                        gen_op_store_DT0_fpr(DFPREG(rd));
                        break;
                    case 0x3: /* V9 fmovq */
                    case 0x7: /* V9 fnegq */
                    case 0xb: /* V9 fabsq */
                    case 0x83: /* V9 fqtox */
                    case 0x8c: /* V9 fxtoq */
                        goto nfpu_insn;
#endif
                    default:
                        goto illegal_insn;
                }
            } else if (xop == 0x35) {   /* FPU Operations */
#ifdef TARGET_SPARC64
                int cond;
#endif
                if (gen_trap_ifnofpu(dc))
                    goto jmp_insn;
                gen_op_clear_ieee_excp_and_FTT();
                rs1 = GET_FIELD(insn, 13, 17);
                rs2 = GET_FIELD(insn, 27, 31);
                xop = GET_FIELD(insn, 18, 26);
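                /* FPop2: conditional FP moves (fmovcc, fmovr) and FP compares.
                   On V9 a compare writes %fcc[rd & 3]; pre-V9 only %fcc0 exists. */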
#ifdef TARGET_SPARC64
                if ((xop & 0x11f) == 0x005) { // V9 fmovsr
                    cond = GET_FIELD_SP(insn, 10, 12);
                    gen_op_load_fpr_FT0(rd);
                    gen_op_load_fpr_FT1(rs2);
                    rs1 = GET_FIELD(insn, 13, 17);
                    gen_movl_reg_T0(rs1);
                    flush_T2(dc);
                    gen_cond_reg(cond);
                    gen_op_fmovs_cc();
                    gen_op_store_FT0_fpr(rd);
                    break;
                } else if ((xop & 0x11f) == 0x006) { // V9 fmovdr
                    cond = GET_FIELD_SP(insn, 10, 12);
                    gen_op_load_fpr_DT0(rd);
                    gen_op_load_fpr_DT1(rs2);
                    flush_T2(dc);
                    rs1 = GET_FIELD(insn, 13, 17);
                    gen_movl_reg_T0(rs1);
                    gen_cond_reg(cond);
                    gen_op_fmovd_cc();
                    gen_op_store_DT0_fpr(rd);
                    break;
                } else if ((xop & 0x11f) == 0x007) { // V9 fmovqr
                    goto nfpu_insn;
                }
#endif
                switch (xop) {
#ifdef TARGET_SPARC64
                    case 0x001: /* V9 fmovscc %fcc0 */
                        cond = GET_FIELD_SP(insn, 14, 17);
                        gen_op_load_fpr_FT0(rd);
                        gen_op_load_fpr_FT1(rs2);
                        flush_T2(dc);
                        gen_fcond[0][cond]();
                        gen_op_fmovs_cc();
                        gen_op_store_FT0_fpr(rd);
                        break;
                    case 0x002: /* V9 fmovdcc %fcc0 */
                        cond = GET_FIELD_SP(insn, 14, 17);
                        gen_op_load_fpr_DT0(rd);
                        gen_op_load_fpr_DT1(rs2);
                        flush_T2(dc);
                        gen_fcond[0][cond]();
                        gen_op_fmovd_cc();
                        gen_op_store_DT0_fpr(rd);
                        break;
                    case 0x003: /* V9 fmovqcc %fcc0 */
                        goto nfpu_insn;
                    case 0x041: /* V9 fmovscc %fcc1 */
                        cond = GET_FIELD_SP(insn, 14, 17);
                        gen_op_load_fpr_FT0(rd);
                        gen_op_load_fpr_FT1(rs2);
                        flush_T2(dc);
                        gen_fcond[1][cond]();
                        gen_op_fmovs_cc();
                        gen_op_store_FT0_fpr(rd);
                        break;
                    case 0x042: /* V9 fmovdcc %fcc1 */
                        cond = GET_FIELD_SP(insn, 14, 17);
                        gen_op_load_fpr_DT0(rd);
                        gen_op_load_fpr_DT1(rs2);
                        flush_T2(dc);
                        gen_fcond[1][cond]();
                        gen_op_fmovd_cc();
                        gen_op_store_DT0_fpr(rd);
                        break;
                    case 0x043: /* V9 fmovqcc %fcc1 */
                        goto nfpu_insn;
                    case 0x081: /* V9 fmovscc %fcc2 */
                        cond = GET_FIELD_SP(insn, 14, 17);
                        gen_op_load_fpr_FT0(rd);
                        gen_op_load_fpr_FT1(rs2);
                        flush_T2(dc);
                        gen_fcond[2][cond]();
                        gen_op_fmovs_cc();
                        gen_op_store_FT0_fpr(rd);
                        break;
                    case 0x082: /* V9 fmovdcc %fcc2 */
                        cond = GET_FIELD_SP(insn, 14, 17);
                        gen_op_load_fpr_DT0(rd);
                        gen_op_load_fpr_DT1(rs2);
                        flush_T2(dc);
                        gen_fcond[2][cond]();
                        gen_op_fmovd_cc();
                        gen_op_store_DT0_fpr(rd);
                        break;
                    case 0x083: /* V9 fmovqcc %fcc2 */
                        goto nfpu_insn;
                    case 0x0c1: /* V9 fmovscc %fcc3 */
                        cond = GET_FIELD_SP(insn, 14, 17);
                        gen_op_load_fpr_FT0(rd);
                        gen_op_load_fpr_FT1(rs2);
                        flush_T2(dc);
                        gen_fcond[3][cond]();
                        gen_op_fmovs_cc();
                        gen_op_store_FT0_fpr(rd);
                        break;
                    case 0x0c2: /* V9 fmovdcc %fcc3 */
                        cond = GET_FIELD_SP(insn, 14, 17);
                        gen_op_load_fpr_DT0(rd);
                        gen_op_load_fpr_DT1(rs2);
                        flush_T2(dc);
                        gen_fcond[3][cond]();
                        gen_op_fmovd_cc();
                        gen_op_store_DT0_fpr(rd);
                        break;
                    case 0x0c3: /* V9 fmovqcc %fcc3 */
                        goto nfpu_insn;
                    case 0x101: /* V9 fmovscc %icc */
                        cond = GET_FIELD_SP(insn, 14, 17);
                        gen_op_load_fpr_FT0(rd);
                        gen_op_load_fpr_FT1(rs2);
                        flush_T2(dc);
                        gen_cond[0][cond]();
                        gen_op_fmovs_cc();
                        gen_op_store_FT0_fpr(rd);
                        break;
                    case 0x102: /* V9 fmovdcc %icc */
                        cond = GET_FIELD_SP(insn, 14, 17);
                        gen_op_load_fpr_DT0(rd);
                        gen_op_load_fpr_DT1(rs2);
                        flush_T2(dc);
                        gen_cond[0][cond]();
                        gen_op_fmovd_cc();
                        gen_op_store_DT0_fpr(rd);
                        break;
                    case 0x103: /* V9 fmovqcc %icc */
                        goto nfpu_insn;
                    case 0x181: /* V9 fmovscc %xcc */
                        cond = GET_FIELD_SP(insn, 14, 17);
                        gen_op_load_fpr_FT0(rd);
                        gen_op_load_fpr_FT1(rs2);
                        flush_T2(dc);
                        gen_cond[1][cond]();
                        gen_op_fmovs_cc();
                        gen_op_store_FT0_fpr(rd);
                        break;
                    case 0x182: /* V9 fmovdcc %xcc */
                        cond = GET_FIELD_SP(insn, 14, 17);
                        gen_op_load_fpr_DT0(rd);
                        gen_op_load_fpr_DT1(rs2);
                        flush_T2(dc);
                        gen_cond[1][cond]();
                        gen_op_fmovd_cc();
                        gen_op_store_DT0_fpr(rd);
                        break;
                    case 0x183: /* V9 fmovqcc %xcc */
                        goto nfpu_insn;
#endif
                    case 0x51: /* fcmps, V9 %fcc */
                        gen_op_load_fpr_FT0(rs1);
                        gen_op_load_fpr_FT1(rs2);
#ifdef TARGET_SPARC64
                        gen_fcmps[rd & 3]();
#else
                        gen_op_fcmps();
#endif
                        break;
                    case 0x52: /* fcmpd, V9 %fcc */
                        gen_op_load_fpr_DT0(DFPREG(rs1));
                        gen_op_load_fpr_DT1(DFPREG(rs2));
#ifdef TARGET_SPARC64
                        gen_fcmpd[rd & 3]();
#else
                        gen_op_fcmpd();
#endif
                        break;
                    case 0x53: /* fcmpq */
                        goto nfpu_insn;
                    case 0x55: /* fcmpes, V9 %fcc */
                        gen_op_load_fpr_FT0(rs1);
                        gen_op_load_fpr_FT1(rs2);
#ifdef TARGET_SPARC64
                        gen_fcmpes[rd & 3]();
#else
                        gen_op_fcmpes();
#endif
                        break;
                    case 0x56: /* fcmped, V9 %fcc */
                        gen_op_load_fpr_DT0(DFPREG(rs1));
                        gen_op_load_fpr_DT1(DFPREG(rs2));
#ifdef TARGET_SPARC64
                        gen_fcmped[rd & 3]();
#else
                        gen_op_fcmped();
#endif
                        break;
                    case 0x57: /* fcmpeq */
                        goto nfpu_insn;
                    default:
                        goto illegal_insn;
                }
#if defined(OPTIM)
            } else if (xop == 0x2) {
                // clr/mov shortcut
                rs1 = GET_FIELD(insn, 13, 17);
                if (rs1 == 0) {
                    // or %g0, x, y -> mov T1, x; mov y, T1
                    if (IS_IMM) {       /* immediate */
                        rs2 = GET_FIELDs(insn, 19, 31);
                        gen_movl_simm_T1(rs2);
                    } else {            /* register */
                        rs2 = GET_FIELD(insn, 27, 31);
                        gen_movl_reg_T1(rs2);
                    }
                    gen_movl_T1_reg(rd);
                } else {
                    gen_movl_reg_T0(rs1);
                    if (IS_IMM) {       /* immediate */
                        // or x, 0, y -> mov y, x (skip the or when imm is 0)
                        rs2 = GET_FIELDs(insn, 19, 31);
                        if (rs2 != 0) {
                            gen_movl_simm_T1(rs2);
                            gen_op_or_T1_T0();
                        }
                    } else {            /* register */
                        // or x, %g0, y -> mov y, x (skip the or when rs2 is %g0)
                        rs2 = GET_FIELD(insn, 27, 31);
                        if (rs2 != 0) {
                            gen_movl_reg_T1(rs2);
                            gen_op_or_T1_T0();
                        }
                    }
                    gen_movl_T0_reg(rd);
                }
#endif
#ifdef TARGET_SPARC64
            } else if (xop == 0x25) { /* sll, V9 sllx */
                rs1 = GET_FIELD(insn, 13, 17);
                gen_movl_reg_T0(rs1);
                if (IS_IMM) {   /* immediate */
                    rs2 = GET_FIELDs(insn, 20, 31);
                    gen_movl_simm_T1(rs2);
                } else {                /* register */
                    rs2 = GET_FIELD(insn, 27, 31);
                    gen_movl_reg_T1(rs2);
                }
                if (insn & (1 << 12))
                    gen_op_sllx();
                else
                    gen_op_sll();
                gen_movl_T0_reg(rd);
            } else if (xop == 0x26) { /* srl, V9 srlx */
                rs1 = GET_FIELD(insn, 13, 17);
                gen_movl_reg_T0(rs1);
                if (IS_IMM) {   /* immediate */
                    rs2 = GET_FIELDs(insn, 20, 31);
                    gen_movl_simm_T1(rs2);
                } else {                /* register */
                    rs2 = GET_FIELD(insn, 27, 31);
                    gen_movl_reg_T1(rs2);
                }
                if (insn & (1 << 12))
                    gen_op_srlx();
                else
                    gen_op_srl();
                gen_movl_T0_reg(rd);
            } else if (xop == 0x27) { /* sra, V9 srax */
                rs1 = GET_FIELD(insn, 13, 17);
                gen_movl_reg_T0(rs1);
                if (IS_IMM) {   /* immediate */
                    rs2 = GET_FIELDs(insn, 20, 31);
                    gen_movl_simm_T1(rs2);
                } else {                /* register */
                    rs2 = GET_FIELD(insn, 27, 31);
                    gen_movl_reg_T1(rs2);
                }
                if (insn & (1 << 12))
                    gen_op_srax();
                else
                    gen_op_sra();
                gen_movl_T0_reg(rd);
#endif
            } else if (xop < 0x36) {
                rs1 = GET_FIELD(insn, 13, 17);
                gen_movl_reg_T0(rs1);
                if (IS_IMM) {   /* immediate */
                    rs2 = GET_FIELDs(insn, 19, 31);
                    gen_movl_simm_T1(rs2);
                } else {                /* register */
                    rs2 = GET_FIELD(insn, 27, 31);
                    gen_movl_reg_T1(rs2);
                }
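                /* xop < 0x20 is the arithmetic/logical group; bit 4 (0x10)
                   selects the condition-code-setting variant of each op. */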
                if (xop < 0x20) {
                    switch (xop & ~0x10) {
                    case 0x0:
                        if (xop & 0x10)
                            gen_op_add_T1_T0_cc();
                        else
                            gen_op_add_T1_T0();
                        break;
                    case 0x1:
                        gen_op_and_T1_T0();
                        if (xop & 0x10)
                            gen_op_logic_T0_cc();
                        break;
                    case 0x2:
                        gen_op_or_T1_T0();
                        if (xop & 0x10)
                            gen_op_logic_T0_cc();
                        break;
                    case 0x3:
                        gen_op_xor_T1_T0();
                        if (xop & 0x10)
                            gen_op_logic_T0_cc();
                        break;
                    case 0x4:
                        if (xop & 0x10)
                            gen_op_sub_T1_T0_cc();
                        else
                            gen_op_sub_T1_T0();
                        break;
                    case 0x5:
                        gen_op_andn_T1_T0();
                        if (xop & 0x10)
                            gen_op_logic_T0_cc();
                        break;
                    case 0x6:
                        gen_op_orn_T1_T0();
                        if (xop & 0x10)
                            gen_op_logic_T0_cc();
                        break;
                    case 0x7:
                        gen_op_xnor_T1_T0();
                        if (xop & 0x10)
                            gen_op_logic_T0_cc();
                        break;
                    case 0x8:
                        if (xop & 0x10)
                            gen_op_addx_T1_T0_cc();
                        else
                            gen_op_addx_T1_T0();
                        break;
#ifdef TARGET_SPARC64
                    case 0x9: /* V9 mulx */
                        gen_op_mulx_T1_T0();
                        break;
#endif
                    case 0xa:
                        gen_op_umul_T1_T0();
                        if (xop & 0x10)
                            gen_op_logic_T0_cc();
                        break;
                    case 0xb:
                        gen_op_smul_T1_T0();
                        if (xop & 0x10)
                            gen_op_logic_T0_cc();
                        break;
                    case 0xc:
                        if (xop & 0x10)
                            gen_op_subx_T1_T0_cc();
                        else
                            gen_op_subx_T1_T0();
                        break;
#ifdef TARGET_SPARC64
                    case 0xd: /* V9 udivx */
                        gen_op_udivx_T1_T0();
                        break;
#endif
                    case 0xe:
                        gen_op_udiv_T1_T0();
                        if (xop & 0x10)
                            gen_op_div_cc();
                        break;
                    case 0xf:
                        gen_op_sdiv_T1_T0();
                        if (xop & 0x10)
                            gen_op_div_cc();
                        break;
                    default:
                        goto illegal_insn;
                    }
                    gen_movl_T0_reg(rd);
                } else {
                    switch (xop) {
                    case 0x20: /* taddcc */
                        gen_op_tadd_T1_T0_cc();
                        gen_movl_T0_reg(rd);
                        break;
                    case 0x21: /* tsubcc */
                        gen_op_tsub_T1_T0_cc();
                        gen_movl_T0_reg(rd);
                        break;
                    case 0x22: /* taddcctv */
                        save_state(dc);
                        gen_op_tadd_T1_T0_ccTV();
                        gen_movl_T0_reg(rd);
                        break;
                    case 0x23: /* tsubcctv */
                        save_state(dc);
                        gen_op_tsub_T1_T0_ccTV();
                        gen_movl_T0_reg(rd);
                        break;
                    case 0x24: /* mulscc */
                        gen_op_mulscc_T1_T0();
                        gen_movl_T0_reg(rd);
                        break;
#ifndef TARGET_SPARC64
                    case 0x25:  /* sll */
                        gen_op_sll();
                        gen_movl_T0_reg(rd);
                        break;
                    case 0x26:  /* srl */
                        gen_op_srl();
                        gen_movl_T0_reg(rd);
                        break;
                    case 0x27:  /* sra */
                        gen_op_sra();
                        gen_movl_T0_reg(rd);
                        break;
#endif
                    case 0x30: /* wry, V9 wrasr */
                        {
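                            /* rd selects the destination ASR; the value
                               written is rs1 ^ operand, hence the xor in
                               each case below. */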
                            switch(rd) {
                            case 0: /* wry */
                                gen_op_xor_T1_T0();
                                gen_op_movtl_env_T0(offsetof(CPUSPARCState, y));
                                break;
#ifndef TARGET_SPARC64
                            case 0x01 ... 0x0f: /* undefined in the
                                                   SPARCv8 manual, nop
                                                   on the microSPARC
                                                   II */
                            case 0x10 ... 0x1f: /* implementation-dependent
                                                   in the SPARCv8
                                                   manual, nop on the
                                                   microSPARC II */
                                break;
#else
                            case 0x2: /* V9 wrccr */
                                gen_op_xor_T1_T0();
                                gen_op_wrccr();
                                break;
                            case 0x3: /* V9 wrasi */
                                gen_op_xor_T1_T0();
                                gen_op_movl_env_T0(offsetof(CPUSPARCState, asi));
                                break;
                            case 0x6: /* V9 wrfprs */
                                gen_op_xor_T1_T0();
                                gen_op_movl_env_T0(offsetof(CPUSPARCState, fprs));
                                save_state(dc);
                                gen_op_next_insn();
                                gen_op_movl_T0_0();
                                gen_op_exit_tb();
                                dc->is_br = 1;
                                break;
                            case 0xf: /* V9 sir, nop if user */
#if !defined(CONFIG_USER_ONLY)
                                if (supervisor(dc))
                                    gen_op_sir();
#endif
                                break;
                            case 0x13: /* Graphics Status */
                                if (gen_trap_ifnofpu(dc))
                                    goto jmp_insn;
                                gen_op_xor_T1_T0();
                                gen_op_movtl_env_T0(offsetof(CPUSPARCState, gsr));
                                break;
                            case 0x17: /* Tick compare */
#if !defined(CONFIG_USER_ONLY)
                                if (!supervisor(dc))
                                    goto illegal_insn;
#endif
                                gen_op_xor_T1_T0();
                                gen_op_movtl_env_T0(offsetof(CPUSPARCState, tick_cmpr));
                                gen_op_wrtick_cmpr();
                                break;
                            case 0x18: /* System tick */
#if !defined(CONFIG_USER_ONLY)
                                if (!supervisor(dc))
                                    goto illegal_insn;
#endif
                                gen_op_xor_T1_T0();
                                gen_op_wrstick();
                                break;
                            case 0x19: /* System tick compare */
#if !defined(CONFIG_USER_ONLY)
                                if (!supervisor(dc))
                                    goto illegal_insn;
#endif
                                gen_op_xor_T1_T0();
                                gen_op_movtl_env_T0(offsetof(CPUSPARCState, stick_cmpr));
                                gen_op_wrstick_cmpr();
                                break;
                            case 0x10: /* Performance Control */
                            case 0x11: /* Performance Instrumentation Counter */
                            case 0x12: /* Dispatch Control */
                            case 0x14: /* Softint set */
                            case 0x15: /* Softint clear */
                            case 0x16: /* Softint write */
#endif
                            default:
                                goto illegal_insn;
                            }
                        }
                        break;
#if !defined(CONFIG_USER_ONLY)
                    case 0x31: /* wrpsr, V9 saved, restored */
                        {
                            if (!supervisor(dc))
                                goto priv_insn;
#ifdef TARGET_SPARC64
                            switch (rd) {
                            case 0:
                                gen_op_saved();
                                break;
                            case 1:
                                gen_op_restored();
                                break;
                            case 2: /* UA2005 allclean */
                            case 3: /* UA2005 otherw */
                            case 4: /* UA2005 normalw */
                            case 5: /* UA2005 invalw */
                                // XXX
                            default:
                                goto illegal_insn;
                            }
#else
                            gen_op_xor_T1_T0();
                            gen_op_wrpsr();
                            save_state(dc);
                            gen_op_next_insn();
                            gen_op_movl_T0_0();
                            gen_op_exit_tb();
                            dc->is_br = 1;
#endif
                        }
                        break;
                    case 0x32: /* wrwim, V9 wrpr */
                        {
                            if (!supervisor(dc))
                                goto priv_insn;
                            gen_op_xor_T1_T0();
#ifdef TARGET_SPARC64
                            switch (rd) {
                            case 0: // tpc
                                gen_op_wrtpc();
                                break;
                            case 1: // tnpc
                                gen_op_wrtnpc();
                                break;
                            case 2: // tstate
                                gen_op_wrtstate();
                                break;
                            case 3: // tt
                                gen_op_wrtt();
                                break;
                            case 4: // tick
                                gen_op_wrtick();
                                break;
                            case 5: // tba
                                gen_op_movtl_env_T0(offsetof(CPUSPARCState, tbr));
                                break;
                            case 6: // pstate
                                gen_op_wrpstate();
                                save_state(dc);
                                gen_op_next_insn();
                                gen_op_movl_T0_0();
                                gen_op_exit_tb();
                                dc->is_br = 1;
                                break;
                            case 7: // tl
                                gen_op_movl_env_T0(offsetof(CPUSPARCState, tl));
                                break;
                            case 8: // pil
                                gen_op_movl_env_T0(offsetof(CPUSPARCState, psrpil));
                                break;
                            case 9: // cwp
                                gen_op_wrcwp();
                                break;
                            case 10: // cansave
                                gen_op_movl_env_T0(offsetof(CPUSPARCState, cansave));
                                break;
                            case 11: // canrestore
                                gen_op_movl_env_T0(offsetof(CPUSPARCState, canrestore));
                                break;
                            case 12: // cleanwin
                                gen_op_movl_env_T0(offsetof(CPUSPARCState, cleanwin));
                                break;
                            case 13: // otherwin
                                gen_op_movl_env_T0(offsetof(CPUSPARCState, otherwin));
                                break;
                            case 14: // wstate
                                gen_op_movl_env_T0(offsetof(CPUSPARCState, wstate));
                                break;
                            case 16: // UA2005 gl
                                gen_op_movl_env_T0(offsetof(CPUSPARCState, gl));
                                break;
                            case 26: // UA2005 strand status
                                if (!hypervisor(dc))
                                    goto priv_insn;
                                gen_op_movl_env_T0(offsetof(CPUSPARCState, ssr));
                                break;
                            default:
                                goto illegal_insn;
                            }
#else
                            gen_op_wrwim();
#endif
                        }
                        break;
                    case 0x33: /* wrtbr, UA2005 wrhpr */
                        {
#ifndef TARGET_SPARC64
                            if (!supervisor(dc))
                                goto priv_insn;
                            gen_op_xor_T1_T0();
                            gen_op_movtl_env_T0(offsetof(CPUSPARCState, tbr));
#else
                            if (!hypervisor(dc))
                                goto priv_insn;
                            gen_op_xor_T1_T0();
                            switch (rd) {
                            case 0: // hpstate
                                // XXX gen_op_wrhpstate();
                                save_state(dc);
                                gen_op_next_insn();
                                gen_op_movl_T0_0();
                                gen_op_exit_tb();
                                dc->is_br = 1;
                                break;
                            case 1: // htstate
                                // XXX gen_op_wrhtstate();
                                break;
                            case 3: // hintp
                                gen_op_movl_env_T0(offsetof(CPUSPARCState, hintp));
                                break;
                            case 5: // htba
                                gen_op_movl_env_T0(offsetof(CPUSPARCState, htba));
                                break;
                            case 31: // hstick_cmpr
                                gen_op_movtl_env_T0(offsetof(CPUSPARCState, hstick_cmpr));
                                gen_op_wrhstick_cmpr();
                                break;
                            case 6: // hver readonly
                            default:
                                goto illegal_insn;
                            }
#endif
                        }
                        break;
#endif
#ifdef TARGET_SPARC64
                    case 0x2c: /* V9 movcc */
                        {
                            int cc = GET_FIELD_SP(insn, 11, 12);
                            int cond = GET_FIELD_SP(insn, 14, 17);
                            if (IS_IMM) {       /* immediate */
                                rs2 = GET_FIELD_SPs(insn, 0, 10);
                                gen_movl_simm_T1(rs2);
                            }
                            else {
                                rs2 = GET_FIELD_SP(insn, 0, 4);
                                gen_movl_reg_T1(rs2);
                            }
                            gen_movl_reg_T0(rd);
                            flush_T2(dc);
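                            /* bit 18 set: integer condition codes (cc picks
                               icc or xcc); bit 18 clear: %fcc[cc]. */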
                            if (insn & (1 << 18)) {
                                if (cc == 0)
                                    gen_cond[0][cond]();
                                else if (cc == 2)
                                    gen_cond[1][cond]();
                                else
                                    goto illegal_insn;
                            } else {
                                gen_fcond[cc][cond]();
                            }
                            gen_op_mov_cc();
                            gen_movl_T0_reg(rd);
                            break;
                        }
                    case 0x2d: /* V9 sdivx */
                        gen_op_sdivx_T1_T0();
                        gen_movl_T0_reg(rd);
                        break;
                    case 0x2e: /* V9 popc */
                        {
                            if (IS_IMM) {       /* immediate */
                                rs2 = GET_FIELD_SPs(insn, 0, 12);
                                gen_movl_simm_T1(rs2);
                                // XXX optimize: popc(constant)
                            }
                            else {
                                rs2 = GET_FIELD_SP(insn, 0, 4);
                                gen_movl_reg_T1(rs2);
                            }
                            gen_op_popc();
                            gen_movl_T0_reg(rd);
                            break;
                        }
                    case 0x2f: /* V9 movr */
                        {
                            int cond = GET_FIELD_SP(insn, 10, 12);
                            rs1 = GET_FIELD(insn, 13, 17);
                            flush_T2(dc);
                            gen_movl_reg_T0(rs1);
                            gen_cond_reg(cond);
                            if (IS_IMM) {       /* immediate */
                                rs2 = GET_FIELD_SPs(insn, 0, 9);
                                gen_movl_simm_T1(rs2);
                            }
                            else {
                                rs2 = GET_FIELD_SP(insn, 0, 4);
                                gen_movl_reg_T1(rs2);
                            }
                            gen_movl_reg_T0(rd);
                            gen_op_mov_cc();
                            gen_movl_T0_reg(rd);
                            break;
                        }
#endif
                    default:
                        goto illegal_insn;
                    }
                }
            } else if (xop == 0x36) { /* UltraSparc shutdown, VIS, V8 CPop1 */
#ifdef TARGET_SPARC64
                int opf = GET_FIELD_SP(insn, 5, 13);
                rs1 = GET_FIELD(insn, 13, 17);
                rs2 = GET_FIELD(insn, 27, 31);
                if (gen_trap_ifnofpu(dc))
                    goto jmp_insn;
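                /* VIS / impdep1: opf selects the operation; anything not
                   implemented below falls through to illegal_insn. */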
                switch (opf) {
                case 0x000: /* VIS I edge8cc */
                case 0x001: /* VIS II edge8n */
                case 0x002: /* VIS I edge8lcc */
                case 0x003: /* VIS II edge8ln */
                case 0x004: /* VIS I edge16cc */
                case 0x005: /* VIS II edge16n */
                case 0x006: /* VIS I edge16lcc */
                case 0x007: /* VIS II edge16ln */
                case 0x008: /* VIS I edge32cc */
                case 0x009: /* VIS II edge32n */
                case 0x00a: /* VIS I edge32lcc */
                case 0x00b: /* VIS II edge32ln */
                    // XXX
                    goto illegal_insn;
                case 0x010: /* VIS I array8 */
                    gen_movl_reg_T0(rs1);
                    gen_movl_reg_T1(rs2);
                    gen_op_array8();
                    gen_movl_T0_reg(rd);
                    break;
                case 0x012: /* VIS I array16 */
                    gen_movl_reg_T0(rs1);
                    gen_movl_reg_T1(rs2);
                    gen_op_array16();
                    gen_movl_T0_reg(rd);
                    break;
                case 0x014: /* VIS I array32 */
                    gen_movl_reg_T0(rs1);
                    gen_movl_reg_T1(rs2);
                    gen_op_array32();
                    gen_movl_T0_reg(rd);
                    break;
                case 0x018: /* VIS I alignaddr */
                    gen_movl_reg_T0(rs1);
                    gen_movl_reg_T1(rs2);
                    gen_op_alignaddr();
                    gen_movl_T0_reg(rd);
                    break;
                case 0x019: /* VIS II bmask */
                case 0x01a: /* VIS I alignaddrl */
                    // XXX
                    goto illegal_insn;
                case 0x020: /* VIS I fcmple16 */
                    gen_op_load_fpr_DT0(rs1);
                    gen_op_load_fpr_DT1(rs2);
                    gen_op_fcmple16();
                    gen_op_store_DT0_fpr(rd);
                    break;
                case 0x022: /* VIS I fcmpne16 */
                    gen_op_load_fpr_DT0(rs1);
                    gen_op_load_fpr_DT1(rs2);
                    gen_op_fcmpne16();
                    gen_op_store_DT0_fpr(rd);
                    break;
                case 0x024: /* VIS I fcmple32 */
                    gen_op_load_fpr_DT0(rs1);
                    gen_op_load_fpr_DT1(rs2);
                    gen_op_fcmple32();
                    gen_op_store_DT0_fpr(rd);
                    break;
                case 0x026: /* VIS I fcmpne32 */
                    gen_op_load_fpr_DT0(rs1);
                    gen_op_load_fpr_DT1(rs2);
                    gen_op_fcmpne32();
                    gen_op_store_DT0_fpr(rd);
                    break;
                case 0x028: /* VIS I fcmpgt16 */
                    gen_op_load_fpr_DT0(rs1);
                    gen_op_load_fpr_DT1(rs2);
                    gen_op_fcmpgt16();
                    gen_op_store_DT0_fpr(rd);
                    break;
                case 0x02a: /* VIS I fcmpeq16 */
                    gen_op_load_fpr_DT0(rs1);
                    gen_op_load_fpr_DT1(rs2);
                    gen_op_fcmpeq16();
                    gen_op_store_DT0_fpr(rd);
                    break;
                case 0x02c: /* VIS I fcmpgt32 */
                    gen_op_load_fpr_DT0(rs1);
                    gen_op_load_fpr_DT1(rs2);
                    gen_op_fcmpgt32();
                    gen_op_store_DT0_fpr(rd);
                    break;
                case 0x02e: /* VIS I fcmpeq32 */
                    gen_op_load_fpr_DT0(rs1);
                    gen_op_load_fpr_DT1(rs2);
                    gen_op_fcmpeq32();
                    gen_op_store_DT0_fpr(rd);
                    break;
                case 0x031: /* VIS I fmul8x16 */
                    gen_op_load_fpr_DT0(rs1);
                    gen_op_load_fpr_DT1(rs2);
                    gen_op_fmul8x16();
                    gen_op_store_DT0_fpr(rd);
                    break;
                case 0x033: /* VIS I fmul8x16au */
                    gen_op_load_fpr_DT0(rs1);
                    gen_op_load_fpr_DT1(rs2);
                    gen_op_fmul8x16au();
                    gen_op_store_DT0_fpr(rd);
                    break;
                case 0x035: /* VIS I fmul8x16al */
                    gen_op_load_fpr_DT0(rs1);
                    gen_op_load_fpr_DT1(rs2);
                    gen_op_fmul8x16al();
                    gen_op_store_DT0_fpr(rd);
                    break;
                case 0x036: /* VIS I fmul8sux16 */
                    gen_op_load_fpr_DT0(rs1);
                    gen_op_load_fpr_DT1(rs2);
                    gen_op_fmul8sux16();
                    gen_op_store_DT0_fpr(rd);
                    break;
                case 0x037: /* VIS I fmul8ulx16 */
                    gen_op_load_fpr_DT0(rs1);
                    gen_op_load_fpr_DT1(rs2);
                    gen_op_fmul8ulx16();
                    gen_op_store_DT0_fpr(rd);
                    break;
                case 0x038: /* VIS I fmuld8sux16 */
                    gen_op_load_fpr_DT0(rs1);
                    gen_op_load_fpr_DT1(rs2);
                    gen_op_fmuld8sux16();
                    gen_op_store_DT0_fpr(rd);
                    break;
                case 0x039: /* VIS I fmuld8ulx16 */
                    gen_op_load_fpr_DT0(rs1);
                    gen_op_load_fpr_DT1(rs2);
                    gen_op_fmuld8ulx16();
                    gen_op_store_DT0_fpr(rd);
                    break;
                case 0x03a: /* VIS I fpack32 */
                case 0x03b: /* VIS I fpack16 */
                case 0x03d: /* VIS I fpackfix */
                case 0x03e: /* VIS I pdist */
                    // XXX
                    goto illegal_insn;
                case 0x048: /* VIS I faligndata */
                    gen_op_load_fpr_DT0(rs1);
                    gen_op_load_fpr_DT1(rs2);
                    gen_op_faligndata();
                    gen_op_store_DT0_fpr(rd);
                    break;
                case 0x04b: /* VIS I fpmerge */
                    gen_op_load_fpr_DT0(rs1);
                    gen_op_load_fpr_DT1(rs2);
                    gen_op_fpmerge();
                    gen_op_store_DT0_fpr(rd);
                    break;
                case 0x04c: /* VIS II bshuffle */
                    // XXX
                    goto illegal_insn;
                case 0x04d: /* VIS I fexpand */
                    gen_op_load_fpr_DT0(rs1);
                    gen_op_load_fpr_DT1(rs2);
                    gen_op_fexpand();
                    gen_op_store_DT0_fpr(rd);
                    break;
                case 0x050: /* VIS I fpadd16 */
                    gen_op_load_fpr_DT0(rs1);
                    gen_op_load_fpr_DT1(rs2);
                    gen_op_fpadd16();
                    gen_op_store_DT0_fpr(rd);
                    break;
                case 0x051: /* VIS I fpadd16s */
                    gen_op_load_fpr_FT0(rs1);
                    gen_op_load_fpr_FT1(rs2);
                    gen_op_fpadd16s();
                    gen_op_store_FT0_fpr(rd);
                    break;
                case 0x052: /* VIS I fpadd32 */
                    gen_op_load_fpr_DT0(rs1);
                    gen_op_load_fpr_DT1(rs2);
                    gen_op_fpadd32();
                    gen_op_store_DT0_fpr(rd);
                    break;
                case 0x053: /* VIS I fpadd32s */
                    gen_op_load_fpr_FT0(rs1);
                    gen_op_load_fpr_FT1(rs2);
                    gen_op_fpadd32s();
                    gen_op_store_FT0_fpr(rd);
                    break;
                case 0x054: /* VIS I fpsub16 */
                    gen_op_load_fpr_DT0(rs1);
                    gen_op_load_fpr_DT1(rs2);
                    gen_op_fpsub16();
                    gen_op_store_DT0_fpr(rd);
                    break;
                case 0x055: /* VIS I fpsub16s */
                    gen_op_load_fpr_FT0(rs1);
                    gen_op_load_fpr_FT1(rs2);
                    gen_op_fpsub16s();
                    gen_op_store_FT0_fpr(rd);
                    break;
                case 0x056: /* VIS I fpsub32 */
                    gen_op_load_fpr_DT0(rs1);
                    gen_op_load_fpr_DT1(rs2);
                    gen_op_fpsub32();
                    gen_op_store_DT0_fpr(rd);
                    break;
                case 0x057: /* VIS I fpsub32s */
                    gen_op_load_fpr_FT0(rs1);
                    gen_op_load_fpr_FT1(rs2);
                    gen_op_fpsub32s();
                    gen_op_store_FT0_fpr(rd);
                    break;
                case 0x060: /* VIS I fzero */
                    gen_op_movl_DT0_0();
                    gen_op_store_DT0_fpr(rd);
                    break;
                case 0x061: /* VIS I fzeros */
                    gen_op_movl_FT0_0();
                    gen_op_store_FT0_fpr(rd);
                    break;
                case 0x062: /* VIS I fnor */
                    gen_op_load_fpr_DT0(rs1);
                    gen_op_load_fpr_DT1(rs2);
                    gen_op_fnor();
                    gen_op_store_DT0_fpr(rd);
                    break;
                case 0x063: /* VIS I fnors */
                    gen_op_load_fpr_FT0(rs1);
                    gen_op_load_fpr_FT1(rs2);
                    gen_op_fnors();
                    gen_op_store_FT0_fpr(rd);
                    break;
                case 0x064: /* VIS I fandnot2 */
                    gen_op_load_fpr_DT1(rs1);
                    gen_op_load_fpr_DT0(rs2);
                    gen_op_fandnot();
                    gen_op_store_DT0_fpr(rd);
                    break;
                case 0x065: /* VIS I fandnot2s */
                    gen_op_load_fpr_FT1(rs1);
                    gen_op_load_fpr_FT0(rs2);
                    gen_op_fandnots();
                    gen_op_store_FT0_fpr(rd);
                    break;
                case 0x066: /* VIS I fnot2 */
                    gen_op_load_fpr_DT1(rs2);
                    gen_op_fnot();
                    gen_op_store_DT0_fpr(rd);
                    break;
                case 0x067: /* VIS I fnot2s */
                    gen_op_load_fpr_FT1(rs2);
                    gen_op_fnot();
                    gen_op_store_FT0_fpr(rd);
                    break;
                case 0x068: /* VIS I fandnot1 */
                    gen_op_load_fpr_DT0(rs1);
                    gen_op_load_fpr_DT1(rs2);
                    gen_op_fandnot();
                    gen_op_store_DT0_fpr(rd);
                    break;
                case 0x069: /* VIS I fandnot1s */
                    gen_op_load_fpr_FT0(rs1);
                    gen_op_load_fpr_FT1(rs2);
                    gen_op_fandnots();
                    gen_op_store_FT0_fpr(rd);
                    break;
                case 0x06a: /* VIS I fnot1 */
                    gen_op_load_fpr_DT1(rs1);
                    gen_op_fnot();
                    gen_op_store_DT0_fpr(rd);
                    break;
                case 0x06b: /* VIS I fnot1s */
                    gen_op_load_fpr_FT1(rs1);
                    gen_op_fnot();
                    gen_op_store_FT0_fpr(rd);
                    break;
                case 0x06c: /* VIS I fxor */
                    gen_op_load_fpr_DT0(rs1);
                    gen_op_load_fpr_DT1(rs2);
                    gen_op_fxor();
                    gen_op_store_DT0_fpr(rd);
                    break;
                case 0x06d: /* VIS I fxors */
                    gen_op_load_fpr_FT0(rs1);
                    gen_op_load_fpr_FT1(rs2);
                    gen_op_fxors();
                    gen_op_store_FT0_fpr(rd);
                    break;
                case 0x06e: /* VIS I fnand */
                    gen_op_load_fpr_DT0(rs1);
                    gen_op_load_fpr_DT1(rs2);
                    gen_op_fnand();
                    gen_op_store_DT0_fpr(rd);
                    break;
                case 0x06f: /* VIS I fnands */
                    gen_op_load_fpr_FT0(rs1);
                    gen_op_load_fpr_FT1(rs2);
                    gen_op_fnands();
                    gen_op_store_FT0_fpr(rd);
                    break;
                case 0x070: /* VIS I fand */
                    gen_op_load_fpr_DT0(rs1);
                    gen_op_load_fpr_DT1(rs2);
                    gen_op_fand();
                    gen_op_store_DT0_fpr(rd);
                    break;
                case 0x071: /* VIS I fands */
                    gen_op_load_fpr_FT0(rs1);
                    gen_op_load_fpr_FT1(rs2);
                    gen_op_fands();
                    gen_op_store_FT0_fpr(rd);
                    break;
                case 0x072: /* VIS I fxnor */
                    gen_op_load_fpr_DT0(rs1);
                    gen_op_load_fpr_DT1(rs2);
                    gen_op_fxnor();
                    gen_op_store_DT0_fpr(rd);
                    break;
                case 0x073: /* VIS I fxnors */
                    gen_op_load_fpr_FT0(rs1);
                    gen_op_load_fpr_FT1(rs2);
                    gen_op_fxnors();
                    gen_op_store_FT0_fpr(rd);
                    break;
                case 0x074: /* VIS I fsrc1 */
                    gen_op_load_fpr_DT0(rs1);
                    gen_op_store_DT0_fpr(rd);
                    break;
                case 0x075: /* VIS I fsrc1s */
                    gen_op_load_fpr_FT0(rs1);
                    gen_op_store_FT0_fpr(rd);
                    break;
                case 0x076: /* VIS I fornot2 */
                    gen_op_load_fpr_DT1(rs1);
                    gen_op_load_fpr_DT0(rs2);
                    gen_op_fornot();
                    gen_op_store_DT0_fpr(rd);
                    break;
                case 0x077: /* VIS I fornot2s */
                    gen_op_load_fpr_FT1(rs1);
                    gen_op_load_fpr_FT0(rs2);
                    gen_op_fornots();
                    gen_op_store_FT0_fpr(rd);
                    break;
                case 0x078: /* VIS I fsrc2 */
                    gen_op_load_fpr_DT0(rs2);
                    gen_op_store_DT0_fpr(rd);
                    break;
                case 0x079: /* VIS I fsrc2s */
                    gen_op_load_fpr_FT0(rs2);
                    gen_op_store_FT0_fpr(rd);
                    break;
                case 0x07a: /* VIS I fornot1 */
                    gen_op_load_fpr_DT0(rs1);
                    gen_op_load_fpr_DT1(rs2);
                    gen_op_fornot();
                    gen_op_store_DT0_fpr(rd);
                    break;
                case 0x07b: /* VIS I fornot1s */
                    gen_op_load_fpr_FT0(rs1);
                    gen_op_load_fpr_FT1(rs2);
                    gen_op_fornots();
                    gen_op_store_FT0_fpr(rd);
                    break;
                case 0x07c: /* VIS I for */
                    gen_op_load_fpr_DT0(rs1);
                    gen_op_load_fpr_DT1(rs2);
                    gen_op_for();
                    gen_op_store_DT0_fpr(rd);
                    break;
                case 0x07d: /* VIS I fors */
                    gen_op_load_fpr_FT0(rs1);
                    gen_op_load_fpr_FT1(rs2);
                    gen_op_fors();
                    gen_op_store_FT0_fpr(rd);
                    break;
                case 0x07e: /* VIS I fone */
                    gen_op_movl_DT0_1();
                    gen_op_store_DT0_fpr(rd);
                    break;
                case 0x07f: /* VIS I fones */
                    gen_op_movl_FT0_1();
                    gen_op_store_FT0_fpr(rd);
                    break;
                case 0x080: /* VIS I shutdown */
                case 0x081: /* VIS II siam */
                    // XXX
                    goto illegal_insn;
                default:
                    goto illegal_insn;
                }
#else
                goto ncp_insn;
#endif
            } else if (xop == 0x37) { /* V8 CPop2, V9 impdep2 */
#ifdef TARGET_SPARC64
                goto illegal_insn;
#else
                goto ncp_insn;
#endif
#ifdef TARGET_SPARC64
            } else if (xop == 0x39) { /* V9 return */
                rs1 = GET_FIELD(insn, 13, 17);
                save_state(dc);
                gen_movl_reg_T0(rs1);
                if (IS_IMM) {   /* immediate */
                    rs2 = GET_FIELDs(insn, 19, 31);
#if defined(OPTIM)
                    if (rs2) {
#endif
                        gen_movl_simm_T1(rs2);
                        gen_op_add_T1_T0();
#if defined(OPTIM)
                    }
#endif
                } else {                /* register */
                    rs2 = GET_FIELD(insn, 27, 31);
#if defined(OPTIM)
                    if (rs2) {
#endif
                        gen_movl_reg_T1(rs2);
                        gen_op_add_T1_T0();
#if defined(OPTIM)
                    }
#endif
                }
                gen_op_restore();
                gen_mov_pc_npc(dc);
                gen_op_check_align_T0_3();
                gen_op_movl_npc_T0();
                dc->npc = DYNAMIC_PC;
                goto jmp_insn;
#endif
            } else {
                rs1 = GET_FIELD(insn, 13, 17);
                gen_movl_reg_T0(rs1);
                if (IS_IMM) {   /* immediate */
                    rs2 = GET_FIELDs(insn, 19, 31);
#if defined(OPTIM)
                    if (rs2) {
#endif
                        gen_movl_simm_T1(rs2);
                        gen_op_add_T1_T0();
#if defined(OPTIM)
                    }
#endif
                } else {                /* register */
                    rs2 = GET_FIELD(insn, 27, 31);
#if defined(OPTIM)
                    if (rs2) {
#endif
                        gen_movl_reg_T1(rs2);
                        gen_op_add_T1_T0();
#if defined(OPTIM)
                    }
#endif
                }
                switch (xop) {
                case 0x38:      /* jmpl */
                    {
                        if (rd != 0) {
#ifdef TARGET_SPARC64
                            if (dc->pc == (uint32_t)dc->pc) {
                                gen_op_movl_T1_im(dc->pc);
                            } else {
                                gen_op_movq_T1_im64(dc->pc >> 32, dc->pc);
                            }
#else
                            gen_op_movl_T1_im(dc->pc);
#endif
                            gen_movl_T1_reg(rd);
                        }
                        gen_mov_pc_npc(dc);
                        gen_op_check_align_T0_3();
                        gen_op_movl_npc_T0();
                        dc->npc = DYNAMIC_PC;
                    }
                    goto jmp_insn;
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
                case 0x39:      /* rett, V9 return */
                    {
                        if (!supervisor(dc))
                            goto priv_insn;
                        gen_mov_pc_npc(dc);
                        gen_op_check_align_T0_3();
                        gen_op_movl_npc_T0();
                        dc->npc = DYNAMIC_PC;
                        gen_op_rett();
                    }
                    goto jmp_insn;
#endif
                case 0x3b: /* flush */
                    gen_op_flush_T0();
                    break;
                case 0x3c:      /* save */
                    save_state(dc);
                    gen_op_save();
                    gen_movl_T0_reg(rd);
                    break;
                case 0x3d:      /* restore */
                    save_state(dc);
                    gen_op_restore();
                    gen_movl_T0_reg(rd);
                    break;
#if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
                case 0x3e:      /* V9 done/retry */
                    {
                        switch (rd) {
                        case 0:
                            if (!supervisor(dc))
                                goto priv_insn;
                            dc->npc = DYNAMIC_PC;
                            dc->pc = DYNAMIC_PC;
                            gen_op_done();
                            goto jmp_insn;
                        case 1:
                            if (!supervisor(dc))
                                goto priv_insn;
                            dc->npc = DYNAMIC_PC;
                            dc->pc = DYNAMIC_PC;
                            gen_op_retry();
                            goto jmp_insn;
                        default:
                            goto illegal_insn;
                        }
                    }
                    break;
#endif
                default:
                    goto illegal_insn;
                }
            }
            break;
        }
        break;
    case 3:                     /* load/store instructions */
        {
            unsigned int xop = GET_FIELD(insn, 7, 12);
            rs1 = GET_FIELD(insn, 13, 17);
            save_state(dc);
            gen_movl_reg_T0(rs1);
            if (xop == 0x3c || xop == 0x3e)
            {
                rs2 = GET_FIELD(insn, 27, 31);
                gen_movl_reg_T1(rs2);
            }
            else if (IS_IMM) {       /* immediate */
                rs2 = GET_FIELDs(insn, 19, 31);
#if defined(OPTIM)
                if (rs2 != 0) {
#endif
                    gen_movl_simm_T1(rs2);
                    gen_op_add_T1_T0();
#if defined(OPTIM)
                }
#endif
            } else {            /* register */
                rs2 = GET_FIELD(insn, 27, 31);
#if defined(OPTIM)
                if (rs2 != 0) {
#endif
                    gen_movl_reg_T1(rs2);
                    gen_op_add_T1_T0();
#if defined(OPTIM)
                }
#endif
            }
            if (xop < 4 || (xop > 7 && xop < 0x14 && xop != 0x0e) ||
                (xop > 0x17 && xop <= 0x1d ) ||
                (xop > 0x2c && xop <= 0x33) || xop == 0x1f || xop == 0x3d) {
                switch (xop) {
                case 0x0:       /* load word */
#ifdef CONFIG_USER_ONLY
                    gen_op_check_align_T0_3();
#endif
#ifndef TARGET_SPARC64
                    gen_op_ldst(ld);
#else
                    gen_op_ldst(lduw);
#endif
                    break;
                case 0x1:       /* load unsigned byte */
                    gen_op_ldst(ldub);
                    break;
                case 0x2:       /* load unsigned halfword */
#ifdef CONFIG_USER_ONLY
                    gen_op_check_align_T0_1();
#endif
                    gen_op_ldst(lduh);
                    break;
                case 0x3:       /* load double word */
                    gen_op_check_align_T0_7();
                    if (rd & 1)
                        goto illegal_insn;
                    gen_op_ldst(ldd);
                    gen_movl_T0_reg(rd + 1);
                    break;
                case 0x9:       /* load signed byte */
                    gen_op_ldst(ldsb);
                    break;
                case 0xa:       /* load signed halfword */
#ifdef CONFIG_USER_ONLY
                    gen_op_check_align_T0_1();
#endif
                    gen_op_ldst(ldsh);
                    break;
                case 0xd:       /* ldstub -- XXX: should be atomic */
                    gen_op_ldst(ldstub);
                    break;
                case 0x0f:      /* swap register with memory, atomically */
#ifdef CONFIG_USER_ONLY
                    gen_op_check_align_T0_3();
#endif
                    gen_movl_reg_T1(rd);
                    gen_op_ldst(swap);
                    break;
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
                case 0x10:      /* load word alternate */
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#elif CONFIG_USER_ONLY
                    gen_op_check_align_T0_3();
#endif
                    gen_ld_asi(insn, 4, 0);
                    break;
                case 0x11:      /* load unsigned byte alternate */
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    gen_ld_asi(insn, 1, 0);
                    break;
                case 0x12:      /* load unsigned halfword alternate */
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#elif CONFIG_USER_ONLY
                    gen_op_check_align_T0_1();
#endif
                    gen_ld_asi(insn, 2, 0);
                    break;
                case 0x13:      /* load double word alternate */
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    if (rd & 1)
                        goto illegal_insn;
                    gen_op_check_align_T0_7();
                    gen_ldda_asi(insn);
                    gen_movl_T0_reg(rd + 1);
                    break;
                case 0x19:      /* load signed byte alternate */
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    gen_ld_asi(insn, 1, 1);
                    break;
                case 0x1a:      /* load signed halfword alternate */
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#elif CONFIG_USER_ONLY
                    gen_op_check_align_T0_1();
#endif
                    gen_ld_asi(insn, 2, 1);
                    break;
                case 0x1d:      /* ldstuba -- XXX: should be atomic */
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    gen_ldstub_asi(insn);
                    break;
                case 0x1f:      /* swap reg with alternate memory, atomically */
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#elif CONFIG_USER_ONLY
                    gen_op_check_align_T0_3();
#endif
                    gen_movl_reg_T1(rd);
                    gen_swap_asi(insn);
                    break;
#ifndef TARGET_SPARC64
                case 0x30: /* ldc */
                case 0x31: /* ldcsr */
                case 0x33: /* lddc */
                    goto ncp_insn;
#endif
#endif
#ifdef TARGET_SPARC64
                case 0x08: /* V9 ldsw */
#ifdef CONFIG_USER_ONLY
                    gen_op_check_align_T0_3();
#endif
                    gen_op_ldst(ldsw);
                    break;
                case 0x0b: /* V9 ldx */
                    gen_op_check_align_T0_7();
                    gen_op_ldst(ldx);
                    break;
                case 0x18: /* V9 ldswa */
#ifdef CONFIG_USER_ONLY
                    gen_op_check_align_T0_3();
#endif
                    gen_ld_asi(insn, 4, 1);
                    break;
                case 0x1b: /* V9 ldxa */
                    gen_op_check_align_T0_7();
                    gen_ld_asi(insn, 8, 0);
                    break;
                case 0x2d: /* V9 prefetch, no effect */
                    goto skip_move;
                case 0x30: /* V9 ldfa */
#ifdef CONFIG_USER_ONLY
                    gen_op_check_align_T0_3();
#endif
                    gen_ldf_asi(insn, 4);
                    goto skip_move;
                case 0x33: /* V9 lddfa */
                    gen_op_check_align_T0_3();
                    gen_ldf_asi(insn, 8);
                    goto skip_move;
                case 0x3d: /* V9 prefetcha, no effect */
                    goto skip_move;
                case 0x32: /* V9 ldqfa */
                    goto nfpu_insn;
#endif
                default:
                    goto illegal_insn;
                }
                gen_movl_T1_reg(rd);
#ifdef TARGET_SPARC64
            skip_move: ;
#endif
            } else if (xop >= 0x20 && xop < 0x24) {
                if (gen_trap_ifnofpu(dc))
                    goto jmp_insn;
                switch (xop) {
                case 0x20:      /* load fpreg */
#ifdef CONFIG_USER_ONLY
                    gen_op_check_align_T0_3();
#endif
                    gen_op_ldst(ldf);
                    gen_op_store_FT0_fpr(rd);
                    break;
                case 0x21:      /* load fsr */
#ifdef CONFIG_USER_ONLY
                    gen_op_check_align_T0_3();
#endif
                    gen_op_ldst(ldf);
                    gen_op_ldfsr();
                    break;
                case 0x22:      /* load quad fpreg */
                    goto nfpu_insn;
                case 0x23:      /* load double fpreg */
                    gen_op_check_align_T0_7();
                    gen_op_ldst(lddf);
                    gen_op_store_DT0_fpr(DFPREG(rd));
                    break;
                default:
                    goto illegal_insn;
                }
            } else if (xop < 8 || (xop >= 0x14 && xop < 0x18) || \
                       xop == 0xe || xop == 0x1e) {
                gen_movl_reg_T1(rd);
                switch (xop) {
                case 0x4:
#ifdef CONFIG_USER_ONLY
                    gen_op_check_align_T0_3();
#endif
                    gen_op_ldst(st);
                    break;
                case 0x5:
                    gen_op_ldst(stb);
                    break;
                case 0x6:
#ifdef CONFIG_USER_ONLY
                    gen_op_check_align_T0_1();
#endif
                    gen_op_ldst(sth);
                    break;
                case 0x7:
                    if (rd & 1)
                        goto illegal_insn;
                    gen_op_check_align_T0_7();
                    flush_T2(dc);
                    gen_movl_reg_T2(rd + 1);
                    gen_op_ldst(std);
                    break;
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
                case 0x14:
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
#ifdef CONFIG_USER_ONLY
                    gen_op_check_align_T0_3();
#endif
                    gen_st_asi(insn, 4);
                    break;
                case 0x15:
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    gen_st_asi(insn, 1);
                    break;
                case 0x16:
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
#ifdef CONFIG_USER_ONLY
                    gen_op_check_align_T0_1();
#endif
                    gen_st_asi(insn, 2);
                    break;
                case 0x17:
#ifndef TARGET_SPARC64
                    if (IS_IMM)
                        goto illegal_insn;
                    if (!supervisor(dc))
                        goto priv_insn;
#endif
                    if (rd & 1)
                        goto illegal_insn;
                    gen_op_check_align_T0_7();
                    flush_T2(dc);
                    gen_movl_reg_T2(rd + 1);
                    gen_stda_asi(insn);
                    break;
#endif
#ifdef TARGET_SPARC64
                case 0x0e: /* V9 stx */
                    gen_op_check_align_T0_7();
                    gen_op_ldst(stx);
                    break;
                case 0x1e: /* V9 stxa */
                    gen_op_check_align_T0_7();
                    gen_st_asi(insn, 8);
                    break;
#endif
                default:
                    goto illegal_insn;
                }
            } else if (xop > 0x23 && xop < 0x28) {
                if (gen_trap_ifnofpu(dc))
                    goto jmp_insn;
                switch (xop) {
                case 0x24:
#ifdef CONFIG_USER_ONLY
                    gen_op_check_align_T0_3();
#endif
                    gen_op_load_fpr_FT0(rd);
                    gen_op_ldst(stf);
                    break;
                case 0x25: /* stfsr, V9 stxfsr */
#ifdef CONFIG_USER_ONLY
                    gen_op_check_align_T0_3();
#endif
                    gen_op_stfsr();
                    gen_op_ldst(stf);
                    break;
#if !defined(CONFIG_USER_ONLY)
                case 0x26: /* stdfq */
                    if (!supervisor(dc))
                        goto priv_insn;
                    if (gen_trap_ifnofpu(dc))
                        goto jmp_insn;
                    goto nfq_insn;
#endif
                case 0x27:
                    gen_op_check_align_T0_7();
                    gen_op_load_fpr_DT0(DFPREG(rd));
                    gen_op_ldst(stdf);
                    break;
                default:
                    goto illegal_insn;
                }
            } else if (xop > 0x33 && xop < 0x3f) {
                switch (xop) {
#ifdef TARGET_SPARC64
                case 0x34: /* V9 stfa */
#ifdef CONFIG_USER_ONLY
                    gen_op_check_align_T0_3();
#endif
                    gen_op_load_fpr_FT0(rd);
                    gen_stf_asi(insn, 4);
                    break;
                case 0x37: /* V9 stdfa */
                    gen_op_check_align_T0_3();
                    gen_op_load_fpr_DT0(DFPREG(rd));
                    gen_stf_asi(insn, 8);
                    break;
                case 0x3c: /* V9 casa */
#ifdef CONFIG_USER_ONLY
                    gen_op_check_align_T0_3();
#endif
                    flush_T2(dc);
                    gen_movl_reg_T2(rd);
                    gen_cas_asi(insn);
                    gen_movl_T1_reg(rd);
                    break;
                case 0x3e: /* V9 casxa */
                    gen_op_check_align_T0_7();
                    flush_T2(dc);
                    gen_movl_reg_T2(rd);
                    gen_casx_asi(insn);
                    gen_movl_T1_reg(rd);
                    break;
                case 0x36: /* V9 stqfa */
                    goto nfpu_insn;
#else
                case 0x34: /* stc */
                case 0x35: /* stcsr */
                case 0x36: /* stdcq */
                case 0x37: /* stdc */
                    goto ncp_insn;
#endif
                default:
                    goto illegal_insn;
                }
            }
            else
                goto illegal_insn;
        }
        break;
    }
    /* default case for non jump instructions */
    if (dc->npc == DYNAMIC_PC) {
        dc->pc = DYNAMIC_PC;
        gen_op_next_insn();
    } else if (dc->npc == JUMP_PC) {
        /* we can do a static jump */
        gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1]);
        dc->is_br = 1;
    } else {
        dc->pc = dc->npc;
        dc->npc = dc->npc + 4;
    }
 jmp_insn:
    return;
 illegal_insn:
    save_state(dc);
    gen_op_exception(TT_ILL_INSN);
    dc->is_br = 1;
    return;
#if !defined(CONFIG_USER_ONLY)
 priv_insn:
    save_state(dc);
    gen_op_exception(TT_PRIV_INSN);
    dc->is_br = 1;
    return;
#endif
 nfpu_insn:
    save_state(dc);
    gen_op_fpexception_im(FSR_FTT_UNIMPFPOP);
    dc->is_br = 1;
    return;
#if !defined(CONFIG_USER_ONLY)
 nfq_insn:
    save_state(dc);
    gen_op_fpexception_im(FSR_FTT_SEQ_ERROR);
    dc->is_br = 1;
    return;
#endif
#ifndef TARGET_SPARC64
 ncp_insn:
    save_state(dc);
    gen_op_exception(TT_NCP_INSN);
    dc->is_br = 1;
    return;
#endif
}
 | 18,233 | 
| 
	qemu | 
	f8ad4a89e99848a554b0049d7a612f5a585b7231 | 0 | 
	static int local_set_xattr(const char *path, FsCred *credp)
{
    int err;
    if (credp->fc_uid != -1) {
        err = setxattr(path, "user.virtfs.uid", &credp->fc_uid, sizeof(uid_t),
                0);
        if (err) {
            return err;
        }
    }
    if (credp->fc_gid != -1) {
        err = setxattr(path, "user.virtfs.gid", &credp->fc_gid, sizeof(gid_t),
                0);
        if (err) {
            return err;
        }
    }
    if (credp->fc_mode != -1) {
        err = setxattr(path, "user.virtfs.mode", &credp->fc_mode,
                sizeof(mode_t), 0);
        if (err) {
            return err;
        }
    }
    if (credp->fc_rdev != -1) {
        err = setxattr(path, "user.virtfs.rdev", &credp->fc_rdev,
                sizeof(dev_t), 0);
        if (err) {
            return err;
        }
    }
    return 0;
}
 | 18,235 | 
| 
	qemu | 
	5d98bf8f38c17a348ab6e8af196088cd4953acd0 | 0 | 
	void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    DisasContext dc1, *dc = &dc1;
    target_ulong pc_start;
    target_ulong next_page_start;
    int num_insns;
    int max_insns;
    /* generate intermediate code */
    /* The A64 decoder has its own top level loop, because it doesn't need
     * the A32/T32 complexity to do with conditional execution/IT blocks/etc.
     */
    if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
        gen_intermediate_code_a64(cpu, tb);
        return;
    }
    pc_start = tb->pc;
    dc->tb = tb;
    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->condjmp = 0;
    dc->aarch64 = 0;
    /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
     * there is no secure EL1, so we route exceptions to EL3.
     */
    dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
                               !arm_el_is_aa64(env, 3);
    dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
    dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags);
    dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
    dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
    dc->mmu_idx = ARM_TBFLAG_MMUIDX(tb->flags);
    dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
    dc->user = (dc->current_el == 0);
#endif
    dc->ns = ARM_TBFLAG_NS(tb->flags);
    dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(tb->flags);
    dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
    dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
    dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
    dc->c15_cpar = ARM_TBFLAG_XSCALE_CPAR(tb->flags);
    dc->cp_regs = cpu->cp_regs;
    dc->features = env->features;
    /* Single step state. The code-generation logic here is:
     *  SS_ACTIVE == 0:
     *   generate code with no special handling for single-stepping (except
     *   that anything that can make us go to SS_ACTIVE == 1 must end the TB;
     *   this happens anyway because those changes are all system register or
     *   PSTATE writes).
     *  SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
     *   emit code for one insn
     *   emit code to clear PSTATE.SS
     *   emit code to generate software step exception for completed step
     *   end TB (as usual for having generated an exception)
     *  SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
     *   emit code to generate a software step exception
     *   end the TB
     */
    dc->ss_active = ARM_TBFLAG_SS_ACTIVE(tb->flags);
    dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(tb->flags);
    dc->is_ldex = false;
    dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */
    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    cpu_V0 = cpu_F0d;
    cpu_V1 = cpu_F1d;
    /* FIXME: cpu_M0 can probably be the same as cpu_V0.  */
    cpu_M0 = tcg_temp_new_i64();
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }
    gen_tb_start(tb);
    tcg_clear_temp_count();
    /* A note on handling of the condexec (IT) bits:
     *
     * We want to avoid the overhead of having to write the updated condexec
     * bits back to the CPUARMState for every instruction in an IT block. So:
     * (1) if the condexec bits are not already zero then we write
     * zero back into the CPUARMState now. This avoids complications trying
     * to do it at the end of the block. (For example if we don't do this
     * it's hard to identify whether we can safely skip writing condexec
     * at the end of the TB, which we definitely want to do for the case
     * where a TB doesn't do anything with the IT state at all.)
     * (2) if we are going to leave the TB then we call gen_set_condexec()
     * which will write the correct value into CPUARMState if zero is wrong.
     * This is done both for leaving the TB at the end, and for leaving
     * it because of an exception we know will happen, which is done in
     * gen_exception_insn(). The latter is necessary because we need to
     * leave the TB with the PC/IT state just prior to execution of the
     * instruction which caused the exception.
     * (3) if we leave the TB unexpectedly (eg a data abort on a load)
     * then the CPUARMState will be wrong and we need to reset it.
     * This is handled in the same way as restoration of the
     * PC in these situations; we save the value of the condexec bits
     * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
     * then uses this to restore them after an exception.
     *
     * Note that there are no instructions which can read the condexec
     * bits, and none which can write non-static values to them, so
     * we don't need to care about whether CPUARMState is correct in the
     * middle of a TB.
     */
    /* Reset the conditional execution bits immediately. This avoids
       complications trying to do it at the end of the block.  */
    if (dc->condexec_mask || dc->condexec_cond)
      {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, 0);
        store_cpu_field(tmp, condexec_bits);
      }
    do {
        tcg_gen_insn_start(dc->pc,
                           (dc->condexec_cond << 4) | (dc->condexec_mask >> 1));
        num_insns++;
#ifdef CONFIG_USER_ONLY
        /* Intercept jump to the magic kernel page.  */
        if (dc->pc >= 0xffff0000) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block.  */
            gen_exception_internal(EXCP_KERNEL_TRAP);
            dc->is_jmp = DISAS_UPDATE;
            break;
        }
#else
        if (dc->pc >= 0xfffffff0 && arm_dc_feature(dc, ARM_FEATURE_M)) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block.  */
            gen_exception_internal(EXCP_EXCEPTION_EXIT);
            dc->is_jmp = DISAS_UPDATE;
            break;
        }
#endif
        if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
            CPUBreakpoint *bp;
            QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
                    /* Advance PC so that clearing the breakpoint will
                       invalidate this TB.  */
                    dc->pc += 2;
                    goto done_generating;
                }
            }
        }
        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }
        if (dc->ss_active && !dc->pstate_ss) {
            /* Singlestep state is Active-pending.
             * If we're in this state at the start of a TB then either
             *  a) we just took an exception to an EL which is being debugged
             *     and this is the first insn in the exception handler
             *  b) debug exceptions were masked and we just unmasked them
             *     without changing EL (eg by clearing PSTATE.D)
             * In either case we're going to take a swstep exception in the
             * "did not step an insn" case, and so the syndrome ISV and EX
             * bits should be zero.
             */
            assert(num_insns == 1);
            gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
                          default_exception_el(dc));
            goto done_generating;
        }
        if (dc->thumb) {
            disas_thumb_insn(env, dc);
            if (dc->condexec_mask) {
                dc->condexec_cond = (dc->condexec_cond & 0xe)
                                   | ((dc->condexec_mask >> 4) & 1);
                dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
                if (dc->condexec_mask == 0) {
                    dc->condexec_cond = 0;
                }
            }
        } else {
            unsigned int insn = arm_ldl_code(env, dc->pc, dc->bswap_code);
            dc->pc += 4;
            disas_arm_insn(dc, insn);
        }
        if (dc->condjmp && !dc->is_jmp) {
            gen_set_label(dc->condlabel);
            dc->condjmp = 0;
        }
        if (tcg_check_temp_count()) {
            fprintf(stderr, "TCG temporary leak before "TARGET_FMT_lx"\n",
                    dc->pc);
        }
        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached.  This
         * ensures prefetch aborts occur at the right place.  */
    } while (!dc->is_jmp && !tcg_op_buf_full() &&
             !cs->singlestep_enabled &&
             !singlestep &&
             !dc->ss_active &&
             dc->pc < next_page_start &&
             num_insns < max_insns);
    if (tb->cflags & CF_LAST_IO) {
        if (dc->condjmp) {
            /* FIXME:  This can theoretically happen with self-modifying
               code.  */
            cpu_abort(cs, "IO on conditional branch instruction");
        }
        gen_io_end();
    }
    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written.  */
    if (unlikely(cs->singlestep_enabled || dc->ss_active)) {
        /* Make sure the pc is updated, and raise a debug exception.  */
        if (dc->condjmp) {
            gen_set_condexec(dc);
            if (dc->is_jmp == DISAS_SWI) {
                gen_ss_advance(dc);
                gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                              default_exception_el(dc));
            } else if (dc->is_jmp == DISAS_HVC) {
                gen_ss_advance(dc);
                gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            } else if (dc->is_jmp == DISAS_SMC) {
                gen_ss_advance(dc);
                gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            } else if (dc->ss_active) {
                gen_step_complete_exception(dc);
            } else {
                gen_exception_internal(EXCP_DEBUG);
            }
            gen_set_label(dc->condlabel);
        }
        if (dc->condjmp || !dc->is_jmp) {
            gen_set_pc_im(dc, dc->pc);
            dc->condjmp = 0;
        }
        gen_set_condexec(dc);
        if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
            gen_ss_advance(dc);
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
        } else if (dc->is_jmp == DISAS_HVC && !dc->condjmp) {
            gen_ss_advance(dc);
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
        } else if (dc->is_jmp == DISAS_SMC && !dc->condjmp) {
            gen_ss_advance(dc);
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
        } else if (dc->ss_active) {
            gen_step_complete_exception(dc);
        } else {
            /* FIXME: Single stepping a WFI insn will not halt
               the CPU.  */
            gen_exception_internal(EXCP_DEBUG);
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
            - Exception generating instructions (bkpt, swi, undefined).
            - Page boundaries.
            - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        gen_set_condexec(dc);
        switch(dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        case DISAS_WFI:
            gen_helper_wfi(cpu_env);
            /* The helper doesn't necessarily throw an exception, but we
             * must go back to the main loop to check for interrupts anyway.
             */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_WFE:
            gen_helper_wfe(cpu_env);
            break;
        case DISAS_YIELD:
            gen_helper_yield(cpu_env);
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        }
        if (dc->condjmp) {
            gen_set_label(dc->condlabel);
            gen_set_condexec(dc);
            gen_goto_tb(dc, 1, dc->pc);
            dc->condjmp = 0;
        }
    }
done_generating:
    gen_tb_end(tb, num_insns);
#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(cs, pc_start, dc->pc - pc_start,
                         dc->thumb | (dc->bswap_code << 1));
        qemu_log("\n");
    }
#endif
    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;
}
 | 18,236 | 