label
int64 0
1
| func1
stringlengths 23
97k
| id
int64 0
27.3k
|
---|---|---|
0 | bool aio_dispatch(AioContext *ctx) { bool progress; progress = aio_bh_poll(ctx); progress |= aio_dispatch_handlers(ctx, INVALID_HANDLE_VALUE); progress |= timerlistgroup_run_timers(&ctx->tlg); return progress; } | 19,374 |
0 | static void sm501_draw_crt(SM501State *s) { DisplaySurface *surface = qemu_console_surface(s->con); int y, c_x = 0, c_y = 0; uint8_t *hwc_src = NULL, *src = s->local_mem; int width = get_width(s, 1); int height = get_height(s, 1); int src_bpp = get_bpp(s, 1); int dst_bpp = surface_bytes_per_pixel(surface); uint32_t *palette = (uint32_t *)&s->dc_palette[SM501_DC_CRT_PALETTE - SM501_DC_PANEL_PALETTE]; uint8_t hwc_palette[3 * 3]; int ds_depth_index = get_depth_index(surface); draw_line_func *draw_line = NULL; draw_hwc_line_func *draw_hwc_line = NULL; int full_update = 0; int y_start = -1; ram_addr_t page_min = ~0l; ram_addr_t page_max = 0l; ram_addr_t offset = 0; /* choose draw_line function */ switch (src_bpp) { case 1: draw_line = draw_line8_funcs[ds_depth_index]; break; case 2: draw_line = draw_line16_funcs[ds_depth_index]; break; case 4: draw_line = draw_line32_funcs[ds_depth_index]; break; default: printf("sm501 draw crt : invalid DC_CRT_CONTROL=%x.\n", s->dc_crt_control); abort(); break; } /* set up to draw hardware cursor */ if (is_hwc_enabled(s, 1)) { /* choose cursor draw line function */ draw_hwc_line = draw_hwc_line_funcs[ds_depth_index]; hwc_src = get_hwc_address(s, 1); c_x = get_hwc_x(s, 1); c_y = get_hwc_y(s, 1); get_hwc_palette(s, 1, hwc_palette); } /* adjust console size */ if (s->last_width != width || s->last_height != height) { qemu_console_resize(s->con, width, height); surface = qemu_console_surface(s->con); s->last_width = width; s->last_height = height; full_update = 1; } /* draw each line according to conditions */ memory_region_sync_dirty_bitmap(&s->local_mem_region); for (y = 0; y < height; y++) { int update, update_hwc; ram_addr_t page0 = offset; ram_addr_t page1 = offset + width * src_bpp - 1; /* check if hardware cursor is enabled and we're within its range */ update_hwc = draw_hwc_line && c_y <= y && y < c_y + SM501_HWC_HEIGHT; update = full_update || update_hwc; /* check dirty flags for each line */ update |= memory_region_get_dirty(&s->local_mem_region, page0, page1 - page0, DIRTY_MEMORY_VGA); /* draw line and change status */ if (update) { uint8_t *d = surface_data(surface); d += y * width * dst_bpp; /* draw graphics layer */ draw_line(d, src, width, palette); /* draw hardware cursor */ if (update_hwc) { draw_hwc_line(d, hwc_src, width, hwc_palette, c_x, y - c_y); } if (y_start < 0) { y_start = y; } if (page0 < page_min) { page_min = page0; } if (page1 > page_max) { page_max = page1; } } else { if (y_start >= 0) { /* flush to display */ dpy_gfx_update(s->con, 0, y_start, width, y - y_start); y_start = -1; } } src += width * src_bpp; offset += width * src_bpp; } /* complete flush to display */ if (y_start >= 0) { dpy_gfx_update(s->con, 0, y_start, width, y - y_start); } /* clear dirty flags */ if (page_min != ~0l) { memory_region_reset_dirty(&s->local_mem_region, page_min, page_max + TARGET_PAGE_SIZE, DIRTY_MEMORY_VGA); } } | 19,375 |
0 | static void test_io(void) { #ifndef _WIN32 /* socketpair(PF_UNIX) which does not exist on windows */ int sv[2]; int r; unsigned i, j, k, s, t; fd_set fds; unsigned niov; struct iovec *iov, *siov; unsigned char *buf; size_t sz; iov_random(&iov, &niov); sz = iov_size(iov, niov); buf = g_malloc(sz); for (i = 0; i < sz; ++i) { buf[i] = i & 255; } iov_from_buf(iov, niov, 0, buf, sz); siov = g_memdup(iov, sizeof(*iov) * niov); if (socketpair(PF_UNIX, SOCK_STREAM, 0, sv) < 0) { perror("socketpair"); exit(1); } FD_ZERO(&fds); t = 0; if (fork() == 0) { /* writer */ close(sv[0]); FD_SET(sv[1], &fds); fcntl(sv[1], F_SETFL, O_RDWR|O_NONBLOCK); r = g_test_rand_int_range(sz / 2, sz); setsockopt(sv[1], SOL_SOCKET, SO_SNDBUF, &r, sizeof(r)); for (i = 0; i <= sz; ++i) { for (j = i; j <= sz; ++j) { k = i; do { s = g_test_rand_int_range(0, j - k + 1); r = iov_send(sv[1], iov, niov, k, s); g_assert(memcmp(iov, siov, sizeof(*iov)*niov) == 0); if (r >= 0) { k += r; t += r; usleep(g_test_rand_int_range(0, 30)); } else if (errno == EAGAIN) { select(sv[1]+1, NULL, &fds, NULL, NULL); continue; } else { perror("send"); exit(1); } } while(k < j); } } iov_free(iov, niov); g_free(buf); g_free(siov); exit(0); } else { /* reader & verifier */ close(sv[1]); FD_SET(sv[0], &fds); fcntl(sv[0], F_SETFL, O_RDWR|O_NONBLOCK); r = g_test_rand_int_range(sz / 2, sz); setsockopt(sv[0], SOL_SOCKET, SO_RCVBUF, &r, sizeof(r)); usleep(500000); for (i = 0; i <= sz; ++i) { for (j = i; j <= sz; ++j) { k = i; iov_memset(iov, niov, 0, 0xff, -1); do { s = g_test_rand_int_range(0, j - k + 1); r = iov_recv(sv[0], iov, niov, k, s); g_assert(memcmp(iov, siov, sizeof(*iov)*niov) == 0); if (r > 0) { k += r; t += r; } else if (!r) { if (s) { break; } } else if (errno == EAGAIN) { select(sv[0]+1, &fds, NULL, NULL, NULL); continue; } else { perror("recv"); exit(1); } } while(k < j); test_iov_bytes(iov, niov, i, j - i); } } iov_free(iov, niov); g_free(buf); g_free(siov); } #endif } | 19,376 |
0 | static gboolean gd_motion_event(GtkWidget *widget, GdkEventMotion *motion, void *opaque) { GtkDisplayState *s = opaque; int dx, dy; int x, y; x = motion->x / s->scale_x; y = motion->y / s->scale_y; if (kbd_mouse_is_absolute()) { dx = x * 0x7FFF / (ds_get_width(s->ds) - 1); dy = y * 0x7FFF / (ds_get_height(s->ds) - 1); } else if (s->last_x == -1 || s->last_y == -1) { dx = 0; dy = 0; } else { dx = x - s->last_x; dy = y - s->last_y; } s->last_x = x; s->last_y = y; if (kbd_mouse_is_absolute()) { kbd_mouse_event(dx, dy, 0, s->button_mask); } return TRUE; } | 19,377 |
0 | static int virtio_9p_init_pci(PCIDevice *pci_dev) { VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev); VirtIODevice *vdev; vdev = virtio_9p_init(&pci_dev->qdev, &proxy->fsconf); vdev->nvectors = proxy->nvectors; virtio_init_pci(proxy, vdev, PCI_VENDOR_ID_REDHAT_QUMRANET, 0x1009, 0x2, 0x00); /* make the actual value visible */ proxy->nvectors = vdev->nvectors; return 0; } | 19,378 |
0 | void pc_guest_info_init(PCMachineState *pcms) { int i, j; pcms->apic_xrupt_override = kvm_allows_irq0_override(); pcms->numa_nodes = nb_numa_nodes; pcms->node_mem = g_malloc0(pcms->numa_nodes * sizeof *pcms->node_mem); for (i = 0; i < nb_numa_nodes; i++) { pcms->node_mem[i] = numa_info[i].node_mem; } pcms->node_cpu = g_malloc0(pcms->apic_id_limit * sizeof *pcms->node_cpu); for (i = 0; i < max_cpus; i++) { unsigned int apic_id = x86_cpu_apic_id_from_index(i); assert(apic_id < pcms->apic_id_limit); for (j = 0; j < nb_numa_nodes; j++) { if (test_bit(i, numa_info[j].node_cpu)) { pcms->node_cpu[apic_id] = j; break; } } } pcms->machine_done.notify = pc_machine_done; qemu_add_machine_init_done_notifier(&pcms->machine_done); } | 19,379 |
0 | void DMA_schedule(int nchan) {} | 19,381 |
0 | static int encode_frame(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *frame, int *got_packet) { SgiContext *s = avctx->priv_data; const AVFrame * const p = frame; PutByteContext pbc; uint8_t *in_buf, *encode_buf; int x, y, z, length, tablesize, ret; unsigned int width, height, depth, dimension; unsigned int bytes_per_channel, pixmax, put_be; #if FF_API_CODED_FRAME FF_DISABLE_DEPRECATION_WARNINGS avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I; avctx->coded_frame->key_frame = 1; FF_ENABLE_DEPRECATION_WARNINGS #endif #if FF_API_CODER_TYPE FF_DISABLE_DEPRECATION_WARNINGS if (avctx->coder_type == FF_CODER_TYPE_RAW) s->rle = 0; FF_ENABLE_DEPRECATION_WARNINGS #endif width = avctx->width; height = avctx->height; bytes_per_channel = 1; pixmax = 0xFF; put_be = HAVE_BIGENDIAN; switch (avctx->pix_fmt) { case AV_PIX_FMT_GRAY8: dimension = SGI_SINGLE_CHAN; depth = SGI_GRAYSCALE; break; case AV_PIX_FMT_RGB24: dimension = SGI_MULTI_CHAN; depth = SGI_RGB; break; case AV_PIX_FMT_RGBA: dimension = SGI_MULTI_CHAN; depth = SGI_RGBA; break; case AV_PIX_FMT_GRAY16LE: put_be = !HAVE_BIGENDIAN; case AV_PIX_FMT_GRAY16BE: bytes_per_channel = 2; pixmax = 0xFFFF; dimension = SGI_SINGLE_CHAN; depth = SGI_GRAYSCALE; break; case AV_PIX_FMT_RGB48LE: put_be = !HAVE_BIGENDIAN; case AV_PIX_FMT_RGB48BE: bytes_per_channel = 2; pixmax = 0xFFFF; dimension = SGI_MULTI_CHAN; depth = SGI_RGB; break; case AV_PIX_FMT_RGBA64LE: put_be = !HAVE_BIGENDIAN; case AV_PIX_FMT_RGBA64BE: bytes_per_channel = 2; pixmax = 0xFFFF; dimension = SGI_MULTI_CHAN; depth = SGI_RGBA; break; default: return AVERROR_INVALIDDATA; } tablesize = depth * height * 4; length = SGI_HEADER_SIZE; if (!s->rle) length += depth * height * width; else // assume sgi_rle_encode() produces at most 2x size of input length += tablesize * 2 + depth * height * (2 * width + 1); if ((ret = ff_alloc_packet(pkt, bytes_per_channel * length)) < 0) { av_log(avctx, AV_LOG_ERROR, "Error getting output packet of size %d.\n", length); return ret; } bytestream2_init_writer(&pbc, pkt->data, pkt->size); /* Encode header. */ bytestream2_put_be16(&pbc, SGI_MAGIC); bytestream2_put_byte(&pbc, s->rle); /* RLE 1 - VERBATIM 0 */ bytestream2_put_byte(&pbc, bytes_per_channel); bytestream2_put_be16(&pbc, dimension); bytestream2_put_be16(&pbc, width); bytestream2_put_be16(&pbc, height); bytestream2_put_be16(&pbc, depth); bytestream2_put_be32(&pbc, 0L); /* pixmin */ bytestream2_put_be32(&pbc, pixmax); bytestream2_put_be32(&pbc, 0L); /* dummy */ /* name */ bytestream2_skip_p(&pbc, 80); /* colormap */ bytestream2_put_be32(&pbc, 0L); /* The rest of the 512 byte header is unused. */ bytestream2_skip_p(&pbc, 404); if (s->rle) { PutByteContext taboff_pcb, tablen_pcb; /* Skip RLE offset table. */ bytestream2_init_writer(&taboff_pcb, pbc.buffer, tablesize); bytestream2_skip_p(&pbc, tablesize); /* Skip RLE length table. */ bytestream2_init_writer(&tablen_pcb, pbc.buffer, tablesize); bytestream2_skip_p(&pbc, tablesize); /* Make an intermediate consecutive buffer. */ if (!(encode_buf = av_malloc(width * bytes_per_channel))) return AVERROR(ENOMEM); for (z = 0; z < depth; z++) { in_buf = p->data[0] + p->linesize[0] * (height - 1) + z * bytes_per_channel; for (y = 0; y < height; y++) { bytestream2_put_be32(&taboff_pcb, bytestream2_tell_p(&pbc)); for (x = 0; x < width * bytes_per_channel; x += bytes_per_channel) encode_buf[x] = in_buf[depth * x]; length = sgi_rle_encode(&pbc, encode_buf, width, bytes_per_channel); if (length < 1) { av_free(encode_buf); return AVERROR_INVALIDDATA; } bytestream2_put_be32(&tablen_pcb, length); in_buf -= p->linesize[0]; } } av_free(encode_buf); } else { for (z = 0; z < depth; z++) { in_buf = p->data[0] + p->linesize[0] * (height - 1) + z * bytes_per_channel; for (y = 0; y < height; y++) { for (x = 0; x < width * depth; x += depth) if (bytes_per_channel == 1) bytestream2_put_byte(&pbc, in_buf[x]); else if (put_be) bytestream2_put_be16(&pbc, ((uint16_t *)in_buf)[x]); else bytestream2_put_le16(&pbc, ((uint16_t *)in_buf)[x]); in_buf -= p->linesize[0]; } } } /* total length */ pkt->size = bytestream2_tell_p(&pbc); pkt->flags |= AV_PKT_FLAG_KEY; *got_packet = 1; return 0; } | 19,382 |
0 | static uint64_t sysbus_esp_mem_read(void *opaque, target_phys_addr_t addr, unsigned int size) { SysBusESPState *sysbus = opaque; uint32_t saddr; saddr = addr >> sysbus->it_shift; return esp_reg_read(&sysbus->esp, saddr); } | 19,383 |
0 | static int usb_bt_handle_data(USBDevice *dev, USBPacket *p) { struct USBBtState *s = (struct USBBtState *) dev->opaque; int ret = 0; if (!s->config) goto fail; switch (p->pid) { case USB_TOKEN_IN: switch (p->devep & 0xf) { case USB_EVT_EP: ret = usb_bt_fifo_dequeue(&s->evt, p); break; case USB_ACL_EP: ret = usb_bt_fifo_dequeue(&s->acl, p); break; case USB_SCO_EP: ret = usb_bt_fifo_dequeue(&s->sco, p); break; default: goto fail; } break; case USB_TOKEN_OUT: switch (p->devep & 0xf) { case USB_ACL_EP: usb_bt_fifo_out_enqueue(s, &s->outacl, s->hci->acl_send, usb_bt_hci_acl_complete, p); break; case USB_SCO_EP: usb_bt_fifo_out_enqueue(s, &s->outsco, s->hci->sco_send, usb_bt_hci_sco_complete, p); break; default: goto fail; } break; default: fail: ret = USB_RET_STALL; break; } return ret; } | 19,384 |
0 | void bdrv_io_unplugged_begin(BlockDriverState *bs) { BdrvChild *child; if (bs->io_plug_disabled++ == 0 && bs->io_plugged > 0) { BlockDriver *drv = bs->drv; if (drv && drv->bdrv_io_unplug) { drv->bdrv_io_unplug(bs); } } QLIST_FOREACH(child, &bs->children, next) { bdrv_io_unplugged_begin(child->bs); } } | 19,385 |
0 | static void acpi_get_pm_info(AcpiPmInfo *pm) { Object *piix = piix4_pm_find(); Object *lpc = ich9_lpc_find(); Object *obj = NULL; QObject *o; pm->cpu_hp_io_base = 0; pm->pcihp_io_base = 0; pm->pcihp_io_len = 0; if (piix) { obj = piix; pm->cpu_hp_io_base = PIIX4_CPU_HOTPLUG_IO_BASE; pm->pcihp_io_base = object_property_get_int(obj, ACPI_PCIHP_IO_BASE_PROP, NULL); pm->pcihp_io_len = object_property_get_int(obj, ACPI_PCIHP_IO_LEN_PROP, NULL); } if (lpc) { obj = lpc; pm->cpu_hp_io_base = ICH9_CPU_HOTPLUG_IO_BASE; } assert(obj); pm->cpu_hp_io_len = ACPI_GPE_PROC_LEN; pm->mem_hp_io_base = ACPI_MEMORY_HOTPLUG_BASE; pm->mem_hp_io_len = ACPI_MEMORY_HOTPLUG_IO_LEN; /* Fill in optional s3/s4 related properties */ o = object_property_get_qobject(obj, ACPI_PM_PROP_S3_DISABLED, NULL); if (o) { pm->s3_disabled = qint_get_int(qobject_to_qint(o)); } else { pm->s3_disabled = false; } qobject_decref(o); o = object_property_get_qobject(obj, ACPI_PM_PROP_S4_DISABLED, NULL); if (o) { pm->s4_disabled = qint_get_int(qobject_to_qint(o)); } else { pm->s4_disabled = false; } qobject_decref(o); o = object_property_get_qobject(obj, ACPI_PM_PROP_S4_VAL, NULL); if (o) { pm->s4_val = qint_get_int(qobject_to_qint(o)); } else { pm->s4_val = false; } qobject_decref(o); /* Fill in mandatory properties */ pm->sci_int = object_property_get_int(obj, ACPI_PM_PROP_SCI_INT, NULL); pm->acpi_enable_cmd = object_property_get_int(obj, ACPI_PM_PROP_ACPI_ENABLE_CMD, NULL); pm->acpi_disable_cmd = object_property_get_int(obj, ACPI_PM_PROP_ACPI_DISABLE_CMD, NULL); pm->io_base = object_property_get_int(obj, ACPI_PM_PROP_PM_IO_BASE, NULL); pm->gpe0_blk = object_property_get_int(obj, ACPI_PM_PROP_GPE0_BLK, NULL); pm->gpe0_blk_len = object_property_get_int(obj, ACPI_PM_PROP_GPE0_BLK_LEN, NULL); pm->pcihp_bridge_en = object_property_get_bool(obj, "acpi-pci-hotplug-with-bridge-support", NULL); } | 19,386 |
0 | static void icp_pit_write(void *opaque, target_phys_addr_t offset, uint64_t value, unsigned size) { icp_pit_state *s = (icp_pit_state *)opaque; int n; n = offset >> 8; if (n > 2) { hw_error("%s: Bad timer %d\n", __func__, n); } arm_timer_write(s->timer[n], offset & 0xff, value); } | 19,387 |
0 | static ram_addr_t get_current_ram_size(void) { GSList *list = NULL, *item; ram_addr_t size = ram_size; pc_dimm_build_list(qdev_get_machine(), &list); for (item = list; item; item = g_slist_next(item)) { Object *obj = OBJECT(item->data); size += object_property_get_int(obj, PC_DIMM_SIZE_PROP, &error_abort); } g_slist_free(list); return size; } | 19,388 |
0 | void gen_intermediate_code_internal_a64(ARMCPU *cpu, TranslationBlock *tb, bool search_pc) { CPUState *cs = CPU(cpu); CPUARMState *env = &cpu->env; DisasContext dc1, *dc = &dc1; CPUBreakpoint *bp; int j, lj; target_ulong pc_start; target_ulong next_page_start; int num_insns; int max_insns; pc_start = tb->pc; dc->tb = tb; dc->is_jmp = DISAS_NEXT; dc->pc = pc_start; dc->singlestep_enabled = cs->singlestep_enabled; dc->condjmp = 0; dc->aarch64 = 1; dc->el3_is_aa64 = arm_el_is_aa64(env, 3); dc->thumb = 0; dc->bswap_code = 0; dc->condexec_mask = 0; dc->condexec_cond = 0; dc->mmu_idx = ARM_TBFLAG_MMUIDX(tb->flags); dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx); #if !defined(CONFIG_USER_ONLY) dc->user = (dc->current_el == 0); #endif dc->cpacr_fpen = ARM_TBFLAG_AA64_FPEN(tb->flags); dc->vec_len = 0; dc->vec_stride = 0; dc->cp_regs = cpu->cp_regs; dc->features = env->features; /* Single step state. The code-generation logic here is: * SS_ACTIVE == 0: * generate code with no special handling for single-stepping (except * that anything that can make us go to SS_ACTIVE == 1 must end the TB; * this happens anyway because those changes are all system register or * PSTATE writes). * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending) * emit code for one insn * emit code to clear PSTATE.SS * emit code to generate software step exception for completed step * end TB (as usual for having generated an exception) * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending) * emit code to generate a software step exception * end the TB */ dc->ss_active = ARM_TBFLAG_SS_ACTIVE(tb->flags); dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(tb->flags); dc->is_ldex = false; dc->ss_same_el = (arm_debug_target_el(env) == dc->current_el); init_tmp_a64_array(dc); next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE; lj = -1; num_insns = 0; max_insns = tb->cflags & CF_COUNT_MASK; if (max_insns == 0) { max_insns = CF_COUNT_MASK; } gen_tb_start(tb); tcg_clear_temp_count(); do { if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) { QTAILQ_FOREACH(bp, &cs->breakpoints, entry) { if (bp->pc == dc->pc) { gen_exception_internal_insn(dc, 0, EXCP_DEBUG); /* Advance PC so that clearing the breakpoint will invalidate this TB. */ dc->pc += 2; goto done_generating; } } } if (search_pc) { j = tcg_op_buf_count(); if (lj < j) { lj++; while (lj < j) { tcg_ctx.gen_opc_instr_start[lj++] = 0; } } tcg_ctx.gen_opc_pc[lj] = dc->pc; tcg_ctx.gen_opc_instr_start[lj] = 1; tcg_ctx.gen_opc_icount[lj] = num_insns; } if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) { gen_io_start(); } if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) { tcg_gen_debug_insn_start(dc->pc); } if (dc->ss_active && !dc->pstate_ss) { /* Singlestep state is Active-pending. * If we're in this state at the start of a TB then either * a) we just took an exception to an EL which is being debugged * and this is the first insn in the exception handler * b) debug exceptions were masked and we just unmasked them * without changing EL (eg by clearing PSTATE.D) * In either case we're going to take a swstep exception in the * "did not step an insn" case, and so the syndrome ISV and EX * bits should be zero. */ assert(num_insns == 0); gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0), default_exception_el(dc)); dc->is_jmp = DISAS_EXC; break; } disas_a64_insn(env, dc); if (tcg_check_temp_count()) { fprintf(stderr, "TCG temporary leak before "TARGET_FMT_lx"\n", dc->pc); } /* Translation stops when a conditional branch is encountered. * Otherwise the subsequent code could get translated several times. * Also stop translation when a page boundary is reached. This * ensures prefetch aborts occur at the right place. */ num_insns++; } while (!dc->is_jmp && !tcg_op_buf_full() && !cs->singlestep_enabled && !singlestep && !dc->ss_active && dc->pc < next_page_start && num_insns < max_insns); if (tb->cflags & CF_LAST_IO) { gen_io_end(); } if (unlikely(cs->singlestep_enabled || dc->ss_active) && dc->is_jmp != DISAS_EXC) { /* Note that this means single stepping WFI doesn't halt the CPU. * For conditional branch insns this is harmless unreachable code as * gen_goto_tb() has already handled emitting the debug exception * (and thus a tb-jump is not possible when singlestepping). */ assert(dc->is_jmp != DISAS_TB_JUMP); if (dc->is_jmp != DISAS_JUMP) { gen_a64_set_pc_im(dc->pc); } if (cs->singlestep_enabled) { gen_exception_internal(EXCP_DEBUG); } else { gen_step_complete_exception(dc); } } else { switch (dc->is_jmp) { case DISAS_NEXT: gen_goto_tb(dc, 1, dc->pc); break; default: case DISAS_UPDATE: gen_a64_set_pc_im(dc->pc); /* fall through */ case DISAS_JUMP: /* indicate that the hash table must be used to find the next TB */ tcg_gen_exit_tb(0); break; case DISAS_TB_JUMP: case DISAS_EXC: case DISAS_SWI: break; case DISAS_WFE: gen_a64_set_pc_im(dc->pc); gen_helper_wfe(cpu_env); break; case DISAS_WFI: /* This is a special case because we don't want to just halt the CPU * if trying to debug across a WFI. */ gen_a64_set_pc_im(dc->pc); gen_helper_wfi(cpu_env); break; } } done_generating: gen_tb_end(tb, num_insns); #ifdef DEBUG_DISAS if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) { qemu_log("----------------\n"); qemu_log("IN: %s\n", lookup_symbol(pc_start)); log_target_disas(env, pc_start, dc->pc - pc_start, 4 | (dc->bswap_code << 1)); qemu_log("\n"); } #endif if (search_pc) { j = tcg_op_buf_count(); lj++; while (lj <= j) { tcg_ctx.gen_opc_instr_start[lj++] = 0; } } else { tb->size = dc->pc - pc_start; tb->icount = num_insns; } } | 19,390 |
0 | static void gen_farith (DisasContext *ctx, uint32_t op1, int ft, int fs, int fd, int cc) { const char *opn = "farith"; const char *condnames[] = { "c.f", "c.un", "c.eq", "c.ueq", "c.olt", "c.ult", "c.ole", "c.ule", "c.sf", "c.ngle", "c.seq", "c.ngl", "c.lt", "c.nge", "c.le", "c.ngt", }; const char *condnames_abs[] = { "cabs.f", "cabs.un", "cabs.eq", "cabs.ueq", "cabs.olt", "cabs.ult", "cabs.ole", "cabs.ule", "cabs.sf", "cabs.ngle", "cabs.seq", "cabs.ngl", "cabs.lt", "cabs.nge", "cabs.le", "cabs.ngt", }; enum { BINOP, CMPOP, OTHEROP } optype = OTHEROP; uint32_t func = ctx->opcode & 0x3f; switch (ctx->opcode & FOP(0x3f, 0x1f)) { case FOP(0, 16): GEN_LOAD_FREG_FTN(WT0, fs); GEN_LOAD_FREG_FTN(WT1, ft); gen_op_float_add_s(); GEN_STORE_FTN_FREG(fd, WT2); opn = "add.s"; optype = BINOP; break; case FOP(1, 16): GEN_LOAD_FREG_FTN(WT0, fs); GEN_LOAD_FREG_FTN(WT1, ft); gen_op_float_sub_s(); GEN_STORE_FTN_FREG(fd, WT2); opn = "sub.s"; optype = BINOP; break; case FOP(2, 16): GEN_LOAD_FREG_FTN(WT0, fs); GEN_LOAD_FREG_FTN(WT1, ft); gen_op_float_mul_s(); GEN_STORE_FTN_FREG(fd, WT2); opn = "mul.s"; optype = BINOP; break; case FOP(3, 16): GEN_LOAD_FREG_FTN(WT0, fs); GEN_LOAD_FREG_FTN(WT1, ft); gen_op_float_div_s(); GEN_STORE_FTN_FREG(fd, WT2); opn = "div.s"; optype = BINOP; break; case FOP(4, 16): GEN_LOAD_FREG_FTN(WT0, fs); gen_op_float_sqrt_s(); GEN_STORE_FTN_FREG(fd, WT2); opn = "sqrt.s"; break; case FOP(5, 16): GEN_LOAD_FREG_FTN(WT0, fs); gen_op_float_abs_s(); GEN_STORE_FTN_FREG(fd, WT2); opn = "abs.s"; break; case FOP(6, 16): GEN_LOAD_FREG_FTN(WT0, fs); gen_op_float_mov_s(); GEN_STORE_FTN_FREG(fd, WT2); opn = "mov.s"; break; case FOP(7, 16): GEN_LOAD_FREG_FTN(WT0, fs); gen_op_float_chs_s(); GEN_STORE_FTN_FREG(fd, WT2); opn = "neg.s"; break; case FOP(8, 16): gen_op_cp1_64bitmode(); GEN_LOAD_FREG_FTN(WT0, fs); gen_op_float_roundl_s(); GEN_STORE_FTN_FREG(fd, DT2); opn = "round.l.s"; break; case FOP(9, 16): gen_op_cp1_64bitmode(); GEN_LOAD_FREG_FTN(WT0, fs); gen_op_float_truncl_s(); GEN_STORE_FTN_FREG(fd, DT2); opn = "trunc.l.s"; break; case FOP(10, 16): gen_op_cp1_64bitmode(); GEN_LOAD_FREG_FTN(WT0, fs); gen_op_float_ceill_s(); GEN_STORE_FTN_FREG(fd, DT2); opn = "ceil.l.s"; break; case FOP(11, 16): gen_op_cp1_64bitmode(); GEN_LOAD_FREG_FTN(WT0, fs); gen_op_float_floorl_s(); GEN_STORE_FTN_FREG(fd, DT2); opn = "floor.l.s"; break; case FOP(12, 16): GEN_LOAD_FREG_FTN(WT0, fs); gen_op_float_roundw_s(); GEN_STORE_FTN_FREG(fd, WT2); opn = "round.w.s"; break; case FOP(13, 16): GEN_LOAD_FREG_FTN(WT0, fs); gen_op_float_truncw_s(); GEN_STORE_FTN_FREG(fd, WT2); opn = "trunc.w.s"; break; case FOP(14, 16): GEN_LOAD_FREG_FTN(WT0, fs); gen_op_float_ceilw_s(); GEN_STORE_FTN_FREG(fd, WT2); opn = "ceil.w.s"; break; case FOP(15, 16): GEN_LOAD_FREG_FTN(WT0, fs); gen_op_float_floorw_s(); GEN_STORE_FTN_FREG(fd, WT2); opn = "floor.w.s"; break; case FOP(17, 16): GEN_LOAD_REG_TN(T0, ft); GEN_LOAD_FREG_FTN(WT0, fs); GEN_LOAD_FREG_FTN(WT2, fd); gen_movcf_s(ctx, (ft >> 2) & 0x7, ft & 0x1); GEN_STORE_FTN_FREG(fd, WT2); opn = "movcf.s"; break; case FOP(18, 16): GEN_LOAD_REG_TN(T0, ft); GEN_LOAD_FREG_FTN(WT0, fs); GEN_LOAD_FREG_FTN(WT2, fd); gen_op_float_movz_s(); GEN_STORE_FTN_FREG(fd, WT2); opn = "movz.s"; break; case FOP(19, 16): GEN_LOAD_REG_TN(T0, ft); GEN_LOAD_FREG_FTN(WT0, fs); GEN_LOAD_FREG_FTN(WT2, fd); gen_op_float_movn_s(); GEN_STORE_FTN_FREG(fd, WT2); opn = "movn.s"; break; case FOP(21, 16): GEN_LOAD_FREG_FTN(WT0, fs); gen_op_float_recip_s(); GEN_STORE_FTN_FREG(fd, WT2); opn = "recip.s"; break; case FOP(22, 16): GEN_LOAD_FREG_FTN(WT0, fs); gen_op_float_rsqrt_s(); GEN_STORE_FTN_FREG(fd, WT2); opn = "rsqrt.s"; break; case FOP(28, 16): gen_op_cp1_64bitmode(); GEN_LOAD_FREG_FTN(WT0, fs); GEN_LOAD_FREG_FTN(WT2, fd); gen_op_float_recip2_s(); GEN_STORE_FTN_FREG(fd, WT2); opn = "recip2.s"; break; case FOP(29, 16): gen_op_cp1_64bitmode(); GEN_LOAD_FREG_FTN(WT0, fs); gen_op_float_recip1_s(); GEN_STORE_FTN_FREG(fd, WT2); opn = "recip1.s"; break; case FOP(30, 16): gen_op_cp1_64bitmode(); GEN_LOAD_FREG_FTN(WT0, fs); gen_op_float_rsqrt1_s(); GEN_STORE_FTN_FREG(fd, WT2); opn = "rsqrt1.s"; break; case FOP(31, 16): gen_op_cp1_64bitmode(); GEN_LOAD_FREG_FTN(WT0, fs); GEN_LOAD_FREG_FTN(WT2, fd); gen_op_float_rsqrt2_s(); GEN_STORE_FTN_FREG(fd, WT2); opn = "rsqrt2.s"; break; case FOP(33, 16): gen_op_cp1_registers(fd); GEN_LOAD_FREG_FTN(WT0, fs); gen_op_float_cvtd_s(); GEN_STORE_FTN_FREG(fd, DT2); opn = "cvt.d.s"; break; case FOP(36, 16): GEN_LOAD_FREG_FTN(WT0, fs); gen_op_float_cvtw_s(); GEN_STORE_FTN_FREG(fd, WT2); opn = "cvt.w.s"; break; case FOP(37, 16): gen_op_cp1_64bitmode(); GEN_LOAD_FREG_FTN(WT0, fs); gen_op_float_cvtl_s(); GEN_STORE_FTN_FREG(fd, DT2); opn = "cvt.l.s"; break; case FOP(38, 16): gen_op_cp1_64bitmode(); GEN_LOAD_FREG_FTN(WT1, fs); GEN_LOAD_FREG_FTN(WT0, ft); gen_op_float_cvtps_s(); GEN_STORE_FTN_FREG(fd, DT2); opn = "cvt.ps.s"; break; case FOP(48, 16): case FOP(49, 16): case FOP(50, 16): case FOP(51, 16): case FOP(52, 16): case FOP(53, 16): case FOP(54, 16): case FOP(55, 16): case FOP(56, 16): case FOP(57, 16): case FOP(58, 16): case FOP(59, 16): case FOP(60, 16): case FOP(61, 16): case FOP(62, 16): case FOP(63, 16): GEN_LOAD_FREG_FTN(WT0, fs); GEN_LOAD_FREG_FTN(WT1, ft); if (ctx->opcode & (1 << 6)) { gen_op_cp1_64bitmode(); gen_cmpabs_s(func-48, cc); opn = condnames_abs[func-48]; } else { gen_cmp_s(func-48, cc); opn = condnames[func-48]; } break; case FOP(0, 17): gen_op_cp1_registers(fs | ft | fd); GEN_LOAD_FREG_FTN(DT0, fs); GEN_LOAD_FREG_FTN(DT1, ft); gen_op_float_add_d(); GEN_STORE_FTN_FREG(fd, DT2); opn = "add.d"; optype = BINOP; break; case FOP(1, 17): gen_op_cp1_registers(fs | ft | fd); GEN_LOAD_FREG_FTN(DT0, fs); GEN_LOAD_FREG_FTN(DT1, ft); gen_op_float_sub_d(); GEN_STORE_FTN_FREG(fd, DT2); opn = "sub.d"; optype = BINOP; break; case FOP(2, 17): gen_op_cp1_registers(fs | ft | fd); GEN_LOAD_FREG_FTN(DT0, fs); GEN_LOAD_FREG_FTN(DT1, ft); gen_op_float_mul_d(); GEN_STORE_FTN_FREG(fd, DT2); opn = "mul.d"; optype = BINOP; break; case FOP(3, 17): gen_op_cp1_registers(fs | ft | fd); GEN_LOAD_FREG_FTN(DT0, fs); GEN_LOAD_FREG_FTN(DT1, ft); gen_op_float_div_d(); GEN_STORE_FTN_FREG(fd, DT2); opn = "div.d"; optype = BINOP; break; case FOP(4, 17): gen_op_cp1_registers(fs | fd); GEN_LOAD_FREG_FTN(DT0, fs); gen_op_float_sqrt_d(); GEN_STORE_FTN_FREG(fd, DT2); opn = "sqrt.d"; break; case FOP(5, 17): gen_op_cp1_registers(fs | fd); GEN_LOAD_FREG_FTN(DT0, fs); gen_op_float_abs_d(); GEN_STORE_FTN_FREG(fd, DT2); opn = "abs.d"; break; case FOP(6, 17): gen_op_cp1_registers(fs | fd); GEN_LOAD_FREG_FTN(DT0, fs); gen_op_float_mov_d(); GEN_STORE_FTN_FREG(fd, DT2); opn = "mov.d"; break; case FOP(7, 17): gen_op_cp1_registers(fs | fd); GEN_LOAD_FREG_FTN(DT0, fs); gen_op_float_chs_d(); GEN_STORE_FTN_FREG(fd, DT2); opn = "neg.d"; break; case FOP(8, 17): gen_op_cp1_64bitmode(); GEN_LOAD_FREG_FTN(DT0, fs); gen_op_float_roundl_d(); GEN_STORE_FTN_FREG(fd, DT2); opn = "round.l.d"; break; case FOP(9, 17): gen_op_cp1_64bitmode(); GEN_LOAD_FREG_FTN(DT0, fs); gen_op_float_truncl_d(); GEN_STORE_FTN_FREG(fd, DT2); opn = "trunc.l.d"; break; case FOP(10, 17): gen_op_cp1_64bitmode(); GEN_LOAD_FREG_FTN(DT0, fs); gen_op_float_ceill_d(); GEN_STORE_FTN_FREG(fd, DT2); opn = "ceil.l.d"; break; case FOP(11, 17): gen_op_cp1_64bitmode(); GEN_LOAD_FREG_FTN(DT0, fs); gen_op_float_floorl_d(); GEN_STORE_FTN_FREG(fd, DT2); opn = "floor.l.d"; break; case FOP(12, 17): gen_op_cp1_registers(fs); GEN_LOAD_FREG_FTN(DT0, fs); gen_op_float_roundw_d(); GEN_STORE_FTN_FREG(fd, WT2); opn = "round.w.d"; break; case FOP(13, 17): gen_op_cp1_registers(fs); GEN_LOAD_FREG_FTN(DT0, fs); gen_op_float_truncw_d(); GEN_STORE_FTN_FREG(fd, WT2); opn = "trunc.w.d"; break; case FOP(14, 17): gen_op_cp1_registers(fs); GEN_LOAD_FREG_FTN(DT0, fs); gen_op_float_ceilw_d(); GEN_STORE_FTN_FREG(fd, WT2); opn = "ceil.w.d"; break; case FOP(15, 17): gen_op_cp1_registers(fs); GEN_LOAD_FREG_FTN(DT0, fs); gen_op_float_floorw_d(); GEN_STORE_FTN_FREG(fd, WT2); opn = "floor.w.d"; break; case FOP(17, 17): GEN_LOAD_REG_TN(T0, ft); GEN_LOAD_FREG_FTN(DT0, fs); GEN_LOAD_FREG_FTN(DT2, fd); gen_movcf_d(ctx, (ft >> 2) & 0x7, ft & 0x1); GEN_STORE_FTN_FREG(fd, DT2); opn = "movcf.d"; break; case FOP(18, 17): GEN_LOAD_REG_TN(T0, ft); GEN_LOAD_FREG_FTN(DT0, fs); GEN_LOAD_FREG_FTN(DT2, fd); gen_op_float_movz_d(); GEN_STORE_FTN_FREG(fd, DT2); opn = "movz.d"; break; case FOP(19, 17): GEN_LOAD_REG_TN(T0, ft); GEN_LOAD_FREG_FTN(DT0, fs); GEN_LOAD_FREG_FTN(DT2, fd); gen_op_float_movn_d(); GEN_STORE_FTN_FREG(fd, DT2); opn = "movn.d"; break; case FOP(21, 17): gen_op_cp1_registers(fs | fd); GEN_LOAD_FREG_FTN(DT0, fs); gen_op_float_recip_d(); GEN_STORE_FTN_FREG(fd, DT2); opn = "recip.d"; break; case FOP(22, 17): gen_op_cp1_registers(fs | fd); GEN_LOAD_FREG_FTN(DT0, fs); gen_op_float_rsqrt_d(); GEN_STORE_FTN_FREG(fd, DT2); opn = "rsqrt.d"; break; case FOP(28, 17): gen_op_cp1_64bitmode(); GEN_LOAD_FREG_FTN(DT0, fs); GEN_LOAD_FREG_FTN(DT2, ft); gen_op_float_recip2_d(); GEN_STORE_FTN_FREG(fd, DT2); opn = "recip2.d"; break; case FOP(29, 17): gen_op_cp1_64bitmode(); GEN_LOAD_FREG_FTN(DT0, fs); gen_op_float_recip1_d(); GEN_STORE_FTN_FREG(fd, DT2); opn = "recip1.d"; break; case FOP(30, 17): gen_op_cp1_64bitmode(); GEN_LOAD_FREG_FTN(DT0, fs); gen_op_float_rsqrt1_d(); GEN_STORE_FTN_FREG(fd, DT2); opn = "rsqrt1.d"; break; case FOP(31, 17): gen_op_cp1_64bitmode(); GEN_LOAD_FREG_FTN(DT0, fs); GEN_LOAD_FREG_FTN(DT2, ft); gen_op_float_rsqrt2_d(); GEN_STORE_FTN_FREG(fd, DT2); opn = "rsqrt2.d"; break; case FOP(48, 17): case FOP(49, 17): case FOP(50, 17): case FOP(51, 17): case FOP(52, 17): case FOP(53, 17): case FOP(54, 17): case FOP(55, 17): case FOP(56, 17): case FOP(57, 17): case FOP(58, 17): case FOP(59, 17): case FOP(60, 17): case FOP(61, 17): case FOP(62, 17): case FOP(63, 17): GEN_LOAD_FREG_FTN(DT0, fs); GEN_LOAD_FREG_FTN(DT1, ft); if (ctx->opcode & (1 << 6)) { gen_op_cp1_64bitmode(); gen_cmpabs_d(func-48, cc); opn = condnames_abs[func-48]; } else { gen_op_cp1_registers(fs | ft); gen_cmp_d(func-48, cc); opn = condnames[func-48]; } break; case FOP(32, 17): gen_op_cp1_registers(fs); GEN_LOAD_FREG_FTN(DT0, fs); gen_op_float_cvts_d(); GEN_STORE_FTN_FREG(fd, WT2); opn = "cvt.s.d"; break; case FOP(36, 17): gen_op_cp1_registers(fs); GEN_LOAD_FREG_FTN(DT0, fs); gen_op_float_cvtw_d(); GEN_STORE_FTN_FREG(fd, WT2); opn = "cvt.w.d"; break; case FOP(37, 17): gen_op_cp1_64bitmode(); GEN_LOAD_FREG_FTN(DT0, fs); gen_op_float_cvtl_d(); GEN_STORE_FTN_FREG(fd, DT2); opn = "cvt.l.d"; break; case FOP(32, 20): GEN_LOAD_FREG_FTN(WT0, fs); gen_op_float_cvts_w(); GEN_STORE_FTN_FREG(fd, WT2); opn = "cvt.s.w"; break; case FOP(33, 20): gen_op_cp1_registers(fd); GEN_LOAD_FREG_FTN(WT0, fs); gen_op_float_cvtd_w(); GEN_STORE_FTN_FREG(fd, DT2); opn = "cvt.d.w"; break; case FOP(32, 21): gen_op_cp1_64bitmode(); GEN_LOAD_FREG_FTN(DT0, fs); gen_op_float_cvts_l(); GEN_STORE_FTN_FREG(fd, WT2); opn = "cvt.s.l"; break; case FOP(33, 21): gen_op_cp1_64bitmode(); GEN_LOAD_FREG_FTN(DT0, fs); gen_op_float_cvtd_l(); GEN_STORE_FTN_FREG(fd, DT2); opn = "cvt.d.l"; break; case FOP(38, 20): case FOP(38, 21): gen_op_cp1_64bitmode(); GEN_LOAD_FREG_FTN(WT0, fs); GEN_LOAD_FREG_FTN(WTH0, fs); gen_op_float_cvtps_pw(); GEN_STORE_FTN_FREG(fd, WT2); GEN_STORE_FTN_FREG(fd, WTH2); opn = "cvt.ps.pw"; break; case FOP(0, 22): gen_op_cp1_64bitmode(); GEN_LOAD_FREG_FTN(WT0, fs); GEN_LOAD_FREG_FTN(WTH0, fs); GEN_LOAD_FREG_FTN(WT1, ft); GEN_LOAD_FREG_FTN(WTH1, ft); gen_op_float_add_ps(); GEN_STORE_FTN_FREG(fd, WT2); GEN_STORE_FTN_FREG(fd, WTH2); opn = "add.ps"; break; case FOP(1, 22): gen_op_cp1_64bitmode(); GEN_LOAD_FREG_FTN(WT0, fs); GEN_LOAD_FREG_FTN(WTH0, fs); GEN_LOAD_FREG_FTN(WT1, ft); GEN_LOAD_FREG_FTN(WTH1, ft); gen_op_float_sub_ps(); GEN_STORE_FTN_FREG(fd, WT2); GEN_STORE_FTN_FREG(fd, WTH2); opn = "sub.ps"; break; case FOP(2, 22): gen_op_cp1_64bitmode(); GEN_LOAD_FREG_FTN(WT0, fs); GEN_LOAD_FREG_FTN(WTH0, fs); GEN_LOAD_FREG_FTN(WT1, ft); GEN_LOAD_FREG_FTN(WTH1, ft); gen_op_float_mul_ps(); GEN_STORE_FTN_FREG(fd, WT2); GEN_STORE_FTN_FREG(fd, WTH2); opn = "mul.ps"; break; case FOP(5, 22): gen_op_cp1_64bitmode(); GEN_LOAD_FREG_FTN(WT0, fs); GEN_LOAD_FREG_FTN(WTH0, fs); gen_op_float_abs_ps(); GEN_STORE_FTN_FREG(fd, WT2); GEN_STORE_FTN_FREG(fd, WTH2); opn = "abs.ps"; break; case FOP(6, 22): gen_op_cp1_64bitmode(); GEN_LOAD_FREG_FTN(WT0, fs); GEN_LOAD_FREG_FTN(WTH0, fs); gen_op_float_mov_ps(); GEN_STORE_FTN_FREG(fd, WT2); GEN_STORE_FTN_FREG(fd, WTH2); opn = "mov.ps"; break; case FOP(7, 22): gen_op_cp1_64bitmode(); GEN_LOAD_FREG_FTN(WT0, fs); GEN_LOAD_FREG_FTN(WTH0, fs); gen_op_float_chs_ps(); GEN_STORE_FTN_FREG(fd, WT2); GEN_STORE_FTN_FREG(fd, WTH2); opn = "neg.ps"; break; case FOP(17, 22): gen_op_cp1_64bitmode(); GEN_LOAD_REG_TN(T0, ft); GEN_LOAD_FREG_FTN(WT0, fs); GEN_LOAD_FREG_FTN(WTH0, fs); GEN_LOAD_FREG_FTN(WT2, fd); GEN_LOAD_FREG_FTN(WTH2, fd); gen_movcf_ps(ctx, (ft >> 2) & 0x7, ft & 0x1); GEN_STORE_FTN_FREG(fd, WT2); GEN_STORE_FTN_FREG(fd, WTH2); opn = "movcf.ps"; break; case FOP(18, 22): gen_op_cp1_64bitmode(); GEN_LOAD_REG_TN(T0, ft); GEN_LOAD_FREG_FTN(WT0, fs); GEN_LOAD_FREG_FTN(WTH0, fs); GEN_LOAD_FREG_FTN(WT2, fd); GEN_LOAD_FREG_FTN(WTH2, fd); gen_op_float_movz_ps(); GEN_STORE_FTN_FREG(fd, WT2); GEN_STORE_FTN_FREG(fd, WTH2); opn = "movz.ps"; break; case FOP(19, 22): gen_op_cp1_64bitmode(); GEN_LOAD_REG_TN(T0, ft); GEN_LOAD_FREG_FTN(WT0, fs); GEN_LOAD_FREG_FTN(WTH0, fs); GEN_LOAD_FREG_FTN(WT2, fd); GEN_LOAD_FREG_FTN(WTH2, fd); gen_op_float_movn_ps(); GEN_STORE_FTN_FREG(fd, WT2); GEN_STORE_FTN_FREG(fd, WTH2); opn = "movn.ps"; break; case FOP(24, 22): gen_op_cp1_64bitmode(); GEN_LOAD_FREG_FTN(WT0, ft); GEN_LOAD_FREG_FTN(WTH0, ft); GEN_LOAD_FREG_FTN(WT1, fs); GEN_LOAD_FREG_FTN(WTH1, fs); gen_op_float_addr_ps(); GEN_STORE_FTN_FREG(fd, WT2); GEN_STORE_FTN_FREG(fd, WTH2); opn = "addr.ps"; break; case FOP(26, 22): gen_op_cp1_64bitmode(); GEN_LOAD_FREG_FTN(WT0, ft); GEN_LOAD_FREG_FTN(WTH0, ft); GEN_LOAD_FREG_FTN(WT1, fs); GEN_LOAD_FREG_FTN(WTH1, fs); gen_op_float_mulr_ps(); GEN_STORE_FTN_FREG(fd, WT2); GEN_STORE_FTN_FREG(fd, WTH2); opn = "mulr.ps"; break; case FOP(28, 22): gen_op_cp1_64bitmode(); GEN_LOAD_FREG_FTN(WT0, fs); GEN_LOAD_FREG_FTN(WTH0, fs); GEN_LOAD_FREG_FTN(WT2, fd); GEN_LOAD_FREG_FTN(WTH2, fd); gen_op_float_recip2_ps(); GEN_STORE_FTN_FREG(fd, WT2); GEN_STORE_FTN_FREG(fd, WTH2); opn = "recip2.ps"; break; case FOP(29, 22): gen_op_cp1_64bitmode(); GEN_LOAD_FREG_FTN(WT0, fs); GEN_LOAD_FREG_FTN(WTH0, fs); gen_op_float_recip1_ps(); GEN_STORE_FTN_FREG(fd, WT2); GEN_STORE_FTN_FREG(fd, WTH2); opn = "recip1.ps"; break; case FOP(30, 22): gen_op_cp1_64bitmode(); GEN_LOAD_FREG_FTN(WT0, fs); GEN_LOAD_FREG_FTN(WTH0, fs); gen_op_float_rsqrt1_ps(); GEN_STORE_FTN_FREG(fd, WT2); GEN_STORE_FTN_FREG(fd, WTH2); opn = "rsqrt1.ps"; break; case FOP(31, 22): gen_op_cp1_64bitmode(); GEN_LOAD_FREG_FTN(WT0, fs); GEN_LOAD_FREG_FTN(WTH0, fs); GEN_LOAD_FREG_FTN(WT2, fd); GEN_LOAD_FREG_FTN(WTH2, fd); gen_op_float_rsqrt2_ps(); GEN_STORE_FTN_FREG(fd, WT2); GEN_STORE_FTN_FREG(fd, WTH2); opn = "rsqrt2.ps"; break; case FOP(32, 22): gen_op_cp1_64bitmode(); GEN_LOAD_FREG_FTN(WTH0, fs); gen_op_float_cvts_pu(); GEN_STORE_FTN_FREG(fd, WT2); opn = "cvt.s.pu"; break; case FOP(36, 22): gen_op_cp1_64bitmode(); GEN_LOAD_FREG_FTN(WT0, fs); GEN_LOAD_FREG_FTN(WTH0, fs); gen_op_float_cvtpw_ps(); GEN_STORE_FTN_FREG(fd, WT2); GEN_STORE_FTN_FREG(fd, WTH2); opn = "cvt.pw.ps"; break; case FOP(40, 22): gen_op_cp1_64bitmode(); GEN_LOAD_FREG_FTN(WT0, fs); gen_op_float_cvts_pl(); GEN_STORE_FTN_FREG(fd, WT2); opn = "cvt.s.pl"; break; case FOP(44, 22): gen_op_cp1_64bitmode(); GEN_LOAD_FREG_FTN(WT0, fs); GEN_LOAD_FREG_FTN(WT1, ft); gen_op_float_pll_ps(); GEN_STORE_FTN_FREG(fd, DT2); opn = "pll.ps"; break; case FOP(45, 22): gen_op_cp1_64bitmode(); GEN_LOAD_FREG_FTN(WT0, fs); GEN_LOAD_FREG_FTN(WTH1, ft); gen_op_float_plu_ps(); GEN_STORE_FTN_FREG(fd, DT2); opn = "plu.ps"; break; case FOP(46, 22): gen_op_cp1_64bitmode(); GEN_LOAD_FREG_FTN(WTH0, fs); GEN_LOAD_FREG_FTN(WT1, ft); gen_op_float_pul_ps(); GEN_STORE_FTN_FREG(fd, DT2); opn = "pul.ps"; break; case FOP(47, 22): gen_op_cp1_64bitmode(); GEN_LOAD_FREG_FTN(WTH0, fs); GEN_LOAD_FREG_FTN(WTH1, ft); gen_op_float_puu_ps(); GEN_STORE_FTN_FREG(fd, DT2); opn = "puu.ps"; break; case FOP(48, 22): case FOP(49, 22): case FOP(50, 22): case FOP(51, 22): case FOP(52, 22): case FOP(53, 22): case FOP(54, 22): case FOP(55, 22): case FOP(56, 22): case FOP(57, 22): case FOP(58, 22): case FOP(59, 22): case FOP(60, 22): case FOP(61, 22): case FOP(62, 22): case FOP(63, 22): gen_op_cp1_64bitmode(); GEN_LOAD_FREG_FTN(WT0, fs); GEN_LOAD_FREG_FTN(WTH0, fs); GEN_LOAD_FREG_FTN(WT1, ft); GEN_LOAD_FREG_FTN(WTH1, ft); if (ctx->opcode & (1 << 6)) { gen_cmpabs_ps(func-48, cc); opn = condnames_abs[func-48]; } else { gen_cmp_ps(func-48, cc); opn = condnames[func-48]; } break; default: MIPS_INVAL(opn); generate_exception (ctx, EXCP_RI); return; } switch (optype) { case BINOP: MIPS_DEBUG("%s %s, %s, %s", opn, fregnames[fd], fregnames[fs], fregnames[ft]); break; case CMPOP: MIPS_DEBUG("%s %s,%s", opn, fregnames[fs], fregnames[ft]); break; default: MIPS_DEBUG("%s %s,%s", opn, fregnames[fd], fregnames[fs]); break; } } | 19,391 |
0 | static int count_contiguous_clusters_unallocated(int nb_clusters, uint64_t *l2_table, QCow2ClusterType wanted_type) { int i; assert(wanted_type == QCOW2_CLUSTER_ZERO || wanted_type == QCOW2_CLUSTER_UNALLOCATED); for (i = 0; i < nb_clusters; i++) { uint64_t entry = be64_to_cpu(l2_table[i]); QCow2ClusterType type = qcow2_get_cluster_type(entry); if (type != wanted_type || entry & L2E_OFFSET_MASK) { break; } } return i; } | 19,392 |
1 | static void rv34_idct_dc_add_c(uint8_t *dst, ptrdiff_t stride, int dc) { const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP; int i, j; cm += (13*13*dc + 0x200) >> 10; for (i = 0; i < 4; i++) { for (j = 0; j < 4; j++) dst[j] = cm[ dst[j] ]; dst += stride; } } | 19,393 |
1 | qemu_irq *armv7m_init(MemoryRegion *system_memory, int mem_size, int num_irq, const char *kernel_filename, const char *cpu_model) { ARMCPU *cpu; CPUARMState *env; DeviceState *nvic; qemu_irq *pic = g_new(qemu_irq, num_irq); int image_size; uint64_t entry; uint64_t lowaddr; int i; int big_endian; MemoryRegion *hack = g_new(MemoryRegion, 1); if (cpu_model == NULL) { cpu_model = "cortex-m3"; } cpu = cpu_arm_init(cpu_model); if (cpu == NULL) { fprintf(stderr, "Unable to find CPU definition\n"); exit(1); } env = &cpu->env; armv7m_bitband_init(); nvic = qdev_create(NULL, "armv7m_nvic"); qdev_prop_set_uint32(nvic, "num-irq", num_irq); env->nvic = nvic; qdev_init_nofail(nvic); sysbus_connect_irq(SYS_BUS_DEVICE(nvic), 0, qdev_get_gpio_in(DEVICE(cpu), ARM_CPU_IRQ)); for (i = 0; i < num_irq; i++) { pic[i] = qdev_get_gpio_in(nvic, i); } #ifdef TARGET_WORDS_BIGENDIAN big_endian = 1; #else big_endian = 0; #endif if (!kernel_filename && !qtest_enabled()) { fprintf(stderr, "Guest image must be specified (using -kernel)\n"); exit(1); } if (kernel_filename) { image_size = load_elf(kernel_filename, NULL, NULL, &entry, &lowaddr, NULL, big_endian, ELF_MACHINE, 1); if (image_size < 0) { image_size = load_image_targphys(kernel_filename, 0, mem_size); lowaddr = 0; } if (image_size < 0) { error_report("Could not load kernel '%s'", kernel_filename); exit(1); } } /* Hack to map an additional page of ram at the top of the address space. This stops qemu complaining about executing code outside RAM when returning from an exception. */ memory_region_init_ram(hack, NULL, "armv7m.hack", 0x1000, &error_abort); vmstate_register_ram_global(hack); memory_region_add_subregion(system_memory, 0xfffff000, hack); qemu_register_reset(armv7m_reset, cpu); return pic; } | 19,394 |
1 | int qemu_show_nic_models(const char *arg, const char *const *models) { int i; if (!arg || strcmp(arg, "?")) return 0; fprintf(stderr, "qemu: Supported NIC models: "); for (i = 0 ; models[i]; i++) fprintf(stderr, "%s%c", models[i], models[i+1] ? ',' : '\n'); return 1; } | 19,395 |
1 | void SwScale_Init(){ // generating tables: int i; for(i=0; i<768; i++){ int c= MIN(MAX(i-256, 0), 255); clip_table[i]=c; yuvtab_2568[c]= clip_yuvtab_2568[i]=(0x2568*(c-16))+(256<<13); yuvtab_3343[c]= clip_yuvtab_3343[i]=0x3343*(c-128); yuvtab_0c92[c]= clip_yuvtab_0c92[i]=-0x0c92*(c-128); yuvtab_1a1e[c]= clip_yuvtab_1a1e[i]=-0x1a1e*(c-128); yuvtab_40cf[c]= clip_yuvtab_40cf[i]=0x40cf*(c-128); } for(i=0; i<768; i++) { int v= clip_table[i]; clip_table16b[i]= v>>3; clip_table16g[i]= (v<<3)&0x07E0; clip_table16r[i]= (v<<8)&0xF800; clip_table15b[i]= v>>3; clip_table15g[i]= (v<<2)&0x03E0; clip_table15r[i]= (v<<7)&0x7C00; } } | 19,396 |
1 | void openrisc_cpu_do_interrupt(CPUState *cs) { #ifndef CONFIG_USER_ONLY OpenRISCCPU *cpu = OPENRISC_CPU(cs); CPUOpenRISCState *env = &cpu->env; env->epcr = env->pc; if (env->flags & D_FLAG) { env->flags &= ~D_FLAG; env->sr |= SR_DSX; env->epcr -= 4; if (cs->exception_index == EXCP_SYSCALL) { env->epcr += 4; /* For machine-state changed between user-mode and supervisor mode, we need flush TLB when we enter&exit EXCP. */ tlb_flush(cs); env->esr = env->sr; env->sr &= ~SR_DME; env->sr &= ~SR_IME; env->sr |= SR_SM; env->sr &= ~SR_IEE; env->sr &= ~SR_TEE; env->tlb->cpu_openrisc_map_address_data = &cpu_openrisc_get_phys_nommu; env->tlb->cpu_openrisc_map_address_code = &cpu_openrisc_get_phys_nommu; if (cs->exception_index > 0 && cs->exception_index < EXCP_NR) { env->pc = (cs->exception_index << 8); cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index); #endif cs->exception_index = -1; | 19,397 |
1 | static char *qemu_rbd_array_opts(QDict *options, const char *prefix, int type, Error **errp) { int num_entries; QemuOpts *opts = NULL; QDict *sub_options; const char *host; const char *port; char *str; char *rados_str = NULL; Error *local_err = NULL; int i; assert(type == RBD_MON_HOST); num_entries = qdict_array_entries(options, prefix); if (num_entries < 0) { error_setg(errp, "Parse error on RBD QDict array"); return NULL; } for (i = 0; i < num_entries; i++) { char *strbuf = NULL; const char *value; char *rados_str_tmp; str = g_strdup_printf("%s%d.", prefix, i); qdict_extract_subqdict(options, &sub_options, str); g_free(str); opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort); qemu_opts_absorb_qdict(opts, sub_options, &local_err); QDECREF(sub_options); if (local_err) { error_propagate(errp, local_err); g_free(rados_str); rados_str = NULL; goto exit; } if (type == RBD_MON_HOST) { host = qemu_opt_get(opts, "host"); port = qemu_opt_get(opts, "port"); value = host; if (port) { /* check for ipv6 */ if (strchr(host, ':')) { strbuf = g_strdup_printf("[%s]:%s", host, port); } else { strbuf = g_strdup_printf("%s:%s", host, port); } value = strbuf; } else if (strchr(host, ':')) { strbuf = g_strdup_printf("[%s]", host); value = strbuf; } } else { abort(); } /* each iteration in the for loop will build upon the string, and if * rados_str is NULL then it is our first pass */ if (rados_str) { /* separate options with ';', as that is what rados_conf_set() * requires */ rados_str_tmp = rados_str; rados_str = g_strdup_printf("%s;%s", rados_str_tmp, value); g_free(rados_str_tmp); } else { rados_str = g_strdup(value); } g_free(strbuf); qemu_opts_del(opts); opts = NULL; } exit: qemu_opts_del(opts); return rados_str; } | 19,398 |
1 | static float *put_vector(vorbis_enc_codebook *book, PutBitContext *pb, float *num) { int i, entry = -1; float distance = FLT_MAX; assert(book->dimentions); for (i = 0; i < book->nentries; i++) { float * vec = book->dimentions + i * book->ndimentions, d = book->pow2[i]; int j; if (!book->lens[i]) continue; for (j = 0; j < book->ndimentions; j++) d -= vec[j] * num[j]; if (distance > d) { entry = i; distance = d; } } put_codeword(pb, book, entry); return &book->dimentions[entry * book->ndimentions]; } | 19,399 |
1 | static int mov_write_single_packet(AVFormatContext *s, AVPacket *pkt) { MOVMuxContext *mov = s->priv_data; MOVTrack *trk = &mov->tracks[pkt->stream_index]; AVCodecParameters *par = trk->par; int64_t frag_duration = 0; int size = pkt->size; if (mov->flags & FF_MOV_FLAG_FRAG_DISCONT) { int i; for (i = 0; i < s->nb_streams; i++) mov->tracks[i].frag_discont = 1; mov->flags &= ~FF_MOV_FLAG_FRAG_DISCONT; } if (!pkt->size) { if (trk->start_dts == AV_NOPTS_VALUE && trk->frag_discont) { trk->start_dts = pkt->dts; if (pkt->pts != AV_NOPTS_VALUE) trk->start_cts = pkt->pts - pkt->dts; else trk->start_cts = 0; } if (trk->par->codec_id == AV_CODEC_ID_MP4ALS) { int side_size = 0; uint8_t *side = av_packet_get_side_data(pkt, AV_PKT_DATA_NEW_EXTRADATA, &side_size); if (side && side_size > 0 && (side_size != par->extradata_size || memcmp(side, par->extradata, side_size))) { void *newextra = av_mallocz(side_size + AV_INPUT_BUFFER_PADDING_SIZE); if (!newextra) return AVERROR(ENOMEM); av_free(par->extradata); par->extradata = newextra; memcpy(par->extradata, side, side_size); par->extradata_size = side_size; mov->need_rewrite_extradata = 1; } } return 0; /* Discard 0 sized packets */ } if (trk->entry && pkt->stream_index < s->nb_streams) frag_duration = av_rescale_q(pkt->dts - trk->cluster[0].dts, s->streams[pkt->stream_index]->time_base, AV_TIME_BASE_Q); if ((mov->max_fragment_duration && frag_duration >= mov->max_fragment_duration) || (mov->max_fragment_size && mov->mdat_size + size >= mov->max_fragment_size) || (mov->flags & FF_MOV_FLAG_FRAG_KEYFRAME && par->codec_type == AVMEDIA_TYPE_VIDEO && trk->entry && pkt->flags & AV_PKT_FLAG_KEY)) { if (frag_duration >= mov->min_fragment_duration) { // Set the duration of this track to line up with the next // sample in this track. This avoids relying on AVPacket // duration, but only helps for this particular track, not // for the other ones that are flushed at the same time. trk->track_duration = pkt->dts - trk->start_dts; if (pkt->pts != AV_NOPTS_VALUE) trk->end_pts = pkt->pts; else trk->end_pts = pkt->dts; trk->end_reliable = 1; mov_auto_flush_fragment(s, 0); } } return ff_mov_write_packet(s, pkt); } | 19,400 |
1 | int cpu_exec(CPUArchState *env) { CPUState *cpu = ENV_GET_CPU(env); #if !(defined(CONFIG_USER_ONLY) && \ (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X))) CPUClass *cc = CPU_GET_CLASS(cpu); #endif #ifdef TARGET_I386 X86CPU *x86_cpu = X86_CPU(cpu); #endif int ret, interrupt_request; TranslationBlock *tb; uint8_t *tc_ptr; uintptr_t next_tb; if (cpu->halted) { if (!cpu_has_work(cpu)) { return EXCP_HALTED; } cpu->halted = 0; } current_cpu = cpu; /* As long as current_cpu is null, up to the assignment just above, * requests by other threads to exit the execution loop are expected to * be issued using the exit_request global. We must make sure that our * evaluation of the global value is performed past the current_cpu * value transition point, which requires a memory barrier as well as * an instruction scheduling constraint on modern architectures. */ smp_mb(); if (unlikely(exit_request)) { cpu->exit_request = 1; } #if defined(TARGET_I386) /* put eflags in CPU temporary format */ CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C); env->df = 1 - (2 * ((env->eflags >> 10) & 1)); CC_OP = CC_OP_EFLAGS; env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C); #elif defined(TARGET_SPARC) #elif defined(TARGET_M68K) env->cc_op = CC_OP_FLAGS; env->cc_dest = env->sr & 0xf; env->cc_x = (env->sr >> 4) & 1; #elif defined(TARGET_ALPHA) #elif defined(TARGET_ARM) #elif defined(TARGET_UNICORE32) #elif defined(TARGET_PPC) env->reserve_addr = -1; #elif defined(TARGET_LM32) #elif defined(TARGET_MICROBLAZE) #elif defined(TARGET_MIPS) #elif defined(TARGET_MOXIE) #elif defined(TARGET_OPENRISC) #elif defined(TARGET_SH4) #elif defined(TARGET_CRIS) #elif defined(TARGET_S390X) #elif defined(TARGET_XTENSA) /* XXXXX */ #else #error unsupported target CPU #endif cpu->exception_index = -1; /* prepare setjmp context for exception handling */ for(;;) { if (sigsetjmp(cpu->jmp_env, 0) == 0) { /* if an exception is pending, we execute it here */ if (cpu->exception_index >= 0) { if (cpu->exception_index >= EXCP_INTERRUPT) { /* exit request from the cpu execution loop */ ret = cpu->exception_index; if (ret == EXCP_DEBUG) { cpu_handle_debug_exception(env); } break; } else { #if defined(CONFIG_USER_ONLY) /* if user mode only, we simulate a fake exception which will be handled outside the cpu execution loop */ #if defined(TARGET_I386) cc->do_interrupt(cpu); #endif ret = cpu->exception_index; break; #else cc->do_interrupt(cpu); cpu->exception_index = -1; #endif } } next_tb = 0; /* force lookup of first TB */ for(;;) { interrupt_request = cpu->interrupt_request; if (unlikely(interrupt_request)) { if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) { /* Mask out external interrupts for this step. */ interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK; } if (interrupt_request & CPU_INTERRUPT_DEBUG) { cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG; cpu->exception_index = EXCP_DEBUG; cpu_loop_exit(cpu); } #if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \ defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \ defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32) if (interrupt_request & CPU_INTERRUPT_HALT) { cpu->interrupt_request &= ~CPU_INTERRUPT_HALT; cpu->halted = 1; cpu->exception_index = EXCP_HLT; cpu_loop_exit(cpu); } #endif #if defined(TARGET_I386) #if !defined(CONFIG_USER_ONLY) if (interrupt_request & CPU_INTERRUPT_POLL) { cpu->interrupt_request &= ~CPU_INTERRUPT_POLL; apic_poll_irq(x86_cpu->apic_state); } #endif if (interrupt_request & CPU_INTERRUPT_INIT) { cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0); do_cpu_init(x86_cpu); cpu->exception_index = EXCP_HALTED; cpu_loop_exit(cpu); } else if (interrupt_request & CPU_INTERRUPT_SIPI) { do_cpu_sipi(x86_cpu); } else if (env->hflags2 & HF2_GIF_MASK) { if ((interrupt_request & CPU_INTERRUPT_SMI) && !(env->hflags & HF_SMM_MASK)) { cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0); cpu->interrupt_request &= ~CPU_INTERRUPT_SMI; do_smm_enter(x86_cpu); next_tb = 0; } else if ((interrupt_request & CPU_INTERRUPT_NMI) && !(env->hflags2 & HF2_NMI_MASK)) { cpu->interrupt_request &= ~CPU_INTERRUPT_NMI; env->hflags2 |= HF2_NMI_MASK; do_interrupt_x86_hardirq(env, EXCP02_NMI, 1); next_tb = 0; } else if (interrupt_request & CPU_INTERRUPT_MCE) { cpu->interrupt_request &= ~CPU_INTERRUPT_MCE; do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0); next_tb = 0; } else if ((interrupt_request & CPU_INTERRUPT_HARD) && (((env->hflags2 & HF2_VINTR_MASK) && (env->hflags2 & HF2_HIF_MASK)) || (!(env->hflags2 & HF2_VINTR_MASK) && (env->eflags & IF_MASK && !(env->hflags & HF_INHIBIT_IRQ_MASK))))) { int intno; cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0); cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ); intno = cpu_get_pic_interrupt(env); qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno); do_interrupt_x86_hardirq(env, intno, 1); /* ensure that no TB jump will be modified as the program flow was changed */ next_tb = 0; #if !defined(CONFIG_USER_ONLY) } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) && (env->eflags & IF_MASK) && !(env->hflags & HF_INHIBIT_IRQ_MASK)) { int intno; /* FIXME: this should respect TPR */ cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0); intno = ldl_phys(cpu->as, env->vm_vmcb + offsetof(struct vmcb, control.int_vector)); qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno); do_interrupt_x86_hardirq(env, intno, 1); cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ; next_tb = 0; #endif } } #elif defined(TARGET_PPC) if ((interrupt_request & CPU_INTERRUPT_RESET)) { cpu_reset(cpu); } if (interrupt_request & CPU_INTERRUPT_HARD) { ppc_hw_interrupt(env); if (env->pending_interrupts == 0) { cpu->interrupt_request &= ~CPU_INTERRUPT_HARD; } next_tb = 0; } #elif defined(TARGET_LM32) if ((interrupt_request & CPU_INTERRUPT_HARD) && (env->ie & IE_IE)) { cpu->exception_index = EXCP_IRQ; cc->do_interrupt(cpu); next_tb = 0; } #elif defined(TARGET_MICROBLAZE) if ((interrupt_request & CPU_INTERRUPT_HARD) && (env->sregs[SR_MSR] & MSR_IE) && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP)) && !(env->iflags & (D_FLAG | IMM_FLAG))) { cpu->exception_index = EXCP_IRQ; cc->do_interrupt(cpu); next_tb = 0; } #elif defined(TARGET_MIPS) if ((interrupt_request & CPU_INTERRUPT_HARD) && cpu_mips_hw_interrupts_pending(env)) { /* Raise it */ cpu->exception_index = EXCP_EXT_INTERRUPT; env->error_code = 0; cc->do_interrupt(cpu); next_tb = 0; } #elif defined(TARGET_OPENRISC) { int idx = -1; if ((interrupt_request & CPU_INTERRUPT_HARD) && (env->sr & SR_IEE)) { idx = EXCP_INT; } if ((interrupt_request & CPU_INTERRUPT_TIMER) && (env->sr & SR_TEE)) { idx = EXCP_TICK; } if (idx >= 0) { cpu->exception_index = idx; cc->do_interrupt(cpu); next_tb = 0; } } #elif defined(TARGET_SPARC) if (interrupt_request & CPU_INTERRUPT_HARD) { if (cpu_interrupts_enabled(env) && env->interrupt_index > 0) { int pil = env->interrupt_index & 0xf; int type = env->interrupt_index & 0xf0; if (((type == TT_EXTINT) && cpu_pil_allowed(env, pil)) || type != TT_EXTINT) { cpu->exception_index = env->interrupt_index; cc->do_interrupt(cpu); next_tb = 0; } } } #elif defined(TARGET_ARM) if (interrupt_request & CPU_INTERRUPT_FIQ && !(env->daif & PSTATE_F)) { cpu->exception_index = EXCP_FIQ; cc->do_interrupt(cpu); next_tb = 0; } /* ARMv7-M interrupt return works by loading a magic value into the PC. On real hardware the load causes the return to occur. The qemu implementation performs the jump normally, then does the exception return when the CPU tries to execute code at the magic address. This will cause the magic PC value to be pushed to the stack if an interrupt occurred at the wrong time. We avoid this by disabling interrupts when pc contains a magic address. */ if (interrupt_request & CPU_INTERRUPT_HARD && ((IS_M(env) && env->regs[15] < 0xfffffff0) || !(env->daif & PSTATE_I))) { cpu->exception_index = EXCP_IRQ; cc->do_interrupt(cpu); next_tb = 0; } #elif defined(TARGET_UNICORE32) if (interrupt_request & CPU_INTERRUPT_HARD && !(env->uncached_asr & ASR_I)) { cpu->exception_index = UC32_EXCP_INTR; cc->do_interrupt(cpu); next_tb = 0; } #elif defined(TARGET_SH4) if (interrupt_request & CPU_INTERRUPT_HARD) { cc->do_interrupt(cpu); next_tb = 0; } #elif defined(TARGET_ALPHA) { int idx = -1; /* ??? This hard-codes the OSF/1 interrupt levels. */ switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) { case 0 ... 3: if (interrupt_request & CPU_INTERRUPT_HARD) { idx = EXCP_DEV_INTERRUPT; } /* FALLTHRU */ case 4: if (interrupt_request & CPU_INTERRUPT_TIMER) { idx = EXCP_CLK_INTERRUPT; } /* FALLTHRU */ case 5: if (interrupt_request & CPU_INTERRUPT_SMP) { idx = EXCP_SMP_INTERRUPT; } /* FALLTHRU */ case 6: if (interrupt_request & CPU_INTERRUPT_MCHK) { idx = EXCP_MCHK; } } if (idx >= 0) { cpu->exception_index = idx; env->error_code = 0; cc->do_interrupt(cpu); next_tb = 0; } } #elif defined(TARGET_CRIS) if (interrupt_request & CPU_INTERRUPT_HARD && (env->pregs[PR_CCS] & I_FLAG) && !env->locked_irq) { cpu->exception_index = EXCP_IRQ; cc->do_interrupt(cpu); next_tb = 0; } if (interrupt_request & CPU_INTERRUPT_NMI) { unsigned int m_flag_archval; if (env->pregs[PR_VR] < 32) { m_flag_archval = M_FLAG_V10; } else { m_flag_archval = M_FLAG_V32; } if ((env->pregs[PR_CCS] & m_flag_archval)) { cpu->exception_index = EXCP_NMI; cc->do_interrupt(cpu); next_tb = 0; } } #elif defined(TARGET_M68K) if (interrupt_request & CPU_INTERRUPT_HARD && ((env->sr & SR_I) >> SR_I_SHIFT) < env->pending_level) { /* Real hardware gets the interrupt vector via an IACK cycle at this point. Current emulated hardware doesn't rely on this, so we provide/save the vector when the interrupt is first signalled. */ cpu->exception_index = env->pending_vector; do_interrupt_m68k_hardirq(env); next_tb = 0; } #elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY) if ((interrupt_request & CPU_INTERRUPT_HARD) && (env->psw.mask & PSW_MASK_EXT)) { cc->do_interrupt(cpu); next_tb = 0; } #elif defined(TARGET_XTENSA) if (interrupt_request & CPU_INTERRUPT_HARD) { cpu->exception_index = EXC_IRQ; cc->do_interrupt(cpu); next_tb = 0; } #endif /* Don't use the cached interrupt_request value, do_interrupt may have updated the EXITTB flag. */ if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) { cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB; /* ensure that no TB jump will be modified as the program flow was changed */ next_tb = 0; } } if (unlikely(cpu->exit_request)) { cpu->exit_request = 0; cpu->exception_index = EXCP_INTERRUPT; cpu_loop_exit(cpu); } spin_lock(&tcg_ctx.tb_ctx.tb_lock); have_tb_lock = true; tb = tb_find_fast(env); /* Note: we do it here to avoid a gcc bug on Mac OS X when doing it in tb_find_slow */ if (tcg_ctx.tb_ctx.tb_invalidated_flag) { /* as some TB could have been invalidated because of memory exceptions while generating the code, we must recompute the hash index here */ next_tb = 0; tcg_ctx.tb_ctx.tb_invalidated_flag = 0; } if (qemu_loglevel_mask(CPU_LOG_EXEC)) { qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n", tb->tc_ptr, tb->pc, lookup_symbol(tb->pc)); } /* see if we can patch the calling TB. When the TB spans two pages, we cannot safely do a direct jump. */ if (next_tb != 0 && tb->page_addr[1] == -1) { tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK), next_tb & TB_EXIT_MASK, tb); } have_tb_lock = false; spin_unlock(&tcg_ctx.tb_ctx.tb_lock); /* cpu_interrupt might be called while translating the TB, but before it is linked into a potentially infinite loop and becomes env->current_tb. Avoid starting execution if there is a pending interrupt. */ cpu->current_tb = tb; barrier(); if (likely(!cpu->exit_request)) { tc_ptr = tb->tc_ptr; /* execute the generated code */ next_tb = cpu_tb_exec(cpu, tc_ptr); switch (next_tb & TB_EXIT_MASK) { case TB_EXIT_REQUESTED: /* Something asked us to stop executing * chained TBs; just continue round the main * loop. Whatever requested the exit will also * have set something else (eg exit_request or * interrupt_request) which we will handle * next time around the loop. */ tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK); next_tb = 0; break; case TB_EXIT_ICOUNT_EXPIRED: { /* Instruction counter expired. */ int insns_left; tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK); insns_left = cpu->icount_decr.u32; if (cpu->icount_extra && insns_left >= 0) { /* Refill decrementer and continue execution. */ cpu->icount_extra += insns_left; if (cpu->icount_extra > 0xffff) { insns_left = 0xffff; } else { insns_left = cpu->icount_extra; } cpu->icount_extra -= insns_left; cpu->icount_decr.u16.low = insns_left; } else { if (insns_left > 0) { /* Execute remaining instructions. */ cpu_exec_nocache(env, insns_left, tb); } cpu->exception_index = EXCP_INTERRUPT; next_tb = 0; cpu_loop_exit(cpu); } break; } default: break; } } cpu->current_tb = NULL; /* reset soft MMU for next block (it can currently only be set by a memory fault) */ } /* for(;;) */ } else { /* Reload env after longjmp - the compiler may have smashed all * local variables as longjmp is marked 'noreturn'. */ cpu = current_cpu; env = cpu->env_ptr; #if !(defined(CONFIG_USER_ONLY) && \ (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X))) cc = CPU_GET_CLASS(cpu); #endif #ifdef TARGET_I386 x86_cpu = X86_CPU(cpu); #endif if (have_tb_lock) { spin_unlock(&tcg_ctx.tb_ctx.tb_lock); have_tb_lock = false; } } } /* for(;;) */ #if defined(TARGET_I386) /* restore flags in standard format */ env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP) | (env->df & DF_MASK); #elif defined(TARGET_ARM) /* XXX: Save/restore host fpu exception state?. */ #elif defined(TARGET_UNICORE32) #elif defined(TARGET_SPARC) #elif defined(TARGET_PPC) #elif defined(TARGET_LM32) #elif defined(TARGET_M68K) cpu_m68k_flush_flags(env, env->cc_op); env->cc_op = CC_OP_FLAGS; env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4); #elif defined(TARGET_MICROBLAZE) #elif defined(TARGET_MIPS) #elif defined(TARGET_MOXIE) #elif defined(TARGET_OPENRISC) #elif defined(TARGET_SH4) #elif defined(TARGET_ALPHA) #elif defined(TARGET_CRIS) #elif defined(TARGET_S390X) #elif defined(TARGET_XTENSA) /* XXXXX */ #else #error unsupported target CPU #endif /* fail safe : never use current_cpu outside cpu_exec() */ current_cpu = NULL; return ret; } | 19,401 |
1 | static int bdrv_can_snapshot(BlockDriverState *bs) { return (bs && !bdrv_is_removable(bs) && !bdrv_is_read_only(bs)); } | 19,402 |
0 | int hw_device_init_from_string(const char *arg, HWDevice **dev_out) { // "type=name:device,key=value,key2=value2" // "type:device,key=value,key2=value2" // -> av_hwdevice_ctx_create() // "type=name@name" // "type@name" // -> av_hwdevice_ctx_create_derived() AVDictionary *options = NULL; char *type_name = NULL, *name = NULL, *device = NULL; enum AVHWDeviceType type; HWDevice *dev, *src; AVBufferRef *device_ref = NULL; int err; const char *errmsg, *p, *q; size_t k; k = strcspn(arg, ":=@"); p = arg + k; type_name = av_strndup(arg, k); if (!type_name) { err = AVERROR(ENOMEM); goto fail; } type = av_hwdevice_find_type_by_name(type_name); if (type == AV_HWDEVICE_TYPE_NONE) { errmsg = "unknown device type"; goto invalid; } if (*p == '=') { k = strcspn(p + 1, ":@"); name = av_strndup(p + 1, k); if (!name) { err = AVERROR(ENOMEM); goto fail; } if (hw_device_get_by_name(name)) { errmsg = "named device already exists"; goto invalid; } p += 1 + k; } else { // Give the device an automatic name of the form "type%d". // We arbitrarily limit at 1000 anonymous devices of the same // type - there is probably something else very wrong if you // get to this limit. size_t index_pos; int index, index_limit = 1000; index_pos = strlen(type_name); name = av_malloc(index_pos + 4); if (!name) { err = AVERROR(ENOMEM); goto fail; } for (index = 0; index < index_limit; index++) { snprintf(name, index_pos + 4, "%s%d", type_name, index); if (!hw_device_get_by_name(name)) break; } if (index >= index_limit) { errmsg = "too many devices"; goto invalid; } } if (!*p) { // New device with no parameters. err = av_hwdevice_ctx_create(&device_ref, type, NULL, NULL, 0); if (err < 0) goto fail; } else if (*p == ':') { // New device with some parameters. ++p; q = strchr(p, ','); if (q) { device = av_strndup(p, q - p); if (!device) { err = AVERROR(ENOMEM); goto fail; } err = av_dict_parse_string(&options, q + 1, "=", ",", 0); if (err < 0) { errmsg = "failed to parse options"; goto invalid; } } err = av_hwdevice_ctx_create(&device_ref, type, device ? device : p, options, 0); if (err < 0) goto fail; } else if (*p == '@') { // Derive from existing device. src = hw_device_get_by_name(p + 1); if (!src) { errmsg = "invalid source device name"; goto invalid; } err = av_hwdevice_ctx_create_derived(&device_ref, type, src->device_ref, 0); if (err < 0) goto fail; } else { errmsg = "parse error"; goto invalid; } dev = hw_device_add(); if (!dev) { err = AVERROR(ENOMEM); goto fail; } dev->name = name; dev->type = type; dev->device_ref = device_ref; if (dev_out) *dev_out = dev; name = NULL; err = 0; done: av_freep(&type_name); av_freep(&name); av_freep(&device); av_dict_free(&options); return err; invalid: av_log(NULL, AV_LOG_ERROR, "Invalid device specification \"%s\": %s\n", arg, errmsg); err = AVERROR(EINVAL); goto done; fail: av_log(NULL, AV_LOG_ERROR, "Device creation failed: %d.\n", err); av_buffer_unref(&device_ref); goto done; } | 19,404 |
0 | static void vaapi_encode_h264_write_pps(PutBitContext *pbc, VAAPIEncodeContext *ctx) { VAEncPictureParameterBufferH264 *vpic = ctx->codec_picture_params; VAAPIEncodeH264Context *priv = ctx->priv_data; VAAPIEncodeH264MiscSequenceParams *mseq = &priv->misc_sequence_params; vaapi_encode_h264_write_nal_header(pbc, NAL_PPS, 3); ue(vpic_var(pic_parameter_set_id)); ue(vpic_var(seq_parameter_set_id)); u(1, vpic_field(entropy_coding_mode_flag)); u(1, mseq_var(bottom_field_pic_order_in_frame_present_flag)); ue(mseq_var(num_slice_groups_minus1)); if (mseq->num_slice_groups_minus1 > 0) { ue(mseq_var(slice_group_map_type)); av_assert0(0 && "slice groups not supported"); } ue(vpic_var(num_ref_idx_l0_active_minus1)); ue(vpic_var(num_ref_idx_l1_active_minus1)); u(1, vpic_field(weighted_pred_flag)); u(2, vpic_field(weighted_bipred_idc)); se(vpic->pic_init_qp - 26, pic_init_qp_minus26); se(mseq_var(pic_init_qs_minus26)); se(vpic_var(chroma_qp_index_offset)); u(1, vpic_field(deblocking_filter_control_present_flag)); u(1, vpic_field(constrained_intra_pred_flag)); u(1, vpic_field(redundant_pic_cnt_present_flag)); u(1, vpic_field(transform_8x8_mode_flag)); u(1, vpic_field(pic_scaling_matrix_present_flag)); if (vpic->pic_fields.bits.pic_scaling_matrix_present_flag) { av_assert0(0 && "scaling matrices not supported"); } se(vpic_var(second_chroma_qp_index_offset)); vaapi_encode_h264_write_trailing_rbsp(pbc); } | 19,405 |
0 | static av_always_inline int coeff_abs_level_remaining_decode(HEVCContext *s, int rc_rice_param) { int prefix = 0; int suffix = 0; int last_coeff_abs_level_remaining; int i; while (prefix < CABAC_MAX_BIN && get_cabac_bypass(&s->HEVClc->cc)) prefix++; if (prefix == CABAC_MAX_BIN) { av_log(s->avctx, AV_LOG_ERROR, "CABAC_MAX_BIN : %d\n", prefix); return 0; } if (prefix < 3) { for (i = 0; i < rc_rice_param; i++) suffix = (suffix << 1) | get_cabac_bypass(&s->HEVClc->cc); last_coeff_abs_level_remaining = (prefix << rc_rice_param) + suffix; } else { int prefix_minus3 = prefix - 3; for (i = 0; i < prefix_minus3 + rc_rice_param; i++) suffix = (suffix << 1) | get_cabac_bypass(&s->HEVClc->cc); last_coeff_abs_level_remaining = (((1 << prefix_minus3) + 3 - 1) << rc_rice_param) + suffix; } return last_coeff_abs_level_remaining; } | 19,407 |
1 | static void float_number(void) { int i; struct { const char *encoded; double decoded; int skip; } test_cases[] = { { "32.43", 32.43 }, { "0.222", 0.222 }, { "-32.12313", -32.12313 }, { "-32.20e-10", -32.20e-10, .skip = 1 }, { }, }; for (i = 0; test_cases[i].encoded; i++) { QObject *obj; QFloat *qfloat; obj = qobject_from_json(test_cases[i].encoded, NULL); qfloat = qobject_to_qfloat(obj); g_assert(qfloat); g_assert(qfloat_get_double(qfloat) == test_cases[i].decoded); if (test_cases[i].skip == 0) { QString *str; str = qobject_to_json(obj); g_assert(strcmp(qstring_get_str(str), test_cases[i].encoded) == 0); QDECREF(str); } QDECREF(qfloat); } } | 19,409 |
0 | AVFilterContext *avfilter_graph_get_filter(AVFilterGraph *graph, char *name) { int i; if(!name) return NULL; for(i = 0; i < graph->filter_count; i ++) if(graph->filters[i]->name && !strcmp(name, graph->filters[i]->name)) return graph->filters[i]; return NULL; } | 19,410 |
1 | static int mirror_do_read(MirrorBlockJob *s, int64_t sector_num, int nb_sectors) { BlockBackend *source = s->common.blk; int sectors_per_chunk, nb_chunks; int ret = nb_sectors; MirrorOp *op; sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS; /* We can only handle as much as buf_size at a time. */ nb_sectors = MIN(s->buf_size >> BDRV_SECTOR_BITS, nb_sectors); assert(nb_sectors); if (s->cow_bitmap) { ret += mirror_cow_align(s, §or_num, &nb_sectors); } assert(nb_sectors << BDRV_SECTOR_BITS <= s->buf_size); /* The sector range must meet granularity because: * 1) Caller passes in aligned values; * 2) mirror_cow_align is used only when target cluster is larger. */ assert(!(sector_num % sectors_per_chunk)); nb_chunks = DIV_ROUND_UP(nb_sectors, sectors_per_chunk); while (s->buf_free_count < nb_chunks) { trace_mirror_yield_in_flight(s, sector_num, s->in_flight); mirror_wait_for_io(s); } /* Allocate a MirrorOp that is used as an AIO callback. */ op = g_new(MirrorOp, 1); op->s = s; op->sector_num = sector_num; op->nb_sectors = nb_sectors; /* Now make a QEMUIOVector taking enough granularity-sized chunks * from s->buf_free. */ qemu_iovec_init(&op->qiov, nb_chunks); while (nb_chunks-- > 0) { MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free); size_t remaining = nb_sectors * BDRV_SECTOR_SIZE - op->qiov.size; QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next); s->buf_free_count--; qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining)); } /* Copy the dirty cluster. */ s->in_flight++; s->sectors_in_flight += nb_sectors; trace_mirror_one_iteration(s, sector_num, nb_sectors); blk_aio_preadv(source, sector_num * BDRV_SECTOR_SIZE, &op->qiov, nb_sectors * BDRV_SECTOR_SIZE, mirror_read_complete, op); return ret; } | 19,412 |
1 | static void pred_temp_direct_motion(H264Context * const h, int *mb_type){ MpegEncContext * const s = &h->s; int b8_stride = 2; int b4_stride = h->b_stride; int mb_xy = h->mb_xy, mb_y = s->mb_y; int mb_type_col[2]; const int16_t (*l1mv0)[2], (*l1mv1)[2]; const int8_t *l1ref0, *l1ref1; const int is_b8x8 = IS_8X8(*mb_type); unsigned int sub_mb_type; int i8, i4; assert(h->ref_list[1][0].f.reference & 3); await_reference_mb_row(h, &h->ref_list[1][0], s->mb_y + !!IS_INTERLACED(*mb_type)); if (IS_INTERLACED(h->ref_list[1][0].f.mb_type[mb_xy])) { // AFL/AFR/FR/FL -> AFL/FL if (!IS_INTERLACED(*mb_type)) { // AFR/FR -> AFL/FL mb_y = (s->mb_y&~1) + h->col_parity; mb_xy= s->mb_x + ((s->mb_y&~1) + h->col_parity)*s->mb_stride; b8_stride = 0; }else{ mb_y += h->col_fieldoff; mb_xy += s->mb_stride*h->col_fieldoff; // non zero for FL -> FL & differ parity goto single_col; }else{ // AFL/AFR/FR/FL -> AFR/FR if(IS_INTERLACED(*mb_type)){ // AFL /FL -> AFR/FR mb_y = s->mb_y&~1; mb_xy= s->mb_x + (s->mb_y&~1)*s->mb_stride; mb_type_col[0] = h->ref_list[1][0].f.mb_type[mb_xy]; mb_type_col[1] = h->ref_list[1][0].f.mb_type[mb_xy + s->mb_stride]; b8_stride = 2+4*s->mb_stride; b4_stride *= 6; sub_mb_type = MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_SUB_8x8 */ if( (mb_type_col[0] & MB_TYPE_16x16_OR_INTRA) && (mb_type_col[1] & MB_TYPE_16x16_OR_INTRA) && !is_b8x8){ *mb_type |= MB_TYPE_16x8 |MB_TYPE_L0L1|MB_TYPE_DIRECT2; /* B_16x8 */ }else{ *mb_type |= MB_TYPE_8x8|MB_TYPE_L0L1; }else{ // AFR/FR -> AFR/FR single_col: mb_type_col[0] = mb_type_col[1] = h->ref_list[1][0].f.mb_type[mb_xy]; sub_mb_type = MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_SUB_8x8 */ if(!is_b8x8 && (mb_type_col[0] & MB_TYPE_16x16_OR_INTRA)){ *mb_type |= MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_16x16 */ }else if(!is_b8x8 && (mb_type_col[0] & (MB_TYPE_16x8|MB_TYPE_8x16))){ *mb_type |= MB_TYPE_L0L1|MB_TYPE_DIRECT2 | (mb_type_col[0] & (MB_TYPE_16x8|MB_TYPE_8x16)); }else{ if(!h->sps.direct_8x8_inference_flag){ /* FIXME save sub mb types from previous frames (or derive from MVs) * so we know exactly what block size to use */ sub_mb_type = MB_TYPE_8x8|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_SUB_4x4 */ *mb_type |= MB_TYPE_8x8|MB_TYPE_L0L1; await_reference_mb_row(h, &h->ref_list[1][0], mb_y); l1mv0 = &h->ref_list[1][0].f.motion_val[0][h->mb2b_xy [mb_xy]]; l1mv1 = &h->ref_list[1][0].f.motion_val[1][h->mb2b_xy [mb_xy]]; l1ref0 = &h->ref_list[1][0].f.ref_index [0][4 * mb_xy]; l1ref1 = &h->ref_list[1][0].f.ref_index [1][4 * mb_xy]; if(!b8_stride){ if(s->mb_y&1){ l1ref0 += 2; l1ref1 += 2; l1mv0 += 2*b4_stride; l1mv1 += 2*b4_stride; { const int *map_col_to_list0[2] = {h->map_col_to_list0[0], h->map_col_to_list0[1]}; const int *dist_scale_factor = h->dist_scale_factor; int ref_offset; if(FRAME_MBAFF && IS_INTERLACED(*mb_type)){ map_col_to_list0[0] = h->map_col_to_list0_field[s->mb_y&1][0]; map_col_to_list0[1] = h->map_col_to_list0_field[s->mb_y&1][1]; dist_scale_factor =h->dist_scale_factor_field[s->mb_y&1]; ref_offset = (h->ref_list[1][0].mbaff<<4) & (mb_type_col[0]>>3); //if(h->ref_list[1][0].mbaff && IS_INTERLACED(mb_type_col[0])) ref_offset=16 else 0 if(IS_INTERLACED(*mb_type) != IS_INTERLACED(mb_type_col[0])){ int y_shift = 2*!IS_INTERLACED(*mb_type); assert(h->sps.direct_8x8_inference_flag); for(i8=0; i8<4; i8++){ const int x8 = i8&1; const int y8 = i8>>1; int ref0, scale; const int16_t (*l1mv)[2]= l1mv0; if(is_b8x8 && !IS_DIRECT(h->sub_mb_type[i8])) continue; h->sub_mb_type[i8] = sub_mb_type; fill_rectangle(&h->ref_cache[1][scan8[i8*4]], 2, 2, 8, 0, 1); if(IS_INTRA(mb_type_col[y8])){ fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, 0, 1); fill_rectangle(&h-> mv_cache[0][scan8[i8*4]], 2, 2, 8, 0, 4); fill_rectangle(&h-> mv_cache[1][scan8[i8*4]], 2, 2, 8, 0, 4); continue; ref0 = l1ref0[x8 + y8*b8_stride]; if(ref0 >= 0) ref0 = map_col_to_list0[0][ref0 + ref_offset]; else{ ref0 = map_col_to_list0[1][l1ref1[x8 + y8*b8_stride] + ref_offset]; l1mv= l1mv1; scale = dist_scale_factor[ref0]; fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, ref0, 1); { const int16_t *mv_col = l1mv[x8*3 + y8*b4_stride]; int my_col = (mv_col[1]<<y_shift)/2; int mx = (scale * mv_col[0] + 128) >> 8; int my = (scale * my_col + 128) >> 8; fill_rectangle(&h->mv_cache[0][scan8[i8*4]], 2, 2, 8, pack16to32(mx,my), 4); fill_rectangle(&h->mv_cache[1][scan8[i8*4]], 2, 2, 8, pack16to32(mx-mv_col[0],my-my_col), 4); return; /* one-to-one mv scaling */ if(IS_16X16(*mb_type)){ int ref, mv0, mv1; fill_rectangle(&h->ref_cache[1][scan8[0]], 4, 4, 8, 0, 1); if(IS_INTRA(mb_type_col[0])){ ref=mv0=mv1=0; }else{ const int ref0 = l1ref0[0] >= 0 ? map_col_to_list0[0][l1ref0[0] + ref_offset] : map_col_to_list0[1][l1ref1[0] + ref_offset]; const int scale = dist_scale_factor[ref0]; const int16_t *mv_col = l1ref0[0] >= 0 ? l1mv0[0] : l1mv1[0]; int mv_l0[2]; mv_l0[0] = (scale * mv_col[0] + 128) >> 8; mv_l0[1] = (scale * mv_col[1] + 128) >> 8; ref= ref0; mv0= pack16to32(mv_l0[0],mv_l0[1]); mv1= pack16to32(mv_l0[0]-mv_col[0],mv_l0[1]-mv_col[1]); fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, ref, 1); fill_rectangle(&h-> mv_cache[0][scan8[0]], 4, 4, 8, mv0, 4); fill_rectangle(&h-> mv_cache[1][scan8[0]], 4, 4, 8, mv1, 4); }else{ for(i8=0; i8<4; i8++){ const int x8 = i8&1; const int y8 = i8>>1; int ref0, scale; const int16_t (*l1mv)[2]= l1mv0; if(is_b8x8 && !IS_DIRECT(h->sub_mb_type[i8])) continue; h->sub_mb_type[i8] = sub_mb_type; fill_rectangle(&h->ref_cache[1][scan8[i8*4]], 2, 2, 8, 0, 1); if(IS_INTRA(mb_type_col[0])){ fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, 0, 1); fill_rectangle(&h-> mv_cache[0][scan8[i8*4]], 2, 2, 8, 0, 4); fill_rectangle(&h-> mv_cache[1][scan8[i8*4]], 2, 2, 8, 0, 4); continue; assert(b8_stride == 2); ref0 = l1ref0[i8]; if(ref0 >= 0) ref0 = map_col_to_list0[0][ref0 + ref_offset]; else{ ref0 = map_col_to_list0[1][l1ref1[i8] + ref_offset]; l1mv= l1mv1; scale = dist_scale_factor[ref0]; fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, ref0, 1); if(IS_SUB_8X8(sub_mb_type)){ const int16_t *mv_col = l1mv[x8*3 + y8*3*b4_stride]; int mx = (scale * mv_col[0] + 128) >> 8; int my = (scale * mv_col[1] + 128) >> 8; fill_rectangle(&h->mv_cache[0][scan8[i8*4]], 2, 2, 8, pack16to32(mx,my), 4); fill_rectangle(&h->mv_cache[1][scan8[i8*4]], 2, 2, 8, pack16to32(mx-mv_col[0],my-mv_col[1]), 4); }else for(i4=0; i4<4; i4++){ const int16_t *mv_col = l1mv[x8*2 + (i4&1) + (y8*2 + (i4>>1))*b4_stride]; int16_t *mv_l0 = h->mv_cache[0][scan8[i8*4+i4]]; mv_l0[0] = (scale * mv_col[0] + 128) >> 8; mv_l0[1] = (scale * mv_col[1] + 128) >> 8; AV_WN32A(h->mv_cache[1][scan8[i8*4+i4]], pack16to32(mv_l0[0]-mv_col[0],mv_l0[1]-mv_col[1])); | 19,413 |
1 | static inline int cris_abs(int n) { int r; asm ("abs\t%1, %0\n" : "=r" (r) : "r" (n)); return r; } | 19,415 |
1 | static int bmdma_prepare_buf(IDEDMA *dma, int is_write) { BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma); IDEState *s = bmdma_active_if(bm); PCIDevice *pci_dev = PCI_DEVICE(bm->pci_dev); struct { uint32_t addr; uint32_t size; } prd; int l, len; pci_dma_sglist_init(&s->sg, pci_dev, s->nsector / (BMDMA_PAGE_SIZE / 512) + 1); s->io_buffer_size = 0; for(;;) { if (bm->cur_prd_len == 0) { /* end of table (with a fail safe of one page) */ if (bm->cur_prd_last || (bm->cur_addr - bm->addr) >= BMDMA_PAGE_SIZE) return s->io_buffer_size != 0; pci_dma_read(pci_dev, bm->cur_addr, &prd, 8); bm->cur_addr += 8; prd.addr = le32_to_cpu(prd.addr); prd.size = le32_to_cpu(prd.size); len = prd.size & 0xfffe; if (len == 0) len = 0x10000; bm->cur_prd_len = len; bm->cur_prd_addr = prd.addr; bm->cur_prd_last = (prd.size & 0x80000000); } l = bm->cur_prd_len; if (l > 0) { qemu_sglist_add(&s->sg, bm->cur_prd_addr, l); bm->cur_prd_addr += l; bm->cur_prd_len -= l; s->io_buffer_size += l; } } return 1; } | 19,416 |
1 | av_cold int ff_mss12_decode_init(MSS12Context *c, int version, SliceContext* sc1, SliceContext *sc2) { AVCodecContext *avctx = c->avctx; int i; if (avctx->extradata_size < 52 + 256 * 3) { av_log(avctx, AV_LOG_ERROR, "Insufficient extradata size %d\n", avctx->extradata_size); if (AV_RB32(avctx->extradata) < avctx->extradata_size) { av_log(avctx, AV_LOG_ERROR, "Insufficient extradata size: expected %d got %d\n", AV_RB32(avctx->extradata), avctx->extradata_size); avctx->coded_width = AV_RB32(avctx->extradata + 20); avctx->coded_height = AV_RB32(avctx->extradata + 24); if (avctx->coded_width > 4096 || avctx->coded_height > 4096) { av_log(avctx, AV_LOG_ERROR, "Frame dimensions %dx%d too large", av_log(avctx, AV_LOG_DEBUG, "Encoder version %d.%d\n", AV_RB32(avctx->extradata + 4), AV_RB32(avctx->extradata + 8)); if (version != AV_RB32(avctx->extradata + 4) > 1) { av_log(avctx, AV_LOG_ERROR, "Header version doesn't match codec tag\n"); return -1; c->free_colours = AV_RB32(avctx->extradata + 48); if ((unsigned)c->free_colours > 256) { av_log(avctx, AV_LOG_ERROR, "Incorrect number of changeable palette entries: %d\n", c->free_colours); av_log(avctx, AV_LOG_DEBUG, "%d free colour(s)\n", c->free_colours); av_log(avctx, AV_LOG_DEBUG, "Display dimensions %dx%d\n", AV_RB32(avctx->extradata + 12), AV_RB32(avctx->extradata + 16)); av_log(avctx, AV_LOG_DEBUG, "Coded dimensions %dx%d\n", av_log(avctx, AV_LOG_DEBUG, "%g frames per second\n", av_int2float(AV_RB32(avctx->extradata + 28))); av_log(avctx, AV_LOG_DEBUG, "Bitrate %d bps\n", AV_RB32(avctx->extradata + 32)); av_log(avctx, AV_LOG_DEBUG, "Max. lead time %g ms\n", av_int2float(AV_RB32(avctx->extradata + 36))); av_log(avctx, AV_LOG_DEBUG, "Max. lag time %g ms\n", av_int2float(AV_RB32(avctx->extradata + 40))); av_log(avctx, AV_LOG_DEBUG, "Max. seek time %g ms\n", av_int2float(AV_RB32(avctx->extradata + 44))); if (version) { if (avctx->extradata_size < 60 + 256 * 3) { av_log(avctx, AV_LOG_ERROR, "Insufficient extradata size %d for v2\n", avctx->extradata_size); c->slice_split = AV_RB32(avctx->extradata + 52); av_log(avctx, AV_LOG_DEBUG, "Slice split %d\n", c->slice_split); c->full_model_syms = AV_RB32(avctx->extradata + 56); if (c->full_model_syms < 2 || c->full_model_syms > 256) { av_log(avctx, AV_LOG_ERROR, "Incorrect number of used colours %d\n", c->full_model_syms); av_log(avctx, AV_LOG_DEBUG, "Used colours %d\n", c->full_model_syms); } else { c->slice_split = 0; c->full_model_syms = 256; for (i = 0; i < 256; i++) c->pal[i] = 0xFFU << 24 | AV_RB24(avctx->extradata + 52 + (version ? 8 : 0) + i * 3); c->mask_stride = FFALIGN(avctx->width, 16); c->mask = av_malloc(c->mask_stride * avctx->height); if (!c->mask) { av_log(avctx, AV_LOG_ERROR, "Cannot allocate mask plane\n"); return AVERROR(ENOMEM); sc1->c = c; slicecontext_init(sc1, version, c->full_model_syms); if (c->slice_split) { sc2->c = c; slicecontext_init(sc2, version, c->full_model_syms); c->corrupted = 1; return 0; | 19,417 |
1 | static int decode_frame(NUTContext *nut, AVPacket *pkt, int frame_code) { AVFormatContext *s = nut->avf; AVIOContext *bc = s->pb; int size, stream_id, discard; int64_t pts, last_IP_pts; StreamContext *stc; uint8_t header_idx; size = decode_frame_header(nut, &pts, &stream_id, &header_idx, frame_code); if (size < 0) return size; stc = &nut->stream[stream_id]; if (stc->last_flags & FLAG_KEY) stc->skip_until_key_frame = 0; discard = s->streams[stream_id]->discard; last_IP_pts = s->streams[stream_id]->last_IP_pts; if ((discard >= AVDISCARD_NONKEY && !(stc->last_flags & FLAG_KEY)) || (discard >= AVDISCARD_BIDIR && last_IP_pts != AV_NOPTS_VALUE && last_IP_pts > pts) || discard >= AVDISCARD_ALL || stc->skip_until_key_frame) { avio_skip(bc, size); return 1; } if (av_new_packet(pkt, size + nut->header_len[header_idx]) < 0) return AVERROR(ENOMEM); memcpy(pkt->data, nut->header[header_idx], nut->header_len[header_idx]); pkt->pos = avio_tell(bc); // FIXME if (stc->last_flags & FLAG_SM_DATA) { int sm_size; if (read_sm_data(s, bc, pkt, 0, pkt->pos + size) < 0) return AVERROR_INVALIDDATA; if (read_sm_data(s, bc, pkt, 1, pkt->pos + size) < 0) return AVERROR_INVALIDDATA; sm_size = avio_tell(bc) - pkt->pos; size -= sm_size; pkt->size -= sm_size; } avio_read(bc, pkt->data + nut->header_len[header_idx], size); pkt->stream_index = stream_id; if (stc->last_flags & FLAG_KEY) pkt->flags |= AV_PKT_FLAG_KEY; pkt->pts = pts; return 0; } | 19,418 |
1 | static int rm_assemble_video_frame(AVFormatContext *s, ByteIOContext *pb, RMDemuxContext *rm, RMStream *vst, AVPacket *pkt, int len, int *pseq) { int hdr, seq, pic_num, len2, pos; int type; hdr = get_byte(pb); len--; type = hdr >> 6; if(type != 3){ // not frame as a part of packet seq = get_byte(pb); len--; } if(type != 1){ // not whole frame len2 = get_num(pb, &len); pos = get_num(pb, &len); pic_num = get_byte(pb); len--; } if(len<0) return -1; rm->remaining_len = len; if(type&1){ // frame, not slice if(type == 3) // frame as a part of packet len= len2; if(rm->remaining_len < len) return -1; rm->remaining_len -= len; if(av_new_packet(pkt, len + 9) < 0) return AVERROR(EIO); pkt->data[0] = 0; AV_WL32(pkt->data + 1, 1); AV_WL32(pkt->data + 5, 0); get_buffer(pb, pkt->data + 9, len); return 0; } //now we have to deal with single slice *pseq = seq; if((seq & 0x7F) == 1 || vst->curpic_num != pic_num){ vst->slices = ((hdr & 0x3F) << 1) + 1; vst->videobufsize = len2 + 8*vst->slices + 1; av_free_packet(&vst->pkt); //FIXME this should be output. if(av_new_packet(&vst->pkt, vst->videobufsize) < 0) return AVERROR(ENOMEM); vst->videobufpos = 8*vst->slices + 1; vst->cur_slice = 0; vst->curpic_num = pic_num; vst->pktpos = url_ftell(pb); } if(type == 2) len = FFMIN(len, pos); if(++vst->cur_slice > vst->slices) return 1; AV_WL32(vst->pkt.data - 7 + 8*vst->cur_slice, 1); AV_WL32(vst->pkt.data - 3 + 8*vst->cur_slice, vst->videobufpos - 8*vst->slices - 1); if(vst->videobufpos + len > vst->videobufsize) return 1; if (get_buffer(pb, vst->pkt.data + vst->videobufpos, len) != len) return AVERROR(EIO); vst->videobufpos += len; rm->remaining_len-= len; if(type == 2 || (vst->videobufpos) == vst->videobufsize){ vst->pkt.data[0] = vst->cur_slice-1; *pkt= vst->pkt; vst->pkt.data= NULL; vst->pkt.size= 0; if(vst->slices != vst->cur_slice) //FIXME find out how to set slices correct from the begin memmove(pkt->data + 1 + 8*vst->cur_slice, pkt->data + 1 + 8*vst->slices, vst->videobufpos - 1 - 8*vst->slices); pkt->size = vst->videobufpos + 8*(vst->cur_slice - vst->slices); pkt->pts = AV_NOPTS_VALUE; pkt->pos = vst->pktpos; return 0; } return 1; } | 19,419 |
1 | static int queue_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos) { VideoPicture *vp; double frame_delay, pts = pts1; /* compute the exact PTS for the picture if it is omitted in the stream * pts1 is the dts of the pkt / pts of the frame */ if (pts != 0) { /* update video clock with pts, if present */ is->video_clock = pts; } else { pts = is->video_clock; /* update video clock for next frame */ frame_delay = av_q2d(is->video_st->codec->time_base); /* for MPEG2, the frame can be repeated, so we update the clock accordingly */ frame_delay += src_frame->repeat_pict * (frame_delay * 0.5); is->video_clock += frame_delay; #if defined(DEBUG_SYNC) && 0 printf("frame_type=%c clock=%0.3f pts=%0.3f\n", av_get_picture_type_char(src_frame->pict_type), pts, pts1); #endif /* wait until we have space to put a new picture */ SDL_LockMutex(is->pictq_mutex); if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh) is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR)); while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE && !is->videoq.abort_request) { SDL_UnlockMutex(is->pictq_mutex); if (is->videoq.abort_request) return -1; vp = &is->pictq[is->pictq_windex]; vp->duration = frame_delay; /* alloc or resize hardware picture buffer */ if (!vp->bmp || #if CONFIG_AVFILTER vp->width != is->out_video_filter->inputs[0]->w || vp->height != is->out_video_filter->inputs[0]->h) { #else vp->width != is->video_st->codec->width || vp->height != is->video_st->codec->height) { #endif SDL_Event event; vp->allocated = 0; /* the allocation must be done in the main thread to avoid locking problems */ event.type = FF_ALLOC_EVENT; event.user.data1 = is; SDL_PushEvent(&event); /* wait until the picture is allocated */ SDL_LockMutex(is->pictq_mutex); while (!vp->allocated && !is->videoq.abort_request) { SDL_UnlockMutex(is->pictq_mutex); if (is->videoq.abort_request) return -1; /* if the frame is not skipped, then display it */ if (vp->bmp) { AVPicture pict; #if CONFIG_AVFILTER if(vp->picref) avfilter_unref_buffer(vp->picref); vp->picref = src_frame->opaque; #endif /* get a pointer on the bitmap */ SDL_LockYUVOverlay (vp->bmp); memset(&pict,0,sizeof(AVPicture)); pict.data[0] = vp->bmp->pixels[0]; pict.data[1] = vp->bmp->pixels[2]; pict.data[2] = vp->bmp->pixels[1]; pict.linesize[0] = vp->bmp->pitches[0]; pict.linesize[1] = vp->bmp->pitches[2]; pict.linesize[2] = vp->bmp->pitches[1]; #if CONFIG_AVFILTER //FIXME use direct rendering av_picture_copy(&pict, (AVPicture *)src_frame, vp->pix_fmt, vp->width, vp->height); #else sws_flags = av_get_int(sws_opts, "sws_flags", NULL); is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx, vp->width, vp->height, vp->pix_fmt, vp->width, vp->height, PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL); if (is->img_convert_ctx == NULL) { fprintf(stderr, "Cannot initialize the conversion context\n"); exit(1); sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize, 0, vp->height, pict.data, pict.linesize); #endif /* update the bitmap content */ SDL_UnlockYUVOverlay(vp->bmp); vp->pts = pts; vp->pos = pos; /* now we can update the picture count */ if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE) is->pictq_windex = 0; SDL_LockMutex(is->pictq_mutex); vp->target_clock= compute_target_time(vp->pts, is); is->pictq_size++; SDL_UnlockMutex(is->pictq_mutex); return 0; | 19,420 |
1 | static int assigned_device_pci_cap_init(PCIDevice *pci_dev) { AssignedDevice *dev = DO_UPCAST(AssignedDevice, dev, pci_dev); PCIRegion *pci_region = dev->real_device.regions; int ret, pos; /* Clear initial capabilities pointer and status copied from hw */ pci_set_byte(pci_dev->config + PCI_CAPABILITY_LIST, 0); pci_set_word(pci_dev->config + PCI_STATUS, pci_get_word(pci_dev->config + PCI_STATUS) & ~PCI_STATUS_CAP_LIST); /* Expose MSI capability * MSI capability is the 1st capability in capability config */ pos = pci_find_cap_offset(pci_dev, PCI_CAP_ID_MSI, 0); if (pos != 0 && kvm_check_extension(kvm_state, KVM_CAP_ASSIGN_DEV_IRQ)) { if (!check_irqchip_in_kernel()) { return -ENOTSUP; } dev->cap.available |= ASSIGNED_DEVICE_CAP_MSI; /* Only 32-bit/no-mask currently supported */ ret = pci_add_capability(pci_dev, PCI_CAP_ID_MSI, pos, 10); if (ret < 0) { return ret; } pci_dev->msi_cap = pos; pci_set_word(pci_dev->config + pos + PCI_MSI_FLAGS, pci_get_word(pci_dev->config + pos + PCI_MSI_FLAGS) & PCI_MSI_FLAGS_QMASK); pci_set_long(pci_dev->config + pos + PCI_MSI_ADDRESS_LO, 0); pci_set_word(pci_dev->config + pos + PCI_MSI_DATA_32, 0); /* Set writable fields */ pci_set_word(pci_dev->wmask + pos + PCI_MSI_FLAGS, PCI_MSI_FLAGS_QSIZE | PCI_MSI_FLAGS_ENABLE); pci_set_long(pci_dev->wmask + pos + PCI_MSI_ADDRESS_LO, 0xfffffffc); pci_set_word(pci_dev->wmask + pos + PCI_MSI_DATA_32, 0xffff); } /* Expose MSI-X capability */ pos = pci_find_cap_offset(pci_dev, PCI_CAP_ID_MSIX, 0); if (pos != 0 && kvm_device_msix_supported(kvm_state)) { int bar_nr; uint32_t msix_table_entry; if (!check_irqchip_in_kernel()) { return -ENOTSUP; } dev->cap.available |= ASSIGNED_DEVICE_CAP_MSIX; ret = pci_add_capability(pci_dev, PCI_CAP_ID_MSIX, pos, 12); if (ret < 0) { return ret; } pci_dev->msix_cap = pos; pci_set_word(pci_dev->config + pos + PCI_MSIX_FLAGS, pci_get_word(pci_dev->config + pos + PCI_MSIX_FLAGS) & PCI_MSIX_FLAGS_QSIZE); /* Only enable and function mask bits are writable */ pci_set_word(pci_dev->wmask + pos + PCI_MSIX_FLAGS, PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL); msix_table_entry = pci_get_long(pci_dev->config + pos + PCI_MSIX_TABLE); bar_nr = msix_table_entry & PCI_MSIX_FLAGS_BIRMASK; msix_table_entry &= ~PCI_MSIX_FLAGS_BIRMASK; dev->msix_table_addr = pci_region[bar_nr].base_addr + msix_table_entry; dev->msix_max = pci_get_word(pci_dev->config + pos + PCI_MSIX_FLAGS); dev->msix_max &= PCI_MSIX_FLAGS_QSIZE; dev->msix_max += 1; } /* Minimal PM support, nothing writable, device appears to NAK changes */ pos = pci_find_cap_offset(pci_dev, PCI_CAP_ID_PM, 0); if (pos) { uint16_t pmc; ret = pci_add_capability(pci_dev, PCI_CAP_ID_PM, pos, PCI_PM_SIZEOF); if (ret < 0) { return ret; } assigned_dev_setup_cap_read(dev, pos, PCI_PM_SIZEOF); pmc = pci_get_word(pci_dev->config + pos + PCI_CAP_FLAGS); pmc &= (PCI_PM_CAP_VER_MASK | PCI_PM_CAP_DSI); pci_set_word(pci_dev->config + pos + PCI_CAP_FLAGS, pmc); /* assign_device will bring the device up to D0, so we don't need * to worry about doing that ourselves here. */ pci_set_word(pci_dev->config + pos + PCI_PM_CTRL, PCI_PM_CTRL_NO_SOFT_RESET); pci_set_byte(pci_dev->config + pos + PCI_PM_PPB_EXTENSIONS, 0); pci_set_byte(pci_dev->config + pos + PCI_PM_DATA_REGISTER, 0); } pos = pci_find_cap_offset(pci_dev, PCI_CAP_ID_EXP, 0); if (pos) { uint8_t version, size = 0; uint16_t type, devctl, lnksta; uint32_t devcap, lnkcap; version = pci_get_byte(pci_dev->config + pos + PCI_EXP_FLAGS); version &= PCI_EXP_FLAGS_VERS; if (version == 1) { size = 0x14; } else if (version == 2) { /* * Check for non-std size, accept reduced size to 0x34, * which is what bcm5761 implemented, violating the * PCIe v3.0 spec that regs should exist and be read as 0, * not optionally provided and shorten the struct size. */ size = MIN(0x3c, PCI_CONFIG_SPACE_SIZE - pos); if (size < 0x34) { error_report("%s: Invalid size PCIe cap-id 0x%x", __func__, PCI_CAP_ID_EXP); return -EINVAL; } else if (size != 0x3c) { error_report("WARNING, %s: PCIe cap-id 0x%x has " "non-standard size 0x%x; std size should be 0x3c", __func__, PCI_CAP_ID_EXP, size); } } else if (version == 0) { uint16_t vid, did; vid = pci_get_word(pci_dev->config + PCI_VENDOR_ID); did = pci_get_word(pci_dev->config + PCI_DEVICE_ID); if (vid == PCI_VENDOR_ID_INTEL && did == 0x10ed) { /* * quirk for Intel 82599 VF with invalid PCIe capability * version, should really be version 2 (same as PF) */ size = 0x3c; } } if (size == 0) { error_report("%s: Unsupported PCI express capability version %d", __func__, version); return -EINVAL; } ret = pci_add_capability(pci_dev, PCI_CAP_ID_EXP, pos, size); if (ret < 0) { return ret; } assigned_dev_setup_cap_read(dev, pos, size); type = pci_get_word(pci_dev->config + pos + PCI_EXP_FLAGS); type = (type & PCI_EXP_FLAGS_TYPE) >> 4; if (type != PCI_EXP_TYPE_ENDPOINT && type != PCI_EXP_TYPE_LEG_END && type != PCI_EXP_TYPE_RC_END) { error_report("Device assignment only supports endpoint assignment," " device type %d", type); return -EINVAL; } /* capabilities, pass existing read-only copy * PCI_EXP_FLAGS_IRQ: updated by hardware, should be direct read */ /* device capabilities: hide FLR */ devcap = pci_get_long(pci_dev->config + pos + PCI_EXP_DEVCAP); devcap &= ~PCI_EXP_DEVCAP_FLR; pci_set_long(pci_dev->config + pos + PCI_EXP_DEVCAP, devcap); /* device control: clear all error reporting enable bits, leaving * only a few host values. Note, these are * all writable, but not passed to hw. */ devctl = pci_get_word(pci_dev->config + pos + PCI_EXP_DEVCTL); devctl = (devctl & (PCI_EXP_DEVCTL_READRQ | PCI_EXP_DEVCTL_PAYLOAD)) | PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN; pci_set_word(pci_dev->config + pos + PCI_EXP_DEVCTL, devctl); devctl = PCI_EXP_DEVCTL_BCR_FLR | PCI_EXP_DEVCTL_AUX_PME; pci_set_word(pci_dev->wmask + pos + PCI_EXP_DEVCTL, ~devctl); /* Clear device status */ pci_set_word(pci_dev->config + pos + PCI_EXP_DEVSTA, 0); /* Link capabilities, expose links and latencues, clear reporting */ lnkcap = pci_get_long(pci_dev->config + pos + PCI_EXP_LNKCAP); lnkcap &= (PCI_EXP_LNKCAP_SLS | PCI_EXP_LNKCAP_MLW | PCI_EXP_LNKCAP_ASPMS | PCI_EXP_LNKCAP_L0SEL | PCI_EXP_LNKCAP_L1EL); pci_set_long(pci_dev->config + pos + PCI_EXP_LNKCAP, lnkcap); /* Link control, pass existing read-only copy. Should be writable? */ /* Link status, only expose current speed and width */ lnksta = pci_get_word(pci_dev->config + pos + PCI_EXP_LNKSTA); lnksta &= (PCI_EXP_LNKSTA_CLS | PCI_EXP_LNKSTA_NLW); pci_set_word(pci_dev->config + pos + PCI_EXP_LNKSTA, lnksta); if (version >= 2) { /* Slot capabilities, control, status - not needed for endpoints */ pci_set_long(pci_dev->config + pos + PCI_EXP_SLTCAP, 0); pci_set_word(pci_dev->config + pos + PCI_EXP_SLTCTL, 0); pci_set_word(pci_dev->config + pos + PCI_EXP_SLTSTA, 0); /* Root control, capabilities, status - not needed for endpoints */ pci_set_word(pci_dev->config + pos + PCI_EXP_RTCTL, 0); pci_set_word(pci_dev->config + pos + PCI_EXP_RTCAP, 0); pci_set_long(pci_dev->config + pos + PCI_EXP_RTSTA, 0); /* Device capabilities/control 2, pass existing read-only copy */ /* Link control 2, pass existing read-only copy */ } } pos = pci_find_cap_offset(pci_dev, PCI_CAP_ID_PCIX, 0); if (pos) { uint16_t cmd; uint32_t status; /* Only expose the minimum, 8 byte capability */ ret = pci_add_capability(pci_dev, PCI_CAP_ID_PCIX, pos, 8); if (ret < 0) { return ret; } assigned_dev_setup_cap_read(dev, pos, 8); /* Command register, clear upper bits, including extended modes */ cmd = pci_get_word(pci_dev->config + pos + PCI_X_CMD); cmd &= (PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO | PCI_X_CMD_MAX_READ | PCI_X_CMD_MAX_SPLIT); pci_set_word(pci_dev->config + pos + PCI_X_CMD, cmd); /* Status register, update with emulated PCI bus location, clear * error bits, leave the rest. */ status = pci_get_long(pci_dev->config + pos + PCI_X_STATUS); status &= ~(PCI_X_STATUS_BUS | PCI_X_STATUS_DEVFN); status |= (pci_bus_num(pci_dev->bus) << 8) | pci_dev->devfn; status &= ~(PCI_X_STATUS_SPL_DISC | PCI_X_STATUS_UNX_SPL | PCI_X_STATUS_SPL_ERR); pci_set_long(pci_dev->config + pos + PCI_X_STATUS, status); } pos = pci_find_cap_offset(pci_dev, PCI_CAP_ID_VPD, 0); if (pos) { /* Direct R/W passthrough */ ret = pci_add_capability(pci_dev, PCI_CAP_ID_VPD, pos, 8); if (ret < 0) { return ret; } assigned_dev_setup_cap_read(dev, pos, 8); /* direct write for cap content */ assigned_dev_direct_config_write(dev, pos + 2, 6); } /* Devices can have multiple vendor capabilities, get them all */ for (pos = 0; (pos = pci_find_cap_offset(pci_dev, PCI_CAP_ID_VNDR, pos)); pos += PCI_CAP_LIST_NEXT) { uint8_t len = pci_get_byte(pci_dev->config + pos + PCI_CAP_FLAGS); /* Direct R/W passthrough */ ret = pci_add_capability(pci_dev, PCI_CAP_ID_VNDR, pos, len); if (ret < 0) { return ret; } assigned_dev_setup_cap_read(dev, pos, len); /* direct write for cap content */ assigned_dev_direct_config_write(dev, pos + 2, len - 2); } /* If real and virtual capability list status bits differ, virtualize the * access. */ if ((pci_get_word(pci_dev->config + PCI_STATUS) & PCI_STATUS_CAP_LIST) != (assigned_dev_pci_read_byte(pci_dev, PCI_STATUS) & PCI_STATUS_CAP_LIST)) { dev->emulate_config_read[PCI_STATUS] |= PCI_STATUS_CAP_LIST; } return 0; } | 19,421 |
1 | static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { value &= 0x3fff; if (env->cp15.c15_cpar != value) { /* Changes cp0 to cp13 behavior, so needs a TB flush. */ tb_flush(env); env->cp15.c15_cpar = value; } } | 19,422 |
1 | static int compensate_volume(AVFilterContext *ctx) { struct SOFAlizerContext *s = ctx->priv; float compensate; float energy = 0; float *ir; int m; if (s->sofa.ncid) { /* find IR at front center position in the SOFA file (IR closest to 0°,0°,1m) */ struct NCSofa *sofa = &s->sofa; m = find_m(s, 0, 0, 1); /* get energy of that IR and compensate volume */ ir = sofa->data_ir + 2 * m * sofa->n_samples; if (sofa->n_samples & 31) { energy = avpriv_scalarproduct_float_c(ir, ir, sofa->n_samples); } else { energy = s->fdsp->scalarproduct_float(ir, ir, sofa->n_samples); } compensate = 256 / (sofa->n_samples * sqrt(energy)); av_log(ctx, AV_LOG_DEBUG, "Compensate-factor: %f\n", compensate); ir = sofa->data_ir; /* apply volume compensation to IRs */ s->fdsp->vector_fmul_scalar(ir, ir, compensate, sofa->n_samples * sofa->m_dim * 2); emms_c(); } return 0; } | 19,424 |
1 | static char *qio_channel_websock_handshake_entry(const char *handshake, size_t handshake_len, const char *name) { char *begin, *end, *ret = NULL; char *line = g_strdup_printf("%s%s: ", QIO_CHANNEL_WEBSOCK_HANDSHAKE_DELIM, name); begin = g_strstr_len(handshake, handshake_len, line); if (begin != NULL) { begin += strlen(line); end = g_strstr_len(begin, handshake_len - (begin - handshake), QIO_CHANNEL_WEBSOCK_HANDSHAKE_DELIM); if (end != NULL) { ret = g_strndup(begin, end - begin); } } g_free(line); return ret; } | 19,425 |
1 | static void machvirt_init(MachineState *machine) { VirtMachineState *vms = VIRT_MACHINE(machine); VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(machine); qemu_irq pic[NUM_IRQS]; MemoryRegion *sysmem = get_system_memory(); MemoryRegion *secure_sysmem = NULL; int n, virt_max_cpus; MemoryRegion *ram = g_new(MemoryRegion, 1); const char *cpu_model = machine->cpu_model; char **cpustr; ObjectClass *oc; const char *typename; CPUClass *cc; Error *err = NULL; bool firmware_loaded = bios_name || drive_get(IF_PFLASH, 0, 0); uint8_t clustersz; if (!cpu_model) { cpu_model = "cortex-a15"; } /* We can probe only here because during property set * KVM is not available yet */ if (!vms->gic_version) { if (!kvm_enabled()) { error_report("gic-version=host requires KVM"); exit(1); } vms->gic_version = kvm_arm_vgic_probe(); if (!vms->gic_version) { error_report("Unable to determine GIC version supported by host"); exit(1); } } /* Separate the actual CPU model name from any appended features */ cpustr = g_strsplit(cpu_model, ",", 2); if (!cpuname_valid(cpustr[0])) { error_report("mach-virt: CPU %s not supported", cpustr[0]); exit(1); } /* If we have an EL3 boot ROM then the assumption is that it will * implement PSCI itself, so disable QEMU's internal implementation * so it doesn't get in the way. Instead of starting secondary * CPUs in PSCI powerdown state we will start them all running and * let the boot ROM sort them out. * The usual case is that we do use QEMU's PSCI implementation; * if the guest has EL2 then we will use SMC as the conduit, * and otherwise we will use HVC (for backwards compatibility and * because if we're using KVM then we must use HVC). */ if (vms->secure && firmware_loaded) { vms->psci_conduit = QEMU_PSCI_CONDUIT_DISABLED; } else if (vms->virt) { vms->psci_conduit = QEMU_PSCI_CONDUIT_SMC; } else { vms->psci_conduit = QEMU_PSCI_CONDUIT_HVC; } /* The maximum number of CPUs depends on the GIC version, or on how * many redistributors we can fit into the memory map. */ if (vms->gic_version == 3) { virt_max_cpus = vms->memmap[VIRT_GIC_REDIST].size / 0x20000; clustersz = GICV3_TARGETLIST_BITS; } else { virt_max_cpus = GIC_NCPU; clustersz = GIC_TARGETLIST_BITS; } if (max_cpus > virt_max_cpus) { error_report("Number of SMP CPUs requested (%d) exceeds max CPUs " "supported by machine 'mach-virt' (%d)", max_cpus, virt_max_cpus); exit(1); } vms->smp_cpus = smp_cpus; if (machine->ram_size > vms->memmap[VIRT_MEM].size) { error_report("mach-virt: cannot model more than %dGB RAM", RAMLIMIT_GB); exit(1); } if (vms->virt && kvm_enabled()) { error_report("mach-virt: KVM does not support providing " "Virtualization extensions to the guest CPU"); exit(1); } if (vms->secure) { if (kvm_enabled()) { error_report("mach-virt: KVM does not support Security extensions"); exit(1); } /* The Secure view of the world is the same as the NonSecure, * but with a few extra devices. Create it as a container region * containing the system memory at low priority; any secure-only * devices go in at higher priority and take precedence. */ secure_sysmem = g_new(MemoryRegion, 1); memory_region_init(secure_sysmem, OBJECT(machine), "secure-memory", UINT64_MAX); memory_region_add_subregion_overlap(secure_sysmem, 0, sysmem, -1); } create_fdt(vms); oc = cpu_class_by_name(TYPE_ARM_CPU, cpustr[0]); if (!oc) { error_report("Unable to find CPU definition"); exit(1); } typename = object_class_get_name(oc); /* convert -smp CPU options specified by the user into global props */ cc = CPU_CLASS(oc); cc->parse_features(typename, cpustr[1], &err); g_strfreev(cpustr); if (err) { error_report_err(err); exit(1); } for (n = 0; n < smp_cpus; n++) { Object *cpuobj = object_new(typename); if (!vmc->disallow_affinity_adjustment) { /* Adjust MPIDR like 64-bit KVM hosts, which incorporate the * GIC's target-list limitations. 32-bit KVM hosts currently * always create clusters of 4 CPUs, but that is expected to * change when they gain support for gicv3. When KVM is enabled * it will override the changes we make here, therefore our * purposes are to make TCG consistent (with 64-bit KVM hosts) * and to improve SGI efficiency. */ uint8_t aff1 = n / clustersz; uint8_t aff0 = n % clustersz; object_property_set_int(cpuobj, (aff1 << ARM_AFF1_SHIFT) | aff0, "mp-affinity", NULL); } if (!vms->secure) { object_property_set_bool(cpuobj, false, "has_el3", NULL); } if (!vms->virt && object_property_find(cpuobj, "has_el2", NULL)) { object_property_set_bool(cpuobj, false, "has_el2", NULL); } if (vms->psci_conduit != QEMU_PSCI_CONDUIT_DISABLED) { object_property_set_int(cpuobj, vms->psci_conduit, "psci-conduit", NULL); /* Secondary CPUs start in PSCI powered-down state */ if (n > 0) { object_property_set_bool(cpuobj, true, "start-powered-off", NULL); } } if (vmc->no_pmu && object_property_find(cpuobj, "pmu", NULL)) { object_property_set_bool(cpuobj, false, "pmu", NULL); } if (object_property_find(cpuobj, "reset-cbar", NULL)) { object_property_set_int(cpuobj, vms->memmap[VIRT_CPUPERIPHS].base, "reset-cbar", &error_abort); } object_property_set_link(cpuobj, OBJECT(sysmem), "memory", &error_abort); if (vms->secure) { object_property_set_link(cpuobj, OBJECT(secure_sysmem), "secure-memory", &error_abort); } object_property_set_bool(cpuobj, true, "realized", NULL); } fdt_add_timer_nodes(vms); fdt_add_cpu_nodes(vms); fdt_add_psci_node(vms); memory_region_allocate_system_memory(ram, NULL, "mach-virt.ram", machine->ram_size); memory_region_add_subregion(sysmem, vms->memmap[VIRT_MEM].base, ram); create_flash(vms, sysmem, secure_sysmem ? secure_sysmem : sysmem); create_gic(vms, pic); fdt_add_pmu_nodes(vms); create_uart(vms, pic, VIRT_UART, sysmem, serial_hds[0]); if (vms->secure) { create_secure_ram(vms, secure_sysmem); create_uart(vms, pic, VIRT_SECURE_UART, secure_sysmem, serial_hds[1]); } create_rtc(vms, pic); create_pcie(vms, pic); create_gpio(vms, pic); /* Create mmio transports, so the user can create virtio backends * (which will be automatically plugged in to the transports). If * no backend is created the transport will just sit harmlessly idle. */ create_virtio_devices(vms, pic); vms->fw_cfg = create_fw_cfg(vms, &address_space_memory); rom_set_fw(vms->fw_cfg); vms->machine_done.notify = virt_machine_done; qemu_add_machine_init_done_notifier(&vms->machine_done); vms->bootinfo.ram_size = machine->ram_size; vms->bootinfo.kernel_filename = machine->kernel_filename; vms->bootinfo.kernel_cmdline = machine->kernel_cmdline; vms->bootinfo.initrd_filename = machine->initrd_filename; vms->bootinfo.nb_cpus = smp_cpus; vms->bootinfo.board_id = -1; vms->bootinfo.loader_start = vms->memmap[VIRT_MEM].base; vms->bootinfo.get_dtb = machvirt_dtb; vms->bootinfo.firmware_loaded = firmware_loaded; arm_load_kernel(ARM_CPU(first_cpu), &vms->bootinfo); /* * arm_load_kernel machine init done notifier registration must * happen before the platform_bus_create call. In this latter, * another notifier is registered which adds platform bus nodes. * Notifiers are executed in registration reverse order. */ create_platform_bus(vms, pic); } | 19,426 |
1 | static int check_refcounts_l2(BlockDriverState *bs, BdrvCheckResult *res, uint16_t *refcount_table, int64_t refcount_table_size, int64_t l2_offset, int flags) { BDRVQcowState *s = bs->opaque; uint64_t *l2_table, l2_entry; uint64_t next_contiguous_offset = 0; int i, l2_size, nb_csectors; /* Read L2 table from disk */ l2_size = s->l2_size * sizeof(uint64_t); l2_table = g_malloc(l2_size); if (bdrv_pread(bs->file, l2_offset, l2_table, l2_size) != l2_size) goto fail; /* Do the actual checks */ for(i = 0; i < s->l2_size; i++) { l2_entry = be64_to_cpu(l2_table[i]); switch (qcow2_get_cluster_type(l2_entry)) { case QCOW2_CLUSTER_COMPRESSED: /* Compressed clusters don't have QCOW_OFLAG_COPIED */ if (l2_entry & QCOW_OFLAG_COPIED) { fprintf(stderr, "ERROR: cluster %" PRId64 ": " "copied flag must never be set for compressed " "clusters\n", l2_entry >> s->cluster_bits); l2_entry &= ~QCOW_OFLAG_COPIED; res->corruptions++; } /* Mark cluster as used */ nb_csectors = ((l2_entry >> s->csize_shift) & s->csize_mask) + 1; l2_entry &= s->cluster_offset_mask; inc_refcounts(bs, res, refcount_table, refcount_table_size, l2_entry & ~511, nb_csectors * 512); if (flags & CHECK_FRAG_INFO) { res->bfi.allocated_clusters++; res->bfi.compressed_clusters++; /* Compressed clusters are fragmented by nature. Since they * take up sub-sector space but we only have sector granularity * I/O we need to re-read the same sectors even for adjacent * compressed clusters. */ res->bfi.fragmented_clusters++; } break; case QCOW2_CLUSTER_ZERO: if ((l2_entry & L2E_OFFSET_MASK) == 0) { break; } /* fall through */ case QCOW2_CLUSTER_NORMAL: { uint64_t offset = l2_entry & L2E_OFFSET_MASK; if (flags & CHECK_FRAG_INFO) { res->bfi.allocated_clusters++; if (next_contiguous_offset && offset != next_contiguous_offset) { res->bfi.fragmented_clusters++; } next_contiguous_offset = offset + s->cluster_size; } /* Mark cluster as used */ inc_refcounts(bs, res, refcount_table,refcount_table_size, offset, s->cluster_size); /* Correct offsets are cluster aligned */ if (offset_into_cluster(s, offset)) { fprintf(stderr, "ERROR offset=%" PRIx64 ": Cluster is not " "properly aligned; L2 entry corrupted.\n", offset); res->corruptions++; } break; } case QCOW2_CLUSTER_UNALLOCATED: break; default: abort(); } } g_free(l2_table); return 0; fail: fprintf(stderr, "ERROR: I/O error in check_refcounts_l2\n"); g_free(l2_table); return -EIO; } | 19,427 |
1 | static int get_delayed_pic(DiracContext *s, AVFrame *picture, int *got_frame) { DiracFrame *out = s->delay_frames[0]; int i, out_idx = 0; int ret; /* find frame with lowest picture number */ for (i = 1; s->delay_frames[i]; i++) if (s->delay_frames[i]->avframe->display_picture_number < out->avframe->display_picture_number) { out = s->delay_frames[i]; out_idx = i; } for (i = out_idx; s->delay_frames[i]; i++) s->delay_frames[i] = s->delay_frames[i+1]; if (out) { out->reference ^= DELAYED_PIC_REF; *got_frame = 1; if((ret = av_frame_ref(picture, out->avframe)) < 0) return ret; } return 0; } | 19,428 |
1 | static void do_subtitle_out(AVFormatContext *s, OutputStream *ost, InputStream *ist, AVSubtitle *sub) { int subtitle_out_max_size = 1024 * 1024; int subtitle_out_size, nb, i; AVCodecContext *enc; AVPacket pkt; int64_t pts; if (sub->pts == AV_NOPTS_VALUE) { av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n"); if (exit_on_error) exit_program(1); return; } enc = ost->enc_ctx; if (!subtitle_out) { subtitle_out = av_malloc(subtitle_out_max_size); if (!subtitle_out) { av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n"); exit_program(1); } } /* Note: DVB subtitle need one packet to draw them and one other packet to clear them */ /* XXX: signal it in the codec context ? */ if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) nb = 2; else nb = 1; /* shift timestamp to honor -ss and make check_recording_time() work with -t */ pts = sub->pts; if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE) pts -= output_files[ost->file_index]->start_time; for (i = 0; i < nb; i++) { unsigned save_num_rects = sub->num_rects; ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base); if (!check_recording_time(ost)) return; sub->pts = pts; // start_display_time is required to be 0 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q); sub->end_display_time -= sub->start_display_time; sub->start_display_time = 0; if (i == 1) sub->num_rects = 0; ost->frames_encoded++; subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out, subtitle_out_max_size, sub); if (i == 1) sub->num_rects = save_num_rects; if (subtitle_out_size < 0) { av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n"); exit_program(1); } av_init_packet(&pkt); pkt.data = subtitle_out; pkt.size = subtitle_out_size; pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base); pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base); if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) { /* XXX: the pts correction is handled here. Maybe handling it in the codec would be better */ if (i == 0) pkt.pts += 90 * sub->start_display_time; else pkt.pts += 90 * sub->end_display_time; } pkt.dts = pkt.pts; write_frame(s, &pkt, ost); } } | 19,429 |
1 | static int bdrv_has_snapshot(BlockDriverState *bs) { return (bs && !bdrv_is_removable(bs) && !bdrv_is_read_only(bs)); } | 19,430 |
1 | static void dec_ill(DisasContext *dc) { cpu_abort(dc->env, "unknown opcode 0x%02x\n", dc->opcode); } | 19,432 |
1 | static int vhdx_create_new_headers(BlockDriverState *bs, uint64_t image_size, uint32_t log_size) { int ret = 0; VHDXHeader *hdr = NULL; hdr = g_malloc0(sizeof(VHDXHeader)); hdr->signature = VHDX_HEADER_SIGNATURE; hdr->sequence_number = g_random_int(); hdr->log_version = 0; hdr->version = 1; hdr->log_length = log_size; hdr->log_offset = VHDX_HEADER_SECTION_END; vhdx_guid_generate(&hdr->file_write_guid); vhdx_guid_generate(&hdr->data_write_guid); ret = vhdx_write_header(bs, hdr, VHDX_HEADER1_OFFSET, false); if (ret < 0) { goto exit; } hdr->sequence_number++; ret = vhdx_write_header(bs, hdr, VHDX_HEADER2_OFFSET, false); if (ret < 0) { goto exit; } exit: g_free(hdr); return ret; } | 19,433 |
0 | void checkasm_check_h264qpel(void) { LOCAL_ALIGNED_16(uint8_t, buf0, [BUF_SIZE]); LOCAL_ALIGNED_16(uint8_t, buf1, [BUF_SIZE]); LOCAL_ALIGNED_16(uint8_t, dst0, [BUF_SIZE]); LOCAL_ALIGNED_16(uint8_t, dst1, [BUF_SIZE]); H264QpelContext h; int op, bit_depth, i, j; for (op = 0; op < 2; op++) { qpel_mc_func (*tab)[16] = op ? h.avg_h264_qpel_pixels_tab : h.put_h264_qpel_pixels_tab; const char *op_name = op ? "avg" : "put"; for (bit_depth = 8; bit_depth <= 10; bit_depth++) { ff_h264qpel_init(&h, bit_depth); for (i = 0; i < (op ? 3 : 4); i++) { int size = 16 >> i; for (j = 0; j < 16; j++) if (check_func(tab[i][j], "%s_h264_qpel_%d_mc%d%d_%d", op_name, size, j & 3, j >> 2, bit_depth)) { randomize_buffers(); call_ref(dst0, src0, (ptrdiff_t)size * SIZEOF_PIXEL); call_new(dst1, src1, (ptrdiff_t)size * SIZEOF_PIXEL); if (memcmp(buf0, buf1, BUF_SIZE) || memcmp(dst0, dst1, BUF_SIZE)) fail(); bench_new(dst1, src1, (ptrdiff_t)size * SIZEOF_PIXEL); } } } report("%s", op_name); } } | 19,436 |
0 | static int wma_decode_init(AVCodecContext * avctx) { WMADecodeContext *s = avctx->priv_data; int i, flags1, flags2; float *window; uint8_t *extradata; float bps1, high_freq, bps; int sample_rate1; int coef_vlc_table; s->sample_rate = avctx->sample_rate; s->nb_channels = avctx->channels; s->bit_rate = avctx->bit_rate; s->block_align = avctx->block_align; if (avctx->codec_id == CODEC_ID_WMAV1) { s->version = 1; } else { s->version = 2; } /* extract flag infos */ flags1 = 0; flags2 = 0; extradata = avctx->extradata; if (s->version == 1 && avctx->extradata_size >= 4) { flags1 = extradata[0] | (extradata[1] << 8); flags2 = extradata[2] | (extradata[3] << 8); } else if (s->version == 2 && avctx->extradata_size >= 6) { flags1 = extradata[0] | (extradata[1] << 8) | (extradata[2] << 16) | (extradata[3] << 24); flags2 = extradata[4] | (extradata[5] << 8); } s->use_exp_vlc = flags2 & 0x0001; s->use_bit_reservoir = flags2 & 0x0002; s->use_variable_block_len = flags2 & 0x0004; /* compute MDCT block size */ if (s->sample_rate <= 16000) { s->frame_len_bits = 9; } else if (s->sample_rate <= 22050 || (s->sample_rate <= 32000 && s->version == 1)) { s->frame_len_bits = 10; } else { s->frame_len_bits = 11; } s->frame_len = 1 << s->frame_len_bits; if (s->use_variable_block_len) { s->nb_block_sizes = s->frame_len_bits - BLOCK_MIN_BITS + 1; } else { s->nb_block_sizes = 1; } /* init rate dependant parameters */ s->use_noise_coding = 1; high_freq = s->sample_rate * 0.5; /* if version 2, then the rates are normalized */ sample_rate1 = s->sample_rate; if (s->version == 2) { if (sample_rate1 >= 44100) sample_rate1 = 44100; else if (sample_rate1 >= 22050) sample_rate1 = 22050; else if (sample_rate1 >= 16000) sample_rate1 = 16000; else if (sample_rate1 >= 11025) sample_rate1 = 11025; else if (sample_rate1 >= 8000) sample_rate1 = 8000; } bps = (float)s->bit_rate / (float)(s->nb_channels * s->sample_rate); s->byte_offset_bits = av_log2((int)(bps * s->frame_len / 8.0)) + 2; /* compute high frequency value and choose if noise coding should be activated */ bps1 = bps; if (s->nb_channels == 2) bps1 = bps * 1.6; if (sample_rate1 == 44100) { if (bps1 >= 0.61) s->use_noise_coding = 0; else high_freq = high_freq * 0.4; } else if (sample_rate1 == 22050) { if (bps1 >= 1.16) s->use_noise_coding = 0; else if (bps1 >= 0.72) high_freq = high_freq * 0.7; else high_freq = high_freq * 0.6; } else if (sample_rate1 == 16000) { if (bps > 0.5) high_freq = high_freq * 0.5; else high_freq = high_freq * 0.3; } else if (sample_rate1 == 11025) { high_freq = high_freq * 0.7; } else if (sample_rate1 == 8000) { if (bps <= 0.625) { high_freq = high_freq * 0.5; } else if (bps > 0.75) { s->use_noise_coding = 0; } else { high_freq = high_freq * 0.65; } } else { if (bps >= 0.8) { high_freq = high_freq * 0.75; } else if (bps >= 0.6) { high_freq = high_freq * 0.6; } else { high_freq = high_freq * 0.5; } } #ifdef DEBUG_PARAMS printf("flags1=0x%x flags2=0x%x\n", flags1, flags2); printf("version=%d channels=%d sample_rate=%d bitrate=%d block_align=%d\n", s->version, s->nb_channels, s->sample_rate, s->bit_rate, s->block_align); printf("bps=%f bps1=%f high_freq=%f bitoffset=%d\n", bps, bps1, high_freq, s->byte_offset_bits); printf("use_noise_coding=%d use_exp_vlc=%d\n", s->use_noise_coding, s->use_exp_vlc); #endif /* compute the scale factor band sizes for each MDCT block size */ { int a, b, pos, lpos, k, block_len, i, j, n; const uint8_t *table; if (s->version == 1) { s->coefs_start = 3; } else { s->coefs_start = 0; } for(k = 0; k < s->nb_block_sizes; k++) { block_len = s->frame_len >> k; if (s->version == 1) { lpos = 0; for(i=0;i<25;i++) { a = wma_critical_freqs[i]; b = s->sample_rate; pos = ((block_len * 2 * a) + (b >> 1)) / b; if (pos > block_len) pos = block_len; s->exponent_bands[0][i] = pos - lpos; if (pos >= block_len) { i++; break; } lpos = pos; } s->exponent_sizes[0] = i; } else { /* hardcoded tables */ table = NULL; a = s->frame_len_bits - BLOCK_MIN_BITS - k; if (a < 3) { if (s->sample_rate >= 44100) table = exponent_band_44100[a]; else if (s->sample_rate >= 32000) table = exponent_band_32000[a]; else if (s->sample_rate >= 22050) table = exponent_band_22050[a]; } if (table) { n = *table++; for(i=0;i<n;i++) s->exponent_bands[k][i] = table[i]; s->exponent_sizes[k] = n; } else { j = 0; lpos = 0; for(i=0;i<25;i++) { a = wma_critical_freqs[i]; b = s->sample_rate; pos = ((block_len * 2 * a) + (b << 1)) / (4 * b); pos <<= 2; if (pos > block_len) pos = block_len; if (pos > lpos) s->exponent_bands[k][j++] = pos - lpos; if (pos >= block_len) break; lpos = pos; } s->exponent_sizes[k] = j; } } /* max number of coefs */ s->coefs_end[k] = (s->frame_len - ((s->frame_len * 9) / 100)) >> k; /* high freq computation */ s->high_band_start[k] = (int)((block_len * 2 * high_freq) / s->sample_rate + 0.5); n = s->exponent_sizes[k]; j = 0; pos = 0; for(i=0;i<n;i++) { int start, end; start = pos; pos += s->exponent_bands[k][i]; end = pos; if (start < s->high_band_start[k]) start = s->high_band_start[k]; if (end > s->coefs_end[k]) end = s->coefs_end[k]; if (end > start) s->exponent_high_bands[k][j++] = end - start; } s->exponent_high_sizes[k] = j; #if 0 trace("%5d: coefs_end=%d high_band_start=%d nb_high_bands=%d: ", s->frame_len >> k, s->coefs_end[k], s->high_band_start[k], s->exponent_high_sizes[k]); for(j=0;j<s->exponent_high_sizes[k];j++) trace(" %d", s->exponent_high_bands[k][j]); trace("\n"); #endif } } #ifdef DEBUG_TRACE { int i, j; for(i = 0; i < s->nb_block_sizes; i++) { trace("%5d: n=%2d:", s->frame_len >> i, s->exponent_sizes[i]); for(j=0;j<s->exponent_sizes[i];j++) trace(" %d", s->exponent_bands[i][j]); trace("\n"); } } #endif /* init MDCT */ for(i = 0; i < s->nb_block_sizes; i++) ff_mdct_init(&s->mdct_ctx[i], s->frame_len_bits - i + 1, 1); /* init MDCT windows : simple sinus window */ for(i = 0; i < s->nb_block_sizes; i++) { int n, j; float alpha; n = 1 << (s->frame_len_bits - i); window = av_malloc(sizeof(float) * n); alpha = M_PI / (2.0 * n); for(j=0;j<n;j++) { window[n - j - 1] = sin((j + 0.5) * alpha); } s->windows[i] = window; } s->reset_block_lengths = 1; if (s->use_noise_coding) { /* init the noise generator */ if (s->use_exp_vlc) s->noise_mult = 0.02; else s->noise_mult = 0.04; #if defined(DEBUG_TRACE) for(i=0;i<NOISE_TAB_SIZE;i++) s->noise_table[i] = 1.0 * s->noise_mult; #else { unsigned int seed; float norm; seed = 1; norm = (1.0 / (float)(1LL << 31)) * sqrt(3) * s->noise_mult; for(i=0;i<NOISE_TAB_SIZE;i++) { seed = seed * 314159 + 1; s->noise_table[i] = (float)((int)seed) * norm; } } #endif init_vlc(&s->hgain_vlc, 9, sizeof(hgain_huffbits), hgain_huffbits, 1, 1, hgain_huffcodes, 2, 2); } if (s->use_exp_vlc) { init_vlc(&s->exp_vlc, 9, sizeof(scale_huffbits), scale_huffbits, 1, 1, scale_huffcodes, 4, 4); } else { wma_lsp_to_curve_init(s, s->frame_len); } /* choose the VLC tables for the coefficients */ coef_vlc_table = 2; if (s->sample_rate >= 32000) { if (bps1 < 0.72) coef_vlc_table = 0; else if (bps1 < 1.16) coef_vlc_table = 1; } init_coef_vlc(&s->coef_vlc[0], &s->run_table[0], &s->level_table[0], &coef_vlcs[coef_vlc_table * 2]); init_coef_vlc(&s->coef_vlc[1], &s->run_table[1], &s->level_table[1], &coef_vlcs[coef_vlc_table * 2 + 1]); return 0; } | 19,437 |
0 | const char *av_get_string(void *obj, const char *name, const AVOption **o_out, char *buf, int buf_len) { const AVOption *o = av_opt_find(obj, name, NULL, 0, 0); void *dst; uint8_t *bin; int len, i; if (!o || o->offset<=0) return NULL; if (o->type != FF_OPT_TYPE_STRING && (!buf || !buf_len)) return NULL; dst= ((uint8_t*)obj) + o->offset; if (o_out) *o_out= o; switch (o->type) { case FF_OPT_TYPE_FLAGS: snprintf(buf, buf_len, "0x%08X",*(int *)dst);break; case FF_OPT_TYPE_INT: snprintf(buf, buf_len, "%d" , *(int *)dst);break; case FF_OPT_TYPE_INT64: snprintf(buf, buf_len, "%"PRId64, *(int64_t*)dst);break; case FF_OPT_TYPE_FLOAT: snprintf(buf, buf_len, "%f" , *(float *)dst);break; case FF_OPT_TYPE_DOUBLE: snprintf(buf, buf_len, "%f" , *(double *)dst);break; case FF_OPT_TYPE_RATIONAL: snprintf(buf, buf_len, "%d/%d", ((AVRational*)dst)->num, ((AVRational*)dst)->den);break; case FF_OPT_TYPE_STRING: return *(void**)dst; case FF_OPT_TYPE_BINARY: len = *(int*)(((uint8_t *)dst) + sizeof(uint8_t *)); if (len >= (buf_len + 1)/2) return NULL; bin = *(uint8_t**)dst; for (i = 0; i < len; i++) snprintf(buf + i*2, 3, "%02X", bin[i]); break; default: return NULL; } return buf; } | 19,438 |
0 | static void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){ int i; memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster then a loop? /* mpeg1 */ d->mb_incr= s->mb_incr; for(i=0; i<3; i++) d->last_dc[i]= s->last_dc[i]; /* statistics */ d->mv_bits= s->mv_bits; d->i_tex_bits= s->i_tex_bits; d->p_tex_bits= s->p_tex_bits; d->i_count= s->i_count; d->p_count= s->p_count; d->skip_count= s->skip_count; d->misc_bits= s->misc_bits; d->last_bits= 0; d->mb_skiped= s->mb_skiped; } | 19,439 |
0 | static int get_bits(Jpeg2000DecoderContext *s, int n) { int res = 0; if (s->buf_end - s->buf < ((n - s->bit_index) >> 8)) return AVERROR(EINVAL); while (--n >= 0) { res <<= 1; if (s->bit_index == 0) { s->bit_index = 7 + (*s->buf != 0xff); s->buf++; } s->bit_index--; res |= (*s->buf >> s->bit_index) & 1; } return res; } | 19,440 |
1 | static int rgbToRgbWrapper(SwsContext *c, const uint8_t *src[], int srcStride[], int srcSliceY, int srcSliceH, uint8_t *dst[], int dstStride[]) { const enum PixelFormat srcFormat = c->srcFormat; const enum PixelFormat dstFormat = c->dstFormat; const int srcBpp = (c->srcFormatBpp + 7) >> 3; const int dstBpp = (c->dstFormatBpp + 7) >> 3; const int srcId = c->srcFormatBpp; const int dstId = c->dstFormatBpp; void (*conv)(const uint8_t *src, uint8_t *dst, int src_size) = NULL; #define CONV_IS(src, dst) (srcFormat == PIX_FMT_##src && dstFormat == PIX_FMT_##dst) if (isRGBA32(srcFormat) && isRGBA32(dstFormat)) { if ( CONV_IS(ABGR, RGBA) || CONV_IS(ARGB, BGRA) || CONV_IS(BGRA, ARGB) || CONV_IS(RGBA, ABGR)) conv = shuffle_bytes_3210; else if (CONV_IS(ABGR, ARGB) || CONV_IS(ARGB, ABGR)) conv = shuffle_bytes_0321; else if (CONV_IS(ABGR, BGRA) || CONV_IS(ARGB, RGBA)) conv = shuffle_bytes_1230; else if (CONV_IS(BGRA, RGBA) || CONV_IS(RGBA, BGRA)) conv = shuffle_bytes_2103; else if (CONV_IS(BGRA, ABGR) || CONV_IS(RGBA, ARGB)) conv = shuffle_bytes_3012; } else /* BGR -> BGR */ if ((isBGRinInt(srcFormat) && isBGRinInt(dstFormat)) || (isRGBinInt(srcFormat) && isRGBinInt(dstFormat))) { switch (srcId | (dstId << 16)) { case 0x000F0010: conv = rgb16to15; break; case 0x000F0018: conv = rgb24to15; break; case 0x000F0020: conv = rgb32to15; break; case 0x0010000F: conv = rgb15to16; break; case 0x00100018: conv = rgb24to16; break; case 0x00100020: conv = rgb32to16; break; case 0x0018000F: conv = rgb15to24; break; case 0x00180010: conv = rgb16to24; break; case 0x00180020: conv = rgb32to24; break; case 0x0020000F: conv = rgb15to32; break; case 0x00200010: conv = rgb16to32; break; case 0x00200018: conv = rgb24to32; break; } } else if ((isBGRinInt(srcFormat) && isRGBinInt(dstFormat)) || (isRGBinInt(srcFormat) && isBGRinInt(dstFormat))) { switch (srcId | (dstId << 16)) { case 0x000C000C: conv = rgb12tobgr12; break; case 0x000F000F: conv = rgb15tobgr15; break; case 0x000F0010: conv = rgb16tobgr15; break; case 0x000F0018: conv = rgb24tobgr15; break; case 0x000F0020: conv = rgb32tobgr15; break; case 0x0010000F: conv = rgb15tobgr16; break; case 0x00100010: conv = rgb16tobgr16; break; case 0x00100018: conv = rgb24tobgr16; break; case 0x00100020: conv = rgb32tobgr16; break; case 0x0018000F: conv = rgb15tobgr24; break; case 0x00180010: conv = rgb16tobgr24; break; case 0x00180018: conv = rgb24tobgr24; break; case 0x00180020: conv = rgb32tobgr24; break; case 0x0020000F: conv = rgb15tobgr32; break; case 0x00200010: conv = rgb16tobgr32; break; case 0x00200018: conv = rgb24tobgr32; break; } } if (!conv) { av_log(c, AV_LOG_ERROR, "internal error %s -> %s converter\n", sws_format_name(srcFormat), sws_format_name(dstFormat)); } else { const uint8_t *srcPtr = src[0]; uint8_t *dstPtr = dst[0]; if ((srcFormat == PIX_FMT_RGB32_1 || srcFormat == PIX_FMT_BGR32_1) && !isRGBA32(dstFormat)) srcPtr += ALT32_CORR; if ((dstFormat == PIX_FMT_RGB32_1 || dstFormat == PIX_FMT_BGR32_1) && !isRGBA32(srcFormat)) dstPtr += ALT32_CORR; if (dstStride[0] * srcBpp == srcStride[0] * dstBpp && srcStride[0] > 0 && !(srcStride[0] % srcBpp)) conv(srcPtr, dstPtr + dstStride[0] * srcSliceY, srcSliceH * srcStride[0]); else { int i; dstPtr += dstStride[0] * srcSliceY; for (i = 0; i < srcSliceH; i++) { conv(srcPtr, dstPtr, c->srcW * srcBpp); srcPtr += srcStride[0]; dstPtr += dstStride[0]; } } } return srcSliceH; } | 19,442 |
1 | static void gen_dmtc0(DisasContext *ctx, TCGv arg, int reg, int sel) { const char *rn = "invalid"; if (sel != 0) check_insn(ctx, ISA_MIPS64); if (ctx->tb->cflags & CF_USE_ICOUNT) { gen_io_start(); } switch (reg) { case 0: switch (sel) { case 0: gen_helper_mtc0_index(cpu_env, arg); rn = "Index"; case 1: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mtc0_mvpcontrol(cpu_env, arg); rn = "MVPControl"; CP0_CHECK(ctx->insn_flags & ASE_MT); /* ignored */ rn = "MVPConf0"; CP0_CHECK(ctx->insn_flags & ASE_MT); /* ignored */ rn = "MVPConf1"; CP0_CHECK(ctx->vp); /* ignored */ rn = "VPControl"; default: goto cp0_unimplemented; } case 1: switch (sel) { case 0: /* ignored */ rn = "Random"; case 1: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mtc0_vpecontrol(cpu_env, arg); rn = "VPEControl"; CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mtc0_vpeconf0(cpu_env, arg); rn = "VPEConf0"; CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mtc0_vpeconf1(cpu_env, arg); rn = "VPEConf1"; CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mtc0_yqmask(cpu_env, arg); rn = "YQMask"; case 5: CP0_CHECK(ctx->insn_flags & ASE_MT); tcg_gen_st_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_VPESchedule)); rn = "VPESchedule"; case 6: CP0_CHECK(ctx->insn_flags & ASE_MT); tcg_gen_st_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_VPEScheFBack)); rn = "VPEScheFBack"; case 7: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mtc0_vpeopt(cpu_env, arg); rn = "VPEOpt"; default: goto cp0_unimplemented; } switch (sel) { case 0: gen_helper_dmtc0_entrylo0(cpu_env, arg); rn = "EntryLo0"; case 1: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mtc0_tcstatus(cpu_env, arg); rn = "TCStatus"; CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mtc0_tcbind(cpu_env, arg); rn = "TCBind"; CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mtc0_tcrestart(cpu_env, arg); rn = "TCRestart"; CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mtc0_tchalt(cpu_env, arg); rn = "TCHalt"; case 5: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mtc0_tccontext(cpu_env, arg); rn = "TCContext"; case 6: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mtc0_tcschedule(cpu_env, arg); rn = "TCSchedule"; case 7: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mtc0_tcschefback(cpu_env, arg); rn = "TCScheFBack"; default: goto cp0_unimplemented; } switch (sel) { case 0: gen_helper_dmtc0_entrylo1(cpu_env, arg); rn = "EntryLo1"; case 1: CP0_CHECK(ctx->vp); /* ignored */ rn = "GlobalNumber"; default: goto cp0_unimplemented; } switch (sel) { case 0: gen_helper_mtc0_context(cpu_env, arg); rn = "Context"; case 1: // gen_helper_mtc0_contextconfig(cpu_env, arg); /* SmartMIPS ASE */ rn = "ContextConfig"; goto cp0_unimplemented; CP0_CHECK(ctx->ulri); tcg_gen_st_tl(arg, cpu_env, offsetof(CPUMIPSState, active_tc.CP0_UserLocal)); rn = "UserLocal"; default: goto cp0_unimplemented; } case 5: switch (sel) { case 0: gen_helper_mtc0_pagemask(cpu_env, arg); rn = "PageMask"; case 1: check_insn(ctx, ISA_MIPS32R2); gen_helper_mtc0_pagegrain(cpu_env, arg); rn = "PageGrain"; default: goto cp0_unimplemented; } case 6: switch (sel) { case 0: gen_helper_mtc0_wired(cpu_env, arg); rn = "Wired"; case 1: check_insn(ctx, ISA_MIPS32R2); gen_helper_mtc0_srsconf0(cpu_env, arg); rn = "SRSConf0"; check_insn(ctx, ISA_MIPS32R2); gen_helper_mtc0_srsconf1(cpu_env, arg); rn = "SRSConf1"; check_insn(ctx, ISA_MIPS32R2); gen_helper_mtc0_srsconf2(cpu_env, arg); rn = "SRSConf2"; check_insn(ctx, ISA_MIPS32R2); gen_helper_mtc0_srsconf3(cpu_env, arg); rn = "SRSConf3"; case 5: check_insn(ctx, ISA_MIPS32R2); gen_helper_mtc0_srsconf4(cpu_env, arg); rn = "SRSConf4"; default: goto cp0_unimplemented; } case 7: switch (sel) { case 0: check_insn(ctx, ISA_MIPS32R2); gen_helper_mtc0_hwrena(cpu_env, arg); ctx->bstate = BS_STOP; rn = "HWREna"; default: goto cp0_unimplemented; } case 8: switch (sel) { case 0: /* ignored */ rn = "BadVAddr"; case 1: /* ignored */ rn = "BadInstr"; /* ignored */ rn = "BadInstrP"; default: goto cp0_unimplemented; } case 9: switch (sel) { case 0: gen_helper_mtc0_count(cpu_env, arg); rn = "Count"; /* 6,7 are implementation dependent */ default: goto cp0_unimplemented; } /* Stop translation as we may have switched the execution mode */ ctx->bstate = BS_STOP; case 10: switch (sel) { case 0: gen_helper_mtc0_entryhi(cpu_env, arg); rn = "EntryHi"; default: goto cp0_unimplemented; } case 11: switch (sel) { case 0: gen_helper_mtc0_compare(cpu_env, arg); rn = "Compare"; /* 6,7 are implementation dependent */ default: goto cp0_unimplemented; } /* Stop translation as we may have switched the execution mode */ ctx->bstate = BS_STOP; case 12: switch (sel) { case 0: save_cpu_state(ctx, 1); gen_helper_mtc0_status(cpu_env, arg); /* BS_STOP isn't good enough here, hflags may have changed. */ gen_save_pc(ctx->pc + 4); ctx->bstate = BS_EXCP; rn = "Status"; case 1: check_insn(ctx, ISA_MIPS32R2); gen_helper_mtc0_intctl(cpu_env, arg); /* Stop translation as we may have switched the execution mode */ ctx->bstate = BS_STOP; rn = "IntCtl"; check_insn(ctx, ISA_MIPS32R2); gen_helper_mtc0_srsctl(cpu_env, arg); /* Stop translation as we may have switched the execution mode */ ctx->bstate = BS_STOP; rn = "SRSCtl"; check_insn(ctx, ISA_MIPS32R2); gen_mtc0_store32(arg, offsetof(CPUMIPSState, CP0_SRSMap)); /* Stop translation as we may have switched the execution mode */ ctx->bstate = BS_STOP; rn = "SRSMap"; default: goto cp0_unimplemented; } case 13: switch (sel) { case 0: save_cpu_state(ctx, 1); /* Mark as an IO operation because we may trigger a software interrupt. */ if (ctx->tb->cflags & CF_USE_ICOUNT) { gen_io_start(); } gen_helper_mtc0_cause(cpu_env, arg); if (ctx->tb->cflags & CF_USE_ICOUNT) { gen_io_end(); } /* Stop translation as we may have triggered an intetrupt */ ctx->bstate = BS_STOP; rn = "Cause"; default: goto cp0_unimplemented; } case 14: switch (sel) { case 0: tcg_gen_st_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_EPC)); rn = "EPC"; default: goto cp0_unimplemented; } case 15: switch (sel) { case 0: /* ignored */ rn = "PRid"; case 1: check_insn(ctx, ISA_MIPS32R2); gen_helper_mtc0_ebase(cpu_env, arg); rn = "EBase"; default: goto cp0_unimplemented; } case 16: switch (sel) { case 0: gen_helper_mtc0_config0(cpu_env, arg); rn = "Config"; /* Stop translation as we may have switched the execution mode */ ctx->bstate = BS_STOP; case 1: /* ignored, read only */ rn = "Config1"; gen_helper_mtc0_config2(cpu_env, arg); rn = "Config2"; /* Stop translation as we may have switched the execution mode */ ctx->bstate = BS_STOP; gen_helper_mtc0_config3(cpu_env, arg); rn = "Config3"; /* Stop translation as we may have switched the execution mode */ ctx->bstate = BS_STOP; /* currently ignored */ rn = "Config4"; case 5: gen_helper_mtc0_config5(cpu_env, arg); rn = "Config5"; /* Stop translation as we may have switched the execution mode */ ctx->bstate = BS_STOP; /* 6,7 are implementation dependent */ default: rn = "Invalid config selector"; goto cp0_unimplemented; } case 17: switch (sel) { case 0: gen_helper_mtc0_lladdr(cpu_env, arg); rn = "LLAddr"; case 1: CP0_CHECK(ctx->mrp); gen_helper_mtc0_maar(cpu_env, arg); rn = "MAAR"; CP0_CHECK(ctx->mrp); gen_helper_mtc0_maari(cpu_env, arg); rn = "MAARI"; default: goto cp0_unimplemented; } case 18: switch (sel) { case 0 ... 7: gen_helper_0e1i(mtc0_watchlo, arg, sel); rn = "WatchLo"; default: goto cp0_unimplemented; } case 19: switch (sel) { case 0 ... 7: gen_helper_0e1i(mtc0_watchhi, arg, sel); rn = "WatchHi"; default: goto cp0_unimplemented; } case 20: switch (sel) { case 0: check_insn(ctx, ISA_MIPS3); gen_helper_mtc0_xcontext(cpu_env, arg); rn = "XContext"; default: goto cp0_unimplemented; } case 21: /* Officially reserved, but sel 0 is used for R1x000 framemask */ CP0_CHECK(!(ctx->insn_flags & ISA_MIPS32R6)); switch (sel) { case 0: gen_helper_mtc0_framemask(cpu_env, arg); rn = "Framemask"; default: goto cp0_unimplemented; } case 22: /* ignored */ rn = "Diagnostic"; /* implementation dependent */ case 23: switch (sel) { case 0: gen_helper_mtc0_debug(cpu_env, arg); /* EJTAG support */ /* BS_STOP isn't good enough here, hflags may have changed. */ gen_save_pc(ctx->pc + 4); ctx->bstate = BS_EXCP; rn = "Debug"; case 1: // gen_helper_mtc0_tracecontrol(cpu_env, arg); /* PDtrace support */ /* Stop translation as we may have switched the execution mode */ ctx->bstate = BS_STOP; rn = "TraceControl"; goto cp0_unimplemented; // gen_helper_mtc0_tracecontrol2(cpu_env, arg); /* PDtrace support */ /* Stop translation as we may have switched the execution mode */ ctx->bstate = BS_STOP; rn = "TraceControl2"; goto cp0_unimplemented; // gen_helper_mtc0_usertracedata(cpu_env, arg); /* PDtrace support */ /* Stop translation as we may have switched the execution mode */ ctx->bstate = BS_STOP; rn = "UserTraceData"; goto cp0_unimplemented; // gen_helper_mtc0_tracebpc(cpu_env, arg); /* PDtrace support */ /* Stop translation as we may have switched the execution mode */ ctx->bstate = BS_STOP; rn = "TraceBPC"; goto cp0_unimplemented; default: goto cp0_unimplemented; } case 24: switch (sel) { case 0: /* EJTAG support */ tcg_gen_st_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_DEPC)); rn = "DEPC"; default: goto cp0_unimplemented; } case 25: switch (sel) { case 0: gen_helper_mtc0_performance0(cpu_env, arg); rn = "Performance0"; case 1: // gen_helper_mtc0_performance1(cpu_env, arg); rn = "Performance1"; goto cp0_unimplemented; // gen_helper_mtc0_performance2(cpu_env, arg); rn = "Performance2"; goto cp0_unimplemented; // gen_helper_mtc0_performance3(cpu_env, arg); rn = "Performance3"; goto cp0_unimplemented; // gen_helper_mtc0_performance4(cpu_env, arg); rn = "Performance4"; goto cp0_unimplemented; case 5: // gen_helper_mtc0_performance5(cpu_env, arg); rn = "Performance5"; goto cp0_unimplemented; case 6: // gen_helper_mtc0_performance6(cpu_env, arg); rn = "Performance6"; goto cp0_unimplemented; case 7: // gen_helper_mtc0_performance7(cpu_env, arg); rn = "Performance7"; goto cp0_unimplemented; default: goto cp0_unimplemented; } case 26: switch (sel) { case 0: gen_helper_mtc0_errctl(cpu_env, arg); ctx->bstate = BS_STOP; rn = "ErrCtl"; default: goto cp0_unimplemented; } case 27: switch (sel) { case 0 ... 3: /* ignored */ rn = "CacheErr"; default: goto cp0_unimplemented; } case 28: switch (sel) { case 0: case 6: gen_helper_mtc0_taglo(cpu_env, arg); rn = "TagLo"; case 1: case 5: case 7: gen_helper_mtc0_datalo(cpu_env, arg); rn = "DataLo"; default: goto cp0_unimplemented; } case 29: switch (sel) { case 0: case 6: gen_helper_mtc0_taghi(cpu_env, arg); rn = "TagHi"; case 1: case 5: case 7: gen_helper_mtc0_datahi(cpu_env, arg); rn = "DataHi"; default: rn = "invalid sel"; goto cp0_unimplemented; } case 30: switch (sel) { case 0: tcg_gen_st_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_ErrorEPC)); rn = "ErrorEPC"; default: goto cp0_unimplemented; } case 31: switch (sel) { case 0: /* EJTAG support */ gen_mtc0_store32(arg, offsetof(CPUMIPSState, CP0_DESAVE)); rn = "DESAVE"; case 2 ... 7: CP0_CHECK(ctx->kscrexist & (1 << sel)); tcg_gen_st_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_KScratch[sel-2])); rn = "KScratch"; default: goto cp0_unimplemented; } /* Stop translation as we may have switched the execution mode */ ctx->bstate = BS_STOP; default: goto cp0_unimplemented; } trace_mips_translate_c0("dmtc0", rn, reg, sel); /* For simplicity assume that all writes can cause interrupts. */ if (ctx->tb->cflags & CF_USE_ICOUNT) { gen_io_end(); ctx->bstate = BS_STOP; } return; cp0_unimplemented: qemu_log_mask(LOG_UNIMP, "dmtc0 %s (reg %d sel %d)\n", rn, reg, sel); } | 19,443 |
1 | uint64_t HELPER(neon_sub_saturate_s64)(uint64_t src1, uint64_t src2) { uint64_t res; res = src1 - src2; if (((res ^ src1) & SIGNBIT64) && ((src1 ^ src2) & SIGNBIT64)) { env->QF = 1; res = ((int64_t)src1 >> 63) ^ ~SIGNBIT64; } return res; } | 19,444 |
1 | static int coroutine_fn copy_sectors(BlockDriverState *bs, uint64_t start_sect, uint64_t cluster_offset, int n_start, int n_end) { BDRVQcowState *s = bs->opaque; QEMUIOVector qiov; struct iovec iov; int n, ret; /* * If this is the last cluster and it is only partially used, we must only * copy until the end of the image, or bdrv_check_request will fail for the * bdrv_read/write calls below. */ if (start_sect + n_end > bs->total_sectors) { n_end = bs->total_sectors - start_sect; n = n_end - n_start; if (n <= 0) { return 0; iov.iov_len = n * BDRV_SECTOR_SIZE; iov.iov_base = qemu_blockalign(bs, iov.iov_len); qemu_iovec_init_external(&qiov, &iov, 1); BLKDBG_EVENT(bs->file, BLKDBG_COW_READ); /* Call .bdrv_co_readv() directly instead of using the public block-layer * interface. This avoids double I/O throttling and request tracking, * which can lead to deadlock when block layer copy-on-read is enabled. */ ret = bs->drv->bdrv_co_readv(bs, start_sect + n_start, n, &qiov); if (ret < 0) { goto out; if (s->crypt_method) { qcow2_encrypt_sectors(s, start_sect + n_start, iov.iov_base, iov.iov_base, n, 1, &s->aes_encrypt_key); ret = qcow2_pre_write_overlap_check(bs, 0, cluster_offset + n_start * BDRV_SECTOR_SIZE, n * BDRV_SECTOR_SIZE); if (ret < 0) { goto out; BLKDBG_EVENT(bs->file, BLKDBG_COW_WRITE); ret = bdrv_co_writev(bs->file, (cluster_offset >> 9) + n_start, n, &qiov); if (ret < 0) { goto out; ret = 0; out: qemu_vfree(iov.iov_base); return ret; | 19,445 |
0 | static void FUNCC(pred16x16_vertical_add)(uint8_t *pix, const int *block_offset, const int16_t *block, ptrdiff_t stride) { int i; for(i=0; i<16; i++) FUNCC(pred4x4_vertical_add)(pix + block_offset[i], block + i*16*sizeof(pixel), stride); } | 19,446 |
0 | static int ac3_parse_audio_block(AC3DecodeContext * ctx, int index) { ac3_audio_block *ab = &ctx->audio_block; int nfchans = ctx->bsi.nfchans; int acmod = ctx->bsi.acmod; int i, bnd, rbnd, grp, seg; GetBitContext *gb = &ctx->gb; uint32_t *flags = &ab->flags; int bit_alloc_flags = 0; float drange; *flags = 0; ab->blksw = 0; for (i = 0; i < 5; i++) ab->chcoeffs[i] = 1.0; for (i = 0; i < nfchans; i++) /*block switch flag */ ab->blksw |= get_bits(gb, 1) << i; ab->dithflag = 0; for (i = 0; i < nfchans; i++) /* dithering flag */ ab->dithflag |= get_bits(gb, 1) << i; if (get_bits(gb, 1)) { /* dynamic range */ *flags |= AC3_AB_DYNRNGE; ab->dynrng = get_bits(gb, 8); drange = ((((ab->dynrng & 0x1f) | 0x20) << 13) * scale_factors[3 - (ab->dynrng >> 5)]); for (i = 0; i < nfchans; i++) ab->chcoeffs[i] *= drange; } if (acmod == 0x00) { /* dynamic range 1+1 mode */ if (get_bits(gb, 1)) { *flags |= AC3_AB_DYNRNG2E; ab->dynrng2 = get_bits(gb, 8); drange = ((((ab->dynrng2 & 0x1f) | 0x20) << 13) * scale_factors[3 - (ab->dynrng2 >> 5)]); ab->chcoeffs[1] *= drange; } } get_downmix_coeffs(ctx); ab->chincpl = 0; if (get_bits(gb, 1)) { /* coupling strategy */ *flags |= AC3_AB_CPLSTRE; ab->cplbndstrc = 0; if (get_bits(gb, 1)) { /* coupling in use */ *flags |= AC3_AB_CPLINU; for (i = 0; i < nfchans; i++) ab->chincpl |= get_bits(gb, 1) << i; if (acmod == 0x02) if (get_bits(gb, 1)) /* phase flag in use */ *flags |= AC3_AB_PHSFLGINU; ab->cplbegf = get_bits(gb, 4); ab->cplendf = get_bits(gb, 4); assert((ab->ncplsubnd = 3 + ab->cplendf - ab->cplbegf) > 0); ab->ncplbnd = ab->ncplsubnd; for (i = 0; i < ab->ncplsubnd - 1; i++) /* coupling band structure */ if (get_bits(gb, 1)) { ab->cplbndstrc |= 1 << i; ab->ncplbnd--; } } } if (*flags & AC3_AB_CPLINU) { ab->cplcoe = 0; for (i = 0; i < nfchans; i++) if (ab->chincpl & (1 << i)) if (get_bits(gb, 1)) { /* coupling co-ordinates */ ab->cplcoe |= 1 << i; ab->mstrcplco[i] = get_bits(gb, 2); for (bnd = 0; bnd < ab->ncplbnd; bnd++) { ab->cplcoexp[i][bnd] = get_bits(gb, 4); ab->cplcomant[i][bnd] = get_bits(gb, 4); } } } ab->phsflg = 0; if ((acmod == 0x02) && (*flags & AC3_AB_PHSFLGINU) && (ab->cplcoe & 1 || ab->cplcoe & (1 << 1))) { for (bnd = 0; bnd < ab->ncplbnd; bnd++) if (get_bits(gb, 1)) ab->phsflg |= 1 << bnd; } generate_coupling_coordinates(ctx); ab->rematflg = 0; if (acmod == 0x02) /* rematrixing */ if (get_bits(gb, 1)) { *flags |= AC3_AB_REMATSTR; if (ab->cplbegf > 2 || !(*flags & AC3_AB_CPLINU)) for (rbnd = 0; rbnd < 4; rbnd++) ab->rematflg |= get_bits(gb, 1) << bnd; else if (ab->cplbegf > 0 && ab->cplbegf <= 2 && *flags & AC3_AB_CPLINU) for (rbnd = 0; rbnd < 3; rbnd++) ab->rematflg |= get_bits(gb, 1) << bnd; else if (!(ab->cplbegf) && *flags & AC3_AB_CPLINU) for (rbnd = 0; rbnd < 2; rbnd++) ab->rematflg |= get_bits(gb, 1) << bnd; } if (*flags & AC3_AB_CPLINU) /* coupling exponent strategy */ ab->cplexpstr = get_bits(gb, 2); for (i = 0; i < nfchans; i++) /* channel exponent strategy */ ab->chexpstr[i] = get_bits(gb, 2); if (ctx->bsi.flags & AC3_BSI_LFEON) /* lfe exponent strategy */ ab->lfeexpstr = get_bits(gb, 1); for (i = 0; i < nfchans; i++) /* channel bandwidth code */ if (ab->chexpstr[i] != AC3_EXPSTR_REUSE) if (!(ab->chincpl & (1 << i))) { ab->chbwcod[i] = get_bits(gb, 6); assert (ab->chbwcod[i] <= 60); } if (*flags & AC3_AB_CPLINU) if (ab->cplexpstr != AC3_EXPSTR_REUSE) {/* coupling exponents */ bit_alloc_flags |= 64; ab->cplabsexp = get_bits(gb, 4) << 1; ab->cplstrtmant = (ab->cplbegf * 12) + 37; ab->cplendmant = ((ab->cplendmant + 3) * 12) + 37; ab->ncplgrps = (ab->cplendmant - ab->cplstrtmant) / (3 << (ab->cplexpstr - 1)); for (grp = 0; grp < ab->ncplgrps; grp++) ab->cplexps[grp] = get_bits(gb, 7); } for (i = 0; i < nfchans; i++) /* fbw channel exponents */ if (ab->chexpstr[i] != AC3_EXPSTR_REUSE) { bit_alloc_flags |= 1 << i; if (ab->chincpl & (1 << i)) ab->endmant[i] = (ab->cplbegf * 12) + 37; else ab->endmant[i] = ((ab->chbwcod[i] + 3) * 12) + 37; ab->nchgrps[i] = (ab->endmant[i] + (3 << (ab->chexpstr[i] - 1)) - 4) / (3 << (ab->chexpstr[i] - 1)); ab->exps[i][0] = ab->dexps[i][0] = get_bits(gb, 4); for (grp = 1; grp <= ab->nchgrps[i]; grp++) ab->exps[i][grp] = get_bits(gb, 7); ab->gainrng[i] = get_bits(gb, 2); } if (ctx->bsi.flags & AC3_BSI_LFEON) /* lfe exponents */ if (ab->lfeexpstr != AC3_EXPSTR_REUSE) { bit_alloc_flags |= 32; ab->lfeexps[0] = ab->dlfeexps[0] = get_bits(gb, 4); ab->lfeexps[1] = get_bits(gb, 7); ab->lfeexps[2] = get_bits(gb, 7); } if (decode_exponents(ctx)) {/* decode the exponents for this block */ av_log(NULL, AV_LOG_ERROR, "Error parsing exponents\n"); return -1; } if (get_bits(gb, 1)) { /* bit allocation information */ *flags |= AC3_AB_BAIE; bit_alloc_flags |= 127; ab->sdcycod = get_bits(gb, 2); ab->fdcycod = get_bits(gb, 2); ab->sgaincod = get_bits(gb, 2); ab->dbpbcod = get_bits(gb, 2); ab->floorcod = get_bits(gb, 3); } if (get_bits(gb, 1)) { /* snroffset */ *flags |= AC3_AB_SNROFFSTE; bit_alloc_flags |= 127; ab->csnroffst = get_bits(gb, 6); if (*flags & AC3_AB_CPLINU) { /* couling fine snr offset and fast gain code */ ab->cplfsnroffst = get_bits(gb, 4); ab->cplfgaincod = get_bits(gb, 3); } for (i = 0; i < nfchans; i++) { /* channel fine snr offset and fast gain code */ ab->fsnroffst[i] = get_bits(gb, 4); ab->fgaincod[i] = get_bits(gb, 3); } if (ctx->bsi.flags & AC3_BSI_LFEON) { /* lfe fine snr offset and fast gain code */ ab->lfefsnroffst = get_bits(gb, 4); ab->lfefgaincod = get_bits(gb, 3); } } if (*flags & AC3_AB_CPLINU) if (get_bits(gb, 1)) { /* coupling leak information */ bit_alloc_flags |= 64; *flags |= AC3_AB_CPLLEAKE; ab->cplfleak = get_bits(gb, 3); ab->cplsleak = get_bits(gb, 3); } if (get_bits(gb, 1)) { /* delta bit allocation information */ *flags |= AC3_AB_DELTBAIE; bit_alloc_flags |= 127; if (*flags & AC3_AB_CPLINU) { ab->cpldeltbae = get_bits(gb, 2); if (ab->cpldeltbae == AC3_DBASTR_RESERVED) { av_log(NULL, AV_LOG_ERROR, "coupling delta bit allocation strategy reserved\n"); return -1; } } for (i = 0; i < nfchans; i++) { ab->deltbae[i] = get_bits(gb, 2); if (ab->deltbae[i] == AC3_DBASTR_RESERVED) { av_log(NULL, AV_LOG_ERROR, "delta bit allocation strategy reserved\n"); return -1; } } if (*flags & AC3_AB_CPLINU) if (ab->cpldeltbae == AC3_DBASTR_NEW) { /*coupling delta offset, len and bit allocation */ ab->cpldeltnseg = get_bits(gb, 3); for (seg = 0; seg <= ab->cpldeltnseg; seg++) { ab->cpldeltoffst[seg] = get_bits(gb, 5); ab->cpldeltlen[seg] = get_bits(gb, 4); ab->cpldeltba[seg] = get_bits(gb, 3); } } for (i = 0; i < nfchans; i++) if (ab->deltbae[i] == AC3_DBASTR_NEW) {/*channel delta offset, len and bit allocation */ ab->deltnseg[i] = get_bits(gb, 3); for (seg = 0; seg <= ab->deltnseg[i]; seg++) { ab->deltoffst[i][seg] = get_bits(gb, 5); ab->deltlen[i][seg] = get_bits(gb, 4); ab->deltba[i][seg] = get_bits(gb, 3); } } } if (do_bit_allocation (ctx, bit_alloc_flags)) /* perform the bit allocation */ { av_log(NULL, AV_LOG_ERROR, "Error in bit allocation routine\n"); return -1; } if (get_bits(gb, 1)) { /* unused dummy data */ *flags |= AC3_AB_SKIPLE; ab->skipl = get_bits(gb, 9); while (ab->skipl) { get_bits(gb, 8); ab->skipl--; } } /* unpack the transform coefficients * * this also uncouples channels if coupling is in use. */ if (get_transform_coeffs(ctx)) { av_log(NULL, AV_LOG_ERROR, "Error in routine get_transform_coeffs\n"); return -1; } /* recover coefficients if rematrixing is in use */ if (*flags & AC3_AB_REMATSTR) do_rematrixing(ctx); if (ctx->output != AC3_OUTPUT_UNMODIFIED) do_downmix(ctx); return 0; } | 19,447 |
0 | static void h264_free_extradata(PayloadContext *data) { #ifdef DEBUG int ii; for (ii = 0; ii < 32; ii++) { if (data->packet_types_received[ii]) av_log(NULL, AV_LOG_DEBUG, "Received %d packets of type %d\n", data->packet_types_received[ii], ii); } #endif assert(data); assert(data->cookie == MAGIC_COOKIE); // avoid stale pointers (assert) data->cookie = DEAD_COOKIE; // and clear out this... av_free(data); } | 19,448 |
1 | static int net_socket_connect_init(VLANState *vlan, const char *model, const char *name, const char *host_str) { NetSocketState *s; int fd, connected, ret, err; struct sockaddr_in saddr; if (parse_host_port(&saddr, host_str) < 0) return -1; fd = socket(PF_INET, SOCK_STREAM, 0); if (fd < 0) { perror("socket"); return -1; } socket_set_nonblock(fd); connected = 0; for(;;) { ret = connect(fd, (struct sockaddr *)&saddr, sizeof(saddr)); if (ret < 0) { err = socket_error(); if (err == EINTR || err == EWOULDBLOCK) { } else if (err == EINPROGRESS) { break; #ifdef _WIN32 } else if (err == WSAEALREADY) { break; #endif } else { perror("connect"); closesocket(fd); return -1; } } else { connected = 1; break; } } s = net_socket_fd_init(vlan, model, name, fd, connected); if (!s) return -1; snprintf(s->nc.info_str, sizeof(s->nc.info_str), "socket: connect to %s:%d", inet_ntoa(saddr.sin_addr), ntohs(saddr.sin_port)); return 0; } | 19,449 |
1 | static int kvm_update_routing_entry(KVMState *s, struct kvm_irq_routing_entry *new_entry) { struct kvm_irq_routing_entry *entry; int n; for (n = 0; n < s->irq_routes->nr; n++) { entry = &s->irq_routes->entries[n]; if (entry->gsi != new_entry->gsi) { continue; } entry->type = new_entry->type; entry->flags = new_entry->flags; entry->u = new_entry->u; kvm_irqchip_commit_routes(s); return 0; } return -ESRCH; } | 19,450 |
1 | int nbd_client_session_co_discard(NbdClientSession *client, int64_t sector_num, int nb_sectors) { struct nbd_request request; struct nbd_reply reply; ssize_t ret; if (!(client->nbdflags & NBD_FLAG_SEND_TRIM)) { return 0; } request.type = NBD_CMD_TRIM; request.from = sector_num * 512; request.len = nb_sectors * 512; nbd_coroutine_start(client, &request); ret = nbd_co_send_request(client, &request, NULL, 0); if (ret < 0) { reply.error = -ret; } else { nbd_co_receive_reply(client, &request, &reply, NULL, 0); } nbd_coroutine_end(client, &request); return -reply.error; } | 19,451 |
1 | ssize_t vnc_client_io_error(VncState *vs, ssize_t ret, Error **errp) { if (ret <= 0) { if (ret == 0) { VNC_DEBUG("Closing down client sock: EOF\n"); } else if (ret != QIO_CHANNEL_ERR_BLOCK) { VNC_DEBUG("Closing down client sock: ret %zd (%s)\n", ret, errp ? error_get_pretty(*errp) : "Unknown"); } vnc_disconnect_start(vs); if (errp) { error_free(*errp); *errp = NULL; } return 0; } return ret; } | 19,452 |
1 | static void pl031_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass); k->init = pl031_init; dc->no_user = 1; dc->vmsd = &vmstate_pl031; } | 19,453 |
1 | static inline void fimd_swap_data(unsigned int swap_ctl, uint64_t *data) { int i; uint64_t res; uint64_t x = *data; if (swap_ctl & FIMD_WINCON_SWAP_BITS) { res = 0; for (i = 0; i < 64; i++) { if (x & (1ULL << (64 - i))) { res |= (1ULL << i); } } x = res; } if (swap_ctl & FIMD_WINCON_SWAP_BYTE) { x = bswap64(x); } if (swap_ctl & FIMD_WINCON_SWAP_HWORD) { x = ((x & 0x000000000000FFFFULL) << 48) | ((x & 0x00000000FFFF0000ULL) << 16) | ((x & 0x0000FFFF00000000ULL) >> 16) | ((x & 0xFFFF000000000000ULL) >> 48); } if (swap_ctl & FIMD_WINCON_SWAP_WORD) { x = ((x & 0x00000000FFFFFFFFULL) << 32) | ((x & 0xFFFFFFFF00000000ULL) >> 32); } *data = x; } | 19,454 |
1 | static int sdl_write_header(AVFormatContext *s) { SDLContext *sdl = s->priv_data; AVStream *st = s->streams[0]; AVCodecContext *encctx = st->codec; AVRational sar, dar; /* sample and display aspect ratios */ int i, ret; int flags = SDL_SWSURFACE | sdl->window_fullscreen ? SDL_FULLSCREEN : 0; if (!sdl->window_title) sdl->window_title = av_strdup(s->filename); if (!sdl->icon_title) sdl->icon_title = av_strdup(sdl->window_title); if (SDL_WasInit(SDL_INIT_VIDEO)) { av_log(s, AV_LOG_ERROR, "SDL video subsystem was already inited, aborting\n"); sdl->sdl_was_already_inited = 1; ret = AVERROR(EINVAL); goto fail; } if (SDL_Init(SDL_INIT_VIDEO) != 0) { av_log(s, AV_LOG_ERROR, "Unable to initialize SDL: %s\n", SDL_GetError()); ret = AVERROR(EINVAL); goto fail; } if ( s->nb_streams > 1 || encctx->codec_type != AVMEDIA_TYPE_VIDEO || encctx->codec_id != AV_CODEC_ID_RAWVIDEO) { av_log(s, AV_LOG_ERROR, "Only supports one rawvideo stream\n"); ret = AVERROR(EINVAL); goto fail; } for (i = 0; sdl_overlay_pix_fmt_map[i].pix_fmt != AV_PIX_FMT_NONE; i++) { if (sdl_overlay_pix_fmt_map[i].pix_fmt == encctx->pix_fmt) { sdl->overlay_fmt = sdl_overlay_pix_fmt_map[i].overlay_fmt; break; } } if (!sdl->overlay_fmt) { av_log(s, AV_LOG_ERROR, "Unsupported pixel format '%s', choose one of yuv420p, yuyv422, or uyvy422\n", av_get_pix_fmt_name(encctx->pix_fmt)); ret = AVERROR(EINVAL); goto fail; } /* compute overlay width and height from the codec context information */ sar = st->sample_aspect_ratio.num ? st->sample_aspect_ratio : (AVRational){ 1, 1 }; dar = av_mul_q(sar, (AVRational){ encctx->width, encctx->height }); /* we suppose the screen has a 1/1 sample aspect ratio */ if (sdl->window_width && sdl->window_height) { /* fit in the window */ if (av_cmp_q(dar, (AVRational){ sdl->window_width, sdl->window_height }) > 0) { /* fit in width */ sdl->overlay_width = sdl->window_width; sdl->overlay_height = av_rescale(sdl->overlay_width, dar.den, dar.num); } else { /* fit in height */ sdl->overlay_height = sdl->window_height; sdl->overlay_width = av_rescale(sdl->overlay_height, dar.num, dar.den); } } else { if (sar.num > sar.den) { sdl->overlay_width = encctx->width; sdl->overlay_height = av_rescale(sdl->overlay_width, dar.den, dar.num); } else { sdl->overlay_height = encctx->height; sdl->overlay_width = av_rescale(sdl->overlay_height, dar.num, dar.den); } sdl->window_width = sdl->overlay_width; sdl->window_height = sdl->overlay_height; } sdl->overlay_x = (sdl->window_width - sdl->overlay_width ) / 2; sdl->overlay_y = (sdl->window_height - sdl->overlay_height) / 2; SDL_WM_SetCaption(sdl->window_title, sdl->icon_title); sdl->surface = SDL_SetVideoMode(sdl->window_width, sdl->window_height, 24, flags); if (!sdl->surface) { av_log(s, AV_LOG_ERROR, "Unable to set video mode: %s\n", SDL_GetError()); ret = AVERROR(EINVAL); goto fail; } sdl->overlay = SDL_CreateYUVOverlay(encctx->width, encctx->height, sdl->overlay_fmt, sdl->surface); if (!sdl->overlay || sdl->overlay->pitches[0] < encctx->width) { av_log(s, AV_LOG_ERROR, "SDL does not support an overlay with size of %dx%d pixels\n", encctx->width, encctx->height); ret = AVERROR(EINVAL); goto fail; } av_log(s, AV_LOG_VERBOSE, "w:%d h:%d fmt:%s sar:%d/%d -> w:%d h:%d\n", encctx->width, encctx->height, av_get_pix_fmt_name(encctx->pix_fmt), sar.num, sar.den, sdl->overlay_width, sdl->overlay_height); return 0; fail: sdl_write_trailer(s); return ret; } | 19,455 |
1 | static void aux_bus_class_init(ObjectClass *klass, void *data) { BusClass *k = BUS_CLASS(klass); /* AUXSlave has an MMIO so we need to change the way we print information * in monitor. */ k->print_dev = aux_slave_dev_print; } | 19,456 |
0 | static int ape_decode_frame(AVCodecContext * avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; APEContext *s = avctx->priv_data; int16_t *samples = data; int nblocks; int i, n; int blockstodecode; int bytes_used; if (buf_size == 0 && !s->samples) { *data_size = 0; return 0; } /* should not happen but who knows */ if (BLOCKS_PER_LOOP * 2 * avctx->channels > *data_size) { av_log (avctx, AV_LOG_ERROR, "Packet size is too big to be handled in lavc! (max is %d where you have %d)\n", *data_size, s->samples * 2 * avctx->channels); return -1; } if(!s->samples){ s->data = av_realloc(s->data, (buf_size + 3) & ~3); s->dsp.bswap_buf((uint32_t*)s->data, (const uint32_t*)buf, buf_size >> 2); s->ptr = s->last_ptr = s->data; s->data_end = s->data + buf_size; nblocks = s->samples = bytestream_get_be32(&s->ptr); n = bytestream_get_be32(&s->ptr); if(n < 0 || n > 3){ av_log(avctx, AV_LOG_ERROR, "Incorrect offset passed\n"); s->data = NULL; return -1; } s->ptr += n; s->currentframeblocks = nblocks; buf += 4; if (s->samples <= 0) { *data_size = 0; return buf_size; } memset(s->decoded0, 0, sizeof(s->decoded0)); memset(s->decoded1, 0, sizeof(s->decoded1)); /* Initialize the frame decoder */ init_frame_decoder(s); } if (!s->data) { *data_size = 0; return buf_size; } nblocks = s->samples; blockstodecode = FFMIN(BLOCKS_PER_LOOP, nblocks); s->error=0; if ((s->channels == 1) || (s->frameflags & APE_FRAMECODE_PSEUDO_STEREO)) ape_unpack_mono(s, blockstodecode); else ape_unpack_stereo(s, blockstodecode); emms_c(); if(s->error || s->ptr > s->data_end){ s->samples=0; av_log(avctx, AV_LOG_ERROR, "Error decoding frame\n"); return -1; } for (i = 0; i < blockstodecode; i++) { *samples++ = s->decoded0[i]; if(s->channels == 2) *samples++ = s->decoded1[i]; } s->samples -= blockstodecode; *data_size = blockstodecode * 2 * s->channels; bytes_used = s->samples ? s->ptr - s->last_ptr : buf_size; s->last_ptr = s->ptr; return bytes_used; } | 19,457 |
0 | static const UID *mxf_get_mpeg2_codec_ul(AVCodecContext *avctx) { int long_gop = avctx->gop_size > 1 || avctx->has_b_frames; if (avctx->profile == 4) { // Main if (avctx->level == 8) // Main return &mxf_mpeg2_codec_uls[0+long_gop]; else if (avctx->level == 4) // High return &mxf_mpeg2_codec_uls[4+long_gop]; else if (avctx->level == 6) // High 14 return &mxf_mpeg2_codec_uls[8+long_gop]; } else if (avctx->profile == 0) { // 422 if (avctx->level == 5) // Main return &mxf_mpeg2_codec_uls[2+long_gop]; else if (avctx->level == 2) // High return &mxf_mpeg2_codec_uls[6+long_gop]; } return NULL; } | 19,458 |
0 | static void free_packet_list(AVPacketList *pktl) { AVPacketList *cur; while (pktl) { cur = pktl; pktl = cur->next; av_free_packet(&cur->pkt); av_free(cur); } } | 19,459 |
1 | static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data) { const AVFrame *pic = data; int aligned_width = ((avctx->width + 47) / 48) * 48; int stride = aligned_width * 8 / 3; int h, w; const uint16_t *y = (const uint16_t*)pic->data[0]; const uint16_t *u = (const uint16_t*)pic->data[1]; const uint16_t *v = (const uint16_t*)pic->data[2]; uint8_t *p = buf; uint8_t *pdst = buf; if (buf_size < aligned_width * avctx->height * 8 / 3) { av_log(avctx, AV_LOG_ERROR, "output buffer too small\n"); return -1; } #define CLIP(v) av_clip(v, 4, 1019) #define WRITE_PIXELS(a, b, c) \ do { \ val = CLIP(*a++); \ val |= (CLIP(*b++) << 10) | \ (CLIP(*c++) << 20); \ bytestream_put_le32(&p, val); \ } while (0) for (h = 0; h < avctx->height; h++) { uint32_t val; for (w = 0; w < avctx->width - 5; w += 6) { WRITE_PIXELS(u, y, v); WRITE_PIXELS(y, u, y); WRITE_PIXELS(v, y, u); WRITE_PIXELS(y, v, y); } if (w < avctx->width - 1) { WRITE_PIXELS(u, y, v); val = CLIP(*y++); if (w == avctx->width - 2) bytestream_put_le32(&p, val); } if (w < avctx->width - 3) { val |= (CLIP(*u++) << 10) | (CLIP(*y++) << 20); bytestream_put_le32(&p, val); val = CLIP(*v++) | (CLIP(*y++) << 10); bytestream_put_le32(&p, val); } pdst += stride; memset(p, 0, pdst - p); p = pdst; y += pic->linesize[0] / 2 - avctx->width; u += pic->linesize[1] / 2 - avctx->width / 2; v += pic->linesize[2] / 2 - avctx->width / 2; } return p - buf; } | 19,460 |
1 | static void pc_init_pci_no_kvmclock(MachineState *machine) { has_pci_info = false; has_acpi_build = false; smbios_defaults = false; x86_cpu_compat_disable_kvm_features(FEAT_KVM, KVM_FEATURE_PV_EOI); enable_compat_apic_id_mode(); pc_init1(machine, 1, 0); } | 19,461 |
1 | static void ide_sector_read_cb(void *opaque, int ret) { IDEState *s = opaque; int n; s->pio_aiocb = NULL; s->status &= ~BUSY_STAT; if (ret == -ECANCELED) { return; } block_acct_done(blk_get_stats(s->blk), &s->acct); if (ret != 0) { if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO | IDE_RETRY_READ)) { return; } } n = s->nsector; if (n > s->req_nb_sectors) { n = s->req_nb_sectors; } ide_set_sector(s, ide_get_sector(s) + n); s->nsector -= n; /* Allow the guest to read the io_buffer */ ide_transfer_start(s, s->io_buffer, n * BDRV_SECTOR_SIZE, ide_sector_read); ide_set_irq(s->bus); } | 19,463 |
1 | av_cold void INIT_FUNC(VP9DSPContext *dsp, int bitexact) { #if HAVE_YASM int cpu_flags = av_get_cpu_flags(); #define init_lpf_8_func(idx1, idx2, dir, wd, bpp, opt) \ dsp->loop_filter_8[idx1][idx2] = ff_vp9_loop_filter_##dir##_##wd##_##bpp##_##opt #define init_lpf_16_func(idx, dir, bpp, opt) \ dsp->loop_filter_16[idx] = loop_filter_##dir##_16_##bpp##_##opt #define init_lpf_mix2_func(idx1, idx2, idx3, dir, wd1, wd2, bpp, opt) \ dsp->loop_filter_mix2[idx1][idx2][idx3] = loop_filter_##dir##_##wd1##wd2##_##bpp##_##opt #define init_lpf_funcs(bpp, opt) \ init_lpf_8_func(0, 0, h, 4, bpp, opt); \ init_lpf_8_func(0, 1, v, 4, bpp, opt); \ init_lpf_8_func(1, 0, h, 8, bpp, opt); \ init_lpf_8_func(1, 1, v, 8, bpp, opt); \ init_lpf_8_func(2, 0, h, 16, bpp, opt); \ init_lpf_8_func(2, 1, v, 16, bpp, opt); \ init_lpf_16_func(0, h, bpp, opt); \ init_lpf_16_func(1, v, bpp, opt); \ init_lpf_mix2_func(0, 0, 0, h, 4, 4, bpp, opt); \ init_lpf_mix2_func(0, 1, 0, h, 4, 8, bpp, opt); \ init_lpf_mix2_func(1, 0, 0, h, 8, 4, bpp, opt); \ init_lpf_mix2_func(1, 1, 0, h, 8, 8, bpp, opt); \ init_lpf_mix2_func(0, 0, 1, v, 4, 4, bpp, opt); \ init_lpf_mix2_func(0, 1, 1, v, 4, 8, bpp, opt); \ init_lpf_mix2_func(1, 0, 1, v, 8, 4, bpp, opt); \ init_lpf_mix2_func(1, 1, 1, v, 8, 8, bpp, opt) #define init_itx_func(idxa, idxb, typea, typeb, size, bpp, opt) \ dsp->itxfm_add[idxa][idxb] = \ ff_vp9_##typea##_##typeb##_##size##x##size##_add_##bpp##_##opt; #define init_itx_func_one(idx, typea, typeb, size, bpp, opt) \ init_itx_func(idx, DCT_DCT, typea, typeb, size, bpp, opt); \ init_itx_func(idx, ADST_DCT, typea, typeb, size, bpp, opt); \ init_itx_func(idx, DCT_ADST, typea, typeb, size, bpp, opt); \ init_itx_func(idx, ADST_ADST, typea, typeb, size, bpp, opt) #define init_itx_funcs(idx, size, bpp, opt) \ init_itx_func(idx, DCT_DCT, idct, idct, size, bpp, opt); \ init_itx_func(idx, ADST_DCT, idct, iadst, size, bpp, opt); \ init_itx_func(idx, DCT_ADST, iadst, idct, size, bpp, opt); \ init_itx_func(idx, ADST_ADST, iadst, iadst, size, bpp, opt); \ if (EXTERNAL_MMXEXT(cpu_flags)) { init_ipred_func(tm, TM_VP8, 4, BPC, mmxext); if (!bitexact) { init_itx_func_one(4 /* lossless */, iwht, iwht, 4, BPC, mmxext); #if BPC == 10 init_itx_func(TX_4X4, DCT_DCT, idct, idct, 4, 10, mmxext); #endif } } if (EXTERNAL_SSE2(cpu_flags)) { init_subpel3(0, put, BPC, sse2); init_subpel3(1, avg, BPC, sse2); init_lpf_funcs(BPC, sse2); init_8_16_32_ipred_funcs(tm, TM_VP8, BPC, sse2); #if BPC == 10 if (!bitexact) { init_itx_func(TX_4X4, ADST_DCT, idct, iadst, 4, 10, sse2); init_itx_func(TX_4X4, DCT_ADST, iadst, idct, 4, 10, sse2); init_itx_func(TX_4X4, ADST_ADST, iadst, iadst, 4, 10, sse2); } #endif } if (EXTERNAL_SSSE3(cpu_flags)) { init_lpf_funcs(BPC, ssse3); #if BPC == 10 if (!bitexact) { init_itx_funcs(TX_4X4, 4, BPC, ssse3); } #endif } if (EXTERNAL_AVX(cpu_flags)) { init_lpf_funcs(BPC, avx); } if (EXTERNAL_AVX2(cpu_flags)) { #if HAVE_AVX2_EXTERNAL init_subpel3_32_64(0, put, BPC, avx2); init_subpel3_32_64(1, avg, BPC, avx2); init_subpel2(2, 0, 16, put, BPC, avx2); init_subpel2(2, 1, 16, avg, BPC, avx2); #endif } #endif /* HAVE_YASM */ ff_vp9dsp_init_16bpp_x86(dsp); } | 19,464 |
1 | static void get_tag(AVFormatContext *s, AVIOContext *pb, const char *key, int type, int length) { int buf_size = FFMAX(2*length, LEN_PRETTY_GUID) + 1; char *buf = av_malloc(buf_size); if (!buf) return; if (type == 0 && length == 4) { snprintf(buf, buf_size, "%"PRIi32, avio_rl32(pb)); } else if (type == 1) { avio_get_str16le(pb, length, buf, buf_size); if (!strlen(buf)) { av_free(buf); return; } } else if (type == 3 && length == 4) { strcpy(buf, avio_rl32(pb) ? "true" : "false"); } else if (type == 4 && length == 8) { int64_t num = avio_rl64(pb); if (!strcmp(key, "WM/EncodingTime") || !strcmp(key, "WM/MediaOriginalBroadcastDateTime")) filetime_to_iso8601(buf, buf_size, num); else if (!strcmp(key, "WM/WMRVEncodeTime") || !strcmp(key, "WM/WMRVEndTime")) crazytime_to_iso8601(buf, buf_size, num); else if (!strcmp(key, "WM/WMRVExpirationDate")) oledate_to_iso8601(buf, buf_size, num); else if (!strcmp(key, "WM/WMRVBitrate")) snprintf(buf, buf_size, "%f", av_int2dbl(num)); else snprintf(buf, buf_size, "%"PRIi64, num); } else if (type == 5 && length == 2) { snprintf(buf, buf_size, "%"PRIi16, avio_rl16(pb)); } else if (type == 6 && length == 16) { ff_asf_guid guid; avio_read(pb, guid, 16); snprintf(buf, buf_size, PRI_PRETTY_GUID, ARG_PRETTY_GUID(guid)); } else if (type == 2 && !strcmp(key, "WM/Picture")) { get_attachment(s, pb, length); av_freep(&buf); return; } else { av_freep(&buf); av_log(s, AV_LOG_WARNING, "unsupported metadata entry; key:%s, type:%d, length:0x%x\n", key, type, length); avio_skip(pb, length); return; } av_metadata_set2(&s->metadata, key, buf, 0); av_freep(&buf); } | 19,465 |
1 | void pc_basic_device_init(ISABus *isa_bus, qemu_irq *gsi, ISADevice **rtc_state, bool create_fdctrl, bool no_vmport, uint32_t hpet_irqs) { int i; DriveInfo *fd[MAX_FD]; DeviceState *hpet = NULL; int pit_isa_irq = 0; qemu_irq pit_alt_irq = NULL; qemu_irq rtc_irq = NULL; qemu_irq *a20_line; ISADevice *i8042, *port92, *vmmouse, *pit = NULL; MemoryRegion *ioport80_io = g_new(MemoryRegion, 1); MemoryRegion *ioportF0_io = g_new(MemoryRegion, 1); memory_region_init_io(ioport80_io, NULL, &ioport80_io_ops, NULL, "ioport80", 1); memory_region_add_subregion(isa_bus->address_space_io, 0x80, ioport80_io); memory_region_init_io(ioportF0_io, NULL, &ioportF0_io_ops, NULL, "ioportF0", 1); memory_region_add_subregion(isa_bus->address_space_io, 0xf0, ioportF0_io); /* * Check if an HPET shall be created. * * Without KVM_CAP_PIT_STATE2, we cannot switch off the in-kernel PIT * when the HPET wants to take over. Thus we have to disable the latter. */ if (!no_hpet && (!kvm_irqchip_in_kernel() || kvm_has_pit_state2())) { /* In order to set property, here not using sysbus_try_create_simple */ hpet = qdev_try_create(NULL, TYPE_HPET); if (hpet) { /* For pc-piix-*, hpet's intcap is always IRQ2. For pc-q35-1.7 * and earlier, use IRQ2 for compat. Otherwise, use IRQ16~23, * IRQ8 and IRQ2. */ uint8_t compat = object_property_get_int(OBJECT(hpet), HPET_INTCAP, NULL); if (!compat) { qdev_prop_set_uint32(hpet, HPET_INTCAP, hpet_irqs); } qdev_init_nofail(hpet); sysbus_mmio_map(SYS_BUS_DEVICE(hpet), 0, HPET_BASE); for (i = 0; i < GSI_NUM_PINS; i++) { sysbus_connect_irq(SYS_BUS_DEVICE(hpet), i, gsi[i]); } pit_isa_irq = -1; pit_alt_irq = qdev_get_gpio_in(hpet, HPET_LEGACY_PIT_INT); rtc_irq = qdev_get_gpio_in(hpet, HPET_LEGACY_RTC_INT); } } *rtc_state = rtc_init(isa_bus, 2000, rtc_irq); qemu_register_boot_set(pc_boot_set, *rtc_state); if (!xen_enabled()) { if (kvm_pit_in_kernel()) { pit = kvm_pit_init(isa_bus, 0x40); } else { pit = pit_init(isa_bus, 0x40, pit_isa_irq, pit_alt_irq); } if (hpet) { /* connect PIT to output control line of the HPET */ qdev_connect_gpio_out(hpet, 0, qdev_get_gpio_in(DEVICE(pit), 0)); } pcspk_init(isa_bus, pit); } serial_hds_isa_init(isa_bus, MAX_SERIAL_PORTS); parallel_hds_isa_init(isa_bus, MAX_PARALLEL_PORTS); a20_line = qemu_allocate_irqs(handle_a20_line_change, first_cpu, 2); i8042 = isa_create_simple(isa_bus, "i8042"); i8042_setup_a20_line(i8042, a20_line[0]); if (!no_vmport) { vmport_init(isa_bus); vmmouse = isa_try_create(isa_bus, "vmmouse"); } else { vmmouse = NULL; } if (vmmouse) { DeviceState *dev = DEVICE(vmmouse); qdev_prop_set_ptr(dev, "ps2_mouse", i8042); qdev_init_nofail(dev); } port92 = isa_create_simple(isa_bus, "port92"); port92_init(port92, a20_line[1]); DMA_init(isa_bus, 0); for(i = 0; i < MAX_FD; i++) { fd[i] = drive_get(IF_FLOPPY, 0, i); create_fdctrl |= !!fd[i]; } if (create_fdctrl) { fdctrl_init_isa(isa_bus, fd); } } | 19,466 |
1 | void ff_vorbis_floor1_render_list(vorbis_floor1_entry * list, int values, uint16_t *y_list, int *flag, int multiplier, float *out, int samples) { int lx, ly, i; lx = 0; ly = y_list[0] * multiplier; for (i = 1; i < values; i++) { int pos = list[i].sort; if (flag[pos]) { int x1 = list[pos].x; int y1 = y_list[pos] * multiplier; if (lx < samples) render_line(lx, ly, FFMIN(x1,samples), y1, out); lx = x1; ly = y1; } if (lx >= samples) break; } if (lx < samples) render_line(lx, ly, samples, ly, out); } | 19,467 |
0 | static void channel_weighting(float *su1, float *su2, int *p3) { int band, nsample; /* w[x][y] y=0 is left y=1 is right */ float w[2][2]; if (p3[1] != 7 || p3[3] != 7) { get_channel_weights(p3[1], p3[0], w[0]); get_channel_weights(p3[3], p3[2], w[1]); for (band = 1; band < 4; band++) { for (nsample = 0; nsample < 8; nsample++) { su1[band * 256 + nsample] *= INTERPOLATE(w[0][0], w[0][1], nsample); su2[band * 256 + nsample] *= INTERPOLATE(w[1][0], w[1][1], nsample); } for(; nsample < 256; nsample++) { su1[band * 256 + nsample] *= w[1][0]; su2[band * 256 + nsample] *= w[1][1]; } } } } | 19,468 |
0 | bool bdrv_requests_pending(BlockDriverState *bs) { BdrvChild *child; BlockBackendPublic *blkp = bs->blk ? blk_get_public(bs->blk) : NULL; if (!QLIST_EMPTY(&bs->tracked_requests)) { return true; } if (blkp && !qemu_co_queue_empty(&blkp->throttled_reqs[0])) { return true; } if (blkp && !qemu_co_queue_empty(&blkp->throttled_reqs[1])) { return true; } QLIST_FOREACH(child, &bs->children, next) { if (bdrv_requests_pending(child->bs)) { return true; } } return false; } | 19,470 |
0 | static void qvirtio_9p_pci_free(QVirtIO9P *v9p) { qvirtqueue_cleanup(v9p->dev->bus, v9p->vq, v9p->alloc); pc_alloc_uninit(v9p->alloc); qvirtio_pci_device_disable(container_of(v9p->dev, QVirtioPCIDevice, vdev)); g_free(v9p->dev); qpci_free_pc(v9p->bus); g_free(v9p); } | 19,471 |
0 | static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int count, BdrvRequestFlags flags) { BlockDriver *drv = bs->drv; QEMUIOVector qiov; struct iovec iov = {0}; int ret = 0; bool need_flush = false; int head = 0; int tail = 0; int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes, INT_MAX); int alignment = MAX(bs->bl.pwrite_zeroes_alignment ?: 1, bs->request_alignment); assert(is_power_of_2(alignment)); head = offset & (alignment - 1); tail = (offset + count) & (alignment - 1); max_write_zeroes &= ~(alignment - 1); while (count > 0 && !ret) { int num = count; /* Align request. Block drivers can expect the "bulk" of the request * to be aligned, and that unaligned requests do not cross cluster * boundaries. */ if (head) { /* Make a small request up to the first aligned sector. */ num = MIN(count, alignment - head); head = 0; } else if (tail && num > alignment) { /* Shorten the request to the last aligned sector. */ num -= tail; } /* limit request size */ if (num > max_write_zeroes) { num = max_write_zeroes; } ret = -ENOTSUP; /* First try the efficient write zeroes operation */ if (drv->bdrv_co_pwrite_zeroes) { ret = drv->bdrv_co_pwrite_zeroes(bs, offset, num, flags & bs->supported_zero_flags); if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) && !(bs->supported_zero_flags & BDRV_REQ_FUA)) { need_flush = true; } } else { assert(!bs->supported_zero_flags); } if (ret == -ENOTSUP) { /* Fall back to bounce buffer if write zeroes is unsupported */ int max_xfer_len = MIN_NON_ZERO(bs->bl.max_transfer_length, MAX_WRITE_ZEROES_BOUNCE_BUFFER); BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE; if ((flags & BDRV_REQ_FUA) && !(bs->supported_write_flags & BDRV_REQ_FUA)) { /* No need for bdrv_driver_pwrite() to do a fallback * flush on each chunk; use just one at the end */ write_flags &= ~BDRV_REQ_FUA; need_flush = true; } num = MIN(num, max_xfer_len << BDRV_SECTOR_BITS); iov.iov_len = num; if (iov.iov_base == NULL) { iov.iov_base = qemu_try_blockalign(bs, num); if (iov.iov_base == NULL) { ret = -ENOMEM; goto fail; } memset(iov.iov_base, 0, num); } qemu_iovec_init_external(&qiov, &iov, 1); ret = bdrv_driver_pwritev(bs, offset, num, &qiov, write_flags); /* Keep bounce buffer around if it is big enough for all * all future requests. */ if (num < max_xfer_len << BDRV_SECTOR_BITS) { qemu_vfree(iov.iov_base); iov.iov_base = NULL; } } offset += num; count -= num; } fail: if (ret == 0 && need_flush) { ret = bdrv_co_flush(bs); } qemu_vfree(iov.iov_base); return ret; } | 19,474 |
0 | int load_vmstate(const char *name) { BlockDriverState *bs, *bs_vm_state; QEMUSnapshotInfo sn; QEMUFile *f; int ret; if (!bdrv_all_can_snapshot(&bs)) { error_report("Device '%s' is writable but does not support snapshots.", bdrv_get_device_name(bs)); return -ENOTSUP; } bs_vm_state = find_vmstate_bs(); if (!bs_vm_state) { error_report("No block device supports snapshots"); return -ENOTSUP; } /* Don't even try to load empty VM states */ ret = bdrv_snapshot_find(bs_vm_state, &sn, name); if (ret < 0) { return ret; } else if (sn.vm_state_size == 0) { error_report("This is a disk-only snapshot. Revert to it offline " "using qemu-img."); return -EINVAL; } /* Verify if there is any device that doesn't support snapshots and is writable and check if the requested snapshot is available too. */ bs = NULL; while ((bs = bdrv_next(bs))) { if (!bdrv_can_snapshot(bs)) { continue; } ret = bdrv_snapshot_find(bs, &sn, name); if (ret < 0) { error_report("Device '%s' does not have the requested snapshot '%s'", bdrv_get_device_name(bs), name); return ret; } } /* Flush all IO requests so they don't interfere with the new state. */ bdrv_drain_all(); ret = bdrv_all_goto_snapshot(name, &bs); if (ret < 0) { error_report("Error %d while activating snapshot '%s' on '%s'", ret, name, bdrv_get_device_name(bs)); return ret; } /* restore the VM state */ f = qemu_fopen_bdrv(bs_vm_state, 0); if (!f) { error_report("Could not open VM state file"); return -EINVAL; } qemu_system_reset(VMRESET_SILENT); migration_incoming_state_new(f); ret = qemu_loadvm_state(f); qemu_fclose(f); migration_incoming_state_destroy(); if (ret < 0) { error_report("Error %d while loading VM state", ret); return ret; } return 0; } | 19,475 |
0 | build_fadt(GArray *table_data, GArray *linker, AcpiPmInfo *pm, unsigned facs, unsigned dsdt, const char *oem_id, const char *oem_table_id) { AcpiFadtDescriptorRev1 *fadt = acpi_data_push(table_data, sizeof(*fadt)); fadt->firmware_ctrl = cpu_to_le32(facs); /* FACS address to be filled by Guest linker */ bios_linker_loader_add_pointer(linker, ACPI_BUILD_TABLE_FILE, ACPI_BUILD_TABLE_FILE, table_data, &fadt->firmware_ctrl, sizeof fadt->firmware_ctrl); fadt->dsdt = cpu_to_le32(dsdt); /* DSDT address to be filled by Guest linker */ bios_linker_loader_add_pointer(linker, ACPI_BUILD_TABLE_FILE, ACPI_BUILD_TABLE_FILE, table_data, &fadt->dsdt, sizeof fadt->dsdt); fadt_setup(fadt, pm); build_header(linker, table_data, (void *)fadt, "FACP", sizeof(*fadt), 1, oem_id, oem_table_id); } | 19,477 |
0 | static void test_visitor_in_bool(TestInputVisitorData *data, const void *unused) { bool res = false; Visitor *v; v = visitor_input_test_init(data, "true"); visit_type_bool(v, NULL, &res, &error_abort); g_assert_cmpint(res, ==, true); } | 19,478 |
0 | static void e500plat_init(QEMUMachineInitArgs *args) { ram_addr_t ram_size = args->ram_size; const char *boot_device = args->boot_device; const char *cpu_model = args->cpu_model; const char *kernel_filename = args->kernel_filename; const char *kernel_cmdline = args->kernel_cmdline; const char *initrd_filename = args->initrd_filename; PPCE500Params params = { .ram_size = ram_size, .boot_device = boot_device, .kernel_filename = kernel_filename, .kernel_cmdline = kernel_cmdline, .initrd_filename = initrd_filename, .cpu_model = cpu_model, .pci_first_slot = 0x11, .pci_nr_slots = 2, .fixup_devtree = e500plat_fixup_devtree, }; ppce500_init(¶ms); } | 19,481 |
0 | static void gdb_read_byte(GDBState *s, int ch) { CPUState *env = s->env; int i, csum; uint8_t reply; #ifndef CONFIG_USER_ONLY if (s->last_packet_len) { /* Waiting for a response to the last packet. If we see the start of a new command then abandon the previous response. */ if (ch == '-') { #ifdef DEBUG_GDB printf("Got NACK, retransmitting\n"); #endif put_buffer(s, (uint8_t *)s->last_packet, s->last_packet_len); } #ifdef DEBUG_GDB else if (ch == '+') printf("Got ACK\n"); else printf("Got '%c' when expecting ACK/NACK\n", ch); #endif if (ch == '+' || ch == '$') s->last_packet_len = 0; if (ch != '$') return; } if (vm_running) { /* when the CPU is running, we cannot do anything except stop it when receiving a char */ vm_stop(EXCP_INTERRUPT); } else #endif { switch(s->state) { case RS_IDLE: if (ch == '$') { s->line_buf_index = 0; s->state = RS_GETLINE; } break; case RS_GETLINE: if (ch == '#') { s->state = RS_CHKSUM1; } else if (s->line_buf_index >= sizeof(s->line_buf) - 1) { s->state = RS_IDLE; } else { s->line_buf[s->line_buf_index++] = ch; } break; case RS_CHKSUM1: s->line_buf[s->line_buf_index] = '\0'; s->line_csum = fromhex(ch) << 4; s->state = RS_CHKSUM2; break; case RS_CHKSUM2: s->line_csum |= fromhex(ch); csum = 0; for(i = 0; i < s->line_buf_index; i++) { csum += s->line_buf[i]; } if (s->line_csum != (csum & 0xff)) { reply = '-'; put_buffer(s, &reply, 1); s->state = RS_IDLE; } else { reply = '+'; put_buffer(s, &reply, 1); s->state = gdb_handle_packet(s, env, s->line_buf); } break; default: abort(); } } } | 19,482 |
0 | int ff_hevc_decode_nal_pps(HEVCContext *s) { GetBitContext *gb = &s->HEVClc->gb; HEVCSPS *sps = NULL; int pic_area_in_ctbs; int log2_diff_ctb_min_tb_size; int i, j, x, y, ctb_addr_rs, tile_id; int ret = 0; unsigned int pps_id = 0; AVBufferRef *pps_buf; HEVCPPS *pps = av_mallocz(sizeof(*pps)); if (!pps) return AVERROR(ENOMEM); pps_buf = av_buffer_create((uint8_t *)pps, sizeof(*pps), hevc_pps_free, NULL, 0); if (!pps_buf) { av_freep(&pps); return AVERROR(ENOMEM); } av_log(s->avctx, AV_LOG_DEBUG, "Decoding PPS\n"); // Default values pps->loop_filter_across_tiles_enabled_flag = 1; pps->num_tile_columns = 1; pps->num_tile_rows = 1; pps->uniform_spacing_flag = 1; pps->disable_dbf = 0; pps->beta_offset = 0; pps->tc_offset = 0; pps->log2_max_transform_skip_block_size = 2; // Coded parameters pps_id = get_ue_golomb_long(gb); if (pps_id >= MAX_PPS_COUNT) { av_log(s->avctx, AV_LOG_ERROR, "PPS id out of range: %d\n", pps_id); ret = AVERROR_INVALIDDATA; goto err; } pps->sps_id = get_ue_golomb_long(gb); if (pps->sps_id >= MAX_SPS_COUNT) { av_log(s->avctx, AV_LOG_ERROR, "SPS id out of range: %d\n", pps->sps_id); ret = AVERROR_INVALIDDATA; goto err; } if (!s->sps_list[pps->sps_id]) { av_log(s->avctx, AV_LOG_ERROR, "SPS %u does not exist.\n", pps->sps_id); ret = AVERROR_INVALIDDATA; goto err; } sps = (HEVCSPS *)s->sps_list[pps->sps_id]->data; pps->dependent_slice_segments_enabled_flag = get_bits1(gb); pps->output_flag_present_flag = get_bits1(gb); pps->num_extra_slice_header_bits = get_bits(gb, 3); pps->sign_data_hiding_flag = get_bits1(gb); pps->cabac_init_present_flag = get_bits1(gb); pps->num_ref_idx_l0_default_active = get_ue_golomb_long(gb) + 1; pps->num_ref_idx_l1_default_active = get_ue_golomb_long(gb) + 1; pps->pic_init_qp_minus26 = get_se_golomb(gb); pps->constrained_intra_pred_flag = get_bits1(gb); pps->transform_skip_enabled_flag = get_bits1(gb); pps->cu_qp_delta_enabled_flag = get_bits1(gb); pps->diff_cu_qp_delta_depth = 0; if (pps->cu_qp_delta_enabled_flag) pps->diff_cu_qp_delta_depth = get_ue_golomb_long(gb); pps->cb_qp_offset = get_se_golomb(gb); if (pps->cb_qp_offset < -12 || pps->cb_qp_offset > 12) { av_log(s->avctx, AV_LOG_ERROR, "pps_cb_qp_offset out of range: %d\n", pps->cb_qp_offset); ret = AVERROR_INVALIDDATA; goto err; } pps->cr_qp_offset = get_se_golomb(gb); if (pps->cr_qp_offset < -12 || pps->cr_qp_offset > 12) { av_log(s->avctx, AV_LOG_ERROR, "pps_cr_qp_offset out of range: %d\n", pps->cr_qp_offset); ret = AVERROR_INVALIDDATA; goto err; } pps->pic_slice_level_chroma_qp_offsets_present_flag = get_bits1(gb); pps->weighted_pred_flag = get_bits1(gb); pps->weighted_bipred_flag = get_bits1(gb); pps->transquant_bypass_enable_flag = get_bits1(gb); pps->tiles_enabled_flag = get_bits1(gb); pps->entropy_coding_sync_enabled_flag = get_bits1(gb); if (pps->tiles_enabled_flag) { pps->num_tile_columns = get_ue_golomb_long(gb) + 1; pps->num_tile_rows = get_ue_golomb_long(gb) + 1; if (pps->num_tile_columns == 0 || pps->num_tile_columns >= sps->width) { av_log(s->avctx, AV_LOG_ERROR, "num_tile_columns_minus1 out of range: %d\n", pps->num_tile_columns - 1); ret = AVERROR_INVALIDDATA; goto err; } if (pps->num_tile_rows == 0 || pps->num_tile_rows >= sps->height) { av_log(s->avctx, AV_LOG_ERROR, "num_tile_rows_minus1 out of range: %d\n", pps->num_tile_rows - 1); ret = AVERROR_INVALIDDATA; goto err; } pps->column_width = av_malloc_array(pps->num_tile_columns, sizeof(*pps->column_width)); pps->row_height = av_malloc_array(pps->num_tile_rows, sizeof(*pps->row_height)); if (!pps->column_width || !pps->row_height) { ret = AVERROR(ENOMEM); goto err; } pps->uniform_spacing_flag = get_bits1(gb); if (!pps->uniform_spacing_flag) { uint64_t sum = 0; for (i = 0; i < pps->num_tile_columns - 1; i++) { pps->column_width[i] = get_ue_golomb_long(gb) + 1; sum += pps->column_width[i]; } if (sum >= sps->ctb_width) { av_log(s->avctx, AV_LOG_ERROR, "Invalid tile widths.\n"); ret = AVERROR_INVALIDDATA; goto err; } pps->column_width[pps->num_tile_columns - 1] = sps->ctb_width - sum; sum = 0; for (i = 0; i < pps->num_tile_rows - 1; i++) { pps->row_height[i] = get_ue_golomb_long(gb) + 1; sum += pps->row_height[i]; } if (sum >= sps->ctb_height) { av_log(s->avctx, AV_LOG_ERROR, "Invalid tile heights.\n"); ret = AVERROR_INVALIDDATA; goto err; } pps->row_height[pps->num_tile_rows - 1] = sps->ctb_height - sum; } pps->loop_filter_across_tiles_enabled_flag = get_bits1(gb); } pps->seq_loop_filter_across_slices_enabled_flag = get_bits1(gb); pps->deblocking_filter_control_present_flag = get_bits1(gb); if (pps->deblocking_filter_control_present_flag) { pps->deblocking_filter_override_enabled_flag = get_bits1(gb); pps->disable_dbf = get_bits1(gb); if (!pps->disable_dbf) { pps->beta_offset = get_se_golomb(gb) * 2; pps->tc_offset = get_se_golomb(gb) * 2; if (pps->beta_offset/2 < -6 || pps->beta_offset/2 > 6) { av_log(s->avctx, AV_LOG_ERROR, "pps_beta_offset_div2 out of range: %d\n", pps->beta_offset/2); ret = AVERROR_INVALIDDATA; goto err; } if (pps->tc_offset/2 < -6 || pps->tc_offset/2 > 6) { av_log(s->avctx, AV_LOG_ERROR, "pps_tc_offset_div2 out of range: %d\n", pps->tc_offset/2); ret = AVERROR_INVALIDDATA; goto err; } } } pps->scaling_list_data_present_flag = get_bits1(gb); if (pps->scaling_list_data_present_flag) { set_default_scaling_list_data(&pps->scaling_list); ret = scaling_list_data(s, &pps->scaling_list, sps); if (ret < 0) goto err; } pps->lists_modification_present_flag = get_bits1(gb); pps->log2_parallel_merge_level = get_ue_golomb_long(gb) + 2; if (pps->log2_parallel_merge_level > sps->log2_ctb_size) { av_log(s->avctx, AV_LOG_ERROR, "log2_parallel_merge_level_minus2 out of range: %d\n", pps->log2_parallel_merge_level - 2); ret = AVERROR_INVALIDDATA; goto err; } pps->slice_header_extension_present_flag = get_bits1(gb); if (get_bits1(gb)) { // pps_extension_present_flag int pps_range_extensions_flag = get_bits1(gb); /* int pps_extension_7bits = */ get_bits(gb, 7); if (sps->ptl.general_ptl.profile_idc == FF_PROFILE_HEVC_REXT && pps_range_extensions_flag) { pps_range_extensions(s, pps, sps); } } // Inferred parameters pps->col_bd = av_malloc_array(pps->num_tile_columns + 1, sizeof(*pps->col_bd)); pps->row_bd = av_malloc_array(pps->num_tile_rows + 1, sizeof(*pps->row_bd)); pps->col_idxX = av_malloc_array(sps->ctb_width, sizeof(*pps->col_idxX)); if (!pps->col_bd || !pps->row_bd || !pps->col_idxX) { ret = AVERROR(ENOMEM); goto err; } if (pps->uniform_spacing_flag) { if (!pps->column_width) { pps->column_width = av_malloc_array(pps->num_tile_columns, sizeof(*pps->column_width)); pps->row_height = av_malloc_array(pps->num_tile_rows, sizeof(*pps->row_height)); } if (!pps->column_width || !pps->row_height) { ret = AVERROR(ENOMEM); goto err; } for (i = 0; i < pps->num_tile_columns; i++) { pps->column_width[i] = ((i + 1) * sps->ctb_width) / pps->num_tile_columns - (i * sps->ctb_width) / pps->num_tile_columns; } for (i = 0; i < pps->num_tile_rows; i++) { pps->row_height[i] = ((i + 1) * sps->ctb_height) / pps->num_tile_rows - (i * sps->ctb_height) / pps->num_tile_rows; } } pps->col_bd[0] = 0; for (i = 0; i < pps->num_tile_columns; i++) pps->col_bd[i + 1] = pps->col_bd[i] + pps->column_width[i]; pps->row_bd[0] = 0; for (i = 0; i < pps->num_tile_rows; i++) pps->row_bd[i + 1] = pps->row_bd[i] + pps->row_height[i]; for (i = 0, j = 0; i < sps->ctb_width; i++) { if (i > pps->col_bd[j]) j++; pps->col_idxX[i] = j; } /** * 6.5 */ pic_area_in_ctbs = sps->ctb_width * sps->ctb_height; pps->ctb_addr_rs_to_ts = av_malloc_array(pic_area_in_ctbs, sizeof(*pps->ctb_addr_rs_to_ts)); pps->ctb_addr_ts_to_rs = av_malloc_array(pic_area_in_ctbs, sizeof(*pps->ctb_addr_ts_to_rs)); pps->tile_id = av_malloc_array(pic_area_in_ctbs, sizeof(*pps->tile_id)); pps->min_tb_addr_zs_tab = av_malloc_array((sps->tb_mask+2) * (sps->tb_mask+2), sizeof(*pps->min_tb_addr_zs_tab)); if (!pps->ctb_addr_rs_to_ts || !pps->ctb_addr_ts_to_rs || !pps->tile_id || !pps->min_tb_addr_zs_tab) { ret = AVERROR(ENOMEM); goto err; } for (ctb_addr_rs = 0; ctb_addr_rs < pic_area_in_ctbs; ctb_addr_rs++) { int tb_x = ctb_addr_rs % sps->ctb_width; int tb_y = ctb_addr_rs / sps->ctb_width; int tile_x = 0; int tile_y = 0; int val = 0; for (i = 0; i < pps->num_tile_columns; i++) { if (tb_x < pps->col_bd[i + 1]) { tile_x = i; break; } } for (i = 0; i < pps->num_tile_rows; i++) { if (tb_y < pps->row_bd[i + 1]) { tile_y = i; break; } } for (i = 0; i < tile_x; i++) val += pps->row_height[tile_y] * pps->column_width[i]; for (i = 0; i < tile_y; i++) val += sps->ctb_width * pps->row_height[i]; val += (tb_y - pps->row_bd[tile_y]) * pps->column_width[tile_x] + tb_x - pps->col_bd[tile_x]; pps->ctb_addr_rs_to_ts[ctb_addr_rs] = val; pps->ctb_addr_ts_to_rs[val] = ctb_addr_rs; } for (j = 0, tile_id = 0; j < pps->num_tile_rows; j++) for (i = 0; i < pps->num_tile_columns; i++, tile_id++) for (y = pps->row_bd[j]; y < pps->row_bd[j + 1]; y++) for (x = pps->col_bd[i]; x < pps->col_bd[i + 1]; x++) pps->tile_id[pps->ctb_addr_rs_to_ts[y * sps->ctb_width + x]] = tile_id; pps->tile_pos_rs = av_malloc_array(tile_id, sizeof(*pps->tile_pos_rs)); if (!pps->tile_pos_rs) { ret = AVERROR(ENOMEM); goto err; } for (j = 0; j < pps->num_tile_rows; j++) for (i = 0; i < pps->num_tile_columns; i++) pps->tile_pos_rs[j * pps->num_tile_columns + i] = pps->row_bd[j] * sps->ctb_width + pps->col_bd[i]; log2_diff_ctb_min_tb_size = sps->log2_ctb_size - sps->log2_min_tb_size; pps->min_tb_addr_zs = &pps->min_tb_addr_zs_tab[1*(sps->tb_mask+2)+1]; for (y = 0; y < sps->tb_mask+2; y++) { pps->min_tb_addr_zs_tab[y*(sps->tb_mask+2)] = -1; pps->min_tb_addr_zs_tab[y] = -1; } for (y = 0; y < sps->tb_mask+1; y++) { for (x = 0; x < sps->tb_mask+1; x++) { int tb_x = x >> log2_diff_ctb_min_tb_size; int tb_y = y >> log2_diff_ctb_min_tb_size; int ctb_addr_rs = sps->ctb_width * tb_y + tb_x; int val = pps->ctb_addr_rs_to_ts[ctb_addr_rs] << (log2_diff_ctb_min_tb_size * 2); for (i = 0; i < log2_diff_ctb_min_tb_size; i++) { int m = 1 << i; val += (m & x ? m * m : 0) + (m & y ? 2 * m * m : 0); } pps->min_tb_addr_zs[y * (sps->tb_mask+2) + x] = val; } } if (get_bits_left(gb) < 0) { av_log(s->avctx, AV_LOG_ERROR, "Overread PPS by %d bits\n", -get_bits_left(gb)); goto err; } av_buffer_unref(&s->pps_list[pps_id]); s->pps_list[pps_id] = pps_buf; return 0; err: av_buffer_unref(&pps_buf); return ret; } | 19,483 |
0 | static void ide_drive_pre_save(void *opaque) { IDEState *s = opaque; s->cur_io_buffer_len = 0; if (!(s->status & DRQ_STAT)) return; s->cur_io_buffer_offset = s->data_ptr - s->io_buffer; s->cur_io_buffer_len = s->data_end - s->data_ptr; s->end_transfer_fn_idx = transfer_end_table_idx(s->end_transfer_func); if (s->end_transfer_fn_idx == -1) { fprintf(stderr, "%s: invalid end_transfer_func for DRQ_STAT\n", __func__); s->end_transfer_fn_idx = 2; } } | 19,484 |
0 | static int gdb_breakpoint_remove(CPUState *env, target_ulong addr, target_ulong len, int type) { switch (type) { case GDB_BREAKPOINT_SW: case GDB_BREAKPOINT_HW: return cpu_breakpoint_remove(env, addr, BP_GDB); #ifndef CONFIG_USER_ONLY case GDB_WATCHPOINT_WRITE: case GDB_WATCHPOINT_READ: case GDB_WATCHPOINT_ACCESS: return cpu_watchpoint_remove(env, addr, len, xlat_gdb_type[type]); #endif default: return -ENOSYS; } } | 19,485 |
0 | static int unin_main_pci_host_init(PCIDevice *d) { pci_config_set_vendor_id(d->config, PCI_VENDOR_ID_APPLE); pci_config_set_device_id(d->config, PCI_DEVICE_ID_APPLE_UNI_N_PCI); d->config[0x08] = 0x00; // revision pci_config_set_class(d->config, PCI_CLASS_BRIDGE_HOST); d->config[0x0C] = 0x08; // cache_line_size d->config[0x0D] = 0x10; // latency_timer d->config[0x34] = 0x00; // capabilities_pointer return 0; } | 19,486 |
0 | VirtIODevice *virtio_serial_init(DeviceState *dev, uint32_t max_nr_ports) { VirtIOSerial *vser; VirtIODevice *vdev; uint32_t i; if (!max_nr_ports) return NULL; vdev = virtio_common_init("virtio-serial", VIRTIO_ID_CONSOLE, sizeof(struct virtio_console_config), sizeof(VirtIOSerial)); vser = DO_UPCAST(VirtIOSerial, vdev, vdev); /* Spawn a new virtio-serial bus on which the ports will ride as devices */ vser->bus = virtser_bus_new(dev); vser->bus->vser = vser; QTAILQ_INIT(&vser->ports); vser->bus->max_nr_ports = max_nr_ports; vser->ivqs = qemu_malloc(max_nr_ports * sizeof(VirtQueue *)); vser->ovqs = qemu_malloc(max_nr_ports * sizeof(VirtQueue *)); /* Add a queue for host to guest transfers for port 0 (backward compat) */ vser->ivqs[0] = virtio_add_queue(vdev, 128, handle_input); /* Add a queue for guest to host transfers for port 0 (backward compat) */ vser->ovqs[0] = virtio_add_queue(vdev, 128, handle_output); /* control queue: host to guest */ vser->c_ivq = virtio_add_queue(vdev, 16, control_in); /* control queue: guest to host */ vser->c_ovq = virtio_add_queue(vdev, 16, control_out); for (i = 1; i < vser->bus->max_nr_ports; i++) { /* Add a per-port queue for host to guest transfers */ vser->ivqs[i] = virtio_add_queue(vdev, 128, handle_input); /* Add a per-per queue for guest to host transfers */ vser->ovqs[i] = virtio_add_queue(vdev, 128, handle_output); } vser->config.max_nr_ports = max_nr_ports; vser->ports_map = qemu_mallocz(((max_nr_ports + 31) / 32) * sizeof(vser->ports_map[0])); /* * Reserve location 0 for a console port for backward compat * (old kernel, new qemu) */ mark_port_added(vser, 0); vser->vdev.get_features = get_features; vser->vdev.get_config = get_config; vser->vdev.set_config = set_config; /* * Register for the savevm section with the virtio-console name * to preserve backward compat */ register_savevm(dev, "virtio-console", -1, 2, virtio_serial_save, virtio_serial_load, vser); return vdev; } | 19,487 |
0 | static void pci_vpb_map(SysBusDevice *dev, target_phys_addr_t base) { PCIVPBState *s = (PCIVPBState *)dev; /* Selfconfig area. */ memory_region_add_subregion(get_system_memory(), base + 0x01000000, &s->mem_config); /* Normal config area. */ memory_region_add_subregion(get_system_memory(), base + 0x02000000, &s->mem_config2); if (s->realview) { /* IO memory area. */ memory_region_add_subregion(get_system_memory(), base + 0x03000000, &s->isa); } } | 19,488 |
0 | static void qmp_input_start_struct(Visitor *v, const char *name, void **obj, size_t size, Error **errp) { QmpInputVisitor *qiv = to_qiv(v); QObject *qobj = qmp_input_get_object(qiv, name, true, errp); Error *err = NULL; if (obj) { *obj = NULL; } if (!qobj) { return; } if (qobject_type(qobj) != QTYPE_QDICT) { error_setg(errp, QERR_INVALID_PARAMETER_TYPE, name ? name : "null", "QDict"); return; } qmp_input_push(qiv, qobj, obj, &err); if (err) { error_propagate(errp, err); return; } if (obj) { *obj = g_malloc0(size); } } | 19,491 |
0 | static int read_packet(ByteIOContext *pb, uint8_t *buf, int raw_packet_size) { int skip, len; for(;;) { len = get_buffer(pb, buf, TS_PACKET_SIZE); if (len != TS_PACKET_SIZE) return AVERROR(EIO); /* check paquet sync byte */ if (buf[0] != 0x47) { /* find a new packet start */ url_fseek(pb, -TS_PACKET_SIZE, SEEK_CUR); if (mpegts_resync(pb) < 0) return AVERROR_INVALIDDATA; else continue; } else { skip = raw_packet_size - TS_PACKET_SIZE; if (skip > 0) url_fskip(pb, skip); break; } } return 0; } | 19,493 |
0 | av_cold void ff_fft_init_arm(FFTContext *s) { if (HAVE_NEON) { s->fft_permute = ff_fft_permute_neon; s->fft_calc = ff_fft_calc_neon; s->imdct_calc = ff_imdct_calc_neon; s->imdct_half = ff_imdct_half_neon; s->mdct_calc = ff_mdct_calc_neon; s->permutation = FF_MDCT_PERM_INTERLEAVE; } } | 19,495 |
0 | static int coroutine_fn bdrv_co_io_em(BlockDriverState *bs, int64_t sector_num, int nb_sectors, QEMUIOVector *iov, bool is_write) { CoroutineIOCompletion co = { .coroutine = qemu_coroutine_self(), }; BlockDriverAIOCB *acb; if (is_write) { acb = bdrv_aio_writev(bs, sector_num, iov, nb_sectors, bdrv_co_io_em_complete, &co); } else { acb = bdrv_aio_readv(bs, sector_num, iov, nb_sectors, bdrv_co_io_em_complete, &co); } trace_bdrv_co_io_em(bs, sector_num, nb_sectors, is_write, acb); if (!acb) { return -EIO; } qemu_coroutine_yield(); return co.ret; } | 19,496 |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.