label | func1 | id |
---|---|---|
0 | void qemu_spice_display_switch(SimpleSpiceDisplay *ssd, DisplaySurface *surface) { SimpleSpiceUpdate *update; bool need_destroy; if (surface && ssd->surface && surface_width(surface) == pixman_image_get_width(ssd->surface) && surface_height(surface) == pixman_image_get_height(ssd->surface)) { /* no-resize fast path: just swap backing store */ dprint(1, "%s/%d: fast (%dx%d)\n", __func__, ssd->qxl.id, surface_width(surface), surface_height(surface)); qemu_mutex_lock(&ssd->lock); ssd->ds = surface; pixman_image_unref(ssd->surface); ssd->surface = pixman_image_ref(ssd->ds->image); qemu_mutex_unlock(&ssd->lock); qemu_spice_display_update(ssd, 0, 0, surface_width(surface), surface_height(surface)); return; } /* full mode switch */ dprint(1, "%s/%d: full (%dx%d -> %dx%d)\n", __func__, ssd->qxl.id, ssd->surface ? pixman_image_get_width(ssd->surface) : 0, ssd->surface ? pixman_image_get_height(ssd->surface) : 0, surface ? surface_width(surface) : 0, surface ? surface_height(surface) : 0); memset(&ssd->dirty, 0, sizeof(ssd->dirty)); if (ssd->surface) { pixman_image_unref(ssd->surface); ssd->surface = NULL; pixman_image_unref(ssd->mirror); ssd->mirror = NULL; } qemu_mutex_lock(&ssd->lock); need_destroy = (ssd->ds != NULL); ssd->ds = surface; while ((update = QTAILQ_FIRST(&ssd->updates)) != NULL) { QTAILQ_REMOVE(&ssd->updates, update, next); qemu_spice_destroy_update(ssd, update); } qemu_mutex_unlock(&ssd->lock); if (need_destroy) { qemu_spice_destroy_host_primary(ssd); } if (ssd->ds) { ssd->surface = pixman_image_ref(ssd->ds->image); ssd->mirror = qemu_pixman_mirror_create(ssd->ds->format, ssd->ds->image); qemu_spice_create_host_primary(ssd); } memset(&ssd->dirty, 0, sizeof(ssd->dirty)); ssd->notify++; } | 15,807 |
0 | GuestNetworkInterfaceList *qmp_guest_network_get_interfaces(Error **errp) { GuestNetworkInterfaceList *head = NULL, *cur_item = NULL; struct ifaddrs *ifap, *ifa; char err_msg[512]; if (getifaddrs(&ifap) < 0) { snprintf(err_msg, sizeof(err_msg), "getifaddrs failed: %s", strerror(errno)); error_set(errp, QERR_QGA_COMMAND_FAILED, err_msg); goto error; } for (ifa = ifap; ifa; ifa = ifa->ifa_next) { GuestNetworkInterfaceList *info; GuestIpAddressList **address_list = NULL, *address_item = NULL; char addr4[INET_ADDRSTRLEN]; char addr6[INET6_ADDRSTRLEN]; int sock; struct ifreq ifr; unsigned char *mac_addr; void *p; g_debug("Processing %s interface", ifa->ifa_name); info = guest_find_interface(head, ifa->ifa_name); if (!info) { info = g_malloc0(sizeof(*info)); info->value = g_malloc0(sizeof(*info->value)); info->value->name = g_strdup(ifa->ifa_name); if (!cur_item) { head = cur_item = info; } else { cur_item->next = info; cur_item = info; } } if (!info->value->has_hardware_address && ifa->ifa_flags & SIOCGIFHWADDR) { /* we haven't obtained HW address yet */ sock = socket(PF_INET, SOCK_STREAM, 0); if (sock == -1) { snprintf(err_msg, sizeof(err_msg), "failed to create socket: %s", strerror(errno)); error_set(errp, QERR_QGA_COMMAND_FAILED, err_msg); goto error; } memset(&ifr, 0, sizeof(ifr)); strncpy(ifr.ifr_name, info->value->name, IF_NAMESIZE); if (ioctl(sock, SIOCGIFHWADDR, &ifr) == -1) { snprintf(err_msg, sizeof(err_msg), "failed to get MAC address of %s: %s", ifa->ifa_name, strerror(errno)); error_set(errp, QERR_QGA_COMMAND_FAILED, err_msg); goto error; } mac_addr = (unsigned char *) &ifr.ifr_hwaddr.sa_data; if (asprintf(&info->value->hardware_address, "%02x:%02x:%02x:%02x:%02x:%02x", (int) mac_addr[0], (int) mac_addr[1], (int) mac_addr[2], (int) mac_addr[3], (int) mac_addr[4], (int) mac_addr[5]) == -1) { snprintf(err_msg, sizeof(err_msg), "failed to format MAC: %s", strerror(errno)); error_set(errp, QERR_QGA_COMMAND_FAILED, err_msg); goto error; } info->value->has_hardware_address = true; close(sock); } if (ifa->ifa_addr && ifa->ifa_addr->sa_family == AF_INET) { /* interface with IPv4 address */ address_item = g_malloc0(sizeof(*address_item)); address_item->value = g_malloc0(sizeof(*address_item->value)); p = &((struct sockaddr_in *)ifa->ifa_addr)->sin_addr; if (!inet_ntop(AF_INET, p, addr4, sizeof(addr4))) { snprintf(err_msg, sizeof(err_msg), "inet_ntop failed: %s", strerror(errno)); error_set(errp, QERR_QGA_COMMAND_FAILED, err_msg); goto error; } address_item->value->ip_address = g_strdup(addr4); address_item->value->ip_address_type = GUEST_IP_ADDRESS_TYPE_IPV4; if (ifa->ifa_netmask) { /* Count the number of set bits in netmask. * This is safe as '1' and '0' cannot be shuffled in netmask. */ p = &((struct sockaddr_in *)ifa->ifa_netmask)->sin_addr; address_item->value->prefix = ctpop32(((uint32_t *) p)[0]); } } else if (ifa->ifa_addr && ifa->ifa_addr->sa_family == AF_INET6) { /* interface with IPv6 address */ address_item = g_malloc0(sizeof(*address_item)); address_item->value = g_malloc0(sizeof(*address_item->value)); p = &((struct sockaddr_in6 *)ifa->ifa_addr)->sin6_addr; if (!inet_ntop(AF_INET6, p, addr6, sizeof(addr6))) { snprintf(err_msg, sizeof(err_msg), "inet_ntop failed: %s", strerror(errno)); error_set(errp, QERR_QGA_COMMAND_FAILED, err_msg); goto error; } address_item->value->ip_address = g_strdup(addr6); address_item->value->ip_address_type = GUEST_IP_ADDRESS_TYPE_IPV6; if (ifa->ifa_netmask) { /* Count the number of set bits in netmask. 
* This is safe as '1' and '0' cannot be shuffled in netmask. */ p = &((struct sockaddr_in6 *)ifa->ifa_netmask)->sin6_addr; address_item->value->prefix = ctpop32(((uint32_t *) p)[0]) + ctpop32(((uint32_t *) p)[1]) + ctpop32(((uint32_t *) p)[2]) + ctpop32(((uint32_t *) p)[3]); } } if (!address_item) { continue; } address_list = &info->value->ip_addresses; while (*address_list && (*address_list)->next) { address_list = &(*address_list)->next; } if (!*address_list) { *address_list = address_item; } else { (*address_list)->next = address_item; } info->value->has_ip_addresses = true; } freeifaddrs(ifap); return head; error: freeifaddrs(ifap); qapi_free_GuestNetworkInterfaceList(head); return NULL; } | 15,808 |
0 | static inline int coef_test_compression(int coef) { int tmp = coef >> 2; int res = ff_ctz(tmp); if (res > 1) return 1; /* ...00 -> compressible */ else if (res == 1) return 0; /* ...10 -> uncompressible */ else if (ff_ctz(tmp >> 1) > 0) return 0; /* ...0 1 -> uncompressible */ else return 1; /* ...1 1 -> compressible */ } | 15,809 |
0 | static int ac97_load (QEMUFile *f, void *opaque, int version_id) { int ret; size_t i; uint8_t active[LAST_INDEX]; AC97LinkState *s = opaque; if (version_id != 2) return -EINVAL; ret = pci_device_load (s->pci_dev, f); if (ret) return ret; qemu_get_be32s (f, &s->glob_cnt); qemu_get_be32s (f, &s->glob_sta); qemu_get_be32s (f, &s->cas); for (i = 0; i < ARRAY_SIZE (s->bm_regs); ++i) { AC97BusMasterRegs *r = &s->bm_regs[i]; qemu_get_be32s (f, &r->bdbar); qemu_get_8s (f, &r->civ); qemu_get_8s (f, &r->lvi); qemu_get_be16s (f, &r->sr); qemu_get_be16s (f, &r->picb); qemu_get_8s (f, &r->piv); qemu_get_8s (f, &r->cr); qemu_get_be32s (f, &r->bd_valid); qemu_get_be32s (f, &r->bd.addr); qemu_get_be32s (f, &r->bd.ctl_len); } qemu_get_buffer (f, s->mixer_data, sizeof (s->mixer_data)); qemu_get_buffer (f, active, sizeof (active)); #ifdef USE_MIXER record_select (s, mixer_load (s, AC97_Record_Select)); #define V_(a, b) set_volume (s, a, b, mixer_load (s, a)) V_ (AC97_Master_Volume_Mute, AUD_MIXER_VOLUME); V_ (AC97_PCM_Out_Volume_Mute, AUD_MIXER_PCM); V_ (AC97_Line_In_Volume_Mute, AUD_MIXER_LINE_IN); #undef V_ #endif reset_voices (s, active); s->bup_flag = 0; s->last_samp = 0; return 0; } | 15,810 |
0 | void usb_test_hotplug(const char *hcd_id, const int port, void (*port_check)(void)) { QDict *response; char *cmd; cmd = g_strdup_printf("{'execute': 'device_add'," " 'arguments': {" " 'driver': 'usb-tablet'," " 'port': '%d'," " 'bus': '%s.0'," " 'id': 'usbdev%d'" "}}", port, hcd_id, port); response = qmp(cmd); g_free(cmd); g_assert(response); g_assert(!qdict_haskey(response, "error")); QDECREF(response); if (port_check) { port_check(); } cmd = g_strdup_printf("{'execute': 'device_del'," " 'arguments': {" " 'id': 'usbdev%d'" "}}", port); response = qmp(cmd); g_free(cmd); g_assert(response); g_assert(qdict_haskey(response, "event")); g_assert(!strcmp(qdict_get_str(response, "event"), "DEVICE_DELETED")); QDECREF(response); } | 15,812 |
0 | bool qemu_signalfd_available(void) { #ifdef CONFIG_SIGNALFD errno = 0; syscall(SYS_signalfd, -1, NULL, _NSIG / 8); return errno != ENOSYS; #else return false; #endif } | 15,813 |
0 | static int xen_pt_byte_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry, uint8_t *val, uint8_t dev_value, uint8_t valid_mask) { XenPTRegInfo *reg = cfg_entry->reg; uint8_t writable_mask = 0; uint8_t throughable_mask = get_throughable_mask(s, reg, valid_mask); /* modify emulate register */ writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask; cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask); /* create value for writing to I/O device register */ *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask); return 0; } | 15,814 |
0 | static void omap_update_display(void *opaque) { struct omap_lcd_panel_s *omap_lcd = (struct omap_lcd_panel_s *) opaque; DisplaySurface *surface = qemu_console_surface(omap_lcd->con); draw_line_func draw_line; int size, height, first, last; int width, linesize, step, bpp, frame_offset; hwaddr frame_base; if (!omap_lcd || omap_lcd->plm == 1 || !omap_lcd->enable || !surface_bits_per_pixel(surface)) { return; } frame_offset = 0; if (omap_lcd->plm != 2) { cpu_physical_memory_read(omap_lcd->dma->phys_framebuffer[ omap_lcd->dma->current_frame], (void *)omap_lcd->palette, 0x200); switch (omap_lcd->palette[0] >> 12 & 7) { case 3 ... 7: frame_offset += 0x200; break; default: frame_offset += 0x20; } } /* Colour depth */ switch ((omap_lcd->palette[0] >> 12) & 7) { case 1: draw_line = draw_line_table2[surface_bits_per_pixel(surface)]; bpp = 2; break; case 2: draw_line = draw_line_table4[surface_bits_per_pixel(surface)]; bpp = 4; break; case 3: draw_line = draw_line_table8[surface_bits_per_pixel(surface)]; bpp = 8; break; case 4 ... 7: if (!omap_lcd->tft) draw_line = draw_line_table12[surface_bits_per_pixel(surface)]; else draw_line = draw_line_table16[surface_bits_per_pixel(surface)]; bpp = 16; break; default: /* Unsupported at the moment. */ return; } /* Resolution */ width = omap_lcd->width; if (width != surface_width(surface) || omap_lcd->height != surface_height(surface)) { qemu_console_resize(omap_lcd->con, omap_lcd->width, omap_lcd->height); surface = qemu_console_surface(omap_lcd->con); omap_lcd->invalidate = 1; } if (omap_lcd->dma->current_frame == 0) size = omap_lcd->dma->src_f1_bottom - omap_lcd->dma->src_f1_top; else size = omap_lcd->dma->src_f2_bottom - omap_lcd->dma->src_f2_top; if (frame_offset + ((width * omap_lcd->height * bpp) >> 3) > size + 2) { omap_lcd->sync_error = 1; omap_lcd_interrupts(omap_lcd); omap_lcd->enable = 0; return; } /* Content */ frame_base = omap_lcd->dma->phys_framebuffer[ omap_lcd->dma->current_frame] + frame_offset; omap_lcd->dma->condition |= 1 << omap_lcd->dma->current_frame; if (omap_lcd->dma->interrupts & 1) qemu_irq_raise(omap_lcd->dma->irq); if (omap_lcd->dma->dual) omap_lcd->dma->current_frame ^= 1; if (!surface_bits_per_pixel(surface)) { return; } first = 0; height = omap_lcd->height; if (omap_lcd->subpanel & (1 << 31)) { if (omap_lcd->subpanel & (1 << 29)) first = (omap_lcd->subpanel >> 16) & 0x3ff; else height = (omap_lcd->subpanel >> 16) & 0x3ff; /* TODO: fill the rest of the panel with DPD */ } step = width * bpp >> 3; linesize = surface_stride(surface); framebuffer_update_display(surface, omap_lcd->sysmem, frame_base, width, height, step, linesize, 0, omap_lcd->invalidate, draw_line, omap_lcd->palette, &first, &last); if (first >= 0) { dpy_gfx_update(omap_lcd->con, 0, first, width, last - first + 1); } omap_lcd->invalidate = 0; } | 15,816 |
0 | static void rcu_qtest(const char *test, int duration, int nreaders) { int i; long long n_removed_local = 0; struct list_element *el, *prev_el; rcu_qtest_init(); for (i = 0; i < nreaders; i++) { create_thread(rcu_q_reader); } create_thread(rcu_q_updater); rcu_qtest_run(duration, nreaders); QLIST_FOREACH_SAFE_RCU(prev_el, &Q_list_head, entry, el) { QLIST_REMOVE_RCU(prev_el, entry); call_rcu1(&prev_el->rcu, reclaim_list_el); n_removed_local++; } atomic_add(&n_nodes_removed, n_removed_local); synchronize_rcu(); while (n_nodes_removed > n_reclaims) { g_usleep(100); synchronize_rcu(); } if (g_test_in_charge) { g_assert_cmpint(n_nodes_removed, ==, n_reclaims); } else { printf("%s: %d readers; 1 updater; nodes read: " \ "%lld, nodes removed: %lld; nodes reclaimed: %lld\n", test, nthreadsrunning - 1, n_reads, n_nodes_removed, n_reclaims); exit(0); } } | 15,817 |
0 | void qemu_chr_close(CharDriverState *chr) { TAILQ_REMOVE(&chardevs, chr, next); if (chr->chr_close) chr->chr_close(chr); qemu_free(chr->filename); qemu_free(chr->label); qemu_free(chr); } | 15,819 |
0 | static void mirror_start_job(const char *job_id, BlockDriverState *bs, int creation_flags, BlockDriverState *target, const char *replaces, int64_t speed, uint32_t granularity, int64_t buf_size, BlockMirrorBackingMode backing_mode, BlockdevOnError on_source_error, BlockdevOnError on_target_error, bool unmap, BlockCompletionFunc *cb, void *opaque, const BlockJobDriver *driver, bool is_none_mode, BlockDriverState *base, bool auto_complete, const char *filter_node_name, bool is_mirror, Error **errp) { MirrorBlockJob *s; BlockDriverState *mirror_top_bs; bool target_graph_mod; bool target_is_backing; Error *local_err = NULL; int ret; if (granularity == 0) { granularity = bdrv_get_default_bitmap_granularity(target); } assert ((granularity & (granularity - 1)) == 0); /* Granularity must be large enough for sector-based dirty bitmap */ assert(granularity >= BDRV_SECTOR_SIZE); if (buf_size < 0) { error_setg(errp, "Invalid parameter 'buf-size'"); return; } if (buf_size == 0) { buf_size = DEFAULT_MIRROR_BUF_SIZE; } /* In the case of active commit, add dummy driver to provide consistent * reads on the top, while disabling it in the intermediate nodes, and make * the backing chain writable. */ mirror_top_bs = bdrv_new_open_driver(&bdrv_mirror_top, filter_node_name, BDRV_O_RDWR, errp); if (mirror_top_bs == NULL) { return; } if (!filter_node_name) { mirror_top_bs->implicit = true; } mirror_top_bs->total_sectors = bs->total_sectors; bdrv_set_aio_context(mirror_top_bs, bdrv_get_aio_context(bs)); /* bdrv_append takes ownership of the mirror_top_bs reference, need to keep * it alive until block_job_create() succeeds even if bs has no parent. */ bdrv_ref(mirror_top_bs); bdrv_drained_begin(bs); bdrv_append(mirror_top_bs, bs, &local_err); bdrv_drained_end(bs); if (local_err) { bdrv_unref(mirror_top_bs); error_propagate(errp, local_err); return; } /* Make sure that the source is not resized while the job is running */ s = block_job_create(job_id, driver, mirror_top_bs, BLK_PERM_CONSISTENT_READ, BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE | BLK_PERM_GRAPH_MOD, speed, creation_flags, cb, opaque, errp); if (!s) { goto fail; } /* The block job now has a reference to this node */ bdrv_unref(mirror_top_bs); s->source = bs; s->mirror_top_bs = mirror_top_bs; /* No resize for the target either; while the mirror is still running, a * consistent read isn't necessarily possible. We could possibly allow * writes and graph modifications, though it would likely defeat the * purpose of a mirror, so leave them blocked for now. * * In the case of active commit, things look a bit different, though, * because the target is an already populated backing file in active use. * We can allow anything except resize there.*/ target_is_backing = bdrv_chain_contains(bs, target); target_graph_mod = (backing_mode != MIRROR_LEAVE_BACKING_CHAIN); s->target = blk_new(BLK_PERM_WRITE | BLK_PERM_RESIZE | (target_graph_mod ? BLK_PERM_GRAPH_MOD : 0), BLK_PERM_WRITE_UNCHANGED | (target_is_backing ? BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE | BLK_PERM_GRAPH_MOD : 0)); ret = blk_insert_bs(s->target, target, errp); if (ret < 0) { goto fail; } if (is_mirror) { /* XXX: Mirror target could be a NBD server of target QEMU in the case * of non-shared block migration. To allow migration completion, we * have to allow "inactivate" of the target BB. When that happens, we * know the job is drained, and the vcpus are stopped, so no write * operation will be performed. Block layer already has assertions to * ensure that. 
*/ blk_set_force_allow_inactivate(s->target); } s->replaces = g_strdup(replaces); s->on_source_error = on_source_error; s->on_target_error = on_target_error; s->is_none_mode = is_none_mode; s->backing_mode = backing_mode; s->base = base; s->granularity = granularity; s->buf_size = ROUND_UP(buf_size, granularity); s->unmap = unmap; if (auto_complete) { s->should_complete = true; } s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp); if (!s->dirty_bitmap) { goto fail; } /* Required permissions are already taken with blk_new() */ block_job_add_bdrv(&s->common, "target", target, 0, BLK_PERM_ALL, &error_abort); /* In commit_active_start() all intermediate nodes disappear, so * any jobs in them must be blocked */ if (target_is_backing) { BlockDriverState *iter; for (iter = backing_bs(bs); iter != target; iter = backing_bs(iter)) { /* XXX BLK_PERM_WRITE needs to be allowed so we don't block * ourselves at s->base (if writes are blocked for a node, they are * also blocked for its backing file). The other options would be a * second filter driver above s->base (== target). */ ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0, BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE, errp); if (ret < 0) { goto fail; } } } trace_mirror_start(bs, s, opaque); block_job_start(&s->common); return; fail: if (s) { /* Make sure this BDS does not go away until we have completed the graph * changes below */ bdrv_ref(mirror_top_bs); g_free(s->replaces); blk_unref(s->target); block_job_early_fail(&s->common); } bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL, &error_abort); bdrv_replace_node(mirror_top_bs, backing_bs(mirror_top_bs), &error_abort); bdrv_unref(mirror_top_bs); } | 15,821 |
0 | static void test_acpi_tables(test_data *data) { int tables_nr = data->rsdt_tables_nr - 1; /* fadt is first */ int i; for (i = 0; i < tables_nr; i++) { AcpiSdtTable ssdt_table; uint32_t addr; addr = le32_to_cpu(data->rsdt_tables_addr[i + 1]); /* fadt is first */ test_dst_table(&ssdt_table, addr); g_array_append_val(data->tables, ssdt_table); } } | 15,822 |
0 | void cris_mmu_flush_pid(CPUState *env, uint32_t pid) { target_ulong vaddr; unsigned int idx; uint32_t lo, hi; uint32_t tlb_vpn; int tlb_pid, tlb_g, tlb_v, tlb_k; unsigned int set; unsigned int mmu; pid &= 0xff; for (mmu = 0; mmu < 2; mmu++) { for (set = 0; set < 4; set++) { for (idx = 0; idx < 16; idx++) { lo = env->tlbsets[mmu][set][idx].lo; hi = env->tlbsets[mmu][set][idx].hi; tlb_vpn = EXTRACT_FIELD(hi, 13, 31); tlb_pid = EXTRACT_FIELD(hi, 0, 7); tlb_g = EXTRACT_FIELD(lo, 4, 4); tlb_v = EXTRACT_FIELD(lo, 3, 3); tlb_k = EXTRACT_FIELD(lo, 2, 2); /* Kernel protected areas need to be flushed as well. */ if (tlb_v && !tlb_g && (tlb_pid == pid || tlb_k)) { vaddr = tlb_vpn << TARGET_PAGE_BITS; D(fprintf(logfile, "flush pid=%x vaddr=%x\n", pid, vaddr)); tlb_flush_page(env, vaddr); } } } } } | 15,824 |
0 | static void vfio_intp_interrupt(VFIOINTp *intp) { int ret; VFIOINTp *tmp; VFIOPlatformDevice *vdev = intp->vdev; bool delay_handling = false; qemu_mutex_lock(&vdev->intp_mutex); if (intp->state == VFIO_IRQ_INACTIVE) { QLIST_FOREACH(tmp, &vdev->intp_list, next) { if (tmp->state == VFIO_IRQ_ACTIVE || tmp->state == VFIO_IRQ_PENDING) { delay_handling = true; break; } } } if (delay_handling) { /* * the new IRQ gets a pending status and is pushed in * the pending queue */ intp->state = VFIO_IRQ_PENDING; trace_vfio_intp_interrupt_set_pending(intp->pin); QSIMPLEQ_INSERT_TAIL(&vdev->pending_intp_queue, intp, pqnext); ret = event_notifier_test_and_clear(&intp->interrupt); qemu_mutex_unlock(&vdev->intp_mutex); return; } trace_vfio_platform_intp_interrupt(intp->pin, event_notifier_get_fd(&intp->interrupt)); ret = event_notifier_test_and_clear(&intp->interrupt); if (!ret) { error_report("Error when clearing fd=%d (ret = %d)", event_notifier_get_fd(&intp->interrupt), ret); } intp->state = VFIO_IRQ_ACTIVE; /* sets slow path */ vfio_mmap_set_enabled(vdev, false); /* trigger the virtual IRQ */ qemu_set_irq(intp->qemuirq, 1); /* * Schedule the mmap timer which will restore fastpath when no IRQ * is active anymore */ if (vdev->mmap_timeout) { timer_mod(vdev->mmap_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->mmap_timeout); } qemu_mutex_unlock(&vdev->intp_mutex); } | 15,825 |
0 | void tb_invalidate_phys_addr(hwaddr addr) { ram_addr_t ram_addr; MemoryRegionSection *section; section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS); if (!(memory_region_is_ram(section->mr) || memory_region_is_romd(section->mr))) { return; } ram_addr = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK) + memory_region_section_addr(section, addr); tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0); } | 15,826 |
0 | static void lsi_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); k->init = lsi_scsi_init; k->exit = lsi_scsi_uninit; k->vendor_id = PCI_VENDOR_ID_LSI_LOGIC; k->device_id = PCI_DEVICE_ID_LSI_53C895A; k->class_id = PCI_CLASS_STORAGE_SCSI; k->subsystem_id = 0x1000; dc->alias = "lsi"; dc->reset = lsi_scsi_reset; dc->vmsd = &vmstate_lsi_scsi; } | 15,827 |
0 | static void sigp_stop(CPUState *cs, run_on_cpu_data arg) { S390CPU *cpu = S390_CPU(cs); SigpInfo *si = arg.host_ptr; if (s390_cpu_get_state(cpu) != CPU_STATE_OPERATING) { si->cc = SIGP_CC_ORDER_CODE_ACCEPTED; return; } /* disabled wait - sleeping in user space */ if (cs->halted) { s390_cpu_set_state(CPU_STATE_STOPPED, cpu); } else { /* execute the stop function */ cpu->env.sigp_order = SIGP_STOP; cpu_inject_stop(cpu); } si->cc = SIGP_CC_ORDER_CODE_ACCEPTED; } | 15,828 |
0 | static bool bdrv_exceed_iops_limits(BlockDriverState *bs, bool is_write, double elapsed_time, uint64_t *wait) { uint64_t iops_limit = 0; double ios_limit, ios_base; double slice_time, wait_time; if (bs->io_limits.iops[BLOCK_IO_LIMIT_TOTAL]) { iops_limit = bs->io_limits.iops[BLOCK_IO_LIMIT_TOTAL]; } else if (bs->io_limits.iops[is_write]) { iops_limit = bs->io_limits.iops[is_write]; } else { if (wait) { *wait = 0; } return false; } slice_time = bs->slice_end - bs->slice_start; slice_time /= (NANOSECONDS_PER_SECOND); ios_limit = iops_limit * slice_time; ios_base = bs->nr_ops[is_write] - bs->io_base.ios[is_write]; if (bs->io_limits.iops[BLOCK_IO_LIMIT_TOTAL]) { ios_base += bs->nr_ops[!is_write] - bs->io_base.ios[!is_write]; } if (ios_base + 1 <= ios_limit) { if (wait) { *wait = 0; } return false; } /* Calc approx time to dispatch */ wait_time = (ios_base + 1) / iops_limit; if (wait_time > elapsed_time) { wait_time = wait_time - elapsed_time; } else { wait_time = 0; } bs->slice_time = wait_time * BLOCK_IO_SLICE_TIME * 10; bs->slice_end += bs->slice_time - 3 * BLOCK_IO_SLICE_TIME; if (wait) { *wait = wait_time * BLOCK_IO_SLICE_TIME * 10; } return true; } | 15,829 |
0 | static struct vm_area_struct *vma_next(struct vm_area_struct *vma) { return (TAILQ_NEXT(vma, vma_link)); } | 15,830 |
0 | void ff_get_unscaled_swscale(SwsContext *c) { const enum PixelFormat srcFormat = c->srcFormat; const enum PixelFormat dstFormat = c->dstFormat; const int flags = c->flags; const int dstH = c->dstH; int needsDither; needsDither= isAnyRGB(dstFormat) && c->dstFormatBpp < 24 && (c->dstFormatBpp < c->srcFormatBpp || (!isAnyRGB(srcFormat))); /* yv12_to_nv12 */ if ((srcFormat == PIX_FMT_YUV420P || srcFormat == PIX_FMT_YUVA420P) && (dstFormat == PIX_FMT_NV12 || dstFormat == PIX_FMT_NV21)) { c->swScale= planarToNv12Wrapper; } /* yuv2bgr */ if ((srcFormat==PIX_FMT_YUV420P || srcFormat==PIX_FMT_YUV422P || srcFormat==PIX_FMT_YUVA420P) && isAnyRGB(dstFormat) && !(flags & SWS_ACCURATE_RND) && !(dstH&1)) { c->swScale= ff_yuv2rgb_get_func_ptr(c); } if (srcFormat==PIX_FMT_YUV410P && (dstFormat==PIX_FMT_YUV420P || dstFormat==PIX_FMT_YUVA420P) && !(flags & SWS_BITEXACT)) { c->swScale= yvu9ToYv12Wrapper; } /* bgr24toYV12 */ if (srcFormat==PIX_FMT_BGR24 && (dstFormat==PIX_FMT_YUV420P || dstFormat==PIX_FMT_YUVA420P) && !(flags & SWS_ACCURATE_RND)) c->swScale= bgr24ToYv12Wrapper; /* RGB/BGR -> RGB/BGR (no dither needed forms) */ if ( isAnyRGB(srcFormat) && isAnyRGB(dstFormat) && srcFormat != PIX_FMT_BGR8 && dstFormat != PIX_FMT_BGR8 && srcFormat != PIX_FMT_RGB8 && dstFormat != PIX_FMT_RGB8 && srcFormat != PIX_FMT_BGR4 && dstFormat != PIX_FMT_BGR4 && srcFormat != PIX_FMT_RGB4 && dstFormat != PIX_FMT_RGB4 && srcFormat != PIX_FMT_BGR4_BYTE && dstFormat != PIX_FMT_BGR4_BYTE && srcFormat != PIX_FMT_RGB4_BYTE && dstFormat != PIX_FMT_RGB4_BYTE && srcFormat != PIX_FMT_MONOBLACK && dstFormat != PIX_FMT_MONOBLACK && srcFormat != PIX_FMT_MONOWHITE && dstFormat != PIX_FMT_MONOWHITE && srcFormat != PIX_FMT_RGB48LE && dstFormat != PIX_FMT_RGB48LE && srcFormat != PIX_FMT_RGB48BE && dstFormat != PIX_FMT_RGB48BE && srcFormat != PIX_FMT_BGR48LE && dstFormat != PIX_FMT_BGR48LE && srcFormat != PIX_FMT_BGR48BE && dstFormat != PIX_FMT_BGR48BE && (!needsDither || (c->flags&(SWS_FAST_BILINEAR|SWS_POINT)))) c->swScale= rgbToRgbWrapper; #define isByteRGB(f) (\ f == PIX_FMT_RGB32 ||\ f == PIX_FMT_RGB32_1 ||\ f == PIX_FMT_RGB24 ||\ f == PIX_FMT_BGR32 ||\ f == PIX_FMT_BGR32_1 ||\ f == PIX_FMT_BGR24) if (isAnyRGB(srcFormat) && isPlanar(srcFormat) && isByteRGB(dstFormat)) c->swScale= planarRgbToRgbWrapper; if ((usePal(srcFormat) && ( dstFormat == PIX_FMT_RGB32 || dstFormat == PIX_FMT_RGB32_1 || dstFormat == PIX_FMT_RGB24 || dstFormat == PIX_FMT_BGR32 || dstFormat == PIX_FMT_BGR32_1 || dstFormat == PIX_FMT_BGR24))) c->swScale= palToRgbWrapper; if (srcFormat == PIX_FMT_YUV422P) { if (dstFormat == PIX_FMT_YUYV422) c->swScale= yuv422pToYuy2Wrapper; else if (dstFormat == PIX_FMT_UYVY422) c->swScale= yuv422pToUyvyWrapper; } /* LQ converters if -sws 0 or -sws 4*/ if (c->flags&(SWS_FAST_BILINEAR|SWS_POINT)) { /* yv12_to_yuy2 */ if (srcFormat == PIX_FMT_YUV420P || srcFormat == PIX_FMT_YUVA420P) { if (dstFormat == PIX_FMT_YUYV422) c->swScale= planarToYuy2Wrapper; else if (dstFormat == PIX_FMT_UYVY422) c->swScale= planarToUyvyWrapper; } } if(srcFormat == PIX_FMT_YUYV422 && (dstFormat == PIX_FMT_YUV420P || dstFormat == PIX_FMT_YUVA420P)) c->swScale= yuyvToYuv420Wrapper; if(srcFormat == PIX_FMT_UYVY422 && (dstFormat == PIX_FMT_YUV420P || dstFormat == PIX_FMT_YUVA420P)) c->swScale= uyvyToYuv420Wrapper; if(srcFormat == PIX_FMT_YUYV422 && dstFormat == PIX_FMT_YUV422P) c->swScale= yuyvToYuv422Wrapper; if(srcFormat == PIX_FMT_UYVY422 && dstFormat == PIX_FMT_YUV422P) c->swScale= uyvyToYuv422Wrapper; /* simple copy */ if ( srcFormat == dstFormat || 
(srcFormat == PIX_FMT_YUVA420P && dstFormat == PIX_FMT_YUV420P) || (srcFormat == PIX_FMT_YUV420P && dstFormat == PIX_FMT_YUVA420P) || (isPlanarYUV(srcFormat) && isGray(dstFormat)) || (isPlanarYUV(dstFormat) && isGray(srcFormat)) || (isGray(dstFormat) && isGray(srcFormat)) || (isPlanarYUV(srcFormat) && isPlanarYUV(dstFormat) && c->chrDstHSubSample == c->chrSrcHSubSample && c->chrDstVSubSample == c->chrSrcVSubSample && dstFormat != PIX_FMT_NV12 && dstFormat != PIX_FMT_NV21 && srcFormat != PIX_FMT_NV12 && srcFormat != PIX_FMT_NV21)) { if (isPacked(c->srcFormat)) c->swScale= packedCopyWrapper; else /* Planar YUV or gray */ c->swScale= planarCopyWrapper; } if (ARCH_BFIN) ff_bfin_get_unscaled_swscale(c); if (HAVE_ALTIVEC) ff_swscale_get_unscaled_altivec(c); } | 15,831 |
0 | void address_space_init_dispatch(AddressSpace *as) { AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1); d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 }; d->listener = (MemoryListener) { .begin = mem_begin, .region_add = mem_add, .region_nop = mem_add, .priority = 0, }; d->as = as; as->dispatch = d; memory_listener_register(&d->listener, as); } | 15,832 |
0 | void helper_fcmp_gt_FT(CPUSH4State *env, float32 t0, float32 t1) { int relation; set_float_exception_flags(0, &env->fp_status); relation = float32_compare(t0, t1, &env->fp_status); if (unlikely(relation == float_relation_unordered)) { update_fpscr(env, GETPC()); } else { env->sr_t = (relation == float_relation_greater); } } | 15,834 |
0 | static void pxa2xx_i2s_write(void *opaque, hwaddr addr, uint64_t value, unsigned size) { PXA2xxI2SState *s = (PXA2xxI2SState *) opaque; uint32_t *sample; switch (addr) { case SACR0: if (value & (1 << 3)) /* RST */ pxa2xx_i2s_reset(s); s->control[0] = value & 0xff3d; if (!s->enable && (value & 1) && s->tx_len) { /* ENB */ for (sample = s->fifo; s->fifo_len > 0; s->fifo_len --, sample ++) s->codec_out(s->opaque, *sample); s->status &= ~(1 << 7); /* I2SOFF */ } if (value & (1 << 4)) /* EFWR */ printf("%s: Attempt to use special function\n", __FUNCTION__); s->enable = (value & 9) == 1; /* ENB && !RST*/ pxa2xx_i2s_update(s); break; case SACR1: s->control[1] = value & 0x0039; if (value & (1 << 5)) /* ENLBF */ printf("%s: Attempt to use loopback function\n", __FUNCTION__); if (value & (1 << 4)) /* DPRL */ s->fifo_len = 0; pxa2xx_i2s_update(s); break; case SAIMR: s->mask = value & 0x0078; pxa2xx_i2s_update(s); break; case SAICR: s->status &= ~(value & (3 << 5)); pxa2xx_i2s_update(s); break; case SADIV: s->clk = value & 0x007f; break; case SADR: if (s->tx_len && s->enable) { s->tx_len --; pxa2xx_i2s_update(s); s->codec_out(s->opaque, value); } else if (s->fifo_len < 16) { s->fifo[s->fifo_len ++] = value; pxa2xx_i2s_update(s); } break; default: printf("%s: Bad register " REG_FMT "\n", __FUNCTION__, addr); } } | 15,836 |
0 | static int pci_unin_main_init_device(SysBusDevice *dev) { UNINState *s; int pci_mem_config, pci_mem_data; /* Use values found on a real PowerMac */ /* Uninorth main bus */ s = FROM_SYSBUS(UNINState, dev); pci_mem_config = cpu_register_io_memory(pci_unin_main_config_read, pci_unin_main_config_write, s); pci_mem_data = cpu_register_io_memory(pci_unin_main_read, pci_unin_main_write, &s->host_state); sysbus_init_mmio(dev, 0x1000, pci_mem_config); sysbus_init_mmio(dev, 0x1000, pci_mem_data); register_savevm("uninorth", 0, 1, pci_unin_save, pci_unin_load, &s->host_state); qemu_register_reset(pci_unin_reset, &s->host_state); return 0; } | 15,837 |
0 | static inline void wb_SR_F(void) { int label; label = gen_new_label(); tcg_gen_andi_tl(cpu_sr, cpu_sr, ~SR_F); tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, label); tcg_gen_ori_tl(cpu_sr, cpu_sr, SR_F); gen_set_label(label); } | 15,838 |
0 | static inline uint32_t efsctsf(uint32_t val) { CPU_FloatU u; float32 tmp; u.l = val; /* NaN are not treated the same way IEEE 754 does */ if (unlikely(float32_is_nan(u.f))) return 0; tmp = uint64_to_float32(1ULL << 32, &env->vec_status); u.f = float32_mul(u.f, tmp, &env->vec_status); return float32_to_int32(u.f, &env->vec_status); } | 15,839 |
0 | static inline int decode_seq_parameter_set(H264Context *h){ MpegEncContext * const s = &h->s; int profile_idc, level_idc; unsigned int sps_id, tmp, mb_width, mb_height; int i; SPS *sps; profile_idc= get_bits(&s->gb, 8); get_bits1(&s->gb); //constraint_set0_flag get_bits1(&s->gb); //constraint_set1_flag get_bits1(&s->gb); //constraint_set2_flag get_bits1(&s->gb); //constraint_set3_flag get_bits(&s->gb, 4); // reserved level_idc= get_bits(&s->gb, 8); sps_id= get_ue_golomb(&s->gb); sps = alloc_parameter_set(h, (void **)h->sps_buffers, sps_id, MAX_SPS_COUNT, sizeof(SPS), "sps"); if(sps == NULL) return -1; sps->profile_idc= profile_idc; sps->level_idc= level_idc; memset(sps->scaling_matrix4, 16, sizeof(sps->scaling_matrix4)); memset(sps->scaling_matrix8, 16, sizeof(sps->scaling_matrix8)); sps->scaling_matrix_present = 0; if(sps->profile_idc >= 100){ //high profile sps->chroma_format_idc= get_ue_golomb(&s->gb); if(sps->chroma_format_idc == 3) get_bits1(&s->gb); //residual_color_transform_flag get_ue_golomb(&s->gb); //bit_depth_luma_minus8 get_ue_golomb(&s->gb); //bit_depth_chroma_minus8 sps->transform_bypass = get_bits1(&s->gb); decode_scaling_matrices(h, sps, NULL, 1, sps->scaling_matrix4, sps->scaling_matrix8); }else{ sps->chroma_format_idc= 1; } sps->log2_max_frame_num= get_ue_golomb(&s->gb) + 4; sps->poc_type= get_ue_golomb(&s->gb); if(sps->poc_type == 0){ //FIXME #define sps->log2_max_poc_lsb= get_ue_golomb(&s->gb) + 4; } else if(sps->poc_type == 1){//FIXME #define sps->delta_pic_order_always_zero_flag= get_bits1(&s->gb); sps->offset_for_non_ref_pic= get_se_golomb(&s->gb); sps->offset_for_top_to_bottom_field= get_se_golomb(&s->gb); tmp= get_ue_golomb(&s->gb); if(tmp >= sizeof(sps->offset_for_ref_frame) / sizeof(sps->offset_for_ref_frame[0])){ av_log(h->s.avctx, AV_LOG_ERROR, "poc_cycle_length overflow %u\n", tmp); return -1; } sps->poc_cycle_length= tmp; for(i=0; i<sps->poc_cycle_length; i++) sps->offset_for_ref_frame[i]= get_se_golomb(&s->gb); }else if(sps->poc_type != 2){ av_log(h->s.avctx, AV_LOG_ERROR, "illegal POC type %d\n", sps->poc_type); return -1; } tmp= get_ue_golomb(&s->gb); if(tmp > MAX_PICTURE_COUNT-2 || tmp >= 32){ av_log(h->s.avctx, AV_LOG_ERROR, "too many reference frames\n"); return -1; } sps->ref_frame_count= tmp; sps->gaps_in_frame_num_allowed_flag= get_bits1(&s->gb); mb_width= get_ue_golomb(&s->gb) + 1; mb_height= get_ue_golomb(&s->gb) + 1; if(mb_width >= INT_MAX/16 || mb_height >= INT_MAX/16 || avcodec_check_dimensions(NULL, 16*mb_width, 16*mb_height)){ av_log(h->s.avctx, AV_LOG_ERROR, "mb_width/height overflow\n"); return -1; } sps->mb_width = mb_width; sps->mb_height= mb_height; sps->frame_mbs_only_flag= get_bits1(&s->gb); if(!sps->frame_mbs_only_flag) sps->mb_aff= get_bits1(&s->gb); else sps->mb_aff= 0; sps->direct_8x8_inference_flag= get_bits1(&s->gb); #ifndef ALLOW_INTERLACE if(sps->mb_aff) av_log(h->s.avctx, AV_LOG_ERROR, "MBAFF support not included; enable it at compile-time.\n"); #endif if(!sps->direct_8x8_inference_flag && sps->mb_aff) av_log(h->s.avctx, AV_LOG_ERROR, "MBAFF + !direct_8x8_inference is not implemented\n"); sps->crop= get_bits1(&s->gb); if(sps->crop){ sps->crop_left = get_ue_golomb(&s->gb); sps->crop_right = get_ue_golomb(&s->gb); sps->crop_top = get_ue_golomb(&s->gb); sps->crop_bottom= get_ue_golomb(&s->gb); if(sps->crop_left || sps->crop_top){ av_log(h->s.avctx, AV_LOG_ERROR, "insane cropping not completely supported, this could look slightly wrong ...\n"); } if(sps->crop_right >= 8 || sps->crop_bottom >= (8>> !sps->frame_mbs_only_flag)){ 
av_log(h->s.avctx, AV_LOG_ERROR, "brainfart cropping not supported, this could look slightly wrong ...\n"); } }else{ sps->crop_left = sps->crop_right = sps->crop_top = sps->crop_bottom= 0; } sps->vui_parameters_present_flag= get_bits1(&s->gb); if( sps->vui_parameters_present_flag ) decode_vui_parameters(h, sps); if(s->avctx->debug&FF_DEBUG_PICT_INFO){ av_log(h->s.avctx, AV_LOG_DEBUG, "sps:%u profile:%d/%d poc:%d ref:%d %dx%d %s %s crop:%d/%d/%d/%d %s %s\n", sps_id, sps->profile_idc, sps->level_idc, sps->poc_type, sps->ref_frame_count, sps->mb_width, sps->mb_height, sps->frame_mbs_only_flag ? "FRM" : (sps->mb_aff ? "MB-AFF" : "PIC-AFF"), sps->direct_8x8_inference_flag ? "8B8" : "", sps->crop_left, sps->crop_right, sps->crop_top, sps->crop_bottom, sps->vui_parameters_present_flag ? "VUI" : "", ((const char*[]){"Gray","420","422","444"})[sps->chroma_format_idc] ); } return 0; } | 15,841 |
0 | static enum AVPixelFormat dshow_pixfmt(DWORD biCompression, WORD biBitCount) { switch(biCompression) { case BI_BITFIELDS: case BI_RGB: switch(biBitCount) { /* 1-8 are untested */ case 1: return AV_PIX_FMT_MONOWHITE; case 4: return AV_PIX_FMT_RGB4; case 8: return AV_PIX_FMT_RGB8; case 16: return AV_PIX_FMT_RGB555; case 24: return AV_PIX_FMT_BGR24; case 32: return AV_PIX_FMT_0RGB32; } } return avpriv_find_pix_fmt(ff_raw_pix_fmt_tags, biCompression); // all others } | 15,842 |
0 | static void decode_scaling_list(H264Context *h, uint8_t *factors, int size, const uint8_t *jvt_list, const uint8_t *fallback_list) { int i, last = 8, next = 8; const uint8_t *scan = size == 16 ? ff_zigzag_scan : ff_zigzag_direct; if (!get_bits1(&h->gb)) /* matrix not written, we use the predicted one */ memcpy(factors, fallback_list, size * sizeof(uint8_t)); else for (i = 0; i < size; i++) { if (next) next = (last + get_se_golomb(&h->gb)) & 0xff; if (!i && !next) { /* matrix not written, we use the preset one */ memcpy(factors, jvt_list, size * sizeof(uint8_t)); break; } last = factors[scan[i]] = next ? next : last; } } | 15,843 |
1 | static void iscsi_co_generic_bh_cb(void *opaque) { struct IscsiTask *iTask = opaque; iTask->complete = 1; qemu_bh_delete(iTask->bh); qemu_coroutine_enter(iTask->co, NULL); } | 15,845 |
1 | av_cold int ff_dct_common_init(MpegEncContext *s) { ff_dsputil_init(&s->dsp, s->avctx); ff_videodsp_init(&s->vdsp, 8); s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c; s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c; s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c; s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c; s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c; if (s->flags & CODEC_FLAG_BITEXACT) s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact; s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c; #if ARCH_X86 ff_MPV_common_init_x86(s); #elif ARCH_ALPHA ff_MPV_common_init_axp(s); #elif ARCH_ARM ff_MPV_common_init_arm(s); #elif HAVE_ALTIVEC ff_MPV_common_init_altivec(s); #elif ARCH_BFIN ff_MPV_common_init_bfin(s); #endif /* load & permutate scantables * note: only wmv uses different ones */ if (s->alternate_scan) { ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan); ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan); } else { ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct); ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct); } ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan); ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan); return 0; } | 15,847 |
1 | int s390_ccw_cmd_request(ORB *orb, SCSW *scsw, void *data) { S390CCWDeviceClass *cdc = S390_CCW_DEVICE_GET_CLASS(data); if (cdc->handle_request) { return cdc->handle_request(orb, scsw, data); } else { return -ENOSYS; } } | 15,848 |
1 | static void v9fs_walk(void *opaque) { int name_idx; V9fsQID *qids = NULL; int i, err = 0; V9fsPath dpath, path; uint16_t nwnames; struct stat stbuf; size_t offset = 7; int32_t fid, newfid; V9fsString *wnames = NULL; V9fsFidState *fidp; V9fsFidState *newfidp = NULL; V9fsPDU *pdu = opaque; V9fsState *s = pdu->s; err = pdu_unmarshal(pdu, offset, "ddw", &fid, &newfid, &nwnames); if (err < 0) { pdu_complete(pdu, err); return; } offset += err; trace_v9fs_walk(pdu->tag, pdu->id, fid, newfid, nwnames); if (nwnames && nwnames <= P9_MAXWELEM) { wnames = g_malloc0(sizeof(wnames[0]) * nwnames); qids = g_malloc0(sizeof(qids[0]) * nwnames); for (i = 0; i < nwnames; i++) { err = pdu_unmarshal(pdu, offset, "s", &wnames[i]); if (err < 0) { goto out_nofid; } offset += err; } } else if (nwnames > P9_MAXWELEM) { err = -EINVAL; goto out_nofid; } fidp = get_fid(pdu, fid); if (fidp == NULL) { err = -ENOENT; goto out_nofid; } v9fs_path_init(&dpath); v9fs_path_init(&path); /* * Both dpath and path initially point to fidp. * Needed to handle request with nwnames == 0 */ v9fs_path_copy(&dpath, &fidp->path); v9fs_path_copy(&path, &fidp->path); for (name_idx = 0; name_idx < nwnames; name_idx++) { err = v9fs_co_name_to_path(pdu, &dpath, wnames[name_idx].data, &path); if (err < 0) { goto out; } err = v9fs_co_lstat(pdu, &path, &stbuf); if (err < 0) { goto out; } stat_to_qid(&stbuf, &qids[name_idx]); v9fs_path_copy(&dpath, &path); } if (fid == newfid) { BUG_ON(fidp->fid_type != P9_FID_NONE); v9fs_path_copy(&fidp->path, &path); } else { newfidp = alloc_fid(s, newfid); if (newfidp == NULL) { err = -EINVAL; goto out; } newfidp->uid = fidp->uid; v9fs_path_copy(&newfidp->path, &path); } err = v9fs_walk_marshal(pdu, nwnames, qids); trace_v9fs_walk_return(pdu->tag, pdu->id, nwnames, qids); out: put_fid(pdu, fidp); if (newfidp) { put_fid(pdu, newfidp); } v9fs_path_free(&dpath); v9fs_path_free(&path); out_nofid: pdu_complete(pdu, err); if (nwnames && nwnames <= P9_MAXWELEM) { for (name_idx = 0; name_idx < nwnames; name_idx++) { v9fs_string_free(&wnames[name_idx]); } g_free(wnames); g_free(qids); } } | 15,849 |
1 | static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn) { int rt = extract32(insn, 0, 5); int rn = extract32(insn, 5, 5); int imm9 = sextract32(insn, 12, 9); int opc = extract32(insn, 22, 2); int size = extract32(insn, 30, 2); int idx = extract32(insn, 10, 2); bool is_signed = false; bool is_store = false; bool is_extended = false; bool is_unpriv = (idx == 2); bool is_vector = extract32(insn, 26, 1); bool post_index; bool writeback; TCGv_i64 tcg_addr; if (is_vector) { size |= (opc & 2) << 1; if (size > 4 || is_unpriv) { unallocated_encoding(s); return; } is_store = ((opc & 1) == 0); if (!fp_access_check(s)) { return; } } else { if (size == 3 && opc == 2) { /* PRFM - prefetch */ if (is_unpriv) { unallocated_encoding(s); return; } return; } if (opc == 3 && size > 1) { unallocated_encoding(s); return; } is_store = (opc == 0); is_signed = opc & (1<<1); is_extended = (size < 3) && (opc & 1); } switch (idx) { case 0: case 2: post_index = false; writeback = false; break; case 1: post_index = true; writeback = true; break; case 3: post_index = false; writeback = true; break; } if (rn == 31) { gen_check_sp_alignment(s); } tcg_addr = read_cpu_reg_sp(s, rn, 1); if (!post_index) { tcg_gen_addi_i64(tcg_addr, tcg_addr, imm9); } if (is_vector) { if (is_store) { do_fp_st(s, rt, tcg_addr, size); } else { do_fp_ld(s, rt, tcg_addr, size); } } else { TCGv_i64 tcg_rt = cpu_reg(s, rt); int memidx = is_unpriv ? 1 : get_mem_index(s); if (is_store) { do_gpr_st_memidx(s, tcg_rt, tcg_addr, size, memidx); } else { do_gpr_ld_memidx(s, tcg_rt, tcg_addr, size, is_signed, is_extended, memidx); } } if (writeback) { TCGv_i64 tcg_rn = cpu_reg_sp(s, rn); if (post_index) { tcg_gen_addi_i64(tcg_addr, tcg_addr, imm9); } tcg_gen_mov_i64(tcg_rn, tcg_addr); } } | 15,850 |
1 | static int clv_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; CLVContext *c = avctx->priv_data; GetByteContext gb; uint32_t frame_type; int i, j; int ret; int mb_ret = 0; bytestream2_init(&gb, buf, buf_size); if (avctx->codec_tag == MKTAG('C','L','V','1')) { int skip = bytestream2_get_byte(&gb); bytestream2_skip(&gb, (skip + 1) * 8); } frame_type = bytestream2_get_byte(&gb); if ((ret = ff_reget_buffer(avctx, c->pic)) < 0) return ret; c->pic->key_frame = frame_type & 0x20 ? 1 : 0; c->pic->pict_type = frame_type & 0x20 ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P; if (frame_type & 0x2) { bytestream2_get_be32(&gb); /* frame size */ c->ac_quant = bytestream2_get_byte(&gb); c->luma_dc_quant = 32; c->chroma_dc_quant = 32; if ((ret = init_get_bits8(&c->gb, buf + bytestream2_tell(&gb), (buf_size - bytestream2_tell(&gb)))) < 0) return ret; for (i = 0; i < 3; i++) c->top_dc[i] = 32; for (i = 0; i < 4; i++) c->left_dc[i] = 32; for (j = 0; j < c->mb_height; j++) { for (i = 0; i < c->mb_width; i++) { ret = decode_mb(c, i, j); if (ret < 0) mb_ret = ret; } } } else { } if ((ret = av_frame_ref(data, c->pic)) < 0) return ret; *got_frame = 1; return mb_ret < 0 ? mb_ret : buf_size; } | 15,851 |
1 | static int process_ipmovie_chunk(IPMVEContext *s, AVIOContext *pb, AVPacket *pkt) { unsigned char chunk_preamble[CHUNK_PREAMBLE_SIZE]; int chunk_type; int chunk_size; unsigned char opcode_preamble[OPCODE_PREAMBLE_SIZE]; unsigned char opcode_type; unsigned char opcode_version; int opcode_size; unsigned char scratch[1024]; int i, j; int first_color, last_color; int audio_flags; unsigned char r, g, b; unsigned int width, height; /* see if there are any pending packets */ chunk_type = load_ipmovie_packet(s, pb, pkt); if (chunk_type != CHUNK_DONE) return chunk_type; /* read the next chunk, wherever the file happens to be pointing */ if (url_feof(pb)) return CHUNK_EOF; if (avio_read(pb, chunk_preamble, CHUNK_PREAMBLE_SIZE) != CHUNK_PREAMBLE_SIZE) return CHUNK_BAD; chunk_size = AV_RL16(&chunk_preamble[0]); chunk_type = AV_RL16(&chunk_preamble[2]); av_dlog(NULL, "chunk type 0x%04X, 0x%04X bytes: ", chunk_type, chunk_size); switch (chunk_type) { case CHUNK_INIT_AUDIO: av_dlog(NULL, "initialize audio\n"); break; case CHUNK_AUDIO_ONLY: av_dlog(NULL, "audio only\n"); break; case CHUNK_INIT_VIDEO: av_dlog(NULL, "initialize video\n"); break; case CHUNK_VIDEO: av_dlog(NULL, "video (and audio)\n"); break; case CHUNK_SHUTDOWN: av_dlog(NULL, "shutdown\n"); break; case CHUNK_END: av_dlog(NULL, "end\n"); break; default: av_dlog(NULL, "invalid chunk\n"); chunk_type = CHUNK_BAD; break; } while ((chunk_size > 0) && (chunk_type != CHUNK_BAD)) { /* read the next chunk, wherever the file happens to be pointing */ if (url_feof(pb)) { chunk_type = CHUNK_EOF; break; } if (avio_read(pb, opcode_preamble, CHUNK_PREAMBLE_SIZE) != CHUNK_PREAMBLE_SIZE) { chunk_type = CHUNK_BAD; break; } opcode_size = AV_RL16(&opcode_preamble[0]); opcode_type = opcode_preamble[2]; opcode_version = opcode_preamble[3]; chunk_size -= OPCODE_PREAMBLE_SIZE; chunk_size -= opcode_size; if (chunk_size < 0) { av_dlog(NULL, "chunk_size countdown just went negative\n"); chunk_type = CHUNK_BAD; break; } av_dlog(NULL, " opcode type %02X, version %d, 0x%04X bytes: ", opcode_type, opcode_version, opcode_size); switch (opcode_type) { case OPCODE_END_OF_STREAM: av_dlog(NULL, "end of stream\n"); avio_skip(pb, opcode_size); break; case OPCODE_END_OF_CHUNK: av_dlog(NULL, "end of chunk\n"); avio_skip(pb, opcode_size); break; case OPCODE_CREATE_TIMER: av_dlog(NULL, "create timer\n"); if ((opcode_version > 0) || (opcode_size != 6)) { av_dlog(NULL, "bad create_timer opcode\n"); chunk_type = CHUNK_BAD; break; } if (avio_read(pb, scratch, opcode_size) != opcode_size) { chunk_type = CHUNK_BAD; break; } s->frame_pts_inc = ((uint64_t)AV_RL32(&scratch[0])) * AV_RL16(&scratch[4]); av_dlog(NULL, " %.2f frames/second (timer div = %d, subdiv = %d)\n", 1000000.0 / s->frame_pts_inc, AV_RL32(&scratch[0]), AV_RL16(&scratch[4])); break; case OPCODE_INIT_AUDIO_BUFFERS: av_dlog(NULL, "initialize audio buffers\n"); if ((opcode_version > 1) || (opcode_size > 10)) { av_dlog(NULL, "bad init_audio_buffers opcode\n"); chunk_type = CHUNK_BAD; break; } if (avio_read(pb, scratch, opcode_size) != opcode_size) { chunk_type = CHUNK_BAD; break; } s->audio_sample_rate = AV_RL16(&scratch[4]); audio_flags = AV_RL16(&scratch[2]); /* bit 0 of the flags: 0 = mono, 1 = stereo */ s->audio_channels = (audio_flags & 1) + 1; /* bit 1 of the flags: 0 = 8 bit, 1 = 16 bit */ s->audio_bits = (((audio_flags >> 1) & 1) + 1) * 8; /* bit 2 indicates compressed audio in version 1 opcode */ if ((opcode_version == 1) && (audio_flags & 0x4)) s->audio_type = AV_CODEC_ID_INTERPLAY_DPCM; else if (s->audio_bits == 
16) s->audio_type = AV_CODEC_ID_PCM_S16LE; else s->audio_type = AV_CODEC_ID_PCM_U8; av_dlog(NULL, "audio: %d bits, %d Hz, %s, %s format\n", s->audio_bits, s->audio_sample_rate, (s->audio_channels == 2) ? "stereo" : "mono", (s->audio_type == AV_CODEC_ID_INTERPLAY_DPCM) ? "Interplay audio" : "PCM"); break; case OPCODE_START_STOP_AUDIO: av_dlog(NULL, "start/stop audio\n"); avio_skip(pb, opcode_size); break; case OPCODE_INIT_VIDEO_BUFFERS: av_dlog(NULL, "initialize video buffers\n"); if ((opcode_version > 2) || (opcode_size > 8) || opcode_size < 4 || opcode_version == 2 && opcode_size < 8 ) { av_dlog(NULL, "bad init_video_buffers opcode\n"); chunk_type = CHUNK_BAD; break; } if (avio_read(pb, scratch, opcode_size) != opcode_size) { chunk_type = CHUNK_BAD; break; } width = AV_RL16(&scratch[0]) * 8; height = AV_RL16(&scratch[2]) * 8; if (width != s->video_width) { s->video_width = width; s->changed++; } if (height != s->video_height) { s->video_height = height; s->changed++; } if (opcode_version < 2 || !AV_RL16(&scratch[6])) { s->video_bpp = 8; } else { s->video_bpp = 16; } av_dlog(NULL, "video resolution: %d x %d\n", s->video_width, s->video_height); break; case OPCODE_UNKNOWN_06: case OPCODE_UNKNOWN_0E: case OPCODE_UNKNOWN_10: case OPCODE_UNKNOWN_12: case OPCODE_UNKNOWN_13: case OPCODE_UNKNOWN_14: case OPCODE_UNKNOWN_15: av_dlog(NULL, "unknown (but documented) opcode %02X\n", opcode_type); avio_skip(pb, opcode_size); break; case OPCODE_SEND_BUFFER: av_dlog(NULL, "send buffer\n"); avio_skip(pb, opcode_size); break; case OPCODE_AUDIO_FRAME: av_dlog(NULL, "audio frame\n"); /* log position and move on for now */ s->audio_chunk_offset = avio_tell(pb); s->audio_chunk_size = opcode_size; avio_skip(pb, opcode_size); break; case OPCODE_SILENCE_FRAME: av_dlog(NULL, "silence frame\n"); avio_skip(pb, opcode_size); break; case OPCODE_INIT_VIDEO_MODE: av_dlog(NULL, "initialize video mode\n"); avio_skip(pb, opcode_size); break; case OPCODE_CREATE_GRADIENT: av_dlog(NULL, "create gradient\n"); avio_skip(pb, opcode_size); break; case OPCODE_SET_PALETTE: av_dlog(NULL, "set palette\n"); /* check for the logical maximum palette size * (3 * 256 + 4 bytes) */ if (opcode_size > 0x304) { av_dlog(NULL, "demux_ipmovie: set_palette opcode too large\n"); chunk_type = CHUNK_BAD; break; } if (avio_read(pb, scratch, opcode_size) != opcode_size) { chunk_type = CHUNK_BAD; break; } /* load the palette into internal data structure */ first_color = AV_RL16(&scratch[0]); last_color = first_color + AV_RL16(&scratch[2]) - 1; /* sanity check (since they are 16 bit values) */ if ((first_color > 0xFF) || (last_color > 0xFF)) { av_dlog(NULL, "demux_ipmovie: set_palette indexes out of range (%d -> %d)\n", first_color, last_color); chunk_type = CHUNK_BAD; break; } j = 4; /* offset of first palette data */ for (i = first_color; i <= last_color; i++) { /* the palette is stored as a 6-bit VGA palette, thus each * component is shifted up to a 8-bit range */ r = scratch[j++] * 4; g = scratch[j++] * 4; b = scratch[j++] * 4; s->palette[i] = (0xFFU << 24) | (r << 16) | (g << 8) | (b); s->palette[i] |= s->palette[i] >> 6 & 0x30303; } s->has_palette = 1; break; case OPCODE_SET_PALETTE_COMPRESSED: av_dlog(NULL, "set palette compressed\n"); avio_skip(pb, opcode_size); break; case OPCODE_SET_DECODING_MAP: av_dlog(NULL, "set decoding map\n"); /* log position and move on for now */ s->decode_map_chunk_offset = avio_tell(pb); s->decode_map_chunk_size = opcode_size; avio_skip(pb, opcode_size); break; case OPCODE_VIDEO_DATA: av_dlog(NULL, "set video 
data\n"); /* log position and move on for now */ s->video_chunk_offset = avio_tell(pb); s->video_chunk_size = opcode_size; avio_skip(pb, opcode_size); break; default: av_dlog(NULL, "*** unknown opcode type\n"); chunk_type = CHUNK_BAD; break; } } /* make a note of where the stream is sitting */ s->next_chunk_offset = avio_tell(pb); /* dispatch the first of any pending packets */ if ((chunk_type == CHUNK_VIDEO) || (chunk_type == CHUNK_AUDIO_ONLY)) chunk_type = load_ipmovie_packet(s, pb, pkt); return chunk_type; } | 15,853 |
0 | static av_cold int svq1_encode_end(AVCodecContext *avctx) { SVQ1EncContext *const s = avctx->priv_data; int i; av_log(avctx, AV_LOG_DEBUG, "RD: %f\n", s->rd_total / (double)(avctx->width * avctx->height * avctx->frame_number)); s->m.mb_type = NULL; ff_mpv_common_end(&s->m); av_freep(&s->m.me.scratchpad); av_freep(&s->m.me.map); av_freep(&s->m.me.score_map); av_freep(&s->mb_type); av_freep(&s->dummy); av_freep(&s->scratchbuf); for (i = 0; i < 3; i++) { av_freep(&s->motion_val8[i]); av_freep(&s->motion_val16[i]); } av_frame_free(&s->current_picture); av_frame_free(&s->last_picture); av_frame_free(&avctx->coded_frame); return 0; } | 15,854 |
1 | void MPV_decode_mb_internal(MpegEncContext *s, int16_t block[12][64], int is_mpeg12) { const int mb_xy = s->mb_y * s->mb_stride + s->mb_x; #if FF_API_XVMC FF_DISABLE_DEPRECATION_WARNINGS if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){ ff_xvmc_decode_mb(s);//xvmc uses pblocks return; } FF_ENABLE_DEPRECATION_WARNINGS #endif /* FF_API_XVMC */ if(s->avctx->debug&FF_DEBUG_DCT_COEFF) { /* print DCT coefficients */ int i,j; av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y); for(i=0; i<6; i++){ for(j=0; j<64; j++){ av_log(s->avctx, AV_LOG_DEBUG, "%5d", block[i][s->dsp.idct_permutation[j]]); } av_log(s->avctx, AV_LOG_DEBUG, "\n"); } } s->current_picture.qscale_table[mb_xy] = s->qscale; /* update DC predictors for P macroblocks */ if (!s->mb_intra) { if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) { if(s->mbintra_table[mb_xy]) ff_clean_intra_table_entries(s); } else { s->last_dc[0] = s->last_dc[1] = s->last_dc[2] = 128 << s->intra_dc_precision; } } else if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) s->mbintra_table[mb_xy]=1; if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc uint8_t *dest_y, *dest_cb, *dest_cr; int dct_linesize, dct_offset; op_pixels_func (*op_pix)[4]; qpel_mc_func (*op_qpix)[16]; const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics const int uvlinesize = s->current_picture.f.linesize[1]; const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band; const int block_size = 8; /* avoid copy if macroblock skipped in last frame too */ /* skip only during decoding as we might trash the buffers during encoding a bit */ if(!s->encoding){ uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy]; if (s->mb_skipped) { s->mb_skipped= 0; assert(s->pict_type!=AV_PICTURE_TYPE_I); *mbskip_ptr = 1; } else if(!s->current_picture.reference) { *mbskip_ptr = 1; } else{ *mbskip_ptr = 0; /* not skipped */ } } dct_linesize = linesize << s->interlaced_dct; dct_offset = s->interlaced_dct ? 
linesize : linesize * block_size; if(readable){ dest_y= s->dest[0]; dest_cb= s->dest[1]; dest_cr= s->dest[2]; }else{ dest_y = s->b_scratchpad; dest_cb= s->b_scratchpad+16*linesize; dest_cr= s->b_scratchpad+32*linesize; } if (!s->mb_intra) { /* motion handling */ /* decoding or more than one mb_type (MC was already done otherwise) */ if(!s->encoding){ if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) { if (s->mv_dir & MV_DIR_FORWARD) { ff_thread_await_progress(&s->last_picture_ptr->tf, ff_MPV_lowest_referenced_row(s, 0), 0); } if (s->mv_dir & MV_DIR_BACKWARD) { ff_thread_await_progress(&s->next_picture_ptr->tf, ff_MPV_lowest_referenced_row(s, 1), 0); } } op_qpix= s->me.qpel_put; if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){ op_pix = s->hdsp.put_pixels_tab; }else{ op_pix = s->hdsp.put_no_rnd_pixels_tab; } if (s->mv_dir & MV_DIR_FORWARD) { ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix); op_pix = s->hdsp.avg_pixels_tab; op_qpix= s->me.qpel_avg; } if (s->mv_dir & MV_DIR_BACKWARD) { ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix); } } /* skip dequant / idct if we are really late ;) */ if(s->avctx->skip_idct){ if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B) ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I) || s->avctx->skip_idct >= AVDISCARD_ALL) goto skip_idct; } /* add dct residue */ if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){ add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale); add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale); add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale); add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale); if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){ if (s->chroma_y_shift){ add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale); add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale); }else{ dct_linesize >>= 1; dct_offset >>=1; add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale); add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale); add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale); add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale); } } } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){ add_dct(s, block[0], 0, dest_y , dct_linesize); add_dct(s, block[1], 1, dest_y + block_size, dct_linesize); add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize); add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize); if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){ if(s->chroma_y_shift){//Chroma420 add_dct(s, block[4], 4, dest_cb, uvlinesize); add_dct(s, block[5], 5, dest_cr, uvlinesize); }else{ //chroma422 dct_linesize = uvlinesize << s->interlaced_dct; dct_offset = s->interlaced_dct ? 
uvlinesize : uvlinesize * 8; add_dct(s, block[4], 4, dest_cb, dct_linesize); add_dct(s, block[5], 5, dest_cr, dct_linesize); add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize); add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize); if(!s->chroma_x_shift){//Chroma444 add_dct(s, block[8], 8, dest_cb+8, dct_linesize); add_dct(s, block[9], 9, dest_cr+8, dct_linesize); add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize); add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize); } } }//fi gray } else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) { ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr); } } else { /* dct only in intra block */ if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){ put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale); put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale); put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale); put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale); if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){ if(s->chroma_y_shift){ put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale); put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale); }else{ dct_offset >>=1; dct_linesize >>=1; put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale); put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale); put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale); put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale); } } }else{ s->dsp.idct_put(dest_y , dct_linesize, block[0]); s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]); s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]); s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]); if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){ if(s->chroma_y_shift){ s->dsp.idct_put(dest_cb, uvlinesize, block[4]); s->dsp.idct_put(dest_cr, uvlinesize, block[5]); }else{ dct_linesize = uvlinesize << s->interlaced_dct; dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8; s->dsp.idct_put(dest_cb, dct_linesize, block[4]); s->dsp.idct_put(dest_cr, dct_linesize, block[5]); s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]); s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]); if(!s->chroma_x_shift){//Chroma444 s->dsp.idct_put(dest_cb + 8, dct_linesize, block[8]); s->dsp.idct_put(dest_cr + 8, dct_linesize, block[9]); s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]); s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]); } } }//gray } } skip_idct: if(!readable){ s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16); s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift); s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift); } } } | 15,856 |
1 | static void vfio_start_irqfd_injection(SysBusDevice *sbdev, qemu_irq irq) { VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(sbdev); VFIOINTp *intp; if (!kvm_irqfds_enabled() || !kvm_resamplefds_enabled() || !vdev->irqfd_allowed) { goto fail_irqfd; } QLIST_FOREACH(intp, &vdev->intp_list, next) { if (intp->qemuirq == irq) { break; } } assert(intp); if (kvm_irqchip_add_irqfd_notifier(kvm_state, intp->interrupt, intp->unmask, irq) < 0) { goto fail_irqfd; } if (vfio_set_trigger_eventfd(intp, NULL) < 0) { goto fail_vfio; } if (vfio_set_resample_eventfd(intp) < 0) { goto fail_vfio; } intp->kvm_accel = true; trace_vfio_platform_start_irqfd_injection(intp->pin, event_notifier_get_fd(intp->interrupt), event_notifier_get_fd(intp->unmask)); return; fail_vfio: kvm_irqchip_remove_irqfd_notifier(kvm_state, intp->interrupt, irq); error_report("vfio: failed to start eventfd signaling for IRQ %d: %m", intp->pin); abort(); fail_irqfd: vfio_start_eventfd_injection(sbdev, irq); return; } | 15,858 |
1 | static inline void RENAME(BEToUV)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, const uint8_t *src2, int width, uint32_t *unused) { #if COMPILE_TEMPLATE_MMX __asm__ volatile( "movq "MANGLE(bm01010101)", %%mm4 \n\t" "mov %0, %%"REG_a" \n\t" "1: \n\t" "movq (%1, %%"REG_a",2), %%mm0 \n\t" "movq 8(%1, %%"REG_a",2), %%mm1 \n\t" "movq (%2, %%"REG_a",2), %%mm2 \n\t" "movq 8(%2, %%"REG_a",2), %%mm3 \n\t" "pand %%mm4, %%mm0 \n\t" "pand %%mm4, %%mm1 \n\t" "pand %%mm4, %%mm2 \n\t" "pand %%mm4, %%mm3 \n\t" "packuswb %%mm1, %%mm0 \n\t" "packuswb %%mm3, %%mm2 \n\t" "movq %%mm0, (%3, %%"REG_a") \n\t" "movq %%mm2, (%4, %%"REG_a") \n\t" "add $8, %%"REG_a" \n\t" " js 1b \n\t" : : "g" ((x86_reg)-width), "r" (src1+width*2), "r" (src2+width*2), "r" (dstU+width), "r" (dstV+width) : "%"REG_a ); #else int i; for (i=0; i<width; i++) { dstU[i]= src1[2*i]; dstV[i]= src2[2*i]; } #endif } | 15,859 |
1 | static inline void RENAME(rgb24to16)(const uint8_t *src, uint8_t *dst, int src_size) { const uint8_t *s = src; const uint8_t *end; const uint8_t *mm_end; uint16_t *d = (uint16_t *)dst; end = s + src_size; __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory"); __asm__ volatile( "movq %0, %%mm7 \n\t" "movq %1, %%mm6 \n\t" ::"m"(red_16mask),"m"(green_16mask)); mm_end = end - 15; while (s < mm_end) { __asm__ volatile( PREFETCH" 32%1 \n\t" "movd %1, %%mm0 \n\t" "movd 3%1, %%mm3 \n\t" "punpckldq 6%1, %%mm0 \n\t" "punpckldq 9%1, %%mm3 \n\t" "movq %%mm0, %%mm1 \n\t" "movq %%mm0, %%mm2 \n\t" "movq %%mm3, %%mm4 \n\t" "movq %%mm3, %%mm5 \n\t" "psllq $8, %%mm0 \n\t" "psllq $8, %%mm3 \n\t" "pand %%mm7, %%mm0 \n\t" "pand %%mm7, %%mm3 \n\t" "psrlq $5, %%mm1 \n\t" "psrlq $5, %%mm4 \n\t" "pand %%mm6, %%mm1 \n\t" "pand %%mm6, %%mm4 \n\t" "psrlq $19, %%mm2 \n\t" "psrlq $19, %%mm5 \n\t" "pand %2, %%mm2 \n\t" "pand %2, %%mm5 \n\t" "por %%mm1, %%mm0 \n\t" "por %%mm4, %%mm3 \n\t" "por %%mm2, %%mm0 \n\t" "por %%mm5, %%mm3 \n\t" "psllq $16, %%mm3 \n\t" "por %%mm3, %%mm0 \n\t" MOVNTQ" %%mm0, %0 \n\t" :"=m"(*d):"m"(*s),"m"(blue_16mask):"memory"); d += 4; s += 12; } __asm__ volatile(SFENCE:::"memory"); __asm__ volatile(EMMS:::"memory"); while (s < end) { const int r = *s++; const int g = *s++; const int b = *s++; *d++ = (b>>3) | ((g&0xFC)<<3) | ((r&0xF8)<<8); } } | 15,860 |
1 | int socket_connect(SocketAddress *addr, Error **errp, NonBlockingConnectHandler *callback, void *opaque) { QemuOpts *opts; int fd; opts = qemu_opts_create_nofail(&socket_optslist); switch (addr->kind) { case SOCKET_ADDRESS_KIND_INET: inet_addr_to_opts(opts, addr->inet); fd = inet_connect_opts(opts, errp, callback, opaque); break; case SOCKET_ADDRESS_KIND_UNIX: qemu_opt_set(opts, "path", addr->q_unix->path); fd = unix_connect_opts(opts, errp, callback, opaque); break; case SOCKET_ADDRESS_KIND_FD: fd = monitor_get_fd(cur_mon, addr->fd->str, errp); if (callback) { callback(fd, opaque); } break; default: abort(); } qemu_opts_del(opts); return fd; } | 15,861 |
1 | void do_divwo (void) { if (likely(!((Ts0 == INT32_MIN && Ts1 == -1) || Ts1 == 0))) { xer_ov = 0; T0 = (Ts0 / Ts1); } else { xer_so = 1; xer_ov = 1; T0 = (-1) * ((uint32_t)T0 >> 31); } } | 15,862 |
1 | static void dump_json_image_info(ImageInfo *info) { Error *local_err = NULL; QString *str; QmpOutputVisitor *ov = qmp_output_visitor_new(); QObject *obj; visit_type_ImageInfo(qmp_output_get_visitor(ov), NULL, &info, &local_err); obj = qmp_output_get_qobject(ov); str = qobject_to_json_pretty(obj); assert(str != NULL); printf("%s\n", qstring_get_str(str)); qobject_decref(obj); qmp_output_visitor_cleanup(ov); QDECREF(str); } | 15,863 |
1 | static av_cold int vtenc_init(AVCodecContext *avctx) { CFMutableDictionaryRef enc_info; CFMutableDictionaryRef pixel_buffer_info; CMVideoCodecType codec_type; VTEncContext *vtctx = avctx->priv_data; CFStringRef profile_level; CFBooleanRef has_b_frames_cfbool; CFNumberRef gamma_level = NULL; int status; pthread_once(&once_ctrl, loadVTEncSymbols); codec_type = get_cm_codec_type(avctx->codec_id); if (!codec_type) { av_log(avctx, AV_LOG_ERROR, "Error: no mapping for AVCodecID %d\n", avctx->codec_id); return AVERROR(EINVAL); } vtctx->has_b_frames = avctx->max_b_frames > 0; if(vtctx->has_b_frames && vtctx->profile == H264_PROF_BASELINE){ av_log(avctx, AV_LOG_WARNING, "Cannot use B-frames with baseline profile. Output will not contain B-frames.\n"); vtctx->has_b_frames = false; } if (vtctx->entropy == VT_CABAC && vtctx->profile == H264_PROF_BASELINE) { av_log(avctx, AV_LOG_WARNING, "CABAC entropy requires 'main' or 'high' profile, but baseline was requested. Encode will not use CABAC entropy.\n"); vtctx->entropy = VT_ENTROPY_NOT_SET; } if (!get_vt_profile_level(avctx, &profile_level)) return AVERROR(EINVAL); vtctx->session = NULL; enc_info = CFDictionaryCreateMutable( kCFAllocatorDefault, 20, &kCFCopyStringDictionaryKeyCallBacks, &kCFTypeDictionaryValueCallBacks ); if (!enc_info) return AVERROR(ENOMEM); #if !TARGET_OS_IPHONE if (!vtctx->allow_sw) { CFDictionarySetValue(enc_info, compat_keys.kVTVideoEncoderSpecification_RequireHardwareAcceleratedVideoEncoder, kCFBooleanTrue); } else { CFDictionarySetValue(enc_info, compat_keys.kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder, kCFBooleanTrue); } #endif if (avctx->pix_fmt != AV_PIX_FMT_VIDEOTOOLBOX) { status = create_cv_pixel_buffer_info(avctx, &pixel_buffer_info); if (status) goto init_cleanup; } else { pixel_buffer_info = NULL; } pthread_mutex_init(&vtctx->lock, NULL); pthread_cond_init(&vtctx->cv_sample_sent, NULL); vtctx->dts_delta = vtctx->has_b_frames ? -1 : 0; get_cv_transfer_function(avctx, &vtctx->transfer_function, &gamma_level); get_cv_ycbcr_matrix(avctx, &vtctx->ycbcr_matrix); get_cv_color_primaries(avctx, &vtctx->color_primaries); if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) { status = vtenc_populate_extradata(avctx, codec_type, profile_level, gamma_level, enc_info, pixel_buffer_info); if (status) goto init_cleanup; } status = vtenc_create_encoder(avctx, codec_type, profile_level, gamma_level, enc_info, pixel_buffer_info, &vtctx->session); if (status < 0) goto init_cleanup; status = VTSessionCopyProperty(vtctx->session, kVTCompressionPropertyKey_AllowFrameReordering, kCFAllocatorDefault, &has_b_frames_cfbool); if (!status) { //Some devices don't output B-frames for main profile, even if requested. vtctx->has_b_frames = CFBooleanGetValue(has_b_frames_cfbool); CFRelease(has_b_frames_cfbool); } avctx->has_b_frames = vtctx->has_b_frames; init_cleanup: if (gamma_level) CFRelease(gamma_level); if (pixel_buffer_info) CFRelease(pixel_buffer_info); CFRelease(enc_info); return status; } | 15,864 |
0 | bool qemu_run_timers(QEMUClock *clock) { return qemu_clock_run_timers(clock->type); } | 15,865 |
0 | void *cpu_physical_memory_map(target_phys_addr_t addr, target_phys_addr_t *plen, int is_write) { return address_space_map(&address_space_memory, addr, plen, is_write); } | 15,866 |
0 | static void virtio_pci_device_unplugged(DeviceState *d) { VirtIOPCIProxy *proxy = VIRTIO_PCI(d); bool modern = !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_MODERN); bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY; virtio_pci_stop_ioeventfd(proxy); if (modern) { virtio_pci_modern_mem_region_unmap(proxy, &proxy->common); virtio_pci_modern_mem_region_unmap(proxy, &proxy->isr); virtio_pci_modern_mem_region_unmap(proxy, &proxy->device); virtio_pci_modern_mem_region_unmap(proxy, &proxy->notify); if (modern_pio) { virtio_pci_modern_io_region_unmap(proxy, &proxy->notify_pio); } } } | 15,867 |
0 | static void apb_config_writel (void *opaque, target_phys_addr_t addr, uint64_t val, unsigned size) { APBState *s = opaque; APB_DPRINTF("%s: addr " TARGET_FMT_lx " val %" PRIx64 "\n", __func__, addr, val); switch (addr & 0xffff) { case 0x30 ... 0x4f: /* DMA error registers */ /* XXX: not implemented yet */ break; case 0x200 ... 0x20b: /* IOMMU */ s->iommu[(addr & 0xf) >> 2] = val; break; case 0x20c ... 0x3ff: /* IOMMU flush */ break; case 0xc00 ... 0xc3f: /* PCI interrupt control */ if (addr & 4) { s->pci_irq_map[(addr & 0x3f) >> 3] &= PBM_PCI_IMR_MASK; s->pci_irq_map[(addr & 0x3f) >> 3] |= val & ~PBM_PCI_IMR_MASK; } break; case 0x1000 ... 0x1080: /* OBIO interrupt control */ if (addr & 4) { s->obio_irq_map[(addr & 0xff) >> 3] &= PBM_PCI_IMR_MASK; s->obio_irq_map[(addr & 0xff) >> 3] |= val & ~PBM_PCI_IMR_MASK; } break; case 0x1400 ... 0x143f: /* PCI interrupt clear */ if (addr & 4) { pci_apb_set_irq(s, (addr & 0x3f) >> 3, 0); } break; case 0x1800 ... 0x1860: /* OBIO interrupt clear */ if (addr & 4) { pci_apb_set_irq(s, 0x20 | ((addr & 0xff) >> 3), 0); } break; case 0x2000 ... 0x202f: /* PCI control */ s->pci_control[(addr & 0x3f) >> 2] = val; break; case 0xf020 ... 0xf027: /* Reset control */ if (addr & 4) { val &= RESET_MASK; s->reset_control &= ~(val & RESET_WCMASK); s->reset_control |= val & RESET_WMASK; if (val & SOFT_POR) { s->nr_resets = 0; qemu_system_reset_request(); } else if (val & SOFT_XIR) { qemu_system_reset_request(); } } break; case 0x5000 ... 0x51cf: /* PIO/DMA diagnostics */ case 0xa400 ... 0xa67f: /* IOMMU diagnostics */ case 0xa800 ... 0xa80f: /* Interrupt diagnostics */ case 0xf000 ... 0xf01f: /* FFB config, memory control */ /* we don't care */ default: break; } } | 15,868 |
0 | static inline int small_diamond_search(MpegEncContext * s, int *best, int dmin, UINT8 *new_pic, UINT8 *old_pic, int pic_stride, int pred_x, int pred_y, UINT16 *mv_penalty, int quant, int xmin, int ymin, int xmax, int ymax, int shift) { int next_dir=-1; for(;;){ int d; const int dir= next_dir; const int x= best[0]; const int y= best[1]; next_dir=-1; //printf("%d", dir); if(dir!=2 && x>xmin) CHECK_MV_DIR(x-1, y , 0) if(dir!=3 && y>ymin) CHECK_MV_DIR(x , y-1, 1) if(dir!=0 && x<xmax) CHECK_MV_DIR(x+1, y , 2) if(dir!=1 && y<ymax) CHECK_MV_DIR(x , y+1, 3) if(next_dir==-1){ return dmin; } } /* for(;;){ int d; const int x= best[0]; const int y= best[1]; const int last_min=dmin; if(x>xmin) CHECK_MV(x-1, y ) if(y>xmin) CHECK_MV(x , y-1) if(x<xmax) CHECK_MV(x+1, y ) if(y<xmax) CHECK_MV(x , y+1) if(x>xmin && y>ymin) CHECK_MV(x-1, y-1) if(x>xmin && y<ymax) CHECK_MV(x-1, y+1) if(x<xmax && y>ymin) CHECK_MV(x+1, y-1) if(x<xmax && y<ymax) CHECK_MV(x+1, y+1) if(x-1>xmin) CHECK_MV(x-2, y ) if(y-1>xmin) CHECK_MV(x , y-2) if(x+1<xmax) CHECK_MV(x+2, y ) if(y+1<xmax) CHECK_MV(x , y+2) if(x-1>xmin && y-1>ymin) CHECK_MV(x-2, y-2) if(x-1>xmin && y+1<ymax) CHECK_MV(x-2, y+2) if(x+1<xmax && y-1>ymin) CHECK_MV(x+2, y-2) if(x+1<xmax && y+1<ymax) CHECK_MV(x+2, y+2) if(dmin==last_min) return dmin; } */ } | 15,870 |
0 | pci_ebus_init1(PCIDevice *s) { isa_bus_new(&s->qdev); pci_config_set_vendor_id(s->config, PCI_VENDOR_ID_SUN); pci_config_set_device_id(s->config, PCI_DEVICE_ID_SUN_EBUS); s->config[0x04] = 0x06; // command = bus master, pci mem s->config[0x05] = 0x00; s->config[0x06] = 0xa0; // status = fast back-to-back, 66MHz, no error s->config[0x07] = 0x03; // status = medium devsel s->config[0x08] = 0x01; // revision s->config[0x09] = 0x00; // programming i/f pci_config_set_class(s->config, PCI_CLASS_BRIDGE_OTHER); s->config[0x0D] = 0x0a; // latency_timer pci_register_bar(s, 0, 0x1000000, PCI_BASE_ADDRESS_SPACE_MEMORY, ebus_mmio_mapfunc); pci_register_bar(s, 1, 0x800000, PCI_BASE_ADDRESS_SPACE_MEMORY, ebus_mmio_mapfunc); return 0; } | 15,871 |
0 | static int v9fs_synth_fsync(FsContext *ctx, int fid_type, V9fsFidOpenState *fs, int datasync) { errno = ENOSYS; return 0; } | 15,872 |
0 | int64_t qmp_guest_fsfreeze_freeze(Error **err) { error_set(err, QERR_UNSUPPORTED); return 0; } | 15,873 |
0 | static void raw_probe_alignment(BlockDriverState *bs) { BDRVRawState *s = bs->opaque; DWORD sectorsPerCluster, freeClusters, totalClusters, count; DISK_GEOMETRY_EX dg; BOOL status; if (s->type == FTYPE_CD) { bs->request_alignment = 2048; return; } if (s->type == FTYPE_HARDDISK) { status = DeviceIoControl(s->hfile, IOCTL_DISK_GET_DRIVE_GEOMETRY_EX, NULL, 0, &dg, sizeof(dg), &count, NULL); if (status != 0) { bs->request_alignment = dg.Geometry.BytesPerSector; return; } /* try GetDiskFreeSpace too */ } if (s->drive_path[0]) { GetDiskFreeSpace(s->drive_path, &sectorsPerCluster, &dg.Geometry.BytesPerSector, &freeClusters, &totalClusters); bs->request_alignment = dg.Geometry.BytesPerSector; } } | 15,874
0 | static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv src) { switch (size) { case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break; case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break; case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break; default: abort(); } } | 15,875 |
0 | static void block_dirty_bitmap_clear_prepare(BlkActionState *common, Error **errp) { BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState, common, common); BlockDirtyBitmap *action; if (action_check_completion_mode(common, errp) < 0) { return; } action = common->action->u.block_dirty_bitmap_clear.data; state->bitmap = block_dirty_bitmap_lookup(action->node, action->name, &state->bs, &state->aio_context, errp); if (!state->bitmap) { return; } if (bdrv_dirty_bitmap_frozen(state->bitmap)) { error_setg(errp, "Cannot modify a frozen bitmap"); return; } else if (!bdrv_dirty_bitmap_enabled(state->bitmap)) { error_setg(errp, "Cannot clear a disabled bitmap"); return; } bdrv_clear_dirty_bitmap(state->bitmap, &state->backup); /* AioContext is released in .clean() */ } | 15,876 |
0 | static uint64_t bonito_ldma_readl(void *opaque, target_phys_addr_t addr, unsigned size) { uint32_t val; PCIBonitoState *s = opaque; val = ((uint32_t *)(&s->bonldma))[addr/sizeof(uint32_t)]; return val; } | 15,878 |
0 | static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg t0, tcg_target_long arg) { uint8_t *old_code_ptr = s->code_ptr; uint32_t arg32 = arg; if (type == TCG_TYPE_I32 || arg == arg32) { tcg_out_op_t(s, INDEX_op_movi_i32); tcg_out_r(s, t0); tcg_out32(s, arg32); } else { assert(type == TCG_TYPE_I64); #if TCG_TARGET_REG_BITS == 64 tcg_out_op_t(s, INDEX_op_movi_i64); tcg_out_r(s, t0); tcg_out64(s, arg); #else TODO(); #endif } old_code_ptr[1] = s->code_ptr - old_code_ptr; } | 15,880 |
0 | static int mjpeg_probe(AVProbeData *p) { int i; int state = -1; int nb_invalid = 0; int nb_frames = 0; for (i=0; i<p->buf_size-2; i++) { int c; if (p->buf[i] != 0xFF) continue; c = p->buf[i+1]; switch (c) { case 0xD8: state = 0xD8; break; case 0xC0: case 0xC1: case 0xC2: case 0xC3: case 0xC5: case 0xC6: case 0xC7: case 0xF7: if (state == 0xD8) { state = 0xC0; } else nb_invalid++; break; case 0xDA: if (state == 0xC0) { state = 0xDA; } else nb_invalid++; break; case 0xD9: if (state == 0xDA) { state = 0xD9; nb_frames++; } else nb_invalid++; break; default: if ( (c >= 0x02 && c <= 0xBF) || c == 0xC8) { nb_invalid++; } } } if (nb_invalid*4 + 1 < nb_frames) { static const char ct_jpeg[] = "\r\nContent-Type: image/jpeg\r\n\r\n"; int i; for (i=0; i<FFMIN(p->buf_size - sizeof(ct_jpeg), 100); i++) if (!memcmp(p->buf + i, ct_jpeg, sizeof(ct_jpeg) - 1)) return AVPROBE_SCORE_EXTENSION; if (nb_invalid == 0 && nb_frames > 2) return AVPROBE_SCORE_EXTENSION / 2; return AVPROBE_SCORE_EXTENSION / 4; } return 0; } | 15,881 |
0 | static void omap_mcbsp_writew(void *opaque, target_phys_addr_t addr, uint32_t value) { struct omap_mcbsp_s *s = (struct omap_mcbsp_s *) opaque; int offset = addr & OMAP_MPUI_REG_MASK; if (offset == 0x04) { /* DXR */ if (((s->xcr[0] >> 5) & 7) < 3) /* XWDLEN1 */ return; if (s->tx_req > 3) { s->tx_req -= 4; if (s->codec && s->codec->cts) { s->codec->out.fifo[s->codec->out.len ++] = (value >> 24) & 0xff; s->codec->out.fifo[s->codec->out.len ++] = (value >> 16) & 0xff; s->codec->out.fifo[s->codec->out.len ++] = (value >> 8) & 0xff; s->codec->out.fifo[s->codec->out.len ++] = (value >> 0) & 0xff; } if (s->tx_req < 4) omap_mcbsp_tx_done(s); } else printf("%s: Tx FIFO overrun\n", __FUNCTION__); return; } omap_badwidth_write16(opaque, addr, value); } | 15,882 |
0 | static void memory_region_update_coalesced_range_as(MemoryRegion *mr, AddressSpace *as) { FlatView *view; FlatRange *fr; CoalescedMemoryRange *cmr; AddrRange tmp; MemoryRegionSection section; view = as->current_map; FOR_EACH_FLAT_RANGE(fr, view) { if (fr->mr == mr) { section = (MemoryRegionSection) { .address_space = as, .offset_within_address_space = int128_get64(fr->addr.start), .size = fr->addr.size, }; MEMORY_LISTENER_CALL(coalesced_mmio_del, Reverse, &section, int128_get64(fr->addr.start), int128_get64(fr->addr.size)); QTAILQ_FOREACH(cmr, &mr->coalesced, link) { tmp = addrrange_shift(cmr->addr, int128_sub(fr->addr.start, int128_make64(fr->offset_in_region))); if (!addrrange_intersects(tmp, fr->addr)) { continue; } tmp = addrrange_intersection(tmp, fr->addr); MEMORY_LISTENER_CALL(coalesced_mmio_add, Forward, &section, int128_get64(tmp.start), int128_get64(tmp.size)); } } } } | 15,884
0 | static void lance_cleanup(NetClientState *nc) { PCNetState *d = qemu_get_nic_opaque(nc); pcnet_common_cleanup(d); } | 15,886 |
0 | static int kvm_put_sregs(CPUState *env) { struct kvm_sregs sregs; memset(sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap)); if (env->interrupt_injected >= 0) { sregs.interrupt_bitmap[env->interrupt_injected / 64] |= (uint64_t)1 << (env->interrupt_injected % 64); } if ((env->eflags & VM_MASK)) { set_v8086_seg(&sregs.cs, &env->segs[R_CS]); set_v8086_seg(&sregs.ds, &env->segs[R_DS]); set_v8086_seg(&sregs.es, &env->segs[R_ES]); set_v8086_seg(&sregs.fs, &env->segs[R_FS]); set_v8086_seg(&sregs.gs, &env->segs[R_GS]); set_v8086_seg(&sregs.ss, &env->segs[R_SS]); } else { set_seg(&sregs.cs, &env->segs[R_CS]); set_seg(&sregs.ds, &env->segs[R_DS]); set_seg(&sregs.es, &env->segs[R_ES]); set_seg(&sregs.fs, &env->segs[R_FS]); set_seg(&sregs.gs, &env->segs[R_GS]); set_seg(&sregs.ss, &env->segs[R_SS]); } set_seg(&sregs.tr, &env->tr); set_seg(&sregs.ldt, &env->ldt); sregs.idt.limit = env->idt.limit; sregs.idt.base = env->idt.base; sregs.gdt.limit = env->gdt.limit; sregs.gdt.base = env->gdt.base; sregs.cr0 = env->cr[0]; sregs.cr2 = env->cr[2]; sregs.cr3 = env->cr[3]; sregs.cr4 = env->cr[4]; sregs.cr8 = cpu_get_apic_tpr(env->apic_state); sregs.apic_base = cpu_get_apic_base(env->apic_state); sregs.efer = env->efer; return kvm_vcpu_ioctl(env, KVM_SET_SREGS, &sregs); } | 15,887 |
0 | static int check_refcounts_l1(BlockDriverState *bs, BdrvCheckResult *res, uint16_t *refcount_table, int64_t refcount_table_size, int64_t l1_table_offset, int l1_size, int flags) { BDRVQcowState *s = bs->opaque; uint64_t *l1_table = NULL, l2_offset, l1_size2; int i, ret; l1_size2 = l1_size * sizeof(uint64_t); /* Mark L1 table as used */ ret = inc_refcounts(bs, res, refcount_table, refcount_table_size, l1_table_offset, l1_size2); if (ret < 0) { goto fail; } /* Read L1 table entries from disk */ if (l1_size2 > 0) { l1_table = g_try_malloc(l1_size2); if (l1_table == NULL) { ret = -ENOMEM; res->check_errors++; goto fail; } ret = bdrv_pread(bs->file, l1_table_offset, l1_table, l1_size2); if (ret < 0) { fprintf(stderr, "ERROR: I/O error in check_refcounts_l1\n"); res->check_errors++; goto fail; } for(i = 0;i < l1_size; i++) be64_to_cpus(&l1_table[i]); } /* Do the actual checks */ for(i = 0; i < l1_size; i++) { l2_offset = l1_table[i]; if (l2_offset) { /* Mark L2 table as used */ l2_offset &= L1E_OFFSET_MASK; ret = inc_refcounts(bs, res, refcount_table, refcount_table_size, l2_offset, s->cluster_size); if (ret < 0) { goto fail; } /* L2 tables are cluster aligned */ if (offset_into_cluster(s, l2_offset)) { fprintf(stderr, "ERROR l2_offset=%" PRIx64 ": Table is not " "cluster aligned; L1 entry corrupted\n", l2_offset); res->corruptions++; } /* Process and check L2 entries */ ret = check_refcounts_l2(bs, res, refcount_table, refcount_table_size, l2_offset, flags); if (ret < 0) { goto fail; } } } g_free(l1_table); return 0; fail: g_free(l1_table); return ret; } | 15,888 |
0 | static int coroutine_fn iscsi_co_writev(BlockDriverState *bs, int64_t sector_num, int nb_sectors, QEMUIOVector *iov) { IscsiLun *iscsilun = bs->opaque; struct IscsiTask iTask; uint64_t lba; uint32_t num_sectors; int fua; if (!is_request_lun_aligned(sector_num, nb_sectors, iscsilun)) { return -EINVAL; } if (bs->bl.max_transfer_length && nb_sectors > bs->bl.max_transfer_length) { error_report("iSCSI Error: Write of %d sectors exceeds max_xfer_len " "of %d sectors", nb_sectors, bs->bl.max_transfer_length); return -EINVAL; } lba = sector_qemu2lun(sector_num, iscsilun); num_sectors = sector_qemu2lun(nb_sectors, iscsilun); iscsi_co_init_iscsitask(iscsilun, &iTask); retry: fua = iscsilun->dpofua && !bdrv_enable_write_cache(bs); iTask.force_next_flush = !fua; if (iscsilun->use_16_for_rw) { iTask.task = iscsi_write16_task(iscsilun->iscsi, iscsilun->lun, lba, NULL, num_sectors * iscsilun->block_size, iscsilun->block_size, 0, 0, fua, 0, 0, iscsi_co_generic_cb, &iTask); } else { iTask.task = iscsi_write10_task(iscsilun->iscsi, iscsilun->lun, lba, NULL, num_sectors * iscsilun->block_size, iscsilun->block_size, 0, 0, fua, 0, 0, iscsi_co_generic_cb, &iTask); } if (iTask.task == NULL) { return -ENOMEM; } scsi_task_set_iov_out(iTask.task, (struct scsi_iovec *) iov->iov, iov->niov); while (!iTask.complete) { iscsi_set_events(iscsilun); qemu_coroutine_yield(); } if (iTask.task != NULL) { scsi_free_scsi_task(iTask.task); iTask.task = NULL; } if (iTask.do_retry) { iTask.complete = 0; goto retry; } if (iTask.status != SCSI_STATUS_GOOD) { return iTask.err_code; } iscsi_allocationmap_set(iscsilun, sector_num, nb_sectors); return 0; } | 15,889 |
0 | static BlockDriverAIOCB *raw_aio_submit(BlockDriverState *bs, int64_t sector_num, QEMUIOVector *qiov, int nb_sectors, BlockDriverCompletionFunc *cb, void *opaque, int type) { BDRVRawState *s = bs->opaque; if (fd_open(bs) < 0) return NULL; /* * If O_DIRECT is used the buffer needs to be aligned on a sector * boundary. Check if this is the case or tell the low-level * driver that it needs to copy the buffer. */ if ((bs->open_flags & BDRV_O_NOCACHE)) { if (!qiov_is_aligned(bs, qiov)) { type |= QEMU_AIO_MISALIGNED; #ifdef CONFIG_LINUX_AIO } else if (s->use_aio) { return laio_submit(bs, s->aio_ctx, s->fd, sector_num, qiov, nb_sectors, cb, opaque, type); #endif } } return paio_submit(bs, s->fd, sector_num, qiov, nb_sectors, cb, opaque, type); } | 15,891 |
0 | static void encode_header(SnowContext *s){ int plane_index, level, orientation; put_cabac(&s->c, s->header_state, s->keyframe); // state clearing stuff? if(s->keyframe){ put_symbol(&s->c, s->header_state, s->version, 0); put_symbol(&s->c, s->header_state, s->temporal_decomposition_type, 0); put_symbol(&s->c, s->header_state, s->temporal_decomposition_count, 0); put_symbol(&s->c, s->header_state, s->spatial_decomposition_count, 0); put_symbol(&s->c, s->header_state, s->colorspace_type, 0); put_symbol(&s->c, s->header_state, s->b_width, 0); put_symbol(&s->c, s->header_state, s->b_height, 0); put_symbol(&s->c, s->header_state, s->chroma_h_shift, 0); put_symbol(&s->c, s->header_state, s->chroma_v_shift, 0); put_cabac(&s->c, s->header_state, s->spatial_scalability); // put_cabac(&s->c, s->header_state, s->rate_scalability); for(plane_index=0; plane_index<2; plane_index++){ for(level=0; level<s->spatial_decomposition_count; level++){ for(orientation=level ? 1:0; orientation<4; orientation++){ if(orientation==2) continue; put_symbol(&s->c, s->header_state, s->plane[plane_index].band[level][orientation].qlog, 1); } } } } put_symbol(&s->c, s->header_state, s->spatial_decomposition_type, 0); put_symbol(&s->c, s->header_state, s->qlog, 1); put_symbol(&s->c, s->header_state, s->mv_scale, 0); put_symbol(&s->c, s->header_state, s->qbias, 1); } | 15,892 |
0 | void bdrv_iterate_format(void (*it)(void *opaque, const char *name), void *opaque) { BlockDriver *drv; int count = 0; int i; const char **formats = NULL; QLIST_FOREACH(drv, &bdrv_drivers, list) { if (drv->format_name) { bool found = false; int i = count; while (formats && i && !found) { found = !strcmp(formats[--i], drv->format_name); } if (!found) { formats = g_renew(const char *, formats, count + 1); formats[count++] = drv->format_name; } } } qsort(formats, count, sizeof(formats[0]), qsort_strcmp); for (i = 0; i < count; i++) { it(opaque, formats[i]); } g_free(formats); } | 15,893 |
0 | static int tcg_target_const_match(tcg_target_long val, TCGType type, const TCGArgConstraint *arg_ct) { int ct = arg_ct->ct; if (ct & TCG_CT_CONST) { return 1; } if (type == TCG_TYPE_I32) { val = (int32_t)val; } /* The following are mutually exclusive. */ if (ct & TCG_CT_CONST_S16) { return val == (int16_t)val; } else if (ct & TCG_CT_CONST_S32) { return val == (int32_t)val; } else if (ct & TCG_CT_CONST_ADLI) { return tcg_match_add2i(type, val); } else if (ct & TCG_CT_CONST_ORI) { return tcg_match_ori(type, val); } else if (ct & TCG_CT_CONST_XORI) { return tcg_match_xori(type, val); } else if (ct & TCG_CT_CONST_U31) { return val >= 0 && val <= 0x7fffffff; } else if (ct & TCG_CT_CONST_ZERO) { return val == 0; } return 0; } | 15,894 |
0 | static void cpu_exec_nocache(CPUArchState *env, int max_cycles, TranslationBlock *orig_tb) { CPUState *cpu = ENV_GET_CPU(env); TranslationBlock *tb; target_ulong pc = orig_tb->pc; target_ulong cs_base = orig_tb->cs_base; uint64_t flags = orig_tb->flags; /* Should never happen. We only end up here when an existing TB is too long. */ if (max_cycles > CF_COUNT_MASK) max_cycles = CF_COUNT_MASK; /* tb_gen_code can flush our orig_tb, invalidate it now */ tb_phys_invalidate(orig_tb, -1); tb = tb_gen_code(cpu, pc, cs_base, flags, max_cycles); cpu->current_tb = tb; /* execute the generated code */ trace_exec_tb_nocache(tb, tb->pc); cpu_tb_exec(cpu, tb->tc_ptr); cpu->current_tb = NULL; tb_phys_invalidate(tb, -1); tb_free(tb); } | 15,895 |
0 | void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end, int is_cpu_write_access) { TranslationBlock *tb, *tb_next; #if defined(TARGET_HAS_PRECISE_SMC) CPUState *cpu = current_cpu; CPUArchState *env = NULL; #endif tb_page_addr_t tb_start, tb_end; PageDesc *p; int n; #ifdef TARGET_HAS_PRECISE_SMC int current_tb_not_found = is_cpu_write_access; TranslationBlock *current_tb = NULL; int current_tb_modified = 0; target_ulong current_pc = 0; target_ulong current_cs_base = 0; uint32_t current_flags = 0; #endif /* TARGET_HAS_PRECISE_SMC */ assert_memory_lock(); assert_tb_locked(); p = page_find(start >> TARGET_PAGE_BITS); if (!p) { return; } #if defined(TARGET_HAS_PRECISE_SMC) if (cpu != NULL) { env = cpu->env_ptr; } #endif /* we remove all the TBs in the range [start, end[ */ /* XXX: see if in some cases it could be faster to invalidate all the code */ tb = p->first_tb; while (tb != NULL) { n = (uintptr_t)tb & 3; tb = (TranslationBlock *)((uintptr_t)tb & ~3); tb_next = tb->page_next[n]; /* NOTE: this is subtle as a TB may span two physical pages */ if (n == 0) { /* NOTE: tb_end may be after the end of the page, but it is not a problem */ tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK); tb_end = tb_start + tb->size; } else { tb_start = tb->page_addr[1]; tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK); } if (!(tb_end <= start || tb_start >= end)) { #ifdef TARGET_HAS_PRECISE_SMC if (current_tb_not_found) { current_tb_not_found = 0; current_tb = NULL; if (cpu->mem_io_pc) { /* now we have a real cpu fault */ current_tb = tb_find_pc(cpu->mem_io_pc); } } if (current_tb == tb && (current_tb->cflags & CF_COUNT_MASK) != 1) { /* If we are modifying the current TB, we must stop its execution. We could be more precise by checking that the modification is after the current PC, but it would require a specialized function to partially restore the CPU state */ current_tb_modified = 1; cpu_restore_state_from_tb(cpu, current_tb, cpu->mem_io_pc); cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base, &current_flags); } #endif /* TARGET_HAS_PRECISE_SMC */ tb_phys_invalidate(tb, -1); } tb = tb_next; } #if !defined(CONFIG_USER_ONLY) /* if no code remaining, no need to continue to use slow writes */ if (!p->first_tb) { invalidate_page_bitmap(p); tlb_unprotect_code(start); } #endif #ifdef TARGET_HAS_PRECISE_SMC if (current_tb_modified) { /* we generate a block containing just the instruction modifying the memory. It will ensure that it cannot modify itself */ tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1 | curr_cflags()); cpu_loop_exit_noexc(cpu); } #endif } | 15,896
0 | static void test_io_channel_setup_sync(SocketAddress *listen_addr, SocketAddress *connect_addr, QIOChannel **src, QIOChannel **dst) { QIOChannelSocket *lioc; lioc = qio_channel_socket_new(); qio_channel_socket_listen_sync(lioc, listen_addr, &error_abort); if (listen_addr->type == SOCKET_ADDRESS_KIND_INET) { SocketAddress *laddr = qio_channel_socket_get_local_address( lioc, &error_abort); g_free(connect_addr->u.inet.data->port); connect_addr->u.inet.data->port = g_strdup(laddr->u.inet.data->port); qapi_free_SocketAddress(laddr); } *src = QIO_CHANNEL(qio_channel_socket_new()); qio_channel_socket_connect_sync( QIO_CHANNEL_SOCKET(*src), connect_addr, &error_abort); qio_channel_set_delay(*src, false); qio_channel_wait(QIO_CHANNEL(lioc), G_IO_IN); *dst = QIO_CHANNEL(qio_channel_socket_accept(lioc, &error_abort)); g_assert(*dst); test_io_channel_set_socket_bufs(*src, *dst); object_unref(OBJECT(lioc)); } | 15,898 |
0 | static int qcow2_create2(const char *filename, int64_t total_size, const char *backing_file, const char *backing_format, int flags, size_t cluster_size, PreallocMode prealloc, QemuOpts *opts, int version, int refcount_order, Error **errp) { /* Calculate cluster_bits */ int cluster_bits; cluster_bits = ffs(cluster_size) - 1; if (cluster_bits < MIN_CLUSTER_BITS || cluster_bits > MAX_CLUSTER_BITS || (1 << cluster_bits) != cluster_size) { error_setg(errp, "Cluster size must be a power of two between %d and " "%dk", 1 << MIN_CLUSTER_BITS, 1 << (MAX_CLUSTER_BITS - 10)); return -EINVAL; } /* * Open the image file and write a minimal qcow2 header. * * We keep things simple and start with a zero-sized image. We also * do without refcount blocks or a L1 table for now. We'll fix the * inconsistency later. * * We do need a refcount table because growing the refcount table means * allocating two new refcount blocks - the second of which would be at * 2 GB for 64k clusters, and we don't want to have a 2 GB initial file * size for any qcow2 image. */ BlockDriverState* bs; QCowHeader *header; uint64_t* refcount_table; Error *local_err = NULL; int ret; if (prealloc == PREALLOC_MODE_FULL || prealloc == PREALLOC_MODE_FALLOC) { /* Note: The following calculation does not need to be exact; if it is a * bit off, either some bytes will be "leaked" (which is fine) or we * will need to increase the file size by some bytes (which is fine, * too, as long as the bulk is allocated here). Therefore, using * floating point arithmetic is fine. */ int64_t meta_size = 0; uint64_t nreftablee, nrefblocke, nl1e, nl2e; int64_t aligned_total_size = align_offset(total_size, cluster_size); int refblock_bits, refblock_size; /* refcount entry size in bytes */ double rces = (1 << refcount_order) / 8.; /* see qcow2_open() */ refblock_bits = cluster_bits - (refcount_order - 3); refblock_size = 1 << refblock_bits; /* header: 1 cluster */ meta_size += cluster_size; /* total size of L2 tables */ nl2e = aligned_total_size / cluster_size; nl2e = align_offset(nl2e, cluster_size / sizeof(uint64_t)); meta_size += nl2e * sizeof(uint64_t); /* total size of L1 tables */ nl1e = nl2e * sizeof(uint64_t) / cluster_size; nl1e = align_offset(nl1e, cluster_size / sizeof(uint64_t)); meta_size += nl1e * sizeof(uint64_t); /* total size of refcount blocks * * note: every host cluster is reference-counted, including metadata * (even refcount blocks are recursively included).
* Let: * a = total_size (this is the guest disk size) * m = meta size not including refcount blocks and refcount tables * c = cluster size * y1 = number of refcount blocks entries * y2 = meta size including everything * rces = refcount entry size in bytes * then, * y1 = (y2 + a)/c * y2 = y1 * rces + y1 * rces * sizeof(u64) / c + m * we can get y1: * y1 = (a + m) / (c - rces - rces * sizeof(u64) / c) */ nrefblocke = (aligned_total_size + meta_size + cluster_size) / (cluster_size - rces - rces * sizeof(uint64_t) / cluster_size); meta_size += DIV_ROUND_UP(nrefblocke, refblock_size) * cluster_size; /* total size of refcount tables */ nreftablee = nrefblocke / refblock_size; nreftablee = align_offset(nreftablee, cluster_size / sizeof(uint64_t)); meta_size += nreftablee * sizeof(uint64_t); qemu_opt_set_number(opts, BLOCK_OPT_SIZE, aligned_total_size + meta_size, &error_abort); qemu_opt_set(opts, BLOCK_OPT_PREALLOC, PreallocMode_lookup[prealloc], &error_abort); } ret = bdrv_create_file(filename, opts, &local_err); if (ret < 0) { error_propagate(errp, local_err); return ret; } bs = NULL; ret = bdrv_open(&bs, filename, NULL, NULL, BDRV_O_RDWR | BDRV_O_PROTOCOL, NULL, &local_err); if (ret < 0) { error_propagate(errp, local_err); return ret; } /* Write the header */ QEMU_BUILD_BUG_ON((1 << MIN_CLUSTER_BITS) < sizeof(*header)); header = g_malloc0(cluster_size); *header = (QCowHeader) { .magic = cpu_to_be32(QCOW_MAGIC), .version = cpu_to_be32(version), .cluster_bits = cpu_to_be32(cluster_bits), .size = cpu_to_be64(0), .l1_table_offset = cpu_to_be64(0), .l1_size = cpu_to_be32(0), .refcount_table_offset = cpu_to_be64(cluster_size), .refcount_table_clusters = cpu_to_be32(1), .refcount_order = cpu_to_be32(refcount_order), .header_length = cpu_to_be32(sizeof(*header)), }; if (flags & BLOCK_FLAG_ENCRYPT) { header->crypt_method = cpu_to_be32(QCOW_CRYPT_AES); } else { header->crypt_method = cpu_to_be32(QCOW_CRYPT_NONE); } if (flags & BLOCK_FLAG_LAZY_REFCOUNTS) { header->compatible_features |= cpu_to_be64(QCOW2_COMPAT_LAZY_REFCOUNTS); } ret = bdrv_pwrite(bs, 0, header, cluster_size); g_free(header); if (ret < 0) { error_setg_errno(errp, -ret, "Could not write qcow2 header"); goto out; } /* Write a refcount table with one refcount block */ refcount_table = g_malloc0(2 * cluster_size); refcount_table[0] = cpu_to_be64(2 * cluster_size); ret = bdrv_pwrite(bs, cluster_size, refcount_table, 2 * cluster_size); g_free(refcount_table); if (ret < 0) { error_setg_errno(errp, -ret, "Could not write refcount table"); goto out; } bdrv_unref(bs); bs = NULL; /* * And now open the image and make it consistent first (i.e. increase the * refcount of the cluster that is occupied by the header and the refcount * table) */ ret = bdrv_open(&bs, filename, NULL, NULL, BDRV_O_RDWR | BDRV_O_CACHE_WB | BDRV_O_NO_FLUSH, &bdrv_qcow2, &local_err); if (ret < 0) { error_propagate(errp, local_err); goto out; } ret = qcow2_alloc_clusters(bs, 3 * cluster_size); if (ret < 0) { error_setg_errno(errp, -ret, "Could not allocate clusters for qcow2 " "header and refcount table"); goto out; } else if (ret != 0) { error_report("Huh, first cluster in empty image is already in use?"); abort(); } /* Okay, now that we have a valid image, let's give it the right size */ ret = bdrv_truncate(bs, total_size); if (ret < 0) { error_setg_errno(errp, -ret, "Could not resize image"); goto out; } /* Want a backing file? 
There you go.*/ if (backing_file) { ret = bdrv_change_backing_file(bs, backing_file, backing_format); if (ret < 0) { error_setg_errno(errp, -ret, "Could not assign backing file '%s' " "with format '%s'", backing_file, backing_format); goto out; } } /* And if we're supposed to preallocate metadata, do that now */ if (prealloc != PREALLOC_MODE_OFF) { BDRVQcowState *s = bs->opaque; qemu_co_mutex_lock(&s->lock); ret = preallocate(bs); qemu_co_mutex_unlock(&s->lock); if (ret < 0) { error_setg_errno(errp, -ret, "Could not preallocate metadata"); goto out; } } bdrv_unref(bs); bs = NULL; /* Reopen the image without BDRV_O_NO_FLUSH to flush it before returning */ ret = bdrv_open(&bs, filename, NULL, NULL, BDRV_O_RDWR | BDRV_O_CACHE_WB | BDRV_O_NO_BACKING, &bdrv_qcow2, &local_err); if (local_err) { error_propagate(errp, local_err); goto out; } ret = 0; out: if (bs) { bdrv_unref(bs); } return ret; } | 15,899 |
0 | static void ahci_start_transfer(IDEDMA *dma) { AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma); IDEState *s = &ad->port.ifs[0]; uint32_t size = (uint32_t)(s->data_end - s->data_ptr); /* write == ram -> device */ uint32_t opts = le32_to_cpu(ad->cur_cmd->opts); int is_write = opts & AHCI_CMD_WRITE; int is_atapi = opts & AHCI_CMD_ATAPI; int has_sglist = 0; if (is_atapi && !ad->done_atapi_packet) { /* already prepopulated iobuffer */ ad->done_atapi_packet = true; size = 0; goto out; } if (!ahci_populate_sglist(ad, &s->sg, s->io_buffer_offset)) { has_sglist = 1; } DPRINTF(ad->port_no, "%sing %d bytes on %s w/%s sglist\n", is_write ? "writ" : "read", size, is_atapi ? "atapi" : "ata", has_sglist ? "" : "o"); if (has_sglist && size) { if (is_write) { dma_buf_write(s->data_ptr, size, &s->sg); } else { dma_buf_read(s->data_ptr, size, &s->sg); } } out: /* declare that we processed everything */ s->data_ptr = s->data_end; /* Update number of transferred bytes, destroy sglist */ ahci_commit_buf(dma, size); s->end_transfer_func(s); if (!(s->status & DRQ_STAT)) { /* done with PIO send/receive */ ahci_write_fis_pio(ad, le32_to_cpu(ad->cur_cmd->status)); } } | 15,900 |
0 | static int pl110_init(SysBusDevice *dev) { pl110_state *s = FROM_SYSBUS(pl110_state, dev); memory_region_init_io(&s->iomem, &pl110_ops, s, "pl110", 0x1000); sysbus_init_mmio(dev, &s->iomem); sysbus_init_irq(dev, &s->irq); qdev_init_gpio_in(&s->busdev.qdev, pl110_mux_ctrl_set, 1); s->con = graphic_console_init(pl110_update_display, pl110_invalidate_display, NULL, NULL, s); return 0; } | 15,901 |
0 | static int ljpeg_encode_bgr(AVCodecContext *avctx, PutBitContext *pb, const AVFrame *frame) { LJpegEncContext *s = avctx->priv_data; const int width = frame->width; const int height = frame->height; const int linesize = frame->linesize[0]; uint16_t (*buffer)[4] = s->scratch; const int predictor = avctx->prediction_method+1; int left[3], top[3], topleft[3]; int x, y, i; for (i = 0; i < 3; i++) buffer[0][i] = 1 << (9 - 1); for (y = 0; y < height; y++) { const int modified_predictor = y ? predictor : 1; uint8_t *ptr = frame->data[0] + (linesize * y); if (pb->buf_end - pb->buf - (put_bits_count(pb) >> 3) < width * 3 * 3) { av_log(avctx, AV_LOG_ERROR, "encoded frame too large\n"); return -1; } for (i = 0; i < 3; i++) top[i]= left[i]= topleft[i]= buffer[0][i]; for (x = 0; x < width; x++) { buffer[x][1] = ptr[3 * x + 0] - ptr[3 * x + 1] + 0x100; buffer[x][2] = ptr[3 * x + 2] - ptr[3 * x + 1] + 0x100; buffer[x][0] = (ptr[3 * x + 0] + 2 * ptr[3 * x + 1] + ptr[3 * x + 2]) >> 2; for (i = 0; i < 3; i++) { int pred, diff; PREDICT(pred, topleft[i], top[i], left[i], modified_predictor); topleft[i] = top[i]; top[i] = buffer[x+1][i]; left[i] = buffer[x][i]; diff = ((left[i] - pred + 0x100) & 0x1FF) - 0x100; if (i == 0) ff_mjpeg_encode_dc(pb, diff, s->huff_size_dc_luminance, s->huff_code_dc_luminance); //FIXME ugly else ff_mjpeg_encode_dc(pb, diff, s->huff_size_dc_chrominance, s->huff_code_dc_chrominance); } } } return 0; } | 15,903 |
0 | static int ipmovie_probe(AVProbeData *p) { if (p->buf_size < IPMOVIE_SIGNATURE_SIZE) return 0; if (strncmp(p->buf, IPMOVIE_SIGNATURE, IPMOVIE_SIGNATURE_SIZE) != 0) return 0; return AVPROBE_SCORE_MAX; } | 15,904 |
0 | static void vc1_decode_p_blocks(VC1Context *v) { MpegEncContext *s = &v->s; int apply_loop_filter; /* select codingmode used for VLC tables selection */ switch (v->c_ac_table_index) { case 0: v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA; break; case 1: v->codingset = CS_HIGH_MOT_INTRA; break; case 2: v->codingset = CS_MID_RATE_INTRA; break; } switch (v->c_ac_table_index) { case 0: v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER; break; case 1: v->codingset2 = CS_HIGH_MOT_INTER; break; case 2: v->codingset2 = CS_MID_RATE_INTER; break; } apply_loop_filter = s->loop_filter && !(s->avctx->skip_loop_filter >= AVDISCARD_NONKEY); s->first_slice_line = 1; memset(v->cbp_base, 0, sizeof(v->cbp_base[0])*2*s->mb_stride); for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) { s->mb_x = 0; ff_init_block_index(s); for (; s->mb_x < s->mb_width; s->mb_x++) { ff_update_block_index(s); if (v->fcm == ILACE_FIELD) vc1_decode_p_mb_intfi(v); else if (v->fcm == ILACE_FRAME) vc1_decode_p_mb_intfr(v); else vc1_decode_p_mb(v); if (s->mb_y != s->start_mb_y && apply_loop_filter && v->fcm == PROGRESSIVE) vc1_apply_p_loop_filter(v); if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) { // TODO: may need modification to handle slice coding ff_er_add_slice(s, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR); av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n", get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y); return; } } memmove(v->cbp_base, v->cbp, sizeof(v->cbp_base[0]) * s->mb_stride); memmove(v->ttblk_base, v->ttblk, sizeof(v->ttblk_base[0]) * s->mb_stride); memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride); memmove(v->luma_mv_base, v->luma_mv, sizeof(v->luma_mv_base[0]) * s->mb_stride); if (s->mb_y != s->start_mb_y) ff_draw_horiz_band(s, (s->mb_y - 1) * 16, 16); s->first_slice_line = 0; } if (apply_loop_filter && v->fcm == PROGRESSIVE) { s->mb_x = 0; ff_init_block_index(s); for (; s->mb_x < s->mb_width; s->mb_x++) { ff_update_block_index(s); vc1_apply_p_loop_filter(v); } } if (s->end_mb_y >= s->start_mb_y) ff_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16); ff_er_add_slice(s, 0, s->start_mb_y << v->field_mode, s->mb_width - 1, (s->end_mb_y << v->field_mode) - 1, ER_MB_END); } | 15,905 |
0 | static int vc9_decode_init(AVCodecContext *avctx) { VC9Context *v = avctx->priv_data; GetBitContext gb; if (!avctx->extradata_size || !avctx->extradata) return -1; avctx->pix_fmt = PIX_FMT_YUV420P; v->avctx = avctx; if (init_common(v) < 0) return -1; if (avctx->codec_id == CODEC_ID_WMV3) { int count = 0; // looks like WMV3 has a sequence header stored in the extradata // advanced sequence header may be before the first frame // the last byte of the extradata is a version number, 1 for the // samples we can decode init_get_bits(&gb, avctx->extradata, avctx->extradata_size); decode_sequence_header(avctx, &gb); count = avctx->extradata_size*8 - get_bits_count(&gb); if (count>0) { av_log(avctx, AV_LOG_INFO, "Extra data: %i bits left, value: %X\n", count, get_bits(&gb, count)); } else { av_log(avctx, AV_LOG_INFO, "Read %i bits in overflow\n", -count); } } /* Done with header parsing */ //FIXME I feel like this is wrong v->width_mb = (avctx->coded_width+15)>>4; v->height_mb = (avctx->coded_height+15)>>4; /* Allocate mb bitplanes */ v->mv_type_mb_plane = (uint8_t *)av_malloc(v->width_mb*v->height_mb); if (!v->mv_type_mb_plane) return -1; v->skip_mb_plane = (uint8_t *)av_malloc(v->width_mb*v->height_mb); if (!v->skip_mb_plane) return -1; v->direct_mb_plane = (uint8_t *)av_malloc(v->width_mb*v->height_mb); if (!v->direct_mb_plane) return -1; #if HAS_ADVANCED_PROFILE if (v->profile > PROFILE_MAIN) { v->over_flags_plane = (uint8_t *)av_malloc(v->width_mb*v->height_mb); if (!v->over_flags_plane) return -1; v->ac_pred_plane = (uint8_t *)av_malloc(v->width_mb*v->height_mb); if (!v->ac_pred_plane) return -1; } #endif return 0; } | 15,907 |
0 | enum AVCodecID ff_guess_image2_codec(const char *filename) { return av_str2id(img_tags, filename); } | 15,908 |
0 | static int sd_open(BlockDriverState *bs, const char *filename, int flags) { int ret, fd; uint32_t vid = 0; BDRVSheepdogState *s = bs->opaque; char vdi[SD_MAX_VDI_LEN], tag[SD_MAX_VDI_TAG_LEN]; uint32_t snapid; char *buf = NULL; strstart(filename, "sheepdog:", (const char **)&filename); QLIST_INIT(&s->inflight_aio_head); QLIST_INIT(&s->pending_aio_head); s->fd = -1; memset(vdi, 0, sizeof(vdi)); memset(tag, 0, sizeof(tag)); if (parse_vdiname(s, filename, vdi, &snapid, tag) < 0) { ret = -EINVAL; goto out; } s->fd = get_sheep_fd(s); if (s->fd < 0) { ret = s->fd; goto out; } ret = find_vdi_name(s, vdi, snapid, tag, &vid, 0); if (ret) { goto out; } /* * QEMU block layer emulates writethrough cache as 'writeback + flush', so * we always set SD_FLAG_CMD_CACHE (writeback cache) as default. */ s->cache_flags = SD_FLAG_CMD_CACHE; if (flags & BDRV_O_NOCACHE) { s->cache_flags = SD_FLAG_CMD_DIRECT; } if (s->cache_flags == SD_FLAG_CMD_CACHE) { s->flush_fd = connect_to_sdog(s->addr, s->port); if (s->flush_fd < 0) { error_report("failed to connect"); ret = s->flush_fd; goto out; } } if (snapid || tag[0] != '\0') { dprintf("%" PRIx32 " snapshot inode was open.\n", vid); s->is_snapshot = true; } fd = connect_to_sdog(s->addr, s->port); if (fd < 0) { error_report("failed to connect"); ret = fd; goto out; } buf = g_malloc(SD_INODE_SIZE); ret = read_object(fd, buf, vid_to_vdi_oid(vid), 0, SD_INODE_SIZE, 0, s->cache_flags); closesocket(fd); if (ret) { goto out; } memcpy(&s->inode, buf, sizeof(s->inode)); s->min_dirty_data_idx = UINT32_MAX; s->max_dirty_data_idx = 0; bs->total_sectors = s->inode.vdi_size / SECTOR_SIZE; pstrcpy(s->name, sizeof(s->name), vdi); qemu_co_mutex_init(&s->lock); g_free(buf); return 0; out: qemu_aio_set_fd_handler(s->fd, NULL, NULL, NULL, NULL); if (s->fd >= 0) { closesocket(s->fd); } g_free(buf); return ret; } | 15,909 |
0 | static void virt_set_gic_version(Object *obj, const char *value, Error **errp) { VirtMachineState *vms = VIRT_MACHINE(obj); if (!strcmp(value, "3")) { vms->gic_version = 3; } else if (!strcmp(value, "2")) { vms->gic_version = 2; } else if (!strcmp(value, "host")) { vms->gic_version = 0; /* Will probe later */ } else { error_report("Invalid gic-version option value"); error_printf("Allowed gic-version values are: 3, 2, host\n"); exit(1); } } | 15,910 |
0 | static int sys_openat(int dirfd, const char *pathname, int flags, ...) { /* * open(2) has extra parameter 'mode' when called with * flag O_CREAT. */ if ((flags & O_CREAT) != 0) { va_list ap; mode_t mode; /* * Get the 'mode' parameter and translate it to * host bits. */ va_start(ap, flags); mode = va_arg(ap, mode_t); mode = target_to_host_bitmask(mode, fcntl_flags_tbl); va_end(ap); return (openat(dirfd, pathname, flags, mode)); } return (openat(dirfd, pathname, flags)); } | 15,912 |
0 | static void coroutine_fn backup_run(void *opaque) { BackupBlockJob *job = opaque; BackupCompleteData *data; BlockDriverState *bs = job->common.bs; BlockDriverState *target = job->target; BlockdevOnError on_target_error = job->on_target_error; NotifierWithReturn before_write = { .notify = backup_before_write_notify, }; int64_t start, end; int64_t sectors_per_cluster = cluster_size_sectors(job); int ret = 0; QLIST_INIT(&job->inflight_reqs); qemu_co_rwlock_init(&job->flush_rwlock); start = 0; end = DIV_ROUND_UP(job->common.len, job->cluster_size); job->bitmap = hbitmap_alloc(end, 0); bdrv_set_enable_write_cache(target, true); if (target->blk) { blk_set_on_error(target->blk, on_target_error, on_target_error); blk_iostatus_enable(target->blk); } bdrv_add_before_write_notifier(bs, &before_write); if (job->sync_mode == MIRROR_SYNC_MODE_NONE) { while (!block_job_is_cancelled(&job->common)) { /* Yield until the job is cancelled. We just let our before_write * notify callback service CoW requests. */ job->common.busy = false; qemu_coroutine_yield(); job->common.busy = true; } } else if (job->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) { ret = backup_run_incremental(job); } else { /* Both FULL and TOP SYNC_MODE's require copying.. */ for (; start < end; start++) { bool error_is_read; if (yield_and_check(job)) { break; } if (job->sync_mode == MIRROR_SYNC_MODE_TOP) { int i, n; int alloced = 0; /* Check to see if these blocks are already in the * backing file. */ for (i = 0; i < sectors_per_cluster;) { /* bdrv_is_allocated() only returns true/false based * on the first set of sectors it comes across that * are all in the same state. * For that reason we must verify each sector in the * backup cluster length. We end up copying more than * needed but at some point that is always the case. */ alloced = bdrv_is_allocated(bs, start * sectors_per_cluster + i, sectors_per_cluster - i, &n); i += n; if (alloced == 1 || n == 0) { break; } } /* If the above loop never found any sectors that are in * the topmost image, skip this backup. */ if (alloced == 0) { continue; } } /* FULL sync mode we copy the whole drive. */ ret = backup_do_cow(bs, start * sectors_per_cluster, sectors_per_cluster, &error_is_read, false); if (ret < 0) { /* Depending on error action, fail now or retry cluster */ BlockErrorAction action = backup_error_action(job, error_is_read, -ret); if (action == BLOCK_ERROR_ACTION_REPORT) { break; } else { start--; continue; } } } } notifier_with_return_remove(&before_write); /* wait until pending backup_do_cow() calls have completed */ qemu_co_rwlock_wrlock(&job->flush_rwlock); qemu_co_rwlock_unlock(&job->flush_rwlock); hbitmap_free(job->bitmap); if (target->blk) { blk_iostatus_disable(target->blk); } bdrv_op_unblock_all(target, job->common.blocker); data = g_malloc(sizeof(*data)); data->ret = ret; block_job_defer_to_main_loop(&job->common, backup_complete, data); } | 15,913
0 | static RawAIOCB *raw_aio_setup(BlockDriverState *bs, int64_t sector_num, QEMUIOVector *qiov, int nb_sectors, BlockDriverCompletionFunc *cb, void *opaque) { BDRVRawState *s = bs->opaque; RawAIOCB *acb; if (fd_open(bs) < 0) return NULL; acb = qemu_aio_get(&raw_aio_pool, bs, cb, opaque); if (!acb) return NULL; acb->aiocb.aio_fildes = s->fd; acb->aiocb.ev_signo = SIGUSR2; acb->aiocb.aio_iov = qiov->iov; acb->aiocb.aio_niov = qiov->niov; acb->aiocb.aio_nbytes = nb_sectors * 512; acb->aiocb.aio_offset = sector_num * 512; acb->aiocb.aio_flags = 0; /* * If O_DIRECT is used the buffer needs to be aligned on a sector * boundary. Tell the low level code to ensure that in case it's * not done yet. */ if (s->aligned_buf) acb->aiocb.aio_flags |= QEMU_AIO_SECTOR_ALIGNED; acb->next = posix_aio_state->first_aio; posix_aio_state->first_aio = acb; return acb; } | 15,914 |
0 | static int net_socket_listen_init(NetClientState *peer, const char *model, const char *name, const char *host_str) { NetClientState *nc; NetSocketState *s; SocketAddressLegacy *saddr; int ret; Error *local_error = NULL; saddr = socket_parse(host_str, &local_error); if (saddr == NULL) { error_report_err(local_error); return -1; } ret = socket_listen(saddr, &local_error); if (ret < 0) { qapi_free_SocketAddressLegacy(saddr); error_report_err(local_error); return -1; } nc = qemu_new_net_client(&net_socket_info, peer, model, name); s = DO_UPCAST(NetSocketState, nc, nc); s->fd = -1; s->listen_fd = ret; s->nc.link_down = true; net_socket_rs_init(&s->rs, net_socket_rs_finalize); qemu_set_fd_handler(s->listen_fd, net_socket_accept, NULL, s); qapi_free_SocketAddressLegacy(saddr); return 0; } | 15,915 |
0 | InputEvent *qemu_input_event_new_key(KeyValue *key, bool down) { InputEvent *evt = g_new0(InputEvent, 1); evt->key = g_new0(InputKeyEvent, 1); evt->kind = INPUT_EVENT_KIND_KEY; evt->key->key = key; evt->key->down = down; return evt; } | 15,916 |
0 | static void mirror_exit(BlockJob *job, void *opaque) { MirrorBlockJob *s = container_of(job, MirrorBlockJob, common); MirrorExitData *data = opaque; AioContext *replace_aio_context = NULL; BlockDriverState *src = s->common.bs; /* Make sure that the source BDS doesn't go away before we called * block_job_completed(). */ bdrv_ref(src); if (s->to_replace) { replace_aio_context = bdrv_get_aio_context(s->to_replace); aio_context_acquire(replace_aio_context); } if (s->should_complete && data->ret == 0) { BlockDriverState *to_replace = s->common.bs; if (s->to_replace) { to_replace = s->to_replace; } /* This was checked in mirror_start_job(), but meanwhile one of the * nodes could have been newly attached to a BlockBackend. */ if (to_replace->blk && s->target->blk) { error_report("block job: Can't create node with two BlockBackends"); data->ret = -EINVAL; goto out; } if (bdrv_get_flags(s->target) != bdrv_get_flags(to_replace)) { bdrv_reopen(s->target, bdrv_get_flags(to_replace), NULL); } bdrv_replace_in_backing_chain(to_replace, s->target); } out: if (s->to_replace) { bdrv_op_unblock_all(s->to_replace, s->replace_blocker); error_free(s->replace_blocker); bdrv_unref(s->to_replace); } if (replace_aio_context) { aio_context_release(replace_aio_context); } g_free(s->replaces); bdrv_op_unblock_all(s->target, s->common.blocker); bdrv_unref(s->target); block_job_completed(&s->common, data->ret); g_free(data); bdrv_drained_end(src); if (qemu_get_aio_context() == bdrv_get_aio_context(src)) { aio_enable_external(iohandler_get_aio_context()); } bdrv_unref(src); } | 15,917 |
0 | static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost) { AVFormatContext *s = of->ctx; AVStream *st = ost->st; int ret; if (!of->header_written) { AVPacket tmp_pkt = {0}; /* the muxer is not initialized yet, buffer the packet */ if (!av_fifo_space(ost->muxing_queue)) { int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue), ost->max_muxing_queue_size); if (new_size <= av_fifo_size(ost->muxing_queue)) { av_log(NULL, AV_LOG_ERROR, "Too many packets buffered for output stream %d:%d.\n", ost->file_index, ost->st->index); exit_program(1); } ret = av_fifo_realloc2(ost->muxing_queue, new_size); if (ret < 0) exit_program(1); } ret = av_packet_ref(&tmp_pkt, pkt); if (ret < 0) exit_program(1); av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL); av_packet_unref(pkt); return; } if ((st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) || (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0)) pkt->pts = pkt->dts = AV_NOPTS_VALUE; /* * Audio encoders may split the packets -- #frames in != #packets out. * But there is no reordering, so we can limit the number of output packets * by simply dropping them here. * Counting encoded video frames needs to be done separately because of * reordering, see do_video_out() */ if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed)) { if (ost->frame_number >= ost->max_frames) { av_packet_unref(pkt); return; } ost->frame_number++; } if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) { int i; uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS, NULL); ost->quality = sd ? AV_RL32(sd) : -1; ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE; for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) { if (sd && i < sd[5]) ost->error[i] = AV_RL64(sd + 8 + 8*i); else ost->error[i] = -1; } if (ost->frame_rate.num && ost->is_cfr) { if (pkt->duration > 0) av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n"); pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate), ost->mux_timebase); } } av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base); if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) { if (pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->dts > pkt->pts) { av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n", pkt->dts, pkt->pts, ost->file_index, ost->st->index); pkt->pts = pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1 - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1) - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1); } if ((st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) && pkt->dts != AV_NOPTS_VALUE && !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) && ost->last_mux_dts != AV_NOPTS_VALUE) { int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT); if (pkt->dts < max) { int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG; av_log(s, loglevel, "Non-monotonous DTS in output stream " "%d:%d; previous: %"PRId64", current: %"PRId64"; ", ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts); if (exit_on_error) { av_log(NULL, AV_LOG_FATAL, "aborting.\n"); exit_program(1); } av_log(s, loglevel, "changing to %"PRId64". This may result " "in incorrect timestamps in the output file.\n", max); if (pkt->pts >= pkt->dts) pkt->pts = FFMAX(pkt->pts, max); pkt->dts = max; } } } ost->last_mux_dts = pkt->dts; ost->data_size += pkt->size; ost->packets_written++; pkt->stream_index = ost->index; if (debug_ts) { av_log(NULL, AV_LOG_INFO, "muxer <- type:%s " "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n", av_get_media_type_string(ost->enc_ctx->codec_type), av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base), av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base), pkt->size ); } ret = av_interleaved_write_frame(s, pkt); if (ret < 0) { print_error("av_interleaved_write_frame()", ret); main_return_code = 1; close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED); } av_packet_unref(pkt); } | 15,919
0 | static int estimate_qp(MpegEncContext *s, int dry_run){ if (s->next_lambda){ s->current_picture_ptr->quality= s->current_picture.quality = s->next_lambda; if(!dry_run) s->next_lambda= 0; } else if (!s->fixed_qscale) { s->current_picture_ptr->quality= s->current_picture.quality = ff_rate_estimate_qscale(s, dry_run); if (s->current_picture.quality < 0) return -1; } if(s->adaptive_quant){ switch(s->codec_id){ case CODEC_ID_MPEG4: if (CONFIG_MPEG4_ENCODER) ff_clean_mpeg4_qscales(s); break; case CODEC_ID_H263: case CODEC_ID_H263P: case CODEC_ID_FLV1: if (CONFIG_H263_ENCODER||CONFIG_H263P_ENCODER||CONFIG_FLV_ENCODER) ff_clean_h263_qscales(s); break; } s->lambda= s->lambda_table[0]; //FIXME broken }else s->lambda= s->current_picture.quality; //printf("%d %d\n", s->avctx->global_quality, s->current_picture.quality); update_qscale(s); return 0; } | 15,920 |
0 | int coroutine_fn bdrv_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int count, BdrvRequestFlags flags) { trace_bdrv_co_pwrite_zeroes(bs, offset, count, flags); if (!(bs->open_flags & BDRV_O_UNMAP)) { flags &= ~BDRV_REQ_MAY_UNMAP; } return bdrv_co_pwritev(bs, offset, count, NULL, BDRV_REQ_ZERO_WRITE | flags); } | 15,921 |
0 | static void test_validate_fail_union_flat_no_discrim(TestInputVisitorData *data, const void *unused) { UserDefFlatUnion2 *tmp = NULL; Error *err = NULL; Visitor *v; /* test situation where discriminator field ('enum1' here) is missing */ v = validate_test_init(data, "{ 'integer': 42, 'string': 'c', 'string1': 'd', 'string2': 'e' }"); visit_type_UserDefFlatUnion2(v, NULL, &tmp, &err); error_free_or_abort(&err); g_assert(!tmp); } | 15,923 |
0 | static void s390_init(QEMUMachineInitArgs *args) { ram_addr_t my_ram_size = args->ram_size; const char *cpu_model = args->cpu_model; const char *kernel_filename = args->kernel_filename; const char *kernel_cmdline = args->kernel_cmdline; const char *initrd_filename = args->initrd_filename; CPUS390XState *env = NULL; MemoryRegion *sysmem = get_system_memory(); MemoryRegion *ram = g_new(MemoryRegion, 1); ram_addr_t kernel_size = 0; ram_addr_t initrd_offset; ram_addr_t initrd_size = 0; int shift = 0; uint8_t *storage_keys; void *virtio_region; hwaddr virtio_region_len; hwaddr virtio_region_start; int i; /* s390x ram size detection needs a 16bit multiplier + an increment. So guests > 64GB can be specified in 2MB steps etc. */ while ((my_ram_size >> (20 + shift)) > 65535) { shift++; } my_ram_size = my_ram_size >> (20 + shift) << (20 + shift); /* lets propagate the changed ram size into the global variable. */ ram_size = my_ram_size; /* get a BUS */ s390_bus = s390_virtio_bus_init(&my_ram_size); s390_sclp_init(); /* allocate RAM */ memory_region_init_ram(ram, "s390.ram", my_ram_size); vmstate_register_ram_global(ram); memory_region_add_subregion(sysmem, 0, ram); /* clear virtio region */ virtio_region_len = my_ram_size - ram_size; virtio_region_start = ram_size; virtio_region = cpu_physical_memory_map(virtio_region_start, &virtio_region_len, true); memset(virtio_region, 0, virtio_region_len); cpu_physical_memory_unmap(virtio_region, virtio_region_len, 1, virtio_region_len); /* allocate storage keys */ storage_keys = g_malloc0(my_ram_size / TARGET_PAGE_SIZE); /* init CPUs */ if (cpu_model == NULL) { cpu_model = "host"; } ipi_states = g_malloc(sizeof(S390CPU *) * smp_cpus); for (i = 0; i < smp_cpus; i++) { S390CPU *cpu; CPUS390XState *tmp_env; cpu = cpu_s390x_init(cpu_model); tmp_env = &cpu->env; if (!env) { env = tmp_env; } ipi_states[i] = cpu; tmp_env->halted = 1; tmp_env->exception_index = EXCP_HLT; tmp_env->storage_keys = storage_keys; } /* One CPU has to run */ s390_add_running_cpu(env); if (kernel_filename) { kernel_size = load_elf(kernel_filename, NULL, NULL, NULL, NULL, NULL, 1, ELF_MACHINE, 0); if (kernel_size == -1UL) { kernel_size = load_image_targphys(kernel_filename, 0, ram_size); } if (kernel_size == -1UL) { fprintf(stderr, "qemu: could not load kernel '%s'\n", kernel_filename); exit(1); } /* * we can not rely on the ELF entry point, since up to 3.2 this * value was 0x800 (the SALIPL loader) and it wont work. For * all (Linux) cases 0x10000 (KERN_IMAGE_START) should be fine. */ env->psw.addr = KERN_IMAGE_START; env->psw.mask = 0x0000000180000000ULL; } else { ram_addr_t bios_size = 0; char *bios_filename; /* Load zipl bootloader */ if (bios_name == NULL) { bios_name = ZIPL_FILENAME; } bios_filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); bios_size = load_image_targphys(bios_filename, ZIPL_LOAD_ADDR, 4096); g_free(bios_filename); if ((long)bios_size < 0) { hw_error("could not load bootloader '%s'\n", bios_name); } if (bios_size > 4096) { hw_error("stage1 bootloader is > 4k\n"); } env->psw.addr = ZIPL_START; env->psw.mask = 0x0000000180000000ULL; } if (initrd_filename) { initrd_offset = INITRD_START; while (kernel_size + 0x100000 > initrd_offset) { initrd_offset += 0x100000; } initrd_size = load_image_targphys(initrd_filename, initrd_offset, ram_size - initrd_offset); if (initrd_size == -1UL) { fprintf(stderr, "qemu: could not load initrd '%s'\n", initrd_filename); exit(1); } /* we have to overwrite values in the kernel image, which are "rom" */ stq_p(rom_ptr(INITRD_PARM_START), initrd_offset); stq_p(rom_ptr(INITRD_PARM_SIZE), initrd_size); } if (rom_ptr(KERN_PARM_AREA)) { /* we have to overwrite values in the kernel image, which are "rom" */ memcpy(rom_ptr(KERN_PARM_AREA), kernel_cmdline, strlen(kernel_cmdline) + 1); } /* Create VirtIO network adapters */ for(i = 0; i < nb_nics; i++) { NICInfo *nd = &nd_table[i]; DeviceState *dev; if (!nd->model) { nd->model = g_strdup("virtio"); } if (strcmp(nd->model, "virtio")) { fprintf(stderr, "S390 only supports VirtIO nics\n"); exit(1); } dev = qdev_create((BusState *)s390_bus, "virtio-net-s390"); qdev_set_nic_properties(dev, nd); qdev_init_nofail(dev); } /* Create VirtIO disk drives */ for(i = 0; i < MAX_BLK_DEVS; i++) { DriveInfo *dinfo; DeviceState *dev; dinfo = drive_get(IF_IDE, 0, i); if (!dinfo) { continue; } dev = qdev_create((BusState *)s390_bus, "virtio-blk-s390"); qdev_prop_set_drive_nofail(dev, "drive", dinfo->bdrv); qdev_init_nofail(dev); } } | 15,925
0 | void sparc64_set_context(CPUSPARCState *env) { abi_ulong ucp_addr; struct target_ucontext *ucp; target_mc_gregset_t *grp; abi_ulong pc, npc, tstate; abi_ulong fp, i7, w_addr; int err; unsigned int i; ucp_addr = env->regwptr[UREG_I0]; if (!lock_user_struct(VERIFY_READ, ucp, ucp_addr, 1)) goto do_sigsegv; grp = &ucp->tuc_mcontext.mc_gregs; err = __get_user(pc, &((*grp)[MC_PC])); err |= __get_user(npc, &((*grp)[MC_NPC])); if (err || ((pc | npc) & 3)) goto do_sigsegv; if (env->regwptr[UREG_I1]) { target_sigset_t target_set; sigset_t set; if (TARGET_NSIG_WORDS == 1) { if (__get_user(target_set.sig[0], &ucp->tuc_sigmask.sig[0])) goto do_sigsegv; } else { abi_ulong *src, *dst; src = ucp->tuc_sigmask.sig; dst = target_set.sig; for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) { err |= __get_user(*dst, src); } if (err) goto do_sigsegv; } target_to_host_sigset_internal(&set, &target_set); sigprocmask(SIG_SETMASK, &set, NULL); } env->pc = pc; env->npc = npc; err |= __get_user(env->y, &((*grp)[MC_Y])); err |= __get_user(tstate, &((*grp)[MC_TSTATE])); env->asi = (tstate >> 24) & 0xff; cpu_put_ccr(env, tstate >> 32); cpu_put_cwp64(env, tstate & 0x1f); err |= __get_user(env->gregs[1], (&(*grp)[MC_G1])); err |= __get_user(env->gregs[2], (&(*grp)[MC_G2])); err |= __get_user(env->gregs[3], (&(*grp)[MC_G3])); err |= __get_user(env->gregs[4], (&(*grp)[MC_G4])); err |= __get_user(env->gregs[5], (&(*grp)[MC_G5])); err |= __get_user(env->gregs[6], (&(*grp)[MC_G6])); err |= __get_user(env->gregs[7], (&(*grp)[MC_G7])); err |= __get_user(env->regwptr[UREG_I0], (&(*grp)[MC_O0])); err |= __get_user(env->regwptr[UREG_I1], (&(*grp)[MC_O1])); err |= __get_user(env->regwptr[UREG_I2], (&(*grp)[MC_O2])); err |= __get_user(env->regwptr[UREG_I3], (&(*grp)[MC_O3])); err |= __get_user(env->regwptr[UREG_I4], (&(*grp)[MC_O4])); err |= __get_user(env->regwptr[UREG_I5], (&(*grp)[MC_O5])); err |= __get_user(env->regwptr[UREG_I6], (&(*grp)[MC_O6])); err |= __get_user(env->regwptr[UREG_I7], (&(*grp)[MC_O7])); err |= __get_user(fp, &(ucp->tuc_mcontext.mc_fp)); err |= __get_user(i7, &(ucp->tuc_mcontext.mc_i7)); w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6]; if (put_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]), abi_ulong) != 0) goto do_sigsegv; if (put_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]), abi_ulong) != 0) goto do_sigsegv; /* FIXME this does not match how the kernel handles the FPU in * its sparc64_set_context implementation. In particular the FPU * is only restored if fenab is non-zero in: * __get_user(fenab, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_enab)); */ err |= __get_user(env->fprs, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fprs)); { uint32_t *src = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs; for (i = 0; i < 64; i++, src++) { if (i & 1) { err |= __get_user(env->fpr[i/2].l.lower, src); } else { err |= __get_user(env->fpr[i/2].l.upper, src); } } } err |= __get_user(env->fsr, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fsr)); err |= __get_user(env->gsr, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_gsr)); if (err) goto do_sigsegv; unlock_user_struct(ucp, ucp_addr, 0); return; do_sigsegv: unlock_user_struct(ucp, ucp_addr, 0); force_sig(TARGET_SIGSEGV); } | 15,926 |
0 | uint32_t cpu_mips_get_count (CPUState *env) { if (env->CP0_Cause & (1 << CP0Ca_DC)) { return env->CP0_Count; } else { return env->CP0_Count + (uint32_t)muldiv64(qemu_get_clock(vm_clock), TIMER_FREQ, get_ticks_per_sec()); } } | 15,927 |
0 | static int cpu_has_work(CPUState *env) { if (env->stop) return 1; if (env->stopped) return 0; if (!env->halted) return 1; if (qemu_cpu_has_work(env)) return 1; return 0; } | 15,930 |
0 | static void outport_write(KBDState *s, uint32_t val) { DPRINTF("kbd: write outport=0x%02x\n", val); s->outport = val; if (s->a20_out) { qemu_set_irq(*s->a20_out, (val >> 1) & 1); } if (!(val & 1)) { qemu_system_reset_request(); } } | 15,932 |
0 | static void nvdimm_dsm_get_label_data(NVDIMMDevice *nvdimm, NvdimmDsmIn *in, hwaddr dsm_mem_addr) { NVDIMMClass *nvc = NVDIMM_GET_CLASS(nvdimm); NvdimmFuncGetLabelDataIn *get_label_data; NvdimmFuncGetLabelDataOut *get_label_data_out; uint32_t status; int size; get_label_data = (NvdimmFuncGetLabelDataIn *)in->arg3; le32_to_cpus(&get_label_data->offset); le32_to_cpus(&get_label_data->length); nvdimm_debug("Read Label Data: offset %#x length %#x.\n", get_label_data->offset, get_label_data->length); status = nvdimm_rw_label_data_check(nvdimm, get_label_data->offset, get_label_data->length); if (status != 0 /* Success */) { nvdimm_dsm_no_payload(status, dsm_mem_addr); return; } size = sizeof(*get_label_data_out) + get_label_data->length; assert(size <= 4096); get_label_data_out = g_malloc(size); get_label_data_out->len = cpu_to_le32(size); get_label_data_out->func_ret_status = cpu_to_le32(0 /* Success */); nvc->read_label_data(nvdimm, get_label_data_out->out_buf, get_label_data->length, get_label_data->offset); cpu_physical_memory_write(dsm_mem_addr, get_label_data_out, size); g_free(get_label_data_out); } | 15,933 |
0 | static QObject *visitor_get(TestOutputVisitorData *data) { visit_complete(data->ov, &data->obj); g_assert(data->obj); return data->obj; } | 15,934 |
0 | static void fill_elf_header(struct elfhdr *elf, int segs, uint16_t machine, uint32_t flags) { (void) memset(elf, 0, sizeof(*elf)); (void) memcpy(elf->e_ident, ELFMAG, SELFMAG); elf->e_ident[EI_CLASS] = ELF_CLASS; elf->e_ident[EI_DATA] = ELF_DATA; elf->e_ident[EI_VERSION] = EV_CURRENT; elf->e_ident[EI_OSABI] = ELF_OSABI; elf->e_type = ET_CORE; elf->e_machine = machine; elf->e_version = EV_CURRENT; elf->e_phoff = sizeof(struct elfhdr); elf->e_flags = flags; elf->e_ehsize = sizeof(struct elfhdr); elf->e_phentsize = sizeof(struct elf_phdr); elf->e_phnum = segs; #ifdef BSWAP_NEEDED bswap_ehdr(elf); #endif } | 15,935 |