label: int64 (values 0 to 1)
func1: string (lengths 23 to 97k)
id: int64 (values 0 to 27.3k)
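Each record below follows the schema above, one field per line: the label (0 or 1), the flattened C function text (func1), and the numeric record id. A minimal sketch of consuming such an export, assuming it has been re-saved as JSON Lines with those exact column names; the file name here is a placeholder and is not specified anywhere in this dump:

import json

# Hypothetical path; this dump does not name its source file.
path = "defect_functions.jsonl"

ones = 0
total = 0
with open(path, encoding="utf-8") as f:
    for line in f:
        row = json.loads(line)      # expected keys: "label", "func1", "id"
        label = int(row["label"])   # 0 or 1
        func_text = row["func1"]    # flattened C source of one function
        rec_id = int(row["id"])     # numeric record id
        total += 1
        ones += label
print(f"{ones} of {total} records carry label 1")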
0
static void gen_spr_620 (CPUPPCState *env) { /* XXX : not implemented */ spr_register(env, SPR_620_PMR0, "PMR0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_620_PMR1, "PMR1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_620_PMR2, "PMR2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_620_PMR3, "PMR3", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_620_PMR4, "PMR4", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_620_PMR5, "PMR5", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_620_PMR6, "PMR6", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_620_PMR7, "PMR7", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_620_PMR8, "PMR8", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_620_PMR9, "PMR9", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_620_PMRA, "PMR10", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_620_PMRB, "PMR11", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_620_PMRC, "PMR12", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_620_PMRD, "PMR13", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_620_PMRE, "PMR14", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_620_PMRF, "PMR15", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_620_HID8, "HID8", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_620_HID9, "HID9", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); }
20,252
0
static struct dpll_ctl_s *omap_dpll_init(MemoryRegion *memory, target_phys_addr_t base, omap_clk clk) { struct dpll_ctl_s *s = g_malloc0(sizeof(*s)); memory_region_init_io(&s->iomem, &omap_dpll_ops, s, "omap-dpll", 0x100); s->dpll = clk; omap_dpll_reset(s); memory_region_add_subregion(memory, base, &s->iomem); return s; }
20,253
0
static void qcow2_close(BlockDriverState *bs) { BDRVQcow2State *s = bs->opaque; qemu_vfree(s->l1_table); /* else pre-write overlap checks in cache_destroy may crash */ s->l1_table = NULL; if (!(s->flags & BDRV_O_INACTIVE)) { qcow2_inactivate(bs); } cache_clean_timer_del(bs); qcow2_cache_destroy(bs, s->l2_table_cache); qcow2_cache_destroy(bs, s->refcount_block_cache); qcrypto_cipher_free(s->cipher); s->cipher = NULL; g_free(s->unknown_header_fields); cleanup_unknown_header_ext(bs); g_free(s->image_backing_file); g_free(s->image_backing_format); g_free(s->cluster_cache); qemu_vfree(s->cluster_data); qcow2_refcount_close(bs); qcow2_free_snapshots(bs); }
20,254
0
static int opt_output_file(const char *opt, const char *filename) { AVFormatContext *oc; int i, err; AVOutputFormat *file_oformat; OutputStream *ost; InputStream *ist; if (!strcmp(filename, "-")) filename = "pipe:"; err = avformat_alloc_output_context2(&oc, NULL, last_asked_format, filename); last_asked_format = NULL; if (!oc) { print_error(filename, err); exit_program(1); } file_oformat= oc->oformat; if (!strcmp(file_oformat->name, "ffm") && av_strstart(filename, "http:", NULL)) { /* special case for files sent to ffserver: we get the stream parameters from ffserver */ int err = read_ffserver_streams(oc, filename); if (err < 0) { print_error(filename, err); exit_program(1); } } else if (!nb_stream_maps) { /* pick the "best" stream of each type */ #define NEW_STREAM(type, index)\ if (index >= 0) {\ ost = new_ ## type ## _stream(oc);\ ost->source_index = index;\ ost->sync_ist = &input_streams[index];\ input_streams[index].discard = 0;\ } /* video: highest resolution */ if (!video_disable && oc->oformat->video_codec != CODEC_ID_NONE) { int area = 0, idx = -1; for (i = 0; i < nb_input_streams; i++) { ist = &input_streams[i]; if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && ist->st->codec->width * ist->st->codec->height > area) { area = ist->st->codec->width * ist->st->codec->height; idx = i; } } NEW_STREAM(video, idx); } /* audio: most channels */ if (!audio_disable && oc->oformat->audio_codec != CODEC_ID_NONE) { int channels = 0, idx = -1; for (i = 0; i < nb_input_streams; i++) { ist = &input_streams[i]; if (ist->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && ist->st->codec->channels > channels) { channels = ist->st->codec->channels; idx = i; } } NEW_STREAM(audio, idx); } /* subtitles: pick first */ if (!subtitle_disable && oc->oformat->subtitle_codec != CODEC_ID_NONE) { for (i = 0; i < nb_input_streams; i++) if (input_streams[i].st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) { NEW_STREAM(subtitle, i); break; } } /* do something with data? 
*/ } else { for (i = 0; i < nb_stream_maps; i++) { StreamMap *map = &stream_maps[i]; if (map->disabled) continue; ist = &input_streams[input_files[map->file_index].ist_index + map->stream_index]; switch (ist->st->codec->codec_type) { case AVMEDIA_TYPE_VIDEO: ost = new_video_stream(oc); break; case AVMEDIA_TYPE_AUDIO: ost = new_audio_stream(oc); break; case AVMEDIA_TYPE_SUBTITLE: ost = new_subtitle_stream(oc); break; case AVMEDIA_TYPE_DATA: ost = new_data_stream(oc); break; default: av_log(NULL, AV_LOG_ERROR, "Cannot map stream #%d.%d - unsupported type.\n", map->file_index, map->stream_index); exit_program(1); } ost->source_index = input_files[map->file_index].ist_index + map->stream_index; ost->sync_ist = &input_streams[input_files[map->sync_file_index].ist_index + map->sync_stream_index]; ist->discard = 0; } } av_dict_copy(&oc->metadata, metadata, 0); av_dict_free(&metadata); if (nb_output_files == MAX_FILES) exit_program(1); /* a temporary hack until all the other MAX_FILES-sized arrays are removed */ output_files = grow_array(output_files, sizeof(*output_files), &nb_output_files, nb_output_files + 1); output_files[nb_output_files - 1].ctx = oc; output_files[nb_output_files - 1].ost_index = nb_output_streams - oc->nb_streams; av_dict_copy(&output_files[nb_output_files - 1].opts, format_opts, 0); /* check filename in case of an image number is expected */ if (oc->oformat->flags & AVFMT_NEEDNUMBER) { if (!av_filename_number_test(oc->filename)) { print_error(oc->filename, AVERROR(EINVAL)); exit_program(1); } } if (!(oc->oformat->flags & AVFMT_NOFILE)) { /* test if it already exists to avoid loosing precious files */ if (!file_overwrite && (strchr(filename, ':') == NULL || filename[1] == ':' || av_strstart(filename, "file:", NULL))) { if (avio_check(filename, 0) == 0) { if (!using_stdin) { fprintf(stderr,"File '%s' already exists. Overwrite ? [y/N] ", filename); fflush(stderr); if (!read_yesno()) { fprintf(stderr, "Not overwriting - exiting\n"); exit_program(1); } } else { fprintf(stderr,"File '%s' already exists. 
Exiting.\n", filename); exit_program(1); } } } /* open the file */ if ((err = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE)) < 0) { print_error(filename, err); exit_program(1); } } oc->preload= (int)(mux_preload*AV_TIME_BASE); oc->max_delay= (int)(mux_max_delay*AV_TIME_BASE); if (loop_output >= 0) { av_log(NULL, AV_LOG_WARNING, "-loop_output is deprecated, use -loop\n"); oc->loop_output = loop_output; } /* copy chapters */ if (chapters_input_file >= nb_input_files) { if (chapters_input_file == INT_MAX) { /* copy chapters from the first input file that has them*/ chapters_input_file = -1; for (i = 0; i < nb_input_files; i++) if (input_files[i].ctx->nb_chapters) { chapters_input_file = i; break; } } else { av_log(NULL, AV_LOG_ERROR, "Invalid input file index %d in chapter mapping.\n", chapters_input_file); exit_program(1); } } if (chapters_input_file >= 0) copy_chapters(chapters_input_file, nb_output_files - 1); /* copy metadata */ for (i = 0; i < nb_meta_data_maps; i++) { AVFormatContext *files[2]; AVDictionary **meta[2]; int j; #define METADATA_CHECK_INDEX(index, nb_elems, desc)\ if ((index) < 0 || (index) >= (nb_elems)) {\ av_log(NULL, AV_LOG_ERROR, "Invalid %s index %d while processing metadata maps\n",\ (desc), (index));\ exit_program(1);\ } int in_file_index = meta_data_maps[i][1].file; if (in_file_index < 0) continue; METADATA_CHECK_INDEX(in_file_index, nb_input_files, "input file") files[0] = oc; files[1] = input_files[in_file_index].ctx; for (j = 0; j < 2; j++) { MetadataMap *map = &meta_data_maps[i][j]; switch (map->type) { case 'g': meta[j] = &files[j]->metadata; break; case 's': METADATA_CHECK_INDEX(map->index, files[j]->nb_streams, "stream") meta[j] = &files[j]->streams[map->index]->metadata; break; case 'c': METADATA_CHECK_INDEX(map->index, files[j]->nb_chapters, "chapter") meta[j] = &files[j]->chapters[map->index]->metadata; break; case 'p': METADATA_CHECK_INDEX(map->index, files[j]->nb_programs, "program") meta[j] = &files[j]->programs[map->index]->metadata; break; } } av_dict_copy(meta[0], *meta[1], AV_DICT_DONT_OVERWRITE); } /* copy global metadata by default */ if (metadata_global_autocopy && nb_input_files) av_dict_copy(&oc->metadata, input_files[0].ctx->metadata, AV_DICT_DONT_OVERWRITE); if (metadata_streams_autocopy) for (i = output_files[nb_output_files - 1].ost_index; i < nb_output_streams; i++) { InputStream *ist = &input_streams[output_streams[i].source_index]; av_dict_copy(&output_streams[i].st->metadata, ist->st->metadata, AV_DICT_DONT_OVERWRITE); } frame_rate = (AVRational){0, 0}; frame_width = 0; frame_height = 0; audio_sample_rate = 0; audio_channels = 0; audio_sample_fmt = AV_SAMPLE_FMT_NONE; chapters_input_file = INT_MAX; av_freep(&meta_data_maps); nb_meta_data_maps = 0; metadata_global_autocopy = 1; metadata_streams_autocopy = 1; metadata_chapters_autocopy = 1; av_freep(&stream_maps); nb_stream_maps = 0; av_dict_free(&codec_names); av_freep(&forced_key_frames); uninit_opts(); init_opts(); return 0; }
20,255
0
static int pte64_check(mmu_ctx_t *ctx, target_ulong pte0, target_ulong pte1, int h, int rw, int type) { target_ulong ptem, mmask; int access, ret, pteh, ptev, pp; ret = -1; /* Check validity and table match */ ptev = pte64_is_valid(pte0); pteh = (pte0 >> 1) & 1; if (ptev && h == pteh) { /* Check vsid & api */ ptem = pte0 & PTE64_PTEM_MASK; mmask = PTE64_CHECK_MASK; pp = (pte1 & 0x00000003) | ((pte1 >> 61) & 0x00000004); ctx->nx = (pte1 >> 2) & 1; /* No execute bit */ ctx->nx |= (pte1 >> 3) & 1; /* Guarded bit */ if (ptem == ctx->ptem) { if (ctx->raddr != (hwaddr)-1ULL) { /* all matches should have equal RPN, WIMG & PP */ if ((ctx->raddr & mmask) != (pte1 & mmask)) { qemu_log("Bad RPN/WIMG/PP\n"); return -3; } } /* Compute access rights */ access = pp_check(ctx->key, pp, ctx->nx); /* Keep the matching PTE informations */ ctx->raddr = pte1; ctx->prot = access; ret = check_prot(ctx->prot, rw, type); if (ret == 0) { /* Access granted */ LOG_MMU("PTE access granted !\n"); } else { /* Access right violation */ LOG_MMU("PTE access rejected\n"); } } } return ret; }
20,256
0
static void usb_xhci_realize(struct PCIDevice *dev, Error **errp) { int i, ret; Error *err = NULL; XHCIState *xhci = XHCI(dev); dev->config[PCI_CLASS_PROG] = 0x30; /* xHCI */ dev->config[PCI_INTERRUPT_PIN] = 0x01; /* interrupt pin 1 */ dev->config[PCI_CACHE_LINE_SIZE] = 0x10; dev->config[0x60] = 0x30; /* release number */ if (strcmp(object_get_typename(OBJECT(dev)), TYPE_NEC_XHCI) == 0) { xhci->nec_quirks = true; } if (xhci->numintrs > MAXINTRS) { xhci->numintrs = MAXINTRS; } while (xhci->numintrs & (xhci->numintrs - 1)) { /* ! power of 2 */ xhci->numintrs++; } if (xhci->numintrs < 1) { xhci->numintrs = 1; } if (xhci->numslots > MAXSLOTS) { xhci->numslots = MAXSLOTS; } if (xhci->numslots < 1) { xhci->numslots = 1; } if (xhci_get_flag(xhci, XHCI_FLAG_ENABLE_STREAMS)) { xhci->max_pstreams_mask = 7; /* == 256 primary streams */ } else { xhci->max_pstreams_mask = 0; } if (xhci->msi != ON_OFF_AUTO_OFF) { ret = msi_init(dev, 0x70, xhci->numintrs, true, false, &err); /* Any error other than -ENOTSUP(board's MSI support is broken) * is a programming error */ assert(!ret || ret == -ENOTSUP); if (ret && xhci->msi == ON_OFF_AUTO_ON) { /* Can't satisfy user's explicit msi=on request, fail */ error_append_hint(&err, "You have to use msi=auto (default) or " "msi=off with this machine type.\n"); error_propagate(errp, err); return; } assert(!err || xhci->msi == ON_OFF_AUTO_AUTO); /* With msi=auto, we fall back to MSI off silently */ error_free(err); } usb_xhci_init(xhci); xhci->mfwrap_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, xhci_mfwrap_timer, xhci); memory_region_init(&xhci->mem, OBJECT(xhci), "xhci", LEN_REGS); memory_region_init_io(&xhci->mem_cap, OBJECT(xhci), &xhci_cap_ops, xhci, "capabilities", LEN_CAP); memory_region_init_io(&xhci->mem_oper, OBJECT(xhci), &xhci_oper_ops, xhci, "operational", 0x400); memory_region_init_io(&xhci->mem_runtime, OBJECT(xhci), &xhci_runtime_ops, xhci, "runtime", LEN_RUNTIME); memory_region_init_io(&xhci->mem_doorbell, OBJECT(xhci), &xhci_doorbell_ops, xhci, "doorbell", LEN_DOORBELL); memory_region_add_subregion(&xhci->mem, 0, &xhci->mem_cap); memory_region_add_subregion(&xhci->mem, OFF_OPER, &xhci->mem_oper); memory_region_add_subregion(&xhci->mem, OFF_RUNTIME, &xhci->mem_runtime); memory_region_add_subregion(&xhci->mem, OFF_DOORBELL, &xhci->mem_doorbell); for (i = 0; i < xhci->numports; i++) { XHCIPort *port = &xhci->ports[i]; uint32_t offset = OFF_OPER + 0x400 + 0x10 * i; port->xhci = xhci; memory_region_init_io(&port->mem, OBJECT(xhci), &xhci_port_ops, port, port->name, 0x10); memory_region_add_subregion(&xhci->mem, offset, &port->mem); } pci_register_bar(dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY|PCI_BASE_ADDRESS_MEM_TYPE_64, &xhci->mem); if (pci_bus_is_express(dev->bus) || xhci_get_flag(xhci, XHCI_FLAG_FORCE_PCIE_ENDCAP)) { ret = pcie_endpoint_cap_init(dev, 0xa0); assert(ret > 0); } if (xhci->msix != ON_OFF_AUTO_OFF) { /* TODO check for errors, and should fail when msix=on */ msix_init(dev, xhci->numintrs, &xhci->mem, 0, OFF_MSIX_TABLE, &xhci->mem, 0, OFF_MSIX_PBA, 0x90, NULL); } }
20,258
0
void acpi_pcihp_init(AcpiPciHpState *s, PCIBus *root_bus, MemoryRegion *address_space_io) { s->root= root_bus; memory_region_init_io(&s->io, NULL, &acpi_pcihp_io_ops, s, "acpi-pci-hotplug", PCI_HOTPLUG_SIZE); memory_region_add_subregion(address_space_io, PCI_HOTPLUG_ADDR, &s->io); }
20,259
0
static void qmp_output_complete(Visitor *v, void *opaque) { QmpOutputVisitor *qov = to_qov(v); /* A visit must have occurred, with each start paired with end. */ assert(qov->root && QSLIST_EMPTY(&qov->stack)); assert(opaque == qov->result); qobject_incref(qov->root); *qov->result = qov->root; qov->result = NULL; }
20,260
0
static SocketAddress *sd_server_config(QDict *options, Error **errp) { QDict *server = NULL; QObject *crumpled_server = NULL; Visitor *iv = NULL; SocketAddressFlat *saddr_flat = NULL; SocketAddress *saddr = NULL; Error *local_err = NULL; qdict_extract_subqdict(options, &server, "server."); crumpled_server = qdict_crumple(server, errp); if (!crumpled_server) { goto done; } /* * FIXME .numeric, .to, .ipv4 or .ipv6 don't work with -drive * server.type=inet. .to doesn't matter, it's ignored anyway. * That's because when @options come from -blockdev or * blockdev_add, members are typed according to the QAPI schema, * but when they come from -drive, they're all QString. The * visitor expects the former. */ iv = qobject_input_visitor_new(crumpled_server); visit_type_SocketAddressFlat(iv, NULL, &saddr_flat, &local_err); if (local_err) { error_propagate(errp, local_err); goto done; } saddr = socket_address_crumple(saddr_flat); done: qapi_free_SocketAddressFlat(saddr_flat); visit_free(iv); qobject_decref(crumpled_server); QDECREF(server); return saddr; }
20,261
0
filter_mirror_set_outdev(Object *obj, const char *value, Error **errp) { MirrorState *s = FILTER_MIRROR(obj); g_free(s->outdev); s->outdev = g_strdup(value); if (!s->outdev) { error_setg(errp, "filter filter mirror needs 'outdev' " "property set"); return; } }
20,262
1
int bdrv_is_inserted(BlockDriverState *bs) { BlockDriver *drv = bs->drv; int ret; if (!drv) return 0; if (!drv->bdrv_is_inserted) return !bs->tray_open; ret = drv->bdrv_is_inserted(bs); return ret; }
20,263
1
static void virtio_blk_update_config(VirtIODevice *vdev, uint8_t *config) { VirtIOBlock *s = to_virtio_blk(vdev); struct virtio_blk_config blkcfg; uint64_t capacity; int cylinders, heads, secs; bdrv_get_geometry(s->bs, &capacity); bdrv_get_geometry_hint(s->bs, &cylinders, &heads, &secs); stq_raw(&blkcfg.capacity, capacity); stl_raw(&blkcfg.seg_max, 128 - 2); stw_raw(&blkcfg.cylinders, cylinders); blkcfg.heads = heads; blkcfg.sectors = secs; memcpy(config, &blkcfg, sizeof(blkcfg)); }
20,264
1
static int vpc_create(const char *filename, QEMUOptionParameter *options) { uint8_t buf[1024]; struct vhd_footer* footer = (struct vhd_footer*) buf; struct vhd_dyndisk_header* dyndisk_header = (struct vhd_dyndisk_header*) buf; int fd, i; uint16_t cyls; uint8_t heads; uint8_t secs_per_cyl; size_t block_size, num_bat_entries; int64_t total_sectors = 0; // Read out options while (options && options->name) { if (!strcmp(options->name, "size")) { total_sectors = options->value.n / 512; } options++; } // Create the file fd = open(filename, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, 0644); if (fd < 0) return -EIO; // Calculate matching total_size and geometry if (calculate_geometry(total_sectors, &cyls, &heads, &secs_per_cyl)) return -EFBIG; total_sectors = (int64_t) cyls * heads * secs_per_cyl; // Prepare the Hard Disk Footer memset(buf, 0, 1024); memcpy(footer->creator, "conectix", 8); // TODO Check if "qemu" creator_app is ok for VPC memcpy(footer->creator_app, "qemu", 4); memcpy(footer->creator_os, "Wi2k", 4); footer->features = be32_to_cpu(0x02); footer->version = be32_to_cpu(0x00010000); footer->data_offset = be64_to_cpu(HEADER_SIZE); footer->timestamp = be32_to_cpu(time(NULL) - VHD_TIMESTAMP_BASE); // Version of Virtual PC 2007 footer->major = be16_to_cpu(0x0005); footer->minor =be16_to_cpu(0x0003); footer->orig_size = be64_to_cpu(total_sectors * 512); footer->size = be64_to_cpu(total_sectors * 512); footer->cyls = be16_to_cpu(cyls); footer->heads = heads; footer->secs_per_cyl = secs_per_cyl; footer->type = be32_to_cpu(VHD_DYNAMIC); // TODO uuid is missing footer->checksum = be32_to_cpu(vpc_checksum(buf, HEADER_SIZE)); // Write the footer (twice: at the beginning and at the end) block_size = 0x200000; num_bat_entries = (total_sectors + block_size / 512) / (block_size / 512); if (write(fd, buf, HEADER_SIZE) != HEADER_SIZE) return -EIO; if (lseek(fd, 1536 + ((num_bat_entries * 4 + 511) & ~511), SEEK_SET) < 0) return -EIO; if (write(fd, buf, HEADER_SIZE) != HEADER_SIZE) return -EIO; // Write the initial BAT if (lseek(fd, 3 * 512, SEEK_SET) < 0) return -EIO; memset(buf, 0xFF, 512); for (i = 0; i < (num_bat_entries * 4 + 511) / 512; i++) if (write(fd, buf, 512) != 512) return -EIO; // Prepare the Dynamic Disk Header memset(buf, 0, 1024); memcpy(dyndisk_header->magic, "cxsparse", 8); dyndisk_header->data_offset = be64_to_cpu(0xFFFFFFFF); dyndisk_header->table_offset = be64_to_cpu(3 * 512); dyndisk_header->version = be32_to_cpu(0x00010000); dyndisk_header->block_size = be32_to_cpu(block_size); dyndisk_header->max_table_entries = be32_to_cpu(num_bat_entries); dyndisk_header->checksum = be32_to_cpu(vpc_checksum(buf, 1024)); // Write the header if (lseek(fd, 512, SEEK_SET) < 0) return -EIO; if (write(fd, buf, 1024) != 1024) return -EIO; close(fd); return 0; }
20,265
1
ff_rm_retrieve_cache (AVFormatContext *s, AVIOContext *pb, AVStream *st, RMStream *ast, AVPacket *pkt) { RMDemuxContext *rm = s->priv_data; assert (rm->audio_pkt_cnt > 0); if (st->codec->codec_id == CODEC_ID_AAC) av_get_packet(pb, pkt, ast->sub_packet_lengths[ast->sub_packet_cnt - rm->audio_pkt_cnt]); else { av_new_packet(pkt, st->codec->block_align); memcpy(pkt->data, ast->pkt.data + st->codec->block_align * //FIXME avoid this (ast->sub_packet_h * ast->audio_framesize / st->codec->block_align - rm->audio_pkt_cnt), st->codec->block_align); } rm->audio_pkt_cnt--; if ((pkt->pts = ast->audiotimestamp) != AV_NOPTS_VALUE) { ast->audiotimestamp = AV_NOPTS_VALUE; pkt->flags = AV_PKT_FLAG_KEY; } else pkt->flags = 0; pkt->stream_index = st->index; return rm->audio_pkt_cnt; }
20,266
1
static void virtio_pci_reset(void *opaque) { VirtIOPCIProxy *proxy = opaque; virtio_reset(proxy->vdev); msix_reset(&proxy->pci_dev); }
20,267
1
static int bdrv_write_em(BlockDriverState *bs, int64_t sector_num, const uint8_t *buf, int nb_sectors) { int async_ret; BlockDriverAIOCB *acb; async_ret = NOT_DONE; qemu_aio_wait_start(); acb = bdrv_aio_write(bs, sector_num, buf, nb_sectors, bdrv_rw_em_cb, &async_ret); if (acb == NULL) { qemu_aio_wait_end(); return -1; } while (async_ret == NOT_DONE) { qemu_aio_wait(); } qemu_aio_wait_end(); return async_ret; }
20,268
1
static void ioapic_class_init(ObjectClass *klass, void *data) { IOAPICCommonClass *k = IOAPIC_COMMON_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); k->realize = ioapic_realize; /* * If APIC is in kernel, we need to update the kernel cache after * migration, otherwise first 24 gsi routes will be invalid. */ k->post_load = ioapic_update_kvm_routes; dc->reset = ioapic_reset_common; dc->props = ioapic_properties; }
20,269
1
static inline void iwmmxt_store_creg(int reg, TCGv var) { tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg])); }
20,270
0
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, const uint8_t *buf, int buf_size) { H264Context *h = avctx->priv_data; MpegEncContext *s = &h->s; AVFrame *pict = data; int buf_index; s->flags= avctx->flags; s->flags2= avctx->flags2; if(s->flags&CODEC_FLAG_TRUNCATED){ const int next= ff_h264_find_frame_end(h, buf, buf_size); assert((buf_size > 0) || (next == END_NOT_FOUND)); if( ff_combine_frame(&s->parse_context, next, &buf, &buf_size) < 0 ) return buf_size; //printf("next:%d buf_size:%d last_index:%d\n", next, buf_size, s->parse_context.last_index); } /* no supplementary picture */ if (buf_size == 0) { Picture *out; int i, out_idx; //FIXME factorize this with the output code below out = h->delayed_pic[0]; out_idx = 0; for(i=1; h->delayed_pic[i] && !h->delayed_pic[i]->key_frame && h->delayed_pic[i]->poc; i++) if(h->delayed_pic[i]->poc < out->poc){ out = h->delayed_pic[i]; out_idx = i; } for(i=out_idx; h->delayed_pic[i]; i++) h->delayed_pic[i] = h->delayed_pic[i+1]; if(out){ *data_size = sizeof(AVFrame); *pict= *(AVFrame*)out; } return 0; } if(h->is_avc && !h->got_avcC) { int i, cnt, nalsize; unsigned char *p = avctx->extradata; if(avctx->extradata_size < 7) { av_log(avctx, AV_LOG_ERROR, "avcC too short\n"); return -1; } if(*p != 1) { av_log(avctx, AV_LOG_ERROR, "Unknown avcC version %d\n", *p); return -1; } /* sps and pps in the avcC always have length coded with 2 bytes, so put a fake nal_length_size = 2 while parsing them */ h->nal_length_size = 2; // Decode sps from avcC cnt = *(p+5) & 0x1f; // Number of sps p += 6; for (i = 0; i < cnt; i++) { nalsize = AV_RB16(p) + 2; if(decode_nal_units(h, p, nalsize) < 0) { av_log(avctx, AV_LOG_ERROR, "Decoding sps %d from avcC failed\n", i); return -1; } p += nalsize; } // Decode pps from avcC cnt = *(p++); // Number of pps for (i = 0; i < cnt; i++) { nalsize = AV_RB16(p) + 2; if(decode_nal_units(h, p, nalsize) != nalsize) { av_log(avctx, AV_LOG_ERROR, "Decoding pps %d from avcC failed\n", i); return -1; } p += nalsize; } // Now store right nal length size, that will be use to parse all other nals h->nal_length_size = ((*(((char*)(avctx->extradata))+4))&0x03)+1; // Do not reparse avcC h->got_avcC = 1; } if(avctx->frame_number==0 && !h->is_avc && s->avctx->extradata_size){ if(decode_nal_units(h, s->avctx->extradata, s->avctx->extradata_size) < 0) return -1; } buf_index=decode_nal_units(h, buf, buf_size); if(buf_index < 0) return -1; if(!(s->flags2 & CODEC_FLAG2_CHUNKS) && !s->current_picture_ptr){ if (avctx->skip_frame >= AVDISCARD_NONREF || s->hurry_up) return 0; av_log(avctx, AV_LOG_ERROR, "no frame!\n"); return -1; } if(!(s->flags2 & CODEC_FLAG2_CHUNKS) || (s->mb_y >= s->mb_height && s->mb_height)){ Picture *out = s->current_picture_ptr; Picture *cur = s->current_picture_ptr; int i, pics, cross_idr, out_of_order, out_idx; s->mb_y= 0; s->current_picture_ptr->qscale_type= FF_QSCALE_TYPE_H264; s->current_picture_ptr->pict_type= s->pict_type; if(!s->dropable) { execute_ref_pic_marking(h, h->mmco, h->mmco_index); h->prev_poc_msb= h->poc_msb; h->prev_poc_lsb= h->poc_lsb; } h->prev_frame_num_offset= h->frame_num_offset; h->prev_frame_num= h->frame_num; /* * FIXME: Error handling code does not seem to support interlaced * when slices span multiple rows * The ff_er_add_slice calls don't work right for bottom * fields; they cause massive erroneous error concealing * Error marking covers both fields (top and bottom). * This causes a mismatched s->error_count * and a bad error table. 
Further, the error count goes to * INT_MAX when called for bottom field, because mb_y is * past end by one (callers fault) and resync_mb_y != 0 * causes problems for the first MB line, too. */ if (!FIELD_PICTURE) ff_er_frame_end(s); MPV_frame_end(s); if (s->first_field) { /* Wait for second field. */ *data_size = 0; } else { cur->interlaced_frame = FIELD_OR_MBAFF_PICTURE; /* Derive top_field_first from field pocs. */ cur->top_field_first = cur->field_poc[0] < cur->field_poc[1]; //FIXME do something with unavailable reference frames /* Sort B-frames into display order */ if(h->sps.bitstream_restriction_flag && s->avctx->has_b_frames < h->sps.num_reorder_frames){ s->avctx->has_b_frames = h->sps.num_reorder_frames; s->low_delay = 0; } if( s->avctx->strict_std_compliance >= FF_COMPLIANCE_STRICT && !h->sps.bitstream_restriction_flag){ s->avctx->has_b_frames= MAX_DELAYED_PIC_COUNT; s->low_delay= 0; } pics = 0; while(h->delayed_pic[pics]) pics++; assert(pics <= MAX_DELAYED_PIC_COUNT); h->delayed_pic[pics++] = cur; if(cur->reference == 0) cur->reference = DELAYED_PIC_REF; cross_idr = 0; for(i=0; h->delayed_pic[i]; i++) if(h->delayed_pic[i]->key_frame || h->delayed_pic[i]->poc==0) cross_idr = 1; out = h->delayed_pic[0]; out_idx = 0; for(i=1; h->delayed_pic[i] && !h->delayed_pic[i]->key_frame && h->delayed_pic[i]->poc; i++) if(h->delayed_pic[i]->poc < out->poc){ out = h->delayed_pic[i]; out_idx = i; } out_of_order = !cross_idr && out->poc < h->outputed_poc; if(h->sps.bitstream_restriction_flag && s->avctx->has_b_frames >= h->sps.num_reorder_frames) { } else if((out_of_order && pics-1 == s->avctx->has_b_frames && s->avctx->has_b_frames < MAX_DELAYED_PIC_COUNT) || (s->low_delay && ((!cross_idr && out->poc > h->outputed_poc + 2) || cur->pict_type == FF_B_TYPE))) { s->low_delay = 0; s->avctx->has_b_frames++; } if(out_of_order || pics > s->avctx->has_b_frames){ out->reference &= ~DELAYED_PIC_REF; for(i=out_idx; h->delayed_pic[i]; i++) h->delayed_pic[i] = h->delayed_pic[i+1]; } if(!out_of_order && pics > s->avctx->has_b_frames){ *data_size = sizeof(AVFrame); h->outputed_poc = out->poc; *pict= *(AVFrame*)out; }else{ av_log(avctx, AV_LOG_DEBUG, "no picture\n"); } } } assert(pict->data[0] || !*data_size); ff_print_debug_info(s, pict); //printf("out %d\n", (int)pict->data[0]); #if 0 //? /* Return the Picture timestamp as the frame number */ /* we subtract 1 because it is added on utils.c */ avctx->frame_number = s->picture_number - 1; #endif return get_consumed_bytes(s, buf_index, buf_size); }
20,271
0
static AVFrame *get_video_buffer(AVFilterLink *inlink, int w, int h) { PadContext *s = inlink->dst->priv; AVFrame *frame = ff_get_video_buffer(inlink->dst->outputs[0], w + (s->w - s->in_w), h + (s->h - s->in_h)); int plane; if (!frame) return NULL; frame->width = w; frame->height = h; for (plane = 0; plane < 4 && frame->data[plane]; plane++) { int hsub = s->draw.hsub[plane]; int vsub = s->draw.vsub[plane]; frame->data[plane] += (s->x >> hsub) * s->draw.pixelstep[plane] + (s->y >> vsub) * frame->linesize[plane]; } return frame; }
20,272
0
int64_t ff_start_tag(AVIOContext *pb, const char *tag) { ffio_wfourcc(pb, tag); avio_wl32(pb, 0); return avio_tell(pb); }
20,273
0
static int svq3_decode_mb(H264Context *h, unsigned int mb_type) { int i, j, k, m, dir, mode; int cbp = 0; uint32_t vlc; int8_t *top, *left; MpegEncContext *const s = (MpegEncContext *) h; const int mb_xy = h->mb_xy; const int b_xy = 4*s->mb_x + 4*s->mb_y*h->b_stride; h->top_samples_available = (s->mb_y == 0) ? 0x33FF : 0xFFFF; h->left_samples_available = (s->mb_x == 0) ? 0x5F5F : 0xFFFF; h->topright_samples_available = 0xFFFF; if (mb_type == 0) { /* SKIP */ if (s->pict_type == FF_P_TYPE || s->next_picture.mb_type[mb_xy] == -1) { svq3_mc_dir_part(s, 16*s->mb_x, 16*s->mb_y, 16, 16, 0, 0, 0, 0, 0, 0); if (s->pict_type == FF_B_TYPE) { svq3_mc_dir_part(s, 16*s->mb_x, 16*s->mb_y, 16, 16, 0, 0, 0, 0, 1, 1); } mb_type = MB_TYPE_SKIP; } else { mb_type = FFMIN(s->next_picture.mb_type[mb_xy], 6); if (svq3_mc_dir(h, mb_type, PREDICT_MODE, 0, 0) < 0) return -1; if (svq3_mc_dir(h, mb_type, PREDICT_MODE, 1, 1) < 0) return -1; mb_type = MB_TYPE_16x16; } } else if (mb_type < 8) { /* INTER */ if (h->thirdpel_flag && h->halfpel_flag == !get_bits1 (&s->gb)) { mode = THIRDPEL_MODE; } else if (h->halfpel_flag && h->thirdpel_flag == !get_bits1 (&s->gb)) { mode = HALFPEL_MODE; } else { mode = FULLPEL_MODE; } /* fill caches */ /* note ref_cache should contain here: ???????? ???11111 N??11111 N??11111 N??11111 */ for (m = 0; m < 2; m++) { if (s->mb_x > 0 && h->intra4x4_pred_mode[mb_xy - 1][0] != -1) { for (i = 0; i < 4; i++) { *(uint32_t *) h->mv_cache[m][scan8[0] - 1 + i*8] = *(uint32_t *) s->current_picture.motion_val[m][b_xy - 1 + i*h->b_stride]; } } else { for (i = 0; i < 4; i++) { *(uint32_t *) h->mv_cache[m][scan8[0] - 1 + i*8] = 0; } } if (s->mb_y > 0) { memcpy(h->mv_cache[m][scan8[0] - 1*8], s->current_picture.motion_val[m][b_xy - h->b_stride], 4*2*sizeof(int16_t)); memset(&h->ref_cache[m][scan8[0] - 1*8], (h->intra4x4_pred_mode[mb_xy - s->mb_stride][4] == -1) ? PART_NOT_AVAILABLE : 1, 4); if (s->mb_x < (s->mb_width - 1)) { *(uint32_t *) h->mv_cache[m][scan8[0] + 4 - 1*8] = *(uint32_t *) s->current_picture.motion_val[m][b_xy - h->b_stride + 4]; h->ref_cache[m][scan8[0] + 4 - 1*8] = (h->intra4x4_pred_mode[mb_xy - s->mb_stride + 1][0] == -1 || h->intra4x4_pred_mode[mb_xy - s->mb_stride ][4] == -1) ? PART_NOT_AVAILABLE : 1; }else h->ref_cache[m][scan8[0] + 4 - 1*8] = PART_NOT_AVAILABLE; if (s->mb_x > 0) { *(uint32_t *) h->mv_cache[m][scan8[0] - 1 - 1*8] = *(uint32_t *) s->current_picture.motion_val[m][b_xy - h->b_stride - 1]; h->ref_cache[m][scan8[0] - 1 - 1*8] = (h->intra4x4_pred_mode[mb_xy - s->mb_stride - 1][3] == -1) ? 
PART_NOT_AVAILABLE : 1; }else h->ref_cache[m][scan8[0] - 1 - 1*8] = PART_NOT_AVAILABLE; }else memset(&h->ref_cache[m][scan8[0] - 1*8 - 1], PART_NOT_AVAILABLE, 8); if (s->pict_type != FF_B_TYPE) break; } /* decode motion vector(s) and form prediction(s) */ if (s->pict_type == FF_P_TYPE) { if (svq3_mc_dir(h, (mb_type - 1), mode, 0, 0) < 0) return -1; } else { /* FF_B_TYPE */ if (mb_type != 2) { if (svq3_mc_dir(h, 0, mode, 0, 0) < 0) return -1; } else { for (i = 0; i < 4; i++) { memset(s->current_picture.motion_val[0][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t)); } } if (mb_type != 1) { if (svq3_mc_dir(h, 0, mode, 1, (mb_type == 3)) < 0) return -1; } else { for (i = 0; i < 4; i++) { memset(s->current_picture.motion_val[1][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t)); } } } mb_type = MB_TYPE_16x16; } else if (mb_type == 8 || mb_type == 33) { /* INTRA4x4 */ memset(h->intra4x4_pred_mode_cache, -1, 8*5*sizeof(int8_t)); if (mb_type == 8) { if (s->mb_x > 0) { for (i = 0; i < 4; i++) { h->intra4x4_pred_mode_cache[scan8[0] - 1 + i*8] = h->intra4x4_pred_mode[mb_xy - 1][i]; } if (h->intra4x4_pred_mode_cache[scan8[0] - 1] == -1) { h->left_samples_available = 0x5F5F; } } if (s->mb_y > 0) { h->intra4x4_pred_mode_cache[4+8*0] = h->intra4x4_pred_mode[mb_xy - s->mb_stride][4]; h->intra4x4_pred_mode_cache[5+8*0] = h->intra4x4_pred_mode[mb_xy - s->mb_stride][5]; h->intra4x4_pred_mode_cache[6+8*0] = h->intra4x4_pred_mode[mb_xy - s->mb_stride][6]; h->intra4x4_pred_mode_cache[7+8*0] = h->intra4x4_pred_mode[mb_xy - s->mb_stride][3]; if (h->intra4x4_pred_mode_cache[4+8*0] == -1) { h->top_samples_available = 0x33FF; } } /* decode prediction codes for luma blocks */ for (i = 0; i < 16; i+=2) { vlc = svq3_get_ue_golomb(&s->gb); if (vlc >= 25){ av_log(h->s.avctx, AV_LOG_ERROR, "luma prediction:%d\n", vlc); return -1; } left = &h->intra4x4_pred_mode_cache[scan8[i] - 1]; top = &h->intra4x4_pred_mode_cache[scan8[i] - 8]; left[1] = svq3_pred_1[top[0] + 1][left[0] + 1][svq3_pred_0[vlc][0]]; left[2] = svq3_pred_1[top[1] + 1][left[1] + 1][svq3_pred_0[vlc][1]]; if (left[1] == -1 || left[2] == -1){ av_log(h->s.avctx, AV_LOG_ERROR, "weird prediction\n"); return -1; } } } else { /* mb_type == 33, DC_128_PRED block type */ for (i = 0; i < 4; i++) { memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8*i], DC_PRED, 4); } } ff_h264_write_back_intra_pred_mode(h); if (mb_type == 8) { ff_h264_check_intra4x4_pred_mode(h); h->top_samples_available = (s->mb_y == 0) ? 0x33FF : 0xFFFF; h->left_samples_available = (s->mb_x == 0) ? 
0x5F5F : 0xFFFF; } else { for (i = 0; i < 4; i++) { memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8*i], DC_128_PRED, 4); } h->top_samples_available = 0x33FF; h->left_samples_available = 0x5F5F; } mb_type = MB_TYPE_INTRA4x4; } else { /* INTRA16x16 */ dir = i_mb_type_info[mb_type - 8].pred_mode; dir = (dir >> 1) ^ 3*(dir & 1) ^ 1; if ((h->intra16x16_pred_mode = ff_h264_check_intra_pred_mode(h, dir)) == -1){ av_log(h->s.avctx, AV_LOG_ERROR, "check_intra_pred_mode = -1\n"); return -1; } cbp = i_mb_type_info[mb_type - 8].cbp; mb_type = MB_TYPE_INTRA16x16; } if (!IS_INTER(mb_type) && s->pict_type != FF_I_TYPE) { for (i = 0; i < 4; i++) { memset(s->current_picture.motion_val[0][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t)); } if (s->pict_type == FF_B_TYPE) { for (i = 0; i < 4; i++) { memset(s->current_picture.motion_val[1][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t)); } } } if (!IS_INTRA4x4(mb_type)) { memset(h->intra4x4_pred_mode[mb_xy], DC_PRED, 8); } if (!IS_SKIP(mb_type) || s->pict_type == FF_B_TYPE) { memset(h->non_zero_count_cache + 8, 0, 4*9*sizeof(uint8_t)); s->dsp.clear_blocks(h->mb); } if (!IS_INTRA16x16(mb_type) && (!IS_SKIP(mb_type) || s->pict_type == FF_B_TYPE)) { if ((vlc = svq3_get_ue_golomb(&s->gb)) >= 48){ av_log(h->s.avctx, AV_LOG_ERROR, "cbp_vlc=%d\n", vlc); return -1; } cbp = IS_INTRA(mb_type) ? golomb_to_intra4x4_cbp[vlc] : golomb_to_inter_cbp[vlc]; } if (IS_INTRA16x16(mb_type) || (s->pict_type != FF_I_TYPE && s->adaptive_quant && cbp)) { s->qscale += svq3_get_se_golomb(&s->gb); if (s->qscale > 31){ av_log(h->s.avctx, AV_LOG_ERROR, "qscale:%d\n", s->qscale); return -1; } } if (IS_INTRA16x16(mb_type)) { if (svq3_decode_block(&s->gb, h->mb, 0, 0)){ av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding intra luma dc\n"); return -1; } } if (cbp) { const int index = IS_INTRA16x16(mb_type) ? 1 : 0; const int type = ((s->qscale < 24 && IS_INTRA4x4(mb_type)) ? 2 : 1); for (i = 0; i < 4; i++) { if ((cbp & (1 << i))) { for (j = 0; j < 4; j++) { k = index ? ((j&1) + 2*(i&1) + 2*(j&2) + 4*(i&2)) : (4*i + j); h->non_zero_count_cache[ scan8[k] ] = 1; if (svq3_decode_block(&s->gb, &h->mb[16*k], index, type)){ av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding block\n"); return -1; } } } } if ((cbp & 0x30)) { for (i = 0; i < 2; ++i) { if (svq3_decode_block(&s->gb, &h->mb[16*(16 + 4*i)], 0, 3)){ av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding chroma dc block\n"); return -1; } } if ((cbp & 0x20)) { for (i = 0; i < 8; i++) { h->non_zero_count_cache[ scan8[16+i] ] = 1; if (svq3_decode_block(&s->gb, &h->mb[16*(16 + i)], 1, 1)){ av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding chroma ac block\n"); return -1; } } } } } h->cbp= cbp; s->current_picture.mb_type[mb_xy] = mb_type; if (IS_INTRA(mb_type)) { h->chroma_pred_mode = ff_h264_check_intra_pred_mode(h, DC_PRED8x8); } return 0; }
20,274
0
AVCodecContext *avcodec_alloc_context3(const AVCodec *codec) { AVCodecContext *avctx= av_malloc(sizeof(AVCodecContext)); if(avctx==NULL) return NULL; if(avcodec_get_context_defaults3(avctx, codec) < 0){ av_free(avctx); return NULL; } return avctx; }
20,275
0
static void exynos4210_fimd_reset(DeviceState *d) { Exynos4210fimdState *s = EXYNOS4210_FIMD(d); unsigned w; DPRINT_TRACE("Display controller reset\n"); /* Set all display controller registers to 0 */ memset(&s->vidcon, 0, (uint8_t *)&s->window - (uint8_t *)&s->vidcon); for (w = 0; w < NUM_OF_WINDOWS; w++) { memset(&s->window[w], 0, sizeof(Exynos4210fimdWindow)); s->window[w].blendeq = 0xC2; exynos4210_fimd_update_win_bppmode(s, w); exynos4210_fimd_trace_bppmode(s, w, 0xFFFFFFFF); fimd_update_get_alpha(s, w); } if (s->ifb != NULL) { g_free(s->ifb); } s->ifb = NULL; exynos4210_fimd_invalidate(s); exynos4210_fimd_enable(s, false); /* Some registers have non-zero initial values */ s->winchmap = 0x7D517D51; s->colorgaincon = 0x10040100; s->huecoef_cr[0] = s->huecoef_cr[3] = 0x01000100; s->huecoef_cb[0] = s->huecoef_cb[3] = 0x01000100; s->hueoffset = 0x01800080; }
20,276
0
void helper_slbie(CPUPPCState *env, target_ulong addr) { PowerPCCPU *cpu = ppc_env_get_cpu(env); ppc_slb_t *slb; slb = slb_lookup(cpu, addr); if (!slb) { return; } if (slb->esid & SLB_ESID_V) { slb->esid &= ~SLB_ESID_V; /* XXX: given the fact that segment size is 256 MB or 1TB, * and we still don't have a tlb_flush_mask(env, n, mask) * in QEMU, we just invalidate all TLBs */ env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH; } }
20,277
0
static int execute_command(BlockDriverState *bdrv, SCSIRequest *r, int direction, BlockDriverCompletionFunc *complete) { r->io_header.interface_id = 'S'; r->io_header.dxfer_direction = direction; r->io_header.dxferp = r->buf; r->io_header.dxfer_len = r->buflen; r->io_header.cmdp = r->cmd; r->io_header.cmd_len = r->cmdlen; r->io_header.mx_sb_len = sizeof(r->dev->sensebuf); r->io_header.sbp = r->dev->sensebuf; r->io_header.timeout = MAX_UINT; r->io_header.usr_ptr = r; r->io_header.flags |= SG_FLAG_DIRECT_IO; if (bdrv_pwrite(bdrv, -1, &r->io_header, sizeof(r->io_header)) == -1) { BADF("execute_command: write failed ! (%d)\n", errno); return -1; } if (complete == NULL) { int ret; r->aiocb = NULL; while ((ret = bdrv_pread(bdrv, -1, &r->io_header, sizeof(r->io_header))) == -1 && errno == EINTR); if (ret == -1) { BADF("execute_command: read failed !\n"); return -1; } return 0; } r->aiocb = bdrv_aio_read(bdrv, 0, (uint8_t*)&r->io_header, -(int64_t)sizeof(r->io_header), complete, r); if (r->aiocb == NULL) { BADF("execute_command: read failed !\n"); return -1; } return 0; }
20,278
0
static void inet_addr_to_opts(QemuOpts *opts, const InetSocketAddress *addr) { bool ipv4 = addr->ipv4 || !addr->has_ipv4; bool ipv6 = addr->ipv6 || !addr->has_ipv6; if (!ipv4 || !ipv6) { qemu_opt_set_bool(opts, "ipv4", ipv4, &error_abort); qemu_opt_set_bool(opts, "ipv6", ipv6, &error_abort); } if (addr->has_to) { qemu_opt_set_number(opts, "to", addr->to, &error_abort); } qemu_opt_set(opts, "host", addr->host, &error_abort); qemu_opt_set(opts, "port", addr->port, &error_abort); }
20,279
0
static int local_lstat(FsContext *ctx, const char *path, struct stat *stbuf) { return lstat(rpath(ctx, path), stbuf); }
20,280
0
static void help(void) { printf("qemu-img version " QEMU_VERSION ", Copyright (c) 2004-2008 Fabrice Bellard\n" "usage: qemu-img command [command options]\n" "QEMU disk image utility\n" "\n" "Command syntax:\n" " check [-f fmt] filename\n" " create [-F fmt] [-b base_image] [-f fmt] [-o options] filename [size]\n" " commit [-f fmt] filename\n" " convert [-c] [-f fmt] [-O output_fmt] [-o options] [-B output_base_image] filename [filename2 [...]] output_filename\n" " info [-f fmt] filename\n" " snapshot [-l | -a snapshot | -c snapshot | -d snapshot] filename\n" "\n" "Command parameters:\n" " 'filename' is a disk image filename\n" " 'base_image' is the read-only disk image which is used as base for a copy on\n" " write image; the copy on write image only stores the modified data\n" " 'output_base_image' forces the output image to be created as a copy on write\n" " image of the specified base image; 'output_base_image' should have the same\n" " content as the input's base image, however the path, image format, etc may\n" " differ\n" " 'fmt' is the disk image format. It is guessed automatically in most cases\n" " 'size' is the disk image size in kilobytes. Optional suffixes\n" " 'M' (megabyte, 1024 * 1024) and 'G' (gigabyte, 1024 * 1024 * 1024) are\n" " supported any 'k' or 'K' is ignored\n" " 'output_filename' is the destination disk image filename\n" " 'output_fmt' is the destination format\n" " 'options' is a comma separated list of format specific options in a\n" " name=value format. Use -o ? for an overview of the options supported by the\n" " used format\n" " '-c' indicates that target image must be compressed (qcow format only)\n" " '-h' with or without a command shows this help and lists the supported formats\n" "\n" "Parameters to snapshot subcommand:\n" " 'snapshot' is the name of the snapshot to create, apply or delete\n" " '-a' applies a snapshot (revert disk to saved state)\n" " '-c' creates a snapshot\n" " '-d' deletes a snapshot\n" " '-l' lists all snapshots in the given image\n" ); printf("\nSupported formats:"); bdrv_iterate_format(format_print, NULL); printf("\n"); exit(1); }
20,281
0
ReadLineState *readline_init(Monitor *mon, ReadLineCompletionFunc *completion_finder) { ReadLineState *rs = g_malloc0(sizeof(*rs)); rs->hist_entry = -1; rs->mon = mon; rs->completion_finder = completion_finder; return rs; }
20,284
0
BlockDriverAIOCB *thread_pool_submit_aio(ThreadPoolFunc *func, void *arg, BlockDriverCompletionFunc *cb, void *opaque) { ThreadPool *pool = &global_pool; ThreadPoolElement *req; req = qemu_aio_get(&thread_pool_aiocb_info, NULL, cb, opaque); req->func = func; req->arg = arg; req->state = THREAD_QUEUED; req->pool = pool; QLIST_INSERT_HEAD(&pool->head, req, all); trace_thread_pool_submit(pool, req, arg); qemu_mutex_lock(&pool->lock); if (pool->idle_threads == 0 && pool->cur_threads < pool->max_threads) { spawn_thread(pool); } QTAILQ_INSERT_TAIL(&pool->request_list, req, reqs); qemu_mutex_unlock(&pool->lock); qemu_sem_post(&pool->sem); return &req->common; }
20,285
0
int ff_read_packet(AVFormatContext *s, AVPacket *pkt) { int ret, i, err; AVStream *st; for (;;) { AVPacketList *pktl = s->internal->raw_packet_buffer; if (pktl) { *pkt = pktl->pkt; st = s->streams[pkt->stream_index]; if (s->internal->raw_packet_buffer_remaining_size <= 0) if ((err = probe_codec(s, st, NULL)) < 0) return err; if (st->request_probe <= 0) { s->internal->raw_packet_buffer = pktl->next; s->internal->raw_packet_buffer_remaining_size += pkt->size; av_free(pktl); return 0; } } pkt->data = NULL; pkt->size = 0; av_init_packet(pkt); ret = s->iformat->read_packet(s, pkt); if (ret < 0) { /* Some demuxers return FFERROR_REDO when they consume data and discard it (ignored streams, junk, extradata). We must re-call the demuxer to get the real packet. */ if (ret == FFERROR_REDO) continue; if (!pktl || ret == AVERROR(EAGAIN)) return ret; for (i = 0; i < s->nb_streams; i++) { st = s->streams[i]; if (st->probe_packets) if ((err = probe_codec(s, st, NULL)) < 0) return err; av_assert0(st->request_probe <= 0); } continue; } if (!pkt->buf) { AVPacket tmp = { 0 }; ret = av_packet_ref(&tmp, pkt); if (ret < 0) return ret; *pkt = tmp; } if ((s->flags & AVFMT_FLAG_DISCARD_CORRUPT) && (pkt->flags & AV_PKT_FLAG_CORRUPT)) { av_log(s, AV_LOG_WARNING, "Dropped corrupted packet (stream = %d)\n", pkt->stream_index); av_packet_unref(pkt); continue; } if (pkt->stream_index >= (unsigned)s->nb_streams) { av_log(s, AV_LOG_ERROR, "Invalid stream index %d\n", pkt->stream_index); continue; } st = s->streams[pkt->stream_index]; if (update_wrap_reference(s, st, pkt->stream_index, pkt) && st->pts_wrap_behavior == AV_PTS_WRAP_SUB_OFFSET) { // correct first time stamps to negative values if (!is_relative(st->first_dts)) st->first_dts = wrap_timestamp(st, st->first_dts); if (!is_relative(st->start_time)) st->start_time = wrap_timestamp(st, st->start_time); if (!is_relative(st->cur_dts)) st->cur_dts = wrap_timestamp(st, st->cur_dts); } pkt->dts = wrap_timestamp(st, pkt->dts); pkt->pts = wrap_timestamp(st, pkt->pts); force_codec_ids(s, st); /* TODO: audio: time filter; video: frame reordering (pts != dts) */ if (s->use_wallclock_as_timestamps) pkt->dts = pkt->pts = av_rescale_q(av_gettime(), AV_TIME_BASE_Q, st->time_base); if (!pktl && st->request_probe <= 0) return ret; err = add_to_pktbuf(&s->internal->raw_packet_buffer, pkt, &s->internal->raw_packet_buffer_end, 0); if (err) return err; s->internal->raw_packet_buffer_remaining_size -= pkt->size; if ((err = probe_codec(s, st, pkt)) < 0) return err; } }
20,286
0
int kvm_arch_post_run(CPUState *env, struct kvm_run *run) { return 0; }
20,287
0
int bdrv_child_try_set_perm(BdrvChild *c, uint64_t perm, uint64_t shared, Error **errp) { int ret; ret = bdrv_child_check_perm(c, perm, shared, errp); if (ret < 0) { bdrv_child_abort_perm_update(c); return ret; } bdrv_child_set_perm(c, perm, shared); return 0; }
20,289
0
static gboolean nbd_accept(QIOChannel *ioc, GIOCondition cond, gpointer opaque) { QIOChannelSocket *cioc; cioc = qio_channel_socket_accept(QIO_CHANNEL_SOCKET(ioc), NULL); if (!cioc) { return TRUE; } if (state >= TERMINATE) { object_unref(OBJECT(cioc)); return TRUE; } nb_fds++; nbd_update_server_watch(); nbd_client_new(newproto ? NULL : exp, cioc, NULL, NULL, nbd_client_closed); object_unref(OBJECT(cioc)); return TRUE; }
20,290
0
static int sd_open(BlockDriverState *bs, QDict *options, int flags, Error **errp) { int ret, fd; uint32_t vid = 0; BDRVSheepdogState *s = bs->opaque; char vdi[SD_MAX_VDI_LEN], tag[SD_MAX_VDI_TAG_LEN]; uint32_t snapid; char *buf = NULL; QemuOpts *opts; Error *local_err = NULL; const char *filename; s->bs = bs; s->aio_context = bdrv_get_aio_context(bs); opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort); qemu_opts_absorb_qdict(opts, options, &local_err); if (local_err) { error_propagate(errp, local_err); ret = -EINVAL; goto out; } filename = qemu_opt_get(opts, "filename"); QLIST_INIT(&s->inflight_aio_head); QLIST_INIT(&s->failed_aio_head); QLIST_INIT(&s->inflight_aiocb_head); s->fd = -1; memset(vdi, 0, sizeof(vdi)); memset(tag, 0, sizeof(tag)); if (strstr(filename, "://")) { ret = sd_parse_uri(s, filename, vdi, &snapid, tag); } else { ret = parse_vdiname(s, filename, vdi, &snapid, tag); } if (ret < 0) { error_setg(errp, "Can't parse filename"); goto out; } s->fd = get_sheep_fd(s, errp); if (s->fd < 0) { ret = s->fd; goto out; } ret = find_vdi_name(s, vdi, snapid, tag, &vid, true, errp); if (ret) { goto out; } /* * QEMU block layer emulates writethrough cache as 'writeback + flush', so * we always set SD_FLAG_CMD_CACHE (writeback cache) as default. */ s->cache_flags = SD_FLAG_CMD_CACHE; if (flags & BDRV_O_NOCACHE) { s->cache_flags = SD_FLAG_CMD_DIRECT; } s->discard_supported = true; if (snapid || tag[0] != '\0') { DPRINTF("%" PRIx32 " snapshot inode was open.\n", vid); s->is_snapshot = true; } fd = connect_to_sdog(s, errp); if (fd < 0) { ret = fd; goto out; } buf = g_malloc(SD_INODE_SIZE); ret = read_object(fd, s->aio_context, buf, vid_to_vdi_oid(vid), 0, SD_INODE_SIZE, 0, s->cache_flags); closesocket(fd); if (ret) { error_setg(errp, "Can't read snapshot inode"); goto out; } memcpy(&s->inode, buf, sizeof(s->inode)); s->min_dirty_data_idx = UINT32_MAX; s->max_dirty_data_idx = 0; bs->total_sectors = s->inode.vdi_size / BDRV_SECTOR_SIZE; pstrcpy(s->name, sizeof(s->name), vdi); qemu_co_mutex_init(&s->lock); qemu_co_queue_init(&s->overwrapping_queue); qemu_opts_del(opts); g_free(buf); return 0; out: aio_set_fd_handler(bdrv_get_aio_context(bs), s->fd, NULL, NULL, NULL); if (s->fd >= 0) { closesocket(s->fd); } qemu_opts_del(opts); g_free(buf); return ret; }
20,291
0
static void gen_cp1 (DisasContext *ctx, uint32_t opc, int rt, int fs) { const char *opn = "cp1 move"; TCGv t0 = tcg_temp_new(); switch (opc) { case OPC_MFC1: { TCGv_i32 fp0 = tcg_temp_new_i32(); gen_load_fpr32(fp0, fs); tcg_gen_ext_i32_tl(t0, fp0); tcg_temp_free_i32(fp0); } gen_store_gpr(t0, rt); opn = "mfc1"; break; case OPC_MTC1: gen_load_gpr(t0, rt); { TCGv_i32 fp0 = tcg_temp_new_i32(); tcg_gen_trunc_tl_i32(fp0, t0); gen_store_fpr32(fp0, fs); tcg_temp_free_i32(fp0); } opn = "mtc1"; break; case OPC_CFC1: gen_helper_1e0i(cfc1, t0, fs); gen_store_gpr(t0, rt); opn = "cfc1"; break; case OPC_CTC1: gen_load_gpr(t0, rt); { TCGv_i32 fs_tmp = tcg_const_i32(fs); gen_helper_0e2i(ctc1, t0, fs_tmp, rt); tcg_temp_free_i32(fs_tmp); } opn = "ctc1"; break; #if defined(TARGET_MIPS64) case OPC_DMFC1: gen_load_fpr64(ctx, t0, fs); gen_store_gpr(t0, rt); opn = "dmfc1"; break; case OPC_DMTC1: gen_load_gpr(t0, rt); gen_store_fpr64(ctx, t0, fs); opn = "dmtc1"; break; #endif case OPC_MFHC1: { TCGv_i32 fp0 = tcg_temp_new_i32(); gen_load_fpr32h(fp0, fs); tcg_gen_ext_i32_tl(t0, fp0); tcg_temp_free_i32(fp0); } gen_store_gpr(t0, rt); opn = "mfhc1"; break; case OPC_MTHC1: gen_load_gpr(t0, rt); { TCGv_i32 fp0 = tcg_temp_new_i32(); tcg_gen_trunc_tl_i32(fp0, t0); gen_store_fpr32h(fp0, fs); tcg_temp_free_i32(fp0); } opn = "mthc1"; break; default: MIPS_INVAL(opn); generate_exception (ctx, EXCP_RI); goto out; } (void)opn; /* avoid a compiler warning */ MIPS_DEBUG("%s %s %s", opn, regnames[rt], fregnames[fs]); out: tcg_temp_free(t0); }
20,292
0
static void hpet_ram_write(void *opaque, target_phys_addr_t addr, uint64_t value, unsigned size) { int i; HPETState *s = opaque; uint64_t old_val, new_val, val, index; DPRINTF("qemu: Enter hpet_ram_writel at %" PRIx64 " = %#x\n", addr, value); index = addr; old_val = hpet_ram_read(opaque, addr, 4); new_val = value; /*address range of all TN regs*/ if (index >= 0x100 && index <= 0x3ff) { uint8_t timer_id = (addr - 0x100) / 0x20; HPETTimer *timer = &s->timer[timer_id]; DPRINTF("qemu: hpet_ram_writel timer_id = %#x\n", timer_id); if (timer_id > s->num_timers) { DPRINTF("qemu: timer id out of range\n"); return; } switch ((addr - 0x100) % 0x20) { case HPET_TN_CFG: DPRINTF("qemu: hpet_ram_writel HPET_TN_CFG\n"); if (activating_bit(old_val, new_val, HPET_TN_FSB_ENABLE)) { update_irq(timer, 0); } val = hpet_fixup_reg(new_val, old_val, HPET_TN_CFG_WRITE_MASK); timer->config = (timer->config & 0xffffffff00000000ULL) | val; if (new_val & HPET_TN_32BIT) { timer->cmp = (uint32_t)timer->cmp; timer->period = (uint32_t)timer->period; } if (activating_bit(old_val, new_val, HPET_TN_ENABLE)) { hpet_set_timer(timer); } else if (deactivating_bit(old_val, new_val, HPET_TN_ENABLE)) { hpet_del_timer(timer); } break; case HPET_TN_CFG + 4: // Interrupt capabilities DPRINTF("qemu: invalid HPET_TN_CFG+4 write\n"); break; case HPET_TN_CMP: // comparator register DPRINTF("qemu: hpet_ram_writel HPET_TN_CMP\n"); if (timer->config & HPET_TN_32BIT) { new_val = (uint32_t)new_val; } if (!timer_is_periodic(timer) || (timer->config & HPET_TN_SETVAL)) { timer->cmp = (timer->cmp & 0xffffffff00000000ULL) | new_val; } if (timer_is_periodic(timer)) { /* * FIXME: Clamp period to reasonable min value? * Clamp period to reasonable max value */ new_val &= (timer->config & HPET_TN_32BIT ? ~0u : ~0ull) >> 1; timer->period = (timer->period & 0xffffffff00000000ULL) | new_val; } timer->config &= ~HPET_TN_SETVAL; if (hpet_enabled(s)) { hpet_set_timer(timer); } break; case HPET_TN_CMP + 4: // comparator register high order DPRINTF("qemu: hpet_ram_writel HPET_TN_CMP + 4\n"); if (!timer_is_periodic(timer) || (timer->config & HPET_TN_SETVAL)) { timer->cmp = (timer->cmp & 0xffffffffULL) | new_val << 32; } else { /* * FIXME: Clamp period to reasonable min value? * Clamp period to reasonable max value */ new_val &= (timer->config & HPET_TN_32BIT ? ~0u : ~0ull) >> 1; timer->period = (timer->period & 0xffffffffULL) | new_val << 32; } timer->config &= ~HPET_TN_SETVAL; if (hpet_enabled(s)) { hpet_set_timer(timer); } break; case HPET_TN_ROUTE: timer->fsb = (timer->fsb & 0xffffffff00000000ULL) | new_val; break; case HPET_TN_ROUTE + 4: timer->fsb = (new_val << 32) | (timer->fsb & 0xffffffff); break; default: DPRINTF("qemu: invalid hpet_ram_writel\n"); break; } return; } else { switch (index) { case HPET_ID: return; case HPET_CFG: val = hpet_fixup_reg(new_val, old_val, HPET_CFG_WRITE_MASK); s->config = (s->config & 0xffffffff00000000ULL) | val; if (activating_bit(old_val, new_val, HPET_CFG_ENABLE)) { /* Enable main counter and interrupt generation. */ s->hpet_offset = ticks_to_ns(s->hpet_counter) - qemu_get_clock_ns(vm_clock); for (i = 0; i < s->num_timers; i++) { if ((&s->timer[i])->cmp != ~0ULL) { hpet_set_timer(&s->timer[i]); } } } else if (deactivating_bit(old_val, new_val, HPET_CFG_ENABLE)) { /* Halt main counter and disable interrupt generation. 
*/ s->hpet_counter = hpet_get_ticks(s); for (i = 0; i < s->num_timers; i++) { hpet_del_timer(&s->timer[i]); } } /* i8254 and RTC output pins are disabled * when HPET is in legacy mode */ if (activating_bit(old_val, new_val, HPET_CFG_LEGACY)) { qemu_set_irq(s->pit_enabled, 0); qemu_irq_lower(s->irqs[0]); qemu_irq_lower(s->irqs[RTC_ISA_IRQ]); } else if (deactivating_bit(old_val, new_val, HPET_CFG_LEGACY)) { qemu_irq_lower(s->irqs[0]); qemu_set_irq(s->pit_enabled, 1); qemu_set_irq(s->irqs[RTC_ISA_IRQ], s->rtc_irq_level); } break; case HPET_CFG + 4: DPRINTF("qemu: invalid HPET_CFG+4 write\n"); break; case HPET_STATUS: val = new_val & s->isr; for (i = 0; i < s->num_timers; i++) { if (val & (1 << i)) { update_irq(&s->timer[i], 0); } } break; case HPET_COUNTER: if (hpet_enabled(s)) { DPRINTF("qemu: Writing counter while HPET enabled!\n"); } s->hpet_counter = (s->hpet_counter & 0xffffffff00000000ULL) | value; DPRINTF("qemu: HPET counter written. ctr = %#x -> %" PRIx64 "\n", value, s->hpet_counter); break; case HPET_COUNTER + 4: if (hpet_enabled(s)) { DPRINTF("qemu: Writing counter while HPET enabled!\n"); } s->hpet_counter = (s->hpet_counter & 0xffffffffULL) | (((uint64_t)value) << 32); DPRINTF("qemu: HPET counter + 4 written. ctr = %#x -> %" PRIx64 "\n", value, s->hpet_counter); break; default: DPRINTF("qemu: invalid hpet_ram_writel\n"); break; } } }
20,293
0
static void init_proc_601 (CPUPPCState *env) { gen_spr_ne_601(env); gen_spr_601(env); /* Hardware implementation registers */ /* XXX : not implemented */ spr_register(env, SPR_HID0, "HID0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_hid0_601, 0x80010080); /* XXX : not implemented */ spr_register(env, SPR_HID1, "HID1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_601_HID2, "HID2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_601_HID5, "HID5", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_601_HID15, "HID15", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Memory management */ #if !defined(CONFIG_USER_ONLY) env->nb_tlb = 64; env->nb_ways = 2; env->id_tlbs = 0; #endif init_excp_601(env); env->dcache_line_size = 64; env->icache_line_size = 64; /* Allocate hardware IRQ controller */ ppc6xx_irq_init(env); }
20,294
0
static inline void gen_stos(DisasContext *s, int ot) { gen_op_mov_TN_reg(OT_LONG, 0, R_EAX); gen_string_movl_A0_EDI(s); gen_op_st_T0_A0(ot + s->mem_index); gen_op_movl_T0_Dshift[ot](); #ifdef TARGET_X86_64 if (s->aflag == 2) { gen_op_addq_EDI_T0(); } else #endif if (s->aflag) { gen_op_addl_EDI_T0(); } else { gen_op_addw_EDI_T0(); } }
20,295
0
static int read_object(int fd, char *buf, uint64_t oid, int copies, unsigned int datalen, uint64_t offset, bool cache) { return read_write_object(fd, buf, oid, copies, datalen, offset, false, false, cache); }
20,296
0
static int decode_group3_2d_line(AVCodecContext *avctx, GetBitContext *gb, unsigned int width, int *runs, const int *runend, const int *ref) { int mode = 0, saved_run = 0, t; int run_off = *ref++; unsigned int offs=0, run= 0; runend--; // for the last written 0 while(offs < width){ int cmode = get_vlc2(gb, ccitt_group3_2d_vlc.table, 9, 1); if(cmode == -1){ av_log(avctx, AV_LOG_ERROR, "Incorrect mode VLC\n"); return -1; } if(!cmode){//pass mode run_off += *ref++; run = run_off - offs; offs= run_off; run_off += *ref++; if(offs > width){ av_log(avctx, AV_LOG_ERROR, "Run went out of bounds\n"); return -1; } saved_run += run; }else if(cmode == 1){//horizontal mode int k; for(k = 0; k < 2; k++){ run = 0; for(;;){ t = get_vlc2(gb, ccitt_vlc[mode].table, 9, 2); if(t == -1){ av_log(avctx, AV_LOG_ERROR, "Incorrect code\n"); return -1; } run += t; if(t < 64) break; } *runs++ = run + saved_run; if(runs >= runend){ av_log(avctx, AV_LOG_ERROR, "Run overrun\n"); return -1; } saved_run = 0; offs += run; if(offs > width || run > width){ av_log(avctx, AV_LOG_ERROR, "Run went out of bounds\n"); return -1; } mode = !mode; } }else if(cmode == 9 || cmode == 10){ av_log(avctx, AV_LOG_ERROR, "Special modes are not supported (yet)\n"); return -1; }else{//vertical mode run = run_off - offs + (cmode - 5); run_off -= *--ref; offs += run; if(offs > width || run > width){ av_log(avctx, AV_LOG_ERROR, "Run went out of bounds\n"); return -1; } *runs++ = run + saved_run; if(runs >= runend){ av_log(avctx, AV_LOG_ERROR, "Run overrun\n"); return -1; } saved_run = 0; mode = !mode; } //sync line pointers while(run_off <= offs){ run_off += *ref++; run_off += *ref++; } } *runs++ = saved_run; *runs++ = 0; return 0; }
20,297
0
static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs, int64_t offset, unsigned int bytes, QEMUIOVector *qiov, int flags) { BlockDriver *drv = bs->drv; BdrvTrackedRequest req; int ret; int64_t sector_num = offset >> BDRV_SECTOR_BITS; unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS; assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0); assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0); /* Handle Copy on Read and associated serialisation */ if (flags & BDRV_REQ_COPY_ON_READ) { bs->copy_on_read_in_flight++; } if (bs->copy_on_read_in_flight) { wait_for_overlapping_requests(bs, offset, bytes); } tracked_request_begin(&req, bs, offset, bytes, false); if (flags & BDRV_REQ_COPY_ON_READ) { int pnum; ret = bdrv_is_allocated(bs, sector_num, nb_sectors, &pnum); if (ret < 0) { goto out; } if (!ret || pnum != nb_sectors) { ret = bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, qiov); goto out; } } /* Forward the request to the BlockDriver */ if (!(bs->zero_beyond_eof && bs->growable)) { ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov); } else { /* Read zeros after EOF of growable BDSes */ int64_t len, total_sectors, max_nb_sectors; len = bdrv_getlength(bs); if (len < 0) { ret = len; goto out; } total_sectors = DIV_ROUND_UP(len, BDRV_SECTOR_SIZE); max_nb_sectors = MAX(0, total_sectors - sector_num); if (max_nb_sectors > 0) { ret = drv->bdrv_co_readv(bs, sector_num, MIN(nb_sectors, max_nb_sectors), qiov); } else { ret = 0; } /* Reading beyond end of file is supposed to produce zeroes */ if (ret == 0 && total_sectors < sector_num + nb_sectors) { uint64_t offset = MAX(0, total_sectors - sector_num); uint64_t bytes = (sector_num + nb_sectors - offset) * BDRV_SECTOR_SIZE; qemu_iovec_memset(qiov, offset * BDRV_SECTOR_SIZE, 0, bytes); } } out: tracked_request_end(&req); if (flags & BDRV_REQ_COPY_ON_READ) { bs->copy_on_read_in_flight--; } return ret; }
20,298
0
void virtio_queue_notify_vq(VirtQueue *vq) { if (vq->vring.desc) { VirtIODevice *vdev = vq->vdev; trace_virtio_queue_notify(vdev, vq - vdev->vq, vq); vq->handle_output(vdev, vq); } }
20,300
1
target_ulong helper_rdhwr_performance(CPUMIPSState *env) { check_hwrena(env, 4); return env->CP0_Performance0; }
20,302
1
static void openpic_reset(DeviceState *d) { OpenPICState *opp = FROM_SYSBUS(typeof (*opp), sysbus_from_qdev(d)); int i; opp->glbc = GLBC_RESET; /* Initialise controller registers */ opp->frep = ((opp->nb_irqs - 1) << FREP_NIRQ_SHIFT) | ((opp->nb_cpus - 1) << FREP_NCPU_SHIFT) | (opp->vid << FREP_VID_SHIFT); opp->pint = 0; opp->spve = -1 & opp->vector_mask; opp->tifr = opp->tifr_reset; /* Initialise IRQ sources */ for (i = 0; i < opp->max_irq; i++) { opp->src[i].ipvp = opp->ipvp_reset; opp->src[i].ide = opp->ide_reset; } /* Initialise IRQ destinations */ for (i = 0; i < MAX_CPU; i++) { opp->dst[i].pctp = 15; memset(&opp->dst[i].raised, 0, sizeof(IRQ_queue_t)); opp->dst[i].raised.next = -1; memset(&opp->dst[i].servicing, 0, sizeof(IRQ_queue_t)); opp->dst[i].servicing.next = -1; } /* Initialise timers */ for (i = 0; i < MAX_TMR; i++) { opp->timers[i].ticc = 0; opp->timers[i].tibc = TIBC_CI; } /* Go out of RESET state */ opp->glbc = 0; }
20,303
1
static uint64_t get_cluster_offset(BlockDriverState *bs, uint64_t offset, int allocate) { BDRVVmdkState *s = bs->opaque; unsigned int l1_index, l2_offset, l2_index; int min_index, i, j; uint32_t min_count, *l2_table, tmp; uint64_t cluster_offset; l1_index = (offset >> 9) / s->l1_entry_sectors; if (l1_index >= s->l1_size) return 0; l2_offset = s->l1_table[l1_index]; if (!l2_offset) return 0; for(i = 0; i < L2_CACHE_SIZE; i++) { if (l2_offset == s->l2_cache_offsets[i]) { /* increment the hit count */ if (++s->l2_cache_counts[i] == 0xffffffff) { for(j = 0; j < L2_CACHE_SIZE; j++) { s->l2_cache_counts[j] >>= 1; } } l2_table = s->l2_cache + (i * s->l2_size); goto found; } } /* not found: load a new entry in the least used one */ min_index = 0; min_count = 0xffffffff; for(i = 0; i < L2_CACHE_SIZE; i++) { if (s->l2_cache_counts[i] < min_count) { min_count = s->l2_cache_counts[i]; min_index = i; } } l2_table = s->l2_cache + (min_index * s->l2_size); if (bdrv_pread(s->hd, (int64_t)l2_offset * 512, l2_table, s->l2_size * sizeof(uint32_t)) != s->l2_size * sizeof(uint32_t)) return 0; s->l2_cache_offsets[min_index] = l2_offset; s->l2_cache_counts[min_index] = 1; found: l2_index = ((offset >> 9) / s->cluster_sectors) % s->l2_size; cluster_offset = le32_to_cpu(l2_table[l2_index]); if (!cluster_offset) { struct stat file_buf; if (!allocate) return 0; stat(s->hd->filename, &file_buf); cluster_offset = file_buf.st_size; bdrv_truncate(s->hd, cluster_offset + (s->cluster_sectors << 9)); cluster_offset >>= 9; /* update L2 table */ tmp = cpu_to_le32(cluster_offset); l2_table[l2_index] = tmp; if (bdrv_pwrite(s->hd, ((int64_t)l2_offset * 512) + (l2_index * sizeof(tmp)), &tmp, sizeof(tmp)) != sizeof(tmp)) return 0; /* update backup L2 table */ if (s->l1_backup_table_offset != 0) { l2_offset = s->l1_backup_table[l1_index]; if (bdrv_pwrite(s->hd, ((int64_t)l2_offset * 512) + (l2_index * sizeof(tmp)), &tmp, sizeof(tmp)) != sizeof(tmp)) return 0; } if (get_whole_cluster(bs, cluster_offset, offset, allocate) == -1) return 0; } cluster_offset <<= 9; return cluster_offset; }
20,304
1
static void rbd_aio_bh_cb(void *opaque) { RBDAIOCB *acb = opaque; if (acb->cmd == RBD_AIO_READ) { qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size); } qemu_vfree(acb->bounce); acb->common.cb(acb->common.opaque, (acb->ret > 0 ? 0 : acb->ret)); qemu_bh_delete(acb->bh); acb->bh = NULL; qemu_aio_release(acb); }
20,306
1
static void test_tco_second_timeout_shutdown(void) { TestData td; const uint16_t ticks = TCO_SECS_TO_TICKS(128); QDict *ad; td.args = "-watchdog-action shutdown"; td.noreboot = false; test_init(&td); stop_tco(&td); clear_tco_status(&td); reset_on_second_timeout(true); set_tco_timeout(&td, ticks); load_tco(&td); start_tco(&td); clock_step(ticks * TCO_TICK_NSEC * 2); ad = get_watchdog_action(); g_assert(!strcmp(qdict_get_str(ad, "action"), "shutdown")); QDECREF(ad); stop_tco(&td); qtest_end(); }
20,307
1
static void add_ptimer_tests(uint8_t policy) { uint8_t *ppolicy = g_malloc(1); char *policy_name = g_malloc0(256); *ppolicy = policy; if (policy == PTIMER_POLICY_DEFAULT) { g_sprintf(policy_name, "default"); } if (policy & PTIMER_POLICY_WRAP_AFTER_ONE_PERIOD) { g_strlcat(policy_name, "wrap_after_one_period,", 256); } if (policy & PTIMER_POLICY_CONTINUOUS_TRIGGER) { g_strlcat(policy_name, "continuous_trigger,", 256); } if (policy & PTIMER_POLICY_NO_IMMEDIATE_TRIGGER) { g_strlcat(policy_name, "no_immediate_trigger,", 256); } if (policy & PTIMER_POLICY_NO_IMMEDIATE_RELOAD) { g_strlcat(policy_name, "no_immediate_reload,", 256); } if (policy & PTIMER_POLICY_NO_COUNTER_ROUND_DOWN) { g_strlcat(policy_name, "no_counter_rounddown,", 256); } g_test_add_data_func( g_strdup_printf("/ptimer/set_count policy=%s", policy_name), ppolicy, check_set_count); g_test_add_data_func( g_strdup_printf("/ptimer/set_limit policy=%s", policy_name), ppolicy, check_set_limit); g_test_add_data_func( g_strdup_printf("/ptimer/oneshot policy=%s", policy_name), ppolicy, check_oneshot); g_test_add_data_func( g_strdup_printf("/ptimer/periodic policy=%s", policy_name), ppolicy, check_periodic); g_test_add_data_func( g_strdup_printf("/ptimer/on_the_fly_mode_change policy=%s", policy_name), ppolicy, check_on_the_fly_mode_change); g_test_add_data_func( g_strdup_printf("/ptimer/on_the_fly_period_change policy=%s", policy_name), ppolicy, check_on_the_fly_period_change); g_test_add_data_func( g_strdup_printf("/ptimer/on_the_fly_freq_change policy=%s", policy_name), ppolicy, check_on_the_fly_freq_change); g_test_add_data_func( g_strdup_printf("/ptimer/run_with_period_0 policy=%s", policy_name), ppolicy, check_run_with_period_0); g_test_add_data_func( g_strdup_printf("/ptimer/run_with_delta_0 policy=%s", policy_name), ppolicy, check_run_with_delta_0); g_test_add_data_func( g_strdup_printf("/ptimer/periodic_with_load_0 policy=%s", policy_name), ppolicy, check_periodic_with_load_0); g_test_add_data_func( g_strdup_printf("/ptimer/oneshot_with_load_0 policy=%s", policy_name), ppolicy, check_oneshot_with_load_0); }
20,308
1
void HELPER(entry)(CPUXtensaState *env, uint32_t pc, uint32_t s, uint32_t imm) { int callinc = (env->sregs[PS] & PS_CALLINC) >> PS_CALLINC_SHIFT; if (s > 3 || ((env->sregs[PS] & (PS_WOE | PS_EXCM)) ^ PS_WOE) != 0) { qemu_log("Illegal entry instruction(pc = %08x), PS = %08x\n", pc, env->sregs[PS]); HELPER(exception_cause)(env, pc, ILLEGAL_INSTRUCTION_CAUSE); } else { env->regs[(callinc << 2) | (s & 3)] = env->regs[s] - (imm << 3); rotate_window(env, callinc); env->sregs[WINDOW_START] |= windowstart_bit(env->sregs[WINDOW_BASE], env); } }
20,309
1
static void macio_nvram_writeb(void *opaque, hwaddr addr, uint64_t value, unsigned size) { MacIONVRAMState *s = opaque; addr = (addr >> s->it_shift) & (s->size - 1); s->data[addr] = value; NVR_DPRINTF("writeb addr %04" PHYS_PRIx " val %" PRIx64 "\n", addr, value); }
20,310
1
static int qcow2_do_open(BlockDriverState *bs, QDict *options, int flags, Error **errp) { BDRVQcow2State *s = bs->opaque; unsigned int len, i; int ret = 0; QCowHeader header; Error *local_err = NULL; uint64_t ext_end; uint64_t l1_vm_state_index; bool update_header = false; ret = bdrv_pread(bs->file, 0, &header, sizeof(header)); if (ret < 0) { error_setg_errno(errp, -ret, "Could not read qcow2 header"); be32_to_cpus(&header.magic); be32_to_cpus(&header.version); be64_to_cpus(&header.backing_file_offset); be32_to_cpus(&header.backing_file_size); be64_to_cpus(&header.size); be32_to_cpus(&header.cluster_bits); be32_to_cpus(&header.crypt_method); be64_to_cpus(&header.l1_table_offset); be32_to_cpus(&header.l1_size); be64_to_cpus(&header.refcount_table_offset); be32_to_cpus(&header.refcount_table_clusters); be64_to_cpus(&header.snapshots_offset); be32_to_cpus(&header.nb_snapshots); if (header.magic != QCOW_MAGIC) { error_setg(errp, "Image is not in qcow2 format"); if (header.version < 2 || header.version > 3) { error_setg(errp, "Unsupported qcow2 version %" PRIu32, header.version); ret = -ENOTSUP; s->qcow_version = header.version; /* Initialise cluster size */ if (header.cluster_bits < MIN_CLUSTER_BITS || header.cluster_bits > MAX_CLUSTER_BITS) { error_setg(errp, "Unsupported cluster size: 2^%" PRIu32, header.cluster_bits); s->cluster_bits = header.cluster_bits; s->cluster_size = 1 << s->cluster_bits; s->cluster_sectors = 1 << (s->cluster_bits - BDRV_SECTOR_BITS); /* Initialise version 3 header fields */ if (header.version == 2) { header.incompatible_features = 0; header.compatible_features = 0; header.autoclear_features = 0; header.refcount_order = 4; header.header_length = 72; } else { be64_to_cpus(&header.incompatible_features); be64_to_cpus(&header.compatible_features); be64_to_cpus(&header.autoclear_features); be32_to_cpus(&header.refcount_order); be32_to_cpus(&header.header_length); if (header.header_length < 104) { error_setg(errp, "qcow2 header too short"); if (header.header_length > s->cluster_size) { error_setg(errp, "qcow2 header exceeds cluster size"); if (header.header_length > sizeof(header)) { s->unknown_header_fields_size = header.header_length - sizeof(header); s->unknown_header_fields = g_malloc(s->unknown_header_fields_size); ret = bdrv_pread(bs->file, sizeof(header), s->unknown_header_fields, s->unknown_header_fields_size); if (ret < 0) { error_setg_errno(errp, -ret, "Could not read unknown qcow2 header " "fields"); if (header.backing_file_offset > s->cluster_size) { error_setg(errp, "Invalid backing file offset"); if (header.backing_file_offset) { ext_end = header.backing_file_offset; } else { ext_end = 1 << header.cluster_bits; /* Handle feature bits */ s->incompatible_features = header.incompatible_features; s->compatible_features = header.compatible_features; s->autoclear_features = header.autoclear_features; if (s->incompatible_features & ~QCOW2_INCOMPAT_MASK) { void *feature_table = NULL; qcow2_read_extensions(bs, header.header_length, ext_end, &feature_table, flags, NULL, NULL); report_unsupported_feature(errp, feature_table, s->incompatible_features & ~QCOW2_INCOMPAT_MASK); ret = -ENOTSUP; g_free(feature_table); if (s->incompatible_features & QCOW2_INCOMPAT_CORRUPT) { /* Corrupt images may not be written to unless they are being repaired */ if ((flags & BDRV_O_RDWR) && !(flags & BDRV_O_CHECK)) { error_setg(errp, "qcow2: Image is corrupt; cannot be opened " "read/write"); ret = -EACCES; /* Check support for various header values */ if (header.refcount_order > 6) { 
error_setg(errp, "Reference count entry width too large; may not " "exceed 64 bits"); s->refcount_order = header.refcount_order; s->refcount_bits = 1 << s->refcount_order; s->refcount_max = UINT64_C(1) << (s->refcount_bits - 1); s->refcount_max += s->refcount_max - 1; s->crypt_method_header = header.crypt_method; if (s->crypt_method_header) { if (bdrv_uses_whitelist() && s->crypt_method_header == QCOW_CRYPT_AES) { error_setg(errp, "Use of AES-CBC encrypted qcow2 images is no longer " "supported in system emulators"); error_append_hint(errp, "You can use 'qemu-img convert' to convert your " "image to an alternative supported format, such " "as unencrypted qcow2, or raw with the LUKS " "format instead.\n"); ret = -ENOSYS; if (s->crypt_method_header == QCOW_CRYPT_AES) { s->crypt_physical_offset = false; } else { /* Assuming LUKS and any future crypt methods we * add will all use physical offsets, due to the * fact that the alternative is insecure... */ s->crypt_physical_offset = true; bs->encrypted = true; s->l2_bits = s->cluster_bits - 3; /* L2 is always one cluster */ s->l2_size = 1 << s->l2_bits; /* 2^(s->refcount_order - 3) is the refcount width in bytes */ s->refcount_block_bits = s->cluster_bits - (s->refcount_order - 3); s->refcount_block_size = 1 << s->refcount_block_bits; bs->total_sectors = header.size / 512; s->csize_shift = (62 - (s->cluster_bits - 8)); s->csize_mask = (1 << (s->cluster_bits - 8)) - 1; s->cluster_offset_mask = (1LL << s->csize_shift) - 1; s->refcount_table_offset = header.refcount_table_offset; s->refcount_table_size = header.refcount_table_clusters << (s->cluster_bits - 3); if (header.refcount_table_clusters > qcow2_max_refcount_clusters(s)) { error_setg(errp, "Reference count table too large"); ret = validate_table_offset(bs, s->refcount_table_offset, s->refcount_table_size, sizeof(uint64_t)); if (ret < 0) { error_setg(errp, "Invalid reference count table offset"); /* Snapshot table offset/length */ if (header.nb_snapshots > QCOW_MAX_SNAPSHOTS) { error_setg(errp, "Too many snapshots"); ret = validate_table_offset(bs, header.snapshots_offset, header.nb_snapshots, sizeof(QCowSnapshotHeader)); if (ret < 0) { error_setg(errp, "Invalid snapshot table offset"); /* read the level 1 table */ if (header.l1_size > QCOW_MAX_L1_SIZE / sizeof(uint64_t)) { error_setg(errp, "Active L1 table too large"); ret = -EFBIG; s->l1_size = header.l1_size; l1_vm_state_index = size_to_l1(s, header.size); if (l1_vm_state_index > INT_MAX) { error_setg(errp, "Image is too big"); ret = -EFBIG; s->l1_vm_state_index = l1_vm_state_index; /* the L1 table must contain at least enough entries to put header.size bytes */ if (s->l1_size < s->l1_vm_state_index) { error_setg(errp, "L1 table is too small"); ret = validate_table_offset(bs, header.l1_table_offset, header.l1_size, sizeof(uint64_t)); if (ret < 0) { error_setg(errp, "Invalid L1 table offset"); s->l1_table_offset = header.l1_table_offset; if (s->l1_size > 0) { s->l1_table = qemu_try_blockalign(bs->file->bs, align_offset(s->l1_size * sizeof(uint64_t), 512)); if (s->l1_table == NULL) { error_setg(errp, "Could not allocate L1 table"); ret = -ENOMEM; ret = bdrv_pread(bs->file, s->l1_table_offset, s->l1_table, s->l1_size * sizeof(uint64_t)); if (ret < 0) { error_setg_errno(errp, -ret, "Could not read L1 table"); for(i = 0;i < s->l1_size; i++) { be64_to_cpus(&s->l1_table[i]); /* Parse driver-specific options */ ret = qcow2_update_options(bs, options, flags, errp); if (ret < 0) { s->cluster_cache_offset = -1; s->flags = flags; ret = 
qcow2_refcount_init(bs); if (ret != 0) { error_setg_errno(errp, -ret, "Could not initialize refcount handling"); QLIST_INIT(&s->cluster_allocs); QTAILQ_INIT(&s->discards); /* read qcow2 extensions */ if (qcow2_read_extensions(bs, header.header_length, ext_end, NULL, flags, &update_header, &local_err)) { error_propagate(errp, local_err); /* qcow2_read_extension may have set up the crypto context * if the crypt method needs a header region, some methods * don't need header extensions, so must check here */ if (s->crypt_method_header && !s->crypto) { if (s->crypt_method_header == QCOW_CRYPT_AES) { unsigned int cflags = 0; if (flags & BDRV_O_NO_IO) { cflags |= QCRYPTO_BLOCK_OPEN_NO_IO; s->crypto = qcrypto_block_open(s->crypto_opts, "encrypt.", NULL, NULL, cflags, errp); if (!s->crypto) { } else if (!(flags & BDRV_O_NO_IO)) { error_setg(errp, "Missing CRYPTO header for crypt method %d", s->crypt_method_header); /* read the backing file name */ if (header.backing_file_offset != 0) { len = header.backing_file_size; if (len > MIN(1023, s->cluster_size - header.backing_file_offset) || len >= sizeof(bs->backing_file)) { error_setg(errp, "Backing file name too long"); ret = bdrv_pread(bs->file, header.backing_file_offset, bs->backing_file, len); if (ret < 0) { error_setg_errno(errp, -ret, "Could not read backing file name"); bs->backing_file[len] = '\0'; s->image_backing_file = g_strdup(bs->backing_file); /* Internal snapshots */ s->snapshots_offset = header.snapshots_offset; s->nb_snapshots = header.nb_snapshots; ret = qcow2_read_snapshots(bs); if (ret < 0) { error_setg_errno(errp, -ret, "Could not read snapshots"); /* Clear unknown autoclear feature bits */ update_header |= s->autoclear_features & ~QCOW2_AUTOCLEAR_MASK; update_header = update_header && !bs->read_only && !(flags & BDRV_O_INACTIVE); if (update_header) { s->autoclear_features &= QCOW2_AUTOCLEAR_MASK; if (qcow2_load_autoloading_dirty_bitmaps(bs, &local_err)) { update_header = false; if (local_err != NULL) { error_propagate(errp, local_err); if (update_header) { ret = qcow2_update_header(bs); if (ret < 0) { error_setg_errno(errp, -ret, "Could not update qcow2 header"); /* Initialise locks */ qemu_co_mutex_init(&s->lock); bs->supported_zero_flags = BDRV_REQ_MAY_UNMAP; /* Repair image if dirty */ if (!(flags & (BDRV_O_CHECK | BDRV_O_INACTIVE)) && !bs->read_only && (s->incompatible_features & QCOW2_INCOMPAT_DIRTY)) { BdrvCheckResult result = {0}; ret = qcow2_check(bs, &result, BDRV_FIX_ERRORS | BDRV_FIX_LEAKS); if (ret < 0) { error_setg_errno(errp, -ret, "Could not repair dirty image"); #ifdef DEBUG_ALLOC { BdrvCheckResult result = {0}; qcow2_check_refcounts(bs, &result, 0); #endif return ret; fail: g_free(s->unknown_header_fields); cleanup_unknown_header_ext(bs); qcow2_free_snapshots(bs); qcow2_refcount_close(bs); qemu_vfree(s->l1_table); /* else pre-write overlap checks in cache_destroy may crash */ s->l1_table = NULL; cache_clean_timer_del(bs); if (s->l2_table_cache) { qcow2_cache_destroy(bs, s->l2_table_cache); if (s->refcount_block_cache) { qcow2_cache_destroy(bs, s->refcount_block_cache); qcrypto_block_free(s->crypto); qapi_free_QCryptoBlockOpenOptions(s->crypto_opts); return ret;
20,311
1
static void qemu_kvm_init_cpu_signals(CPUState *env) { int r; sigset_t set; struct sigaction sigact; memset(&sigact, 0, sizeof(sigact)); sigact.sa_handler = dummy_signal; sigaction(SIG_IPI, &sigact, NULL); #ifdef CONFIG_IOTHREAD pthread_sigmask(SIG_BLOCK, NULL, &set); sigdelset(&set, SIG_IPI); sigdelset(&set, SIGBUS); r = kvm_set_signal_mask(env, &set); if (r) { fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r)); exit(1); } #else sigemptyset(&set); sigaddset(&set, SIG_IPI); sigaddset(&set, SIGIO); sigaddset(&set, SIGALRM); pthread_sigmask(SIG_BLOCK, &set, NULL); pthread_sigmask(SIG_BLOCK, NULL, &set); sigdelset(&set, SIGIO); sigdelset(&set, SIGALRM); #endif sigdelset(&set, SIG_IPI); sigdelset(&set, SIGBUS); r = kvm_set_signal_mask(env, &set); if (r) { fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r)); exit(1); } }
20,312
1
static inline void vmxnet3_ring_write_curr_cell(Vmxnet3Ring *ring, void *buff) { vmw_shmem_write(vmxnet3_ring_curr_cell_pa(ring), buff, ring->cell_size); }
20,313
1
static uint16_t roundToInt16(int64_t f){ int r= (f + (1<<15))>>16; if(r<-0x7FFF) return 0x8000; else if(r> 0x7FFF) return 0x7FFF; else return r; }
20,314
1
static int decode_band_hdr(IVI4DecContext *ctx, IVIBandDesc *band, AVCodecContext *avctx) { int plane, band_num, indx, transform_id, scan_indx; int i; int quant_mat; plane = get_bits(&ctx->gb, 2); band_num = get_bits(&ctx->gb, 4); if (band->plane != plane || band->band_num != band_num) { av_log(avctx, AV_LOG_ERROR, "Invalid band header sequence!\n"); return AVERROR_INVALIDDATA; } band->is_empty = get_bits1(&ctx->gb); if (!band->is_empty) { /* skip header size * If header size is not given, header size is 4 bytes. */ if (get_bits1(&ctx->gb)) skip_bits(&ctx->gb, 16); band->is_halfpel = get_bits(&ctx->gb, 2); if (band->is_halfpel >= 2) { av_log(avctx, AV_LOG_ERROR, "Invalid/unsupported mv resolution: %d!\n", band->is_halfpel); return AVERROR_INVALIDDATA; } #if IVI4_STREAM_ANALYSER if (!band->is_halfpel) ctx->uses_fullpel = 1; #endif band->checksum_present = get_bits1(&ctx->gb); if (band->checksum_present) band->checksum = get_bits(&ctx->gb, 16); indx = get_bits(&ctx->gb, 2); if (indx == 3) { av_log(avctx, AV_LOG_ERROR, "Invalid block size!\n"); return AVERROR_INVALIDDATA; } band->mb_size = 16 >> indx; band->blk_size = 8 >> (indx >> 1); band->inherit_mv = get_bits1(&ctx->gb); band->inherit_qdelta = get_bits1(&ctx->gb); band->glob_quant = get_bits(&ctx->gb, 5); if (!get_bits1(&ctx->gb) || ctx->frame_type == FRAMETYPE_INTRA) { transform_id = get_bits(&ctx->gb, 5); if (transform_id >= FF_ARRAY_ELEMS(transforms) || !transforms[transform_id].inv_trans) { av_log_ask_for_sample(avctx, "Unimplemented transform: %d!\n", transform_id); return AVERROR_PATCHWELCOME; } if ((transform_id >= 7 && transform_id <= 9) || transform_id == 17) { av_log_ask_for_sample(avctx, "DCT transform not supported yet!\n"); return AVERROR_PATCHWELCOME; } if (transform_id < 10 && band->blk_size < 8) { av_log(avctx, AV_LOG_ERROR, "wrong transform size!\n"); return AVERROR_INVALIDDATA; } #if IVI4_STREAM_ANALYSER if ((transform_id >= 0 && transform_id <= 2) || transform_id == 10) ctx->uses_haar = 1; #endif band->inv_transform = transforms[transform_id].inv_trans; band->dc_transform = transforms[transform_id].dc_trans; band->is_2d_trans = transforms[transform_id].is_2d_trans; band->transform_size= (transform_id < 10) ? 8 : 4; scan_indx = get_bits(&ctx->gb, 4); if ((scan_indx>4 && scan_indx<10) != (band->blk_size==4)) { av_log(avctx, AV_LOG_ERROR, "mismatching scan table!\n"); return AVERROR_INVALIDDATA; } if (scan_indx == 15) { av_log(avctx, AV_LOG_ERROR, "Custom scan pattern encountered!\n"); return AVERROR_INVALIDDATA; } band->scan = scan_index_to_tab[scan_indx]; quant_mat = get_bits(&ctx->gb, 5); if (quant_mat == 31) { av_log(avctx, AV_LOG_ERROR, "Custom quant matrix encountered!\n"); return AVERROR_INVALIDDATA; } if (quant_mat > 21) { av_log(avctx, AV_LOG_ERROR, "Invalid quant matrix encountered!\n"); return AVERROR_INVALIDDATA; } band->quant_mat = quant_mat; } /* decode block huffman codebook */ if (ff_ivi_dec_huff_desc(&ctx->gb, get_bits1(&ctx->gb), IVI_BLK_HUFF, &band->blk_vlc, avctx)) return AVERROR_INVALIDDATA; /* select appropriate rvmap table for this band */ band->rvmap_sel = get_bits1(&ctx->gb) ? 
get_bits(&ctx->gb, 3) : 8; /* decode rvmap probability corrections if any */ band->num_corr = 0; /* there is no corrections */ if (get_bits1(&ctx->gb)) { band->num_corr = get_bits(&ctx->gb, 8); /* get number of correction pairs */ if (band->num_corr > 61) { av_log(avctx, AV_LOG_ERROR, "Too many corrections: %d\n", band->num_corr); return AVERROR_INVALIDDATA; } /* read correction pairs */ for (i = 0; i < band->num_corr * 2; i++) band->corr[i] = get_bits(&ctx->gb, 8); } } if (band->blk_size == 8) { band->intra_base = &ivi4_quant_8x8_intra[quant_index_to_tab[band->quant_mat]][0]; band->inter_base = &ivi4_quant_8x8_inter[quant_index_to_tab[band->quant_mat]][0]; } else { band->intra_base = &ivi4_quant_4x4_intra[quant_index_to_tab[band->quant_mat]][0]; band->inter_base = &ivi4_quant_4x4_inter[quant_index_to_tab[band->quant_mat]][0]; } /* Indeo 4 doesn't use scale tables */ band->intra_scale = NULL; band->inter_scale = NULL; align_get_bits(&ctx->gb); if (!band->scan) { av_log(avctx, AV_LOG_ERROR, "band->scan not set\n"); return AVERROR_INVALIDDATA; } return 0; }
20,315
1
static void scale_coefficients(AC3EncodeContext *s) { /* scaling/conversion is obviously not needed for the fixed-point encoder since the coefficients are already fixed-point. */ return; }
20,316
1
static int decode_dds1(uint8_t *frame, int width, int height, const uint8_t *src, const uint8_t *src_end) { const uint8_t *frame_start = frame; const uint8_t *frame_end = frame + width * height; int mask = 0x10000, bitbuf = 0; int i, v, offset, count, segments; segments = bytestream_get_le16(&src); while (segments--) { if (mask == 0x10000) { if (src >= src_end) return -1; bitbuf = bytestream_get_le16(&src); mask = 1; } if (src_end - src < 2 || frame_end - frame < 2) return -1; if (bitbuf & mask) { v = bytestream_get_le16(&src); offset = (v & 0x1FFF) << 2; count = ((v >> 13) + 2) << 1; if (frame - frame_start < offset || frame_end - frame < count*2 + width) return -1; for (i = 0; i < count; i++) { frame[0] = frame[1] = frame[width] = frame[width + 1] = frame[-offset]; frame += 2; } } else if (bitbuf & (mask << 1)) { frame += bytestream_get_le16(&src) * 2; } else { frame[0] = frame[1] = frame[width] = frame[width + 1] = *src++; frame += 2; frame[0] = frame[1] = frame[width] = frame[width + 1] = *src++; frame += 2; } mask <<= 2; } return 0; }
20,317
1
static void gen_test_cc(int cc, int label) { TCGv tmp; TCGv tmp2; int inv; switch (cc) { case 0: /* eq: Z */ tmp = load_cpu_field(ZF); tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label); break; case 1: /* ne: !Z */ tmp = load_cpu_field(ZF); tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label); break; case 2: /* cs: C */ tmp = load_cpu_field(CF); tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label); break; case 3: /* cc: !C */ tmp = load_cpu_field(CF); tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label); break; case 4: /* mi: N */ tmp = load_cpu_field(NF); tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label); break; case 5: /* pl: !N */ tmp = load_cpu_field(NF); tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label); break; case 6: /* vs: V */ tmp = load_cpu_field(VF); tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label); break; case 7: /* vc: !V */ tmp = load_cpu_field(VF); tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label); break; case 8: /* hi: C && !Z */ inv = gen_new_label(); tmp = load_cpu_field(CF); tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv); dead_tmp(tmp); tmp = load_cpu_field(ZF); tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label); gen_set_label(inv); break; case 9: /* ls: !C || Z */ tmp = load_cpu_field(CF); tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label); dead_tmp(tmp); tmp = load_cpu_field(ZF); tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label); break; case 10: /* ge: N == V -> N ^ V == 0 */ tmp = load_cpu_field(VF); tmp2 = load_cpu_field(NF); tcg_gen_xor_i32(tmp, tmp, tmp2); dead_tmp(tmp2); tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label); break; case 11: /* lt: N != V -> N ^ V != 0 */ tmp = load_cpu_field(VF); tmp2 = load_cpu_field(NF); tcg_gen_xor_i32(tmp, tmp, tmp2); dead_tmp(tmp2); tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label); break; case 12: /* gt: !Z && N == V */ inv = gen_new_label(); tmp = load_cpu_field(ZF); tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv); dead_tmp(tmp); tmp = load_cpu_field(VF); tmp2 = load_cpu_field(NF); tcg_gen_xor_i32(tmp, tmp, tmp2); dead_tmp(tmp2); tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label); gen_set_label(inv); break; case 13: /* le: Z || N != V */ tmp = load_cpu_field(ZF); tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label); dead_tmp(tmp); tmp = load_cpu_field(VF); tmp2 = load_cpu_field(NF); tcg_gen_xor_i32(tmp, tmp, tmp2); dead_tmp(tmp2); tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label); break; default: fprintf(stderr, "Bad condition code 0x%x\n", cc); abort(); } dead_tmp(tmp); }
20,318
1
static int init_input_threads(void) { int i, ret; if (nb_input_files == 1) return 0; for (i = 0; i < nb_input_files; i++) { InputFile *f = input_files[i]; if (f->ctx->pb ? !f->ctx->pb->seekable : strcmp(f->ctx->iformat->name, "lavfi")) f->non_blocking = 1; ret = av_thread_message_queue_alloc(&f->in_thread_queue, 8, sizeof(AVPacket)); if (ret < 0) return ret; if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) return AVERROR(ret); } return 0; }
20,319
0
static const MXFCodecUL *mxf_get_essence_container_ul(enum CodecID type) { const MXFCodecUL *uls = ff_mxf_essence_container_uls; while (uls->id != CODEC_ID_NONE) { if (uls->id == type) break; uls++; } return uls; }
20,322
0
static int amovie_request_frame(AVFilterLink *outlink) { MovieContext *movie = outlink->src->priv; int ret; if (movie->is_done) return AVERROR_EOF; do { if ((ret = amovie_get_samples(outlink)) < 0) return ret; } while (!movie->samplesref); ff_filter_samples(outlink, avfilter_ref_buffer(movie->samplesref, ~0)); avfilter_unref_buffer(movie->samplesref); movie->samplesref = NULL; return 0; }
20,323
0
static av_cold int pcm_decode_init(AVCodecContext *avctx) { PCMDecode *s = avctx->priv_data; int i; if (avctx->channels <= 0) { av_log(avctx, AV_LOG_ERROR, "PCM channels out of bounds\n"); return AVERROR(EINVAL); } switch (avctx->codec->id) { case AV_CODEC_ID_PCM_ALAW: for (i = 0; i < 256; i++) s->table[i] = alaw2linear(i); break; case AV_CODEC_ID_PCM_MULAW: for (i = 0; i < 256; i++) s->table[i] = ulaw2linear(i); break; default: break; } avctx->sample_fmt = avctx->codec->sample_fmts[0]; if (avctx->sample_fmt == AV_SAMPLE_FMT_S32) avctx->bits_per_raw_sample = av_get_bits_per_sample(avctx->codec->id); avcodec_get_frame_defaults(&s->frame); avctx->coded_frame = &s->frame; return 0; }
20,324
0
static av_cold int tta_decode_init(AVCodecContext * avctx) { TTAContext *s = avctx->priv_data; int total_frames; s->avctx = avctx; // 30bytes includes TTA1 header if (avctx->extradata_size < 22) return AVERROR_INVALIDDATA; init_get_bits(&s->gb, avctx->extradata, avctx->extradata_size * 8); if (show_bits_long(&s->gb, 32) == AV_RL32("TTA1")) { if (avctx->err_recognition & AV_EF_CRCCHECK) { s->crc_table = av_crc_get_table(AV_CRC_32_IEEE_LE); tta_check_crc(s, avctx->extradata, 18); } /* signature */ skip_bits_long(&s->gb, 32); s->format = get_bits(&s->gb, 16); if (s->format > 2) { av_log(avctx, AV_LOG_ERROR, "Invalid format\n"); return AVERROR_INVALIDDATA; } if (s->format == FORMAT_ENCRYPTED) { if (!s->pass) { av_log(avctx, AV_LOG_ERROR, "Missing password for encrypted stream. Please use the -password option\n"); return AVERROR(EINVAL); } AV_WL64(s->crc_pass, tta_check_crc64(s->pass)); } avctx->channels = s->channels = get_bits(&s->gb, 16); if (s->channels > 1 && s->channels < 9) avctx->channel_layout = tta_channel_layouts[s->channels-2]; avctx->bits_per_raw_sample = get_bits(&s->gb, 16); s->bps = (avctx->bits_per_raw_sample + 7) / 8; avctx->sample_rate = get_bits_long(&s->gb, 32); s->data_length = get_bits_long(&s->gb, 32); skip_bits_long(&s->gb, 32); // CRC32 of header if (s->channels == 0) { av_log(avctx, AV_LOG_ERROR, "Invalid number of channels\n"); return AVERROR_INVALIDDATA; } else if (avctx->sample_rate == 0) { av_log(avctx, AV_LOG_ERROR, "Invalid samplerate\n"); return AVERROR_INVALIDDATA; } switch(s->bps) { case 1: avctx->sample_fmt = AV_SAMPLE_FMT_U8; break; case 2: avctx->sample_fmt = AV_SAMPLE_FMT_S16; break; case 3: avctx->sample_fmt = AV_SAMPLE_FMT_S32; break; //case 4: avctx->sample_fmt = AV_SAMPLE_FMT_S32; break; default: av_log(avctx, AV_LOG_ERROR, "Invalid/unsupported sample format.\n"); return AVERROR_INVALIDDATA; } // prevent overflow if (avctx->sample_rate > 0x7FFFFFu) { av_log(avctx, AV_LOG_ERROR, "sample_rate too large\n"); return AVERROR(EINVAL); } s->frame_length = 256 * avctx->sample_rate / 245; s->last_frame_length = s->data_length % s->frame_length; total_frames = s->data_length / s->frame_length + (s->last_frame_length ? 1 : 0); av_log(avctx, AV_LOG_DEBUG, "format: %d chans: %d bps: %d rate: %d block: %d\n", s->format, avctx->channels, avctx->bits_per_coded_sample, avctx->sample_rate, avctx->block_align); av_log(avctx, AV_LOG_DEBUG, "data_length: %d frame_length: %d last: %d total: %d\n", s->data_length, s->frame_length, s->last_frame_length, total_frames); if(s->frame_length >= UINT_MAX / (s->channels * sizeof(int32_t))){ av_log(avctx, AV_LOG_ERROR, "frame_length too large\n"); return AVERROR_INVALIDDATA; } if (s->bps < 3) { s->decode_buffer = av_mallocz(sizeof(int32_t)*s->frame_length*s->channels); if (!s->decode_buffer) return AVERROR(ENOMEM); } else s->decode_buffer = NULL; s->ch_ctx = av_malloc(avctx->channels * sizeof(*s->ch_ctx)); if (!s->ch_ctx) { av_freep(&s->decode_buffer); return AVERROR(ENOMEM); } } else { av_log(avctx, AV_LOG_ERROR, "Wrong extradata present\n"); return AVERROR_INVALIDDATA; } return 0; }
20,325
0
static void vda_h264_uninit(AVCodecContext *avctx) { VDAContext *vda = avctx->internal->priv_data; av_freep(&vda->bitstream); }
20,326
1
int vhost_dev_init(struct vhost_dev *hdev, void *opaque, VhostBackendType backend_type, uint32_t busyloop_timeout) { uint64_t features; int i, r; hdev->migration_blocker = NULL; r = vhost_set_backend_type(hdev, backend_type); assert(r >= 0); r = hdev->vhost_ops->vhost_backend_init(hdev, opaque); if (r < 0) { goto fail; } if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) { fprintf(stderr, "vhost backend memory slots limit is less" " than current number of present memory slots\n"); r = -1; goto fail; } QLIST_INSERT_HEAD(&vhost_devices, hdev, entry); r = hdev->vhost_ops->vhost_set_owner(hdev); if (r < 0) { goto fail; } r = hdev->vhost_ops->vhost_get_features(hdev, &features); if (r < 0) { goto fail; } for (i = 0; i < hdev->nvqs; ++i) { r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i); if (r < 0) { goto fail_vq; } } if (busyloop_timeout) { for (i = 0; i < hdev->nvqs; ++i) { r = vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i, busyloop_timeout); if (r < 0) { goto fail_busyloop; } } } hdev->features = features; hdev->memory_listener = (MemoryListener) { .begin = vhost_begin, .commit = vhost_commit, .region_add = vhost_region_add, .region_del = vhost_region_del, .region_nop = vhost_region_nop, .log_start = vhost_log_start, .log_stop = vhost_log_stop, .log_sync = vhost_log_sync, .log_global_start = vhost_log_global_start, .log_global_stop = vhost_log_global_stop, .eventfd_add = vhost_eventfd_add, .eventfd_del = vhost_eventfd_del, .priority = 10 }; if (hdev->migration_blocker == NULL) { if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) { error_setg(&hdev->migration_blocker, "Migration disabled: vhost lacks VHOST_F_LOG_ALL feature."); } else if (!qemu_memfd_check()) { error_setg(&hdev->migration_blocker, "Migration disabled: failed to allocate shared memory"); } } if (hdev->migration_blocker != NULL) { migrate_add_blocker(hdev->migration_blocker); } hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions)); hdev->n_mem_sections = 0; hdev->mem_sections = NULL; hdev->log = NULL; hdev->log_size = 0; hdev->log_enabled = false; hdev->started = false; hdev->memory_changed = false; memory_listener_register(&hdev->memory_listener, &address_space_memory); return 0; fail_busyloop: while (--i >= 0) { vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i, 0); } i = hdev->nvqs; fail_vq: while (--i >= 0) { vhost_virtqueue_cleanup(hdev->vqs + i); } fail: r = -errno; hdev->vhost_ops->vhost_backend_cleanup(hdev); QLIST_REMOVE(hdev, entry); return r; }
20,329
1
static void cmv_process_header(CmvContext *s, const uint8_t *buf, const uint8_t *buf_end) { int pal_start, pal_count, i; if(buf+16>=buf_end) { av_log(s->avctx, AV_LOG_WARNING, "truncated header\n"); return; } s->width = AV_RL16(&buf[4]); s->height = AV_RL16(&buf[6]); if (s->avctx->width!=s->width || s->avctx->height!=s->height) avcodec_set_dimensions(s->avctx, s->width, s->height); s->avctx->time_base.num = 1; s->avctx->time_base.den = AV_RL16(&buf[10]); pal_start = AV_RL16(&buf[12]); pal_count = AV_RL16(&buf[14]); buf += 16; for (i=pal_start; i<pal_start+pal_count && i<AVPALETTE_COUNT && buf+2<buf_end; i++) { s->palette[i] = AV_RB24(buf); buf += 3; } }
20,330
1
static int usb_serial_initfn(USBDevice *dev) { USBSerialState *s = DO_UPCAST(USBSerialState, dev, dev); s->dev.speed = USB_SPEED_FULL; qemu_chr_add_handlers(s->cs, usb_serial_can_read, usb_serial_read, usb_serial_event, s); usb_serial_handle_reset(dev); return 0; }
20,331
1
static ssize_t vnc_client_write_tls(gnutls_session_t *session, const uint8_t *data, size_t datalen) { ssize_t ret = gnutls_write(*session, data, datalen); if (ret < 0) { if (ret == GNUTLS_E_AGAIN) { errno = EAGAIN; } else { errno = EIO; } ret = -1; } return ret; }
20,332
1
ivshmem_client_handle_server_msg(IvshmemClient *client) { IvshmemClientPeer *peer; long peer_id; int ret, fd; ret = ivshmem_client_read_one_msg(client, &peer_id, &fd); if (ret < 0) { /* can return a peer or the local client */ peer = ivshmem_client_search_peer(client, peer_id); /* delete peer */ if (fd == -1) { if (peer == NULL || peer == &client->local) { IVSHMEM_CLIENT_DEBUG(client, "receive delete for invalid " "peer %ld\n", peer_id); IVSHMEM_CLIENT_DEBUG(client, "delete peer id = %ld\n", peer_id); ivshmem_client_free_peer(client, peer); return 0; /* new peer */ if (peer == NULL) { peer = g_malloc0(sizeof(*peer)); peer->id = peer_id; peer->vectors_count = 0; QTAILQ_INSERT_TAIL(&client->peer_list, peer, next); IVSHMEM_CLIENT_DEBUG(client, "new peer id = %ld\n", peer_id); /* new vector */ IVSHMEM_CLIENT_DEBUG(client, " new vector %d (fd=%d) for peer id %ld\n", peer->vectors_count, fd, peer->id); peer->vectors[peer->vectors_count] = fd; peer->vectors_count++; return 0;
20,333
1
yuv2422_2_c_template(SwsContext *c, const int16_t *buf[2], const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf[2], uint8_t *dest, int dstW, int yalpha, int uvalpha, int y, enum PixelFormat target) { const int16_t *buf0 = buf[0], *buf1 = buf[1], *ubuf0 = ubuf[0], *ubuf1 = ubuf[1], *vbuf0 = vbuf[0], *vbuf1 = vbuf[1]; int yalpha1 = 4095 - yalpha; int uvalpha1 = 4095 - uvalpha; int i; for (i = 0; i < (dstW >> 1); i++) { int Y1 = (buf0[i * 2] * yalpha1 + buf1[i * 2] * yalpha) >> 19; int Y2 = (buf0[i * 2 + 1] * yalpha1 + buf1[i * 2 + 1] * yalpha) >> 19; int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha) >> 19; int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha) >> 19; output_pixels(i * 4, Y1, U, Y2, V); } }
20,334
1
static int vobsub_read_packet(AVFormatContext *s, AVPacket *pkt) { MpegDemuxContext *vobsub = s->priv_data; FFDemuxSubtitlesQueue *q; AVIOContext *pb = vobsub->sub_ctx->pb; int ret, psize, total_read = 0, i; AVPacket idx_pkt; int64_t min_ts = INT64_MAX; int sid = 0; for (i = 0; i < s->nb_streams; i++) { FFDemuxSubtitlesQueue *tmpq = &vobsub->q[i]; int64_t ts = tmpq->subs[tmpq->current_sub_idx].pts; if (ts < min_ts) { min_ts = ts; sid = i; } } q = &vobsub->q[sid]; ret = ff_subtitles_queue_read_packet(q, &idx_pkt); if (ret < 0) return ret; /* compute maximum packet size using the next packet position. This is * useful when the len in the header is non-sense */ if (q->current_sub_idx < q->nb_subs) { psize = q->subs[q->current_sub_idx].pos - idx_pkt.pos; } else { int64_t fsize = avio_size(pb); psize = fsize < 0 ? 0xffff : fsize - idx_pkt.pos; } avio_seek(pb, idx_pkt.pos, SEEK_SET); av_init_packet(pkt); pkt->size = 0; pkt->data = NULL; do { int n, to_read, startcode; int64_t pts, dts; int64_t old_pos = avio_tell(pb), new_pos; int pkt_size; ret = mpegps_read_pes_header(vobsub->sub_ctx, NULL, &startcode, &pts, &dts); if (ret < 0) { if (pkt->size) // raise packet even if incomplete break; goto fail; } to_read = ret & 0xffff; new_pos = avio_tell(pb); pkt_size = ret + (new_pos - old_pos); /* this prevents reads above the current packet */ if (total_read + pkt_size > psize) break; total_read += pkt_size; /* the current chunk doesn't match the stream index (unlikely) */ if ((startcode & 0x1f) != idx_pkt.stream_index) break; ret = av_grow_packet(pkt, to_read); if (ret < 0) goto fail; n = avio_read(pb, pkt->data + (pkt->size - to_read), to_read); if (n < to_read) pkt->size -= to_read - n; } while (total_read < psize); pkt->pts = pkt->dts = idx_pkt.pts; pkt->pos = idx_pkt.pos; pkt->stream_index = idx_pkt.stream_index; av_free_packet(&idx_pkt); return 0; fail: av_free_packet(pkt); av_free_packet(&idx_pkt); return ret; }
20,335
1
static int dxtory_decode_v1_410(AVCodecContext *avctx, AVFrame *pic, const uint8_t *src, int src_size) { int h, w; uint8_t *Y1, *Y2, *Y3, *Y4, *U, *V; int ret; if (src_size < avctx->width * avctx->height * 9L / 8) { av_log(avctx, AV_LOG_ERROR, "packet too small\n"); return AVERROR_INVALIDDATA; } avctx->pix_fmt = AV_PIX_FMT_YUV410P; if ((ret = ff_get_buffer(avctx, pic, 0)) < 0) return ret; Y1 = pic->data[0]; Y2 = pic->data[0] + pic->linesize[0]; Y3 = pic->data[0] + pic->linesize[0] * 2; Y4 = pic->data[0] + pic->linesize[0] * 3; U = pic->data[1]; V = pic->data[2]; for (h = 0; h < avctx->height; h += 4) { for (w = 0; w < avctx->width; w += 4) { AV_COPY32(Y1 + w, src); AV_COPY32(Y2 + w, src + 4); AV_COPY32(Y3 + w, src + 8); AV_COPY32(Y4 + w, src + 12); U[w >> 2] = src[16] + 0x80; V[w >> 2] = src[17] + 0x80; src += 18; } Y1 += pic->linesize[0] << 2; Y2 += pic->linesize[0] << 2; Y3 += pic->linesize[0] << 2; Y4 += pic->linesize[0] << 2; U += pic->linesize[1]; V += pic->linesize[2]; } return 0; }
20,336
0
static void dnxhd_decode_dct_block(DNXHDContext *ctx, DCTELEM *block, int n, int qscale) { int i, j, index, index2; int level, component, sign; const uint8_t *weigth_matrix; if (n&2) { component = 1 + (n&1); weigth_matrix = ctx->cid_table->chroma_weigth; } else { component = 0; weigth_matrix = ctx->cid_table->luma_weigth; } ctx->last_dc[component] += dnxhd_decode_dc(ctx); block[0] = ctx->last_dc[component]; //av_log(ctx->avctx, AV_LOG_DEBUG, "dc %d\n", block[0]); for (i = 1; ; i++) { index = get_vlc2(&ctx->gb, ctx->ac_vlc.table, DNXHD_VLC_BITS, 2); //av_log(ctx->avctx, AV_LOG_DEBUG, "index %d\n", index); level = ctx->cid_table->ac_level[index]; if (!level) { /* EOB */ //av_log(ctx->avctx, AV_LOG_DEBUG, "EOB\n"); return; } sign = get_sbits(&ctx->gb, 1); if (ctx->cid_table->ac_index_flag[index]) { level += get_bits(&ctx->gb, ctx->cid_table->index_bits)<<6; } if (ctx->cid_table->ac_run_flag[index]) { index2 = get_vlc2(&ctx->gb, ctx->run_vlc.table, DNXHD_VLC_BITS, 2); i += ctx->cid_table->run[index2]; } j = ctx->scantable.permutated[i]; //av_log(ctx->avctx, AV_LOG_DEBUG, "j %d\n", j); //av_log(ctx->avctx, AV_LOG_DEBUG, "level %d, weigth %d\n", level, weigth_matrix[i]); level = (2*level+1) * qscale * weigth_matrix[i]; if (weigth_matrix[i] != 32) // FIXME 10bit level += 32; level >>= 6; level = (level^sign) - sign; if (i > 63) { av_log(ctx->avctx, AV_LOG_ERROR, "ac tex damaged %d, %d\n", n, i); return; } //av_log(NULL, AV_LOG_DEBUG, "i %d, j %d, end level %d\n", i, j, level); block[j] = level; } }
20,337
0
const char *avutil_configuration(void) { return FFMPEG_CONFIGURATION; }
20,339
1
static void i8042_class_initfn(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = i8042_realizefn; dc->no_user = 1; dc->vmsd = &vmstate_kbd_isa; }
20,340
1
int vp78_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt, int is_vp7) { VP8Context *s = avctx->priv_data; int ret, i, referenced, num_jobs; enum AVDiscard skip_thresh; VP8Frame *av_uninit(curframe), *prev_frame; if (is_vp7) ret = vp7_decode_frame_header(s, avpkt->data, avpkt->size); else ret = vp8_decode_frame_header(s, avpkt->data, avpkt->size); if (ret < 0) goto err; prev_frame = s->framep[VP56_FRAME_CURRENT]; referenced = s->update_last || s->update_golden == VP56_FRAME_CURRENT || s->update_altref == VP56_FRAME_CURRENT; skip_thresh = !referenced ? AVDISCARD_NONREF : !s->keyframe ? AVDISCARD_NONKEY : AVDISCARD_ALL; if (avctx->skip_frame >= skip_thresh) { s->invisible = 1; memcpy(&s->next_framep[0], &s->framep[0], sizeof(s->framep[0]) * 4); goto skip_decode; } s->deblock_filter = s->filter.level && avctx->skip_loop_filter < skip_thresh; // release no longer referenced frames for (i = 0; i < 5; i++) if (s->frames[i].tf.f->data[0] && &s->frames[i] != prev_frame && &s->frames[i] != s->framep[VP56_FRAME_PREVIOUS] && &s->frames[i] != s->framep[VP56_FRAME_GOLDEN] && &s->frames[i] != s->framep[VP56_FRAME_GOLDEN2]) vp8_release_frame(s, &s->frames[i]); curframe = s->framep[VP56_FRAME_CURRENT] = vp8_find_free_buffer(s); if (!s->colorspace) avctx->colorspace = AVCOL_SPC_BT470BG; if (s->fullrange) avctx->color_range = AVCOL_RANGE_JPEG; else avctx->color_range = AVCOL_RANGE_MPEG; /* Given that arithmetic probabilities are updated every frame, it's quite * likely that the values we have on a random interframe are complete * junk if we didn't start decode on a keyframe. So just don't display * anything rather than junk. */ if (!s->keyframe && (!s->framep[VP56_FRAME_PREVIOUS] || !s->framep[VP56_FRAME_GOLDEN] || !s->framep[VP56_FRAME_GOLDEN2])) { av_log(avctx, AV_LOG_WARNING, "Discarding interframe without a prior keyframe!\n"); ret = AVERROR_INVALIDDATA; goto err; } curframe->tf.f->key_frame = s->keyframe; curframe->tf.f->pict_type = s->keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P; if ((ret = vp8_alloc_frame(s, curframe, referenced)) < 0) goto err; // check if golden and altref are swapped if (s->update_altref != VP56_FRAME_NONE) s->next_framep[VP56_FRAME_GOLDEN2] = s->framep[s->update_altref]; else s->next_framep[VP56_FRAME_GOLDEN2] = s->framep[VP56_FRAME_GOLDEN2]; if (s->update_golden != VP56_FRAME_NONE) s->next_framep[VP56_FRAME_GOLDEN] = s->framep[s->update_golden]; else s->next_framep[VP56_FRAME_GOLDEN] = s->framep[VP56_FRAME_GOLDEN]; if (s->update_last) s->next_framep[VP56_FRAME_PREVIOUS] = curframe; else s->next_framep[VP56_FRAME_PREVIOUS] = s->framep[VP56_FRAME_PREVIOUS]; s->next_framep[VP56_FRAME_CURRENT] = curframe; if (avctx->codec->update_thread_context) ff_thread_finish_setup(avctx); s->linesize = curframe->tf.f->linesize[0]; s->uvlinesize = curframe->tf.f->linesize[1]; memset(s->top_nnz, 0, s->mb_width * sizeof(*s->top_nnz)); /* Zero macroblock structures for top/top-left prediction * from outside the frame. */ if (!s->mb_layout) memset(s->macroblocks + s->mb_height * 2 - 1, 0, (s->mb_width + 1) * sizeof(*s->macroblocks)); if (!s->mb_layout && s->keyframe) memset(s->intra4x4_pred_mode_top, DC_PRED, s->mb_width * 4); memset(s->ref_count, 0, sizeof(s->ref_count)); if (s->mb_layout == 1) { // Make sure the previous frame has read its segmentation map, // if we re-use the same map. 
if (prev_frame && s->segmentation.enabled && !s->segmentation.update_map) ff_thread_await_progress(&prev_frame->tf, 1, 0); if (is_vp7) vp7_decode_mv_mb_modes(avctx, curframe, prev_frame); else vp8_decode_mv_mb_modes(avctx, curframe, prev_frame); } if (avctx->active_thread_type == FF_THREAD_FRAME) num_jobs = 1; else num_jobs = FFMIN(s->num_coeff_partitions, avctx->thread_count); s->num_jobs = num_jobs; s->curframe = curframe; s->prev_frame = prev_frame; s->mv_min.y = -MARGIN; s->mv_max.y = ((s->mb_height - 1) << 6) + MARGIN; for (i = 0; i < MAX_THREADS; i++) { VP8ThreadData *td = &s->thread_data[i]; atomic_init(&td->thread_mb_pos, 0); atomic_init(&td->wait_mb_pos, INT_MAX); } if (is_vp7) avctx->execute2(avctx, vp7_decode_mb_row_sliced, s->thread_data, NULL, num_jobs); else avctx->execute2(avctx, vp8_decode_mb_row_sliced, s->thread_data, NULL, num_jobs); ff_thread_report_progress(&curframe->tf, INT_MAX, 0); memcpy(&s->framep[0], &s->next_framep[0], sizeof(s->framep[0]) * 4); skip_decode: // if future frames don't use the updated probabilities, // reset them to the values we saved if (!s->update_probabilities) s->prob[0] = s->prob[1]; if (!s->invisible) { if ((ret = av_frame_ref(data, curframe->tf.f)) < 0) return ret; *got_frame = 1; } return avpkt->size; err: memcpy(&s->next_framep[0], &s->framep[0], sizeof(s->framep[0]) * 4); return ret; }
20,341
1
void ff_bink_idct_put_c(uint8_t *dest, int linesize, DCTELEM *block) { int i; DCTELEM temp[64]; for (i = 0; i < 8; i++) bink_idct_col(&temp[i], &block[i]); for (i = 0; i < 8; i++) { IDCT_ROW( (&dest[i*linesize]), (&temp[8*i]) ); } }
20,342
1
static void qemu_rdma_signal_unregister(RDMAContext *rdma, uint64_t index, uint64_t chunk, uint64_t wr_id) { if (rdma->unregistrations[rdma->unregister_next] != 0) { fprintf(stderr, "rdma migration: queue is full!\n"); } else { RDMALocalBlock *block = &(rdma->local_ram_blocks.block[index]); if (!test_and_set_bit(chunk, block->unregister_bitmap)) { DDPRINTF("Appending unregister chunk %" PRIu64 " at position %d\n", chunk, rdma->unregister_next); rdma->unregistrations[rdma->unregister_next++] = qemu_rdma_make_wrid(wr_id, index, chunk); if (rdma->unregister_next == RDMA_SIGNALED_SEND_MAX) { rdma->unregister_next = 0; } } else { DDPRINTF("Unregister chunk %" PRIu64 " already in queue.\n", chunk); } } }
20,343
1
static void v9fs_unlinkat(void *opaque) { int err = 0; V9fsString name; int32_t dfid, flags; size_t offset = 7; V9fsPath path; V9fsFidState *dfidp; V9fsPDU *pdu = opaque; v9fs_string_init(&name); err = pdu_unmarshal(pdu, offset, "dsd", &dfid, &name, &flags); if (err < 0) { dfidp = get_fid(pdu, dfid); if (dfidp == NULL) { err = -EINVAL; /* * IF the file is unlinked, we cannot reopen * the file later. So don't reclaim fd */ v9fs_path_init(&path); err = v9fs_co_name_to_path(pdu, &dfidp->path, name.data, &path); if (err < 0) { goto out_err; err = v9fs_mark_fids_unreclaim(pdu, &path); if (err < 0) { goto out_err; err = v9fs_co_unlinkat(pdu, &dfidp->path, &name, flags); if (!err) { err = offset; out_err: put_fid(pdu, dfidp); v9fs_path_free(&path); out_nofid: pdu_complete(pdu, err); v9fs_string_free(&name);
20,344
1
static int parse_pci_devfn(DeviceState *dev, Property *prop, const char *str) { uint32_t *ptr = qdev_get_prop_ptr(dev, prop); unsigned int slot, fn, n; if (sscanf(str, "%x.%x%n", &slot, &fn, &n) != 2) { fn = 0; if (sscanf(str, "%x%n", &slot, &n) != 1) { return -EINVAL; } } if (str[n] != '\0') return -EINVAL; if (fn > 7) return -EINVAL; if (slot > 31) return -EINVAL; *ptr = slot << 3 | fn; return 0; }
20,346
1
static int nsv_read_header(AVFormatContext *s) { NSVContext *nsv = s->priv_data; int i, err; av_dlog(s, "%s()\n", __FUNCTION__); av_dlog(s, "filename '%s'\n", s->filename); nsv->state = NSV_UNSYNC; nsv->ahead[0].data = nsv->ahead[1].data = NULL; for (i = 0; i < NSV_MAX_RESYNC_TRIES; i++) { if (nsv_resync(s) < 0) return -1; if (nsv->state == NSV_FOUND_NSVF) err = nsv_parse_NSVf_header(s); /* we need the first NSVs also... */ if (nsv->state == NSV_FOUND_NSVS) { err = nsv_parse_NSVs_header(s); break; /* we just want the first one */ } } if (s->nb_streams < 1) /* no luck so far */ return -1; /* now read the first chunk, so we can attempt to decode more info */ err = nsv_read_chunk(s, 1); av_dlog(s, "parsed header\n"); return err; }
20,347
1
static void cd_read_sector_cb(void *opaque, int ret) { IDEState *s = opaque; block_acct_done(blk_get_stats(s->blk), &s->acct); #ifdef DEBUG_IDE_ATAPI printf("cd_read_sector_cb: lba=%d ret=%d\n", s->lba, ret); #endif if (ret < 0) { ide_atapi_io_error(s, ret); return; } if (s->cd_sector_size == 2352) { cd_data_to_raw(s->io_buffer, s->lba); } s->lba++; s->io_buffer_index = 0; s->status &= ~BUSY_STAT; ide_atapi_cmd_reply_end(s); }
20,348
1
static void vnc_tls_handshake_done(QIOTask *task, gpointer user_data) { VncState *vs = user_data; Error *err = NULL; if (qio_task_propagate_error(task, &err)) { VNC_DEBUG("Handshake failed %s\n", error_get_pretty(err)); vnc_client_error(vs); error_free(err); } else { vs->ioc_tag = qio_channel_add_watch( vs->ioc, G_IO_IN | G_IO_OUT, vnc_client_io, vs, NULL); start_auth_vencrypt_subauth(vs); } }
20,349
0
static int get_best_header(FLACParseContext* fpc, const uint8_t **poutbuf, int *poutbuf_size) { FLACHeaderMarker *header = fpc->best_header; FLACHeaderMarker *child = header->best_child; if (!child) { *poutbuf_size = av_fifo_size(fpc->fifo_buf) - header->offset; } else { *poutbuf_size = child->offset - header->offset; /* If the child has suspicious changes, log them */ check_header_mismatch(fpc, header, child, 0); } if (header->fi.channels != fpc->avctx->channels || (!fpc->avctx->channel_layout && header->fi.channels <= 6)) { fpc->avctx->channels = header->fi.channels; ff_flac_set_channel_layout(fpc->avctx); } fpc->avctx->sample_rate = header->fi.samplerate; fpc->pc->duration = header->fi.blocksize; *poutbuf = flac_fifo_read_wrap(fpc, header->offset, *poutbuf_size, &fpc->wrap_buf, &fpc->wrap_buf_allocated_size); fpc->best_header_valid = 0; /* Return the negative overread index so the client can compute pos. This should be the amount overread to the beginning of the child */ if (child) return child->offset - av_fifo_size(fpc->fifo_buf); return 0; }
20,350
0
static int decode_packet(int *got_frame, int cached) { int ret = 0; int decoded = pkt.size; *got_frame = 0; if (pkt.stream_index == video_stream_idx) { /* decode video frame */ ret = avcodec_decode_video2(video_dec_ctx, frame, got_frame, &pkt); if (ret < 0) { fprintf(stderr, "Error decoding video frame (%s)\n", av_err2str(ret)); return ret; } if (video_dec_ctx->width != width || video_dec_ctx->height != height || video_dec_ctx->pix_fmt != pix_fmt) { /* To handle this change, one could call av_image_alloc again and * decode the following frames into another rawvideo file. */ fprintf(stderr, "Error: Width, height and pixel format have to be " "constant in a rawvideo file, but the width, height or " "pixel format of the input video changed:\n" "old: width = %d, height = %d, format = %s\n" "new: width = %d, height = %d, format = %s\n", width, height, av_get_pix_fmt_name(pix_fmt), video_dec_ctx->width, video_dec_ctx->height, av_get_pix_fmt_name(video_dec_ctx->pix_fmt)); return -1; } if (*got_frame) { printf("video_frame%s n:%d coded_n:%d pts:%s\n", cached ? "(cached)" : "", video_frame_count++, frame->coded_picture_number, av_ts2timestr(frame->pts, &video_dec_ctx->time_base)); /* copy decoded frame to destination buffer: * this is required since rawvideo expects non aligned data */ av_image_copy(video_dst_data, video_dst_linesize, (const uint8_t **)(frame->data), frame->linesize, pix_fmt, width, height); /* write to rawvideo file */ fwrite(video_dst_data[0], 1, video_dst_bufsize, video_dst_file); } } else if (pkt.stream_index == audio_stream_idx) { /* decode audio frame */ ret = avcodec_decode_audio4(audio_dec_ctx, frame, got_frame, &pkt); if (ret < 0) { fprintf(stderr, "Error decoding audio frame (%s)\n", av_err2str(ret)); return ret; } /* Some audio decoders decode only part of the packet, and have to be * called again with the remainder of the packet data. * Sample: fate-suite/lossless-audio/luckynight-partial.shn * Also, some decoders might over-read the packet. */ decoded = FFMIN(ret, pkt.size); if (*got_frame) { size_t unpadded_linesize = frame->nb_samples * av_get_bytes_per_sample(frame->format); printf("audio_frame%s n:%d nb_samples:%d pts:%s\n", cached ? "(cached)" : "", audio_frame_count++, frame->nb_samples, av_ts2timestr(frame->pts, &audio_dec_ctx->time_base)); /* Write the raw audio data samples of the first plane. This works * fine for packed formats (e.g. AV_SAMPLE_FMT_S16). However, * most audio decoders output planar audio, which uses a separate * plane of audio samples for each channel (e.g. AV_SAMPLE_FMT_S16P). * In other words, this code will write only the first audio channel * in these cases. * You should use libswresample or libavfilter to convert the frame * to packed data. */ fwrite(frame->extended_data[0], 1, unpadded_linesize, audio_dst_file); } } /* If we use the new API with reference counting, we own the data and need * to de-reference it when we don't use it anymore */ if (*got_frame && api_mode == API_MODE_NEW_API_REF_COUNT) av_frame_unref(frame); return decoded; }
20,351
0
void ff_avg_h264_qpel16_mc00_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride) { avg_width16_msa(src, stride, dst, stride, 16); }
20,353
0
static inline int init_pfa_reindex_tabs(MDCT15Context *s) { int i, j; const int b_ptwo = s->ptwo_fft.nbits; /* Bits for the power of two FFTs */ const int l_ptwo = 1 << b_ptwo; /* Total length for the power of two FFTs */ const int inv_1 = l_ptwo << ((4 - b_ptwo) & 3); /* (2^b_ptwo)^-1 mod 15 */ const int inv_2 = 0xeeeeeeef & ((1U << b_ptwo) - 1); /* 15^-1 mod 2^b_ptwo */ s->pfa_prereindex = av_malloc(15 * l_ptwo * sizeof(*s->pfa_prereindex)); if (!s->pfa_prereindex) return 1; s->pfa_postreindex = av_malloc(15 * l_ptwo * sizeof(*s->pfa_postreindex)); if (!s->pfa_postreindex) return 1; /* Pre/Post-reindex */ for (i = 0; i < l_ptwo; i++) { for (j = 0; j < 15; j++) { const int q_pre = ((l_ptwo * j)/15 + i) >> b_ptwo; const int q_post = (((j*inv_1)/15) + (i*inv_2)) >> b_ptwo; const int k_pre = 15*i + ((j - q_pre*15) << b_ptwo); const int k_post = i*inv_2*15 + j*inv_1 - 15*q_post*l_ptwo; s->pfa_prereindex[i*15 + j] = k_pre; s->pfa_postreindex[k_post] = l_ptwo*j + i; } } return 0; }
20,354
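The PFA reindexing above hinges on two modular inverses named in its comments: `inv_1 = (2^b)^-1 mod 15` and `inv_2 = 15^-1 mod 2^b`, the latter obtained from the constant `0xeeeeeeef` masked to b bits. A small standalone check of those closed forms, written as a sketch rather than a reproduction of the MDCT code:

```c
#include <assert.h>
#include <stdio.h>

/* Brute-force modular inverse: smallest x with (a * x) % m == 1. */
static unsigned mod_inverse(unsigned a, unsigned m)
{
    for (unsigned x = 1; x < m; x++)
        if ((a * x) % m == 1)
            return x;
    return 0; /* no inverse: a and m not coprime */
}

int main(void)
{
    for (unsigned b = 1; b <= 8; b++) {
        unsigned l   = 1u << b;                 /* power-of-two FFT length */
        unsigned i15 = mod_inverse(l % 15, 15); /* (2^b)^-1 mod 15 */
        unsigned i2  = 0xeeeeeeefu & (l - 1);   /* 15^-1 mod 2^b, closed form */

        assert(((l % 15) * i15) % 15 == 1);
        assert((15u * i2) % l == 1);
        printf("b=%u  (2^b)^-1 mod 15 = %2u   15^-1 mod 2^b = %3u\n",
               b, i15, i2);
    }
    return 0;
}
```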
0
static void mxf_write_identification(AVFormatContext *s) { MXFContext *mxf = s->priv_data; AVIOContext *pb = s->pb; const char *company = "Libav"; const char *product = "OP1a Muxer"; const char *version; int length; mxf_write_metadata_key(pb, 0x013000); PRINT_KEY(s, "identification key", pb->buf_ptr - 16); version = s->streams[0]->codec->flags & CODEC_FLAG_BITEXACT ? "0.0.0" : AV_STRINGIFY(LIBAVFORMAT_VERSION); length = 84 + (strlen(company)+strlen(product)+strlen(version))*2; // utf-16 klv_encode_ber_length(pb, length); // write uid mxf_write_local_tag(pb, 16, 0x3C0A); mxf_write_uuid(pb, Identification, 0); PRINT_KEY(s, "identification uid", pb->buf_ptr - 16); // write generation uid mxf_write_local_tag(pb, 16, 0x3C09); mxf_write_uuid(pb, Identification, 1); mxf_write_local_tag_utf16(pb, 0x3C01, company); // Company Name mxf_write_local_tag_utf16(pb, 0x3C02, product); // Product Name mxf_write_local_tag_utf16(pb, 0x3C04, version); // Version String // write product uid mxf_write_local_tag(pb, 16, 0x3C05); mxf_write_uuid(pb, Identification, 2); // modification date mxf_write_local_tag(pb, 8, 0x3C06); avio_wb64(pb, mxf->timestamp); }
20,355
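The row above length-prefixes its metadata set with `klv_encode_ber_length()`. As a generic illustration of BER definite-length coding (short form below 0x80, otherwise a long form with a big-endian length), here is a minimal sketch; real KLV writers often emit a fixed-width long form instead, so treat this as an assumption-laden example rather than the muxer's exact behaviour.

```c
#include <stdint.h>
#include <stdio.h>

/* Encode a BER definite length into buf; returns the number of bytes written. */
static int ber_encode_length(uint8_t *buf, uint32_t len)
{
    if (len < 0x80) {           /* short form: one byte */
        buf[0] = (uint8_t)len;
        return 1;
    }
    int n = (len > 0xFFFFFF) ? 4 : (len > 0xFFFF) ? 3 : (len > 0xFF) ? 2 : 1;
    buf[0] = (uint8_t)(0x80 | n);               /* long form marker + count */
    for (int i = 0; i < n; i++)
        buf[1 + i] = (uint8_t)(len >> (8 * (n - 1 - i)));
    return 1 + n;
}

int main(void)
{
    uint8_t out[5];
    int n = ber_encode_length(out, 84);
    printf("short form: %d byte(s), first 0x%02x\n", n, out[0]);
    n = ber_encode_length(out, 0x1234);
    printf("long form:  %d byte(s): %02x %02x %02x\n", n, out[0], out[1], out[2]);
    return 0;
}
```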
0
static void test_visitor_out_native_list_uint32(TestOutputVisitorData *data, const void *unused) { test_native_list(data, unused, USER_DEF_NATIVE_LIST_UNION_KIND_U32); }
20,356
0
static void ahci_test_identify(AHCIQState *ahci) { RegD2HFIS *d2h = g_malloc0(0x20); RegD2HFIS *pio = g_malloc0(0x20); RegH2DFIS fis; AHCICommandHeader cmd; PRD prd; uint32_t reg, data_ptr; uint16_t buff[256]; unsigned i; int rc; uint8_t cx; uint64_t table; g_assert(ahci != NULL); /* We need to: * (1) Create a Command Table Buffer and update the Command List Slot #0 * to point to this buffer. * (2) Construct an FIS host-to-device command structure, and write it to * the top of the command table buffer. * (3) Create a data buffer for the IDENTIFY response to be sent to * (4) Create a Physical Region Descriptor that points to the data buffer, * and write it to the bottom (offset 0x80) of the command table. * (5) Now, PxCLB points to the command list, command 0 points to * our table, and our table contains an FIS instruction and a * PRD that points to our rx buffer. * (6) We inform the HBA via PxCI that there is a command ready in slot #0. */ /* Pick the first implemented and running port */ i = ahci_port_select(ahci); g_test_message("Selected port %u for test", i); /* Clear out the FIS Receive area and any pending interrupts. */ ahci_port_clear(ahci, i); /* Create a Command Table buffer. 0x80 is the smallest with a PRDTL of 0. */ /* We need at least one PRD, so round up to the nearest 0x80 multiple. */ table = ahci_alloc(ahci, CMD_TBL_SIZ(1)); g_assert(table); ASSERT_BIT_CLEAR(table, 0x7F); /* Create a data buffer ... where we will dump the IDENTIFY data to. */ data_ptr = ahci_alloc(ahci, 512); g_assert(data_ptr); /* pick a command slot (should be 0!) */ cx = ahci_pick_cmd(ahci, i); /* Construct our Command Header (set_command_header handles endianness.) */ memset(&cmd, 0x00, sizeof(cmd)); cmd.flags = 5; /* reg_h2d_fis is 5 double-words long */ cmd.flags |= 0x400; /* clear PxTFD.STS.BSY when done */ cmd.prdtl = 1; /* One PRD table entry. */ cmd.prdbc = 0; cmd.ctba = table; /* Construct our PRD, noting that DBC is 0-indexed. */ prd.dba = cpu_to_le64(data_ptr); prd.res = 0; /* 511+1 bytes, request DPS interrupt */ prd.dbc = cpu_to_le32(511 | 0x80000000); /* Construct our Command FIS, Based on http://wiki.osdev.org/AHCI */ memset(&fis, 0x00, sizeof(fis)); fis.fis_type = 0x27; /* Register Host-to-Device FIS */ fis.command = 0xEC; /* IDENTIFY */ fis.device = 0; fis.flags = 0x80; /* Indicate this is a command FIS */ /* We've committed nothing yet, no interrupts should be posted yet. */ g_assert_cmphex(ahci_px_rreg(ahci, i, AHCI_PX_IS), ==, 0); /* Commit the Command FIS to the Command Table */ memwrite(table, &fis, sizeof(fis)); /* Commit the PRD entry to the Command Table */ memwrite(table + 0x80, &prd, sizeof(prd)); /* Commit Command #cx, pointing to the Table, to the Command List Buffer. */ ahci_set_command_header(ahci, i, cx, &cmd); /* Everything is in place, but we haven't given the go-ahead yet, * so we should find that there are no pending interrupts yet. */ g_assert_cmphex(ahci_px_rreg(ahci, i, AHCI_PX_IS), ==, 0); /* Issue Command #cx via PxCI */ ahci_px_wreg(ahci, i, AHCI_PX_CI, (1 << cx)); while (BITSET(ahci_px_rreg(ahci, i, AHCI_PX_TFD), AHCI_PX_TFD_STS_BSY)) { usleep(50); } /* Check for expected interrupts */ reg = ahci_px_rreg(ahci, i, AHCI_PX_IS); ASSERT_BIT_SET(reg, AHCI_PX_IS_DHRS); ASSERT_BIT_SET(reg, AHCI_PX_IS_PSS); /* BUG: we expect AHCI_PX_IS_DPS to be set. */ ASSERT_BIT_CLEAR(reg, AHCI_PX_IS_DPS); /* Clear expected interrupts and assert all interrupts now cleared. */ ahci_px_wreg(ahci, i, AHCI_PX_IS, AHCI_PX_IS_DHRS | AHCI_PX_IS_PSS | AHCI_PX_IS_DPS); g_assert_cmphex(ahci_px_rreg(ahci, i, AHCI_PX_IS), ==, 0); /* Check for errors. */ reg = ahci_px_rreg(ahci, i, AHCI_PX_SERR); g_assert_cmphex(reg, ==, 0); reg = ahci_px_rreg(ahci, i, AHCI_PX_TFD); ASSERT_BIT_CLEAR(reg, AHCI_PX_TFD_STS_ERR); ASSERT_BIT_CLEAR(reg, AHCI_PX_TFD_ERR); /* Investigate the CMD, assert that we read 512 bytes */ ahci_get_command_header(ahci, i, cx, &cmd); g_assert_cmphex(512, ==, cmd.prdbc); /* Investigate FIS responses */ memread(ahci->port[i].fb + 0x20, pio, 0x20); memread(ahci->port[i].fb + 0x40, d2h, 0x20); g_assert_cmphex(pio->fis_type, ==, 0x5f); g_assert_cmphex(d2h->fis_type, ==, 0x34); g_assert_cmphex(pio->flags, ==, d2h->flags); g_assert_cmphex(pio->status, ==, d2h->status); g_assert_cmphex(pio->error, ==, d2h->error); reg = ahci_px_rreg(ahci, i, AHCI_PX_TFD); g_assert_cmphex((reg & AHCI_PX_TFD_ERR), ==, pio->error); g_assert_cmphex((reg & AHCI_PX_TFD_STS), ==, pio->status); /* The PIO Setup FIS contains a "bytes read" field, which is a * 16-bit value. The Physical Region Descriptor Byte Count is * 32-bit, but for small transfers using one PRD, it should match. */ g_assert_cmphex(le16_to_cpu(pio->res4), ==, cmd.prdbc); /* Last, but not least: Investigate the IDENTIFY response data. */ memread(data_ptr, &buff, 512); /* Check serial number/version in the buffer */ /* NB: IDENTIFY strings are packed in 16bit little endian chunks. * Since we copy byte-for-byte in ahci-test, on both LE and BE, we need to * unchunk this data. By contrast, ide-test copies 2 bytes at a time, and * as a consequence, only needs to unchunk the data on LE machines. */ string_bswap16(&buff[10], 20); rc = memcmp(&buff[10], "testdisk            ", 20); g_assert_cmphex(rc, ==, 0); string_bswap16(&buff[23], 8); rc = memcmp(&buff[23], "version ", 8); g_assert_cmphex(rc, ==, 0); g_free(d2h); g_free(pio); }
20,357
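The row above unchunks ATA IDENTIFY strings, which are stored as 16-bit words whose two bytes must be swapped to recover readable ASCII. A self-contained sketch of that fixup (the helper name here is illustrative; it mimics what a routine like `string_bswap16()` does to the buffer):

```c
#include <stdio.h>
#include <stddef.h>

/* Swap each byte pair in place to unpack an ATA IDENTIFY string. */
static void ata_string_fixup(char *s, size_t len)
{
    for (size_t i = 0; i + 1 < len; i += 2) {
        char tmp = s[i];
        s[i]     = s[i + 1];
        s[i + 1] = tmp;
    }
}

int main(void)
{
    /* "ettsidks" is "testdisk" with each byte pair swapped; padded to 20. */
    char serial[21] = "ettsidks            ";

    ata_string_fixup(serial, 20);
    printf("serial: '%.20s'\n", serial); /* prints "testdisk" plus padding */
    return 0;
}
```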
0
void opt_output_file(const char *filename) { AVStream *st; AVFormatContext *oc; int use_video, use_audio, nb_streams, input_has_video, input_has_audio; int codec_id; if (!strcmp(filename, "-")) filename = "pipe:"; oc = av_mallocz(sizeof(AVFormatContext)); if (!file_format) { file_format = guess_format(NULL, filename, NULL); if (!file_format) file_format = &mpeg_mux_format; } oc->format = file_format; if (!strcmp(file_format->name, "ffm") && strstart(filename, "http:", NULL)) { /* special case for files sent to ffserver: we get the stream parameters from ffserver */ if (read_ffserver_streams(oc, filename) < 0) { fprintf(stderr, "Could not read stream parameters from '%s'\n", filename); exit(1); } } else { use_video = file_format->video_codec != CODEC_ID_NONE; use_audio = file_format->audio_codec != CODEC_ID_NONE; /* disable if no corresponding type found */ check_audio_video_inputs(&input_has_video, &input_has_audio); if (!input_has_video) use_video = 0; if (!input_has_audio) use_audio = 0; /* manual disable */ if (audio_disable) { use_audio = 0; } if (video_disable) { use_video = 0; } nb_streams = 0; if (use_video) { AVCodecContext *video_enc; st = av_mallocz(sizeof(AVStream)); if (!st) { fprintf(stderr, "Could not alloc stream\n"); exit(1); } video_enc = &st->codec; codec_id = file_format->video_codec; if (video_codec_id != CODEC_ID_NONE) codec_id = video_codec_id; video_enc->codec_id = codec_id; video_enc->codec_type = CODEC_TYPE_VIDEO; video_enc->bit_rate = video_bit_rate; video_enc->frame_rate = frame_rate; video_enc->width = frame_width; video_enc->height = frame_height; if (!intra_only) video_enc->gop_size = gop_size; else video_enc->gop_size = 0; if (video_qscale || same_quality) { video_enc->flags |= CODEC_FLAG_QSCALE; video_enc->quality = video_qscale; } /* XXX: need to find a way to set codec parameters */ if (oc->format == &ppm_format || oc->format == &ppmpipe_format) { video_enc->pix_fmt = PIX_FMT_RGB24; } oc->streams[nb_streams] = st; nb_streams++; } if (use_audio) { AVCodecContext *audio_enc; st = av_mallocz(sizeof(AVStream)); if (!st) { fprintf(stderr, "Could not alloc stream\n"); exit(1); } audio_enc = &st->codec; codec_id = file_format->audio_codec; if (audio_codec_id != CODEC_ID_NONE) codec_id = audio_codec_id; audio_enc->codec_id = codec_id; audio_enc->codec_type = CODEC_TYPE_AUDIO; audio_enc->bit_rate = audio_bit_rate; audio_enc->sample_rate = audio_sample_rate; audio_enc->channels = audio_channels; oc->streams[nb_streams] = st; nb_streams++; } oc->nb_streams = nb_streams; if (!nb_streams) { fprintf(stderr, "No audio or video streams available\n"); exit(1); } if (str_title) nstrcpy(oc->title, sizeof(oc->title), str_title); if (str_author) nstrcpy(oc->author, sizeof(oc->author), str_author); if (str_copyright) nstrcpy(oc->copyright, sizeof(oc->copyright), str_copyright); if (str_comment) nstrcpy(oc->comment, sizeof(oc->comment), str_comment); } output_files[nb_output_files] = oc; /* dump the file content */ dump_format(oc, nb_output_files, filename, 1); nb_output_files++; strcpy(oc->filename, filename); /* check filename in case of an image number is expected */ if (oc->format->flags & AVFMT_NEEDNUMBER) { if (filename_number_test(oc->filename) < 0) exit(1); } if (!(oc->format->flags & AVFMT_NOFILE)) { /* test if it already exists to avoid loosing precious files */ if (!file_overwrite && (strchr(filename, ':') == NULL || strstart(filename, "file:", NULL))) { if (url_exist(filename)) { int c; printf("File '%s' already exists. Overwrite ? [y/N] ", filename); fflush(stdout); c = getchar(); if (toupper(c) != 'Y') { fprintf(stderr, "Not overwriting - exiting\n"); exit(1); } } } /* open the file */ if (url_fopen(&oc->pb, filename, URL_WRONLY) < 0) { fprintf(stderr, "Could not open '%s'\n", filename); exit(1); } } /* reset some options */ file_format = NULL; audio_disable = 0; video_disable = 0; audio_codec_id = CODEC_ID_NONE; video_codec_id = CODEC_ID_NONE; }
20,358
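The interactive overwrite check near the end of the row above is a common CLI pattern: prompt once on stdin and treat anything but 'y'/'Y' as a refusal. A minimal self-contained sketch of just that check; `confirm_overwrite()` is an illustrative name, not part of the original program.

```c
#include <ctype.h>
#include <stdio.h>

/* Ask once whether an existing file may be overwritten. */
static int confirm_overwrite(const char *filename)
{
    int c;
    printf("File '%s' already exists. Overwrite ? [y/N] ", filename);
    fflush(stdout);
    c = getchar();
    return toupper(c) == 'Y';
}

int main(void)
{
    if (!confirm_overwrite("out.mpg"))
        fprintf(stderr, "Not overwriting - exiting\n");
    return 0;
}
```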
0
int xen_hvm_init(void) { int i, rc; unsigned long ioreq_pfn; XenIOState *state; state = g_malloc0(sizeof (XenIOState)); state->xce_handle = xen_xc_evtchn_open(NULL, 0); if (state->xce_handle == XC_HANDLER_INITIAL_VALUE) { perror("xen: event channel open"); return -errno; } state->xenstore = xs_daemon_open(); if (state->xenstore == NULL) { perror("xen: xenstore open"); return -errno; } state->exit.notify = xen_exit_notifier; qemu_add_exit_notifier(&state->exit); state->suspend.notify = xen_suspend_notifier; qemu_register_suspend_notifier(&state->suspend); xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_IOREQ_PFN, &ioreq_pfn); DPRINTF("shared page at pfn %lx\n", ioreq_pfn); state->shared_page = xc_map_foreign_range(xen_xc, xen_domid, XC_PAGE_SIZE, PROT_READ|PROT_WRITE, ioreq_pfn); if (state->shared_page == NULL) { hw_error("map shared IO page returned error %d handle=" XC_INTERFACE_FMT, errno, xen_xc); } xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_BUFIOREQ_PFN, &ioreq_pfn); DPRINTF("buffered io page at pfn %lx\n", ioreq_pfn); state->buffered_io_page = xc_map_foreign_range(xen_xc, xen_domid, XC_PAGE_SIZE, PROT_READ|PROT_WRITE, ioreq_pfn); if (state->buffered_io_page == NULL) { hw_error("map buffered IO page returned error %d", errno); } state->ioreq_local_port = g_malloc0(smp_cpus * sizeof (evtchn_port_t)); /* FIXME: how about if we overflow the page here? */ for (i = 0; i < smp_cpus; i++) { rc = xc_evtchn_bind_interdomain(state->xce_handle, xen_domid, xen_vcpu_eport(state->shared_page, i)); if (rc == -1) { fprintf(stderr, "bind interdomain ioctl error %d\n", errno); return -1; } state->ioreq_local_port[i] = rc; } /* Init RAM management */ xen_map_cache_init(); xen_ram_init(ram_size); qemu_add_vm_change_state_handler(xen_hvm_change_state_handler, state); state->memory_listener = xen_memory_listener; QLIST_INIT(&state->physmap); memory_listener_register(&state->memory_listener); state->log_for_dirtybit = NULL; /* Initialize backend core & drivers */ if (xen_be_init() != 0) { fprintf(stderr, "%s: xen backend core setup failed\n", __FUNCTION__); exit(1); } xen_be_register("console", &xen_console_ops); xen_be_register("vkbd", &xen_kbdmouse_ops); xen_be_register("qdisk", &xen_blkdev_ops); xen_read_physmap(state); return 0; }
20,359
0
static void nic_receive(void *opaque, const uint8_t * buf, size_t size) { /* TODO: * - Magic packets should set bit 30 in power management driver register. * - Interesting packets should set bit 29 in power management driver register. */ EEPRO100State *s = opaque; uint16_t rfd_status = 0xa000; static const uint8_t broadcast_macaddr[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; /* TODO: check multiple IA bit. */ assert(!(s->configuration[20] & BIT(6))); if (s->configuration[8] & 0x80) { /* CSMA is disabled. */ logout("%p received while CSMA is disabled\n", s); return; } else if (size < 64 && (s->configuration[7] & 1)) { /* Short frame and configuration byte 7/0 (discard short receive) set: * Short frame is discarded */ logout("%p received short frame (%d byte)\n", s, size); s->statistics.rx_short_frame_errors++; //~ return; } else if ((size > MAX_ETH_FRAME_SIZE + 4) && !(s->configuration[18] & 8)) { /* Long frame and configuration byte 18/3 (long receive ok) not set: * Long frames are discarded. */ logout("%p received long frame (%d byte), ignored\n", s, size); return; } else if (memcmp(buf, s->macaddr, 6) == 0) { // !!! /* Frame matches individual address. */ /* TODO: check configuration byte 15/4 (ignore U/L). */ logout("%p received frame for me, len=%d\n", s, size); } else if (memcmp(buf, broadcast_macaddr, 6) == 0) { /* Broadcast frame. */ logout("%p received broadcast, len=%d\n", s, size); rfd_status |= 0x0002; } else if (buf[0] & 0x01) { // !!! /* Multicast frame. */ logout("%p received multicast, len=%d\n", s, size); /* TODO: check multicast all bit. */ assert(!(s->configuration[21] & BIT(3))); int mcast_idx = compute_mcast_idx(buf); if (!(s->mult[mcast_idx >> 3] & (1 << (mcast_idx & 7)))) { return; } rfd_status |= 0x0002; } else if (s->configuration[15] & 1) { /* Promiscuous: receive all. */ logout("%p received frame in promiscuous mode, len=%d\n", s, size); rfd_status |= 0x0004; } else { logout("%p received frame, ignored, len=%d,%s\n", s, size, nic_dump(buf, size)); return; } if (get_ru_state(s) != ru_ready) { /* No ressources available. */ logout("no ressources, state=%u\n", get_ru_state(s)); s->statistics.rx_resource_errors++; //~ assert(!"no ressources"); return; } //~ !!! //~ $3 = {status = 0x0, command = 0xc000, link = 0x2d220, rx_buf_addr = 0x207dc, count = 0x0, size = 0x5f8, packet = {0x0 <repeats 1518 times>}} eepro100_rx_t rx; cpu_physical_memory_read(s->ru_base + s->ru_offset, (uint8_t *) & rx, offsetof(eepro100_rx_t, packet)); uint16_t rfd_command = le16_to_cpu(rx.command); uint16_t rfd_size = le16_to_cpu(rx.size); assert(size <= rfd_size); if (size < 64) { rfd_status |= 0x0080; } logout("command 0x%04x, link 0x%08x, addr 0x%08x, size %u\n", rfd_command, rx.link, rx.rx_buf_addr, rfd_size); stw_phys(s->ru_base + s->ru_offset + offsetof(eepro100_rx_t, status), rfd_status); stw_phys(s->ru_base + s->ru_offset + offsetof(eepro100_rx_t, count), size); /* Early receive interrupt not supported. */ //~ eepro100_er_interrupt(s); /* Receive CRC Transfer not supported. */ assert(!(s->configuration[18] & 4)); /* TODO: check stripping enable bit. */ //~ assert(!(s->configuration[17] & 1)); cpu_physical_memory_write(s->ru_base + s->ru_offset + offsetof(eepro100_rx_t, packet), buf, size); s->statistics.rx_good_frames++; eepro100_fr_interrupt(s); s->ru_offset = le32_to_cpu(rx.link); if (rfd_command & 0x8000) { /* EL bit is set, so this was the last frame. */ assert(0); } if (rfd_command & 0x4000) { /* S bit is set. */ set_ru_state(s, ru_suspended); } }
20,360
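The multicast path in the row above hashes the destination MAC to an index and tests one bit in an 8-byte filter (`mult[idx >> 3] & (1 << (idx & 7))`). The sketch below shows that bit-filter pattern in isolation; the hash is a deliberately simple placeholder, not the device's real `compute_mcast_idx()`.

```c
#include <stdint.h>
#include <stdio.h>

/* Placeholder hash: maps a MAC address to one of 64 filter bits. */
static unsigned toy_mcast_hash(const uint8_t mac[6])
{
    unsigned h = 0;
    for (int i = 0; i < 6; i++)
        h = h * 31 + mac[i];
    return h & 0x3f;
}

/* Accept the frame only if the bit selected by the hash is set. */
static int mcast_filter_match(const uint8_t filter[8], const uint8_t mac[6])
{
    unsigned idx = toy_mcast_hash(mac);
    return (filter[idx >> 3] >> (idx & 7)) & 1;
}

int main(void)
{
    uint8_t filter[8] = { 0 };
    const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
    unsigned idx = toy_mcast_hash(mac);

    filter[idx >> 3] |= (uint8_t)(1 << (idx & 7)); /* program the filter bit */
    printf("match: %d\n", mcast_filter_match(filter, mac));
    return 0;
}
```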
0
static inline void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1, TCGReg arg2, int label_index, int cmp4) { TCGLabel *l = &s->labels[label_index]; uint64_t imm; /* We pay attention here to not modify the branch target by reading the existing value and using it again. This ensures that caches and memory are kept coherent during retranslation. */ if (l->has_value) { imm = l->u.value_ptr - s->code_ptr; } else { imm = get_reloc_pcrel21b_slot2(s->code_ptr); tcg_out_reloc(s, s->code_ptr, R_IA64_PCREL21B, label_index, 0); } tcg_out_bundle(s, miB, INSN_NOP_M, tcg_opc_cmp_a(TCG_REG_P0, cond, arg1, arg2, cmp4), tcg_opc_b1(TCG_REG_P6, OPC_BR_DPTK_FEW_B1, imm)); }
20,361
0
static int handle_buffered_iopage(XenIOState *state) { buffered_iopage_t *buf_page = state->buffered_io_page; buf_ioreq_t *buf_req = NULL; ioreq_t req; int qw; if (!buf_page) { return 0; } memset(&req, 0x00, sizeof(req)); for (;;) { uint32_t rdptr = buf_page->read_pointer, wrptr; xen_rmb(); wrptr = buf_page->write_pointer; xen_rmb(); if (rdptr != buf_page->read_pointer) { continue; } if (rdptr == wrptr) { break; } buf_req = &buf_page->buf_ioreq[rdptr % IOREQ_BUFFER_SLOT_NUM]; req.size = 1UL << buf_req->size; req.count = 1; req.addr = buf_req->addr; req.data = buf_req->data; req.state = STATE_IOREQ_READY; req.dir = buf_req->dir; req.df = 1; req.type = buf_req->type; req.data_is_ptr = 0; xen_rmb(); qw = (req.size == 8); if (qw) { if (rdptr + 1 == wrptr) { hw_error("Incomplete quad word buffered ioreq"); } buf_req = &buf_page->buf_ioreq[(rdptr + 1) % IOREQ_BUFFER_SLOT_NUM]; req.data |= ((uint64_t)buf_req->data) << 32; xen_rmb(); } handle_ioreq(state, &req); atomic_add(&buf_page->read_pointer, qw + 1); } return req.count; }
20,362
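The handler above drains a single-producer/single-consumer ring: the producer advances a write pointer, the consumer a read pointer, with explicit barriers (`xen_rmb()`) between reading the pointers and the slot data. Below is a generic sketch of the consumer side of such a ring using C11 atomics in place of those barriers; the structure and names are illustrative and do not reproduce the Xen page layout.

```c
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SLOTS 8

typedef struct {
    _Atomic uint32_t read_ptr;
    _Atomic uint32_t write_ptr;
    int slots[RING_SLOTS];
} spsc_ring;

/* Consume one item if available; returns 1 on success, 0 if the ring is empty. */
static int ring_consume(spsc_ring *r, int *out)
{
    uint32_t rd = atomic_load_explicit(&r->read_ptr, memory_order_relaxed);
    uint32_t wr = atomic_load_explicit(&r->write_ptr, memory_order_acquire);

    if (rd == wr)
        return 0;                                /* empty */

    *out = r->slots[rd % RING_SLOTS];            /* read the payload first ... */
    atomic_store_explicit(&r->read_ptr, rd + 1,  /* ... then release the slot */
                          memory_order_release);
    return 1;
}

int main(void)
{
    spsc_ring r;
    int v;

    atomic_init(&r.read_ptr, 0);
    atomic_init(&r.write_ptr, 0);
    r.slots[0] = 42;                             /* pretend the producer wrote */
    atomic_store_explicit(&r.write_ptr, 1, memory_order_release);

    while (ring_consume(&r, &v))
        printf("consumed %d\n", v);
    return 0;
}
```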
0
void virtio_bus_device_plugged(VirtIODevice *vdev, Error **errp) { DeviceState *qdev = DEVICE(vdev); BusState *qbus = BUS(qdev_get_parent_bus(qdev)); VirtioBusState *bus = VIRTIO_BUS(qbus); VirtioBusClass *klass = VIRTIO_BUS_GET_CLASS(bus); VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev); DPRINTF("%s: plug device.\n", qbus->name); if (klass->device_plugged != NULL) { klass->device_plugged(qbus->parent, errp); } /* Get the features of the plugged device. */ assert(vdc->get_features != NULL); vdev->host_features = vdc->get_features(vdev, vdev->host_features, errp); if (klass->post_plugged != NULL) { klass->post_plugged(qbus->parent, errp); } }
20,363
0
GSource *aio_get_g_source(AioContext *ctx) { g_source_ref(&ctx->source); return &ctx->source; }
20,364
0
static int decode_frame_mp3on4(AVCodecContext * avctx, void *data, int *data_size, const uint8_t * buf, int buf_size) { MP3On4DecodeContext *s = avctx->priv_data; MPADecodeContext *m; int len, out_size = 0; uint32_t header; OUT_INT *out_samples = data; OUT_INT decoded_buf[MPA_FRAME_SIZE * MPA_MAX_CHANNELS]; OUT_INT *outptr, *bp; int fsize; int fr, i, j, n; len = buf_size; *data_size = 0; // Discard too short frames if (buf_size < HEADER_SIZE) return -1; // If only one decoder interleave is not needed outptr = s->frames == 1 ? out_samples : decoded_buf; for (fr = 0; fr < s->frames; fr++) { fsize = AV_RB16(buf) >> 4; fsize = FFMIN3(fsize, len, MPA_MAX_CODED_FRAME_SIZE); m = s->mp3decctx[fr]; assert (m != NULL); header = (AV_RB32(buf) & 0x000fffff) | s->syncword; // patch header if (ff_mpa_check_header(header) < 0) { // Bad header, discard block *data_size = 0; return buf_size; } ff_mpegaudio_decode_header(m, header); out_size += mp_decode_frame(m, outptr, buf, fsize); buf += fsize; len -= fsize; if(s->frames > 1) { n = m->avctx->frame_size*m->nb_channels; /* interleave output data */ bp = out_samples + s->coff[fr]; if(m->nb_channels == 1) { for(j = 0; j < n; j++) { *bp = decoded_buf[j]; bp += avctx->channels; } } else { for(j = 0; j < n; j++) { bp[0] = decoded_buf[j++]; bp[1] = decoded_buf[j]; bp += avctx->channels; } } } } /* update codec info */ avctx->sample_rate = s->mp3decctx[0]->sample_rate; avctx->bit_rate = 0; for (i = 0; i < s->frames; i++) avctx->bit_rate += s->mp3decctx[i]->bit_rate; *data_size = out_size; return buf_size; }
20,369
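The multi-decoder path in the row above reshuffles each sub-decoder's samples into a packed output buffer at per-channel offsets. A minimal sketch of that interleaving step for the simple two-channel, 16-bit case; the function name and layout here are illustrative, not taken from the decoder.

```c
#include <stdint.h>
#include <stdio.h>

/* Interleave two mono sample buffers into one packed stereo buffer. */
static void interleave_s16(int16_t *dst, const int16_t *left,
                           const int16_t *right, int nb_samples)
{
    for (int i = 0; i < nb_samples; i++) {
        dst[2 * i]     = left[i];
        dst[2 * i + 1] = right[i];
    }
}

int main(void)
{
    const int16_t l[4] = { 1, 2, 3, 4 };
    const int16_t r[4] = { -1, -2, -3, -4 };
    int16_t out[8];

    interleave_s16(out, l, r, 4);
    for (int i = 0; i < 8; i++)
        printf("%d ", out[i]);  /* 1 -1 2 -2 3 -3 4 -4 */
    printf("\n");
    return 0;
}
```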