label: int64 (values 0–1)
func1: string (lengths 23–97k)
id: int64 (values 0–27.3k)
1
yuv2mono_2_c_template(SwsContext *c, const int16_t *buf[2], const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf[2], uint8_t *dest, int dstW, int yalpha, int uvalpha, int y, enum PixelFormat target) { const int16_t *buf0 = buf[0], *buf1 = buf[1]; const uint8_t * const d128 = dither_8x8_220[y & 7]; int yalpha1 = 4095 - yalpha; int i; for (i = 0; i < dstW; i += 8) { int Y, acc = 0; Y = (buf0[i + 0] * yalpha1 + buf1[i + 0] * yalpha) >> 19; accumulate_bit(acc, Y + d128[0]); Y = (buf0[i + 1] * yalpha1 + buf1[i + 1] * yalpha) >> 19; accumulate_bit(acc, Y + d128[1]); Y = (buf0[i + 2] * yalpha1 + buf1[i + 2] * yalpha) >> 19; accumulate_bit(acc, Y + d128[2]); Y = (buf0[i + 3] * yalpha1 + buf1[i + 3] * yalpha) >> 19; accumulate_bit(acc, Y + d128[3]); Y = (buf0[i + 4] * yalpha1 + buf1[i + 4] * yalpha) >> 19; accumulate_bit(acc, Y + d128[4]); Y = (buf0[i + 5] * yalpha1 + buf1[i + 5] * yalpha) >> 19; accumulate_bit(acc, Y + d128[5]); Y = (buf0[i + 6] * yalpha1 + buf1[i + 6] * yalpha) >> 19; accumulate_bit(acc, Y + d128[6]); Y = (buf0[i + 7] * yalpha1 + buf1[i + 7] * yalpha) >> 19; accumulate_bit(acc, Y + d128[7]); output_pixel(*dest++, acc); } }
14,687
1
static int xwma_read_header(AVFormatContext *s, AVFormatParameters *ap) { int64_t size, av_uninit(data_size); uint32_t dpds_table_size = 0; uint32_t *dpds_table = 0; unsigned int tag; AVIOContext *pb = s->pb; AVStream *st; XWMAContext *xwma = s->priv_data; int i; /* The following code is mostly copied from wav.c, with some * minor alterations. */ /* check RIFF header */ tag = avio_rl32(pb); if (tag != MKTAG('R', 'I', 'F', 'F')) return -1; avio_rl32(pb); /* file size */ tag = avio_rl32(pb); if (tag != MKTAG('X', 'W', 'M', 'A')) return -1; /* parse fmt header */ tag = avio_rl32(pb); if (tag != MKTAG('f', 'm', 't', ' ')) return -1; size = avio_rl32(pb); st = av_new_stream(s, 0); if (!st) return AVERROR(ENOMEM); ff_get_wav_header(pb, st->codec, size); st->need_parsing = AVSTREAM_PARSE_NONE; /* All xWMA files I have seen contained WMAv2 data. If there are files * using WMA Pro or some other codec, then we need to figure out the right * extradata for that. Thus, ask the user for feedback, but try to go on * anyway. */ if (st->codec->codec_id != CODEC_ID_WMAV2) { av_log(s, AV_LOG_WARNING, "unexpected codec (tag 0x04%x; id %d)\n", st->codec->codec_tag, st->codec->codec_id); av_log_ask_for_sample(s, NULL); } else { /* In all xWMA files I have seen, there is no extradata. But the WMA * codecs require extradata, so we provide our own fake extradata. * * First, check that there really was no extradata in the header. If * there was, then try to use, after asking the the user to provide a * sample of this unusual file. */ if (st->codec->extradata_size != 0) { /* Surprise, surprise: We *did* get some extradata. No idea * if it will work, but just go on and try it, after asking * the user for a sample. */ av_log(s, AV_LOG_WARNING, "unexpected extradata (%d bytes)\n", st->codec->extradata_size); av_log_ask_for_sample(s, NULL); } else { st->codec->extradata_size = 6; st->codec->extradata = av_mallocz(6 + FF_INPUT_BUFFER_PADDING_SIZE); if (!st->codec->extradata) return AVERROR(ENOMEM); /* setup extradata with our experimentally obtained value */ st->codec->extradata[4] = 31; } } /* set the sample rate */ av_set_pts_info(st, 64, 1, st->codec->sample_rate); /* parse the remaining RIFF chunks */ for (;;) { if (pb->eof_reached) return -1; /* read next chunk tag */ tag = avio_rl32(pb); size = avio_rl32(pb); if (tag == MKTAG('d', 'a', 't', 'a')) { /* We assume that the data chunk comes last. */ break; } else if (tag == MKTAG('d','p','d','s')) { /* Quoting the MSDN xWMA docs on the dpds chunk: "Contains the * decoded packet cumulative data size array, each element is the * number of bytes accumulated after the corresponding xWMA packet * is decoded in order" * * Each packet has size equal to st->codec->block_align, which in * all cases I saw so far was always 2230. Thus, we can use the * dpds data to compute a seeking index. */ /* Error out if there is more than one dpds chunk. */ if (dpds_table) { av_log(s, AV_LOG_ERROR, "two dpds chunks present\n"); return -1; } /* Compute the number of entries in the dpds chunk. */ if (size & 3) { /* Size should be divisible by four */ av_log(s, AV_LOG_WARNING, "dpds chunk size "PRId64" not divisible by 4\n", size); } dpds_table_size = size / 4; if (dpds_table_size == 0 || dpds_table_size >= INT_MAX / 4) { av_log(s, AV_LOG_ERROR, "dpds chunk size "PRId64" invalid\n", size); return -1; } /* Allocate some temporary storage to keep the dpds data around. * for processing later on. 
*/ dpds_table = av_malloc(dpds_table_size * sizeof(uint32_t)); if (!dpds_table) { return AVERROR(ENOMEM); } for (i = 0; i < dpds_table_size; ++i) { dpds_table[i] = avio_rl32(pb); size -= 4; } } avio_skip(pb, size); } /* Determine overall data length */ if (size < 0) return -1; if (!size) { xwma->data_end = INT64_MAX; } else xwma->data_end = avio_tell(pb) + size; if (dpds_table && dpds_table_size) { int64_t cur_pos; const uint32_t bytes_per_sample = (st->codec->channels * st->codec->bits_per_coded_sample) >> 3; /* Estimate the duration from the total number of output bytes. */ const uint64_t total_decoded_bytes = dpds_table[dpds_table_size - 1]; st->duration = total_decoded_bytes / bytes_per_sample; /* Use the dpds data to build a seek table. We can only do this after * we know the offset to the data chunk, as we need that to determine * the actual offset to each input block. * Note: If we allowed ourselves to assume that the data chunk always * follows immediately after the dpds block, we could of course guess * the data block's start offset already while reading the dpds chunk. * I decided against that, just in case other chunks ever are * discovered. */ cur_pos = avio_tell(pb); for (i = 0; i < dpds_table_size; ++i) { /* From the number of output bytes that would accumulate in the * output buffer after decoding the first (i+1) packets, we compute * an offset / timestamp pair. */ av_add_index_entry(st, cur_pos + (i+1) * st->codec->block_align, /* pos */ dpds_table[i] / bytes_per_sample, /* timestamp */ st->codec->block_align, /* size */ 0, /* duration */ AVINDEX_KEYFRAME); } } else if (st->codec->bit_rate) { /* No dpds chunk was present (or only an empty one), so estimate * the total duration using the average bits per sample and the * total data length. */ st->duration = (size<<3) * st->codec->sample_rate / st->codec->bit_rate; } av_free(dpds_table); return 0; }
14,689
1
void qmp_migrate_set_parameters(bool has_compress_level, int64_t compress_level, bool has_compress_threads, int64_t compress_threads, bool has_decompress_threads, int64_t decompress_threads, bool has_cpu_throttle_initial, int64_t cpu_throttle_initial, bool has_cpu_throttle_increment, int64_t cpu_throttle_increment, Error **errp) { MigrationState *s = migrate_get_current(); if (has_compress_level && (compress_level < 0 || compress_level > 9)) { error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "compress_level", "is invalid, it should be in the range of 0 to 9"); return; } if (has_compress_threads && (compress_threads < 1 || compress_threads > 255)) { error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "compress_threads", "is invalid, it should be in the range of 1 to 255"); return; } if (has_decompress_threads && (decompress_threads < 1 || decompress_threads > 255)) { error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "decompress_threads", "is invalid, it should be in the range of 1 to 255"); return; } if (has_cpu_throttle_initial && (cpu_throttle_initial < 1 || cpu_throttle_initial > 99)) { error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cpu_throttle_initial", "an integer in the range of 1 to 99"); } if (has_cpu_throttle_increment && (cpu_throttle_increment < 1 || cpu_throttle_increment > 99)) { error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cpu_throttle_increment", "an integer in the range of 1 to 99"); } if (has_compress_level) { s->parameters.compress_level = compress_level; } if (has_compress_threads) { s->parameters.compress_threads = compress_threads; } if (has_decompress_threads) { s->parameters.decompress_threads = decompress_threads; } if (has_cpu_throttle_initial) { s->parameters.cpu_throttle_initial = cpu_throttle_initial; } if (has_cpu_throttle_increment) { s->parameters.cpu_throttle_increment = cpu_throttle_increment; } if (has_tls_creds) { g_free(s->parameters.tls_creds); s->parameters.tls_creds = g_strdup(tls_creds); } if (has_tls_hostname) { g_free(s->parameters.tls_hostname); s->parameters.tls_hostname = g_strdup(tls_hostname); } }
14,690
1
static int swf_write_packet(AVFormatContext *s, int stream_index, const uint8_t *buf, int size, int64_t pts) { AVCodecContext *codec = &s->streams[stream_index]->codec; if (codec->codec_type == CODEC_TYPE_AUDIO) return swf_write_audio(s, buf, size); else return swf_write_video(s, codec, buf, size); }
14,691
1
static void hScale16To15_c(SwsContext *c, int16_t *dst, int dstW, const uint8_t *_src, const int16_t *filter, const int16_t *filterPos, int filterSize) { int i; const uint16_t *src = (const uint16_t *) _src; int sh = av_pix_fmt_descriptors[c->srcFormat].comp[0].depth_minus1; for (i = 0; i < dstW; i++) { int j; int srcPos = filterPos[i]; int val = 0; for (j = 0; j < filterSize; j++) { val += src[srcPos + j] * filter[filterSize * i + j]; } // filter=14 bit, input=16 bit, output=30 bit, >> 15 makes 15 bit dst[i] = FFMIN(val >> sh, (1 << 15) - 1); } }
14,692
1
static int __qemu_rdma_delete_block(RDMAContext *rdma, ram_addr_t block_offset) { RDMALocalBlocks *local = &rdma->local_ram_blocks; RDMALocalBlock *block = g_hash_table_lookup(rdma->blockmap, (void *) block_offset); RDMALocalBlock *old = local->block; int x; assert(block); if (block->pmr) { int j; for (j = 0; j < block->nb_chunks; j++) { if (!block->pmr[j]) { continue; } ibv_dereg_mr(block->pmr[j]); rdma->total_registrations--; } g_free(block->pmr); block->pmr = NULL; } if (block->mr) { ibv_dereg_mr(block->mr); rdma->total_registrations--; block->mr = NULL; } g_free(block->transit_bitmap); block->transit_bitmap = NULL; g_free(block->unregister_bitmap); block->unregister_bitmap = NULL; g_free(block->remote_keys); block->remote_keys = NULL; for (x = 0; x < local->nb_blocks; x++) { g_hash_table_remove(rdma->blockmap, (void *)old[x].offset); } if (local->nb_blocks > 1) { local->block = g_malloc0(sizeof(RDMALocalBlock) * (local->nb_blocks - 1)); if (block->index) { memcpy(local->block, old, sizeof(RDMALocalBlock) * block->index); } if (block->index < (local->nb_blocks - 1)) { memcpy(local->block + block->index, old + (block->index + 1), sizeof(RDMALocalBlock) * (local->nb_blocks - (block->index + 1))); } } else { assert(block == local->block); local->block = NULL; } DDPRINTF("Deleted Block: %d, addr: %" PRIu64 ", offset: %" PRIu64 " length: %" PRIu64 " end: %" PRIu64 " bits %" PRIu64 " chunks %d\n", local->nb_blocks, (uint64_t) block->local_host_addr, block->offset, block->length, (uint64_t) (block->local_host_addr + block->length), BITS_TO_LONGS(block->nb_chunks) * sizeof(unsigned long) * 8, block->nb_chunks); g_free(old); local->nb_blocks--; if (local->nb_blocks) { for (x = 0; x < local->nb_blocks; x++) { g_hash_table_insert(rdma->blockmap, (void *)local->block[x].offset, &local->block[x]); } } return 0; }
14,693
1
static int decode_extradata(ADTSContext *adts, uint8_t *buf, int size) { GetBitContext gb; init_get_bits(&gb, buf, size * 8); adts->objecttype = get_bits(&gb, 5) - 1; adts->sample_rate_index = get_bits(&gb, 4); adts->channel_conf = get_bits(&gb, 4); adts->write_adts = 1; return 0; }
14,695
1
static void tcg_out_st (TCGContext *s, TCGType type, int arg, int arg1, tcg_target_long arg2) { if (type == TCG_TYPE_I32) tcg_out_ldst (s, arg, arg1, arg2, STW, STWX); else tcg_out_ldst (s, arg, arg1, arg2, STD, STDX); }
14,696
1
static void raw_probe_alignment(BlockDriverState *bs, int fd, Error **errp) { BDRVRawState *s = bs->opaque; char *buf; /* For /dev/sg devices the alignment is not really used. With buffered I/O, we don't have any restrictions. */ if (bs->sg || !s->needs_alignment) { bs->request_alignment = 1; s->buf_align = 1; return; } bs->request_alignment = 0; s->buf_align = 0; /* Let's try to use the logical blocksize for the alignment. */ if (probe_logical_blocksize(fd, &bs->request_alignment) < 0) { bs->request_alignment = 0; } #ifdef CONFIG_XFS if (s->is_xfs) { struct dioattr da; if (xfsctl(NULL, fd, XFS_IOC_DIOINFO, &da) >= 0) { bs->request_alignment = da.d_miniosz; /* The kernel returns wrong information for d_mem */ /* s->buf_align = da.d_mem; */ } } #endif /* If we could not get the sizes so far, we can only guess them */ if (!s->buf_align) { size_t align; buf = qemu_memalign(MAX_BLOCKSIZE, 2 * MAX_BLOCKSIZE); for (align = 512; align <= MAX_BLOCKSIZE; align <<= 1) { if (pread(fd, buf + align, MAX_BLOCKSIZE, 0) >= 0) { s->buf_align = align; break; } } qemu_vfree(buf); } if (!bs->request_alignment) { size_t align; buf = qemu_memalign(s->buf_align, MAX_BLOCKSIZE); for (align = 512; align <= MAX_BLOCKSIZE; align <<= 1) { if (pread(fd, buf, align, 0) >= 0) { bs->request_alignment = align; break; } } qemu_vfree(buf); } if (!s->buf_align || !bs->request_alignment) { error_setg(errp, "Could not find working O_DIRECT alignment. " "Try cache.direct=off."); } }
14,697
1
static void alpha_cpu_realizefn(DeviceState *dev, Error **errp) { AlphaCPUClass *acc = ALPHA_CPU_GET_CLASS(dev); acc->parent_realize(dev, errp); }
14,698
1
static void add_pixels_clamped_mmx(const DCTELEM *block, UINT8 *pixels, int line_size) { const DCTELEM *p; UINT8 *pix; int i; /* read the pixels */ p = block; pix = pixels; MOVQ_ZERO(mm7); i = 4; while (i) { __asm __volatile( "movq %2, %%mm0\n\t" "movq 8%2, %%mm1\n\t" "movq 16%2, %%mm2\n\t" "movq 24%2, %%mm3\n\t" "movq %0, %%mm4\n\t" "movq %1, %%mm6\n\t" "movq %%mm4, %%mm5\n\t" "punpcklbw %%mm7, %%mm4\n\t" "punpckhbw %%mm7, %%mm5\n\t" "paddsw %%mm4, %%mm0\n\t" "paddsw %%mm5, %%mm1\n\t" "movq %%mm6, %%mm5\n\t" "punpcklbw %%mm7, %%mm6\n\t" "punpckhbw %%mm7, %%mm5\n\t" "paddsw %%mm6, %%mm2\n\t" "paddsw %%mm5, %%mm3\n\t" "packuswb %%mm1, %%mm0\n\t" "packuswb %%mm3, %%mm2\n\t" "movq %%mm0, %0\n\t" "movq %%mm2, %1\n\t" :"+m"(*pix), "+m"(*(pix+line_size)) :"m"(*p) :"memory"); pix += line_size*2; p += 16; i--; }; }
14,699
1
struct omap_mpu_state_s *omap310_mpu_init(MemoryRegion *system_memory, unsigned long sdram_size, const char *core) { int i; struct omap_mpu_state_s *s = (struct omap_mpu_state_s *) g_malloc0(sizeof(struct omap_mpu_state_s)); qemu_irq dma_irqs[6]; DriveInfo *dinfo; SysBusDevice *busdev; if (!core) core = "ti925t"; /* Core */ s->mpu_model = omap310; s->cpu = cpu_arm_init(core); if (s->cpu == NULL) { fprintf(stderr, "Unable to find CPU definition\n"); exit(1); } s->sdram_size = sdram_size; s->sram_size = OMAP15XX_SRAM_SIZE; s->wakeup = qemu_allocate_irq(omap_mpu_wakeup, s, 0); /* Clocks */ omap_clk_init(s); /* Memory-mapped stuff */ memory_region_allocate_system_memory(&s->emiff_ram, NULL, "omap1.dram", s->sdram_size); memory_region_add_subregion(system_memory, OMAP_EMIFF_BASE, &s->emiff_ram); memory_region_init_ram(&s->imif_ram, NULL, "omap1.sram", s->sram_size, &error_abort); vmstate_register_ram_global(&s->imif_ram); memory_region_add_subregion(system_memory, OMAP_IMIF_BASE, &s->imif_ram); omap_clkm_init(system_memory, 0xfffece00, 0xe1008000, s); s->ih[0] = qdev_create(NULL, "omap-intc"); qdev_prop_set_uint32(s->ih[0], "size", 0x100); qdev_prop_set_ptr(s->ih[0], "clk", omap_findclk(s, "arminth_ck")); qdev_init_nofail(s->ih[0]); busdev = SYS_BUS_DEVICE(s->ih[0]); sysbus_connect_irq(busdev, 0, qdev_get_gpio_in(DEVICE(s->cpu), ARM_CPU_IRQ)); sysbus_connect_irq(busdev, 1, qdev_get_gpio_in(DEVICE(s->cpu), ARM_CPU_FIQ)); sysbus_mmio_map(busdev, 0, 0xfffecb00); s->ih[1] = qdev_create(NULL, "omap-intc"); qdev_prop_set_uint32(s->ih[1], "size", 0x800); qdev_prop_set_ptr(s->ih[1], "clk", omap_findclk(s, "arminth_ck")); qdev_init_nofail(s->ih[1]); busdev = SYS_BUS_DEVICE(s->ih[1]); sysbus_connect_irq(busdev, 0, qdev_get_gpio_in(s->ih[0], OMAP_INT_15XX_IH2_IRQ)); /* The second interrupt controller's FIQ output is not wired up */ sysbus_mmio_map(busdev, 0, 0xfffe0000); for (i = 0; i < 6; i++) { dma_irqs[i] = qdev_get_gpio_in(s->ih[omap1_dma_irq_map[i].ih], omap1_dma_irq_map[i].intr); } s->dma = omap_dma_init(0xfffed800, dma_irqs, system_memory, qdev_get_gpio_in(s->ih[0], OMAP_INT_DMA_LCD), s, omap_findclk(s, "dma_ck"), omap_dma_3_1); s->port[emiff ].addr_valid = omap_validate_emiff_addr; s->port[emifs ].addr_valid = omap_validate_emifs_addr; s->port[imif ].addr_valid = omap_validate_imif_addr; s->port[tipb ].addr_valid = omap_validate_tipb_addr; s->port[local ].addr_valid = omap_validate_local_addr; s->port[tipb_mpui].addr_valid = omap_validate_tipb_mpui_addr; /* Register SDRAM and SRAM DMA ports for fast transfers. 
*/ soc_dma_port_add_mem(s->dma, memory_region_get_ram_ptr(&s->emiff_ram), OMAP_EMIFF_BASE, s->sdram_size); soc_dma_port_add_mem(s->dma, memory_region_get_ram_ptr(&s->imif_ram), OMAP_IMIF_BASE, s->sram_size); s->timer[0] = omap_mpu_timer_init(system_memory, 0xfffec500, qdev_get_gpio_in(s->ih[0], OMAP_INT_TIMER1), omap_findclk(s, "mputim_ck")); s->timer[1] = omap_mpu_timer_init(system_memory, 0xfffec600, qdev_get_gpio_in(s->ih[0], OMAP_INT_TIMER2), omap_findclk(s, "mputim_ck")); s->timer[2] = omap_mpu_timer_init(system_memory, 0xfffec700, qdev_get_gpio_in(s->ih[0], OMAP_INT_TIMER3), omap_findclk(s, "mputim_ck")); s->wdt = omap_wd_timer_init(system_memory, 0xfffec800, qdev_get_gpio_in(s->ih[0], OMAP_INT_WD_TIMER), omap_findclk(s, "armwdt_ck")); s->os_timer = omap_os_timer_init(system_memory, 0xfffb9000, qdev_get_gpio_in(s->ih[1], OMAP_INT_OS_TIMER), omap_findclk(s, "clk32-kHz")); s->lcd = omap_lcdc_init(system_memory, 0xfffec000, qdev_get_gpio_in(s->ih[0], OMAP_INT_LCD_CTRL), omap_dma_get_lcdch(s->dma), omap_findclk(s, "lcd_ck")); omap_ulpd_pm_init(system_memory, 0xfffe0800, s); omap_pin_cfg_init(system_memory, 0xfffe1000, s); omap_id_init(system_memory, s); omap_mpui_init(system_memory, 0xfffec900, s); s->private_tipb = omap_tipb_bridge_init(system_memory, 0xfffeca00, qdev_get_gpio_in(s->ih[0], OMAP_INT_BRIDGE_PRIV), omap_findclk(s, "tipb_ck")); s->public_tipb = omap_tipb_bridge_init(system_memory, 0xfffed300, qdev_get_gpio_in(s->ih[0], OMAP_INT_BRIDGE_PUB), omap_findclk(s, "tipb_ck")); omap_tcmi_init(system_memory, 0xfffecc00, s); s->uart[0] = omap_uart_init(0xfffb0000, qdev_get_gpio_in(s->ih[1], OMAP_INT_UART1), omap_findclk(s, "uart1_ck"), omap_findclk(s, "uart1_ck"), s->drq[OMAP_DMA_UART1_TX], s->drq[OMAP_DMA_UART1_RX], "uart1", serial_hds[0]); s->uart[1] = omap_uart_init(0xfffb0800, qdev_get_gpio_in(s->ih[1], OMAP_INT_UART2), omap_findclk(s, "uart2_ck"), omap_findclk(s, "uart2_ck"), s->drq[OMAP_DMA_UART2_TX], s->drq[OMAP_DMA_UART2_RX], "uart2", serial_hds[0] ? serial_hds[1] : NULL); s->uart[2] = omap_uart_init(0xfffb9800, qdev_get_gpio_in(s->ih[0], OMAP_INT_UART3), omap_findclk(s, "uart3_ck"), omap_findclk(s, "uart3_ck"), s->drq[OMAP_DMA_UART3_TX], s->drq[OMAP_DMA_UART3_RX], "uart3", serial_hds[0] && serial_hds[1] ? 
serial_hds[2] : NULL); s->dpll[0] = omap_dpll_init(system_memory, 0xfffecf00, omap_findclk(s, "dpll1")); s->dpll[1] = omap_dpll_init(system_memory, 0xfffed000, omap_findclk(s, "dpll2")); s->dpll[2] = omap_dpll_init(system_memory, 0xfffed100, omap_findclk(s, "dpll3")); dinfo = drive_get(IF_SD, 0, 0); if (!dinfo) { fprintf(stderr, "qemu: missing SecureDigital device\n"); exit(1); } s->mmc = omap_mmc_init(0xfffb7800, system_memory, blk_by_legacy_dinfo(dinfo), qdev_get_gpio_in(s->ih[1], OMAP_INT_OQN), &s->drq[OMAP_DMA_MMC_TX], omap_findclk(s, "mmc_ck")); s->mpuio = omap_mpuio_init(system_memory, 0xfffb5000, qdev_get_gpio_in(s->ih[1], OMAP_INT_KEYBOARD), qdev_get_gpio_in(s->ih[1], OMAP_INT_MPUIO), s->wakeup, omap_findclk(s, "clk32-kHz")); s->gpio = qdev_create(NULL, "omap-gpio"); qdev_prop_set_int32(s->gpio, "mpu_model", s->mpu_model); qdev_prop_set_ptr(s->gpio, "clk", omap_findclk(s, "arm_gpio_ck")); qdev_init_nofail(s->gpio); sysbus_connect_irq(SYS_BUS_DEVICE(s->gpio), 0, qdev_get_gpio_in(s->ih[0], OMAP_INT_GPIO_BANK1)); sysbus_mmio_map(SYS_BUS_DEVICE(s->gpio), 0, 0xfffce000); s->microwire = omap_uwire_init(system_memory, 0xfffb3000, qdev_get_gpio_in(s->ih[1], OMAP_INT_uWireTX), qdev_get_gpio_in(s->ih[1], OMAP_INT_uWireRX), s->drq[OMAP_DMA_UWIRE_TX], omap_findclk(s, "mpuper_ck")); s->pwl = omap_pwl_init(system_memory, 0xfffb5800, omap_findclk(s, "armxor_ck")); s->pwt = omap_pwt_init(system_memory, 0xfffb6000, omap_findclk(s, "armxor_ck")); s->i2c[0] = qdev_create(NULL, "omap_i2c"); qdev_prop_set_uint8(s->i2c[0], "revision", 0x11); qdev_prop_set_ptr(s->i2c[0], "fclk", omap_findclk(s, "mpuper_ck")); qdev_init_nofail(s->i2c[0]); busdev = SYS_BUS_DEVICE(s->i2c[0]); sysbus_connect_irq(busdev, 0, qdev_get_gpio_in(s->ih[1], OMAP_INT_I2C)); sysbus_connect_irq(busdev, 1, s->drq[OMAP_DMA_I2C_TX]); sysbus_connect_irq(busdev, 2, s->drq[OMAP_DMA_I2C_RX]); sysbus_mmio_map(busdev, 0, 0xfffb3800); s->rtc = omap_rtc_init(system_memory, 0xfffb4800, qdev_get_gpio_in(s->ih[1], OMAP_INT_RTC_TIMER), qdev_get_gpio_in(s->ih[1], OMAP_INT_RTC_ALARM), omap_findclk(s, "clk32-kHz")); s->mcbsp1 = omap_mcbsp_init(system_memory, 0xfffb1800, qdev_get_gpio_in(s->ih[1], OMAP_INT_McBSP1TX), qdev_get_gpio_in(s->ih[1], OMAP_INT_McBSP1RX), &s->drq[OMAP_DMA_MCBSP1_TX], omap_findclk(s, "dspxor_ck")); s->mcbsp2 = omap_mcbsp_init(system_memory, 0xfffb1000, qdev_get_gpio_in(s->ih[0], OMAP_INT_310_McBSP2_TX), qdev_get_gpio_in(s->ih[0], OMAP_INT_310_McBSP2_RX), &s->drq[OMAP_DMA_MCBSP2_TX], omap_findclk(s, "mpuper_ck")); s->mcbsp3 = omap_mcbsp_init(system_memory, 0xfffb7000, qdev_get_gpio_in(s->ih[1], OMAP_INT_McBSP3TX), qdev_get_gpio_in(s->ih[1], OMAP_INT_McBSP3RX), &s->drq[OMAP_DMA_MCBSP3_TX], omap_findclk(s, "dspxor_ck")); s->led[0] = omap_lpg_init(system_memory, 0xfffbd000, omap_findclk(s, "clk32-kHz")); s->led[1] = omap_lpg_init(system_memory, 0xfffbd800, omap_findclk(s, "clk32-kHz")); /* Register mappings not currenlty implemented: * MCSI2 Comm fffb2000 - fffb27ff (not mapped on OMAP310) * MCSI1 Bluetooth fffb2800 - fffb2fff (not mapped on OMAP310) * USB W2FC fffb4000 - fffb47ff * Camera Interface fffb6800 - fffb6fff * USB Host fffba000 - fffba7ff * FAC fffba800 - fffbafff * HDQ/1-Wire fffbc000 - fffbc7ff * TIPB switches fffbc800 - fffbcfff * Mailbox fffcf000 - fffcf7ff * Local bus IF fffec100 - fffec1ff * Local bus MMU fffec200 - fffec2ff * DSP MMU fffed200 - fffed2ff */ omap_setup_dsp_mapping(system_memory, omap15xx_dsp_mm); omap_setup_mpui_io(system_memory, s); qemu_register_reset(omap1_mpu_reset, s); return s; }
14,700
1
static void decode_scaling_list(GetBitContext *gb, uint8_t *factors, int size, const uint8_t *jvt_list, const uint8_t *fallback_list) { int i, last = 8, next = 8; const uint8_t *scan = size == 16 ? ff_zigzag_scan : ff_zigzag_direct; if (!get_bits1(gb)) /* matrix not written, we use the predicted one */ memcpy(factors, fallback_list, size * sizeof(uint8_t)); else for (i = 0; i < size; i++) { if (next) next = (last + get_se_golomb(gb)) & 0xff; if (!i && !next) { /* matrix not written, we use the preset one */ memcpy(factors, jvt_list, size * sizeof(uint8_t)); break; } last = factors[scan[i]] = next ? next : last; } }
14,701
0
PCIDevice *virtio_net_init(PCIBus *bus, NICInfo *nd, int devfn) { VirtIONet *n; static int virtio_net_id; n = (VirtIONet *)virtio_init_pci(bus, "virtio-net", 6900, 0x1000, 0, VIRTIO_ID_NET, 0x02, 0x00, 0x00, 6, sizeof(VirtIONet)); if (!n) return NULL; n->vdev.get_config = virtio_net_update_config; n->vdev.get_features = virtio_net_get_features; n->vdev.set_features = virtio_net_set_features; n->rx_vq = virtio_add_queue(&n->vdev, 256, virtio_net_handle_rx); n->tx_vq = virtio_add_queue(&n->vdev, 256, virtio_net_handle_tx); memcpy(n->mac, nd->macaddr, 6); n->vc = qemu_new_vlan_client(nd->vlan, nd->model, nd->name, virtio_net_receive, virtio_net_can_receive, n); qemu_format_nic_info_str(n->vc, n->mac); n->tx_timer = qemu_new_timer(vm_clock, virtio_net_tx_timer, n); n->tx_timer_active = 0; n->mergeable_rx_bufs = 0; register_savevm("virtio-net", virtio_net_id++, 2, virtio_net_save, virtio_net_load, n); return (PCIDevice *)n; }
14,702
0
CharDriverState *chr_baum_init(void) { BaumDriverState *baum; CharDriverState *chr; brlapi_handle_t *handle; #ifdef CONFIG_SDL SDL_SysWMinfo info; #endif int tty; baum = g_malloc0(sizeof(BaumDriverState)); baum->chr = chr = g_malloc0(sizeof(CharDriverState)); chr->opaque = baum; chr->chr_write = baum_write; chr->chr_accept_input = baum_accept_input; chr->chr_close = baum_close; handle = g_malloc0(brlapi_getHandleSize()); baum->brlapi = handle; baum->brlapi_fd = brlapi__openConnection(handle, NULL, NULL); if (baum->brlapi_fd == -1) { brlapi_perror("baum_init: brlapi_openConnection"); goto fail_handle; } baum->cellCount_timer = qemu_new_timer_ns(vm_clock, baum_cellCount_timer_cb, baum); if (brlapi__getDisplaySize(handle, &baum->x, &baum->y) == -1) { brlapi_perror("baum_init: brlapi_getDisplaySize"); goto fail; } #ifdef CONFIG_SDL memset(&info, 0, sizeof(info)); SDL_VERSION(&info.version); if (SDL_GetWMInfo(&info)) tty = info.info.x11.wmwindow; else #endif tty = BRLAPI_TTY_DEFAULT; if (brlapi__enterTtyMode(handle, tty, NULL) == -1) { brlapi_perror("baum_init: brlapi_enterTtyMode"); goto fail; } qemu_set_fd_handler(baum->brlapi_fd, baum_chr_read, NULL, baum); qemu_chr_be_generic_open(chr); return chr; fail: qemu_free_timer(baum->cellCount_timer); brlapi__closeConnection(handle); fail_handle: g_free(handle); g_free(chr); g_free(baum); return NULL; }
14,703
0
static int all_vcpus_paused(void) { CPUState *penv = first_cpu; while (penv) { if (!penv->stopped) return 0; penv = (CPUState *)penv->next_cpu; } return 1; }
14,704
0
uint32_t lm4549_read(lm4549_state *s, target_phys_addr_t offset) { uint16_t *regfile = s->regfile; uint32_t value = 0; /* Read the stored value */ assert(offset < 128); value = regfile[offset]; DPRINTF("read [0x%02x] = 0x%04x\n", offset, value); return value; }
14,705
0
static void kvm_log_stop(MemoryListener *listener, MemoryRegionSection *section) { int r; r = kvm_dirty_pages_log_change(section->offset_within_address_space, int128_get64(section->size), false); if (r < 0) { abort(); } }
14,707
0
int kvm_arch_init_vcpu(CPUState *cs) { int i, ret, arraylen; uint64_t v; struct kvm_one_reg r; struct kvm_reg_list rl; struct kvm_reg_list *rlp; ARMCPU *cpu = ARM_CPU(cs); if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE) { fprintf(stderr, "KVM is not supported for this guest CPU type\n"); return -EINVAL; } /* Determine init features for this CPU */ memset(cpu->kvm_init_features, 0, sizeof(cpu->kvm_init_features)); if (cpu->start_powered_off) { cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_POWER_OFF; } if (kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PSCI_0_2)) { cpu->psci_version = 2; cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2; } /* Do KVM_ARM_VCPU_INIT ioctl */ ret = kvm_arm_vcpu_init(cs); if (ret) { return ret; } /* Query the kernel to make sure it supports 32 VFP * registers: QEMU's "cortex-a15" CPU is always a * VFP-D32 core. The simplest way to do this is just * to attempt to read register d31. */ r.id = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP | 31; r.addr = (uintptr_t)(&v); ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r); if (ret == -ENOENT) { return -EINVAL; } /* Populate the cpreg list based on the kernel's idea * of what registers exist (and throw away the TCG-created list). */ rl.n = 0; ret = kvm_vcpu_ioctl(cs, KVM_GET_REG_LIST, &rl); if (ret != -E2BIG) { return ret; } rlp = g_malloc(sizeof(struct kvm_reg_list) + rl.n * sizeof(uint64_t)); rlp->n = rl.n; ret = kvm_vcpu_ioctl(cs, KVM_GET_REG_LIST, rlp); if (ret) { goto out; } /* Sort the list we get back from the kernel, since cpreg_tuples * must be in strictly ascending order. */ qsort(&rlp->reg, rlp->n, sizeof(rlp->reg[0]), compare_u64); for (i = 0, arraylen = 0; i < rlp->n; i++) { if (!reg_syncs_via_tuple_list(rlp->reg[i])) { continue; } switch (rlp->reg[i] & KVM_REG_SIZE_MASK) { case KVM_REG_SIZE_U32: case KVM_REG_SIZE_U64: break; default: fprintf(stderr, "Can't handle size of register in kernel list\n"); ret = -EINVAL; goto out; } arraylen++; } cpu->cpreg_indexes = g_renew(uint64_t, cpu->cpreg_indexes, arraylen); cpu->cpreg_values = g_renew(uint64_t, cpu->cpreg_values, arraylen); cpu->cpreg_vmstate_indexes = g_renew(uint64_t, cpu->cpreg_vmstate_indexes, arraylen); cpu->cpreg_vmstate_values = g_renew(uint64_t, cpu->cpreg_vmstate_values, arraylen); cpu->cpreg_array_len = arraylen; cpu->cpreg_vmstate_array_len = arraylen; for (i = 0, arraylen = 0; i < rlp->n; i++) { uint64_t regidx = rlp->reg[i]; if (!reg_syncs_via_tuple_list(regidx)) { continue; } cpu->cpreg_indexes[arraylen] = regidx; arraylen++; } assert(cpu->cpreg_array_len == arraylen); if (!write_kvmstate_to_list(cpu)) { /* Shouldn't happen unless kernel is inconsistent about * what registers exist. */ fprintf(stderr, "Initial read of kernel register state failed\n"); ret = -EINVAL; goto out; } /* Save a copy of the initial register values so that we can * feed it back to the kernel on VCPU reset. */ cpu->cpreg_reset_values = g_memdup(cpu->cpreg_values, cpu->cpreg_array_len * sizeof(cpu->cpreg_values[0])); out: g_free(rlp); return ret; }
14,708
0
static int cpu_ppc_handle_mmu_fault(CPUPPCState *env, target_ulong address, int rw, int mmu_idx) { CPUState *cs = CPU(ppc_env_get_cpu(env)); PowerPCCPU *cpu = POWERPC_CPU(cs); mmu_ctx_t ctx; int access_type; int ret = 0; if (rw == 2) { /* code access */ rw = 0; access_type = ACCESS_CODE; } else { /* data access */ access_type = env->access_type; } ret = get_physical_address(env, &ctx, address, rw, access_type); if (ret == 0) { tlb_set_page(cs, address & TARGET_PAGE_MASK, ctx.raddr & TARGET_PAGE_MASK, ctx.prot, mmu_idx, TARGET_PAGE_SIZE); ret = 0; } else if (ret < 0) { LOG_MMU_STATE(cs); if (access_type == ACCESS_CODE) { switch (ret) { case -1: /* No matches in page tables or TLB */ switch (env->mmu_model) { case POWERPC_MMU_SOFT_6xx: cs->exception_index = POWERPC_EXCP_IFTLB; env->error_code = 1 << 18; env->spr[SPR_IMISS] = address; env->spr[SPR_ICMP] = 0x80000000 | ctx.ptem; goto tlb_miss; case POWERPC_MMU_SOFT_74xx: cs->exception_index = POWERPC_EXCP_IFTLB; goto tlb_miss_74xx; case POWERPC_MMU_SOFT_4xx: case POWERPC_MMU_SOFT_4xx_Z: cs->exception_index = POWERPC_EXCP_ITLB; env->error_code = 0; env->spr[SPR_40x_DEAR] = address; env->spr[SPR_40x_ESR] = 0x00000000; break; case POWERPC_MMU_BOOKE206: booke206_update_mas_tlb_miss(env, address, rw); /* fall through */ case POWERPC_MMU_BOOKE: cs->exception_index = POWERPC_EXCP_ITLB; env->error_code = 0; env->spr[SPR_BOOKE_DEAR] = address; return -1; case POWERPC_MMU_MPC8xx: /* XXX: TODO */ cpu_abort(cs, "MPC8xx MMU model is not implemented\n"); break; case POWERPC_MMU_REAL: cpu_abort(cs, "PowerPC in real mode should never raise " "any MMU exceptions\n"); return -1; default: cpu_abort(cs, "Unknown or invalid MMU model\n"); return -1; } break; case -2: /* Access rights violation */ cs->exception_index = POWERPC_EXCP_ISI; env->error_code = 0x08000000; break; case -3: /* No execute protection violation */ if ((env->mmu_model == POWERPC_MMU_BOOKE) || (env->mmu_model == POWERPC_MMU_BOOKE206)) { env->spr[SPR_BOOKE_ESR] = 0x00000000; } cs->exception_index = POWERPC_EXCP_ISI; env->error_code = 0x10000000; break; case -4: /* Direct store exception */ /* No code fetch is allowed in direct-store areas */ cs->exception_index = POWERPC_EXCP_ISI; env->error_code = 0x10000000; break; } } else { switch (ret) { case -1: /* No matches in page tables or TLB */ switch (env->mmu_model) { case POWERPC_MMU_SOFT_6xx: if (rw == 1) { cs->exception_index = POWERPC_EXCP_DSTLB; env->error_code = 1 << 16; } else { cs->exception_index = POWERPC_EXCP_DLTLB; env->error_code = 0; } env->spr[SPR_DMISS] = address; env->spr[SPR_DCMP] = 0x80000000 | ctx.ptem; tlb_miss: env->error_code |= ctx.key << 19; env->spr[SPR_HASH1] = env->htab_base + get_pteg_offset32(cpu, ctx.hash[0]); env->spr[SPR_HASH2] = env->htab_base + get_pteg_offset32(cpu, ctx.hash[1]); break; case POWERPC_MMU_SOFT_74xx: if (rw == 1) { cs->exception_index = POWERPC_EXCP_DSTLB; } else { cs->exception_index = POWERPC_EXCP_DLTLB; } tlb_miss_74xx: /* Implement LRU algorithm */ env->error_code = ctx.key << 19; env->spr[SPR_TLBMISS] = (address & ~((target_ulong)0x3)) | ((env->last_way + 1) & (env->nb_ways - 1)); env->spr[SPR_PTEHI] = 0x80000000 | ctx.ptem; break; case POWERPC_MMU_SOFT_4xx: case POWERPC_MMU_SOFT_4xx_Z: cs->exception_index = POWERPC_EXCP_DTLB; env->error_code = 0; env->spr[SPR_40x_DEAR] = address; if (rw) { env->spr[SPR_40x_ESR] = 0x00800000; } else { env->spr[SPR_40x_ESR] = 0x00000000; } break; case POWERPC_MMU_MPC8xx: /* XXX: TODO */ cpu_abort(cs, "MPC8xx MMU model is not implemented\n"); break; case 
POWERPC_MMU_BOOKE206: booke206_update_mas_tlb_miss(env, address, rw); /* fall through */ case POWERPC_MMU_BOOKE: cs->exception_index = POWERPC_EXCP_DTLB; env->error_code = 0; env->spr[SPR_BOOKE_DEAR] = address; env->spr[SPR_BOOKE_ESR] = rw ? ESR_ST : 0; return -1; case POWERPC_MMU_REAL: cpu_abort(cs, "PowerPC in real mode should never raise " "any MMU exceptions\n"); return -1; default: cpu_abort(cs, "Unknown or invalid MMU model\n"); return -1; } break; case -2: /* Access rights violation */ cs->exception_index = POWERPC_EXCP_DSI; env->error_code = 0; if (env->mmu_model == POWERPC_MMU_SOFT_4xx || env->mmu_model == POWERPC_MMU_SOFT_4xx_Z) { env->spr[SPR_40x_DEAR] = address; if (rw) { env->spr[SPR_40x_ESR] |= 0x00800000; } } else if ((env->mmu_model == POWERPC_MMU_BOOKE) || (env->mmu_model == POWERPC_MMU_BOOKE206)) { env->spr[SPR_BOOKE_DEAR] = address; env->spr[SPR_BOOKE_ESR] = rw ? ESR_ST : 0; } else { env->spr[SPR_DAR] = address; if (rw == 1) { env->spr[SPR_DSISR] = 0x0A000000; } else { env->spr[SPR_DSISR] = 0x08000000; } } break; case -4: /* Direct store exception */ switch (access_type) { case ACCESS_FLOAT: /* Floating point load/store */ cs->exception_index = POWERPC_EXCP_ALIGN; env->error_code = POWERPC_EXCP_ALIGN_FP; env->spr[SPR_DAR] = address; break; case ACCESS_RES: /* lwarx, ldarx or stwcx. */ cs->exception_index = POWERPC_EXCP_DSI; env->error_code = 0; env->spr[SPR_DAR] = address; if (rw == 1) { env->spr[SPR_DSISR] = 0x06000000; } else { env->spr[SPR_DSISR] = 0x04000000; } break; case ACCESS_EXT: /* eciwx or ecowx */ cs->exception_index = POWERPC_EXCP_DSI; env->error_code = 0; env->spr[SPR_DAR] = address; if (rw == 1) { env->spr[SPR_DSISR] = 0x06100000; } else { env->spr[SPR_DSISR] = 0x04100000; } break; default: printf("DSI: invalid exception (%d)\n", ret); cs->exception_index = POWERPC_EXCP_PROGRAM; env->error_code = POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL; env->spr[SPR_DAR] = address; break; } break; } } #if 0 printf("%s: set exception to %d %02x\n", __func__, cs->exception, env->error_code); #endif ret = 1; } return ret; }
14,709
0
START_TEST(qdict_get_try_str_test) { const char *p; const char *key = "key"; const char *str = "string"; qdict_put(tests_dict, key, qstring_from_str(str)); p = qdict_get_try_str(tests_dict, key); fail_unless(p != NULL); fail_unless(strcmp(p, str) == 0); }
14,710
0
int swri_realloc_audio(AudioData *a, int count){ int i, countb; AudioData old; if(count < 0 || count > INT_MAX/2/a->bps/a->ch_count) return AVERROR(EINVAL); if(a->count >= count) return 0; count*=2; countb= FFALIGN(count*a->bps, ALIGN); old= *a; av_assert0(a->bps); av_assert0(a->ch_count); a->data= av_mallocz_array(countb, a->ch_count); if(!a->data) return AVERROR(ENOMEM); for(i=0; i<a->ch_count; i++){ a->ch[i]= a->data + i*(a->planar ? countb : a->bps); if(a->planar) memcpy(a->ch[i], old.ch[i], a->count*a->bps); } if(!a->planar) memcpy(a->ch[0], old.ch[0], a->count*a->ch_count*a->bps); av_freep(&old.data); a->count= count; return 1; }
14,711
0
static void virtio_scsi_command_complete(SCSIRequest *r, uint32_t status, size_t resid) { VirtIOSCSIReq *req = r->hba_private; uint8_t sense[SCSI_SENSE_BUF_SIZE]; uint32_t sense_len; if (r->io_canceled) { return; } req->resp.cmd->response = VIRTIO_SCSI_S_OK; req->resp.cmd->status = status; if (req->resp.cmd->status == GOOD) { req->resp.cmd->resid = tswap32(resid); } else { req->resp.cmd->resid = 0; sense_len = scsi_req_get_sense(r, sense, sizeof(sense)); sense_len = MIN(sense_len, req->resp_size - sizeof(req->resp.cmd)); memcpy(req->resp.cmd->sense, sense, sense_len); req->resp.cmd->sense_len = tswap32(sense_len); } virtio_scsi_complete_cmd_req(req); }
14,712
0
static void bitband_writeb(void *opaque, target_phys_addr_t offset, uint32_t value) { uint32_t addr; uint8_t mask; uint8_t v; addr = bitband_addr(opaque, offset); mask = (1 << ((offset >> 2) & 7)); cpu_physical_memory_read(addr, &v, 1); if (value & 1) v |= mask; else v &= ~mask; cpu_physical_memory_write(addr, &v, 1); }
14,714
0
void bdrv_enable_copy_on_read(BlockDriverState *bs) { bs->copy_on_read++; }
14,715
0
static void s390_cpu_initfn(Object *obj) { CPUState *cs = CPU(obj); S390CPU *cpu = S390_CPU(obj); CPUS390XState *env = &cpu->env; static bool inited; static int cpu_num = 0; #if !defined(CONFIG_USER_ONLY) struct tm tm; #endif cs->env_ptr = env; cpu_exec_init(env); #if !defined(CONFIG_USER_ONLY) qemu_register_reset(s390_cpu_machine_reset_cb, cpu); qemu_get_timedate(&tm, 0); env->tod_offset = TOD_UNIX_EPOCH + (time2tod(mktimegm(&tm)) * 1000000000ULL); env->tod_basetime = 0; env->tod_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, s390x_tod_timer, cpu); env->cpu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, s390x_cpu_timer, cpu); s390_cpu_set_state(CPU_STATE_STOPPED, cpu); #endif env->cpu_num = cpu_num++; env->ext_index = -1; if (tcg_enabled() && !inited) { inited = true; s390x_translate_init(); } }
14,717
0
static void t_gen_lsr(TCGv d, TCGv a, TCGv b) { TCGv t0, t_31; t0 = tcg_temp_new(TCG_TYPE_TL); t_31 = tcg_temp_new(TCG_TYPE_TL); tcg_gen_shr_tl(d, a, b); tcg_gen_movi_tl(t_31, 31); tcg_gen_sub_tl(t0, t_31, b); tcg_gen_sar_tl(t0, t0, t_31); tcg_gen_and_tl(t0, t0, d); tcg_gen_xor_tl(d, d, t0); tcg_temp_free(t0); tcg_temp_free(t_31); }
14,718
0
void nbd_client_new(NBDExport *exp, int csock, void (*close_fn)(NBDClient *)) { NBDClient *client; client = g_malloc0(sizeof(NBDClient)); client->refcount = 1; client->exp = exp; client->sock = csock; client->can_read = true; if (nbd_send_negotiate(client)) { shutdown(client->sock, 2); close_fn(client); return; } client->close = close_fn; qemu_co_mutex_init(&client->send_lock); nbd_set_handlers(client); if (exp) { QTAILQ_INSERT_TAIL(&exp->clients, client, next); nbd_export_get(exp); } }
14,719
0
static void do_info_status(Monitor *mon, QObject **ret_data) { *ret_data = qobject_from_jsonf("{ 'running': %i, 'singlestep': %i }", runstate_is_running(), singlestep); }
14,720
0
int event_notifier_test_and_clear(EventNotifier *e) { uint64_t value; int r = read(e->fd, &value, sizeof(value)); return r == sizeof(value); }
14,721
0
static int x8_decode_intra_mb(IntraX8Context *const w, const int chroma) { MpegEncContext *const s = w->s; uint8_t *scantable; int final, run, level; int ac_mode, dc_mode, est_run, dc_level; int pos, n; int zeros_only; int use_quant_matrix; int sign; assert(w->orient < 12); s->bdsp.clear_block(s->block[0]); if (chroma) dc_mode = 2; else dc_mode = !!w->est_run; // 0, 1 if (x8_get_dc_rlf(w, dc_mode, &dc_level, &final)) return -1; n = 0; zeros_only = 0; if (!final) { // decode ac use_quant_matrix = w->use_quant_matrix; if (chroma) { ac_mode = 1; est_run = 64; // not used } else { if (w->raw_orient < 3) use_quant_matrix = 0; if (w->raw_orient > 4) { ac_mode = 0; est_run = 64; } else { if (w->est_run > 1) { ac_mode = 2; est_run = w->est_run; } else { ac_mode = 3; est_run = 64; } } } x8_select_ac_table(w, ac_mode); /* scantable_selector[12] = { 0, 2, 0, 1, 1, 1, 0, 2, 2, 0, 1, 2 }; <- * -> 10'01' 00'10' 10'00' 01'01' 01'00' 10'00 => 0x928548 */ scantable = w->scantable[(0x928548 >> (2 * w->orient)) & 3].permutated; pos = 0; do { n++; if (n >= est_run) { ac_mode = 3; x8_select_ac_table(w, 3); } x8_get_ac_rlf(w, ac_mode, &run, &level, &final); pos += run + 1; if (pos > 63) { // this also handles vlc error in x8_get_ac_rlf return -1; } level = (level + 1) * w->dquant; level += w->qsum; sign = -get_bits1(&s->gb); level = (level ^ sign) - sign; if (use_quant_matrix) level = (level * quant_table[pos]) >> 8; s->block[0][scantable[pos]] = level; } while (!final); s->block_last_index[0] = pos; } else { // DC only s->block_last_index[0] = 0; if (w->flat_dc && ((unsigned) (dc_level + 1)) < 3) { // [-1; 1] int32_t divide_quant = !chroma ? w->divide_quant_dc_luma : w->divide_quant_dc_chroma; int32_t dc_quant = !chroma ? w->quant : w->quant_dc_chroma; // original intent dc_level += predicted_dc/quant; // but it got lost somewhere in the rounding dc_level += (w->predicted_dc * divide_quant + (1 << 12)) >> 13; dsp_x8_put_solidcolor(av_clip_uint8((dc_level * dc_quant + 4) >> 3), w->dest[chroma], s->current_picture.f->linesize[!!chroma]); goto block_placed; } zeros_only = (dc_level == 0); } if (!chroma) s->block[0][0] = dc_level * w->quant; else s->block[0][0] = dc_level * w->quant_dc_chroma; // there is !zero_only check in the original, but dc_level check is enough if ((unsigned int) (dc_level + 1) >= 3 && (w->edges & 3) != 3) { int direction; /* ac_comp_direction[orient] = { 0, 3, 3, 1, 1, 0, 0, 0, 2, 2, 2, 1 }; <- * -> 01'10' 10'10' 00'00' 00'01' 01'11' 11'00 => 0x6A017C */ direction = (0x6A017C >> (w->orient * 2)) & 3; if (direction != 3) { // modify block_last[] x8_ac_compensation(w, direction, s->block[0][0]); } } if (w->flat_dc) { dsp_x8_put_solidcolor(w->predicted_dc, w->dest[chroma], s->current_picture.f->linesize[!!chroma]); } else { w->dsp.spatial_compensation[w->orient](s->sc.edge_emu_buffer, w->dest[chroma], s->current_picture.f->linesize[!!chroma]); } if (!zeros_only) w->idsp.idct_add(w->dest[chroma], s->current_picture.f->linesize[!!chroma], s->block[0]); block_placed: if (!chroma) x8_update_predictions(w, w->orient, n); if (s->loop_filter) { uint8_t *ptr = w->dest[chroma]; int linesize = s->current_picture.f->linesize[!!chroma]; if (!((w->edges & 2) || (zeros_only && (w->orient | 4) == 4))) w->dsp.h_loop_filter(ptr, linesize, w->quant); if (!((w->edges & 1) || (zeros_only && (w->orient | 8) == 8))) w->dsp.v_loop_filter(ptr, linesize, w->quant); } return 0; }
14,722
0
static void bdrv_move_feature_fields(BlockDriverState *bs_dest, BlockDriverState *bs_src) { /* move some fields that need to stay attached to the device */ bs_dest->open_flags = bs_src->open_flags; /* dev info */ bs_dest->dev_ops = bs_src->dev_ops; bs_dest->dev_opaque = bs_src->dev_opaque; bs_dest->dev = bs_src->dev; bs_dest->buffer_alignment = bs_src->buffer_alignment; bs_dest->copy_on_read = bs_src->copy_on_read; bs_dest->enable_write_cache = bs_src->enable_write_cache; /* i/o timing parameters */ bs_dest->slice_time = bs_src->slice_time; bs_dest->slice_start = bs_src->slice_start; bs_dest->slice_end = bs_src->slice_end; bs_dest->io_limits = bs_src->io_limits; bs_dest->io_base = bs_src->io_base; bs_dest->throttled_reqs = bs_src->throttled_reqs; bs_dest->block_timer = bs_src->block_timer; bs_dest->io_limits_enabled = bs_src->io_limits_enabled; /* geometry */ bs_dest->cyls = bs_src->cyls; bs_dest->heads = bs_src->heads; bs_dest->secs = bs_src->secs; bs_dest->translation = bs_src->translation; /* r/w error */ bs_dest->on_read_error = bs_src->on_read_error; bs_dest->on_write_error = bs_src->on_write_error; /* i/o status */ bs_dest->iostatus_enabled = bs_src->iostatus_enabled; bs_dest->iostatus = bs_src->iostatus; /* dirty bitmap */ bs_dest->dirty_count = bs_src->dirty_count; bs_dest->dirty_bitmap = bs_src->dirty_bitmap; /* job */ bs_dest->in_use = bs_src->in_use; bs_dest->job = bs_src->job; /* keep the same entry in bdrv_states */ pstrcpy(bs_dest->device_name, sizeof(bs_dest->device_name), bs_src->device_name); bs_dest->list = bs_src->list; }
14,724
0
static void mmubooke_create_initial_mapping(CPUPPCState *env) { struct boot_info *bi = env->load_info; ppcmas_tlb_t *tlb = booke206_get_tlbm(env, 1, 0, 0); hwaddr size, dt_end; int ps; /* Our initial TLB entry needs to cover everything from 0 to the device tree top */ dt_end = bi->dt_base + bi->dt_size; ps = booke206_page_size_to_tlb(dt_end) + 1; if (ps & 1) { /* e500v2 can only do even TLB size bits */ ps++; } size = (ps << MAS1_TSIZE_SHIFT); tlb->mas1 = MAS1_VALID | size; tlb->mas2 = 0; tlb->mas7_3 = 0; tlb->mas7_3 |= MAS3_UR | MAS3_UW | MAS3_UX | MAS3_SR | MAS3_SW | MAS3_SX; env->tlb_dirty = true; }
14,725
0
static unsigned int dec_bound_m(DisasContext *dc) { TCGv l[2]; int memsize = memsize_zz(dc); int insn_len; DIS(fprintf (logfile, "bound.%d [$r%u%s, $r%u\n", memsize_char(memsize), dc->op1, dc->postinc ? "+]" : "]", dc->op2)); l[0] = tcg_temp_local_new(TCG_TYPE_TL); l[1] = tcg_temp_local_new(TCG_TYPE_TL); insn_len = dec_prep_alu_m(dc, 0, memsize, l[0], l[1]); cris_cc_mask(dc, CC_MASK_NZ); cris_alu(dc, CC_OP_BOUND, cpu_R[dc->op2], l[0], l[1], 4); do_postinc(dc, memsize); tcg_temp_free(l[0]); tcg_temp_free(l[1]); return insn_len; }
14,726
0
void omap_badwidth_write16(void *opaque, target_phys_addr_t addr, uint32_t value) { uint16_t val16 = value; OMAP_16B_REG(addr); cpu_physical_memory_write(addr, (void *) &val16, 2); }
14,727
0
static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2) { gen_helper_fcmped(cpu_env, r_rs1, r_rs2); }
14,728
0
static void hid_pointer_event(DeviceState *dev, QemuConsole *src, InputEvent *evt) { static const int bmap[INPUT_BUTTON__MAX] = { [INPUT_BUTTON_LEFT] = 0x01, [INPUT_BUTTON_RIGHT] = 0x02, [INPUT_BUTTON_MIDDLE] = 0x04, }; HIDState *hs = (HIDState *)dev; HIDPointerEvent *e; InputMoveEvent *move; InputBtnEvent *btn; assert(hs->n < QUEUE_LENGTH); e = &hs->ptr.queue[(hs->head + hs->n) & QUEUE_MASK]; switch (evt->type) { case INPUT_EVENT_KIND_REL: move = evt->u.rel; if (move->axis == INPUT_AXIS_X) { e->xdx += move->value; } else if (move->axis == INPUT_AXIS_Y) { e->ydy += move->value; } break; case INPUT_EVENT_KIND_ABS: move = evt->u.abs; if (move->axis == INPUT_AXIS_X) { e->xdx = move->value; } else if (move->axis == INPUT_AXIS_Y) { e->ydy = move->value; } break; case INPUT_EVENT_KIND_BTN: btn = evt->u.btn; if (btn->down) { e->buttons_state |= bmap[btn->button]; if (btn->button == INPUT_BUTTON_WHEEL_UP) { e->dz--; } else if (btn->button == INPUT_BUTTON_WHEEL_DOWN) { e->dz++; } } else { e->buttons_state &= ~bmap[btn->button]; } break; default: /* keep gcc happy */ break; } }
14,729
0
static int aio_read_f(BlockBackend *blk, int argc, char **argv) { int nr_iov, c; struct aio_ctx *ctx = g_new0(struct aio_ctx, 1); ctx->blk = blk; while ((c = getopt(argc, argv, "CP:qv")) != EOF) { switch (c) { case 'C': ctx->Cflag = 1; break; case 'P': ctx->Pflag = 1; ctx->pattern = parse_pattern(optarg); if (ctx->pattern < 0) { g_free(ctx); return 0; } break; case 'q': ctx->qflag = 1; break; case 'v': ctx->vflag = 1; break; default: g_free(ctx); return qemuio_command_usage(&aio_read_cmd); } } if (optind > argc - 2) { g_free(ctx); return qemuio_command_usage(&aio_read_cmd); } ctx->offset = cvtnum(argv[optind]); if (ctx->offset < 0) { printf("non-numeric length argument -- %s\n", argv[optind]); g_free(ctx); return 0; } optind++; if (ctx->offset & 0x1ff) { printf("offset %" PRId64 " is not sector aligned\n", ctx->offset); g_free(ctx); return 0; } nr_iov = argc - optind; ctx->buf = create_iovec(blk, &ctx->qiov, &argv[optind], nr_iov, 0xab); if (ctx->buf == NULL) { g_free(ctx); return 0; } gettimeofday(&ctx->t1, NULL); block_acct_start(blk_get_stats(blk), &ctx->acct, ctx->qiov.size, BLOCK_ACCT_READ); blk_aio_readv(blk, ctx->offset >> 9, &ctx->qiov, ctx->qiov.size >> 9, aio_read_done, ctx); return 0; }
14,730
0
static inline void _cpu_ppc_store_decr(PowerPCCPU *cpu, uint32_t decr, uint32_t value, int is_excp) { ppc_tb_t *tb_env = cpu->env.tb_env; __cpu_ppc_store_decr(cpu, &tb_env->decr_next, tb_env->decr_timer, &cpu_ppc_decr_excp, decr, value, is_excp); }
14,731
0
static GSList *gd_vc_init(GtkDisplayState *s, VirtualConsole *vc, int index, GSList *group, GtkWidget *view_menu) { const char *label; char buffer[32]; char path[32]; #if VTE_CHECK_VERSION(0, 26, 0) VtePty *pty; #endif GIOChannel *chan; GtkWidget *scrolled_window; GtkAdjustment *vadjustment; int master_fd, slave_fd; snprintf(buffer, sizeof(buffer), "vc%d", index); snprintf(path, sizeof(path), "<QEMU>/View/VC%d", index); vc->chr = vcs[index]; if (vc->chr->label) { label = vc->chr->label; } else { label = buffer; } vc->menu_item = gtk_radio_menu_item_new_with_mnemonic(group, label); group = gtk_radio_menu_item_get_group(GTK_RADIO_MENU_ITEM(vc->menu_item)); gtk_menu_item_set_accel_path(GTK_MENU_ITEM(vc->menu_item), path); gtk_accel_map_add_entry(path, GDK_KEY_2 + index, GDK_CONTROL_MASK | GDK_MOD1_MASK); vc->terminal = vte_terminal_new(); master_fd = qemu_openpty_raw(&slave_fd, NULL); g_assert(master_fd != -1); #if VTE_CHECK_VERSION(0, 26, 0) pty = vte_pty_new_foreign(master_fd, NULL); vte_terminal_set_pty_object(VTE_TERMINAL(vc->terminal), pty); #else vte_terminal_set_pty(VTE_TERMINAL(vc->terminal), master_fd); #endif vte_terminal_set_scrollback_lines(VTE_TERMINAL(vc->terminal), -1); vadjustment = vte_terminal_get_adjustment(VTE_TERMINAL(vc->terminal)); scrolled_window = gtk_scrolled_window_new(NULL, vadjustment); gtk_container_add(GTK_CONTAINER(scrolled_window), vc->terminal); vte_terminal_set_size(VTE_TERMINAL(vc->terminal), 80, 25); vc->fd = slave_fd; vc->chr->opaque = vc; vc->scrolled_window = scrolled_window; gtk_scrolled_window_set_policy(GTK_SCROLLED_WINDOW(vc->scrolled_window), GTK_POLICY_AUTOMATIC, GTK_POLICY_AUTOMATIC); gtk_notebook_append_page(GTK_NOTEBOOK(s->notebook), scrolled_window, gtk_label_new(label)); g_signal_connect(vc->menu_item, "activate", G_CALLBACK(gd_menu_switch_vc), s); gtk_menu_shell_append(GTK_MENU_SHELL(view_menu), vc->menu_item); qemu_chr_be_generic_open(vc->chr); if (vc->chr->init) { vc->chr->init(vc->chr); } chan = g_io_channel_unix_new(vc->fd); g_io_add_watch(chan, G_IO_IN, gd_vc_in, vc); return group; }
14,732
0
static void patch_reloc(uint8_t *code_ptr, int type, tcg_target_long value, tcg_target_long addend) { value += addend; switch (type) { case R_SPARC_32: if (value != (uint32_t)value) tcg_abort(); *(uint32_t *)code_ptr = value; break; case R_SPARC_WDISP22: value -= (long)code_ptr; value >>= 2; if (!check_fit(value, 22)) tcg_abort(); *(uint32_t *)code_ptr = ((*(uint32_t *)code_ptr) & ~0x3fffff) | value; break; default: tcg_abort(); } }
14,733
0
static void frame_end(MpegEncContext *s) { int i; if (s->unrestricted_mv && s->current_picture.reference && !s->intra_only) { const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt); int hshift = desc->log2_chroma_w; int vshift = desc->log2_chroma_h; s->mpvencdsp.draw_edges(s->current_picture.f->data[0], s->linesize, s->h_edge_pos, s->v_edge_pos, EDGE_WIDTH, EDGE_WIDTH, EDGE_TOP | EDGE_BOTTOM); s->mpvencdsp.draw_edges(s->current_picture.f->data[1], s->uvlinesize, s->h_edge_pos >> hshift, s->v_edge_pos >> vshift, EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, EDGE_TOP | EDGE_BOTTOM); s->mpvencdsp.draw_edges(s->current_picture.f->data[2], s->uvlinesize, s->h_edge_pos >> hshift, s->v_edge_pos >> vshift, EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, EDGE_TOP | EDGE_BOTTOM); } emms_c(); s->last_pict_type = s->pict_type; s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f->quality; if (s->pict_type!= AV_PICTURE_TYPE_B) s->last_non_b_pict_type = s->pict_type; if (s->encoding) { /* release non-reference frames */ for (i = 0; i < MAX_PICTURE_COUNT; i++) { if (!s->picture[i].reference) ff_mpeg_unref_picture(s->avctx, &s->picture[i]); } } s->avctx->coded_frame = s->current_picture_ptr->f; }
14,734
0
static uint64_t sectors_covered_by_bitmap_cluster(const BDRVQcow2State *s, const BdrvDirtyBitmap *bitmap) { uint32_t sector_granularity = bdrv_dirty_bitmap_granularity(bitmap) >> BDRV_SECTOR_BITS; return (uint64_t)sector_granularity * (s->cluster_size << 3); }
14,735
0
static int vmdk_open_sparse(BlockDriverState *bs, BlockDriverState *file, int flags, char *buf, Error **errp) { uint32_t magic; magic = ldl_be_p(buf); switch (magic) { case VMDK3_MAGIC: return vmdk_open_vmfs_sparse(bs, file, flags, errp); break; case VMDK4_MAGIC: return vmdk_open_vmdk4(bs, file, flags, errp); break; default: return -EMEDIUMTYPE; break; } }
14,736
0
static void tcg_reg_alloc_op(TCGContext *s, const TCGOpDef *def, TCGOpcode opc, const TCGArg *args, uint16_t dead_args, uint8_t sync_args) { TCGRegSet allocated_regs; int i, k, nb_iargs, nb_oargs, reg; TCGArg arg; const TCGArgConstraint *arg_ct; TCGTemp *ts; TCGArg new_args[TCG_MAX_OP_ARGS]; int const_args[TCG_MAX_OP_ARGS]; nb_oargs = def->nb_oargs; nb_iargs = def->nb_iargs; /* copy constants */ memcpy(new_args + nb_oargs + nb_iargs, args + nb_oargs + nb_iargs, sizeof(TCGArg) * def->nb_cargs); /* satisfy input constraints */ tcg_regset_set(allocated_regs, s->reserved_regs); for(k = 0; k < nb_iargs; k++) { i = def->sorted_args[nb_oargs + k]; arg = args[i]; arg_ct = &def->args_ct[i]; ts = &s->temps[arg]; if (ts->val_type == TEMP_VAL_MEM) { reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs); tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset); ts->val_type = TEMP_VAL_REG; ts->reg = reg; ts->mem_coherent = 1; s->reg_to_temp[reg] = arg; } else if (ts->val_type == TEMP_VAL_CONST) { if (tcg_target_const_match(ts->val, ts->type, arg_ct)) { /* constant is OK for instruction */ const_args[i] = 1; new_args[i] = ts->val; goto iarg_end; } else { /* need to move to a register */ reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs); tcg_out_movi(s, ts->type, reg, ts->val); ts->val_type = TEMP_VAL_REG; ts->reg = reg; ts->mem_coherent = 0; s->reg_to_temp[reg] = arg; } } assert(ts->val_type == TEMP_VAL_REG); if (arg_ct->ct & TCG_CT_IALIAS) { if (ts->fixed_reg) { /* if fixed register, we must allocate a new register if the alias is not the same register */ if (arg != args[arg_ct->alias_index]) goto allocate_in_reg; } else { /* if the input is aliased to an output and if it is not dead after the instruction, we must allocate a new register and move it */ if (!IS_DEAD_ARG(i)) { goto allocate_in_reg; } /* check if the current register has already been allocated for another input aliased to an output */ int k2, i2; for (k2 = 0 ; k2 < k ; k2++) { i2 = def->sorted_args[nb_oargs + k2]; if ((def->args_ct[i2].ct & TCG_CT_IALIAS) && (new_args[i2] == ts->reg)) { goto allocate_in_reg; } } } } reg = ts->reg; if (tcg_regset_test_reg(arg_ct->u.regs, reg)) { /* nothing to do : the constraint is satisfied */ } else { allocate_in_reg: /* allocate a new register matching the constraint and move the temporary register into it */ reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs); tcg_out_mov(s, ts->type, reg, ts->reg); } new_args[i] = reg; const_args[i] = 0; tcg_regset_set_reg(allocated_regs, reg); iarg_end: ; } /* mark dead temporaries and free the associated registers */ for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) { if (IS_DEAD_ARG(i)) { temp_dead(s, args[i]); } } if (def->flags & TCG_OPF_BB_END) { tcg_reg_alloc_bb_end(s, allocated_regs); } else { if (def->flags & TCG_OPF_CALL_CLOBBER) { /* XXX: permit generic clobber register list ? */ for(reg = 0; reg < TCG_TARGET_NB_REGS; reg++) { if (tcg_regset_test_reg(tcg_target_call_clobber_regs, reg)) { tcg_reg_free(s, reg); } } } if (def->flags & TCG_OPF_SIDE_EFFECTS) { /* sync globals if the op has side effects and might trigger an exception. 
*/ sync_globals(s, allocated_regs); } /* satisfy the output constraints */ tcg_regset_set(allocated_regs, s->reserved_regs); for(k = 0; k < nb_oargs; k++) { i = def->sorted_args[k]; arg = args[i]; arg_ct = &def->args_ct[i]; ts = &s->temps[arg]; if (arg_ct->ct & TCG_CT_ALIAS) { reg = new_args[arg_ct->alias_index]; } else { /* if fixed register, we try to use it */ reg = ts->reg; if (ts->fixed_reg && tcg_regset_test_reg(arg_ct->u.regs, reg)) { goto oarg_end; } reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs); } tcg_regset_set_reg(allocated_regs, reg); /* if a fixed register is used, then a move will be done afterwards */ if (!ts->fixed_reg) { if (ts->val_type == TEMP_VAL_REG) { s->reg_to_temp[ts->reg] = -1; } ts->val_type = TEMP_VAL_REG; ts->reg = reg; /* temp value is modified, so the value kept in memory is potentially not the same */ ts->mem_coherent = 0; s->reg_to_temp[reg] = arg; } oarg_end: new_args[i] = reg; } } /* emit instruction */ tcg_out_op(s, opc, new_args, const_args); /* move the outputs in the correct register if needed */ for(i = 0; i < nb_oargs; i++) { ts = &s->temps[args[i]]; reg = new_args[i]; if (ts->fixed_reg && ts->reg != reg) { tcg_out_mov(s, ts->type, ts->reg, reg); } if (NEED_SYNC_ARG(i)) { tcg_reg_sync(s, reg); } if (IS_DEAD_ARG(i)) { temp_dead(s, args[i]); } } }
14,737
1
static void arm_cpu_class_init(ObjectClass *oc, void *data) { ARMCPUClass *acc = ARM_CPU_CLASS(oc); CPUClass *cc = CPU_CLASS(acc); DeviceClass *dc = DEVICE_CLASS(oc); acc->parent_realize = dc->realize; dc->realize = arm_cpu_realizefn; dc->props = arm_cpu_properties; acc->parent_reset = cc->reset; cc->reset = arm_cpu_reset; cc->class_by_name = arm_cpu_class_by_name; cc->has_work = arm_cpu_has_work; cc->cpu_exec_interrupt = arm_cpu_exec_interrupt; cc->dump_state = arm_cpu_dump_state; cc->set_pc = arm_cpu_set_pc; cc->gdb_read_register = arm_cpu_gdb_read_register; cc->gdb_write_register = arm_cpu_gdb_write_register; #ifdef CONFIG_USER_ONLY cc->handle_mmu_fault = arm_cpu_handle_mmu_fault; #else cc->do_interrupt = arm_cpu_do_interrupt; cc->get_phys_page_debug = arm_cpu_get_phys_page_debug; cc->vmsd = &vmstate_arm_cpu; cc->virtio_is_big_endian = arm_cpu_is_big_endian; #endif cc->gdb_num_core_regs = 26; cc->gdb_core_xml_file = "arm-core.xml"; cc->gdb_stop_before_watchpoint = true; cc->debug_excp_handler = arm_debug_excp_handler; cc->disas_set_info = arm_disas_set_info; }
14,738
1
static int mp3info(void *data, int *byteSize, int *samplesPerFrame, int *sampleRate, int *isMono ) { uint8_t *dataTmp = (uint8_t *)data; uint32_t header = ( (uint32_t)dataTmp[0] << 24 ) | ( (uint32_t)dataTmp[1] << 16 ) | ( (uint32_t)dataTmp[2] << 8 ) | (uint32_t)dataTmp[3]; int layerID = 3 - ((header >> 17) & 0x03); int bitRateID = ((header >> 12) & 0x0f); int sampleRateID = ((header >> 10) & 0x03); int bitRate = 0; int bitsPerSlot = sBitsPerSlot[layerID]; int isPadded = ((header >> 9) & 0x01); if ( (( header >> 21 ) & 0x7ff) != 0x7ff ) { return 0; } if ( !isPadded ) { printf("Fatal error: mp3 data is not padded!\n"); exit(0); } *isMono = ((header >> 6) & 0x03) == 0x03; if ( (header >> 19 ) & 0x01 ) { *sampleRate = sSampleRates[0][sampleRateID]; bitRate = sBitRates[0][layerID][bitRateID] * 1000; *samplesPerFrame = sSamplesPerFrame[0][layerID]; } else { if ( (header >> 20) & 0x01 ) { *sampleRate = sSampleRates[1][sampleRateID]; bitRate = sBitRates[1][layerID][bitRateID] * 1000; *samplesPerFrame = sSamplesPerFrame[1][layerID]; } else { *sampleRate = sSampleRates[2][sampleRateID]; bitRate = sBitRates[1][layerID][bitRateID] * 1000; *samplesPerFrame = sSamplesPerFrame[2][layerID]; } } *byteSize = ( ( ( ( *samplesPerFrame * (bitRate / bitsPerSlot) ) / *sampleRate ) + isPadded ) * bitsPerSlot); return 1; }
14,739
1
void slirp_input(const uint8_t *pkt, int pkt_len) { struct mbuf *m; int proto; if (pkt_len < ETH_HLEN) return; proto = ntohs(*(uint16_t *)(pkt + 12)); switch(proto) { case ETH_P_ARP: arp_input(pkt, pkt_len); break; case ETH_P_IP: m = m_get(); if (!m) return; /* Note: we add to align the IP header */ m->m_len = pkt_len + 2; memcpy(m->m_data + 2, pkt, pkt_len); m->m_data += 2 + ETH_HLEN; m->m_len -= 2 + ETH_HLEN; ip_input(m); break; default: break; } }
14,742
1
void OPPROTO op_srl_T0_T1 (void) { T0 = T0 >> T1; RETURN(); }
14,743
1
void ff_decode_dxt3(const uint8_t *s, uint8_t *dst, const unsigned int w, const unsigned int h, const unsigned int stride) { unsigned int bx, by, qstride = stride/4; uint32_t *d = (uint32_t *) dst; for (by=0; by < h/4; by++, d += stride-w) for (bx=0; bx < w/4; bx++, s+=16, d+=4) dxt1_decode_pixels(s+8, d, qstride, 1, AV_RL64(s)); }
14,744
1
static void gen_mtdcr(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); #else TCGv dcrn; if (unlikely(ctx->pr)) { gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); return; } /* NIP cannot be restored if the memory exception comes from an helper */ gen_update_nip(ctx, ctx->nip - 4); dcrn = tcg_const_tl(SPR(ctx->opcode)); gen_helper_store_dcr(cpu_env, dcrn, cpu_gpr[rS(ctx->opcode)]); tcg_temp_free(dcrn); #endif }
14,746
0
static void avc_luma_mid_and_aver_dst_8w_msa(const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, int32_t height) { uint32_t loop_cnt; v16i8 src0, src1, src2, src3, src4; v16i8 mask0, mask1, mask2; v8i16 hz_out0, hz_out1, hz_out2, hz_out3; v8i16 hz_out4, hz_out5, hz_out6, hz_out7, hz_out8; v16u8 dst0, dst1, dst2, dst3; v8i16 res0, res1, res2, res3; LD_SB3(&luma_mask_arr[0], 16, mask0, mask1, mask2); LD_SB5(src, src_stride, src0, src1, src2, src3, src4); XORI_B5_128_SB(src0, src1, src2, src3, src4); src += (5 * src_stride); hz_out0 = AVC_HORZ_FILTER_SH(src0, src0, mask0, mask1, mask2); hz_out1 = AVC_HORZ_FILTER_SH(src1, src1, mask0, mask1, mask2); hz_out2 = AVC_HORZ_FILTER_SH(src2, src2, mask0, mask1, mask2); hz_out3 = AVC_HORZ_FILTER_SH(src3, src3, mask0, mask1, mask2); hz_out4 = AVC_HORZ_FILTER_SH(src4, src4, mask0, mask1, mask2); for (loop_cnt = (height >> 2); loop_cnt--;) { LD_SB4(src, src_stride, src0, src1, src2, src3); XORI_B4_128_SB(src0, src1, src2, src3); src += (4 * src_stride); hz_out5 = AVC_HORZ_FILTER_SH(src0, src0, mask0, mask1, mask2); hz_out6 = AVC_HORZ_FILTER_SH(src1, src1, mask0, mask1, mask2); hz_out7 = AVC_HORZ_FILTER_SH(src2, src2, mask0, mask1, mask2); hz_out8 = AVC_HORZ_FILTER_SH(src3, src3, mask0, mask1, mask2); res0 = AVC_CALC_DPADD_H_6PIX_2COEFF_SH(hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, hz_out5); res1 = AVC_CALC_DPADD_H_6PIX_2COEFF_SH(hz_out1, hz_out2, hz_out3, hz_out4, hz_out5, hz_out6); res2 = AVC_CALC_DPADD_H_6PIX_2COEFF_SH(hz_out2, hz_out3, hz_out4, hz_out5, hz_out6, hz_out7); res3 = AVC_CALC_DPADD_H_6PIX_2COEFF_SH(hz_out3, hz_out4, hz_out5, hz_out6, hz_out7, hz_out8); LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3); ILVR_D2_UB(dst1, dst0, dst3, dst2, dst0, dst1); CONVERT_UB_AVG_ST8x4_UB(res0, res1, res2, res3, dst0, dst1, dst, dst_stride); dst += (4 * dst_stride); hz_out3 = hz_out7; hz_out1 = hz_out5; hz_out5 = hz_out4; hz_out4 = hz_out8; hz_out2 = hz_out6; hz_out0 = hz_out5; } }
14,747
0
static void check_mc(void) { LOCAL_ALIGNED_32(uint8_t, buf, [72 * 72 * 2]); LOCAL_ALIGNED_32(uint8_t, dst0, [64 * 64 * 2]); LOCAL_ALIGNED_32(uint8_t, dst1, [64 * 64 * 2]); VP9DSPContext dsp; int op, hsize, bit_depth, filter, dx, dy; declare_func(void, uint8_t *dst, ptrdiff_t dst_stride, const uint8_t *ref, ptrdiff_t ref_stride, int h, int mx, int my); static const char *const filter_names[4] = { "8tap_smooth", "8tap_regular", "8tap_sharp", "bilin" }; static const char *const subpel_names[2][2] = { { "", "h" }, { "v", "hv" } }; static const char *const op_names[2] = { "put", "avg" }; char str[256]; for (op = 0; op < 2; op++) { for (bit_depth = 8; bit_depth <= 12; bit_depth += 2) { ff_vp9dsp_init(&dsp, bit_depth, 0); for (hsize = 0; hsize < 5; hsize++) { int size = 64 >> hsize; for (filter = 0; filter < 4; filter++) { for (dx = 0; dx < 2; dx++) { for (dy = 0; dy < 2; dy++) { if (dx || dy) { sprintf(str, "%s_%s_%d%s", op_names[op], filter_names[filter], size, subpel_names[dy][dx]); } else { sprintf(str, "%s%d", op_names[op], size); } if (check_func(dsp.mc[hsize][filter][op][dx][dy], "vp9_%s_%dbpp", str, bit_depth)) { int mx = dx ? 1 + (rnd() % 14) : 0; int my = dy ? 1 + (rnd() % 14) : 0; randomize_buffers(); call_ref(dst0, size * SIZEOF_PIXEL, src, SRC_BUF_STRIDE * SIZEOF_PIXEL, size, mx, my); call_new(dst1, size * SIZEOF_PIXEL, src, SRC_BUF_STRIDE * SIZEOF_PIXEL, size, mx, my); if (memcmp(dst0, dst1, DST_BUF_SIZE)) fail(); // simd implementations for each filter of subpel // functions are identical if (filter >= 1 && filter <= 2) continue; // 10/12 bpp for bilin are identical if (bit_depth == 12 && filter == 3) continue; bench_new(dst1, size * SIZEOF_PIXEL, src, SRC_BUF_STRIDE * SIZEOF_PIXEL, size, mx, my); } } } } } } } report("mc"); }
14,748
0
static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts) { InputStream *ist = s->opaque; const enum AVPixelFormat *p; int ret; for (p = pix_fmts; *p != -1; p++) { const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p); const HWAccel *hwaccel; if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL)) break; hwaccel = get_hwaccel(*p, ist->hwaccel_id); if (!hwaccel || (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) || (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id)) continue; ret = hwaccel->init(s); if (ret < 0) { if (ist->hwaccel_id == hwaccel->id) { av_log(NULL, AV_LOG_FATAL, "%s hwaccel requested for input stream #%d:%d, " "but cannot be initialized.\n", hwaccel->name, ist->file_index, ist->st->index); return AV_PIX_FMT_NONE; } continue; } if (ist->hw_frames_ctx) { s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx); if (!s->hw_frames_ctx) return AV_PIX_FMT_NONE; } ist->active_hwaccel_id = hwaccel->id; ist->hwaccel_pix_fmt = *p; break; } return *p; }
14,749
1
void qdev_prop_allow_set_link_before_realize(Object *obj, const char *name, Object *val, Error **errp) { DeviceState *dev = DEVICE(obj); if (dev->realized) { error_setg(errp, "Attempt to set link property '%s' on device '%s' " "(type '%s') after it was realized", name, dev->id, object_get_typename(obj)); } }
14,751
1
static void uninit(struct vf_instance *vf) { free(vf->priv); }
14,752
1
static av_cold int encode_init(AVCodecContext *avctx) { NellyMoserEncodeContext *s = avctx->priv_data; int i, ret; if (avctx->channels != 1) { av_log(avctx, AV_LOG_ERROR, "Nellymoser supports only 1 channel\n"); return AVERROR(EINVAL); } if (avctx->sample_rate != 8000 && avctx->sample_rate != 16000 && avctx->sample_rate != 11025 && avctx->sample_rate != 22050 && avctx->sample_rate != 44100 && avctx->strict_std_compliance >= FF_COMPLIANCE_NORMAL) { av_log(avctx, AV_LOG_ERROR, "Nellymoser works only with 8000, 16000, 11025, 22050 and 44100 sample rate\n"); return AVERROR(EINVAL); } avctx->frame_size = NELLY_SAMPLES; avctx->delay = NELLY_BUF_LEN; ff_af_queue_init(avctx, &s->afq); s->avctx = avctx; if ((ret = ff_mdct_init(&s->mdct_ctx, 8, 0, 32768.0)) < 0) goto error; ff_dsputil_init(&s->dsp, avctx); /* Generate overlap window */ ff_sine_window_init(ff_sine_128, 128); for (i = 0; i < POW_TABLE_SIZE; i++) pow_table[i] = -pow(2, -i / 2048.0 - 3.0 + POW_TABLE_OFFSET); if (s->avctx->trellis) { s->opt = av_malloc(NELLY_BANDS * OPT_SIZE * sizeof(float )); s->path = av_malloc(NELLY_BANDS * OPT_SIZE * sizeof(uint8_t)); if (!s->opt || !s->path) { ret = AVERROR(ENOMEM); goto error; } } #if FF_API_OLD_ENCODE_AUDIO avctx->coded_frame = avcodec_alloc_frame(); if (!avctx->coded_frame) { ret = AVERROR(ENOMEM); goto error; } #endif return 0; error: encode_end(avctx); return ret; }
14,753
1
static int ass_encode_frame(AVCodecContext *avctx, unsigned char *buf, int bufsize, const AVSubtitle *sub) { ASSEncodeContext *s = avctx->priv_data; int i, len, total_len = 0; for (i=0; i<sub->num_rects; i++) { char ass_line[2048]; const char *ass = sub->rects[i]->ass; if (sub->rects[i]->type != SUBTITLE_ASS) { av_log(avctx, AV_LOG_ERROR, "Only SUBTITLE_ASS type supported.\n"); return -1; } if (strncmp(ass, "Dialogue: ", 10)) { av_log(avctx, AV_LOG_ERROR, "AVSubtitle rectangle ass \"%s\"" " does not look like a SSA markup\n", ass); return AVERROR_INVALIDDATA; } if (avctx->codec->id == AV_CODEC_ID_ASS) { long int layer; char *p; if (i > 0) { av_log(avctx, AV_LOG_ERROR, "ASS encoder supports only one " "ASS rectangle field.\n"); return AVERROR_INVALIDDATA; } ass += 10; // skip "Dialogue: " /* parse Layer field. If it's a Marked field, the content * will be "Marked=N" instead of the layer num, so we will * have layer=0, which is fine. */ layer = strtol(ass, &p, 10); if (*p) p += strcspn(p, ",") + 1; // skip layer or marked if (*p) p += strcspn(p, ",") + 1; // skip start timestamp if (*p) p += strcspn(p, ",") + 1; // skip end timestamp snprintf(ass_line, sizeof(ass_line), "%d,%ld,%s", ++s->id, layer, p); ass_line[strcspn(ass_line, "\r\n")] = 0; ass = ass_line; } len = av_strlcpy(buf+total_len, ass, bufsize-total_len); if (len > bufsize-total_len-1) { av_log(avctx, AV_LOG_ERROR, "Buffer too small for ASS event.\n"); return -1; } total_len += len; } return total_len; }
14,754
1
static void lsp2lpc(int16_t *lpc) { int f1[LPC_ORDER / 2 + 1]; int f2[LPC_ORDER / 2 + 1]; int i, j; /* Calculate negative cosine */ for (j = 0; j < LPC_ORDER; j++) { int index = (lpc[j] >> 7) & 0x1FF; int offset = lpc[j] & 0x7f; int temp1 = cos_tab[index] << 16; int temp2 = (cos_tab[index + 1] - cos_tab[index]) * ((offset << 8) + 0x80) << 1; lpc[j] = -(av_sat_dadd32(1 << 15, temp1 + temp2) >> 16); } /* * Compute sum and difference polynomial coefficients * (bitexact alternative to lsp2poly() in lsp.c) */ /* Initialize with values in Q28 */ f1[0] = 1 << 28; f1[1] = (lpc[0] << 14) + (lpc[2] << 14); f1[2] = lpc[0] * lpc[2] + (2 << 28); f2[0] = 1 << 28; f2[1] = (lpc[1] << 14) + (lpc[3] << 14); f2[2] = lpc[1] * lpc[3] + (2 << 28); /* * Calculate and scale the coefficients by 1/2 in * each iteration for a final scaling factor of Q25 */ for (i = 2; i < LPC_ORDER / 2; i++) { f1[i + 1] = f1[i - 1] + MULL2(f1[i], lpc[2 * i]); f2[i + 1] = f2[i - 1] + MULL2(f2[i], lpc[2 * i + 1]); for (j = i; j >= 2; j--) { f1[j] = MULL2(f1[j - 1], lpc[2 * i]) + (f1[j] >> 1) + (f1[j - 2] >> 1); f2[j] = MULL2(f2[j - 1], lpc[2 * i + 1]) + (f2[j] >> 1) + (f2[j - 2] >> 1); } f1[0] >>= 1; f2[0] >>= 1; f1[1] = ((lpc[2 * i] << 16 >> i) + f1[1]) >> 1; f2[1] = ((lpc[2 * i + 1] << 16 >> i) + f2[1]) >> 1; } /* Convert polynomial coefficients to LPC coefficients */ for (i = 0; i < LPC_ORDER / 2; i++) { int64_t ff1 = f1[i + 1] + f1[i]; int64_t ff2 = f2[i + 1] - f2[i]; lpc[i] = av_clipl_int32(((ff1 + ff2) << 3) + (1 << 15)) >> 16; lpc[LPC_ORDER - i - 1] = av_clipl_int32(((ff1 - ff2) << 3) + (1 << 15)) >> 16; } }
14,755
1
static void blockdev_backup_abort(BlkActionState *common) { BlockdevBackupState *state = DO_UPCAST(BlockdevBackupState, common, common); BlockDriverState *bs = state->bs; /* Only cancel if it's the job we started */ if (bs && bs->job && bs->job == state->job) { block_job_cancel_sync(bs->job); } }
14,756
1
static void pc_init_pci_1_2(QEMUMachineInitArgs *args) { disable_kvm_pv_eoi(); enable_compat_apic_id_mode(); pc_sysfw_flash_vs_rom_bug_compatible = true; has_pvpanic = false; pc_init_pci(args); }
14,757
1
static int decode_nal_sei_message(GetBitContext *gb, void *logctx, HEVCSEI *s, const HEVCParamSets *ps, int nal_unit_type) { int payload_type = 0; int payload_size = 0; int byte = 0xFF; av_log(logctx, AV_LOG_DEBUG, "Decoding SEI\n"); while (byte == 0xFF) { byte = get_bits(gb, 8); payload_type += byte; } byte = 0xFF; while (byte == 0xFF) { byte = get_bits(gb, 8); payload_size += byte; } if (nal_unit_type == HEVC_NAL_SEI_PREFIX) { return decode_nal_sei_prefix(gb, logctx, s, ps, payload_type, payload_size); } else { /* nal_unit_type == NAL_SEI_SUFFIX */ return decode_nal_sei_suffix(gb, logctx, s, payload_type, payload_size); } }
14,759
1
static void parse_type_number(Visitor *v, const char *name, double *obj, Error **errp) { StringInputVisitor *siv = to_siv(v); char *endp = (char *) siv->string; double val; errno = 0; if (siv->string) { val = strtod(siv->string, &endp); } if (!siv->string || errno || endp == siv->string || *endp) { error_setg(errp, QERR_INVALID_PARAMETER_TYPE, name ? name : "null", "number"); return; } *obj = val; }
14,760
1
int arm_set_cpu_off(uint64_t cpuid) { CPUState *target_cpu_state; ARMCPU *target_cpu; DPRINTF("cpu %" PRId64 "\n", cpuid); /* change to the cpu we are powering up */ target_cpu_state = arm_get_cpu_by_id(cpuid); if (!target_cpu_state) { return QEMU_ARM_POWERCTL_INVALID_PARAM; } target_cpu = ARM_CPU(target_cpu_state); if (target_cpu->powered_off) { qemu_log_mask(LOG_GUEST_ERROR, "[ARM]%s: CPU %" PRId64 " is already off\n", __func__, cpuid); return QEMU_ARM_POWERCTL_IS_OFF; } target_cpu->powered_off = true; target_cpu_state->halted = 1; target_cpu_state->exception_index = EXCP_HLT; cpu_loop_exit(target_cpu_state); /* notreached */ return QEMU_ARM_POWERCTL_RET_SUCCESS; }
14,761
1
static int read_packet(AVFormatContext *s, AVPacket *pkt) { FilmstripDemuxContext *film = s->priv_data; AVStream *st = s->streams[0]; if (s->pb->eof_reached) return AVERROR(EIO); pkt->dts = avio_tell(s->pb) / (st->codec->width * (st->codec->height + film->leading) * 4); pkt->size = av_get_packet(s->pb, pkt, st->codec->width * st->codec->height * 4); avio_skip(s->pb, st->codec->width * film->leading * 4); if (pkt->size < 0) return pkt->size; pkt->flags |= AV_PKT_FLAG_KEY; return 0; }
14,762
1
static int get_physical_address_code(CPUState *env, target_phys_addr_t *physical, int *prot, target_ulong address, int is_user) { unsigned int i; uint64_t context; int is_nucleus; if ((env->lsu & IMMU_E) == 0 || (env->pstate & PS_RED) != 0) { /* IMMU disabled */ *physical = ultrasparc_truncate_physical(address); *prot = PAGE_EXEC; return 0; } context = env->dmmu.mmu_primary_context & 0x1fff; is_nucleus = env->tl > 0; for (i = 0; i < 64; i++) { // ctx match, vaddr match, valid? if (ultrasparc_tag_match(&env->itlb[i], address, context, physical, is_nucleus)) { // access ok? if ((env->itlb[i].tte & 0x4) && is_user) { if (env->immu.sfsr) /* Fault status register */ env->immu.sfsr = 2; /* overflow (not read before another fault) */ env->immu.sfsr |= (is_user << 3) | 1; env->exception_index = TT_TFAULT; #ifdef DEBUG_MMU printf("TFAULT at 0x%" PRIx64 "\n", address); #endif return 1; } *prot = PAGE_EXEC; TTE_SET_USED(env->itlb[i].tte); return 0; } } #ifdef DEBUG_MMU printf("TMISS at 0x%" PRIx64 "\n", address); #endif /* Context is stored in DMMU (dmmuregs[1]) also for IMMU */ env->immu.tag_access = (address & ~0x1fffULL) | context; env->exception_index = TT_TMISS; return 1; }
14,763
1
int ff_mjpeg_decode_sof(MJpegDecodeContext *s) { int len, nb_components, i, width, height, bits, pix_fmt_id, ret; int h_count[MAX_COMPONENTS]; int v_count[MAX_COMPONENTS]; s->cur_scan = 0; s->upscale_h = s->upscale_v = 0; /* XXX: verify len field validity */ len = get_bits(&s->gb, 16); s->avctx->bits_per_raw_sample = bits = get_bits(&s->gb, 8); if (s->pegasus_rct) bits = 9; if (bits == 9 && !s->pegasus_rct) s->rct = 1; // FIXME ugly if(s->lossless && s->avctx->lowres){ av_log(s->avctx, AV_LOG_ERROR, "lowres is not possible with lossless jpeg\n"); return -1; height = get_bits(&s->gb, 16); width = get_bits(&s->gb, 16); if (s->avctx->codec_id == AV_CODEC_ID_AMV && (height&15)) avpriv_request_sample(s->avctx, "non mod 16 height AMV\n"); // HACK for odd_height.mov if (s->interlaced && s->width == width && s->height == height + 1) height= s->height; av_log(s->avctx, AV_LOG_DEBUG, "sof0: picture: %dx%d\n", width, height); if (av_image_check_size(width, height, 0, s->avctx)) nb_components = get_bits(&s->gb, 8); if (nb_components <= 0 || nb_components > MAX_COMPONENTS) return -1; if (s->interlaced && (s->bottom_field == !s->interlace_polarity)) { if (nb_components != s->nb_components) { av_log(s->avctx, AV_LOG_ERROR, "nb_components changing in interlaced picture\n"); if (s->ls && !(bits <= 8 || nb_components == 1)) { avpriv_report_missing_feature(s->avctx, "JPEG-LS that is not <= 8 " "bits/component or 16-bit gray"); return AVERROR_PATCHWELCOME; s->nb_components = nb_components; s->h_max = 1; s->v_max = 1; memset(h_count, 0, sizeof(h_count)); memset(v_count, 0, sizeof(v_count)); for (i = 0; i < nb_components; i++) { /* component id */ s->component_id[i] = get_bits(&s->gb, 8) - 1; h_count[i] = get_bits(&s->gb, 4); v_count[i] = get_bits(&s->gb, 4); /* compute hmax and vmax (only used in interleaved case) */ if (h_count[i] > s->h_max) s->h_max = h_count[i]; if (v_count[i] > s->v_max) s->v_max = v_count[i]; s->quant_index[i] = get_bits(&s->gb, 8); if (s->quant_index[i] >= 4) { av_log(s->avctx, AV_LOG_ERROR, "quant_index is invalid\n"); if (!h_count[i] || !v_count[i]) { av_log(s->avctx, AV_LOG_ERROR, "Invalid sampling factor in component %d %d:%d\n", i, h_count[i], v_count[i]); av_log(s->avctx, AV_LOG_DEBUG, "component %d %d:%d id: %d quant:%d\n", i, h_count[i], v_count[i], s->component_id[i], s->quant_index[i]); if (s->ls && (s->h_max > 1 || s->v_max > 1)) { avpriv_report_missing_feature(s->avctx, "Subsampling in JPEG-LS"); return AVERROR_PATCHWELCOME; /* if different size, realloc/alloc picture */ if ( width != s->width || height != s->height || bits != s->bits || memcmp(s->h_count, h_count, sizeof(h_count)) || memcmp(s->v_count, v_count, sizeof(v_count))) { s->width = width; s->height = height; s->bits = bits; memcpy(s->h_count, h_count, sizeof(h_count)); memcpy(s->v_count, v_count, sizeof(v_count)); s->interlaced = 0; s->got_picture = 0; /* test interlaced mode */ if (s->first_picture && s->org_height != 0 && s->height < ((s->org_height * 3) / 4)) { s->interlaced = 1; s->bottom_field = s->interlace_polarity; s->picture_ptr->interlaced_frame = 1; s->picture_ptr->top_field_first = !s->interlace_polarity; height *= 2; ret = ff_set_dimensions(s->avctx, width, height); if (ret < 0) return ret; s->first_picture = 0; if (s->got_picture && s->interlaced && (s->bottom_field == !s->interlace_polarity)) { if (s->progressive) { avpriv_request_sample(s->avctx, "progressively coded interlaced picture"); } else{ if (s->v_max == 1 && s->h_max == 1 && s->lossless==1 && (nb_components==3 || nb_components==4)) s->rgb 
= 1; else if (!s->lossless) s->rgb = 0; /* XXX: not complete test ! */ pix_fmt_id = (s->h_count[0] << 28) | (s->v_count[0] << 24) | (s->h_count[1] << 20) | (s->v_count[1] << 16) | (s->h_count[2] << 12) | (s->v_count[2] << 8) | (s->h_count[3] << 4) | s->v_count[3]; av_log(s->avctx, AV_LOG_DEBUG, "pix fmt id %x\n", pix_fmt_id); /* NOTE we do not allocate pictures large enough for the possible * padding of h/v_count being 4 */ if (!(pix_fmt_id & 0xD0D0D0D0)) pix_fmt_id -= (pix_fmt_id & 0xF0F0F0F0) >> 1; if (!(pix_fmt_id & 0x0D0D0D0D)) pix_fmt_id -= (pix_fmt_id & 0x0F0F0F0F) >> 1; for (i = 0; i < 8; i++) { int j = 6 + (i&1) - (i&6); int is = (pix_fmt_id >> (4*i)) & 0xF; int js = (pix_fmt_id >> (4*j)) & 0xF; if (is == 1 && js != 2 && (i < 2 || i > 5)) js = (pix_fmt_id >> ( 8 + 4*(i&1))) & 0xF; if (is == 1 && js != 2 && (i < 2 || i > 5)) js = (pix_fmt_id >> (16 + 4*(i&1))) & 0xF; if (is == 1 && js == 2) { if (i & 1) s->upscale_h |= 1 << (j/2); else s->upscale_v |= 1 << (j/2); switch (pix_fmt_id) { case 0x11111100: if (s->rgb) s->avctx->pix_fmt = s->bits <= 9 ? AV_PIX_FMT_BGR24 : AV_PIX_FMT_BGR48; else { if (s->component_id[0] == 'Q' && s->component_id[1] == 'F' && s->component_id[2] == 'A') { s->avctx->pix_fmt = s->bits <= 8 ? AV_PIX_FMT_GBRP : AV_PIX_FMT_GBRP16; } else { if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P; else s->avctx->pix_fmt = AV_PIX_FMT_YUV444P16; s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG; av_assert0(s->nb_components == 3); break; case 0x11111111: if (s->rgb) s->avctx->pix_fmt = s->bits <= 9 ? AV_PIX_FMT_ABGR : AV_PIX_FMT_RGBA64; else { if (s->adobe_transform == 0 && s->bits <= 8) { s->avctx->pix_fmt = AV_PIX_FMT_GBRAP; } else { s->avctx->pix_fmt = s->bits <= 8 ? AV_PIX_FMT_YUVA444P : AV_PIX_FMT_YUVA444P16; s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG; av_assert0(s->nb_components == 4); break; case 0x22111122: if (s->adobe_transform == 0 && s->bits <= 8) { s->avctx->pix_fmt = AV_PIX_FMT_GBRAP; s->upscale_v = 6; s->upscale_h = 6; s->chroma_height = s->height; } else if (s->adobe_transform == 2 && s->bits <= 8) { s->avctx->pix_fmt = AV_PIX_FMT_YUVA444P; s->upscale_v = 6; s->upscale_h = 6; s->chroma_height = s->height; s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG; } else { if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_YUVA420P; else s->avctx->pix_fmt = AV_PIX_FMT_YUVA420P16; s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG; av_assert0(s->nb_components == 4); break; case 0x12121100: case 0x22122100: case 0x21211100: case 0x22211200: if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P; else goto unk_pixfmt; s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG; s->chroma_height = s->height; break; case 0x22221100: case 0x22112200: case 0x11222200: if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P; else goto unk_pixfmt; s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG; s->chroma_height = (s->height + 1) / 2; break; case 0x11000000: case 0x13000000: case 0x14000000: case 0x31000000: case 0x33000000: case 0x34000000: case 0x41000000: case 0x43000000: case 0x44000000: if(s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_GRAY8; else s->avctx->pix_fmt = AV_PIX_FMT_GRAY16; break; case 0x12111100: case 0x14121200: case 0x22211100: case 0x22112100: if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? 
AV_PIX_FMT_YUV440P : AV_PIX_FMT_YUVJ440P; else goto unk_pixfmt; s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG; s->chroma_height = (s->height + 1) / 2; break; case 0x21111100: if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV422P : AV_PIX_FMT_YUVJ422P; else s->avctx->pix_fmt = AV_PIX_FMT_YUV422P16; s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG; break; case 0x22121100: case 0x22111200: if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV422P : AV_PIX_FMT_YUVJ422P; else goto unk_pixfmt; s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG; break; case 0x22111100: case 0x42111100: if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV420P : AV_PIX_FMT_YUVJ420P; else s->avctx->pix_fmt = AV_PIX_FMT_YUV420P16; s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG; if (pix_fmt_id == 0x42111100) { s->upscale_h = 6; s->chroma_height = (s->height + 1) / 2; break; case 0x41111100: if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV411P : AV_PIX_FMT_YUVJ411P; else goto unk_pixfmt; s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG; break; default: unk_pixfmt: av_log(s->avctx, AV_LOG_ERROR, "Unhandled pixel format 0x%x\n", pix_fmt_id); s->upscale_h = s->upscale_v = 0; return AVERROR_PATCHWELCOME; if ((s->upscale_h || s->upscale_v) && s->avctx->lowres) { av_log(s->avctx, AV_LOG_ERROR, "lowres not supported for weird subsampling\n"); return AVERROR_PATCHWELCOME; if (s->ls) { s->upscale_h = s->upscale_v = 0; if (s->nb_components > 1) s->avctx->pix_fmt = AV_PIX_FMT_RGB24; else if (s->palette_index && s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_PAL8; else if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_GRAY8; else s->avctx->pix_fmt = AV_PIX_FMT_GRAY16; s->pix_desc = av_pix_fmt_desc_get(s->avctx->pix_fmt); if (!s->pix_desc) { av_log(s->avctx, AV_LOG_ERROR, "Could not get a pixel format descriptor.\n"); return AVERROR_BUG; av_frame_unref(s->picture_ptr); if (ff_get_buffer(s->avctx, s->picture_ptr, AV_GET_BUFFER_FLAG_REF) < 0) return -1; s->picture_ptr->pict_type = AV_PICTURE_TYPE_I; s->picture_ptr->key_frame = 1; s->got_picture = 1; for (i = 0; i < 4; i++) s->linesize[i] = s->picture_ptr->linesize[i] << s->interlaced; av_dlog(s->avctx, "%d %d %d %d %d %d\n", s->width, s->height, s->linesize[0], s->linesize[1], s->interlaced, s->avctx->height); if (len != (8 + (3 * nb_components))) av_log(s->avctx, AV_LOG_DEBUG, "decode_sof0: error, len(%d) mismatch\n", len); if (s->rgb && !s->lossless && !s->ls) { av_log(s->avctx, AV_LOG_ERROR, "Unsupported coding and pixel format combination\n"); return AVERROR_PATCHWELCOME; /* totally blank picture as progressive JPEG will only add details to it */ if (s->progressive) { int bw = (width + s->h_max * 8 - 1) / (s->h_max * 8); int bh = (height + s->v_max * 8 - 1) / (s->v_max * 8); for (i = 0; i < s->nb_components; i++) { int size = bw * bh * s->h_count[i] * s->v_count[i]; av_freep(&s->blocks[i]); av_freep(&s->last_nnz[i]); s->blocks[i] = av_mallocz_array(size, sizeof(**s->blocks)); s->last_nnz[i] = av_mallocz_array(size, sizeof(**s->last_nnz)); if (!s->blocks[i] || !s->last_nnz[i]) return AVERROR(ENOMEM); s->block_stride[i] = bw * s->h_count[i]; memset(s->coefs_finished, 0, sizeof(s->coefs_finished)); return 0;
14,764
1
static void ahci_shutdown(AHCIQState *ahci) { QOSState *qs = ahci->parent; free_ahci_device(ahci->dev); g_free(ahci); qtest_shutdown(qs); }
14,765
1
static int gif_parse_next_image(GifState *s) { ByteIOContext *f = s->f; int ret, code; for (;;) { code = url_fgetc(f); #ifdef DEBUG printf("gif: code=%02x '%c'\n", code, code); #endif switch (code) { case ',': if (gif_read_image(s) < 0) return AVERROR(EIO); ret = 0; goto the_end; case ';': /* end of image */ ret = AVERROR(EIO); goto the_end; case '!': if (gif_read_extension(s) < 0) return AVERROR(EIO); break; case EOF: default: /* error or erroneous EOF */ ret = AVERROR(EIO); goto the_end; } } the_end: return ret; }
14,766
1
void net_client_uninit(NICInfo *nd) { nd->vlan->nb_guest_devs--; nb_nics--; nd->used = 0; free((void *)nd->model); }
14,767
1
static int virtio_net_device_exit(DeviceState *qdev) { VirtIONet *n = VIRTIO_NET(qdev); VirtIODevice *vdev = VIRTIO_DEVICE(qdev); int i; /* This will stop vhost backend if appropriate. */ virtio_net_set_status(vdev, 0); unregister_savevm(qdev, "virtio-net", n); if (n->netclient_name) { g_free(n->netclient_name); n->netclient_name = NULL; } if (n->netclient_type) { g_free(n->netclient_type); n->netclient_type = NULL; } g_free(n->mac_table.macs); g_free(n->vlans); for (i = 0; i < n->max_queues; i++) { VirtIONetQueue *q = &n->vqs[i]; NetClientState *nc = qemu_get_subqueue(n->nic, i); qemu_purge_queued_packets(nc); if (q->tx_timer) { timer_del(q->tx_timer); timer_free(q->tx_timer); } else if (q->tx_bh) { qemu_bh_delete(q->tx_bh); } } g_free(n->vqs); qemu_del_nic(n->nic); virtio_cleanup(vdev); return 0; }
14,768
1
static void residue_encode(vorbis_enc_context *venc, vorbis_enc_residue *rc, PutBitContext *pb, float *coeffs, int samples, int real_ch) { int pass, i, j, p, k; int psize = rc->partition_size; int partitions = (rc->end - rc->begin) / psize; int channels = (rc->type == 2) ? 1 : real_ch; int classes[MAX_CHANNELS][NUM_RESIDUE_PARTITIONS]; int classwords = venc->codebooks[rc->classbook].ndimentions; assert(rc->type == 2); assert(real_ch == 2); for (p = 0; p < partitions; p++) { float max1 = 0., max2 = 0.; int s = rc->begin + p * psize; for (k = s; k < s + psize; k += 2) { max1 = FFMAX(max1, fabs(coeffs[ k / real_ch])); max2 = FFMAX(max2, fabs(coeffs[samples + k / real_ch])); } for (i = 0; i < rc->classifications - 1; i++) if (max1 < rc->maxes[i][0] && max2 < rc->maxes[i][1]) break; classes[0][p] = i; } for (pass = 0; pass < 8; pass++) { p = 0; while (p < partitions) { if (pass == 0) for (j = 0; j < channels; j++) { vorbis_enc_codebook * book = &venc->codebooks[rc->classbook]; int entry = 0; for (i = 0; i < classwords; i++) { entry *= rc->classifications; entry += classes[j][p + i]; } put_codeword(pb, book, entry); } for (i = 0; i < classwords && p < partitions; i++, p++) { for (j = 0; j < channels; j++) { int nbook = rc->books[classes[j][p]][pass]; vorbis_enc_codebook * book = &venc->codebooks[nbook]; float *buf = coeffs + samples*j + rc->begin + p*psize; if (nbook == -1) continue; assert(rc->type == 0 || rc->type == 2); assert(!(psize % book->ndimentions)); if (rc->type == 0) { for (k = 0; k < psize; k += book->ndimentions) { float *a = put_vector(book, pb, &buf[k]); int l; for (l = 0; l < book->ndimentions; l++) buf[k + l] -= a[l]; } } else { int s = rc->begin + p * psize, a1, b1; a1 = (s % real_ch) * samples; b1 = s / real_ch; s = real_ch * samples; for (k = 0; k < psize; k += book->ndimentions) { int dim, a2 = a1, b2 = b1; float vec[MAX_CODEBOOK_DIM], *pv = vec; for (dim = book->ndimentions; dim--; ) { *pv++ = coeffs[a2 + b2]; if ((a2 += samples) == s) { a2 = 0; b2++; } } pv = put_vector(book, pb, vec); for (dim = book->ndimentions; dim--; ) { coeffs[a1 + b1] -= *pv++; if ((a1 += samples) == s) { a1 = 0; b1++; } } } } } } } } }
14,769
1
static void rtc_class_initfn(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = rtc_realizefn; dc->no_user = 1; dc->vmsd = &vmstate_rtc; dc->props = mc146818rtc_properties; }
14,771
1
static int megasas_handle_dcmd(MegasasState *s, MegasasCmd *cmd) { int opcode, len; int retval = 0; const struct dcmd_cmd_tbl_t *cmdptr = dcmd_cmd_tbl; opcode = le32_to_cpu(cmd->frame->dcmd.opcode); trace_megasas_handle_dcmd(cmd->index, opcode); len = megasas_map_dcmd(s, cmd); if (len < 0) { return MFI_STAT_MEMORY_NOT_AVAILABLE; } while (cmdptr->opcode != -1 && cmdptr->opcode != opcode) { cmdptr++; } if (cmdptr->opcode == -1) { trace_megasas_dcmd_unhandled(cmd->index, opcode, len); retval = megasas_dcmd_dummy(s, cmd); } else { trace_megasas_dcmd_enter(cmd->index, cmdptr->desc, len); retval = cmdptr->func(s, cmd); } if (retval != MFI_STAT_INVALID_STATUS) { megasas_finish_dcmd(cmd, len); } return retval; }
14,773
1
static int64_t alloc_clusters_imrt(BlockDriverState *bs, int cluster_count, uint16_t **refcount_table, int64_t *imrt_nb_clusters, int64_t *first_free_cluster) { BDRVQcowState *s = bs->opaque; int64_t cluster = *first_free_cluster, i; bool first_gap = true; int contiguous_free_clusters; int ret; /* Starting at *first_free_cluster, find a range of at least cluster_count * continuously free clusters */ for (contiguous_free_clusters = 0; cluster < *imrt_nb_clusters && contiguous_free_clusters < cluster_count; cluster++) { if (!(*refcount_table)[cluster]) { contiguous_free_clusters++; if (first_gap) { /* If this is the first free cluster found, update * *first_free_cluster accordingly */ *first_free_cluster = cluster; first_gap = false; } } else if (contiguous_free_clusters) { contiguous_free_clusters = 0; } } /* If contiguous_free_clusters is greater than zero, it contains the number * of continuously free clusters until the current cluster; the first free * cluster in the current "gap" is therefore * cluster - contiguous_free_clusters */ /* If no such range could be found, grow the in-memory refcount table * accordingly to append free clusters at the end of the image */ if (contiguous_free_clusters < cluster_count) { /* contiguous_free_clusters clusters are already empty at the image end; * we need cluster_count clusters; therefore, we have to allocate * cluster_count - contiguous_free_clusters new clusters at the end of * the image (which is the current value of cluster; note that cluster * may exceed old_imrt_nb_clusters if *first_free_cluster pointed beyond * the image end) */ ret = realloc_refcount_array(s, refcount_table, imrt_nb_clusters, cluster + cluster_count - contiguous_free_clusters); if (ret < 0) { return ret; } } /* Go back to the first free cluster */ cluster -= contiguous_free_clusters; for (i = 0; i < cluster_count; i++) { (*refcount_table)[cluster + i] = 1; } return cluster << s->cluster_bits; }
14,778
0
static uint64_t grlib_apbuart_read(void *opaque, target_phys_addr_t addr, unsigned size) { UART *uart = opaque; addr &= 0xff; /* Unit registers */ switch (addr) { case DATA_OFFSET: case DATA_OFFSET + 3: /* when only one byte read */ return uart_pop(uart); case STATUS_OFFSET: /* Read Only */ return uart->status; case CONTROL_OFFSET: return uart->control; case SCALER_OFFSET: /* Not supported */ return 0; default: trace_grlib_apbuart_readl_unknown(addr); return 0; } }
14,779
0
av_cold void ff_rv34dsp_init(RV34DSPContext *c, DSPContext* dsp) { c->rv34_inv_transform = rv34_inv_transform_noround_c; c->rv34_inv_transform_dc = rv34_inv_transform_dc_noround_c; c->rv34_idct_add = rv34_idct_add_c; c->rv34_idct_dc_add = rv34_idct_dc_add_c; if (HAVE_NEON) ff_rv34dsp_init_neon(c, dsp); if (ARCH_X86) ff_rv34dsp_init_x86(c, dsp); }
14,780
0
static void memory_region_add_subregion_common(MemoryRegion *mr, hwaddr offset, MemoryRegion *subregion) { MemoryRegion *other; memory_region_transaction_begin(); assert(!subregion->parent); memory_region_ref(subregion); subregion->parent = mr; subregion->addr = offset; QTAILQ_FOREACH(other, &mr->subregions, subregions_link) { if (subregion->may_overlap || other->may_overlap) { continue; } if (int128_ge(int128_make64(offset), int128_add(int128_make64(other->addr), other->size)) || int128_le(int128_add(int128_make64(offset), subregion->size), int128_make64(other->addr))) { continue; } #if 0 printf("warning: subregion collision %llx/%llx (%s) " "vs %llx/%llx (%s)\n", (unsigned long long)offset, (unsigned long long)int128_get64(subregion->size), subregion->name, (unsigned long long)other->addr, (unsigned long long)int128_get64(other->size), other->name); #endif } QTAILQ_FOREACH(other, &mr->subregions, subregions_link) { if (subregion->priority >= other->priority) { QTAILQ_INSERT_BEFORE(other, subregion, subregions_link); goto done; } } QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link); done: memory_region_update_pending |= mr->enabled && subregion->enabled; memory_region_transaction_commit(); }
14,782
0
qcrypto_block_luks_open(QCryptoBlock *block, QCryptoBlockOpenOptions *options, QCryptoBlockReadFunc readfunc, void *opaque, unsigned int flags, Error **errp) { QCryptoBlockLUKS *luks; Error *local_err = NULL; int ret = 0; size_t i; ssize_t rv; uint8_t *masterkey = NULL; size_t masterkeylen; char *ivgen_name, *ivhash_name; QCryptoCipherMode ciphermode; QCryptoCipherAlgorithm cipheralg; QCryptoIVGenAlgorithm ivalg; QCryptoCipherAlgorithm ivcipheralg; QCryptoHashAlgorithm hash; QCryptoHashAlgorithm ivhash; char *password = NULL; if (!(flags & QCRYPTO_BLOCK_OPEN_NO_IO)) { if (!options->u.luks.key_secret) { error_setg(errp, "Parameter 'key-secret' is required for cipher"); return -1; } password = qcrypto_secret_lookup_as_utf8( options->u.luks.key_secret, errp); if (!password) { return -1; } } luks = g_new0(QCryptoBlockLUKS, 1); block->opaque = luks; /* Read the entire LUKS header, minus the key material from * the underlying device */ rv = readfunc(block, opaque, 0, (uint8_t *)&luks->header, sizeof(luks->header), errp); if (rv < 0) { ret = rv; goto fail; } /* The header is always stored in big-endian format, so * convert everything to native */ be16_to_cpus(&luks->header.version); be32_to_cpus(&luks->header.payload_offset); be32_to_cpus(&luks->header.key_bytes); be32_to_cpus(&luks->header.master_key_iterations); for (i = 0; i < QCRYPTO_BLOCK_LUKS_NUM_KEY_SLOTS; i++) { be32_to_cpus(&luks->header.key_slots[i].active); be32_to_cpus(&luks->header.key_slots[i].iterations); be32_to_cpus(&luks->header.key_slots[i].key_offset); be32_to_cpus(&luks->header.key_slots[i].stripes); } if (memcmp(luks->header.magic, qcrypto_block_luks_magic, QCRYPTO_BLOCK_LUKS_MAGIC_LEN) != 0) { error_setg(errp, "Volume is not in LUKS format"); ret = -EINVAL; goto fail; } if (luks->header.version != QCRYPTO_BLOCK_LUKS_VERSION) { error_setg(errp, "LUKS version %" PRIu32 " is not supported", luks->header.version); ret = -ENOTSUP; goto fail; } /* * The cipher_mode header contains a string that we have * to further parse, of the format * * <cipher-mode>-<iv-generator>[:<iv-hash>] * * eg cbc-essiv:sha256, cbc-plain64 */ ivgen_name = strchr(luks->header.cipher_mode, '-'); if (!ivgen_name) { ret = -EINVAL; error_setg(errp, "Unexpected cipher mode string format %s", luks->header.cipher_mode); goto fail; } *ivgen_name = '\0'; ivgen_name++; ivhash_name = strchr(ivgen_name, ':'); if (!ivhash_name) { ivhash = 0; } else { *ivhash_name = '\0'; ivhash_name++; ivhash = qcrypto_block_luks_hash_name_lookup(ivhash_name, &local_err); if (local_err) { ret = -ENOTSUP; error_propagate(errp, local_err); goto fail; } } ciphermode = qcrypto_block_luks_cipher_mode_lookup(luks->header.cipher_mode, &local_err); if (local_err) { ret = -ENOTSUP; error_propagate(errp, local_err); goto fail; } cipheralg = qcrypto_block_luks_cipher_name_lookup(luks->header.cipher_name, ciphermode, luks->header.key_bytes, &local_err); if (local_err) { ret = -ENOTSUP; error_propagate(errp, local_err); goto fail; } hash = qcrypto_block_luks_hash_name_lookup(luks->header.hash_spec, &local_err); if (local_err) { ret = -ENOTSUP; error_propagate(errp, local_err); goto fail; } ivalg = qcrypto_block_luks_ivgen_name_lookup(ivgen_name, &local_err); if (local_err) { ret = -ENOTSUP; error_propagate(errp, local_err); goto fail; } if (ivalg == QCRYPTO_IVGEN_ALG_ESSIV) { if (!ivhash_name) { ret = -EINVAL; error_setg(errp, "Missing IV generator hash specification"); goto fail; } ivcipheralg = qcrypto_block_luks_essiv_cipher(cipheralg, ivhash, &local_err); if (local_err) { ret = -ENOTSUP; 
error_propagate(errp, local_err); goto fail; } } else { /* Note we parsed the ivhash_name earlier in the cipher_mode * spec string even with plain/plain64 ivgens, but we * will ignore it, since it is irrelevant for these ivgens. * This is for compat with dm-crypt which will silently * ignore hash names with these ivgens rather than report * an error about the invalid usage */ ivcipheralg = cipheralg; } if (!(flags & QCRYPTO_BLOCK_OPEN_NO_IO)) { /* Try to find which key slot our password is valid for * and unlock the master key from that slot. */ if (qcrypto_block_luks_find_key(block, password, cipheralg, ciphermode, hash, ivalg, ivcipheralg, ivhash, &masterkey, &masterkeylen, readfunc, opaque, errp) < 0) { ret = -EACCES; goto fail; } /* We have a valid master key now, so can setup the * block device payload decryption objects */ block->kdfhash = hash; block->niv = qcrypto_cipher_get_iv_len(cipheralg, ciphermode); block->ivgen = qcrypto_ivgen_new(ivalg, ivcipheralg, ivhash, masterkey, masterkeylen, errp); if (!block->ivgen) { ret = -ENOTSUP; goto fail; } block->cipher = qcrypto_cipher_new(cipheralg, ciphermode, masterkey, masterkeylen, errp); if (!block->cipher) { ret = -ENOTSUP; goto fail; } } block->payload_offset = luks->header.payload_offset * QCRYPTO_BLOCK_LUKS_SECTOR_SIZE; luks->cipher_alg = cipheralg; luks->cipher_mode = ciphermode; luks->ivgen_alg = ivalg; luks->ivgen_hash_alg = ivhash; luks->hash_alg = hash; g_free(masterkey); g_free(password); return 0; fail: g_free(masterkey); qcrypto_cipher_free(block->cipher); qcrypto_ivgen_free(block->ivgen); g_free(luks); g_free(password); return ret; }
14,783
0
static void rng_egd_free_requests(RngEgd *s) { GSList *i; for (i = s->parent.requests; i; i = i->next) { rng_egd_free_request(i->data); } g_slist_free(s->parent.requests); s->parent.requests = NULL; }
14,784
0
static void test_qemu_strtoull_octal(void) { const char *str = "0123"; char f = 'X'; const char *endptr = &f; uint64_t res = 999; int err; err = qemu_strtoull(str, &endptr, 8, &res); g_assert_cmpint(err, ==, 0); g_assert_cmpint(res, ==, 0123); g_assert(endptr == str + strlen(str)); endptr = &f; res = 999; err = qemu_strtoull(str, &endptr, 0, &res); g_assert_cmpint(err, ==, 0); g_assert_cmpint(res, ==, 0123); g_assert(endptr == str + strlen(str)); }
14,785
0
static inline void gen_ins(DisasContext *s, int ot) { gen_string_movl_A0_EDI(s); gen_op_movl_T0_0(); gen_op_st_T0_A0(ot + s->mem_index); gen_op_mov_TN_reg(OT_WORD, 1, R_EDX); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[1]); tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff); tcg_gen_helper_1_1(helper_in_func[ot], cpu_T[0], cpu_tmp2_i32); gen_op_st_T0_A0(ot + s->mem_index); gen_op_movl_T0_Dshift[ot](); #ifdef TARGET_X86_64 if (s->aflag == 2) { gen_op_addq_EDI_T0(); } else #endif if (s->aflag) { gen_op_addl_EDI_T0(); } else { gen_op_addw_EDI_T0(); } }
14,786
0
static void vapic_map_rom_writable(VAPICROMState *s) { target_phys_addr_t rom_paddr = s->rom_state_paddr & ROM_BLOCK_MASK; MemoryRegionSection section; MemoryRegion *as; size_t rom_size; uint8_t *ram; as = sysbus_address_space(&s->busdev); if (s->rom_mapped_writable) { memory_region_del_subregion(as, &s->rom); memory_region_destroy(&s->rom); } /* grab RAM memory region (region @rom_paddr may still be pc.rom) */ section = memory_region_find(as, 0, 1); /* read ROM size from RAM region */ ram = memory_region_get_ram_ptr(section.mr); rom_size = ram[rom_paddr + 2] * ROM_BLOCK_SIZE; s->rom_size = rom_size; /* We need to round to avoid creating subpages * from which we cannot run code. */ rom_size += rom_paddr & ~TARGET_PAGE_MASK; rom_paddr &= TARGET_PAGE_MASK; rom_size = TARGET_PAGE_ALIGN(rom_size); memory_region_init_alias(&s->rom, "kvmvapic-rom", section.mr, rom_paddr, rom_size); memory_region_add_subregion_overlap(as, rom_paddr, &s->rom, 1000); s->rom_mapped_writable = true; }
14,787
0
void block_job_resume(BlockJob *job) { job->paused = false; block_job_iostatus_reset(job); if (job->co && !job->busy) { qemu_coroutine_enter(job->co, NULL); } }
14,790
0
static inline void vring_used_write(VirtQueue *vq, VRingUsedElem *uelem, int i) { hwaddr pa; virtio_tswap32s(vq->vdev, &uelem->id); virtio_tswap32s(vq->vdev, &uelem->len); pa = vq->vring.used + offsetof(VRingUsed, ring[i]); address_space_write(&address_space_memory, pa, MEMTXATTRS_UNSPECIFIED, (void *)uelem, sizeof(VRingUsedElem)); }
14,792
0
static int writev_f(BlockBackend *blk, int argc, char **argv) { struct timeval t1, t2; int Cflag = 0, qflag = 0; int c, cnt; char *buf; int64_t offset; /* Some compilers get confused and warn if this is not initialized. */ int total = 0; int nr_iov; int pattern = 0xcd; QEMUIOVector qiov; while ((c = getopt(argc, argv, "CqP:")) != EOF) { switch (c) { case 'C': Cflag = 1; break; case 'q': qflag = 1; break; case 'P': pattern = parse_pattern(optarg); if (pattern < 0) { return 0; } break; default: return qemuio_command_usage(&writev_cmd); } } if (optind > argc - 2) { return qemuio_command_usage(&writev_cmd); } offset = cvtnum(argv[optind]); if (offset < 0) { printf("non-numeric length argument -- %s\n", argv[optind]); return 0; } optind++; if (offset & 0x1ff) { printf("offset %" PRId64 " is not sector aligned\n", offset); return 0; } nr_iov = argc - optind; buf = create_iovec(blk, &qiov, &argv[optind], nr_iov, pattern); if (buf == NULL) { return 0; } gettimeofday(&t1, NULL); cnt = do_aio_writev(blk, &qiov, offset, &total); gettimeofday(&t2, NULL); if (cnt < 0) { printf("writev failed: %s\n", strerror(-cnt)); goto out; } if (qflag) { goto out; } /* Finally, report back -- -C gives a parsable format */ t2 = tsub(t2, t1); print_report("wrote", &t2, offset, qiov.size, total, cnt, Cflag); out: qemu_iovec_destroy(&qiov); qemu_io_free(buf); return 0; }
14,793
0
static int cow_open(BlockDriverState *bs, QDict *options, int flags, Error **errp) { BDRVCowState *s = bs->opaque; struct cow_header_v2 cow_header; int bitmap_size; int64_t size; int ret; /* see if it is a cow image */ ret = bdrv_pread(bs->file, 0, &cow_header, sizeof(cow_header)); if (ret < 0) { goto fail; } if (be32_to_cpu(cow_header.magic) != COW_MAGIC) { error_setg(errp, "Image not in COW format"); ret = -EINVAL; goto fail; } if (be32_to_cpu(cow_header.version) != COW_VERSION) { char version[64]; snprintf(version, sizeof(version), "COW version %" PRIu32, cow_header.version); error_set(errp, QERR_UNKNOWN_BLOCK_FORMAT_FEATURE, bs->device_name, "cow", version); ret = -ENOTSUP; goto fail; } /* cow image found */ size = be64_to_cpu(cow_header.size); bs->total_sectors = size / 512; pstrcpy(bs->backing_file, sizeof(bs->backing_file), cow_header.backing_file); bitmap_size = ((bs->total_sectors + 7) >> 3) + sizeof(cow_header); s->cow_sectors_offset = (bitmap_size + 511) & ~511; qemu_co_mutex_init(&s->lock); return 0; fail: return ret; }
14,794
0
static void pc_compat_2_2(MachineState *machine) { pc_compat_2_3(machine); rsdp_in_ram = false; machine->suppress_vmdesc = true; }
14,795
0
void spapr_core_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev, Error **errp) { MachineState *machine = MACHINE(OBJECT(hotplug_dev)); sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(OBJECT(hotplug_dev)); sPAPRMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev)); int spapr_max_cores = max_cpus / smp_threads; int index; Error *local_err = NULL; CPUCore *cc = CPU_CORE(dev); char *base_core_type = spapr_get_cpu_core_type(machine->cpu_model); const char *type = object_get_typename(OBJECT(dev)); if (!smc->dr_cpu_enabled) { error_setg(&local_err, "CPU hotplug not supported for this machine"); goto out; } if (strcmp(base_core_type, type)) { error_setg(&local_err, "CPU core type should be %s", base_core_type); goto out; } if (cc->nr_threads != smp_threads) { error_setg(&local_err, "threads must be %d", smp_threads); goto out; } if (cc->core_id % smp_threads) { error_setg(&local_err, "invalid core id %d\n", cc->core_id); goto out; } index = cc->core_id / smp_threads; if (index < 0 || index >= spapr_max_cores) { error_setg(&local_err, "core id %d out of range", cc->core_id); goto out; } if (spapr->cores[index]) { error_setg(&local_err, "core %d already populated", cc->core_id); goto out; } out: g_free(base_core_type); error_propagate(errp, local_err); }
14,796
0
static void scsi_disk_emulate_unmap(SCSIDiskReq *r, uint8_t *inbuf) { SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); uint8_t *p = inbuf; int len = r->req.cmd.xfer; UnmapCBData *data; /* Reject ANCHOR=1. */ if (r->req.cmd.buf[1] & 0x1) { goto invalid_field; } if (len < 8) { goto invalid_param_len; } if (len < lduw_be_p(&p[0]) + 2) { goto invalid_param_len; } if (len < lduw_be_p(&p[2]) + 8) { goto invalid_param_len; } if (lduw_be_p(&p[2]) & 15) { goto invalid_param_len; } if (bdrv_is_read_only(s->qdev.conf.bs)) { scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED)); return; } data = g_new0(UnmapCBData, 1); data->r = r; data->inbuf = &p[8]; data->count = lduw_be_p(&p[2]) >> 4; /* The matching unref is in scsi_unmap_complete, before data is freed. */ scsi_req_ref(&r->req); scsi_unmap_complete(data, 0); return; invalid_param_len: scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN)); return; invalid_field: scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); }
14,797
0
int bdrv_is_encrypted(BlockDriverState *bs) { if (bs->backing_hd && bs->backing_hd->encrypted) return 1; return bs->encrypted; }
14,798
0
void eth_get_protocols(const struct iovec *iov, int iovcnt, bool *isip4, bool *isip6, bool *isudp, bool *istcp, size_t *l3hdr_off, size_t *l4hdr_off, size_t *l5hdr_off, eth_ip6_hdr_info *ip6hdr_info, eth_ip4_hdr_info *ip4hdr_info, eth_l4_hdr_info *l4hdr_info) { int proto; bool fragment = false; size_t l2hdr_len = eth_get_l2_hdr_length_iov(iov, iovcnt); size_t input_size = iov_size(iov, iovcnt); size_t copied; *isip4 = *isip6 = *isudp = *istcp = false; proto = eth_get_l3_proto(iov, iovcnt, l2hdr_len); *l3hdr_off = l2hdr_len; if (proto == ETH_P_IP) { struct ip_header *iphdr = &ip4hdr_info->ip4_hdr; if (input_size < l2hdr_len) { return; } copied = iov_to_buf(iov, iovcnt, l2hdr_len, iphdr, sizeof(*iphdr)); *isip4 = true; if (copied < sizeof(*iphdr)) { return; } if (IP_HEADER_VERSION(iphdr) == IP_HEADER_VERSION_4) { if (iphdr->ip_p == IP_PROTO_TCP) { *istcp = true; } else if (iphdr->ip_p == IP_PROTO_UDP) { *isudp = true; } } ip4hdr_info->fragment = IP4_IS_FRAGMENT(iphdr); *l4hdr_off = l2hdr_len + IP_HDR_GET_LEN(iphdr); fragment = ip4hdr_info->fragment; } else if (proto == ETH_P_IPV6) { *isip6 = true; if (eth_parse_ipv6_hdr(iov, iovcnt, l2hdr_len, ip6hdr_info)) { if (ip6hdr_info->l4proto == IP_PROTO_TCP) { *istcp = true; } else if (ip6hdr_info->l4proto == IP_PROTO_UDP) { *isudp = true; } } else { return; } *l4hdr_off = l2hdr_len + ip6hdr_info->full_hdr_len; fragment = ip6hdr_info->fragment; } if (!fragment) { if (*istcp) { *istcp = _eth_copy_chunk(input_size, iov, iovcnt, *l4hdr_off, sizeof(l4hdr_info->hdr.tcp), &l4hdr_info->hdr.tcp); if (istcp) { *l5hdr_off = *l4hdr_off + TCP_HEADER_DATA_OFFSET(&l4hdr_info->hdr.tcp); l4hdr_info->has_tcp_data = _eth_tcp_has_data(proto == ETH_P_IP, &ip4hdr_info->ip4_hdr, &ip6hdr_info->ip6_hdr, *l4hdr_off - *l3hdr_off, &l4hdr_info->hdr.tcp); } } else if (*isudp) { *isudp = _eth_copy_chunk(input_size, iov, iovcnt, *l4hdr_off, sizeof(l4hdr_info->hdr.udp), &l4hdr_info->hdr.udp); *l5hdr_off = *l4hdr_off + sizeof(l4hdr_info->hdr.udp); } } }
14,799
0
static void apic_bus_deliver(const uint32_t *deliver_bitmask, uint8_t delivery_mode, uint8_t vector_num, uint8_t polarity, uint8_t trigger_mode) { APICState *apic_iter; switch (delivery_mode) { case APIC_DM_LOWPRI: /* XXX: search for focus processor, arbitration */ { int i, d; d = -1; for(i = 0; i < MAX_APIC_WORDS; i++) { if (deliver_bitmask[i]) { d = i * 32 + ffs_bit(deliver_bitmask[i]); break; } } if (d >= 0) { apic_iter = local_apics[d]; if (apic_iter) { apic_set_irq(apic_iter, vector_num, trigger_mode); } } } return; case APIC_DM_FIXED: break; case APIC_DM_SMI: foreach_apic(apic_iter, deliver_bitmask, cpu_interrupt(apic_iter->cpu_env, CPU_INTERRUPT_SMI) ); return; case APIC_DM_NMI: foreach_apic(apic_iter, deliver_bitmask, cpu_interrupt(apic_iter->cpu_env, CPU_INTERRUPT_NMI) ); return; case APIC_DM_INIT: /* normal INIT IPI sent to processors */ foreach_apic(apic_iter, deliver_bitmask, apic_init_ipi(apic_iter) ); return; case APIC_DM_EXTINT: /* handled in I/O APIC code */ break; default: return; } foreach_apic(apic_iter, deliver_bitmask, apic_set_irq(apic_iter, vector_num, trigger_mode) ); }
14,800
0
static void print_sdp(void) { char sdp[16384]; int i; AVFormatContext **avc = av_malloc(sizeof(*avc) * nb_output_files); if (!avc) exit_program(1); for (i = 0; i < nb_output_files; i++) avc[i] = output_files[i]->ctx; av_sdp_create(avc, nb_output_files, sdp, sizeof(sdp)); printf("SDP:\n%s\n", sdp); fflush(stdout); av_freep(&avc); }
14,802
0
void memory_region_iommu_replay(MemoryRegion *mr, Notifier *n, bool is_write) { hwaddr addr, granularity; IOMMUTLBEntry iotlb; granularity = memory_region_iommu_get_min_page_size(mr); for (addr = 0; addr < memory_region_size(mr); addr += granularity) { iotlb = mr->iommu_ops->translate(mr, addr, is_write); if (iotlb.perm != IOMMU_NONE) { n->notify(n, &iotlb); } /* if (2^64 - MR size) < granularity, it's possible to get an * infinite loop here. This should catch such a wraparound */ if ((addr + granularity) < addr) { break; } } }
14,803
0
static void pic_write(void *opaque, target_phys_addr_t addr, uint64_t value, unsigned int size) { struct etrax_pic *fs = opaque; D(printf("%s addr=%x val=%x\n", __func__, addr, value)); if (addr == R_RW_MASK) { fs->regs[R_RW_MASK] = value; pic_update(fs); } }
14,804
0
static int pci_cmd646_ide_initfn(PCIDevice *dev) { PCIIDEState *d = DO_UPCAST(PCIIDEState, dev, dev); uint8_t *pci_conf = d->dev.config; qemu_irq *irq; int i; pci_config_set_vendor_id(pci_conf, PCI_VENDOR_ID_CMD); pci_config_set_device_id(pci_conf, PCI_DEVICE_ID_CMD_646); pci_conf[PCI_REVISION_ID] = 0x07; // IDE controller revision pci_conf[PCI_CLASS_PROG] = 0x8f; pci_config_set_class(pci_conf, PCI_CLASS_STORAGE_IDE); pci_conf[0x51] = 0x04; // enable IDE0 if (d->secondary) { /* XXX: if not enabled, really disable the seconday IDE controller */ pci_conf[0x51] |= 0x08; /* enable IDE1 */ } pci_register_bar(dev, 0, 0x8, PCI_BASE_ADDRESS_SPACE_IO, ide_map); pci_register_bar(dev, 1, 0x4, PCI_BASE_ADDRESS_SPACE_IO, ide_map); pci_register_bar(dev, 2, 0x8, PCI_BASE_ADDRESS_SPACE_IO, ide_map); pci_register_bar(dev, 3, 0x4, PCI_BASE_ADDRESS_SPACE_IO, ide_map); pci_register_bar(dev, 4, 0x10, PCI_BASE_ADDRESS_SPACE_IO, bmdma_map); /* TODO: RST# value should be 0 */ pci_conf[PCI_INTERRUPT_PIN] = 0x01; // interrupt on pin 1 irq = qemu_allocate_irqs(cmd646_set_irq, d, 2); for (i = 0; i < 2; i++) { ide_bus_new(&d->bus[i], &d->dev.qdev, i); ide_init2(&d->bus[i], irq[i]); bmdma_init(&d->bus[i], &d->bmdma[i]); d->bmdma[i].bus = &d->bus[i]; qemu_add_vm_change_state_handler(d->bus[i].dma->ops->restart_cb, &d->bmdma[i].dma); } vmstate_register(&dev->qdev, 0, &vmstate_ide_pci, d); qemu_register_reset(cmd646_reset, d); return 0; }
14,805
0
uint8_t *smbios_get_table(size_t *length) { smbios_validate_table(); *length = smbios_entries_len; return smbios_entries; }
14,806
0
int qemu_fsdev_add(QemuOpts *opts) { struct FsTypeListEntry *fsle; int i; if (qemu_opts_id(opts) == NULL) { fprintf(stderr, "fsdev: No id specified\n"); return -1; } for (i = 0; i < ARRAY_SIZE(FsTypes); i++) { if (strcmp(FsTypes[i].name, qemu_opt_get(opts, "fstype")) == 0) { break; } } if (i == ARRAY_SIZE(FsTypes)) { fprintf(stderr, "fsdev: fstype %s not found\n", qemu_opt_get(opts, "fstype")); return -1; } fsle = qemu_malloc(sizeof(*fsle)); fsle->fse.fsdev_id = qemu_strdup(qemu_opts_id(opts)); fsle->fse.path = qemu_strdup(qemu_opt_get(opts, "path")); fsle->fse.ops = FsTypes[i].ops; QTAILQ_INSERT_TAIL(&fstype_entries, fsle, next); return 0; }
14,807
0
build_madt(GArray *table_data, GArray *linker, VirtGuestInfo *guest_info, VirtAcpiCpuInfo *cpuinfo) { int madt_start = table_data->len; const MemMapEntry *memmap = guest_info->memmap; const int *irqmap = guest_info->irqmap; AcpiMultipleApicTable *madt; AcpiMadtGenericDistributor *gicd; AcpiMadtGenericMsiFrame *gic_msi; int i; madt = acpi_data_push(table_data, sizeof *madt); for (i = 0; i < guest_info->smp_cpus; i++) { AcpiMadtGenericInterrupt *gicc = acpi_data_push(table_data, sizeof *gicc); gicc->type = ACPI_APIC_GENERIC_INTERRUPT; gicc->length = sizeof(*gicc); gicc->base_address = memmap[VIRT_GIC_CPU].base; gicc->cpu_interface_number = i; gicc->arm_mpidr = i; gicc->uid = i; if (test_bit(i, cpuinfo->found_cpus)) { gicc->flags = cpu_to_le32(ACPI_GICC_ENABLED); } } gicd = acpi_data_push(table_data, sizeof *gicd); gicd->type = ACPI_APIC_GENERIC_DISTRIBUTOR; gicd->length = sizeof(*gicd); gicd->base_address = memmap[VIRT_GIC_DIST].base; gic_msi = acpi_data_push(table_data, sizeof *gic_msi); gic_msi->type = ACPI_APIC_GENERIC_MSI_FRAME; gic_msi->length = sizeof(*gic_msi); gic_msi->gic_msi_frame_id = 0; gic_msi->base_address = cpu_to_le64(memmap[VIRT_GIC_V2M].base); gic_msi->flags = cpu_to_le32(1); gic_msi->spi_count = cpu_to_le16(NUM_GICV2M_SPIS); gic_msi->spi_base = cpu_to_le16(irqmap[VIRT_GIC_V2M] + ARM_SPI_BASE); build_header(linker, table_data, (void *)(table_data->data + madt_start), "APIC", table_data->len - madt_start, 3); }
14,809