label
int64 0
1
| func1
stringlengths 23
97k
| id
int64 0
27.3k
|
---|---|---|
1 | static int ptx_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; const uint8_t *buf_end = avpkt->data + avpkt->size; PTXContext * const s = avctx->priv_data; AVFrame *picture = data; AVFrame * const p = &s->picture; unsigned int offset, w, h, y, stride, bytes_per_pixel; uint8_t *ptr; if (buf_end - buf < 14) offset = AV_RL16(buf); w = AV_RL16(buf+8); h = AV_RL16(buf+10); bytes_per_pixel = AV_RL16(buf+12) >> 3; if (bytes_per_pixel != 2) { av_log_ask_for_sample(avctx, "Image format is not RGB15.\n"); return -1; } avctx->pix_fmt = PIX_FMT_RGB555; if (offset != 0x2c) av_log_ask_for_sample(avctx, "offset != 0x2c\n"); buf += offset; if (p->data[0]) avctx->release_buffer(avctx, p); if (av_image_check_size(w, h, 0, avctx)) return -1; if (w != avctx->width || h != avctx->height) avcodec_set_dimensions(avctx, w, h); if (avctx->get_buffer(avctx, p) < 0) { av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); return -1; } p->pict_type = AV_PICTURE_TYPE_I; ptr = p->data[0]; stride = p->linesize[0]; for (y=0; y<h; y++) { if (buf_end - buf < w * bytes_per_pixel) break; #if HAVE_BIGENDIAN unsigned int x; for (x=0; x<w*bytes_per_pixel; x+=bytes_per_pixel) AV_WN16(ptr+x, AV_RL16(buf+x)); #else memcpy(ptr, buf, w*bytes_per_pixel); #endif ptr += stride; buf += w*bytes_per_pixel; } *picture = s->picture; *data_size = sizeof(AVPicture); return offset + w*h*bytes_per_pixel; } | 16,181 |
1 | static int64_t alloc_clusters_noref(BlockDriverState *bs, uint64_t size) { BDRVQcowState *s = bs->opaque; uint64_t i, nb_clusters; int refcount; nb_clusters = size_to_clusters(s, size); retry: for(i = 0; i < nb_clusters; i++) { uint64_t next_cluster_index = s->free_cluster_index++; refcount = get_refcount(bs, next_cluster_index); if (refcount < 0) { return refcount; } else if (refcount != 0) { goto retry; } } /* Make sure that all offsets in the "allocated" range are representable * in an int64_t */ if (s->free_cluster_index - 1 > (INT64_MAX >> s->cluster_bits)) { return -EFBIG; } #ifdef DEBUG_ALLOC2 fprintf(stderr, "alloc_clusters: size=%" PRId64 " -> %" PRId64 "\n", size, (s->free_cluster_index - nb_clusters) << s->cluster_bits); #endif return (s->free_cluster_index - nb_clusters) << s->cluster_bits; } | 16,182 |
1 | static void vhost_region_del(MemoryListener *listener, MemoryRegionSection *section) { struct vhost_dev *dev = container_of(listener, struct vhost_dev, memory_listener); int i; vhost_set_memory(listener, section, false); for (i = 0; i < dev->n_mem_sections; ++i) { if (dev->mem_sections[i].offset_within_address_space == section->offset_within_address_space) { --dev->n_mem_sections; memmove(&dev->mem_sections[i], &dev->mem_sections[i+1], dev->n_mem_sections - i); break; } } } | 16,186 |
1 | static int wma_decode_block(WMADecodeContext *s) { int n, v, a, ch, code, bsize; int coef_nb_bits, total_gain, parse_exponents; DECLARE_ALIGNED_16(float, window[BLOCK_MAX_SIZE * 2]); int nb_coefs[MAX_CHANNELS]; float mdct_norm; #ifdef TRACE tprintf("***decode_block: %d:%d\n", s->frame_count - 1, s->block_num); #endif /* compute current block length */ if (s->use_variable_block_len) { n = av_log2(s->nb_block_sizes - 1) + 1; if (s->reset_block_lengths) { s->reset_block_lengths = 0; v = get_bits(&s->gb, n); if (v >= s->nb_block_sizes) return -1; s->prev_block_len_bits = s->frame_len_bits - v; v = get_bits(&s->gb, n); if (v >= s->nb_block_sizes) return -1; s->block_len_bits = s->frame_len_bits - v; } else { /* update block lengths */ s->prev_block_len_bits = s->block_len_bits; s->block_len_bits = s->next_block_len_bits; } v = get_bits(&s->gb, n); if (v >= s->nb_block_sizes) return -1; s->next_block_len_bits = s->frame_len_bits - v; } else { /* fixed block len */ s->next_block_len_bits = s->frame_len_bits; s->prev_block_len_bits = s->frame_len_bits; s->block_len_bits = s->frame_len_bits; } /* now check if the block length is coherent with the frame length */ s->block_len = 1 << s->block_len_bits; if ((s->block_pos + s->block_len) > s->frame_len) return -1; if (s->nb_channels == 2) { s->ms_stereo = get_bits(&s->gb, 1); } v = 0; for(ch = 0; ch < s->nb_channels; ch++) { a = get_bits(&s->gb, 1); s->channel_coded[ch] = a; v |= a; } /* if no channel coded, no need to go further */ /* XXX: fix potential framing problems */ if (!v) goto next; bsize = s->frame_len_bits - s->block_len_bits; /* read total gain and extract corresponding number of bits for coef escape coding */ total_gain = 1; for(;;) { a = get_bits(&s->gb, 7); total_gain += a; if (a != 127) break; } if (total_gain < 15) coef_nb_bits = 13; else if (total_gain < 32) coef_nb_bits = 12; else if (total_gain < 40) coef_nb_bits = 11; else if (total_gain < 45) coef_nb_bits = 10; else coef_nb_bits = 9; /* compute number 
of coefficients */ n = s->coefs_end[bsize] - s->coefs_start; for(ch = 0; ch < s->nb_channels; ch++) nb_coefs[ch] = n; /* complex coding */ if (s->use_noise_coding) { for(ch = 0; ch < s->nb_channels; ch++) { if (s->channel_coded[ch]) { int i, n, a; n = s->exponent_high_sizes[bsize]; for(i=0;i<n;i++) { a = get_bits(&s->gb, 1); s->high_band_coded[ch][i] = a; /* if noise coding, the coefficients are not transmitted */ if (a) nb_coefs[ch] -= s->exponent_high_bands[bsize][i]; } } } for(ch = 0; ch < s->nb_channels; ch++) { if (s->channel_coded[ch]) { int i, n, val, code; n = s->exponent_high_sizes[bsize]; val = (int)0x80000000; for(i=0;i<n;i++) { if (s->high_band_coded[ch][i]) { if (val == (int)0x80000000) { val = get_bits(&s->gb, 7) - 19; } else { code = get_vlc2(&s->gb, s->hgain_vlc.table, HGAINVLCBITS, HGAINMAX); if (code < 0) return -1; val += code - 18; } s->high_band_values[ch][i] = val; } } } } } /* exposant can be interpolated in short blocks. */ parse_exponents = 1; if (s->block_len_bits != s->frame_len_bits) { parse_exponents = get_bits(&s->gb, 1); } if (parse_exponents) { for(ch = 0; ch < s->nb_channels; ch++) { if (s->channel_coded[ch]) { if (s->use_exp_vlc) { if (decode_exp_vlc(s, ch) < 0) return -1; } else { decode_exp_lsp(s, ch); } } } } else { for(ch = 0; ch < s->nb_channels; ch++) { if (s->channel_coded[ch]) { interpolate_array(s->exponents[ch], 1 << s->prev_block_len_bits, s->block_len); } } } /* parse spectral coefficients : just RLE encoding */ for(ch = 0; ch < s->nb_channels; ch++) { if (s->channel_coded[ch]) { VLC *coef_vlc; int level, run, sign, tindex; int16_t *ptr, *eptr; const uint16_t *level_table, *run_table; /* special VLC tables are used for ms stereo because there is potentially less energy there */ tindex = (ch == 1 && s->ms_stereo); coef_vlc = &s->coef_vlc[tindex]; run_table = s->run_table[tindex]; level_table = s->level_table[tindex]; /* XXX: optimize */ ptr = &s->coefs1[ch][0]; eptr = ptr + nb_coefs[ch]; memset(ptr, 0, s->block_len * 
sizeof(int16_t)); for(;;) { code = get_vlc2(&s->gb, coef_vlc->table, VLCBITS, VLCMAX); if (code < 0) return -1; if (code == 1) { /* EOB */ break; } else if (code == 0) { /* escape */ level = get_bits(&s->gb, coef_nb_bits); /* NOTE: this is rather suboptimal. reading block_len_bits would be better */ run = get_bits(&s->gb, s->frame_len_bits); } else { /* normal code */ run = run_table[code]; level = level_table[code]; } sign = get_bits(&s->gb, 1); if (!sign) level = -level; ptr += run; if (ptr >= eptr) { av_log(NULL, AV_LOG_ERROR, "overflow in spectral RLE, ignoring\n"); break; } *ptr++ = level; /* NOTE: EOB can be omitted */ if (ptr >= eptr) break; } } if (s->version == 1 && s->nb_channels >= 2) { align_get_bits(&s->gb); } } /* normalize */ { int n4 = s->block_len / 2; mdct_norm = 1.0 / (float)n4; if (s->version == 1) { mdct_norm *= sqrt(n4); } } /* finally compute the MDCT coefficients */ for(ch = 0; ch < s->nb_channels; ch++) { if (s->channel_coded[ch]) { int16_t *coefs1; float *coefs, *exponents, mult, mult1, noise, *exp_ptr; int i, j, n, n1, last_high_band; float exp_power[HIGH_BAND_MAX_SIZE]; coefs1 = s->coefs1[ch]; exponents = s->exponents[ch]; mult = pow(10, total_gain * 0.05) / s->max_exponent[ch]; mult *= mdct_norm; coefs = s->coefs[ch]; if (s->use_noise_coding) { mult1 = mult; /* very low freqs : noise */ for(i = 0;i < s->coefs_start; i++) { *coefs++ = s->noise_table[s->noise_index] * (*exponents++) * mult1; s->noise_index = (s->noise_index + 1) & (NOISE_TAB_SIZE - 1); } n1 = s->exponent_high_sizes[bsize]; /* compute power of high bands */ exp_ptr = exponents + s->high_band_start[bsize] - s->coefs_start; last_high_band = 0; /* avoid warning */ for(j=0;j<n1;j++) { n = s->exponent_high_bands[s->frame_len_bits - s->block_len_bits][j]; if (s->high_band_coded[ch][j]) { float e2, v; e2 = 0; for(i = 0;i < n; i++) { v = exp_ptr[i]; e2 += v * v; } exp_power[j] = e2 / n; last_high_band = j; tprintf("%d: power=%f (%d)\n", j, exp_power[j], n); } exp_ptr += n; } /* 
main freqs and high freqs */ for(j=-1;j<n1;j++) { if (j < 0) { n = s->high_band_start[bsize] - s->coefs_start; } else { n = s->exponent_high_bands[s->frame_len_bits - s->block_len_bits][j]; } if (j >= 0 && s->high_band_coded[ch][j]) { /* use noise with specified power */ mult1 = sqrt(exp_power[j] / exp_power[last_high_band]); /* XXX: use a table */ mult1 = mult1 * pow(10, s->high_band_values[ch][j] * 0.05); mult1 = mult1 / (s->max_exponent[ch] * s->noise_mult); mult1 *= mdct_norm; for(i = 0;i < n; i++) { noise = s->noise_table[s->noise_index]; s->noise_index = (s->noise_index + 1) & (NOISE_TAB_SIZE - 1); *coefs++ = (*exponents++) * noise * mult1; } } else { /* coded values + small noise */ for(i = 0;i < n; i++) { noise = s->noise_table[s->noise_index]; s->noise_index = (s->noise_index + 1) & (NOISE_TAB_SIZE - 1); *coefs++ = ((*coefs1++) + noise) * (*exponents++) * mult; } } } /* very high freqs : noise */ n = s->block_len - s->coefs_end[bsize]; mult1 = mult * exponents[-1]; for(i = 0; i < n; i++) { *coefs++ = s->noise_table[s->noise_index] * mult1; s->noise_index = (s->noise_index + 1) & (NOISE_TAB_SIZE - 1); } } else { /* XXX: optimize more */ for(i = 0;i < s->coefs_start; i++) *coefs++ = 0.0; n = nb_coefs[ch]; for(i = 0;i < n; i++) { *coefs++ = coefs1[i] * exponents[i] * mult; } n = s->block_len - s->coefs_end[bsize]; for(i = 0;i < n; i++) *coefs++ = 0.0; } } } #ifdef TRACE for(ch = 0; ch < s->nb_channels; ch++) { if (s->channel_coded[ch]) { dump_floats("exponents", 3, s->exponents[ch], s->block_len); dump_floats("coefs", 1, s->coefs[ch], s->block_len); } } #endif if (s->ms_stereo && s->channel_coded[1]) { float a, b; int i; /* nominal case for ms stereo: we do it before mdct */ /* no need to optimize this case because it should almost never happen */ if (!s->channel_coded[0]) { tprintf("rare ms-stereo case happened\n"); memset(s->coefs[0], 0, sizeof(float) * s->block_len); s->channel_coded[0] = 1; } for(i = 0; i < s->block_len; i++) { a = s->coefs[0][i]; b = 
s->coefs[1][i]; s->coefs[0][i] = a + b; s->coefs[1][i] = a - b; } } /* build the window : we ensure that when the windows overlap their squared sum is always 1 (MDCT reconstruction rule) */ /* XXX: merge with output */ { int i, next_block_len, block_len, prev_block_len, n; float *wptr; block_len = s->block_len; prev_block_len = 1 << s->prev_block_len_bits; next_block_len = 1 << s->next_block_len_bits; /* right part */ wptr = window + block_len; if (block_len <= next_block_len) { for(i=0;i<block_len;i++) *wptr++ = s->windows[bsize][i]; } else { /* overlap */ n = (block_len / 2) - (next_block_len / 2); for(i=0;i<n;i++) *wptr++ = 1.0; for(i=0;i<next_block_len;i++) *wptr++ = s->windows[s->frame_len_bits - s->next_block_len_bits][i]; for(i=0;i<n;i++) *wptr++ = 0.0; } /* left part */ wptr = window + block_len; if (block_len <= prev_block_len) { for(i=0;i<block_len;i++) *--wptr = s->windows[bsize][i]; } else { /* overlap */ n = (block_len / 2) - (prev_block_len / 2); for(i=0;i<n;i++) *--wptr = 1.0; for(i=0;i<prev_block_len;i++) *--wptr = s->windows[s->frame_len_bits - s->prev_block_len_bits][i]; for(i=0;i<n;i++) *--wptr = 0.0; } } for(ch = 0; ch < s->nb_channels; ch++) { if (s->channel_coded[ch]) { DECLARE_ALIGNED_16(FFTSample, output[BLOCK_MAX_SIZE * 2]); float *ptr; int n4, index, n; n = s->block_len; n4 = s->block_len / 2; s->mdct_ctx[bsize].fft.imdct_calc(&s->mdct_ctx[bsize], output, s->coefs[ch], s->mdct_tmp); /* XXX: optimize all that by build the window and multipying/adding at the same time */ /* multiply by the window and add in the frame */ index = (s->frame_len / 2) + s->block_pos - n4; ptr = &s->frame_out[ch][index]; s->dsp.vector_fmul_add_add(ptr,window,output,ptr,0,2*n,1); /* specific fast case for ms-stereo : add to second channel if it is not coded */ if (s->ms_stereo && !s->channel_coded[1]) { ptr = &s->frame_out[1][index]; s->dsp.vector_fmul_add_add(ptr,window,output,ptr,0,2*n,1); } } } next: /* update block number */ s->block_num++; s->block_pos += 
s->block_len; if (s->block_pos >= s->frame_len) return 1; else return 0; } | 16,187 |
1 | static int swf_probe(AVProbeData *p) { if(p->buf_size < 15) return 0; /* check file header */ if ( AV_RB24(p->buf) != AV_RB24("CWS") && AV_RB24(p->buf) != AV_RB24("FWS")) return 0; if (p->buf[3] >= 20) return AVPROBE_SCORE_MAX / 4; return AVPROBE_SCORE_MAX; } | 16,188 |
1 | static void RENAME(yuv2yuyv422_1)(SwsContext *c, const int16_t *buf0, const int16_t *ubuf[2], const int16_t *bguf[2], const int16_t *abuf0, uint8_t *dest, int dstW, int uvalpha, int y) { const int16_t *ubuf0 = ubuf[0], *ubuf1 = ubuf[1]; const int16_t *buf1= buf0; //FIXME needed for RGB1/BGR1 if (uvalpha < 2048) { // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster __asm__ volatile( "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" "mov %4, %%"REG_b" \n\t" "push %%"REG_BP" \n\t" YSCALEYUV2PACKED1(%%REGBP, %5) WRITEYUY2(%%REGb, 8280(%5), %%REGBP) "pop %%"REG_BP" \n\t" "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest), "a" (&c->redDither) ); } else { __asm__ volatile( "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" "mov %4, %%"REG_b" \n\t" "push %%"REG_BP" \n\t" YSCALEYUV2PACKED1b(%%REGBP, %5) WRITEYUY2(%%REGb, 8280(%5), %%REGBP) "pop %%"REG_BP" \n\t" "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest), "a" (&c->redDither) ); } } | 16,189 |
1 | void command_loop(void) { int c, i, j = 0, done = 0, fetchable = 0, prompted = 0; char *input; char **v; const cmdinfo_t *ct; for (i = 0; !done && i < ncmdline; i++) { input = strdup(cmdline[i]); if (!input) { fprintf(stderr, _("cannot strdup command '%s': %s\n"), cmdline[i], strerror(errno)); exit(1); } v = breakline(input, &c); if (c) { ct = find_command(v[0]); if (ct) { if (ct->flags & CMD_FLAG_GLOBAL) { done = command(ct, c, v); } else { j = 0; while (!done && (j = args_command(j))) { done = command(ct, c, v); } } } else { fprintf(stderr, _("command \"%s\" not found\n"), v[0]); } } doneline(input, v); } if (cmdline) { free(cmdline); return; } while (!done) { if (!prompted) { printf("%s", get_prompt()); fflush(stdout); qemu_aio_set_fd_handler(STDIN_FILENO, prep_fetchline, NULL, NULL, NULL, &fetchable); prompted = 1; } qemu_aio_wait(); if (!fetchable) { continue; } input = fetchline(); if (input == NULL) { break; } v = breakline(input, &c); if (c) { ct = find_command(v[0]); if (ct) { done = command(ct, c, v); } else { fprintf(stderr, _("command \"%s\" not found\n"), v[0]); } } doneline(input, v); prompted = 0; fetchable = 0; } qemu_aio_set_fd_handler(STDIN_FILENO, NULL, NULL, NULL, NULL, NULL); } | 16,191 |
1 | static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u) { TCGv_i64 tmp; switch ((size << 1) | u) { case 0: gen_helper_neon_mull_s8(dest, a, b); break; case 1: gen_helper_neon_mull_u8(dest, a, b); break; case 2: gen_helper_neon_mull_s16(dest, a, b); break; case 3: gen_helper_neon_mull_u16(dest, a, b); break; case 4: tmp = gen_muls_i64_i32(a, b); tcg_gen_mov_i64(dest, tmp); break; case 5: tmp = gen_mulu_i64_i32(a, b); tcg_gen_mov_i64(dest, tmp); break; default: abort(); } /* gen_helper_neon_mull_[su]{8|16} do not free their parameters. Don't forget to clean them now. */ if (size < 2) { tcg_temp_free_i32(a); tcg_temp_free_i32(b); } } | 16,192 |
1 | static void dummy_signal(int sig) { } | 16,193 |
1 | void mips_malta_init (ram_addr_t ram_size, const char *boot_device, const char *kernel_filename, const char *kernel_cmdline, const char *initrd_filename, const char *cpu_model) { char *filename; ram_addr_t ram_offset; ram_addr_t bios_offset; target_long bios_size; int64_t kernel_entry; PCIBus *pci_bus; ISADevice *isa_dev; CPUState *env; RTCState *rtc_state; fdctrl_t *floppy_controller; MaltaFPGAState *malta_fpga; qemu_irq *i8259; int piix4_devfn; uint8_t *eeprom_buf; i2c_bus *smbus; int i; DriveInfo *dinfo; DriveInfo *hd[MAX_IDE_BUS * MAX_IDE_DEVS]; DriveInfo *fd[MAX_FD]; int fl_idx = 0; int fl_sectors = 0; /* Make sure the first 3 serial ports are associated with a device. */ for(i = 0; i < 3; i++) { if (!serial_hds[i]) { char label[32]; snprintf(label, sizeof(label), "serial%d", i); serial_hds[i] = qemu_chr_open(label, "null", NULL); } } /* init CPUs */ if (cpu_model == NULL) { #ifdef TARGET_MIPS64 cpu_model = "20Kc"; #else cpu_model = "24Kf"; #endif } env = cpu_init(cpu_model); if (!env) { fprintf(stderr, "Unable to find CPU definition\n"); exit(1); } qemu_register_reset(main_cpu_reset, env); /* allocate RAM */ if (ram_size > (256 << 20)) { fprintf(stderr, "qemu: Too much memory for this machine: %d MB, maximum 256 MB\n", ((unsigned int)ram_size / (1 << 20))); exit(1); } ram_offset = qemu_ram_alloc(ram_size); bios_offset = qemu_ram_alloc(BIOS_SIZE); cpu_register_physical_memory(0, ram_size, ram_offset | IO_MEM_RAM); /* Map the bios at two physical locations, as on the real board. */ cpu_register_physical_memory(0x1e000000LL, BIOS_SIZE, bios_offset | IO_MEM_ROM); cpu_register_physical_memory(0x1fc00000LL, BIOS_SIZE, bios_offset | IO_MEM_ROM); /* FPGA */ malta_fpga = malta_fpga_init(0x1f000000LL, env->irq[2], serial_hds[2]); /* Load firmware in flash / BIOS unless we boot directly into a kernel. */ if (kernel_filename) { /* Write a small bootloader to the flash location. 
*/ loaderparams.ram_size = ram_size; loaderparams.kernel_filename = kernel_filename; loaderparams.kernel_cmdline = kernel_cmdline; loaderparams.initrd_filename = initrd_filename; kernel_entry = load_kernel(env); env->CP0_Status &= ~((1 << CP0St_BEV) | (1 << CP0St_ERL)); write_bootloader(env, qemu_get_ram_ptr(bios_offset), kernel_entry); } else { dinfo = drive_get(IF_PFLASH, 0, fl_idx); if (dinfo) { /* Load firmware from flash. */ bios_size = 0x400000; fl_sectors = bios_size >> 16; #ifdef DEBUG_BOARD_INIT printf("Register parallel flash %d size " TARGET_FMT_lx " at " "offset %08lx addr %08llx '%s' %x\n", fl_idx, bios_size, bios_offset, 0x1e000000LL, bdrv_get_device_name(dinfo->bdrv), fl_sectors); #endif pflash_cfi01_register(0x1e000000LL, bios_offset, dinfo->bdrv, 65536, fl_sectors, 4, 0x0000, 0x0000, 0x0000, 0x0000); fl_idx++; } else { /* Load a BIOS image. */ if (bios_name == NULL) bios_name = BIOS_FILENAME; filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); if (filename) { bios_size = load_image_targphys(filename, 0x1fc00000LL, BIOS_SIZE); qemu_free(filename); } else { bios_size = -1; } if ((bios_size < 0 || bios_size > BIOS_SIZE) && !kernel_filename) { fprintf(stderr, "qemu: Could not load MIPS bios '%s', and no -kernel argument was specified\n", bios_name); exit(1); } } /* In little endian mode the 32bit words in the bios are swapped, a neat trick which allows bi-endian firmware. */ #ifndef TARGET_WORDS_BIGENDIAN { uint32_t *addr = qemu_get_ram_ptr(bios_offset);; uint32_t *end = addr + bios_size; while (addr < end) { bswap32s(addr); } } #endif } /* Board ID = 0x420 (Malta Board with CoreLV) XXX: theoretically 0x1e000010 should map to flash and 0x1fc00010 should map to the board ID. 
*/ stl_phys(0x1fc00010LL, 0x00000420); /* Init internal devices */ cpu_mips_irq_init_cpu(env); cpu_mips_clock_init(env); /* Interrupt controller */ /* The 8259 is attached to the MIPS CPU INT0 pin, ie interrupt 2 */ i8259 = i8259_init(env->irq[2]); /* Northbridge */ pci_bus = pci_gt64120_init(i8259); /* Southbridge */ if (drive_get_max_bus(IF_IDE) >= MAX_IDE_BUS) { fprintf(stderr, "qemu: too many IDE bus\n"); exit(1); } for(i = 0; i < MAX_IDE_BUS * MAX_IDE_DEVS; i++) { hd[i] = drive_get(IF_IDE, i / MAX_IDE_DEVS, i % MAX_IDE_DEVS); } piix4_devfn = piix4_init(pci_bus, 80); isa_bus_irqs(i8259); pci_piix4_ide_init(pci_bus, hd, piix4_devfn + 1); usb_uhci_piix4_init(pci_bus, piix4_devfn + 2); smbus = piix4_pm_init(pci_bus, piix4_devfn + 3, 0x1100, isa_reserve_irq(9)); eeprom_buf = qemu_mallocz(8 * 256); /* XXX: make this persistent */ for (i = 0; i < 8; i++) { /* TODO: Populate SPD eeprom data. */ DeviceState *eeprom; eeprom = qdev_create((BusState *)smbus, "smbus-eeprom"); qdev_prop_set_uint8(eeprom, "address", 0x50 + i); qdev_prop_set_ptr(eeprom, "data", eeprom_buf + (i * 256)); qdev_init(eeprom); } pit = pit_init(0x40, isa_reserve_irq(0)); DMA_init(0); /* Super I/O */ isa_dev = isa_create_simple("i8042"); rtc_state = rtc_init(2000); serial_isa_init(0, serial_hds[0]); serial_isa_init(1, serial_hds[1]); if (parallel_hds[0]) parallel_init(0, parallel_hds[0]); for(i = 0; i < MAX_FD; i++) { fd[i] = drive_get(IF_FLOPPY, 0, i); } floppy_controller = fdctrl_init_isa(fd); /* Sound card */ #ifdef HAS_AUDIO audio_init(pci_bus); #endif /* Network card */ network_init(); /* Optional PCI video card */ if (cirrus_vga_enabled) { pci_cirrus_vga_init(pci_bus); } else if (vmsvga_enabled) { pci_vmsvga_init(pci_bus); } else if (std_vga_enabled) { pci_vga_init(pci_bus, 0, 0); } } | 16,194 |
1 | void HELPER(set_cp15)(CPUState *env, uint32_t insn, uint32_t val) { int op1; int op2; int crm; op1 = (insn >> 21) & 7; op2 = (insn >> 5) & 7; crm = insn & 0xf; switch ((insn >> 16) & 0xf) { case 0: /* ID codes. */ if (arm_feature(env, ARM_FEATURE_XSCALE)) break; if (arm_feature(env, ARM_FEATURE_OMAPCP)) break; if (arm_feature(env, ARM_FEATURE_V7) && op1 == 2 && crm == 0 && op2 == 0) { env->cp15.c0_cssel = val & 0xf; break; } goto bad_reg; case 1: /* System configuration. */ if (arm_feature(env, ARM_FEATURE_V7) && op1 == 0 && crm == 1 && op2 == 0) { env->cp15.c1_scr = val; break; } if (arm_feature(env, ARM_FEATURE_OMAPCP)) op2 = 0; switch (op2) { case 0: if (!arm_feature(env, ARM_FEATURE_XSCALE) || crm == 0) env->cp15.c1_sys = val; /* ??? Lots of these bits are not implemented. */ /* This may enable/disable the MMU, so do a TLB flush. */ tlb_flush(env, 1); break; case 1: /* Auxiliary control register. */ if (arm_feature(env, ARM_FEATURE_XSCALE)) { env->cp15.c1_xscaleauxcr = val; break; } /* Not implemented. */ break; case 2: if (arm_feature(env, ARM_FEATURE_XSCALE)) goto bad_reg; if (env->cp15.c1_coproc != val) { env->cp15.c1_coproc = val; /* ??? Is this safe when called from within a TB? */ tb_flush(env); } break; default: goto bad_reg; } break; case 2: /* MMU Page table control / MPU cache control. */ if (arm_feature(env, ARM_FEATURE_MPU)) { switch (op2) { case 0: env->cp15.c2_data = val; break; case 1: env->cp15.c2_insn = val; break; default: goto bad_reg; } } else { switch (op2) { case 0: env->cp15.c2_base0 = val; break; case 1: env->cp15.c2_base1 = val; break; case 2: val &= 7; env->cp15.c2_control = val; env->cp15.c2_mask = ~(((uint32_t)0xffffffffu) >> val); env->cp15.c2_base_mask = ~((uint32_t)0x3fffu >> val); break; default: goto bad_reg; } } break; case 3: /* MMU Domain access control / MPU write buffer control. */ env->cp15.c3 = val; tlb_flush(env, 1); /* Flush TLB as domain not tracked in TLB */ break; case 4: /* Reserved. 
*/ goto bad_reg; case 5: /* MMU Fault status / MPU access permission. */ if (arm_feature(env, ARM_FEATURE_OMAPCP)) op2 = 0; switch (op2) { case 0: if (arm_feature(env, ARM_FEATURE_MPU)) val = extended_mpu_ap_bits(val); env->cp15.c5_data = val; break; case 1: if (arm_feature(env, ARM_FEATURE_MPU)) val = extended_mpu_ap_bits(val); env->cp15.c5_insn = val; break; case 2: if (!arm_feature(env, ARM_FEATURE_MPU)) goto bad_reg; env->cp15.c5_data = val; break; case 3: if (!arm_feature(env, ARM_FEATURE_MPU)) goto bad_reg; env->cp15.c5_insn = val; break; default: goto bad_reg; } break; case 6: /* MMU Fault address / MPU base/size. */ if (arm_feature(env, ARM_FEATURE_MPU)) { if (crm >= 8) goto bad_reg; env->cp15.c6_region[crm] = val; } else { if (arm_feature(env, ARM_FEATURE_OMAPCP)) op2 = 0; switch (op2) { case 0: env->cp15.c6_data = val; break; case 1: /* ??? This is WFAR on armv6 */ case 2: env->cp15.c6_insn = val; break; default: goto bad_reg; } } break; case 7: /* Cache control. */ env->cp15.c15_i_max = 0x000; env->cp15.c15_i_min = 0xff0; if (op1 != 0) { goto bad_reg; } /* No cache, so nothing to do except VA->PA translations. 
*/ if (arm_feature(env, ARM_FEATURE_VAPA)) { switch (crm) { case 4: if (arm_feature(env, ARM_FEATURE_V7)) { env->cp15.c7_par = val & 0xfffff6ff; } else { env->cp15.c7_par = val & 0xfffff1ff; } break; case 8: { uint32_t phys_addr; target_ulong page_size; int prot; int ret, is_user = op2 & 2; int access_type = op2 & 1; if (op2 & 4) { /* Other states are only available with TrustZone */ goto bad_reg; } ret = get_phys_addr(env, val, access_type, is_user, &phys_addr, &prot, &page_size); if (ret == 0) { /* We do not set any attribute bits in the PAR */ if (page_size == (1 << 24) && arm_feature(env, ARM_FEATURE_V7)) { env->cp15.c7_par = (phys_addr & 0xff000000) | 1 << 1; } else { env->cp15.c7_par = phys_addr & 0xfffff000; } } else { env->cp15.c7_par = ((ret & (10 << 1)) >> 5) | ((ret & (12 << 1)) >> 6) | ((ret & 0xf) << 1) | 1; } break; } } } break; case 8: /* MMU TLB control. */ switch (op2) { case 0: /* Invalidate all. */ tlb_flush(env, 0); break; case 1: /* Invalidate single TLB entry. */ tlb_flush_page(env, val & TARGET_PAGE_MASK); break; case 2: /* Invalidate on ASID. */ tlb_flush(env, val == 0); break; case 3: /* Invalidate single entry on MVA. */ /* ??? This is like case 1, but ignores ASID. */ tlb_flush(env, 1); break; default: goto bad_reg; } break; case 9: if (arm_feature(env, ARM_FEATURE_OMAPCP)) break; if (arm_feature(env, ARM_FEATURE_STRONGARM)) break; /* Ignore ReadBuffer access */ switch (crm) { case 0: /* Cache lockdown. */ switch (op1) { case 0: /* L1 cache. */ switch (op2) { case 0: env->cp15.c9_data = val; break; case 1: env->cp15.c9_insn = val; break; default: goto bad_reg; } break; case 1: /* L2 cache. */ /* Ignore writes to L2 lockdown/auxiliary registers. */ break; default: goto bad_reg; } break; case 1: /* TCM memory region registers. */ /* Not implemented. 
*/ goto bad_reg; case 12: /* Performance monitor control */ /* Performance monitors are implementation defined in v7, * but with an ARM recommended set of registers, which we * follow (although we don't actually implement any counters) */ if (!arm_feature(env, ARM_FEATURE_V7)) { goto bad_reg; } switch (op2) { case 0: /* performance monitor control register */ /* only the DP, X, D and E bits are writable */ env->cp15.c9_pmcr &= ~0x39; env->cp15.c9_pmcr |= (val & 0x39); break; case 1: /* Count enable set register */ val &= (1 << 31); env->cp15.c9_pmcnten |= val; break; case 2: /* Count enable clear */ val &= (1 << 31); env->cp15.c9_pmcnten &= ~val; break; case 3: /* Overflow flag status */ env->cp15.c9_pmovsr &= ~val; break; case 4: /* Software increment */ /* RAZ/WI since we don't implement the software-count event */ break; case 5: /* Event counter selection register */ /* Since we don't implement any events, writing to this register * is actually UNPREDICTABLE. So we choose to RAZ/WI. */ break; default: goto bad_reg; } break; case 13: /* Performance counters */ if (!arm_feature(env, ARM_FEATURE_V7)) { goto bad_reg; } switch (op2) { case 0: /* Cycle count register: not implemented, so RAZ/WI */ break; case 1: /* Event type select */ env->cp15.c9_pmxevtyper = val & 0xff; break; case 2: /* Event count register */ /* Unimplemented (we have no events), RAZ/WI */ break; default: goto bad_reg; } break; case 14: /* Performance monitor control */ if (!arm_feature(env, ARM_FEATURE_V7)) { goto bad_reg; } switch (op2) { case 0: /* user enable */ env->cp15.c9_pmuserenr = val & 1; /* changes access rights for cp registers, so flush tbs */ tb_flush(env); break; case 1: /* interrupt enable set */ /* We have no event counters so only the C bit can be changed */ val &= (1 << 31); env->cp15.c9_pminten |= val; break; case 2: /* interrupt enable clear */ val &= (1 << 31); env->cp15.c9_pminten &= ~val; break; } break; default: goto bad_reg; } break; case 10: /* MMU TLB lockdown. 
*/ /* ??? TLB lockdown not implemented. */ break; case 12: /* Reserved. */ goto bad_reg; case 13: /* Process ID. */ switch (op2) { case 0: /* Unlike real hardware the qemu TLB uses virtual addresses, not modified virtual addresses, so this causes a TLB flush. */ if (env->cp15.c13_fcse != val) tlb_flush(env, 1); env->cp15.c13_fcse = val; break; case 1: /* This changes the ASID, so do a TLB flush. */ if (env->cp15.c13_context != val && !arm_feature(env, ARM_FEATURE_MPU)) tlb_flush(env, 0); env->cp15.c13_context = val; break; default: goto bad_reg; } break; case 14: /* Reserved. */ goto bad_reg; case 15: /* Implementation specific. */ if (arm_feature(env, ARM_FEATURE_XSCALE)) { if (op2 == 0 && crm == 1) { if (env->cp15.c15_cpar != (val & 0x3fff)) { /* Changes cp0 to cp13 behavior, so needs a TB flush. */ tb_flush(env); env->cp15.c15_cpar = val & 0x3fff; } break; } goto bad_reg; } if (arm_feature(env, ARM_FEATURE_OMAPCP)) { switch (crm) { case 0: break; case 1: /* Set TI925T configuration. */ env->cp15.c15_ticonfig = val & 0xe7; env->cp15.c0_cpuid = (val & (1 << 5)) ? /* OS_TYPE bit */ ARM_CPUID_TI915T : ARM_CPUID_TI925T; break; case 2: /* Set I_max. */ env->cp15.c15_i_max = val; break; case 3: /* Set I_min. */ env->cp15.c15_i_min = val; break; case 4: /* Set thread-ID. */ env->cp15.c15_threadid = val & 0xffff; break; case 8: /* Wait-for-interrupt (deprecated). */ cpu_interrupt(env, CPU_INTERRUPT_HALT); break; default: goto bad_reg; } } if (ARM_CPUID(env) == ARM_CPUID_CORTEXA9) { switch (crm) { case 0: if ((op1 == 0) && (op2 == 0)) { env->cp15.c15_power_control = val; } else if ((op1 == 0) && (op2 == 1)) { env->cp15.c15_diagnostic = val; } else if ((op1 == 0) && (op2 == 2)) { env->cp15.c15_power_diagnostic = val; } default: break; } } break; } return; bad_reg: /* ??? For debugging only. Should raise illegal instruction exception. */ cpu_abort(env, "Unimplemented cp15 register write (c%d, c%d, {%d, %d})\n", (insn >> 16) & 0xf, crm, op1, op2); } | 16,195 |
1 | S390CPU *s390x_new_cpu(const char *typename, uint32_t core_id, Error **errp) { S390CPU *cpu = S390_CPU(object_new(typename)); Error *err = NULL; object_property_set_int(OBJECT(cpu), core_id, "core-id", &err); if (err != NULL) { goto out; } object_property_set_bool(OBJECT(cpu), true, "realized", &err); out: if (err) { error_propagate(errp, err); object_unref(OBJECT(cpu)); cpu = NULL; } return cpu; } | 16,196 |
1 | static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx) { switch (mmu_idx) { case ARMMMUIdx_S1SE0: case ARMMMUIdx_S1NSE0: return true; default: return false; case ARMMMUIdx_S12NSE0: case ARMMMUIdx_S12NSE1: g_assert_not_reached(); } } | 16,198 |
1 | int ff_fill_line_with_color(uint8_t *line[4], int pixel_step[4], int w, uint8_t dst_color[4], enum AVPixelFormat pix_fmt, uint8_t rgba_color[4], int *is_packed_rgba, uint8_t rgba_map_ptr[4]) { uint8_t rgba_map[4] = {0}; int i; const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(pix_fmt); int hsub = pix_desc->log2_chroma_w; *is_packed_rgba = ff_fill_rgba_map(rgba_map, pix_fmt) >= 0; if (*is_packed_rgba) { pixel_step[0] = (av_get_bits_per_pixel(pix_desc))>>3; for (i = 0; i < 4; i++) dst_color[rgba_map[i]] = rgba_color[i]; line[0] = av_malloc_array(w, pixel_step[0]); for (i = 0; i < w; i++) memcpy(line[0] + i * pixel_step[0], dst_color, pixel_step[0]); if (rgba_map_ptr) memcpy(rgba_map_ptr, rgba_map, sizeof(rgba_map[0]) * 4); } else { int plane; dst_color[0] = RGB_TO_Y_CCIR(rgba_color[0], rgba_color[1], rgba_color[2]); dst_color[1] = RGB_TO_U_CCIR(rgba_color[0], rgba_color[1], rgba_color[2], 0); dst_color[2] = RGB_TO_V_CCIR(rgba_color[0], rgba_color[1], rgba_color[2], 0); dst_color[3] = rgba_color[3]; for (plane = 0; plane < 4; plane++) { int line_size; int hsub1 = (plane == 1 || plane == 2) ? hsub : 0; pixel_step[plane] = 1; line_size = FF_CEIL_RSHIFT(w, hsub1) * pixel_step[plane]; line[plane] = av_malloc(line_size); if (!line[plane]) { while(plane && line[plane-1]) av_freep(&line[--plane]); } memset(line[plane], dst_color[plane], line_size); } } return 0; } | 16,199 |
1 | static int bfi_decode_frame(AVCodecContext * avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; BFIContext *bfi = avctx->priv_data; uint8_t *dst = bfi->dst; uint8_t *src, *dst_offset, colour1, colour2; uint8_t *frame_end = bfi->dst + avctx->width * avctx->height; uint32_t *pal; int i, j, height = avctx->height; if (bfi->frame.data[0]) avctx->release_buffer(avctx, &bfi->frame); bfi->frame.reference = 1; if (avctx->get_buffer(avctx, &bfi->frame) < 0) { av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); return -1; } /* Set frame parameters and palette, if necessary */ if (!avctx->frame_number) { bfi->frame.pict_type = FF_I_TYPE; bfi->frame.key_frame = 1; /* Setting the palette */ if(avctx->extradata_size>768) { av_log(NULL, AV_LOG_ERROR, "Palette is too large.\n"); return -1; } pal = (uint32_t *) bfi->frame.data[1]; for (i = 0; i < avctx->extradata_size / 3; i++) { int shift = 16; *pal = 0; for (j = 0; j < 3; j++, shift -= 8) *pal += ((avctx->extradata[i * 3 + j] << 2) | (avctx->extradata[i * 3 + j] >> 4)) << shift; pal++; } bfi->frame.palette_has_changed = 1; } else { bfi->frame.pict_type = FF_P_TYPE; bfi->frame.key_frame = 0; } buf += 4; //Unpacked size, not required. 
while (dst != frame_end) { static const uint8_t lentab[4]={0,2,0,1}; unsigned int byte = *buf++, av_uninit(offset); unsigned int code = byte >> 6; unsigned int length = byte & ~0xC0; /* Get length and offset(if required) */ if (length == 0) { if (code == 1) { length = bytestream_get_byte(&buf); offset = bytestream_get_le16(&buf); } else { length = bytestream_get_le16(&buf); if (code == 2 && length == 0) break; } } else { if (code == 1) offset = bytestream_get_byte(&buf); } /* Do boundary check */ if (dst + (length<<lentab[code]) > frame_end) break; switch (code) { case 0: //Normal Chain bytestream_get_buffer(&buf, dst, length); dst += length; break; case 1: //Back Chain dst_offset = dst - offset; length *= 4; //Convert dwords to bytes. if (dst_offset < bfi->dst) break; while (length--) *dst++ = *dst_offset++; break; case 2: //Skip Chain dst += length; break; case 3: //Fill Chain colour1 = bytestream_get_byte(&buf); colour2 = bytestream_get_byte(&buf); while (length--) { *dst++ = colour1; *dst++ = colour2; } break; } } src = bfi->dst; dst = bfi->frame.data[0]; while (height--) { memcpy(dst, src, avctx->width); src += avctx->width; dst += bfi->frame.linesize[0]; } *data_size = sizeof(AVFrame); *(AVFrame *) data = bfi->frame; return buf_size; } | 16,200 |
1 | static void sysbus_device_class_init(ObjectClass *klass, void *data) { DeviceClass *k = DEVICE_CLASS(klass); k->init = sysbus_device_init; k->bus_type = TYPE_SYSTEM_BUS; } | 16,201 |
1 | static av_cold int g726_init(AVCodecContext * avctx) { G726Context* c = avctx->priv_data; unsigned int index= (avctx->bit_rate + avctx->sample_rate/2) / avctx->sample_rate - 2; if (avctx->bit_rate % avctx->sample_rate && avctx->codec->encode) { av_log(avctx, AV_LOG_ERROR, "Bitrate - Samplerate combination is invalid\n"); return -1; } if(avctx->channels != 1){ av_log(avctx, AV_LOG_ERROR, "Only mono is supported\n"); return -1; } if(index>3){ av_log(avctx, AV_LOG_ERROR, "Unsupported number of bits %d\n", index+2); return -1; } g726_reset(c, index); c->code_size = index+2; avctx->coded_frame = avcodec_alloc_frame(); if (!avctx->coded_frame) return AVERROR(ENOMEM); avctx->coded_frame->key_frame = 1; if (avctx->codec->decode) avctx->sample_fmt = SAMPLE_FMT_S16; return 0; } | 16,202 |
1 | static int yop_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { YopDecContext *s = avctx->priv_data; int tag, firstcolor, is_odd_frame; int ret, i, x, y; uint32_t *palette; if (s->frame.data[0]) avctx->release_buffer(avctx, &s->frame); ret = ff_get_buffer(avctx, &s->frame); if (ret < 0) { av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); return ret; } s->dstbuf = s->frame.data[0]; s->dstptr = s->frame.data[0]; s->srcptr = avpkt->data + 4; s->low_nibble = NULL; is_odd_frame = avpkt->data[0]; firstcolor = s->first_color[is_odd_frame]; palette = (uint32_t *)s->frame.data[1]; for (i = 0; i < s->num_pal_colors; i++, s->srcptr += 3) palette[i + firstcolor] = (s->srcptr[0] << 18) | (s->srcptr[1] << 10) | (s->srcptr[2] << 2); s->frame.palette_has_changed = 1; for (y = 0; y < avctx->height; y += 2) { for (x = 0; x < avctx->width; x += 2) { if (s->srcptr - avpkt->data >= avpkt->size) { av_log(avctx, AV_LOG_ERROR, "Packet too small.\n"); return AVERROR_INVALIDDATA; } tag = yop_get_next_nibble(s); if (tag != 0xf) { yop_paint_block(s, tag); } else { tag = yop_get_next_nibble(s); ret = yop_copy_previous_block(s, tag); if (ret < 0) { avctx->release_buffer(avctx, &s->frame); return ret; } } s->dstptr += 2; } s->dstptr += 2*s->frame.linesize[0] - x; } *got_frame = 1; *(AVFrame *) data = s->frame; return avpkt->size; } | 16,206 |
1 | static int32_t scsi_send_command(SCSIDevice *d, uint32_t tag, uint8_t *buf, int lun) { SCSIDeviceState *s = d->state; uint64_t nb_sectors; uint32_t lba; uint32_t len; int cmdlen; int is_write; uint8_t command; uint8_t *outbuf; SCSIRequest *r; command = buf[0]; r = scsi_find_request(s, tag); if (r) { BADF("Tag 0x%x already in use\n", tag); scsi_cancel_io(d, tag); } /* ??? Tags are not unique for different luns. We only implement a single lun, so this should not matter. */ r = scsi_new_request(s, tag); outbuf = r->dma_buf; is_write = 0; DPRINTF("Command: lun=%d tag=0x%x data=0x%02x", lun, tag, buf[0]); switch (command >> 5) { case 0: lba = buf[3] | (buf[2] << 8) | ((buf[1] & 0x1f) << 16); len = buf[4]; cmdlen = 6; break; case 1: case 2: lba = buf[5] | (buf[4] << 8) | (buf[3] << 16) | (buf[2] << 24); len = buf[8] | (buf[7] << 8); cmdlen = 10; break; case 4: lba = buf[5] | (buf[4] << 8) | (buf[3] << 16) | (buf[2] << 24); len = buf[13] | (buf[12] << 8) | (buf[11] << 16) | (buf[10] << 24); cmdlen = 16; break; case 5: lba = buf[5] | (buf[4] << 8) | (buf[3] << 16) | (buf[2] << 24); len = buf[9] | (buf[8] << 8) | (buf[7] << 16) | (buf[6] << 24); cmdlen = 12; break; default: BADF("Unsupported command length, command %x\n", command); goto fail; } #ifdef DEBUG_SCSI { int i; for (i = 1; i < cmdlen; i++) { printf(" 0x%02x", buf[i]); } printf("\n"); } #endif if (lun || buf[1] >> 5) { /* Only LUN 0 supported. */ DPRINTF("Unimplemented LUN %d\n", lun ? 
lun : buf[1] >> 5); if (command != 0x03 && command != 0x12) /* REQUEST SENSE and INQUIRY */ goto fail; } switch (command) { case 0x0: DPRINTF("Test Unit Ready\n"); break; case 0x03: DPRINTF("Request Sense (len %d)\n", len); if (len < 4) goto fail; memset(outbuf, 0, 4); outbuf[0] = 0xf0; outbuf[1] = 0; outbuf[2] = s->sense; r->buf_len = 4; break; case 0x12: DPRINTF("Inquiry (len %d)\n", len); if (buf[1] & 0x2) { /* Command support data - optional, not implemented */ BADF("optional INQUIRY command support request not implemented\n"); goto fail; } else if (buf[1] & 0x1) { /* Vital product data */ uint8_t page_code = buf[2]; if (len < 4) { BADF("Error: Inquiry (EVPD[%02X]) buffer size %d is " "less than 4\n", page_code, len); goto fail; } switch (page_code) { case 0x00: { /* Supported page codes, mandatory */ DPRINTF("Inquiry EVPD[Supported pages] " "buffer size %d\n", len); r->buf_len = 0; if (bdrv_get_type_hint(s->bdrv) == BDRV_TYPE_CDROM) { outbuf[r->buf_len++] = 5; } else { outbuf[r->buf_len++] = 0; } outbuf[r->buf_len++] = 0x00; // this page outbuf[r->buf_len++] = 0x00; outbuf[r->buf_len++] = 3; // number of pages outbuf[r->buf_len++] = 0x00; // list of supported pages (this page) outbuf[r->buf_len++] = 0x80; // unit serial number outbuf[r->buf_len++] = 0x83; // device identification } break; case 0x80: { /* Device serial number, optional */ if (len < 4) { BADF("Error: EVPD[Serial number] Inquiry buffer " "size %d too small, %d needed\n", len, 4); goto fail; } DPRINTF("Inquiry EVPD[Serial number] buffer size %d\n", len); r->buf_len = 0; /* Supported page codes */ if (bdrv_get_type_hint(s->bdrv) == BDRV_TYPE_CDROM) { outbuf[r->buf_len++] = 5; } else { outbuf[r->buf_len++] = 0; } outbuf[r->buf_len++] = 0x80; // this page outbuf[r->buf_len++] = 0x00; outbuf[r->buf_len++] = 0x01; // 1 byte data follow outbuf[r->buf_len++] = '0'; // 1 byte data follow } break; case 0x83: { /* Device identification page, mandatory */ int max_len = 255 - 8; int id_len = 
strlen(bdrv_get_device_name(s->bdrv)); if (id_len > max_len) id_len = max_len; DPRINTF("Inquiry EVPD[Device identification] " "buffer size %d\n", len); r->buf_len = 0; if (bdrv_get_type_hint(s->bdrv) == BDRV_TYPE_CDROM) { outbuf[r->buf_len++] = 5; } else { outbuf[r->buf_len++] = 0; } outbuf[r->buf_len++] = 0x83; // this page outbuf[r->buf_len++] = 0x00; outbuf[r->buf_len++] = 3 + id_len; outbuf[r->buf_len++] = 0x2; // ASCII outbuf[r->buf_len++] = 0; // not officially assigned outbuf[r->buf_len++] = 0; // reserved outbuf[r->buf_len++] = id_len; // length of data following memcpy(&outbuf[r->buf_len], bdrv_get_device_name(s->bdrv), id_len); r->buf_len += id_len; } break; default: BADF("Error: unsupported Inquiry (EVPD[%02X]) " "buffer size %d\n", page_code, len); goto fail; } /* done with EVPD */ break; } else { /* Standard INQUIRY data */ if (buf[2] != 0) { BADF("Error: Inquiry (STANDARD) page or code " "is non-zero [%02X]\n", buf[2]); goto fail; } /* PAGE CODE == 0 */ if (len < 5) { BADF("Error: Inquiry (STANDARD) buffer size %d " "is less than 5\n", len); goto fail; } if (len < 36) { BADF("Error: Inquiry (STANDARD) buffer size %d " "is less than 36 (TODO: only 5 required)\n", len); } } memset(outbuf, 0, 36); if (lun || buf[1] >> 5) { outbuf[0] = 0x7f; /* LUN not supported */ } else if (bdrv_get_type_hint(s->bdrv) == BDRV_TYPE_CDROM) { outbuf[0] = 5; outbuf[1] = 0x80; memcpy(&outbuf[16], "QEMU CD-ROM ", 16); } else { outbuf[0] = 0; memcpy(&outbuf[16], "QEMU HARDDISK ", 16); } memcpy(&outbuf[8], "QEMU ", 8); memcpy(&outbuf[32], QEMU_VERSION, 4); /* Identify device as SCSI-3 rev 1. Some later commands are also implemented. */ outbuf[2] = 3; outbuf[3] = 2; /* Format 2 */ outbuf[4] = 31; /* Sync data transfer and TCQ. */ outbuf[7] = 0x10 | (s->tcq ? 
0x02 : 0); r->buf_len = 36; break; case 0x16: DPRINTF("Reserve(6)\n"); if (buf[1] & 1) goto fail; break; case 0x17: DPRINTF("Release(6)\n"); if (buf[1] & 1) goto fail; break; case 0x1a: case 0x5a: { uint8_t *p; int page; page = buf[2] & 0x3f; DPRINTF("Mode Sense (page %d, len %d)\n", page, len); p = outbuf; memset(p, 0, 4); outbuf[1] = 0; /* Default media type. */ outbuf[3] = 0; /* Block descriptor length. */ if (bdrv_get_type_hint(s->bdrv) == BDRV_TYPE_CDROM) { outbuf[2] = 0x80; /* Readonly. */ } p += 4; if (page == 4) { int cylinders, heads, secs; /* Rigid disk device geometry page. */ p[0] = 4; p[1] = 0x16; /* if a geometry hint is available, use it */ bdrv_get_geometry_hint(s->bdrv, &cylinders, &heads, &secs); p[2] = (cylinders >> 16) & 0xff; p[3] = (cylinders >> 8) & 0xff; p[4] = cylinders & 0xff; p[5] = heads & 0xff; /* Write precomp start cylinder, disabled */ p[6] = (cylinders >> 16) & 0xff; p[7] = (cylinders >> 8) & 0xff; p[8] = cylinders & 0xff; /* Reduced current start cylinder, disabled */ p[9] = (cylinders >> 16) & 0xff; p[10] = (cylinders >> 8) & 0xff; p[11] = cylinders & 0xff; /* Device step rate [ns], 200ns */ p[12] = 0; p[13] = 200; /* Landing zone cylinder */ p[14] = 0xff; p[15] = 0xff; p[16] = 0xff; /* Medium rotation rate [rpm], 5400 rpm */ p[20] = (5400 >> 8) & 0xff; p[21] = 5400 & 0xff; p += 0x16; } else if (page == 5) { int cylinders, heads, secs; /* Flexible disk device geometry page. 
*/ p[0] = 5; p[1] = 0x1e; /* Transfer rate [kbit/s], 5Mbit/s */ p[2] = 5000 >> 8; p[3] = 5000 & 0xff; /* if a geometry hint is available, use it */ bdrv_get_geometry_hint(s->bdrv, &cylinders, &heads, &secs); p[4] = heads & 0xff; p[5] = secs & 0xff; p[6] = s->cluster_size * 2; p[8] = (cylinders >> 8) & 0xff; p[9] = cylinders & 0xff; /* Write precomp start cylinder, disabled */ p[10] = (cylinders >> 8) & 0xff; p[11] = cylinders & 0xff; /* Reduced current start cylinder, disabled */ p[12] = (cylinders >> 8) & 0xff; p[13] = cylinders & 0xff; /* Device step rate [100us], 100us */ p[14] = 0; p[15] = 1; /* Device step pulse width [us], 1us */ p[16] = 1; /* Device head settle delay [100us], 100us */ p[17] = 0; p[18] = 1; /* Motor on delay [0.1s], 0.1s */ p[19] = 1; /* Motor off delay [0.1s], 0.1s */ p[20] = 1; /* Medium rotation rate [rpm], 5400 rpm */ p[28] = (5400 >> 8) & 0xff; p[29] = 5400 & 0xff; p += 0x1e; } else if ((page == 8 || page == 0x3f)) { /* Caching page. */ memset(p,0,20); p[0] = 8; p[1] = 0x12; p[2] = 4; /* WCE */ p += 20; } if ((page == 0x3f || page == 0x2a) && (bdrv_get_type_hint(s->bdrv) == BDRV_TYPE_CDROM)) { /* CD Capabilities and Mechanical Status page. */ p[0] = 0x2a; p[1] = 0x14; p[2] = 3; // CD-R & CD-RW read p[3] = 0; // Writing not supported p[4] = 0x7f; /* Audio, composite, digital out, mode 2 form 1&2, multi session */ p[5] = 0xff; /* CD DA, DA accurate, RW supported, RW corrected, C2 errors, ISRC, UPC, Bar code */ p[6] = 0x2d | (bdrv_is_locked(s->bdrv)? 
2 : 0); /* Locking supported, jumper present, eject, tray */ p[7] = 0; /* no volume & mute control, no changer */ p[8] = (50 * 176) >> 8; // 50x read speed p[9] = (50 * 176) & 0xff; p[10] = 0 >> 8; // No volume p[11] = 0 & 0xff; p[12] = 2048 >> 8; // 2M buffer p[13] = 2048 & 0xff; p[14] = (16 * 176) >> 8; // 16x read speed current p[15] = (16 * 176) & 0xff; p[18] = (16 * 176) >> 8; // 16x write speed p[19] = (16 * 176) & 0xff; p[20] = (16 * 176) >> 8; // 16x write speed current p[21] = (16 * 176) & 0xff; p += 22; } r->buf_len = p - outbuf; outbuf[0] = r->buf_len - 4; if (r->buf_len > len) r->buf_len = len; } break; case 0x1b: DPRINTF("Start Stop Unit\n"); break; case 0x1e: DPRINTF("Prevent Allow Medium Removal (prevent = %d)\n", buf[4] & 3); bdrv_set_locked(s->bdrv, buf[4] & 1); break; case 0x25: DPRINTF("Read Capacity\n"); /* The normal LEN field for this command is zero. */ memset(outbuf, 0, 8); bdrv_get_geometry(s->bdrv, &nb_sectors); /* Returned value is the address of the last sector. 
*/ if (nb_sectors) { nb_sectors--; outbuf[0] = (nb_sectors >> 24) & 0xff; outbuf[1] = (nb_sectors >> 16) & 0xff; outbuf[2] = (nb_sectors >> 8) & 0xff; outbuf[3] = nb_sectors & 0xff; outbuf[4] = 0; outbuf[5] = 0; outbuf[6] = s->cluster_size * 2; outbuf[7] = 0; r->buf_len = 8; } else { scsi_command_complete(r, STATUS_CHECK_CONDITION, SENSE_NOT_READY); return 0; } break; case 0x08: case 0x28: DPRINTF("Read (sector %d, count %d)\n", lba, len); r->sector = lba * s->cluster_size; r->sector_count = len * s->cluster_size; break; case 0x0a: case 0x2a: DPRINTF("Write (sector %d, count %d)\n", lba, len); r->sector = lba * s->cluster_size; r->sector_count = len * s->cluster_size; is_write = 1; break; case 0x35: DPRINTF("Synchronise cache (sector %d, count %d)\n", lba, len); bdrv_flush(s->bdrv); break; case 0x43: { int start_track, format, msf, toclen; msf = buf[1] & 2; format = buf[2] & 0xf; start_track = buf[6]; bdrv_get_geometry(s->bdrv, &nb_sectors); DPRINTF("Read TOC (track %d format %d msf %d)\n", start_track, format, msf >> 1); switch(format) { case 0: toclen = cdrom_read_toc(nb_sectors, outbuf, msf, start_track); break; case 1: /* multi session : only a single session defined */ toclen = 12; memset(outbuf, 0, 12); outbuf[1] = 0x0a; outbuf[2] = 0x01; outbuf[3] = 0x01; break; case 2: toclen = cdrom_read_toc_raw(nb_sectors, outbuf, msf, start_track); break; default: goto error_cmd; } if (toclen > 0) { if (len > toclen) len = toclen; r->buf_len = len; break; } error_cmd: DPRINTF("Read TOC error\n"); goto fail; } case 0x46: DPRINTF("Get Configuration (rt %d, maxlen %d)\n", buf[1] & 3, len); memset(outbuf, 0, 8); /* ??? This should probably return much more information. For now just return the basic header indicating the CD-ROM profile. 
*/ outbuf[7] = 8; // CD-ROM r->buf_len = 8; break; case 0x56: DPRINTF("Reserve(10)\n"); if (buf[1] & 3) goto fail; break; case 0x57: DPRINTF("Release(10)\n"); if (buf[1] & 3) goto fail; break; case 0xa0: DPRINTF("Report LUNs (len %d)\n", len); if (len < 16) goto fail; memset(outbuf, 0, 16); outbuf[3] = 8; r->buf_len = 16; break; case 0x2f: DPRINTF("Verify (sector %d, count %d)\n", lba, len); break; default: DPRINTF("Unknown SCSI command (%2.2x)\n", buf[0]); fail: scsi_command_complete(r, STATUS_CHECK_CONDITION, SENSE_ILLEGAL_REQUEST); return 0; } if (r->sector_count == 0 && r->buf_len == 0) { scsi_command_complete(r, STATUS_GOOD, SENSE_NO_SENSE); } len = r->sector_count * 512 + r->buf_len; if (is_write) { return -len; } else { if (!r->sector_count) r->sector_count = -1; return len; } } | 16,208 |
1 | void cpu_dump_state (CPUCRISState *env, FILE *f, fprintf_function cpu_fprintf, int flags) { int i; uint32_t srs; if (!env || !f) return; cpu_fprintf(f, "PC=%x CCS=%x btaken=%d btarget=%x\n" "cc_op=%d cc_src=%d cc_dest=%d cc_result=%x cc_mask=%x\n", env->pc, env->pregs[PR_CCS], env->btaken, env->btarget, env->cc_op, env->cc_src, env->cc_dest, env->cc_result, env->cc_mask); for (i = 0; i < 16; i++) { cpu_fprintf(f, "%s=%8.8x ",regnames[i], env->regs[i]); if ((i + 1) % 4 == 0) cpu_fprintf(f, "\n"); } cpu_fprintf(f, "\nspecial regs:\n"); for (i = 0; i < 16; i++) { cpu_fprintf(f, "%s=%8.8x ", pregnames[i], env->pregs[i]); if ((i + 1) % 4 == 0) cpu_fprintf(f, "\n"); } srs = env->pregs[PR_SRS]; cpu_fprintf(f, "\nsupport function regs bank %x:\n", srs); if (srs < 256) { for (i = 0; i < 16; i++) { cpu_fprintf(f, "s%2.2d=%8.8x ", i, env->sregs[srs][i]); if ((i + 1) % 4 == 0) cpu_fprintf(f, "\n"); } } cpu_fprintf(f, "\n\n"); } | 16,209 |
1 | void ff_snow_horizontal_compose97i_sse2(IDWTELEM *b, int width){ const int w2= (width+1)>>1; // SSE2 code runs faster with pointers aligned on a 32-byte boundary. IDWTELEM temp_buf[(width>>1) + 4]; IDWTELEM * const temp = temp_buf + 4 - (((int)temp_buf & 0xF) >> 2); const int w_l= (width>>1); const int w_r= w2 - 1; int i; { // Lift 0 IDWTELEM * const ref = b + w2 - 1; IDWTELEM b_0 = b[0]; //By allowing the first entry in b[0] to be calculated twice // (the first time erroneously), we allow the SSE2 code to run an extra pass. // The savings in code and time are well worth having to store this value and // calculate b[0] correctly afterwards. i = 0; asm volatile( "pcmpeqd %%xmm7, %%xmm7 \n\t" "pcmpeqd %%xmm3, %%xmm3 \n\t" "psllw $1, %%xmm3 \n\t" "paddw %%xmm7, %%xmm3 \n\t" "psllw $13, %%xmm3 \n\t" ::); for(; i<w_l-15; i+=16){ asm volatile( "movdqu (%1), %%xmm1 \n\t" "movdqu 16(%1), %%xmm5 \n\t" "movdqu 2(%1), %%xmm2 \n\t" "movdqu 18(%1), %%xmm6 \n\t" "paddw %%xmm1, %%xmm2 \n\t" "paddw %%xmm5, %%xmm6 \n\t" "paddw %%xmm7, %%xmm2 \n\t" "paddw %%xmm7, %%xmm6 \n\t" "pmulhw %%xmm3, %%xmm2 \n\t" "pmulhw %%xmm3, %%xmm6 \n\t" "paddw (%0), %%xmm2 \n\t" "paddw 16(%0), %%xmm6 \n\t" "movdqa %%xmm2, (%0) \n\t" "movdqa %%xmm6, 16(%0) \n\t" :: "r"(&b[i]), "r"(&ref[i]) : "memory" ); } snow_horizontal_compose_lift_lead_out(i, b, b, ref, width, w_l, 0, W_DM, W_DO, W_DS); b[0] = b_0 - ((W_DM * 2 * ref[1]+W_DO)>>W_DS); } { // Lift 1 IDWTELEM * const dst = b+w2; i = 0; for(; (((long)&dst[i]) & 0x1F) && i<w_r; i++){ dst[i] = dst[i] - (b[i] + b[i + 1]); } for(; i<w_r-15; i+=16){ asm volatile( "movdqu (%1), %%xmm1 \n\t" "movdqu 16(%1), %%xmm5 \n\t" "movdqu 2(%1), %%xmm2 \n\t" "movdqu 18(%1), %%xmm6 \n\t" "paddw %%xmm1, %%xmm2 \n\t" "paddw %%xmm5, %%xmm6 \n\t" "movdqa (%0), %%xmm0 \n\t" "movdqa 16(%0), %%xmm4 \n\t" "psubw %%xmm2, %%xmm0 \n\t" "psubw %%xmm6, %%xmm4 \n\t" "movdqa %%xmm0, (%0) \n\t" "movdqa %%xmm4, 16(%0) \n\t" :: "r"(&dst[i]), "r"(&b[i]) : "memory" ); } 
snow_horizontal_compose_lift_lead_out(i, dst, dst, b, width, w_r, 1, W_CM, W_CO, W_CS); } { // Lift 2 IDWTELEM * const ref = b+w2 - 1; IDWTELEM b_0 = b[0]; i = 0; asm volatile( "psllw $15, %%xmm7 \n\t" "pcmpeqw %%xmm6, %%xmm6 \n\t" "psrlw $13, %%xmm6 \n\t" "paddw %%xmm7, %%xmm6 \n\t" ::); for(; i<w_l-15; i+=16){ asm volatile( "movdqu (%1), %%xmm0 \n\t" "movdqu 16(%1), %%xmm4 \n\t" "movdqu 2(%1), %%xmm1 \n\t" "movdqu 18(%1), %%xmm5 \n\t" //FIXME try aligned reads and shifts "paddw %%xmm6, %%xmm0 \n\t" "paddw %%xmm6, %%xmm4 \n\t" "paddw %%xmm7, %%xmm1 \n\t" "paddw %%xmm7, %%xmm5 \n\t" "pavgw %%xmm1, %%xmm0 \n\t" "pavgw %%xmm5, %%xmm4 \n\t" "psubw %%xmm7, %%xmm0 \n\t" "psubw %%xmm7, %%xmm4 \n\t" "psraw $1, %%xmm0 \n\t" "psraw $1, %%xmm4 \n\t" "movdqa (%0), %%xmm1 \n\t" "movdqa 16(%0), %%xmm5 \n\t" "paddw %%xmm1, %%xmm0 \n\t" "paddw %%xmm5, %%xmm4 \n\t" "psraw $2, %%xmm0 \n\t" "psraw $2, %%xmm4 \n\t" "paddw %%xmm1, %%xmm0 \n\t" "paddw %%xmm5, %%xmm4 \n\t" "movdqa %%xmm0, (%0) \n\t" "movdqa %%xmm4, 16(%0) \n\t" :: "r"(&b[i]), "r"(&ref[i]) : "memory" ); } snow_horizontal_compose_liftS_lead_out(i, b, b, ref, width, w_l); b[0] = b_0 + ((2 * ref[1] + W_BO-1 + 4 * b_0) >> W_BS); } { // Lift 3 IDWTELEM * const src = b+w2; i = 0; for(; (((long)&temp[i]) & 0x1F) && i<w_r; i++){ temp[i] = src[i] - ((-W_AM*(b[i] + b[i+1]))>>W_AS); } for(; i<w_r-7; i+=8){ asm volatile( "movdqu 2(%1), %%xmm2 \n\t" "movdqu 18(%1), %%xmm6 \n\t" "paddw (%1), %%xmm2 \n\t" "paddw 16(%1), %%xmm6 \n\t" "movdqu (%0), %%xmm0 \n\t" "movdqu 16(%0), %%xmm4 \n\t" "paddw %%xmm2, %%xmm0 \n\t" "paddw %%xmm6, %%xmm4 \n\t" "psraw $1, %%xmm2 \n\t" "psraw $1, %%xmm6 \n\t" "paddw %%xmm0, %%xmm2 \n\t" "paddw %%xmm4, %%xmm6 \n\t" "movdqa %%xmm2, (%2) \n\t" "movdqa %%xmm6, 16(%2) \n\t" :: "r"(&src[i]), "r"(&b[i]), "r"(&temp[i]) : "memory" ); } snow_horizontal_compose_lift_lead_out(i, temp, src, b, width, w_r, 1, -W_AM, W_AO+1, W_AS); } { snow_interleave_line_header(&i, width, b, temp); for (; (i & 0x3E) != 0x3E; i-=2){ 
b[i+1] = temp[i>>1]; b[i] = b[i>>1]; } for (i-=62; i>=0; i-=64){ asm volatile( "movdqa (%1), %%xmm0 \n\t" "movdqa 16(%1), %%xmm2 \n\t" "movdqa 32(%1), %%xmm4 \n\t" "movdqa 48(%1), %%xmm6 \n\t" "movdqa (%1), %%xmm1 \n\t" "movdqa 16(%1), %%xmm3 \n\t" "movdqa 32(%1), %%xmm5 \n\t" "movdqa 48(%1), %%xmm7 \n\t" "punpcklwd (%2), %%xmm0 \n\t" "punpcklwd 16(%2), %%xmm2 \n\t" "punpcklwd 32(%2), %%xmm4 \n\t" "punpcklwd 48(%2), %%xmm6 \n\t" "movdqa %%xmm0, (%0) \n\t" "movdqa %%xmm2, 32(%0) \n\t" "movdqa %%xmm4, 64(%0) \n\t" "movdqa %%xmm6, 96(%0) \n\t" "punpckhwd (%2), %%xmm1 \n\t" "punpckhwd 16(%2), %%xmm3 \n\t" "punpckhwd 32(%2), %%xmm5 \n\t" "punpckhwd 48(%2), %%xmm7 \n\t" "movdqa %%xmm1, 16(%0) \n\t" "movdqa %%xmm3, 48(%0) \n\t" "movdqa %%xmm5, 80(%0) \n\t" "movdqa %%xmm7, 112(%0) \n\t" :: "r"(&(b)[i]), "r"(&(b)[i>>1]), "r"(&(temp)[i>>1]) : "memory" ); } } } | 16,210 |
1 | static int parse_meter(DBEContext *s) { if (s->meter_size) skip_input(s, s->key_present + s->meter_size + 1); return 0; } | 16,212 |
1 | static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn, int extra, TCGv var) { int val, rm; TCGv offset; if (insn & (1 << 22)) { /* immediate */ val = (insn & 0xf) | ((insn >> 4) & 0xf0); if (!(insn & (1 << 23))) val = -val; val += extra; if (val != 0) tcg_gen_addi_i32(var, var, val); } else { /* register */ if (extra) tcg_gen_addi_i32(var, var, extra); rm = (insn) & 0xf; offset = load_reg(s, rm); if (!(insn & (1 << 23))) tcg_gen_sub_i32(var, var, offset); else tcg_gen_add_i32(var, var, offset); dead_tmp(offset); } } | 16,213 |
1 | int kvm_cpu_exec(CPUState *env) { struct kvm_run *run = env->kvm_run; int ret; dprintf("kvm_cpu_exec()\n"); do { #ifndef CONFIG_IOTHREAD if (env->exit_request) { dprintf("interrupt exit requested\n"); ret = 0; break; } #endif if (env->kvm_vcpu_dirty) { kvm_arch_put_registers(env); env->kvm_vcpu_dirty = 0; } kvm_arch_pre_run(env, run); qemu_mutex_unlock_iothread(); ret = kvm_vcpu_ioctl(env, KVM_RUN, 0); qemu_mutex_lock_iothread(); kvm_arch_post_run(env, run); if (ret == -EINTR || ret == -EAGAIN) { cpu_exit(env); dprintf("io window exit\n"); ret = 0; break; } if (ret < 0) { dprintf("kvm run failed %s\n", strerror(-ret)); abort(); } kvm_flush_coalesced_mmio_buffer(); ret = 0; /* exit loop */ switch (run->exit_reason) { case KVM_EXIT_IO: dprintf("handle_io\n"); ret = kvm_handle_io(run->io.port, (uint8_t *)run + run->io.data_offset, run->io.direction, run->io.size, run->io.count); break; case KVM_EXIT_MMIO: dprintf("handle_mmio\n"); cpu_physical_memory_rw(run->mmio.phys_addr, run->mmio.data, run->mmio.len, run->mmio.is_write); ret = 1; break; case KVM_EXIT_IRQ_WINDOW_OPEN: dprintf("irq_window_open\n"); break; case KVM_EXIT_SHUTDOWN: dprintf("shutdown\n"); qemu_system_reset_request(); ret = 1; break; case KVM_EXIT_UNKNOWN: dprintf("kvm_exit_unknown\n"); break; case KVM_EXIT_FAIL_ENTRY: dprintf("kvm_exit_fail_entry\n"); break; case KVM_EXIT_EXCEPTION: dprintf("kvm_exit_exception\n"); break; case KVM_EXIT_DEBUG: dprintf("kvm_exit_debug\n"); #ifdef KVM_CAP_SET_GUEST_DEBUG if (kvm_arch_debug(&run->debug.arch)) { gdb_set_stop_cpu(env); vm_stop(EXCP_DEBUG); env->exception_index = EXCP_DEBUG; return 0; } /* re-enter, this exception was guest-internal */ ret = 1; #endif /* KVM_CAP_SET_GUEST_DEBUG */ break; default: dprintf("kvm_arch_handle_exit\n"); ret = kvm_arch_handle_exit(env, run); break; } } while (ret > 0); if (env->exit_request) { env->exit_request = 0; env->exception_index = EXCP_INTERRUPT; } return ret; } | 16,215 |
1 | static struct glfs *qemu_gluster_init(BlockdevOptionsGluster *gconf, const char *filename, QDict *options, Error **errp) { int ret; if (filename) { ret = qemu_gluster_parse_uri(gconf, filename); if (ret < 0) { error_setg(errp, "invalid URI"); error_append_hint(errp, "Usage: file=gluster[+transport]://" "[host[:port]]/volume/path[?socket=...]\n"); errno = -ret; return NULL; } } else { ret = qemu_gluster_parse_json(gconf, options, errp); if (ret < 0) { error_append_hint(errp, "Usage: " "-drive driver=qcow2,file.driver=gluster," "file.volume=testvol,file.path=/path/a.qcow2" "[,file.debug=9],file.server.0.type=tcp," "file.server.0.host=1.2.3.4," "file.server.0.port=24007," "file.server.1.transport=unix," "file.server.1.socket=/var/run/glusterd.socket ..." "\n"); errno = -ret; return NULL; } } return qemu_gluster_glfs_init(gconf, errp); } | 16,217 |
1 | void visit_type_size(Visitor *v, uint64_t *obj, const char *name, Error **errp) { int64_t value; if (!error_is_set(errp)) { if (v->type_size) { v->type_size(v, obj, name, errp); } else if (v->type_uint64) { v->type_uint64(v, obj, name, errp); } else { value = *obj; v->type_int(v, &value, name, errp); *obj = value; } } } | 16,218 |
1 | int ff_rtsp_send_cmd_with_content(AVFormatContext *s, const char *method, const char *url, const char *header, RTSPMessageHeader *reply, unsigned char **content_ptr, const unsigned char *send_content, int send_content_length) { RTSPState *rt = s->priv_data; HTTPAuthType cur_auth_type; int ret; retry: cur_auth_type = rt->auth_state.auth_type; if ((ret = ff_rtsp_send_cmd_with_content_async(s, method, url, header, send_content, send_content_length))) return ret; if ((ret = ff_rtsp_read_reply(s, reply, content_ptr, 0, method) ) < 0) return ret; if (reply->status_code == 401 && cur_auth_type == HTTP_AUTH_NONE && rt->auth_state.auth_type != HTTP_AUTH_NONE) goto retry; if (reply->status_code > 400){ av_log(s, AV_LOG_ERROR, "method %s failed: %d%s\n", method, reply->status_code, reply->reason); av_log(s, AV_LOG_DEBUG, "%s\n", rt->last_reply); } return 0; } | 16,219 |
1 | static void qmp_input_type_str(Visitor *v, char **obj, const char *name, Error **errp) { QmpInputVisitor *qiv = to_qiv(v); QObject *qobj = qmp_input_get_object(qiv, name, true); if (!qobj || qobject_type(qobj) != QTYPE_QSTRING) { error_setg(errp, QERR_INVALID_PARAMETER_TYPE, name ? name : "null", "string"); return; } *obj = g_strdup(qstring_get_str(qobject_to_qstring(qobj))); } | 16,220 |
1 | int ff_huff_build_tree(AVCodecContext *avctx, VLC *vlc, int nb_codes, Node *nodes, huff_cmp_t cmp, int hnode_first) { int i, j; int cur_node; int64_t sum = 0; for(i = 0; i < nb_codes; i++){ nodes[i].sym = i; nodes[i].n0 = -2; sum += nodes[i].count; } if(sum >> 31) { av_log(avctx, AV_LOG_ERROR, "Too high symbol frequencies. Tree construction is not possible\n"); return -1; } qsort(nodes, nb_codes, sizeof(Node), cmp); cur_node = nb_codes; for(i = 0; i < nb_codes*2-1; i += 2){ nodes[cur_node].sym = HNODE; nodes[cur_node].count = nodes[i].count + nodes[i+1].count; nodes[cur_node].n0 = i; for(j = cur_node; j > 0; j--){ if(nodes[j].count > nodes[j-1].count || (nodes[j].count == nodes[j-1].count && (!hnode_first || nodes[j].n0==j-1 || nodes[j].n0==j-2 || (nodes[j].sym!=HNODE && nodes[j-1].sym!=HNODE)))) break; FFSWAP(Node, nodes[j], nodes[j-1]); } cur_node++; } if(build_huff_tree(vlc, nodes, nb_codes*2-2) < 0){ av_log(avctx, AV_LOG_ERROR, "Error building tree\n"); return -1; } return 0; } | 16,221 |
0 | uint8_t *av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, int size) { int ret; uint8_t *data; if ((unsigned)size > INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE) return NULL; data = av_malloc(size + AV_INPUT_BUFFER_PADDING_SIZE); if (!data) return NULL; ret = av_packet_add_side_data(pkt, type, data, size); if (ret < 0) { av_freep(&data); return NULL; } return data; } | 16,222 |
0 | static void jazz_led_text_update(void *opaque, console_ch_t *chardata) { LedState *s = opaque; char buf[2]; dpy_text_cursor(s->con, -1, -1); qemu_console_resize(s->con, 2, 1); /* TODO: draw the segments */ snprintf(buf, 2, "%02hhx\n", s->segments); console_write_ch(chardata++, 0x00200100 | buf[0]); console_write_ch(chardata++, 0x00200100 | buf[1]); dpy_text_update(s->con, 0, 0, 2, 1); } | 16,223 |
0 | static void tcp_chr_tls_init(CharDriverState *chr) { TCPCharDriver *s = chr->opaque; QIOChannelTLS *tioc; Error *err = NULL; if (s->is_listen) { tioc = qio_channel_tls_new_server( s->ioc, s->tls_creds, NULL, /* XXX Use an ACL */ &err); } else { tioc = qio_channel_tls_new_client( s->ioc, s->tls_creds, s->addr->u.inet->host, &err); } if (tioc == NULL) { error_free(err); tcp_chr_disconnect(chr); } object_unref(OBJECT(s->ioc)); s->ioc = QIO_CHANNEL(tioc); qio_channel_tls_handshake(tioc, tcp_chr_tls_handshake, chr, NULL); } | 16,224 |
0 | static int smacker_read_header(AVFormatContext *s) { AVIOContext *pb = s->pb; SmackerContext *smk = s->priv_data; AVStream *st, *ast[7]; int i, ret; int tbase; /* read and check header */ smk->magic = avio_rl32(pb); if (smk->magic != MKTAG('S', 'M', 'K', '2') && smk->magic != MKTAG('S', 'M', 'K', '4')) return AVERROR_INVALIDDATA; smk->width = avio_rl32(pb); smk->height = avio_rl32(pb); smk->frames = avio_rl32(pb); smk->pts_inc = (int32_t)avio_rl32(pb); smk->flags = avio_rl32(pb); if(smk->flags & SMACKER_FLAG_RING_FRAME) smk->frames++; for(i = 0; i < 7; i++) smk->audio[i] = avio_rl32(pb); smk->treesize = avio_rl32(pb); if(smk->treesize >= UINT_MAX/4){ // smk->treesize + 16 must not overflow (this check is probably redundant) av_log(s, AV_LOG_ERROR, "treesize too large\n"); return AVERROR_INVALIDDATA; } //FIXME remove extradata "rebuilding" smk->mmap_size = avio_rl32(pb); smk->mclr_size = avio_rl32(pb); smk->full_size = avio_rl32(pb); smk->type_size = avio_rl32(pb); for(i = 0; i < 7; i++) { smk->rates[i] = avio_rl24(pb); smk->aflags[i] = avio_r8(pb); } smk->pad = avio_rl32(pb); /* setup data */ if(smk->frames > 0xFFFFFF) { av_log(s, AV_LOG_ERROR, "Too many frames: %i\n", smk->frames); return AVERROR_INVALIDDATA; } smk->frm_size = av_malloc(smk->frames * 4); smk->frm_flags = av_malloc(smk->frames); smk->is_ver4 = (smk->magic != MKTAG('S', 'M', 'K', '2')); /* read frame info */ for(i = 0; i < smk->frames; i++) { smk->frm_size[i] = avio_rl32(pb); } for(i = 0; i < smk->frames; i++) { smk->frm_flags[i] = avio_r8(pb); } /* init video codec */ st = avformat_new_stream(s, NULL); if (!st) return AVERROR(ENOMEM); smk->videoindex = st->index; st->codec->width = smk->width; st->codec->height = smk->height; st->codec->pix_fmt = AV_PIX_FMT_PAL8; st->codec->codec_type = AVMEDIA_TYPE_VIDEO; st->codec->codec_id = AV_CODEC_ID_SMACKVIDEO; st->codec->codec_tag = smk->magic; /* Smacker uses 100000 as internal timebase */ if(smk->pts_inc < 0) smk->pts_inc = -smk->pts_inc; else 
smk->pts_inc *= 100; tbase = 100000; av_reduce(&tbase, &smk->pts_inc, tbase, smk->pts_inc, (1UL<<31)-1); avpriv_set_pts_info(st, 33, smk->pts_inc, tbase); st->duration = smk->frames; /* handle possible audio streams */ for(i = 0; i < 7; i++) { smk->indexes[i] = -1; if (smk->rates[i]) { ast[i] = avformat_new_stream(s, NULL); if (!ast[i]) return AVERROR(ENOMEM); smk->indexes[i] = ast[i]->index; ast[i]->codec->codec_type = AVMEDIA_TYPE_AUDIO; if (smk->aflags[i] & SMK_AUD_BINKAUD) { ast[i]->codec->codec_id = AV_CODEC_ID_BINKAUDIO_RDFT; } else if (smk->aflags[i] & SMK_AUD_USEDCT) { ast[i]->codec->codec_id = AV_CODEC_ID_BINKAUDIO_DCT; } else if (smk->aflags[i] & SMK_AUD_PACKED){ ast[i]->codec->codec_id = AV_CODEC_ID_SMACKAUDIO; ast[i]->codec->codec_tag = MKTAG('S', 'M', 'K', 'A'); } else { ast[i]->codec->codec_id = AV_CODEC_ID_PCM_U8; } if (smk->aflags[i] & SMK_AUD_STEREO) { ast[i]->codec->channels = 2; ast[i]->codec->channel_layout = AV_CH_LAYOUT_STEREO; } else { ast[i]->codec->channels = 1; ast[i]->codec->channel_layout = AV_CH_LAYOUT_MONO; } ast[i]->codec->sample_rate = smk->rates[i]; ast[i]->codec->bits_per_coded_sample = (smk->aflags[i] & SMK_AUD_16BITS) ? 
16 : 8; if(ast[i]->codec->bits_per_coded_sample == 16 && ast[i]->codec->codec_id == AV_CODEC_ID_PCM_U8) ast[i]->codec->codec_id = AV_CODEC_ID_PCM_S16LE; avpriv_set_pts_info(ast[i], 64, 1, ast[i]->codec->sample_rate * ast[i]->codec->channels * ast[i]->codec->bits_per_coded_sample / 8); } } /* load trees to extradata, they will be unpacked by decoder */ st->codec->extradata = av_mallocz(smk->treesize + 16 + FF_INPUT_BUFFER_PADDING_SIZE); st->codec->extradata_size = smk->treesize + 16; if(!st->codec->extradata){ av_log(s, AV_LOG_ERROR, "Cannot allocate %i bytes of extradata\n", smk->treesize + 16); av_freep(&smk->frm_size); av_freep(&smk->frm_flags); return AVERROR(ENOMEM); } ret = avio_read(pb, st->codec->extradata + 16, st->codec->extradata_size - 16); if(ret != st->codec->extradata_size - 16){ av_freep(&smk->frm_size); av_freep(&smk->frm_flags); return AVERROR(EIO); } ((int32_t*)st->codec->extradata)[0] = av_le2ne32(smk->mmap_size); ((int32_t*)st->codec->extradata)[1] = av_le2ne32(smk->mclr_size); ((int32_t*)st->codec->extradata)[2] = av_le2ne32(smk->full_size); ((int32_t*)st->codec->extradata)[3] = av_le2ne32(smk->type_size); smk->curstream = -1; smk->nextpos = avio_tell(pb); return 0; } | 16,225 |
/*
 * Finish configuring one output stream: either open its encoder (encoding
 * path) or set up stream copy, then attach bitstream filters and, once all
 * streams of the output file are ready, write the file header.
 * Returns 0 on success or a negative AVERROR code.
 */
static int init_output_stream(OutputStream *ost, char *error, int error_len)
{
    int ret = 0;

    if (ost->encoding_needed) {
        AVCodec      *codec = ost->enc;
        AVCodecContext *dec = NULL;
        InputStream *ist;

        ret = init_output_stream_encode(ost);
        if (ret < 0)
            return ret;

        /* propagate the decoder's subtitle header (e.g. ASS styles) so the
         * subtitle encoder can emit a consistent header */
        if ((ist = get_input_stream(ost)))
            dec = ist->dec_ctx;
        if (dec && dec->subtitle_header) {
            ost->enc_ctx->subtitle_header = av_malloc(dec->subtitle_header_size);
            if (!ost->enc_ctx->subtitle_header)
                return AVERROR(ENOMEM);
            memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header,
                   dec->subtitle_header_size);
            ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
        }
        if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
            av_dict_set(&ost->encoder_opts, "threads", "auto", 0);

        /* hand the filter graph's hardware frames context to the encoder
         * so hw-accelerated frames can be consumed directly */
        if (ost->filter && ost->filter->filter->inputs[0]->hw_frames_ctx) {
            ost->enc_ctx->hw_frames_ctx =
                av_buffer_ref(ost->filter->filter->inputs[0]->hw_frames_ctx);
            if (!ost->enc_ctx->hw_frames_ctx)
                return AVERROR(ENOMEM);
        }

        if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
            if (ret == AVERROR_EXPERIMENTAL)
                abort_codec_experimental(codec, 1);
            snprintf(error, error_len,
                     "Error while opening encoder for output stream #%d:%d - "
                     "maybe incorrect parameters such as bit_rate, rate, width or height",
                     ost->file_index, ost->index);
            return ret;
        }
        /* any option left in encoder_opts was not consumed by the encoder */
        assert_avoptions(ost->encoder_opts);
        if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
            av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
                                         "It takes bits/s as argument, not kbits/s\n");

        ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
        if (ret < 0) {
            av_log(NULL, AV_LOG_FATAL,
                   "Error initializing the output stream codec context.\n");
            exit_program(1);
        }

        /* copy the encoder's global side data (e.g. CPB props) onto the
         * stream so the muxer can see it */
        if (ost->enc_ctx->nb_coded_side_data) {
            int i;

            ost->st->side_data = av_realloc_array(NULL,
                                                  ost->enc_ctx->nb_coded_side_data,
                                                  sizeof(*ost->st->side_data));
            if (!ost->st->side_data)
                return AVERROR(ENOMEM);

            for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
                const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
                AVPacketSideData *sd_dst = &ost->st->side_data[i];

                sd_dst->data = av_malloc(sd_src->size);
                if (!sd_dst->data)
                    return AVERROR(ENOMEM);
                memcpy(sd_dst->data, sd_src->data, sd_src->size);
                sd_dst->size = sd_src->size;
                sd_dst->type = sd_src->type;
                ost->st->nb_side_data++;
            }
        }

        ost->st->time_base = ost->enc_ctx->time_base;
    } else if (ost->stream_copy) {
        ret = init_output_stream_streamcopy(ost);
        if (ret < 0)
            return ret;

        /*
         * FIXME: will the codec context used by the parser during streamcopy
         * This should go away with the new parser API.
         */
        ret = avcodec_parameters_to_context(ost->parser_avctx, ost->st->codecpar);
        if (ret < 0)
            return ret;
    }

    /* initialize bitstream filters for the output stream
     * needs to be done here, because the codec id for streamcopy is not
     * known until now */
    ret = init_output_bsfs(ost);
    if (ret < 0)
        return ret;

    ost->mux_timebase = ost->st->time_base;

    ost->initialized = 1;

    /* header is written once every stream of the file is initialized */
    ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
    if (ret < 0)
        return ret;

    return ret;
}
/*
 * Pull one decoded picture out of the CrystalHD hardware decoder.
 *
 * Handles three driver outcomes: a format-change notification (updates
 * width/height and the sample aspect ratio, no frame produced), a
 * successful output (copied into *data via copy_frame()), and "busy"
 * (no output ready yet). Returns one of the CopyRet codes; *got_frame is
 * set by copy_frame() when a picture is actually emitted.
 */
static inline CopyRet receive_frame(AVCodecContext *avctx,
                                    void *data, int *got_frame)
{
    BC_STATUS ret;
    BC_DTS_PROC_OUT output = {
        .PicInfo.width  = avctx->width,
        .PicInfo.height = avctx->height,
    };
    CHDContext *priv = avctx->priv_data;
    HANDLE dev       = priv->dev;

    *got_frame = 0;

    // Request decoded data from the driver
    ret = DtsProcOutputNoCopy(dev, OUTPUT_PROC_TIMEOUT, &output);
    if (ret == BC_STS_FMT_CHANGE) {
        av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: Initial format change\n");
        avctx->width  = output.PicInfo.width;
        avctx->height = output.PicInfo.height;
        /* map the driver's aspect-ratio enum onto an AVRational SAR */
        switch (output.PicInfo.aspect_ratio) {
        case vdecAspectRatioSquare:
            avctx->sample_aspect_ratio = (AVRational) {  1,  1};
            break;
        case vdecAspectRatio12_11:
            avctx->sample_aspect_ratio = (AVRational) { 12, 11};
            break;
        case vdecAspectRatio10_11:
            avctx->sample_aspect_ratio = (AVRational) { 10, 11};
            break;
        case vdecAspectRatio16_11:
            avctx->sample_aspect_ratio = (AVRational) { 16, 11};
            break;
        case vdecAspectRatio40_33:
            avctx->sample_aspect_ratio = (AVRational) { 40, 33};
            break;
        case vdecAspectRatio24_11:
            avctx->sample_aspect_ratio = (AVRational) { 24, 11};
            break;
        case vdecAspectRatio20_11:
            avctx->sample_aspect_ratio = (AVRational) { 20, 11};
            break;
        case vdecAspectRatio32_11:
            avctx->sample_aspect_ratio = (AVRational) { 32, 11};
            break;
        case vdecAspectRatio80_33:
            avctx->sample_aspect_ratio = (AVRational) { 80, 33};
            break;
        case vdecAspectRatio18_11:
            avctx->sample_aspect_ratio = (AVRational) { 18, 11};
            break;
        case vdecAspectRatio15_11:
            avctx->sample_aspect_ratio = (AVRational) { 15, 11};
            break;
        case vdecAspectRatio64_33:
            avctx->sample_aspect_ratio = (AVRational) { 64, 33};
            break;
        case vdecAspectRatio160_99:
            avctx->sample_aspect_ratio = (AVRational) {160, 99};
            break;
        case vdecAspectRatio4_3:
            avctx->sample_aspect_ratio = (AVRational) {  4,  3};
            break;
        case vdecAspectRatio16_9:
            avctx->sample_aspect_ratio = (AVRational) { 16,  9};
            break;
        case vdecAspectRatio221_1:
            avctx->sample_aspect_ratio = (AVRational) {221,  1};
            break;
        }
        return RET_OK;
    } else if (ret == BC_STS_SUCCESS) {
        int copy_ret = -1;
        if (output.PoutFlags & BC_POUT_FLAGS_PIB_VALID) {
            /* MPEG-4 packed-B-frame workaround: a zero timestamp marks the
             * duplicated second half of a packed frame; drop it. */
            if (avctx->codec->id == AV_CODEC_ID_MPEG4 &&
                output.PicInfo.timeStamp == 0 && priv->bframe_bug) {
                /* NOTE(review): this inner branch can never be taken —
                 * the enclosing condition already requires priv->bframe_bug
                 * to be set, so the log below is dead code. Possibly the
                 * negation (or the outer test) is wrong; confirm intent. */
                if (!priv->bframe_bug) {
                    av_log(avctx, AV_LOG_VERBOSE,
                           "CrystalHD: Not returning packed frame twice.\n");
                }
                DtsReleaseOutputBuffs(dev, NULL, FALSE);
                return RET_COPY_AGAIN;
            }

            print_frame_info(priv, &output);

            copy_ret = copy_frame(avctx, &output, data, got_frame);
        } else {
            /*
             * An invalid frame has been consumed.
             */
            av_log(avctx, AV_LOG_ERROR, "CrystalHD: ProcOutput succeeded with "
                                        "invalid PIB\n");
            copy_ret = RET_OK;
        }
        DtsReleaseOutputBuffs(dev, NULL, FALSE);
        return copy_ret;
    } else if (ret == BC_STS_BUSY) {
        /* nothing ready yet; caller should retry later */
        return RET_OK;
    } else {
        av_log(avctx, AV_LOG_ERROR, "CrystalHD: ProcOutput failed %d\n", ret);
        return RET_ERROR;
    }
}
0 | void omap_mpuio_out_set(struct omap_mpuio_s *s, int line, qemu_irq handler) { if (line >= 16 || line < 0) hw_error("%s: No GPIO line %i\n", __FUNCTION__, line); s->handler[line] = handler; } | 16,229 |
/*
 * Acknowledge the highest-priority pending interrupt on the cascaded
 * 8259 pair and return the CPU interrupt vector, or -1 when the
 * no_spurious_interrupt_hack policy swallows a spurious IRQ.
 * IRQ2 on the master is the cascade input from the slave controller.
 */
int pic_read_irq(DeviceState *d)
{
    PICCommonState *s = DO_UPCAST(PICCommonState, dev.qdev, d);
    int irq, irq2, intno;

    irq = pic_get_irq(s);
    if (irq >= 0) {
        if (irq == 2) {
            /* cascade: resolve the real source on the slave */
            irq2 = pic_get_irq(slave_pic);
            if (irq2 >= 0) {
                pic_intack(slave_pic, irq2);
            } else {
                /* spurious IRQ on slave controller */
                if (no_spurious_interrupt_hack) {
                    /* Pretend it was delivered and acknowledged.  If
                     * it was spurious due to slave_pic->imr, then
                     * as soon as the mask is cleared, the slave will
                     * re-trigger IRQ2 on the master.  If it is spurious for
                     * some other reason, make sure we don't keep trying
                     * to half-process the same spurious interrupt over
                     * and over again.
                     */
                    s->irr &= ~(1<<irq);
                    s->last_irr &= ~(1<<irq);
                    s->isr &= ~(1<<irq);
                    return -1;
                }
                /* hardware behaviour: spurious slave IRQ reads as IRQ7 */
                irq2 = 7;
            }
            intno = slave_pic->irq_base + irq2;
        } else {
            intno = s->irq_base + irq;
        }
        pic_intack(s, irq);
    } else {
        /* spurious IRQ on host controller */
        if (no_spurious_interrupt_hack) {
            return -1;
        }
        irq = 7;
        intno = s->irq_base + irq;
    }

#if defined(DEBUG_PIC) || defined(DEBUG_IRQ_LATENCY)
    /* report the slave IRQ as 8..15 in the debug output */
    if (irq == 2) {
        irq = irq2 + 8;
    }
#endif
#ifdef DEBUG_IRQ_LATENCY
    printf("IRQ%d latency=%0.3fus\n",
           irq,
           (double)(qemu_get_clock_ns(vm_clock) - irq_time[irq]) * 1000000.0 /
           get_ticks_per_sec());
#endif
    DPRINTF("pic_interrupt: irq=%d\n", irq);
    return intno;
}
/*
 * Emit translated code for a PowerPC conditional branch (bc/bcctr/bclr).
 *
 * @type selects the target source: BCOND_IM (immediate displacement),
 * BCOND_CTR (count register) or BCOND_LR (link register).  The BO field
 * controls CTR decrement/testing and whether a CR bit is tested (BI
 * selects the bit).  LK=1 stores the return address in LR first.
 * Uses the legacy T0/T1 micro-op scheme; the emission order of the
 * gen_op_* calls is significant.
 */
static inline void gen_bcond (DisasContext *ctx, int type)
{
    target_ulong target = 0;
    target_ulong li;
    uint32_t bo = BO(ctx->opcode);
    uint32_t bi = BI(ctx->opcode);
    uint32_t mask;

    /* BO bit 2 clear => decrement CTR before testing */
    if ((bo & 0x4) == 0)
        gen_op_dec_ctr();
    switch(type) {
    case BCOND_IM:
        /* sign-extended 16-bit displacement; AA=1 means absolute target */
        li = (target_long)((int16_t)(BD(ctx->opcode)));
        if (likely(AA(ctx->opcode) == 0)) {
            target = ctx->nip + li - 4;
        } else {
            target = li;
        }
        break;
    case BCOND_CTR:
        gen_op_movl_T1_ctr();
        break;
    default:
    case BCOND_LR:
        gen_op_movl_T1_lr();
        break;
    }
    /* LK=1: save the return address (next instruction) into LR */
    if (LK(ctx->opcode)) {
#if defined(TARGET_PPC64)
        if (ctx->sf_mode)
            gen_op_setlr_64(ctx->nip >> 32, ctx->nip);
        else
#endif
            gen_op_setlr(ctx->nip);
    }
    if (bo & 0x10) {
        /* No CR condition */
        switch (bo & 0x6) {
        case 0:
            /* branch if CTR != 0 */
#if defined(TARGET_PPC64)
            if (ctx->sf_mode)
                gen_op_test_ctr_64();
            else
#endif
                gen_op_test_ctr();
            break;
        case 2:
            /* branch if CTR == 0 */
#if defined(TARGET_PPC64)
            if (ctx->sf_mode)
                gen_op_test_ctrz_64();
            else
#endif
                gen_op_test_ctrz();
            break;
        default:
        case 4:
        case 6:
            /* unconditional branch */
            if (type == BCOND_IM) {
                gen_goto_tb(ctx, 0, target);
            } else {
#if defined(TARGET_PPC64)
                if (ctx->sf_mode)
                    gen_op_b_T1_64();
                else
#endif
                    gen_op_b_T1();
                gen_op_reset_T0();
            }
            goto no_test;
        }
    } else {
        /* test the CR bit selected by BI; bo bit 3 chooses true/false */
        mask = 1 << (3 - (bi & 0x03));
        gen_op_load_crf_T0(bi >> 2);
        if (bo & 0x8) {
            switch (bo & 0x6) {
            case 0:
#if defined(TARGET_PPC64)
                if (ctx->sf_mode)
                    gen_op_test_ctr_true_64(mask);
                else
#endif
                    gen_op_test_ctr_true(mask);
                break;
            case 2:
#if defined(TARGET_PPC64)
                if (ctx->sf_mode)
                    gen_op_test_ctrz_true_64(mask);
                else
#endif
                    gen_op_test_ctrz_true(mask);
                break;
            default:
            case 4:
            case 6:
                gen_op_test_true(mask);
                break;
            }
        } else {
            switch (bo & 0x6) {
            case 0:
#if defined(TARGET_PPC64)
                if (ctx->sf_mode)
                    gen_op_test_ctr_false_64(mask);
                else
#endif
                    gen_op_test_ctr_false(mask);
                break;
            case 2:
#if defined(TARGET_PPC64)
                if (ctx->sf_mode)
                    gen_op_test_ctrz_false_64(mask);
                else
#endif
                    gen_op_test_ctrz_false(mask);
                break;
            default:
            case 4:
            case 6:
                gen_op_test_false(mask);
                break;
            }
        }
    }
    if (type == BCOND_IM) {
        /* immediate target: emit both taken / not-taken TB exits */
        int l1 = gen_new_label();
        gen_op_jz_T0(l1);
        gen_goto_tb(ctx, 0, target);
        gen_set_label(l1);
        gen_goto_tb(ctx, 1, ctx->nip);
    } else {
        /* register target: conditional indirect branch via T1 */
#if defined(TARGET_PPC64)
        if (ctx->sf_mode)
            gen_op_btest_T1_64(ctx->nip >> 32, ctx->nip);
        else
#endif
            gen_op_btest_T1(ctx->nip);
        gen_op_reset_T0();
    no_test:
        if (ctx->singlestep_enabled)
            gen_op_debug();
        gen_op_exit_tb();
    }
    ctx->exception = EXCP_BRANCH;
}
0 | DriveInfo *drive_get_by_id(const char *id) { DriveInfo *dinfo; TAILQ_FOREACH(dinfo, &drives, next) { if (strcmp(id, dinfo->id)) continue; return dinfo; } return NULL; } | 16,232 |
0 | static BlockDriverAIOCB *rbd_start_aio(BlockDriverState *bs, int64_t sector_num, QEMUIOVector *qiov, int nb_sectors, BlockDriverCompletionFunc *cb, void *opaque, RBDAIOCmd cmd) { RBDAIOCB *acb; RADOSCB *rcb; rbd_completion_t c; int64_t off, size; char *buf; int r; BDRVRBDState *s = bs->opaque; acb = qemu_aio_get(&rbd_aiocb_info, bs, cb, opaque); acb->cmd = cmd; acb->qiov = qiov; if (cmd == RBD_AIO_DISCARD) { acb->bounce = NULL; } else { acb->bounce = qemu_blockalign(bs, qiov->size); } acb->ret = 0; acb->error = 0; acb->s = s; acb->cancelled = 0; acb->bh = NULL; acb->status = -EINPROGRESS; if (cmd == RBD_AIO_WRITE) { qemu_iovec_to_buf(acb->qiov, 0, acb->bounce, qiov->size); } buf = acb->bounce; off = sector_num * BDRV_SECTOR_SIZE; size = nb_sectors * BDRV_SECTOR_SIZE; s->qemu_aio_count++; /* All the RADOSCB */ rcb = g_malloc(sizeof(RADOSCB)); rcb->done = 0; rcb->acb = acb; rcb->buf = buf; rcb->s = acb->s; rcb->size = size; r = rbd_aio_create_completion(rcb, (rbd_callback_t) rbd_finish_aiocb, &c); if (r < 0) { goto failed; } switch (cmd) { case RBD_AIO_WRITE: r = rbd_aio_write(s->image, off, size, buf, c); break; case RBD_AIO_READ: r = rbd_aio_read(s->image, off, size, buf, c); break; case RBD_AIO_DISCARD: r = rbd_aio_discard_wrapper(s->image, off, size, c); break; default: r = -EINVAL; } if (r < 0) { goto failed; } return &acb->common; failed: g_free(rcb); s->qemu_aio_count--; qemu_aio_release(acb); return NULL; } | 16,233 |
0 | static int colo_packet_compare_udp(Packet *spkt, Packet *ppkt) { int ret; trace_colo_compare_main("compare udp"); ret = colo_packet_compare_common(ppkt, spkt); if (ret) { trace_colo_compare_udp_miscompare("primary pkt size", ppkt->size); qemu_hexdump((char *)ppkt->data, stderr, "colo-compare", ppkt->size); trace_colo_compare_udp_miscompare("Secondary pkt size", spkt->size); qemu_hexdump((char *)spkt->data, stderr, "colo-compare", spkt->size); } return ret; } | 16,234 |
/*
 * MSA ST.df helper: store all elements of wide register @wd to memory.
 *
 * @df selects the element size (byte/half/word/double); the effective
 * address is gpr[rs] plus the 10-bit immediate scaled by the element
 * size (s10 << df).  Each element is stored at addr + (index << df)
 * using the do_s* helpers, with the current privilege level taken from
 * hflags' KSU bits.
 */
void helper_msa_st_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                      uint32_t rs, int32_t s10)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    /* base + scaled immediate offset */
    target_ulong addr = env->active_tc.gpr[rs] + (s10 << df);
    int i;

    switch (df) {
    case DF_BYTE:
        for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) {
            do_sb(env, addr + (i << DF_BYTE), pwd->b[i],
                  env->hflags & MIPS_HFLAG_KSU);
        }
        break;
    case DF_HALF:
        for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) {
            do_sh(env, addr + (i << DF_HALF), pwd->h[i],
                  env->hflags & MIPS_HFLAG_KSU);
        }
        break;
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            do_sw(env, addr + (i << DF_WORD), pwd->w[i],
                  env->hflags & MIPS_HFLAG_KSU);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            do_sd(env, addr + (i << DF_DOUBLE), pwd->d[i],
                  env->hflags & MIPS_HFLAG_KSU);
        }
        break;
    }
}
0 | static void apic_common_class_init(ObjectClass *klass, void *data) { ICCDeviceClass *idc = ICC_DEVICE_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); dc->vmsd = &vmstate_apic_common; dc->reset = apic_reset_common; dc->props = apic_properties_common; idc->realize = apic_common_realize; /* * Reason: APIC and CPU need to be wired up by * x86_cpu_apic_create() */ dc->cannot_instantiate_with_device_add_yet = true; } | 16,238 |
0 | static unsigned tget_short(const uint8_t **p, int le) { unsigned v = le ? AV_RL16(*p) : AV_RB16(*p); *p += 2; return v; } | 16,239 |
/*
 * Perform an s390 "initial CPU reset": a normal CPU reset plus clearing
 * most architected state, then restoring the architected power-on values
 * for control registers 0 and 14 and the breaking-event-address register.
 * Note: initial reset deliberately does NOT clear everything — only the
 * window between start_initial_reset_fields and end_reset_fields.
 */
static void s390_cpu_initial_reset(CPUState *s)
{
    S390CPU *cpu = S390_CPU(s);
    CPUS390XState *env = &cpu->env;
    int i;

    s390_cpu_reset(s);
    /* initial reset does not clear everything! */
    memset(&env->start_initial_reset_fields, 0,
           offsetof(CPUS390XState, end_reset_fields) -
           offsetof(CPUS390XState, start_initial_reset_fields));

    /* architectured initial values for CR 0 and 14 */
    env->cregs[0] = CR0_RESET;
    env->cregs[14] = CR14_RESET;

    /* architectured initial value for Breaking-Event-Address register */
    env->gbea = 1;

    env->pfault_token = -1UL;
    /* mark all pending-interrupt indices as empty */
    env->ext_index = -1;
    for (i = 0; i < ARRAY_SIZE(env->io_index); i++) {
        env->io_index[i] = -1;
    }
    env->mchk_index = -1;

    /* tininess for underflow is detected before rounding */
    set_float_detect_tininess(float_tininess_before_rounding,
                              &env->fpu_status);

    /* Reset state inside the kernel that we cannot access yet from QEMU. */
    if (kvm_enabled()) {
        kvm_s390_reset_vcpu(cpu);
    }
}
0 | static void migrate_fd_cancel(MigrationState *s) { if (s->state != MIG_STATE_ACTIVE) return; DPRINTF("cancelling migration\n"); s->state = MIG_STATE_CANCELLED; notifier_list_notify(&migration_state_notifiers, s); migrate_fd_cleanup(s); } | 16,241 |
0 | static void test_dispatch_cmd(void) { QDict *req = qdict_new(); QObject *resp; qdict_put_obj(req, "execute", QOBJECT(qstring_from_str("user_def_cmd"))); resp = qmp_dispatch(QOBJECT(req)); assert(resp != NULL); assert(!qdict_haskey(qobject_to_qdict(resp), "error")); g_print("\nresp: %s\n", qstring_get_str(qobject_to_json(resp))); qobject_decref(resp); QDECREF(req); } | 16,242 |
/*
 * Create a drive from a -drive option group.
 *
 * Parses interface/geometry/media/cache/throttling/error-policy options,
 * assigns a bus/unit slot, allocates and registers the DriveInfo (and for
 * virtio, synthesizes the matching -device option group), then opens the
 * backing image unless no file was given.
 * Returns the new DriveInfo, or NULL on any option or open error (the
 * partially built DriveInfo is unregistered and freed via the err path).
 */
DriveInfo *drive_init(QemuOpts *opts, BlockInterfaceType block_default_type)
{
    const char *buf;
    const char *file = NULL;
    const char *serial;
    const char *mediastr = "";
    BlockInterfaceType type;
    enum { MEDIA_DISK, MEDIA_CDROM } media;
    int bus_id, unit_id;
    int cyls, heads, secs, translation;
    BlockDriver *drv = NULL;
    int max_devs;
    int index;
    int ro = 0;
    int bdrv_flags = 0;
    int on_read_error, on_write_error;
    const char *devaddr;
    DriveInfo *dinfo;
    BlockIOLimit io_limits;
    int snapshot = 0;
    bool copy_on_read;
    int ret;

    translation = BIOS_ATA_TRANSLATION_AUTO;
    media = MEDIA_DISK;

    /* extract parameters */
    bus_id  = qemu_opt_get_number(opts, "bus", 0);
    unit_id = qemu_opt_get_number(opts, "unit", -1);
    index   = qemu_opt_get_number(opts, "index", -1);

    cyls  = qemu_opt_get_number(opts, "cyls", 0);
    heads = qemu_opt_get_number(opts, "heads", 0);
    secs  = qemu_opt_get_number(opts, "secs", 0);

    snapshot = qemu_opt_get_bool(opts, "snapshot", 0);
    ro = qemu_opt_get_bool(opts, "readonly", 0);
    copy_on_read = qemu_opt_get_bool(opts, "copy-on-read", false);

    file = qemu_opt_get(opts, "file");
    serial = qemu_opt_get(opts, "serial");

    /* resolve the interface name to a BlockInterfaceType */
    if ((buf = qemu_opt_get(opts, "if")) != NULL) {
        for (type = 0; type < IF_COUNT && strcmp(buf, if_name[type]); type++)
            ;
        if (type == IF_COUNT) {
            error_report("unsupported bus type '%s'", buf);
            return NULL;
        }
    } else {
        type = block_default_type;
    }

    max_devs = if_max_devs[type];

    /* CHS geometry must be fully specified and positive if given at all */
    if (cyls || heads || secs) {
        if (cyls < 1) {
            error_report("invalid physical cyls number");
            return NULL;
        }
        if (heads < 1) {
            error_report("invalid physical heads number");
            return NULL;
        }
        if (secs < 1) {
            error_report("invalid physical secs number");
            return NULL;
        }
    }

    if ((buf = qemu_opt_get(opts, "trans")) != NULL) {
        if (!cyls) {
            error_report("'%s' trans must be used with cyls, heads and secs",
                         buf);
            return NULL;
        }
        if (!strcmp(buf, "none"))
            translation = BIOS_ATA_TRANSLATION_NONE;
        else if (!strcmp(buf, "lba"))
            translation = BIOS_ATA_TRANSLATION_LBA;
        else if (!strcmp(buf, "auto"))
            translation = BIOS_ATA_TRANSLATION_AUTO;
        else {
            error_report("'%s' invalid translation type", buf);
            return NULL;
        }
    }

    if ((buf = qemu_opt_get(opts, "media")) != NULL) {
        if (!strcmp(buf, "disk")) {
            media = MEDIA_DISK;
        } else if (!strcmp(buf, "cdrom")) {
            if (cyls || secs || heads) {
                error_report("CHS can't be set with media=%s", buf);
                return NULL;
            }
            media = MEDIA_CDROM;
        } else {
            error_report("'%s' invalid media", buf);
            return NULL;
        }
    }

    /* writeback cache is the default; "cache=" may override the flags */
    bdrv_flags |= BDRV_O_CACHE_WB;
    if ((buf = qemu_opt_get(opts, "cache")) != NULL) {
        if (bdrv_parse_cache_flags(buf, &bdrv_flags) != 0) {
            error_report("invalid cache option");
            return NULL;
        }
    }

#ifdef CONFIG_LINUX_AIO
    if ((buf = qemu_opt_get(opts, "aio")) != NULL) {
        if (!strcmp(buf, "native")) {
            bdrv_flags |= BDRV_O_NATIVE_AIO;
        } else if (!strcmp(buf, "threads")) {
            /* this is the default */
        } else {
            error_report("invalid aio option");
            return NULL;
        }
    }
#endif

    if ((buf = qemu_opt_get(opts, "format")) != NULL) {
        if (is_help_option(buf)) {
            error_printf("Supported formats:");
            bdrv_iterate_format(bdrv_format_print, NULL);
            error_printf("\n");
            return NULL;
        }
        drv = bdrv_find_whitelisted_format(buf);
        if (!drv) {
            error_report("'%s' invalid format", buf);
            return NULL;
        }
    }

    /* disk I/O throttling */
    io_limits.bps[BLOCK_IO_LIMIT_TOTAL]  =
        qemu_opt_get_number(opts, "bps", 0);
    io_limits.bps[BLOCK_IO_LIMIT_READ]   =
        qemu_opt_get_number(opts, "bps_rd", 0);
    io_limits.bps[BLOCK_IO_LIMIT_WRITE]  =
        qemu_opt_get_number(opts, "bps_wr", 0);
    io_limits.iops[BLOCK_IO_LIMIT_TOTAL] =
        qemu_opt_get_number(opts, "iops", 0);
    io_limits.iops[BLOCK_IO_LIMIT_READ]  =
        qemu_opt_get_number(opts, "iops_rd", 0);
    io_limits.iops[BLOCK_IO_LIMIT_WRITE] =
        qemu_opt_get_number(opts, "iops_wr", 0);

    if (!do_check_io_limits(&io_limits)) {
        error_report("bps(iops) and bps_rd/bps_wr(iops_rd/iops_wr) "
                     "cannot be used at the same time");
        return NULL;
    }

    if (qemu_opt_get(opts, "boot") != NULL) {
        fprintf(stderr, "qemu-kvm: boot=on|off is deprecated and will be "
                "ignored. Future versions will reject this parameter. Please "
                "update your scripts.\n");
    }

    /* error-policy options only make sense on some interfaces */
    on_write_error = BLOCKDEV_ON_ERROR_ENOSPC;
    if ((buf = qemu_opt_get(opts, "werror")) != NULL) {
        if (type != IF_IDE && type != IF_SCSI && type != IF_VIRTIO &&
            type != IF_NONE) {
            error_report("werror is not supported by this bus type");
            return NULL;
        }

        on_write_error = parse_block_error_action(buf, 0);
        if (on_write_error < 0) {
            return NULL;
        }
    }

    on_read_error = BLOCKDEV_ON_ERROR_REPORT;
    if ((buf = qemu_opt_get(opts, "rerror")) != NULL) {
        if (type != IF_IDE && type != IF_VIRTIO && type != IF_SCSI &&
            type != IF_NONE) {
            error_report("rerror is not supported by this bus type");
            return NULL;
        }

        on_read_error = parse_block_error_action(buf, 1);
        if (on_read_error < 0) {
            return NULL;
        }
    }

    if ((devaddr = qemu_opt_get(opts, "addr")) != NULL) {
        if (type != IF_VIRTIO) {
            error_report("addr is not supported by this bus type");
            return NULL;
        }
    }

    /* compute bus and unit according index */
    if (index != -1) {
        if (bus_id != 0 || unit_id != -1) {
            error_report("index cannot be used with bus and unit");
            return NULL;
        }
        bus_id = drive_index_to_bus_id(type, index);
        unit_id = drive_index_to_unit_id(type, index);
    }

    /* if user doesn't specify a unit_id,
     * try to find the first free
     */
    if (unit_id == -1) {
        unit_id = 0;
        while (drive_get(type, bus_id, unit_id) != NULL) {
            unit_id++;
            if (max_devs && unit_id >= max_devs) {
                unit_id -= max_devs;
                bus_id++;
            }
        }
    }

    /* check unit id */
    if (max_devs && unit_id >= max_devs) {
        error_report("unit %d too big (max is %d)", unit_id, max_devs - 1);
        return NULL;
    }

    /*
     * catch multiple definitions
     */
    if (drive_get(type, bus_id, unit_id) != NULL) {
        error_report("drive with bus=%d, unit=%d (index=%d) exists",
                     bus_id, unit_id, index);
        return NULL;
    }

    /* init */
    dinfo = g_malloc0(sizeof(*dinfo));
    if ((buf = qemu_opts_id(opts)) != NULL) {
        dinfo->id = g_strdup(buf);
    } else {
        /* no id supplied -> create one */
        dinfo->id = g_malloc0(32);
        if (type == IF_IDE || type == IF_SCSI)
            mediastr = (media == MEDIA_CDROM) ? "-cd" : "-hd";
        if (max_devs)
            snprintf(dinfo->id, 32, "%s%i%s%i",
                     if_name[type], bus_id, mediastr, unit_id);
        else
            snprintf(dinfo->id, 32, "%s%s%i",
                     if_name[type], mediastr, unit_id);
    }
    dinfo->bdrv = bdrv_new(dinfo->id);
    dinfo->bdrv->open_flags = snapshot ? BDRV_O_SNAPSHOT : 0;
    dinfo->bdrv->read_only = ro;
    dinfo->devaddr = devaddr;
    dinfo->type = type;
    dinfo->bus = bus_id;
    dinfo->unit = unit_id;
    dinfo->cyls = cyls;
    dinfo->heads = heads;
    dinfo->secs = secs;
    dinfo->trans = translation;
    dinfo->opts = opts;
    dinfo->refcount = 1;
    dinfo->serial = serial;
    QTAILQ_INSERT_TAIL(&drives, dinfo, next);

    bdrv_set_on_error(dinfo->bdrv, on_read_error, on_write_error);

    /* disk I/O throttling */
    bdrv_set_io_limits(dinfo->bdrv, &io_limits);

    switch (type) {
    case IF_IDE:
    case IF_SCSI:
    case IF_XEN:
    case IF_NONE:
        dinfo->media_cd = media == MEDIA_CDROM;
        break;
    case IF_SD:
    case IF_FLOPPY:
    case IF_PFLASH:
    case IF_MTD:
        break;
    case IF_VIRTIO:
        /* add virtio block device */
        opts = qemu_opts_create_nofail(qemu_find_opts("device"));
        if (arch_type == QEMU_ARCH_S390X) {
            qemu_opt_set(opts, "driver", "virtio-blk-s390");
        } else {
            qemu_opt_set(opts, "driver", "virtio-blk-pci");
        }
        qemu_opt_set(opts, "drive", dinfo->id);
        if (devaddr)
            qemu_opt_set(opts, "addr", devaddr);
        break;
    default:
        abort();
    }
    /* an empty drive (no backing file) is legal for removable media */
    if (!file || !*file) {
        return dinfo;
    }
    if (snapshot) {
        /* always use cache=unsafe with snapshot */
        bdrv_flags &= ~BDRV_O_CACHE_MASK;
        bdrv_flags |= (BDRV_O_SNAPSHOT|BDRV_O_CACHE_WB|BDRV_O_NO_FLUSH);
    }

    if (copy_on_read) {
        bdrv_flags |= BDRV_O_COPY_ON_READ;
    }

    if (runstate_check(RUN_STATE_INMIGRATE)) {
        bdrv_flags |= BDRV_O_INCOMING;
    }

    if (media == MEDIA_CDROM) {
        /* CDROM is fine for any interface, don't check.  */
        ro = 1;
    } else if (ro == 1) {
        if (type != IF_SCSI && type != IF_VIRTIO && type != IF_FLOPPY &&
            type != IF_NONE && type != IF_PFLASH) {
            error_report("readonly not supported by this bus type");
            goto err;
        }
    }

    bdrv_flags |= ro ? 0 : BDRV_O_RDWR;

    if (ro && copy_on_read) {
        error_report("warning: disabling copy_on_read on readonly drive");
    }

    ret = bdrv_open(dinfo->bdrv, file, bdrv_flags, drv);
    if (ret < 0) {
        if (ret == -EMEDIUMTYPE) {
            error_report("could not open disk image %s: not in %s format",
                         file, drv->format_name);
        } else {
            error_report("could not open disk image %s: %s",
                         file, strerror(-ret));
        }
        goto err;
    }

    /* an encrypted image needs a key before the guest may start */
    if (bdrv_key_required(dinfo->bdrv))
        autostart = 0;
    return dinfo;

err:
    /* undo registration and free everything allocated above */
    bdrv_delete(dinfo->bdrv);
    g_free(dinfo->id);
    QTAILQ_REMOVE(&drives, dinfo, next);
    g_free(dinfo);
    return NULL;
}
/*
 * HMP "sendkey" handler: parse a '-'-separated key combination (either
 * symbolic names or 0x-prefixed raw keycodes) into a KeyValueList and
 * forward it to qmp_send_key() with the optional hold-time.
 * On a bad token, reports "invalid parameter" and frees the partial list.
 */
void hmp_sendkey(Monitor *mon, const QDict *qdict)
{
    const char *keys = qdict_get_str(qdict, "keys");
    KeyValueList *keylist, *head = NULL, *tmp = NULL;
    int has_hold_time = qdict_haskey(qdict, "hold-time");
    int hold_time = qdict_get_try_int(qdict, "hold-time", -1);
    Error *err = NULL;
    char *separator;
    int keyname_len;

    while (1) {
        separator = strchr(keys, '-');
        /* length of the current token (up to the next '-' or end) */
        keyname_len = separator ? separator - keys : strlen(keys);

        /* Be compatible with old interface, convert user inputted "<" */
        if (keys[0] == '<' && keyname_len == 1) {
            keys = "less";
            keyname_len = 4;
        }

        keylist = g_malloc0(sizeof(*keylist));
        keylist->value = g_malloc0(sizeof(*keylist->value));

        /* append to the singly linked list before validating, so err_out
         * can free everything through head */
        if (!head) {
            head = keylist;
        }
        if (tmp) {
            tmp->next = keylist;
        }
        tmp = keylist;

        if (strstart(keys, "0x", NULL)) {
            /* raw numeric keycode */
            char *endp;
            int value = strtoul(keys, &endp, 0);
            assert(endp <= keys + keyname_len);
            if (endp != keys + keyname_len) {
                goto err_out;
            }
            keylist->value->type = KEY_VALUE_KIND_NUMBER;
            keylist->value->u.number = value;
        } else {
            /* symbolic QKeyCode name */
            int idx = index_from_key(keys, keyname_len);
            if (idx == Q_KEY_CODE__MAX) {
                goto err_out;
            }
            keylist->value->type = KEY_VALUE_KIND_QCODE;
            keylist->value->u.qcode = idx;
        }

        if (!separator) {
            break;
        }
        keys = separator + 1;
    }

    qmp_send_key(head, has_hold_time, hold_time, &err);
    hmp_handle_error(mon, &err);

out:
    qapi_free_KeyValueList(head);
    return;

err_out:
    monitor_printf(mon, "invalid parameter: %.*s\n", keyname_len, keys);
    goto out;
}
0 | SocketAddressLegacy *socket_remote_address(int fd, Error **errp) { struct sockaddr_storage ss; socklen_t sslen = sizeof(ss); if (getpeername(fd, (struct sockaddr *)&ss, &sslen) < 0) { error_setg_errno(errp, errno, "%s", "Unable to query remote socket address"); return NULL; } return socket_sockaddr_to_address(&ss, sslen, errp); } | 16,248 |
0 | static int megasas_ctrl_get_info(MegasasState *s, MegasasCmd *cmd) { PCIDevice *pci_dev = PCI_DEVICE(s); struct mfi_ctrl_info info; size_t dcmd_size = sizeof(info); BusChild *kid; int num_ld_disks = 0; uint16_t sdev_id; memset(&info, 0x0, cmd->iov_size); if (cmd->iov_size < dcmd_size) { trace_megasas_dcmd_invalid_xfer_len(cmd->index, cmd->iov_size, dcmd_size); return MFI_STAT_INVALID_PARAMETER; } info.pci.vendor = cpu_to_le16(PCI_VENDOR_ID_LSI_LOGIC); info.pci.device = cpu_to_le16(PCI_DEVICE_ID_LSI_SAS1078); info.pci.subvendor = cpu_to_le16(PCI_VENDOR_ID_LSI_LOGIC); info.pci.subdevice = cpu_to_le16(0x1013); /* * For some reason the firmware supports * only up to 8 device ports. * Despite supporting a far larger number * of devices for the physical devices. * So just display the first 8 devices * in the device port list, independent * of how many logical devices are actually * present. */ info.host.type = MFI_INFO_HOST_PCIE; info.device.type = MFI_INFO_DEV_SAS3G; info.device.port_count = 8; QTAILQ_FOREACH(kid, &s->bus.qbus.children, sibling) { SCSIDevice *sdev = DO_UPCAST(SCSIDevice, qdev, kid->child); if (num_ld_disks < 8) { sdev_id = ((sdev->id & 0xFF) >> 8) | (sdev->lun & 0xFF); info.device.port_addr[num_ld_disks] = cpu_to_le64(megasas_get_sata_addr(sdev_id)); } num_ld_disks++; } memcpy(info.product_name, "MegaRAID SAS 8708EM2", 20); snprintf(info.serial_number, 32, "%s", s->hba_serial); snprintf(info.package_version, 0x60, "%s-QEMU", QEMU_VERSION); memcpy(info.image_component[0].name, "APP", 3); memcpy(info.image_component[0].version, MEGASAS_VERSION "-QEMU", 9); memcpy(info.image_component[0].build_date, __DATE__, 11); memcpy(info.image_component[0].build_time, __TIME__, 8); info.image_component_count = 1; if (pci_dev->has_rom) { uint8_t biosver[32]; uint8_t *ptr; ptr = memory_region_get_ram_ptr(&pci_dev->rom); memcpy(biosver, ptr + 0x41, 31); memcpy(info.image_component[1].name, "BIOS", 4); memcpy(info.image_component[1].version, biosver, strlen((const 
char *)biosver)); info.image_component_count++; } info.current_fw_time = cpu_to_le32(megasas_fw_time()); info.max_arms = 32; info.max_spans = 8; info.max_arrays = MEGASAS_MAX_ARRAYS; info.max_lds = s->fw_luns; info.max_cmds = cpu_to_le16(s->fw_cmds); info.max_sg_elements = cpu_to_le16(s->fw_sge); info.max_request_size = cpu_to_le32(MEGASAS_MAX_SECTORS); info.lds_present = cpu_to_le16(num_ld_disks); info.pd_present = cpu_to_le16(num_ld_disks); info.pd_disks_present = cpu_to_le16(num_ld_disks); info.hw_present = cpu_to_le32(MFI_INFO_HW_NVRAM | MFI_INFO_HW_MEM | MFI_INFO_HW_FLASH); info.memory_size = cpu_to_le16(512); info.nvram_size = cpu_to_le16(32); info.flash_size = cpu_to_le16(16); info.raid_levels = cpu_to_le32(MFI_INFO_RAID_0); info.adapter_ops = cpu_to_le32(MFI_INFO_AOPS_RBLD_RATE | MFI_INFO_AOPS_SELF_DIAGNOSTIC | MFI_INFO_AOPS_MIXED_ARRAY); info.ld_ops = cpu_to_le32(MFI_INFO_LDOPS_DISK_CACHE_POLICY | MFI_INFO_LDOPS_ACCESS_POLICY | MFI_INFO_LDOPS_IO_POLICY | MFI_INFO_LDOPS_WRITE_POLICY | MFI_INFO_LDOPS_READ_POLICY); info.max_strips_per_io = cpu_to_le16(s->fw_sge); info.stripe_sz_ops.min = 3; info.stripe_sz_ops.max = ffs(MEGASAS_MAX_SECTORS + 1) - 1; info.properties.pred_fail_poll_interval = cpu_to_le16(300); info.properties.intr_throttle_cnt = cpu_to_le16(16); info.properties.intr_throttle_timeout = cpu_to_le16(50); info.properties.rebuild_rate = 30; info.properties.patrol_read_rate = 30; info.properties.bgi_rate = 30; info.properties.cc_rate = 30; info.properties.recon_rate = 30; info.properties.cache_flush_interval = 4; info.properties.spinup_drv_cnt = 2; info.properties.spinup_delay = 6; info.properties.ecc_bucket_size = 15; info.properties.ecc_bucket_leak_rate = cpu_to_le16(1440); info.properties.expose_encl_devices = 1; info.properties.OnOffProperties = cpu_to_le32(MFI_CTRL_PROP_EnableJBOD); info.pd_ops = cpu_to_le32(MFI_INFO_PDOPS_FORCE_ONLINE | MFI_INFO_PDOPS_FORCE_OFFLINE); info.pd_mix_support = cpu_to_le32(MFI_INFO_PDMIX_SAS | MFI_INFO_PDMIX_SATA | 
MFI_INFO_PDMIX_LD); cmd->iov_size -= dma_buf_read((uint8_t *)&info, dcmd_size, &cmd->qsg); return MFI_STAT_OK; } | 16,249 |
/*
 * Assemble a complete Dirac data unit from the parser's internal buffer.
 *
 * @next      byte offset of the next detected parse-unit boundary in *buf,
 *            or -1 when only a start (no end) was found.
 * @buf/@buf_size  in: new input; out: the assembled unit when complete.
 *
 * Returns @next when a full unit was produced, 0 for the buffered-overread
 * fast path, -1 while more input is needed, or AVERROR(ENOMEM).
 *
 * NOTE(review): the magic constants 13 and 9 are the Dirac parse-info
 * header sizes used throughout; changing any one occurrence would break
 * the offset bookkeeping below.
 */
static int dirac_combine_frame(AVCodecParserContext *s, AVCodecContext *avctx,
                               int next, const uint8_t **buf, int *buf_size)
{
    /* only derive timestamps if the caller did not already supply them */
    int parse_timing_info = (s->pts == AV_NOPTS_VALUE &&
                             s->dts == AV_NOPTS_VALUE);
    DiracParseContext *pc = s->priv_data;

    if (pc->overread_index) {
        /* Drop bytes already consumed from a previous over-read. */
        memmove(pc->buffer, pc->buffer + pc->overread_index,
                pc->index - pc->overread_index);
        pc->index -= pc->overread_index;
        pc->overread_index = 0;
        /* 0x10 at offset 4 marks an end-of-sequence parse unit */
        if (*buf_size == 0 && pc->buffer[4] == 0x10) {
            *buf = pc->buffer;
            *buf_size = pc->index;
            return 0;
        }
    }

    if (next == -1) {
        /* Found a possible frame start but not a frame end */
        void *new_buffer =
            av_fast_realloc(pc->buffer, &pc->buffer_size,
                            pc->index + (*buf_size - pc->sync_offset));
        if (!new_buffer)
            return AVERROR(ENOMEM);
        pc->buffer = new_buffer;
        memcpy(pc->buffer + pc->index, (*buf + pc->sync_offset),
               *buf_size - pc->sync_offset);
        pc->index += *buf_size - pc->sync_offset;
        return -1;
    } else {
        /* Found a possible frame start and a possible frame end */
        DiracParseUnit pu1, pu;
        void *new_buffer = av_fast_realloc(pc->buffer, &pc->buffer_size,
                                           pc->index + next);
        if (!new_buffer)
            return AVERROR(ENOMEM);
        pc->buffer = new_buffer;
        memcpy(pc->buffer + pc->index, *buf, next);
        pc->index += next;

        /* Need to check if we have a valid Parse Unit. We can't go by the
         * sync pattern 'BBCD' alone because arithmetic coding of the residual
         * and motion data can cause the pattern triggering a false start of
         * frame. So check if the previous parse offset of the next parse unit
         * is equal to the next parse offset of the current parse unit then
         * we can be pretty sure that we have a valid parse unit */
        if (!unpack_parse_unit(&pu1, pc, pc->index - 13) ||
            !unpack_parse_unit(&pu, pc, pc->index - 13 - pu1.prev_pu_offset) ||
            pu.next_pu_offset != pu1.prev_pu_offset ||
            pc->index < pc->dirac_unit_size + 13LL + pu1.prev_pu_offset) {
            /* false sync: rewind past the partial header and resync */
            pc->index -= 9;
            *buf_size = next - 9;
            pc->header_bytes_needed = 9;
            return -1;
        }

        /* All non-frame data must be accompanied by frame data. This is to
         * ensure that pts is set correctly. So if the current parse unit is
         * not frame data, wait for frame data to come along */
        pc->dirac_unit = pc->buffer + pc->index - 13 -
                         pu1.prev_pu_offset - pc->dirac_unit_size;
        pc->dirac_unit_size += pu.next_pu_offset;

        if ((pu.pu_type & 0x08) != 0x08) {
            pc->header_bytes_needed = 9;
            *buf_size = next;
            return -1;
        }

        /* Get the picture number to set the pts and dts*/
        if (parse_timing_info) {
            uint8_t *cur_pu = pc->buffer + pc->index - 13 - pu1.prev_pu_offset;
            int pts = AV_RB32(cur_pu + 13);
            if (s->last_pts == 0 && s->last_dts == 0)
                s->dts = pts - 1;
            else
                s->dts = s->last_dts + 1;
            s->pts = pts;
            /* low bits of the picture-type byte flag referenced pictures */
            if (!avctx->has_b_frames && (cur_pu[4] & 0x03))
                avctx->has_b_frames = 1;
        }
        if (avctx->has_b_frames && s->pts == s->dts)
            s->pict_type = AV_PICTURE_TYPE_B;

        /* Finally have a complete Dirac data unit */
        *buf = pc->dirac_unit;
        *buf_size = pc->dirac_unit_size;

        pc->dirac_unit_size = 0;
        pc->overread_index = pc->index - 13;
        pc->header_bytes_needed = 9;
    }
    return next;
}
0 | static bool sdhci_can_issue_command(SDHCIState *s) { if (!SDHC_CLOCK_IS_ON(s->clkcon) || !(s->pwrcon & SDHC_POWER_ON) || (((s->prnsts & SDHC_DATA_INHIBIT) || s->stopped_state) && ((s->cmdreg & SDHC_CMD_DATA_PRESENT) || ((s->cmdreg & SDHC_CMD_RESPONSE) == SDHC_CMD_RSP_WITH_BUSY && !(SDHC_COMMAND_TYPE(s->cmdreg) == SDHC_CMD_ABORT))))) { return false; } return true; } | 16,251 |
0 | target_ulong helper_rdhwr_ccres(CPUMIPSState *env) { if ((env->hflags & MIPS_HFLAG_CP0) || (env->CP0_HWREna & (1 << 3))) return env->CCRes; else do_raise_exception(env, EXCP_RI, GETPC()); return 0; } | 16,253 |
/*
 * Decode all macroblocks of a VC-1 intra (I) frame: per-MB CBP and AC
 * prediction, 6 blocks (4 luma + 2 chroma) of coefficient decoding,
 * inverse 8x8 transform, optional overlap smoothing and loop filtering.
 * Bails out (with error concealment) if the bitstream over-consumes bits.
 */
static void vc1_decode_i_blocks(VC1Context *v)
{
    int k, j;
    MpegEncContext *s = &v->s;
    int cbp, val;
    uint8_t *coded_val;
    int mb_pos;

    /* select codingmode used for VLC tables selection */
    switch (v->y_ac_table_index) {
    case 0:
        v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
        break;
    case 1:
        v->codingset = CS_HIGH_MOT_INTRA;
        break;
    case 2:
        v->codingset = CS_MID_RATE_INTRA;
        break;
    }

    switch (v->c_ac_table_index) {
    case 0:
        v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
        break;
    case 1:
        v->codingset2 = CS_HIGH_MOT_INTER;
        break;
    case 2:
        v->codingset2 = CS_MID_RATE_INTER;
        break;
    }

    /* Set DC scale - y and c use the same */
    s->y_dc_scale = s->y_dc_scale_table[v->pq];
    s->c_dc_scale = s->c_dc_scale_table[v->pq];

    //do frame decode
    s->mb_x = s->mb_y = 0;
    s->mb_intra = 1;
    s->first_slice_line = 1;
    for (s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
        for (s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
            ff_init_block_index(s);
            ff_update_block_index(s);
            s->dsp.clear_blocks(s->block[0]);
            mb_pos = s->mb_x + s->mb_y * s->mb_width;
            s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
            s->current_picture.qscale_table[mb_pos] = v->pq;
            s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
            s->current_picture.motion_val[1][s->block_index[0]][1] = 0;

            // do actual MB decoding and displaying
            cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table,
                           MB_INTRA_VLC_BITS, 2);
            v->s.ac_pred = get_bits1(&v->s.gb);

            for (k = 0; k < 6; k++) {
                val = ((cbp >> (5 - k)) & 1);

                if (k < 4) {
                    /* luma blocks: coded flag is predicted from neighbours */
                    int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
                    val = val ^ pred;
                    *coded_val = val;
                }
                cbp |= val << (5 - k);

                vc1_decode_i_block(v, s->block[k], k, val,
                                   (k < 4) ? v->codingset : v->codingset2);

                s->dsp.vc1_inv_trans_8x8(s->block[k]);
                if (v->pq >= 9 && v->overlap) {
                    /* bias samples for the overlap-smoothing pass below */
                    for (j = 0; j < 64; j++)
                        s->block[k][j] += 128;
                }
            }

            vc1_put_block(v, s->block);
            if (v->pq >= 9 && v->overlap) {
                /* horizontal then vertical overlap smoothing on block edges */
                if (s->mb_x) {
                    s->dsp.vc1_h_overlap(s->dest[0], s->linesize);
                    s->dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize,
                                         s->linesize);
                    if (!(s->flags & CODEC_FLAG_GRAY)) {
                        s->dsp.vc1_h_overlap(s->dest[1], s->uvlinesize);
                        s->dsp.vc1_h_overlap(s->dest[2], s->uvlinesize);
                    }
                }
                s->dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize);
                s->dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8,
                                     s->linesize);
                if (!s->first_slice_line) {
                    s->dsp.vc1_v_overlap(s->dest[0], s->linesize);
                    s->dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize);
                    if (!(s->flags & CODEC_FLAG_GRAY)) {
                        s->dsp.vc1_v_overlap(s->dest[1], s->uvlinesize);
                        s->dsp.vc1_v_overlap(s->dest[2], s->uvlinesize);
                    }
                }
                s->dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
                s->dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8,
                                     s->linesize);
            }
            if (v->s.loop_filter)
                vc1_loop_filter_iblk(s, s->current_picture.qscale_table[mb_pos]);

            if (get_bits_count(&s->gb) > v->bits) {
                /* bitstream exhausted: conceal the rest of the frame */
                ff_er_add_slice(s, 0, 0, s->mb_x, s->mb_y,
                                (AC_END|DC_END|MV_END));
                av_log(s->avctx, AV_LOG_ERROR,
                       "Bits overconsumption: %i > %i\n",
                       get_bits_count(&s->gb), v->bits);
                return;
            }
        }
        ff_draw_horiz_band(s, s->mb_y * 16, 16);
        s->first_slice_line = 0;
    }
    ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1,
                    (AC_END|DC_END|MV_END));
}
/*
 * Decode one AC-3 frame: sync, parse sync info and BSI, then decode the
 * six audio blocks, running the IMDCT (256- or 512-point, depending on the
 * per-channel block-switch flag) and overlap-add windowing, and emit
 * interleaved int16 PCM into @data.
 *
 * Returns the number of input bytes consumed, or -1 on error (with
 * *data_size set to 0).
 *
 * NOTE(review): tmp0/tmp1/tmp and the ctx->samples+1536/+2048 regions are
 * IMDCT scratch/delay buffers; the exact copy/ordering below implements
 * the 50%-overlap window and must not be reordered.
 */
static int ac3_decode_frame(AVCodecContext * avctx, void *data,
                            int *data_size, uint8_t * buf, int buf_size)
{
    AC3DecodeContext *ctx = avctx->priv_data;
    int frame_start;
    int i, j, k, l;
    float tmp0[128], tmp1[128], tmp[512];
    short *out_samples = (short *)data;
    float *samples = ctx->samples;

    //Synchronize the frame.
    frame_start = ac3_synchronize(buf, buf_size);
    if (frame_start == -1) {
        av_log(avctx, AV_LOG_ERROR, "frame is not synchronized\n");
        *data_size = 0;
        return -1;
    }

    //Initialize the GetBitContext with the start of valid AC3 Frame.
    init_get_bits(&(ctx->gb), buf + frame_start,
                  (buf_size - frame_start) * 8);

    //Parse the syncinfo.
    ////If 'fscod' is not valid the decoder shall mute as per the standard.
    if (ac3_parse_sync_info(ctx)) {
        av_log(avctx, AV_LOG_ERROR, "fscod is not valid\n");
        *data_size = 0;
        return -1;
    }

    //Check for the errors.
    /* if (ac3_error_check(ctx)) { *data_size = 0; return -1; } */

    //Parse the BSI.
    //If 'bsid' is not valid decoder shall not decode the audio as per the standard.
    if (ac3_parse_bsi(ctx)) {
        av_log(avctx, AV_LOG_ERROR, "bsid is not valid\n");
        *data_size = 0;
        return -1;
    }

    avctx->sample_rate = ctx->sync_info.sampling_rate;

    /* Pick the output channel layout from the stream unless the caller
     * requested a specific (smaller) channel count. */
    if (avctx->channels == 0) {
        avctx->channels = ctx->bsi.nfchans +
                          ((ctx->bsi.flags & AC3_BSI_LFEON) ? 1 : 0);
        ctx->output = AC3_OUTPUT_UNMODIFIED;
    } else if ((ctx->bsi.nfchans +
                ((ctx->bsi.flags & AC3_BSI_LFEON) ? 1 : 0)) < avctx->channels) {
        av_log(avctx, AV_LOG_INFO,
               "ac3_decoder: AC3 Source Channels Are Less Then Specified %d: Output to %d Channels\n",
               avctx->channels,
               (ctx->bsi.nfchans + ((ctx->bsi.flags & AC3_BSI_LFEON) ? 1 : 0)));
        avctx->channels = ctx->bsi.nfchans +
                          ((ctx->bsi.flags & AC3_BSI_LFEON) ? 1 : 0);
        ctx->output = AC3_OUTPUT_UNMODIFIED;
    } else if (avctx->channels == 1) {
        ctx->output = AC3_OUTPUT_MONO;
    } else if (avctx->channels == 2) {
        if (ctx->bsi.dsurmod == 0x02)
            ctx->output = AC3_OUTPUT_DOLBY;
        else
            ctx->output = AC3_OUTPUT_STEREO;
    }
    avctx->bit_rate = ctx->sync_info.bit_rate;
    av_log(avctx, AV_LOG_INFO,
           "channels = %d \t bit rate = %d \t sampling rate = %d \n",
           avctx->channels, avctx->sample_rate, avctx->bit_rate);

    //Parse the Audio Blocks.
    for (i = 0; i < 6; i++) {
        if (ac3_parse_audio_block(ctx, i)) {
            av_log(avctx, AV_LOG_ERROR, "error parsing the audio block\n");
            *data_size = 0;
            return -1;
        }
        samples = ctx->samples;

        /* LFE channel, when present: plain 512-point IMDCT, no windowing */
        if (ctx->bsi.flags & AC3_BSI_LFEON) {
            ff_imdct_calc(&ctx->imdct_ctx_512, ctx->samples + 1536,
                          samples, tmp);
            for (l = 0; l < 256; l++)
                samples[l] = (ctx->samples + 1536)[l];
            float_to_int(samples, out_samples, 256);
            samples += 256;
            out_samples += 256;
        }

        for (j = 0; j < ctx->bsi.nfchans; j++) {
            if (ctx->audio_block.blksw & (1 << j)) {
                /* block-switched channel: two 256-point IMDCTs on the
                 * even/odd coefficient halves */
                for (k = 0; k < 128; k++) {
                    tmp0[k] = samples[2 * k];
                    tmp1[k] = samples[2 * k + 1];
                }
                ff_imdct_calc(&ctx->imdct_ctx_256, ctx->samples + 1536,
                              tmp0, tmp);
                for (l = 0; l < 256; l++)
                    samples[l] = (ctx->samples + 1536)[l] * window[l] +
                                 (ctx->samples + 2048)[l] * window[255 - l];
                ff_imdct_calc(&ctx->imdct_ctx_256, ctx->samples + 2048,
                              tmp1, tmp);
                float_to_int(samples, out_samples, 256);
                samples += 256;
                out_samples += 256;
            } else {
                /* long-block channel: 512-point IMDCT with overlap-add */
                ff_imdct_calc(&ctx->imdct_ctx_512, ctx->samples + 1536,
                              samples, tmp);
                for (l = 0; l < 256; l++)
                    samples[l] = (ctx->samples + 1536)[l] * window[l] +
                                 (ctx->samples + 2048)[l] * window[255 - l];
                float_to_int(samples, out_samples, 256);
                /* save second half as the delay for the next block */
                memcpy(ctx->samples + 2048, ctx->samples + 1792,
                       256 * sizeof (float));
                samples += 256;
                out_samples += 256;
            }
        }
    }
    *data_size = 6 * ctx->bsi.nfchans * 256 * sizeof (int16_t);
    return (buf_size - frame_start);
}
/*
 * Build the ACPI SRAT (System Resource Affinity Table): one processor
 * affinity entry per possible CPU (x2APIC format for APIC ids >= 255),
 * memory affinity entries for each NUMA node (carving out the 640K-1M
 * hole and the PCI hole below 4G), zero-filled filler slots so the table
 * size is stable across configurations, and a hotplug-memory entry.
 */
build_srat(GArray *table_data, BIOSLinker *linker, MachineState *machine)
{
    AcpiSystemResourceAffinityTable *srat;
    AcpiSratMemoryAffinity *numamem;

    int i;
    int srat_start, numa_start, slots;
    uint64_t mem_len, mem_base, next_base;
    MachineClass *mc = MACHINE_GET_CLASS(machine);
    const CPUArchIdList *apic_ids = mc->possible_cpu_arch_ids(machine);
    PCMachineState *pcms = PC_MACHINE(machine);
    ram_addr_t hotplugabble_address_space_size =
        object_property_get_int(OBJECT(pcms), PC_MACHINE_MEMHP_REGION_SIZE,
                                NULL);

    srat_start = table_data->len;

    srat = acpi_data_push(table_data, sizeof *srat);
    srat->reserved1 = cpu_to_le32(1);

    for (i = 0; i < apic_ids->len; i++) {
        int node_id = apic_ids->cpus[i].props.has_node_id ?
            apic_ids->cpus[i].props.node_id : 0;
        uint32_t apic_id = apic_ids->cpus[i].arch_id;

        if (apic_id < 255) {
            /* legacy 8-bit APIC id entry */
            AcpiSratProcessorAffinity *core;

            core = acpi_data_push(table_data, sizeof *core);
            core->type = ACPI_SRAT_PROCESSOR_APIC;
            core->length = sizeof(*core);
            core->local_apic_id = apic_id;
            core->proximity_lo = node_id;
            memset(core->proximity_hi, 0, 3);
            core->local_sapic_eid = 0;
            core->flags = cpu_to_le32(1);
        } else {
            /* wide APIC ids need the x2APIC affinity structure */
            AcpiSratProcessorX2ApicAffinity *core;

            core = acpi_data_push(table_data, sizeof *core);
            core->type = ACPI_SRAT_PROCESSOR_x2APIC;
            core->length = sizeof(*core);
            core->x2apic_id = cpu_to_le32(apic_id);
            core->proximity_domain = cpu_to_le32(node_id);
            core->flags = cpu_to_le32(1);
        }
    }

    /* the memory map is a bit tricky, it contains at least one hole
     * from 640k-1M and possibly another one from 3.5G-4G.
     */
    next_base = 0;
    numa_start = table_data->len;

    numamem = acpi_data_push(table_data, sizeof *numamem);
    build_srat_memory(numamem, 0, 640 * 1024, 0, MEM_AFFINITY_ENABLED);
    next_base = 1024 * 1024;
    for (i = 1; i < pcms->numa_nodes + 1; ++i) {
        mem_base = next_base;
        mem_len = pcms->node_mem[i - 1];
        if (i == 1) {
            /* the first node already contributed the sub-1M entry above */
            mem_len -= 1024 * 1024;
        }
        next_base = mem_base + mem_len;

        /* Cut out the ACPI_PCI hole */
        if (mem_base <= pcms->below_4g_mem_size &&
            next_base > pcms->below_4g_mem_size) {
            mem_len -= next_base - pcms->below_4g_mem_size;
            if (mem_len > 0) {
                numamem = acpi_data_push(table_data, sizeof *numamem);
                build_srat_memory(numamem, mem_base, mem_len, i - 1,
                                  MEM_AFFINITY_ENABLED);
            }
            /* remainder of the node continues above 4G */
            mem_base = 1ULL << 32;
            mem_len = next_base - pcms->below_4g_mem_size;
            next_base += (1ULL << 32) - pcms->below_4g_mem_size;
        }
        numamem = acpi_data_push(table_data, sizeof *numamem);
        build_srat_memory(numamem, mem_base, mem_len, i - 1,
                          MEM_AFFINITY_ENABLED);
    }
    /* pad with disabled entries so the table length is configuration-
     * independent (numa_nodes + 2 memory slots total) */
    slots = (table_data->len - numa_start) / sizeof *numamem;
    for (; slots < pcms->numa_nodes + 2; slots++) {
        numamem = acpi_data_push(table_data, sizeof *numamem);
        build_srat_memory(numamem, 0, 0, 0, MEM_AFFINITY_NOFLAGS);
    }

    /*
     * Entry is required for Windows to enable memory hotplug in OS
     * and for Linux to enable SWIOTLB when booted with less than
     * 4G of RAM. Windows works better if the entry sets proximity
     * to the highest NUMA node in the machine.
     * Memory devices may override proximity set by this entry,
     * providing _PXM method if necessary.
     */
    if (hotplugabble_address_space_size) {
        numamem = acpi_data_push(table_data, sizeof *numamem);
        build_srat_memory(numamem, pcms->hotplug_memory.base,
                          hotplugabble_address_space_size, pcms->numa_nodes - 1,
                          MEM_AFFINITY_HOTPLUGGABLE | MEM_AFFINITY_ENABLED);
    }

    build_header(linker, table_data,
                 (void *)(table_data->data + srat_start),
                 "SRAT",
                 table_data->len - srat_start, 1, NULL, NULL);
}
0 | static int net_tap_init(VLANState *vlan, const char *model, const char *name, const char *ifname1, const char *setup_script, const char *down_script) { TAPState *s; int fd; char ifname[128]; if (ifname1 != NULL) pstrcpy(ifname, sizeof(ifname), ifname1); else ifname[0] = '\0'; TFR(fd = tap_open(ifname, sizeof(ifname))); if (fd < 0) return -1; if (!setup_script || !strcmp(setup_script, "no")) setup_script = ""; if (setup_script[0] != '\0') { if (launch_script(setup_script, ifname, fd)) return -1; } s = net_tap_fd_init(vlan, model, name, fd); if (!s) return -1; snprintf(s->vc->info_str, sizeof(s->vc->info_str), "ifname=%s,script=%s,downscript=%s", ifname, setup_script, down_script); if (down_script && strcmp(down_script, "no")) snprintf(s->down_script, sizeof(s->down_script), "%s", down_script); return 0; } | 16,261 |
0 | void helper_icbi(target_ulong addr) { addr &= ~(env->dcache_line_size - 1); /* Invalidate one cache line : * PowerPC specification says this is to be treated like a load * (not a fetch) by the MMU. To be sure it will be so, * do the load "by hand". */ ldl(addr); tb_invalidate_page_range(addr, addr + env->icache_line_size); } | 16,263 |
0 | static int get_packetheader(NUTContext *nut, ByteIOContext *bc, int prefix_length) { int64_t start, size, last_size; start= url_ftell(bc) - prefix_length; if(start != nut->packet_start + nut->written_packet_size){ av_log(nut->avf, AV_LOG_ERROR, "get_packetheader called at weird position\n"); return -1; } size= get_v(bc); last_size= get_v(bc); if(nut->written_packet_size != last_size){ av_log(nut->avf, AV_LOG_ERROR, "packet size missmatch %d != %lld at %lld\n", nut->written_packet_size, last_size, start); return -1; } nut->last_packet_start = nut->packet_start; nut->packet_start = start; nut->written_packet_size= size; return size; } | 16,264 |
0 | sdhci_write(void *opaque, hwaddr offset, uint64_t val, unsigned size) { SDHCIState *s = (SDHCIState *)opaque; unsigned shift = 8 * (offset & 0x3); uint32_t mask = ~(((1ULL << (size * 8)) - 1) << shift); uint32_t value = val; value <<= shift; switch (offset & ~0x3) { case SDHC_SYSAD: s->sdmasysad = (s->sdmasysad & mask) | value; MASKED_WRITE(s->sdmasysad, mask, value); /* Writing to last byte of sdmasysad might trigger transfer */ if (!(mask & 0xFF000000) && TRANSFERRING_DATA(s->prnsts) && s->blkcnt && s->blksize && SDHC_DMA_TYPE(s->hostctl) == SDHC_CTRL_SDMA) { sdhci_sdma_transfer_multi_blocks(s); } break; case SDHC_BLKSIZE: if (!TRANSFERRING_DATA(s->prnsts)) { MASKED_WRITE(s->blksize, mask, value); MASKED_WRITE(s->blkcnt, mask >> 16, value >> 16); } /* Limit block size to the maximum buffer size */ if (extract32(s->blksize, 0, 12) > s->buf_maxsz) { qemu_log_mask(LOG_GUEST_ERROR, "%s: Size 0x%x is larger than " \ "the maximum buffer 0x%x", __func__, s->blksize, s->buf_maxsz); s->blksize = deposit32(s->blksize, 0, 12, s->buf_maxsz); } break; case SDHC_ARGUMENT: MASKED_WRITE(s->argument, mask, value); break; case SDHC_TRNMOD: /* DMA can be enabled only if it is supported as indicated by * capabilities register */ if (!(s->capareg & SDHC_CAN_DO_DMA)) { value &= ~SDHC_TRNS_DMA; } MASKED_WRITE(s->trnmod, mask, value); MASKED_WRITE(s->cmdreg, mask >> 16, value >> 16); /* Writing to the upper byte of CMDREG triggers SD command generation */ if ((mask & 0xFF000000) || !sdhci_can_issue_command(s)) { break; } sdhci_send_command(s); break; case SDHC_BDATA: if (sdhci_buff_access_is_sequential(s, offset - SDHC_BDATA)) { sdhci_write_dataport(s, value >> shift, size); } break; case SDHC_HOSTCTL: if (!(mask & 0xFF0000)) { sdhci_blkgap_write(s, value >> 16); } MASKED_WRITE(s->hostctl, mask, value); MASKED_WRITE(s->pwrcon, mask >> 8, value >> 8); MASKED_WRITE(s->wakcon, mask >> 24, value >> 24); if (!(s->prnsts & SDHC_CARD_PRESENT) || ((s->pwrcon >> 1) & 0x7) < 5 || !(s->capareg 
& (1 << (31 - ((s->pwrcon >> 1) & 0x7))))) { s->pwrcon &= ~SDHC_POWER_ON; } break; case SDHC_CLKCON: if (!(mask & 0xFF000000)) { sdhci_reset_write(s, value >> 24); } MASKED_WRITE(s->clkcon, mask, value); MASKED_WRITE(s->timeoutcon, mask >> 16, value >> 16); if (s->clkcon & SDHC_CLOCK_INT_EN) { s->clkcon |= SDHC_CLOCK_INT_STABLE; } else { s->clkcon &= ~SDHC_CLOCK_INT_STABLE; } break; case SDHC_NORINTSTS: if (s->norintstsen & SDHC_NISEN_CARDINT) { value &= ~SDHC_NIS_CARDINT; } s->norintsts &= mask | ~value; s->errintsts &= (mask >> 16) | ~(value >> 16); if (s->errintsts) { s->norintsts |= SDHC_NIS_ERR; } else { s->norintsts &= ~SDHC_NIS_ERR; } sdhci_update_irq(s); break; case SDHC_NORINTSTSEN: MASKED_WRITE(s->norintstsen, mask, value); MASKED_WRITE(s->errintstsen, mask >> 16, value >> 16); s->norintsts &= s->norintstsen; s->errintsts &= s->errintstsen; if (s->errintsts) { s->norintsts |= SDHC_NIS_ERR; } else { s->norintsts &= ~SDHC_NIS_ERR; } /* Quirk for Raspberry Pi: pending card insert interrupt * appears when first enabled after power on */ if ((s->norintstsen & SDHC_NISEN_INSERT) && s->pending_insert_state) { assert(s->pending_insert_quirk); s->norintsts |= SDHC_NIS_INSERT; s->pending_insert_state = false; } sdhci_update_irq(s); break; case SDHC_NORINTSIGEN: MASKED_WRITE(s->norintsigen, mask, value); MASKED_WRITE(s->errintsigen, mask >> 16, value >> 16); sdhci_update_irq(s); break; case SDHC_ADMAERR: MASKED_WRITE(s->admaerr, mask, value); break; case SDHC_ADMASYSADDR: s->admasysaddr = (s->admasysaddr & (0xFFFFFFFF00000000ULL | (uint64_t)mask)) | (uint64_t)value; break; case SDHC_ADMASYSADDR + 4: s->admasysaddr = (s->admasysaddr & (0x00000000FFFFFFFFULL | ((uint64_t)mask << 32))) | ((uint64_t)value << 32); break; case SDHC_FEAER: s->acmd12errsts |= value; s->errintsts |= (value >> 16) & s->errintstsen; if (s->acmd12errsts) { s->errintsts |= SDHC_EIS_CMD12ERR; } if (s->errintsts) { s->norintsts |= SDHC_NIS_ERR; } sdhci_update_irq(s); break; default: ERRPRINT("bad 
%ub write offset: addr[0x%04x] <- %u(0x%x)\n", size, (int)offset, value >> shift, value >> shift); break; } DPRINT_L2("write %ub: addr[0x%04x] <- %u(0x%x)\n", size, (int)offset, value >> shift, value >> shift); } | 16,265 |
0 | static void nbd_refresh_limits(BlockDriverState *bs, Error **errp) { bs->bl.max_pdiscard = NBD_MAX_BUFFER_SIZE; bs->bl.max_pwrite_zeroes = NBD_MAX_BUFFER_SIZE; bs->bl.max_transfer = NBD_MAX_BUFFER_SIZE; } | 16,266 |
0 | void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr) { int64_t nb_sectors = bdrv_nb_sectors(bs); *nb_sectors_ptr = nb_sectors < 0 ? 0 : nb_sectors; } | 16,267 |
0 | static void virt_acpi_get_cpu_info(VirtAcpiCpuInfo *cpuinfo) { CPUState *cpu; memset(cpuinfo->found_cpus, 0, sizeof cpuinfo->found_cpus); CPU_FOREACH(cpu) { set_bit(cpu->cpu_index, cpuinfo->found_cpus); } } | 16,268 |
0 | static void v9fs_wstat_post_utime(V9fsState *s, V9fsWstatState *vs, int err) { if (err < 0) { goto out; } if (vs->v9stat.n_gid != -1) { if (v9fs_do_chown(s, &vs->fidp->path, vs->v9stat.n_uid, vs->v9stat.n_gid)) { err = -errno; } } v9fs_wstat_post_chown(s, vs, err); return; out: v9fs_stat_free(&vs->v9stat); complete_pdu(s, vs->pdu, err); qemu_free(vs); } | 16,269 |
0 | static int setfsugid(int uid, int gid) { /* * We still need DAC_OVERRIDE because we don't change * supplementary group ids, and hence may be subjected DAC rules */ cap_value_t cap_list[] = { CAP_DAC_OVERRIDE, }; setfsgid(gid); setfsuid(uid); if (uid != 0 || gid != 0) { return do_cap_set(cap_list, ARRAY_SIZE(cap_list), 0); } return 0; } | 16,271 |
/*
 * Build an AArch64 rt signal frame on the guest stack and redirect the CPU
 * to the signal handler.
 *
 * Lays out target_rt_sigframe at the address chosen by get_sigframe(),
 * fills in the ucontext (flags, link, alternate-stack info, saved machine
 * state), and sets x0 (signal number), sp, fp, pc and lr (return address).
 * If the handler has no SA_RESTORER, a two-instruction trampoline
 * ("mov x8, #__NR_rt_sigreturn; svc #0") is written into the frame and
 * used as the return address.  On any failure to map the frame, the guest
 * gets SIGSEGV.
 */
static void target_setup_frame(int usig, struct target_sigaction *ka,
                               target_siginfo_t *info, target_sigset_t *set,
                               CPUARMState *env)
{
    struct target_rt_sigframe *frame;
    abi_ulong frame_addr, return_addr;

    frame_addr = get_sigframe(ka, env);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        goto give_sigsegv;
    }

    __put_user(0, &frame->uc.tuc_flags);
    __put_user(0, &frame->uc.tuc_link);

    __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
    /* xregs[31] is the guest SP at signal delivery time */
    __put_user(sas_ss_flags(env->xregs[31]), &frame->uc.tuc_stack.ss_flags);
    __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size);
    target_setup_sigframe(frame, env, set);
    if (ka->sa_flags & TARGET_SA_RESTORER) {
        return_addr = ka->sa_restorer;
    } else {
        /* mov x8,#__NR_rt_sigreturn; svc #0 */
        __put_user(0xd2801168, &frame->tramp[0]);
        __put_user(0xd4000001, &frame->tramp[1]);
        return_addr = frame_addr + offsetof(struct target_rt_sigframe, tramp);
    }
    env->xregs[0] = usig;
    env->xregs[31] = frame_addr;            /* sp -> signal frame */
    env->xregs[29] = env->xregs[31] + offsetof(struct target_rt_sigframe, fp);
    env->pc = ka->_sa_handler;
    env->xregs[30] = return_addr;           /* lr -> restorer/trampoline */
    if (info) {
        if (copy_siginfo_to_user(&frame->info, info)) {
            goto give_sigsegv;
        }
        /* SA_SIGINFO handlers receive (sig, siginfo*, ucontext*) */
        env->xregs[1] = frame_addr + offsetof(struct target_rt_sigframe, info);
        env->xregs[2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
    }

    unlock_user_struct(frame, frame_addr, 1);
    return;

give_sigsegv:
    unlock_user_struct(frame, frame_addr, 1);
    force_sig(TARGET_SIGSEGV);
}
0 | static bool check_solid_tile(VncState *vs, int x, int y, int w, int h, uint32_t* color, bool samecolor) { VncDisplay *vd = vs->vd; switch(vd->server->pf.bytes_per_pixel) { case 4: return check_solid_tile32(vs, x, y, w, h, color, samecolor); case 2: return check_solid_tile16(vs, x, y, w, h, color, samecolor); default: return check_solid_tile8(vs, x, y, w, h, color, samecolor); } } | 16,273 |
/*
 * H.264 luma deblocking of a horizontal inter-MB edge, MIPS MSA SIMD
 * version: filters 16 pixels across the edge at @data using per-4-pixel
 * boundary strengths bs0..bs3 and clipping thresholds tc0..tc3, with the
 * alpha/beta edge-activity thresholds.  @image_width is the row stride.
 *
 * The scalar reference semantics match ff_h264_filter_mb(): pixels are
 * only modified where bs > 0 and |p0-q0| < alpha, |p1-p0| < beta,
 * |q1-q0| < beta; p1/q1 are additionally filtered (and tc bumped) where
 * |p2-p0| / |q2-q0| < beta.  The _r/_l variable pairs hold the right/left
 * 8-pixel halves widened to 16 bits.
 */
static void avc_loopfilter_luma_inter_edge_hor_msa(uint8_t *data,
                                                   uint8_t bs0, uint8_t bs1,
                                                   uint8_t bs2, uint8_t bs3,
                                                   uint8_t tc0, uint8_t tc1,
                                                   uint8_t tc2, uint8_t tc3,
                                                   uint8_t alpha_in,
                                                   uint8_t beta_in,
                                                   uint32_t image_width)
{
    v16u8 p2_asub_p0, u8_q2asub_q0;
    v16u8 alpha, beta, is_less_than, is_less_than_beta;
    v16u8 p1, p0, q0, q1;
    v8i16 p1_r = { 0 };
    v8i16 p0_r, q0_r, q1_r = { 0 };
    v8i16 p1_l = { 0 };
    v8i16 p0_l, q0_l, q1_l = { 0 };
    v16u8 p2_org, p1_org, p0_org, q0_org, q1_org, q2_org;
    v8i16 p2_org_r, p1_org_r, p0_org_r, q0_org_r, q1_org_r, q2_org_r;
    v8i16 p2_org_l, p1_org_l, p0_org_l, q0_org_l, q1_org_l, q2_org_l;
    v16i8 zero = { 0 };
    v16u8 tmp_vec;
    v16u8 bs = { 0 };
    v16i8 tc = { 0 };

    /* replicate each 4-pixel boundary strength into one vector lane group */
    tmp_vec = (v16u8) __msa_fill_b(bs0);
    bs = (v16u8) __msa_insve_w((v4i32) bs, 0, (v4i32) tmp_vec);
    tmp_vec = (v16u8) __msa_fill_b(bs1);
    bs = (v16u8) __msa_insve_w((v4i32) bs, 1, (v4i32) tmp_vec);
    tmp_vec = (v16u8) __msa_fill_b(bs2);
    bs = (v16u8) __msa_insve_w((v4i32) bs, 2, (v4i32) tmp_vec);
    tmp_vec = (v16u8) __msa_fill_b(bs3);
    bs = (v16u8) __msa_insve_w((v4i32) bs, 3, (v4i32) tmp_vec);

    if (!__msa_test_bz_v(bs)) {
        /* likewise for the tc clipping values */
        tmp_vec = (v16u8) __msa_fill_b(tc0);
        tc = (v16i8) __msa_insve_w((v4i32) tc, 0, (v4i32) tmp_vec);
        tmp_vec = (v16u8) __msa_fill_b(tc1);
        tc = (v16i8) __msa_insve_w((v4i32) tc, 1, (v4i32) tmp_vec);
        tmp_vec = (v16u8) __msa_fill_b(tc2);
        tc = (v16i8) __msa_insve_w((v4i32) tc, 2, (v4i32) tmp_vec);
        tmp_vec = (v16u8) __msa_fill_b(tc3);
        tc = (v16i8) __msa_insve_w((v4i32) tc, 3, (v4i32) tmp_vec);

        alpha = (v16u8) __msa_fill_b(alpha_in);
        beta = (v16u8) __msa_fill_b(beta_in);

        /* load three rows above (p2..p0) and two below (q0..q1) the edge */
        p2_org = LOAD_UB(data - (3 * image_width));
        p1_org = LOAD_UB(data - (image_width << 1));
        p0_org = LOAD_UB(data - image_width);
        q0_org = LOAD_UB(data);
        q1_org = LOAD_UB(data + image_width);

        {
            v16u8 p0_asub_q0, p1_asub_p0, q1_asub_q0;
            v16u8 is_less_than_alpha, is_bs_greater_than0;

            is_bs_greater_than0 = ((v16u8) zero < bs);
            p0_asub_q0 = __msa_asub_u_b(p0_org, q0_org);
            p1_asub_p0 = __msa_asub_u_b(p1_org, p0_org);
            q1_asub_q0 = __msa_asub_u_b(q1_org, q0_org);

            /* per-pixel filter-enable mask from the alpha/beta tests */
            is_less_than_alpha = (p0_asub_q0 < alpha);
            is_less_than_beta = (p1_asub_p0 < beta);
            is_less_than = is_less_than_beta & is_less_than_alpha;
            is_less_than_beta = (q1_asub_q0 < beta);
            is_less_than = is_less_than_beta & is_less_than;
            is_less_than = is_less_than & is_bs_greater_than0;
        }
        if (!__msa_test_bz_v(is_less_than)) {
            v16i8 sign_negate_tc, negate_tc;
            v8i16 negate_tc_r, i16_negatetc_l, tc_l, tc_r;

            q2_org = LOAD_UB(data + (2 * image_width));

            negate_tc = zero - tc;
            sign_negate_tc = __msa_clti_s_b(negate_tc, 0);

            /* sign-extend -tc and zero-extend tc to 16-bit halves */
            negate_tc_r = (v8i16) __msa_ilvr_b(sign_negate_tc, negate_tc);
            i16_negatetc_l = (v8i16) __msa_ilvl_b(sign_negate_tc, negate_tc);

            tc_r = (v8i16) __msa_ilvr_b(zero, tc);
            tc_l = (v8i16) __msa_ilvl_b(zero, tc);

            p1_org_r = (v8i16) __msa_ilvr_b(zero, (v16i8) p1_org);
            p0_org_r = (v8i16) __msa_ilvr_b(zero, (v16i8) p0_org);
            q0_org_r = (v8i16) __msa_ilvr_b(zero, (v16i8) q0_org);
            p1_org_l = (v8i16) __msa_ilvl_b(zero, (v16i8) p1_org);
            p0_org_l = (v8i16) __msa_ilvl_b(zero, (v16i8) p0_org);
            q0_org_l = (v8i16) __msa_ilvl_b(zero, (v16i8) q0_org);

            /* p1 filtering where |p2-p0| < beta */
            p2_asub_p0 = __msa_asub_u_b(p2_org, p0_org);
            is_less_than_beta = (p2_asub_p0 < beta);
            is_less_than_beta = is_less_than_beta & is_less_than;
            {
                v8u16 is_less_than_beta_r, is_less_than_beta_l;

                is_less_than_beta_r =
                    (v8u16) __msa_sldi_b((v16i8) is_less_than_beta, zero, 8);
                if (!__msa_test_bz_v((v16u8) is_less_than_beta_r)) {
                    p2_org_r = (v8i16) __msa_ilvr_b(zero, (v16i8) p2_org);

                    AVC_LOOP_FILTER_P1_OR_Q1(p0_org_r, q0_org_r, p1_org_r,
                                             p2_org_r, negate_tc_r, tc_r,
                                             p1_r);
                }

                is_less_than_beta_l =
                    (v8u16) __msa_sldi_b(zero, (v16i8) is_less_than_beta, 8);
                if (!__msa_test_bz_v((v16u8) is_less_than_beta_l)) {
                    p2_org_l = (v8i16) __msa_ilvl_b(zero, (v16i8) p2_org);

                    AVC_LOOP_FILTER_P1_OR_Q1(p0_org_l, q0_org_l, p1_org_l,
                                             p2_org_l, i16_negatetc_l, tc_l,
                                             p1_l);
                }
            }
            if (!__msa_test_bz_v(is_less_than_beta)) {
                p1 = (v16u8) __msa_pckev_b((v16i8) p1_l, (v16i8) p1_r);
                p1_org = __msa_bmnz_v(p1_org, p1, is_less_than_beta);
                STORE_UB(p1_org, data - (2 * image_width));

                /* tc is incremented where p1 was filtered */
                is_less_than_beta = __msa_andi_b(is_less_than_beta, 1);
                tc = tc + (v16i8) is_less_than_beta;
            }

            /* q1 filtering where |q2-q0| < beta */
            u8_q2asub_q0 = __msa_asub_u_b(q2_org, q0_org);
            is_less_than_beta = (u8_q2asub_q0 < beta);
            is_less_than_beta = is_less_than_beta & is_less_than;
            {
                v8u16 is_less_than_beta_r, is_less_than_beta_l;
                is_less_than_beta_r =
                    (v8u16) __msa_sldi_b((v16i8) is_less_than_beta, zero, 8);

                q1_org_r = (v8i16) __msa_ilvr_b(zero, (v16i8) q1_org);
                if (!__msa_test_bz_v((v16u8) is_less_than_beta_r)) {
                    q2_org_r = (v8i16) __msa_ilvr_b(zero, (v16i8) q2_org);

                    AVC_LOOP_FILTER_P1_OR_Q1(p0_org_r, q0_org_r, q1_org_r,
                                             q2_org_r, negate_tc_r, tc_r,
                                             q1_r);
                }

                is_less_than_beta_l =
                    (v8u16) __msa_sldi_b(zero, (v16i8) is_less_than_beta, 8);

                q1_org_l = (v8i16) __msa_ilvl_b(zero, (v16i8) q1_org);
                if (!__msa_test_bz_v((v16u8) is_less_than_beta_l)) {
                    q2_org_l = (v8i16) __msa_ilvl_b(zero, (v16i8) q2_org);

                    AVC_LOOP_FILTER_P1_OR_Q1(p0_org_l, q0_org_l, q1_org_l,
                                             q2_org_l, i16_negatetc_l, tc_l,
                                             q1_l);
                }
            }
            if (!__msa_test_bz_v(is_less_than_beta)) {
                q1 = (v16u8) __msa_pckev_b((v16i8) q1_l, (v16i8) q1_r);
                q1_org = __msa_bmnz_v(q1_org, q1, is_less_than_beta);
                STORE_UB(q1_org, data + image_width);

                is_less_than_beta = __msa_andi_b(is_less_than_beta, 1);
                tc = tc + (v16i8) is_less_than_beta;
            }

            /* p0/q0 filtering with the (possibly bumped) tc threshold */
            {
                v16i8 negate_thresh, sign_negate_thresh;
                v8i16 threshold_r, threshold_l;
                v8i16 negate_thresh_l, negate_thresh_r;

                negate_thresh = zero - tc;
                sign_negate_thresh = __msa_clti_s_b(negate_thresh, 0);

                threshold_r = (v8i16) __msa_ilvr_b(zero, tc);
                negate_thresh_r = (v8i16) __msa_ilvr_b(sign_negate_thresh,
                                                       negate_thresh);

                AVC_LOOP_FILTER_P0Q0(q0_org_r, p0_org_r, p1_org_r, q1_org_r,
                                     negate_thresh_r, threshold_r,
                                     p0_r, q0_r);

                threshold_l = (v8i16) __msa_ilvl_b(zero, tc);
                negate_thresh_l = (v8i16) __msa_ilvl_b(sign_negate_thresh,
                                                       negate_thresh);

                AVC_LOOP_FILTER_P0Q0(q0_org_l, p0_org_l, p1_org_l, q1_org_l,
                                     negate_thresh_l, threshold_l,
                                     p0_l, q0_l);
            }

            p0 = (v16u8) __msa_pckev_b((v16i8) p0_l, (v16i8) p0_r);
            q0 = (v16u8) __msa_pckev_b((v16i8) q0_l, (v16i8) q0_r);

            /* merge filtered pixels back under the enable mask */
            p0_org = __msa_bmnz_v(p0_org, p0, is_less_than);
            q0_org = __msa_bmnz_v(q0_org, q0, is_less_than);

            STORE_UB(p0_org, (data - image_width));
            STORE_UB(q0_org, data);
        }
    }
}
/*
 * Print the list of supported x86 CPU models to @f via @cpu_fprintf.
 *
 * @optarg selects the output mode: "?model" adds model-id strings,
 * "?dump" additionally dumps every CPUID field of each model, and
 * "?cpuid" prints the recognized CPUID feature-flag names instead.
 *
 * NOTE(review): the parameter name 'optarg' shadows the getopt global of
 * the same name — harmless here, but worth confirming it is intentional.
 */
void x86_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                  const char *optarg)
{
    unsigned char model = !strcmp("?model", optarg);
    unsigned char dump = !strcmp("?dump", optarg);
    unsigned char cpuid = !strcmp("?cpuid", optarg);
    x86_def_t *def;
    char buf[256];

    if (cpuid) {
        (*cpu_fprintf)(f, "Recognized CPUID flags:\n");
        listflags(buf, sizeof (buf), (uint32_t)~0, feature_name, 1);
        (*cpu_fprintf)(f, "  f_edx: %s\n", buf);
        listflags(buf, sizeof (buf), (uint32_t)~0, ext_feature_name, 1);
        (*cpu_fprintf)(f, "  f_ecx: %s\n", buf);
        listflags(buf, sizeof (buf), (uint32_t)~0, ext2_feature_name, 1);
        (*cpu_fprintf)(f, "  extf_edx: %s\n", buf);
        listflags(buf, sizeof (buf), (uint32_t)~0, ext3_feature_name, 1);
        (*cpu_fprintf)(f, "  extf_ecx: %s\n", buf);
        return;
    }
    for (def = x86_defs; def; def = def->next) {
        /* bracket names of flagged (e.g. non-migratable) definitions */
        snprintf(buf, sizeof (buf), def->flags ? "[%s]": "%s", def->name);
        if (model || dump) {
            (*cpu_fprintf)(f, "x86 %16s  %-48s\n", buf, def->model_id);
        } else {
            (*cpu_fprintf)(f, "x86 %16s\n", buf);
        }
        if (dump) {
            /* reconstruct the 12-byte vendor string from its three dwords */
            memcpy(buf, &def->vendor1, sizeof (def->vendor1));
            memcpy(buf + 4, &def->vendor2, sizeof (def->vendor2));
            memcpy(buf + 8, &def->vendor3, sizeof (def->vendor3));
            buf[12] = '\0';
            (*cpu_fprintf)(f,
                "  family %d model %d stepping %d level %d xlevel 0x%x"
                " vendor \"%s\"\n",
                def->family, def->model, def->stepping, def->level,
                def->xlevel, buf);
            listflags(buf, sizeof (buf), def->features, feature_name, 0);
            (*cpu_fprintf)(f, "  feature_edx %08x (%s)\n", def->features,
                buf);
            listflags(buf, sizeof (buf), def->ext_features, ext_feature_name,
                0);
            (*cpu_fprintf)(f, "  feature_ecx %08x (%s)\n", def->ext_features,
                buf);
            listflags(buf, sizeof (buf), def->ext2_features, ext2_feature_name,
                0);
            (*cpu_fprintf)(f, "  extfeature_edx %08x (%s)\n",
                def->ext2_features, buf);
            listflags(buf, sizeof (buf), def->ext3_features, ext3_feature_name,
                0);
            (*cpu_fprintf)(f, "  extfeature_ecx %08x (%s)\n",
                def->ext3_features, buf);
            (*cpu_fprintf)(f, "\n");
        }
    }
    if (kvm_enabled()) {
        (*cpu_fprintf)(f, "x86 %16s\n", "[host]");
    }
}
0 | static void dec_misc(DisasContext *dc, uint32_t insn) { uint32_t op0, op1; uint32_t ra, rb, rd; #ifdef OPENRISC_DISAS uint32_t L6, K5; #endif uint32_t I16, I5, I11, N26, tmp; TCGMemOp mop; op0 = extract32(insn, 26, 6); op1 = extract32(insn, 24, 2); ra = extract32(insn, 16, 5); rb = extract32(insn, 11, 5); rd = extract32(insn, 21, 5); #ifdef OPENRISC_DISAS L6 = extract32(insn, 5, 6); K5 = extract32(insn, 0, 5); #endif I16 = extract32(insn, 0, 16); I5 = extract32(insn, 21, 5); I11 = extract32(insn, 0, 11); N26 = extract32(insn, 0, 26); tmp = (I5<<11) + I11; switch (op0) { case 0x00: /* l.j */ LOG_DIS("l.j %d\n", N26); gen_jump(dc, N26, 0, op0); break; case 0x01: /* l.jal */ LOG_DIS("l.jal %d\n", N26); gen_jump(dc, N26, 0, op0); break; case 0x03: /* l.bnf */ LOG_DIS("l.bnf %d\n", N26); gen_jump(dc, N26, 0, op0); break; case 0x04: /* l.bf */ LOG_DIS("l.bf %d\n", N26); gen_jump(dc, N26, 0, op0); break; case 0x05: switch (op1) { case 0x01: /* l.nop */ LOG_DIS("l.nop %d\n", I16); break; default: gen_illegal_exception(dc); break; } break; case 0x11: /* l.jr */ LOG_DIS("l.jr r%d\n", rb); gen_jump(dc, 0, rb, op0); break; case 0x12: /* l.jalr */ LOG_DIS("l.jalr r%d\n", rb); gen_jump(dc, 0, rb, op0); break; case 0x13: /* l.maci */ LOG_DIS("l.maci %d, r%d, %d\n", I5, ra, I11); { TCGv_i64 t1 = tcg_temp_new_i64(); TCGv_i64 t2 = tcg_temp_new_i64(); TCGv_i32 dst = tcg_temp_new_i32(); TCGv ttmp = tcg_const_tl(tmp); tcg_gen_mul_tl(dst, cpu_R[ra], ttmp); tcg_gen_ext_i32_i64(t1, dst); tcg_gen_concat_i32_i64(t2, maclo, machi); tcg_gen_add_i64(t2, t2, t1); tcg_gen_trunc_i64_i32(maclo, t2); tcg_gen_shri_i64(t2, t2, 32); tcg_gen_trunc_i64_i32(machi, t2); tcg_temp_free_i32(dst); tcg_temp_free(ttmp); tcg_temp_free_i64(t1); tcg_temp_free_i64(t2); } break; case 0x09: /* l.rfe */ LOG_DIS("l.rfe\n"); { #if defined(CONFIG_USER_ONLY) return; #else if (dc->mem_idx == MMU_USER_IDX) { gen_illegal_exception(dc); return; } gen_helper_rfe(cpu_env); dc->is_jmp = DISAS_UPDATE; #endif } break; case 
0x1c: /* l.cust1 */ LOG_DIS("l.cust1\n"); break; case 0x1d: /* l.cust2 */ LOG_DIS("l.cust2\n"); break; case 0x1e: /* l.cust3 */ LOG_DIS("l.cust3\n"); break; case 0x1f: /* l.cust4 */ LOG_DIS("l.cust4\n"); break; case 0x3c: /* l.cust5 */ LOG_DIS("l.cust5 r%d, r%d, r%d, %d, %d\n", rd, ra, rb, L6, K5); break; case 0x3d: /* l.cust6 */ LOG_DIS("l.cust6\n"); break; case 0x3e: /* l.cust7 */ LOG_DIS("l.cust7\n"); break; case 0x3f: /* l.cust8 */ LOG_DIS("l.cust8\n"); break; /* not used yet, open it when we need or64. */ /*#ifdef TARGET_OPENRISC64 case 0x20: l.ld LOG_DIS("l.ld r%d, r%d, %d\n", rd, ra, I16); check_ob64s(dc); mop = MO_TEQ; goto do_load; #endif*/ case 0x21: /* l.lwz */ LOG_DIS("l.lwz r%d, r%d, %d\n", rd, ra, I16); mop = MO_TEUL; goto do_load; case 0x22: /* l.lws */ LOG_DIS("l.lws r%d, r%d, %d\n", rd, ra, I16); mop = MO_TESL; goto do_load; case 0x23: /* l.lbz */ LOG_DIS("l.lbz r%d, r%d, %d\n", rd, ra, I16); mop = MO_UB; goto do_load; case 0x24: /* l.lbs */ LOG_DIS("l.lbs r%d, r%d, %d\n", rd, ra, I16); mop = MO_SB; goto do_load; case 0x25: /* l.lhz */ LOG_DIS("l.lhz r%d, r%d, %d\n", rd, ra, I16); mop = MO_TEUW; goto do_load; case 0x26: /* l.lhs */ LOG_DIS("l.lhs r%d, r%d, %d\n", rd, ra, I16); mop = MO_TESW; goto do_load; do_load: { TCGv t0 = tcg_temp_new(); tcg_gen_addi_tl(t0, cpu_R[ra], sign_extend(I16, 16)); tcg_gen_qemu_ld_tl(cpu_R[rd], t0, dc->mem_idx, mop); tcg_temp_free(t0); } break; case 0x27: /* l.addi */ LOG_DIS("l.addi r%d, r%d, %d\n", rd, ra, I16); { if (I16 == 0) { tcg_gen_mov_tl(cpu_R[rd], cpu_R[ra]); } else { int lab = gen_new_label(); TCGv_i64 ta = tcg_temp_new_i64(); TCGv_i64 td = tcg_temp_local_new_i64(); TCGv_i32 res = tcg_temp_local_new_i32(); TCGv_i32 sr_ove = tcg_temp_local_new_i32(); tcg_gen_extu_i32_i64(ta, cpu_R[ra]); tcg_gen_addi_i64(td, ta, sign_extend(I16, 16)); tcg_gen_trunc_i64_i32(res, td); tcg_gen_shri_i64(td, td, 32); tcg_gen_andi_i64(td, td, 0x3); /* Jump to lab when no overflow. 
*/ tcg_gen_brcondi_i64(TCG_COND_EQ, td, 0x0, lab); tcg_gen_brcondi_i64(TCG_COND_EQ, td, 0x3, lab); tcg_gen_ori_i32(cpu_sr, cpu_sr, (SR_OV | SR_CY)); tcg_gen_andi_i32(sr_ove, cpu_sr, SR_OVE); tcg_gen_brcondi_i32(TCG_COND_NE, sr_ove, SR_OVE, lab); gen_exception(dc, EXCP_RANGE); gen_set_label(lab); tcg_gen_mov_i32(cpu_R[rd], res); tcg_temp_free_i64(ta); tcg_temp_free_i64(td); tcg_temp_free_i32(res); tcg_temp_free_i32(sr_ove); } } break; case 0x28: /* l.addic */ LOG_DIS("l.addic r%d, r%d, %d\n", rd, ra, I16); { int lab = gen_new_label(); TCGv_i64 ta = tcg_temp_new_i64(); TCGv_i64 td = tcg_temp_local_new_i64(); TCGv_i64 tcy = tcg_temp_local_new_i64(); TCGv_i32 res = tcg_temp_local_new_i32(); TCGv_i32 sr_cy = tcg_temp_local_new_i32(); TCGv_i32 sr_ove = tcg_temp_local_new_i32(); tcg_gen_extu_i32_i64(ta, cpu_R[ra]); tcg_gen_andi_i32(sr_cy, cpu_sr, SR_CY); tcg_gen_shri_i32(sr_cy, sr_cy, 10); tcg_gen_extu_i32_i64(tcy, sr_cy); tcg_gen_addi_i64(td, ta, sign_extend(I16, 16)); tcg_gen_add_i64(td, td, tcy); tcg_gen_trunc_i64_i32(res, td); tcg_gen_shri_i64(td, td, 32); tcg_gen_andi_i64(td, td, 0x3); /* Jump to lab when no overflow. 
*/ tcg_gen_brcondi_i64(TCG_COND_EQ, td, 0x0, lab); tcg_gen_brcondi_i64(TCG_COND_EQ, td, 0x3, lab); tcg_gen_ori_i32(cpu_sr, cpu_sr, (SR_OV | SR_CY)); tcg_gen_andi_i32(sr_ove, cpu_sr, SR_OVE); tcg_gen_brcondi_i32(TCG_COND_NE, sr_ove, SR_OVE, lab); gen_exception(dc, EXCP_RANGE); gen_set_label(lab); tcg_gen_mov_i32(cpu_R[rd], res); tcg_temp_free_i64(ta); tcg_temp_free_i64(td); tcg_temp_free_i64(tcy); tcg_temp_free_i32(res); tcg_temp_free_i32(sr_cy); tcg_temp_free_i32(sr_ove); } break; case 0x29: /* l.andi */ LOG_DIS("l.andi r%d, r%d, %d\n", rd, ra, I16); tcg_gen_andi_tl(cpu_R[rd], cpu_R[ra], zero_extend(I16, 16)); break; case 0x2a: /* l.ori */ LOG_DIS("l.ori r%d, r%d, %d\n", rd, ra, I16); tcg_gen_ori_tl(cpu_R[rd], cpu_R[ra], zero_extend(I16, 16)); break; case 0x2b: /* l.xori */ LOG_DIS("l.xori r%d, r%d, %d\n", rd, ra, I16); tcg_gen_xori_tl(cpu_R[rd], cpu_R[ra], sign_extend(I16, 16)); break; case 0x2c: /* l.muli */ LOG_DIS("l.muli r%d, r%d, %d\n", rd, ra, I16); if (ra != 0 && I16 != 0) { TCGv_i32 im = tcg_const_i32(I16); gen_helper_mul32(cpu_R[rd], cpu_env, cpu_R[ra], im); tcg_temp_free_i32(im); } else { tcg_gen_movi_tl(cpu_R[rd], 0x0); } break; case 0x2d: /* l.mfspr */ LOG_DIS("l.mfspr r%d, r%d, %d\n", rd, ra, I16); { #if defined(CONFIG_USER_ONLY) return; #else TCGv_i32 ti = tcg_const_i32(I16); if (dc->mem_idx == MMU_USER_IDX) { gen_illegal_exception(dc); return; } gen_helper_mfspr(cpu_R[rd], cpu_env, cpu_R[rd], cpu_R[ra], ti); tcg_temp_free_i32(ti); #endif } break; case 0x30: /* l.mtspr */ LOG_DIS("l.mtspr %d, r%d, r%d, %d\n", I5, ra, rb, I11); { #if defined(CONFIG_USER_ONLY) return; #else TCGv_i32 im = tcg_const_i32(tmp); if (dc->mem_idx == MMU_USER_IDX) { gen_illegal_exception(dc); return; } gen_helper_mtspr(cpu_env, cpu_R[ra], cpu_R[rb], im); tcg_temp_free_i32(im); #endif } break; /* not used yet, open it when we need or64. 
*/ /*#ifdef TARGET_OPENRISC64 case 0x34: l.sd LOG_DIS("l.sd %d, r%d, r%d, %d\n", I5, ra, rb, I11); check_ob64s(dc); mop = MO_TEQ; goto do_store; #endif*/ case 0x35: /* l.sw */ LOG_DIS("l.sw %d, r%d, r%d, %d\n", I5, ra, rb, I11); mop = MO_TEUL; goto do_store; case 0x36: /* l.sb */ LOG_DIS("l.sb %d, r%d, r%d, %d\n", I5, ra, rb, I11); mop = MO_UB; goto do_store; case 0x37: /* l.sh */ LOG_DIS("l.sh %d, r%d, r%d, %d\n", I5, ra, rb, I11); mop = MO_TEUW; goto do_store; do_store: { TCGv t0 = tcg_temp_new(); tcg_gen_addi_tl(t0, cpu_R[ra], sign_extend(tmp, 16)); tcg_gen_qemu_st_tl(cpu_R[rb], t0, dc->mem_idx, mop); tcg_temp_free(t0); } break; default: gen_illegal_exception(dc); break; } } | 16,280 |
0 | int socket_connect(SocketAddress *addr, NonBlockingConnectHandler *callback, void *opaque, Error **errp) { int fd; switch (addr->type) { case SOCKET_ADDRESS_KIND_INET: fd = inet_connect_saddr(addr->u.inet.data, callback, opaque, errp); break; case SOCKET_ADDRESS_KIND_UNIX: fd = unix_connect_saddr(addr->u.q_unix.data, callback, opaque, errp); break; case SOCKET_ADDRESS_KIND_FD: fd = monitor_get_fd(cur_mon, addr->u.fd.data->str, errp); if (fd >= 0 && callback) { qemu_set_nonblock(fd); callback(fd, NULL, opaque); } break; case SOCKET_ADDRESS_KIND_VSOCK: fd = vsock_connect_saddr(addr->u.vsock.data, callback, opaque, errp); break; default: abort(); } return fd; } | 16,282 |
0 | static av_cold int init(AVFilterContext *ctx) { FormatContext *s = ctx->priv; char *cur, *sep; int nb_formats = 1; int i; int ret; /* count the formats */ cur = s->pix_fmts; while ((cur = strchr(cur, '|'))) { nb_formats++; if (*cur) cur++; } s->formats = av_malloc_array(nb_formats + 1, sizeof(*s->formats)); if (!s->formats) return AVERROR(ENOMEM); if (!s->pix_fmts) return AVERROR(EINVAL); /* parse the list of formats */ cur = s->pix_fmts; for (i = 0; i < nb_formats; i++) { sep = strchr(cur, '|'); if (sep) *sep++ = 0; if ((ret = ff_parse_pixel_format(&s->formats[i], cur, ctx)) < 0) return ret; cur = sep; } s->formats[nb_formats] = AV_PIX_FMT_NONE; if (!strcmp(ctx->filter->name, "noformat")) { const AVPixFmtDescriptor *desc = NULL; enum AVPixelFormat *formats_allowed; int nb_formats_lavu = 0, nb_formats_allowed = 0; /* count the formats known to lavu */ while ((desc = av_pix_fmt_desc_next(desc))) nb_formats_lavu++; formats_allowed = av_malloc_array(nb_formats_lavu + 1, sizeof(*formats_allowed)); if (!formats_allowed) return AVERROR(ENOMEM); /* for each format known to lavu, check if it's in the list of * forbidden formats */ while ((desc = av_pix_fmt_desc_next(desc))) { enum AVPixelFormat pix_fmt = av_pix_fmt_desc_get_id(desc); for (i = 0; i < nb_formats; i++) { if (s->formats[i] == pix_fmt) break; } if (i < nb_formats) continue; formats_allowed[nb_formats_allowed++] = pix_fmt; } formats_allowed[nb_formats_allowed] = AV_PIX_FMT_NONE; av_freep(&s->formats); s->formats = formats_allowed; } return 0; } | 16,284 |
1 | static void tcg_commit(MemoryListener *listener) { CPUAddressSpace *cpuas; AddressSpaceDispatch *d; /* since each CPU stores ram addresses in its TLB cache, we must reset the modified entries */ cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener); cpu_reloading_memory_map(); /* The CPU and TLB are protected by the iothread lock. * We reload the dispatch pointer now because cpu_reloading_memory_map() * may have split the RCU critical section. */ d = atomic_rcu_read(&cpuas->as->dispatch); cpuas->memory_dispatch = d; tlb_flush(cpuas->cpu, 1); } | 16,285 |
1 | static int qemu_chr_open_null(QemuOpts *opts, CharDriverState **_chr) { CharDriverState *chr; chr = g_malloc0(sizeof(CharDriverState)); chr->chr_write = null_chr_write; *_chr= chr; return 0; } | 16,286 |
1 | void host_net_remove_completion(ReadLineState *rs, int nb_args, const char *str) { NetClientState *ncs[MAX_QUEUE_NUM]; int count, i, len; len = strlen(str); readline_set_completion_index(rs, len); if (nb_args == 2) { count = qemu_find_net_clients_except(NULL, ncs, NET_CLIENT_OPTIONS_KIND_NONE, MAX_QUEUE_NUM); for (i = 0; i < count; i++) { int id; char name[16]; if (net_hub_id_for_client(ncs[i], &id)) { continue; } snprintf(name, sizeof(name), "%d", id); if (!strncmp(str, name, len)) { readline_add_completion(rs, name); } } return; } else if (nb_args == 3) { count = qemu_find_net_clients_except(NULL, ncs, NET_CLIENT_OPTIONS_KIND_NIC, MAX_QUEUE_NUM); for (i = 0; i < count; i++) { int id; const char *name; if (ncs[i]->info->type == NET_CLIENT_OPTIONS_KIND_HUBPORT || net_hub_id_for_client(ncs[i], &id)) { continue; } name = ncs[i]->name; if (!strncmp(str, name, len)) { readline_add_completion(rs, name); } } return; } } | 16,287 |
1 | int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src) { MpegEncContext *s = dst->priv_data, *s1 = src->priv_data; if (dst == src || !s1->context_initialized) return 0; // FIXME can parameters change on I-frames? // in that case dst may need a reinit if (!s->context_initialized) { memcpy(s, s1, sizeof(MpegEncContext)); s->avctx = dst; s->picture_range_start += MAX_PICTURE_COUNT; s->picture_range_end += MAX_PICTURE_COUNT; s->bitstream_buffer = NULL; s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0; ff_MPV_common_init(s); } if (s->height != s1->height || s->width != s1->width || s->context_reinit) { int err; s->context_reinit = 0; s->height = s1->height; s->width = s1->width; if ((err = ff_MPV_common_frame_size_change(s)) < 0) return err; } s->avctx->coded_height = s1->avctx->coded_height; s->avctx->coded_width = s1->avctx->coded_width; s->avctx->width = s1->avctx->width; s->avctx->height = s1->avctx->height; s->coded_picture_number = s1->coded_picture_number; s->picture_number = s1->picture_number; s->input_picture_number = s1->input_picture_number; memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture)); memcpy(&s->last_picture, &s1->last_picture, (char *) &s1->last_picture_ptr - (char *) &s1->last_picture); // reset s->picture[].f.extended_data to s->picture[].f.data for (i = 0; i < s->picture_count; i++) s->picture[i].f.extended_data = s->picture[i].f.data; s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1); s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1); s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1); // Error/bug resilience s->next_p_frame_damaged = s1->next_p_frame_damaged; s->workaround_bugs = s1->workaround_bugs; // MPEG4 timing info memcpy(&s->time_increment_bits, &s1->time_increment_bits, (char *) &s1->shape - (char *) &s1->time_increment_bits); // B-frame info s->max_b_frames = s1->max_b_frames; s->low_delay = s1->low_delay; s->dropable 
= s1->dropable; // DivX handling (doesn't work) s->divx_packed = s1->divx_packed; if (s1->bitstream_buffer) { if (s1->bitstream_buffer_size + FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size) av_fast_malloc(&s->bitstream_buffer, &s->allocated_bitstream_buffer_size, s1->allocated_bitstream_buffer_size); s->bitstream_buffer_size = s1->bitstream_buffer_size; memcpy(s->bitstream_buffer, s1->bitstream_buffer, s1->bitstream_buffer_size); memset(s->bitstream_buffer + s->bitstream_buffer_size, 0, FF_INPUT_BUFFER_PADDING_SIZE); } // MPEG2/interlacing info memcpy(&s->progressive_sequence, &s1->progressive_sequence, (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence); if (!s1->first_field) { s->last_pict_type = s1->pict_type; if (s1->current_picture_ptr) s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality; if (s1->pict_type != AV_PICTURE_TYPE_B) { s->last_non_b_pict_type = s1->pict_type; } } return 0; } | 16,288 |
1 | static void init_proc_970FX (CPUPPCState *env) { gen_spr_ne_601(env); gen_spr_7xx(env); /* Time base */ gen_tbl(env); /* Hardware implementation registers */ /* XXX : not implemented */ spr_register(env, SPR_HID0, "HID0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_clear, 0x60000000); /* XXX : not implemented */ spr_register(env, SPR_HID1, "HID1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_750FX_HID2, "HID2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_970_HID5, "HID5", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, POWERPC970_HID5_INIT); /* XXX : not implemented */ spr_register(env, SPR_L2CR, "L2CR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, NULL, 0x00000000); /* Memory management */ /* XXX: not correct */ gen_low_BATs(env); /* XXX : not implemented */ spr_register(env, SPR_MMUCFG, "MMUCFG", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, SPR_NOACCESS, 0x00000000); /* TOFIX */ /* XXX : not implemented */ spr_register(env, SPR_MMUCSR0, "MMUCSR0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* TOFIX */ spr_register(env, SPR_HIOR, "SPR_HIOR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_hior, &spr_write_hior, 0x00000000); spr_register(env, SPR_CTRL, "SPR_CTRL", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_UCTRL, "SPR_UCTRL", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_VRSAVE, "SPR_VRSAVE", &spr_read_generic, &spr_write_generic, &spr_read_generic, &spr_write_generic, 0x00000000); #if !defined(CONFIG_USER_ONLY) env->slb_nr = 64; #endif init_excp_970(env); env->dcache_line_size = 128; env->icache_line_size = 128; /* Allocate hardware IRQ controller */ ppc970_irq_init(env); /* Can't find information on what this should 
be on reset. This * value is the one used by 74xx processors. */ vscr_init(env, 0x00010000); } | 16,289 |
1 | int av_fifo_generic_read(AVFifoBuffer *f, int buf_size, void (*func)(void*, void*, int), void* dest) { int size = av_fifo_size(f); if (size < buf_size) return -1; do { int len = FFMIN(f->end - f->rptr, buf_size); if(func) func(dest, f->rptr, len); else{ memcpy(dest, f->rptr, len); dest = (uint8_t*)dest + len; } av_fifo_drain(f, len); buf_size -= len; } while (buf_size > 0); return 0; } | 16,291 |
1 | static int cmp_pkt_sub_ts_pos(const void *a, const void *b) { const AVPacket *s1 = a; const AVPacket *s2 = b; if (s1->pts == s2->pts) { if (s1->pos == s2->pos) return 0; return s1->pos > s2->pos ? 1 : -1; } return s1->pts > s2->pts ? 1 : -1; } | 16,292 |
1 | static void xics_kvm_realize(DeviceState *dev, Error **errp) { KVMXICSState *icpkvm = KVM_XICS(dev); XICSState *icp = XICS_COMMON(dev); int i, rc; Error *error = NULL; struct kvm_create_device xics_create_device = { .type = KVM_DEV_TYPE_XICS, .flags = 0, }; if (!kvm_enabled() || !kvm_check_extension(kvm_state, KVM_CAP_IRQ_XICS)) { error_setg(errp, "KVM and IRQ_XICS capability must be present for in-kernel XICS"); goto fail; } icpkvm->set_xive_token = spapr_rtas_register("ibm,set-xive", rtas_dummy); icpkvm->get_xive_token = spapr_rtas_register("ibm,get-xive", rtas_dummy); icpkvm->int_off_token = spapr_rtas_register("ibm,int-off", rtas_dummy); icpkvm->int_on_token = spapr_rtas_register("ibm,int-on", rtas_dummy); rc = kvmppc_define_rtas_kernel_token(icpkvm->set_xive_token, "ibm,set-xive"); if (rc < 0) { error_setg(errp, "kvmppc_define_rtas_kernel_token: ibm,set-xive"); goto fail; } rc = kvmppc_define_rtas_kernel_token(icpkvm->get_xive_token, "ibm,get-xive"); if (rc < 0) { error_setg(errp, "kvmppc_define_rtas_kernel_token: ibm,get-xive"); goto fail; } rc = kvmppc_define_rtas_kernel_token(icpkvm->int_on_token, "ibm,int-on"); if (rc < 0) { error_setg(errp, "kvmppc_define_rtas_kernel_token: ibm,int-on"); goto fail; } rc = kvmppc_define_rtas_kernel_token(icpkvm->int_off_token, "ibm,int-off"); if (rc < 0) { error_setg(errp, "kvmppc_define_rtas_kernel_token: ibm,int-off"); goto fail; } /* Create the kernel ICP */ rc = kvm_vm_ioctl(kvm_state, KVM_CREATE_DEVICE, &xics_create_device); if (rc < 0) { error_setg_errno(errp, -rc, "Error on KVM_CREATE_DEVICE for XICS"); goto fail; } icpkvm->kernel_xics_fd = xics_create_device.fd; object_property_set_bool(OBJECT(icp->ics), true, "realized", &error); if (error) { error_propagate(errp, error); goto fail; } assert(icp->nr_servers); for (i = 0; i < icp->nr_servers; i++) { object_property_set_bool(OBJECT(&icp->ss[i]), true, "realized", &error); if (error) { error_propagate(errp, error); goto fail; } } kvm_kernel_irqchip = true; 
kvm_irqfds_allowed = true; kvm_msi_via_irqfd_allowed = true; kvm_gsi_direct_mapping = true; return; fail: kvmppc_define_rtas_kernel_token(0, "ibm,set-xive"); kvmppc_define_rtas_kernel_token(0, "ibm,get-xive"); kvmppc_define_rtas_kernel_token(0, "ibm,int-on"); kvmppc_define_rtas_kernel_token(0, "ibm,int-off"); } | 16,295 |
1 | static int decode_lt_rps(HEVCContext *s, LongTermRPS *rps, GetBitContext *gb) { const HEVCSPS *sps = s->ps.sps; int max_poc_lsb = 1 << sps->log2_max_poc_lsb; int prev_delta_msb = 0; unsigned int nb_sps = 0, nb_sh; int i; rps->nb_refs = 0; if (!sps->long_term_ref_pics_present_flag) return 0; if (sps->num_long_term_ref_pics_sps > 0) nb_sps = get_ue_golomb_long(gb); nb_sh = get_ue_golomb_long(gb); if (nb_sps > sps->num_long_term_ref_pics_sps) return AVERROR_INVALIDDATA; if (nb_sh + (uint64_t)nb_sps > FF_ARRAY_ELEMS(rps->poc)) return AVERROR_INVALIDDATA; rps->nb_refs = nb_sh + nb_sps; for (i = 0; i < rps->nb_refs; i++) { uint8_t delta_poc_msb_present; if (i < nb_sps) { uint8_t lt_idx_sps = 0; if (sps->num_long_term_ref_pics_sps > 1) lt_idx_sps = get_bits(gb, av_ceil_log2(sps->num_long_term_ref_pics_sps)); rps->poc[i] = sps->lt_ref_pic_poc_lsb_sps[lt_idx_sps]; rps->used[i] = sps->used_by_curr_pic_lt_sps_flag[lt_idx_sps]; } else { rps->poc[i] = get_bits(gb, sps->log2_max_poc_lsb); rps->used[i] = get_bits1(gb); } delta_poc_msb_present = get_bits1(gb); if (delta_poc_msb_present) { int delta = get_ue_golomb_long(gb); if (i && i != nb_sps) delta += prev_delta_msb; rps->poc[i] += s->poc - delta * max_poc_lsb - s->sh.pic_order_cnt_lsb; prev_delta_msb = delta; } } return 0; } | 16,296 |
1 | static void xenstore_update_be(char *watch, char *type, int dom, struct XenDevOps *ops) { struct XenDevice *xendev; char path[XEN_BUFSIZE], *dom0, *bepath; unsigned int len, dev; dom0 = xs_get_domain_path(xenstore, 0); len = snprintf(path, sizeof(path), "%s/backend/%s/%d", dom0, type, dom); free(dom0); if (strncmp(path, watch, len) != 0) { return; } if (sscanf(watch+len, "/%u/%255s", &dev, path) != 2) { strcpy(path, ""); if (sscanf(watch+len, "/%u", &dev) != 1) { dev = -1; } } if (dev == -1) { return; } xendev = xen_be_get_xendev(type, dom, dev, ops); if (xendev != NULL) { bepath = xs_read(xenstore, 0, xendev->be, &len); if (bepath == NULL) { xen_be_del_xendev(dom, dev); } else { free(bepath); xen_be_backend_changed(xendev, path); xen_be_check_state(xendev); } } } | 16,297 |
1 | static int open_url(HLSContext *c, URLContext **uc, const char *url, AVDictionary *opts) { AVDictionary *tmp = NULL; int ret; const char *proto_name = avio_find_protocol_name(url); // only http(s) & file are allowed if (!av_strstart(proto_name, "http", NULL) && !av_strstart(proto_name, "file", NULL)) return AVERROR_INVALIDDATA; if (!strncmp(proto_name, url, strlen(proto_name)) && url[strlen(proto_name)] == ':') ; else if (strcmp(proto_name, "file") || !strcmp(url, "file,")) return AVERROR_INVALIDDATA; av_dict_copy(&tmp, c->avio_opts, 0); av_dict_copy(&tmp, opts, 0); ret = ffurl_open(uc, url, AVIO_FLAG_READ, c->interrupt_callback, &tmp); if( ret >= 0) { // update cookies on http response with setcookies. URLContext *u = *uc; update_options(&c->cookies, "cookies", u->priv_data); av_dict_set(&opts, "cookies", c->cookies, 0); } av_dict_free(&tmp); return ret; } | 16,298 |
1 | static int rm_assemble_video_frame(AVFormatContext *s, RMContext *rm, AVPacket *pkt, int len) { ByteIOContext *pb = &s->pb; int hdr, seq, pic_num, len2, pos; int type; int ssize; hdr = get_byte(pb); len--; type = hdr >> 6; switch(type){ case 0: // slice case 2: // last slice seq = get_byte(pb); len--; len2 = get_num(pb, &len); pos = get_num(pb, &len); pic_num = get_byte(pb); len--; rm->remaining_len = len; break; case 1: //whole frame seq = get_byte(pb); len--; if(av_new_packet(pkt, len + 9) < 0) return AVERROR(EIO); pkt->data[0] = 0; AV_WL32(pkt->data + 1, 1); AV_WL32(pkt->data + 5, 0); get_buffer(pb, pkt->data + 9, len); rm->remaining_len = 0; return 0; case 3: //frame as a part of packet len2 = get_num(pb, &len); pos = get_num(pb, &len); pic_num = get_byte(pb); len--; rm->remaining_len = len - len2; if(av_new_packet(pkt, len2 + 9) < 0) return AVERROR(EIO); pkt->data[0] = 0; AV_WL32(pkt->data + 1, 1); AV_WL32(pkt->data + 5, 0); get_buffer(pb, pkt->data + 9, len2); return 0; } //now we have to deal with single slice if((seq & 0x7F) == 1 || rm->curpic_num != pic_num){ rm->slices = ((hdr & 0x3F) << 1) + 1; ssize = len2 + 8*rm->slices + 1; rm->videobuf = av_realloc(rm->videobuf, ssize); rm->videobufsize = ssize; rm->videobufpos = 8*rm->slices + 1; rm->cur_slice = 0; rm->curpic_num = pic_num; rm->pktpos = url_ftell(pb); } if(type == 2){ len = FFMIN(len, pos); pos = len2 - pos; } if(++rm->cur_slice > rm->slices) return 1; AV_WL32(rm->videobuf - 7 + 8*rm->cur_slice, 1); AV_WL32(rm->videobuf - 3 + 8*rm->cur_slice, rm->videobufpos - 8*rm->slices - 1); if(rm->videobufpos + len > rm->videobufsize) return 1; if (get_buffer(pb, rm->videobuf + rm->videobufpos, len) != len) return AVERROR(EIO); rm->videobufpos += len, rm->remaining_len-= len; if(type == 2 || (rm->videobufpos) == rm->videobufsize){ rm->videobuf[0] = rm->cur_slice-1; if(av_new_packet(pkt, rm->videobufpos - 8*(rm->slices - rm->cur_slice)) < 0) return AVERROR(ENOMEM); memcpy(pkt->data, rm->videobuf, 1 + 
8*rm->cur_slice); memcpy(pkt->data + 1 + 8*rm->cur_slice, rm->videobuf + 1 + 8*rm->slices, rm->videobufpos - 1 - 8*rm->slices); pkt->pts = AV_NOPTS_VALUE; pkt->pos = rm->pktpos; return 0; } return 1; } | 16,299 |
0 | static int sunrast_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; const uint8_t *buf_end = avpkt->data + avpkt->size; SUNRASTContext * const s = avctx->priv_data; AVFrame *picture = data; AVFrame * const p = &s->picture; unsigned int w, h, depth, type, maptype, maplength, stride, x, y, len, alen; uint8_t *ptr; const uint8_t *bufstart = buf; if (avpkt->size < 32) return AVERROR_INVALIDDATA; if (AV_RB32(buf) != 0x59a66a95) { av_log(avctx, AV_LOG_ERROR, "this is not sunras encoded data\n"); return -1; } w = AV_RB32(buf+4); h = AV_RB32(buf+8); depth = AV_RB32(buf+12); type = AV_RB32(buf+20); maptype = AV_RB32(buf+24); maplength = AV_RB32(buf+28); buf += 32; if (type == RT_FORMAT_TIFF || type == RT_FORMAT_IFF) { av_log(avctx, AV_LOG_ERROR, "unsupported (compression) type\n"); return -1; } if (type < RT_OLD || type > RT_FORMAT_IFF) { av_log(avctx, AV_LOG_ERROR, "invalid (compression) type\n"); return -1; } if (av_image_check_size(w, h, 0, avctx)) { av_log(avctx, AV_LOG_ERROR, "invalid image size\n"); return -1; } if (maptype & ~1) { av_log(avctx, AV_LOG_ERROR, "invalid colormap type\n"); return -1; } switch (depth) { case 1: avctx->pix_fmt = PIX_FMT_MONOWHITE; break; case 8: avctx->pix_fmt = PIX_FMT_PAL8; break; case 24: avctx->pix_fmt = (type == RT_FORMAT_RGB) ? 
PIX_FMT_RGB24 : PIX_FMT_BGR24; break; default: av_log(avctx, AV_LOG_ERROR, "invalid depth\n"); return -1; } if (p->data[0]) avctx->release_buffer(avctx, p); if (w != avctx->width || h != avctx->height) avcodec_set_dimensions(avctx, w, h); if (avctx->get_buffer(avctx, p) < 0) { av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); return -1; } p->pict_type = AV_PICTURE_TYPE_I; if (buf_end - buf < maplength) return AVERROR_INVALIDDATA; if (depth != 8 && maplength) { av_log(avctx, AV_LOG_WARNING, "useless colormap found or file is corrupted, trying to recover\n"); } else if (depth == 8) { unsigned int len = maplength / 3; if (!maplength) { av_log(avctx, AV_LOG_ERROR, "colormap expected\n"); return -1; } if (maplength % 3 || maplength > 768) { av_log(avctx, AV_LOG_WARNING, "invalid colormap length\n"); return -1; } ptr = p->data[1]; for (x=0; x<len; x++, ptr+=4) *(uint32_t *)ptr = (buf[x]<<16) + (buf[len+x]<<8) + buf[len+len+x]; } buf += maplength; ptr = p->data[0]; stride = p->linesize[0]; /* scanlines are aligned on 16 bit boundaries */ len = (depth * w + 7) >> 3; alen = len + (len&1); if (type == RT_BYTE_ENCODED) { int value, run; uint8_t *end = ptr + h*stride; x = 0; while (ptr != end && buf < buf_end) { run = 1; if (buf_end - buf < 1) return AVERROR_INVALIDDATA; if ((value = *buf++) == 0x80) { run = *buf++ + 1; if (run != 1) value = *buf++; } while (run--) { if (x < len) ptr[x] = value; if (++x >= alen) { x = 0; ptr += stride; if (ptr == end) break; } } } } else { for (y=0; y<h; y++) { if (buf_end - buf < len) break; memcpy(ptr, buf, len); ptr += stride; buf += alen; } } *picture = s->picture; *data_size = sizeof(AVFrame); return buf - bufstart; } | 16,300 |
0 | static void external_snapshot_prepare(BlkTransactionStates *common, Error **errp) { BlockDriver *proto_drv; BlockDriver *drv; int flags, ret; Error *local_err = NULL; const char *device; const char *new_image_file; const char *format = "qcow2"; enum NewImageMode mode = NEW_IMAGE_MODE_ABSOLUTE_PATHS; ExternalSnapshotStates *states = DO_UPCAST(ExternalSnapshotStates, common, common); TransactionAction *action = common->action; /* get parameters */ g_assert(action->kind == TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_SYNC); device = action->blockdev_snapshot_sync->device; new_image_file = action->blockdev_snapshot_sync->snapshot_file; if (action->blockdev_snapshot_sync->has_format) { format = action->blockdev_snapshot_sync->format; } if (action->blockdev_snapshot_sync->has_mode) { mode = action->blockdev_snapshot_sync->mode; } /* start processing */ drv = bdrv_find_format(format); if (!drv) { error_set(errp, QERR_INVALID_BLOCK_FORMAT, format); return; } states->old_bs = bdrv_find(device); if (!states->old_bs) { error_set(errp, QERR_DEVICE_NOT_FOUND, device); return; } if (!bdrv_is_inserted(states->old_bs)) { error_set(errp, QERR_DEVICE_HAS_NO_MEDIUM, device); return; } if (bdrv_in_use(states->old_bs)) { error_set(errp, QERR_DEVICE_IN_USE, device); return; } if (!bdrv_is_read_only(states->old_bs)) { if (bdrv_flush(states->old_bs)) { error_set(errp, QERR_IO_ERROR); return; } } flags = states->old_bs->open_flags; proto_drv = bdrv_find_protocol(new_image_file); if (!proto_drv) { error_set(errp, QERR_INVALID_BLOCK_FORMAT, format); return; } /* create new image w/backing file */ if (mode != NEW_IMAGE_MODE_EXISTING) { bdrv_img_create(new_image_file, format, states->old_bs->filename, states->old_bs->drv->format_name, NULL, -1, flags, &local_err, false); if (error_is_set(&local_err)) { error_propagate(errp, local_err); return; } } /* We will manually add the backing_hd field to the bs later */ states->new_bs = bdrv_new(""); /* TODO Inherit bs->options or only take explicit 
options with an * extended QMP command? */ ret = bdrv_open(states->new_bs, new_image_file, NULL, flags | BDRV_O_NO_BACKING, drv); if (ret != 0) { error_setg_file_open(errp, -ret, new_image_file); } } | 16,301 |
0 | static void virtqueue_map_iovec(struct iovec *sg, hwaddr *addr, unsigned int *num_sg, unsigned int max_size, int is_write) { unsigned int i; hwaddr len; /* Note: this function MUST validate input, some callers * are passing in num_sg values received over the network. */ /* TODO: teach all callers that this can fail, and return failure instead * of asserting here. * When we do, we might be able to re-enable NDEBUG below. */ #ifdef NDEBUG #error building with NDEBUG is not supported #endif assert(*num_sg <= max_size); for (i = 0; i < *num_sg; i++) { len = sg[i].iov_len; sg[i].iov_base = cpu_physical_memory_map(addr[i], &len, is_write); if (!sg[i].iov_base) { error_report("virtio: error trying to map MMIO memory"); exit(1); } if (len != sg[i].iov_len) { error_report("virtio: unexpected memory split"); exit(1); } } } | 16,302 |
0 | static void bdrv_inherited_options(int *child_flags, QDict *child_options, int parent_flags, QDict *parent_options) { int flags = parent_flags; /* Enable protocol handling, disable format probing for bs->file */ flags |= BDRV_O_PROTOCOL; /* If the cache mode isn't explicitly set, inherit direct and no-flush from * the parent. */ qdict_copy_default(child_options, parent_options, BDRV_OPT_CACHE_DIRECT); qdict_copy_default(child_options, parent_options, BDRV_OPT_CACHE_NO_FLUSH); /* Inherit the read-only option from the parent if it's not set */ qdict_copy_default(child_options, parent_options, BDRV_OPT_READ_ONLY); /* Our block drivers take care to send flushes and respect unmap policy, * so we can default to enable both on lower layers regardless of the * corresponding parent options. */ flags |= BDRV_O_UNMAP; /* Clear flags that only apply to the top layer */ flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING | BDRV_O_COPY_ON_READ | BDRV_O_NO_IO); *child_flags = flags; } | 16,303 |
0 | static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1, intptr_t arg2) { int opi, opx; assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32); if (type == TCG_TYPE_I32) { opi = STW, opx = STWX; } else { opi = STD, opx = STDX; } tcg_out_mem_long(s, opi, opx, arg, arg1, arg2); } | 16,304 |
/* Deliver (or queue) IRQ n_IRQ to CPU n_CPU, applying the OpenPIC priority
 * rules.  The interrupt is raised immediately when it beats both the CPU's
 * current task priority (CTPR) and any already-raised or in-service IRQ;
 * otherwise it is only recorded in the 'raised' bitmap. */
static void IRQ_local_pipe(OpenPICState *opp, int n_CPU, int n_IRQ)
{
    IRQDest *dst;
    IRQSource *src;
    int priority;

    dst = &opp->dst[n_CPU];
    src = &opp->src[n_IRQ];

    if (src->output != OPENPIC_OUTPUT_INT) {
        /* On Freescale MPIC, critical interrupts ignore priority,
         * IACK, EOI, etc. Before MPIC v4.1 they also ignore
         * masking.
         */
        src->ivpr |= IVPR_ACTIVITY_MASK;
        DPRINTF("%s: Raise OpenPIC output %d cpu %d irq %d\n",
                __func__, src->output, n_CPU, n_IRQ);
        qemu_irq_raise(opp->dst[n_CPU].irqs[src->output]);
        return;
    }

    priority = IVPR_PRIORITY(src->ivpr);

    /* The CPU's task priority register gates delivery entirely. */
    if (priority <= dst->ctpr) {
        /* Too low priority */
        DPRINTF("%s: IRQ %d has too low priority on CPU %d\n",
                __func__, n_IRQ, n_CPU);
        return;
    }

    if (IRQ_testbit(&dst->raised, n_IRQ)) {
        /* Interrupt miss */
        DPRINTF("%s: IRQ %d was missed on CPU %d\n",
                __func__, n_IRQ, n_CPU);
        return;
    }

    src->ivpr |= IVPR_ACTIVITY_MASK;
    IRQ_setbit(&dst->raised, n_IRQ);

    if (priority < dst->raised.priority) {
        /* A higher priority IRQ is already raised */
        DPRINTF("%s: IRQ %d is hidden by raised IRQ %d on CPU %d\n",
                __func__, n_IRQ, dst->raised.next, n_CPU);
        return;
    }

    /* Recompute the best pending IRQ now that n_IRQ is in the set. */
    IRQ_check(opp, &dst->raised);

    if (IRQ_get_next(opp, &dst->servicing) != -1 &&
        priority <= dst->servicing.priority) {
        DPRINTF("%s: IRQ %d is hidden by servicing IRQ %d on CPU %d\n",
                __func__, n_IRQ, dst->servicing.next, n_CPU);
        /* Already servicing a higher priority IRQ */
        return;
    }

    DPRINTF("Raise OpenPIC INT output cpu %d irq %d\n", n_CPU, n_IRQ);
    qemu_irq_raise(opp->dst[n_CPU].irqs[OPENPIC_OUTPUT_INT]);
}
0 | static void test_acpi_dsdt_table(test_data *data) { AcpiSdtTable dsdt_table; uint32_t addr = le32_to_cpu(data->fadt_table.dsdt); test_dst_table(&dsdt_table, addr); ACPI_ASSERT_CMP(dsdt_table.header.signature, "DSDT"); /* Since DSDT isn't in RSDT, add DSDT to ASL test tables list manually */ g_array_append_val(data->tables, dsdt_table); } | 16,308 |
/* Eventfd callback fired when the host kernel signals a (fatal) error on a
 * VFIO-assigned PCI device; stops the VM rather than risking guest-visible
 * corruption. */
static void vfio_err_notifier_handler(void *opaque)
{
    VFIOPCIDevice *vdev = opaque;

    /* Spurious wakeup: nothing pending on the eventfd. */
    if (!event_notifier_test_and_clear(&vdev->err_notifier)) {
        return;
    }

    /*
     * TBD. Retrieve the error details and decide what action
     * needs to be taken. One of the actions could be to pass
     * the error to the guest and have the guest driver recover
     * from the error. This requires that PCIe capabilities be
     * exposed to the guest. For now, we just terminate the
     * guest to contain the error.
     */

    error_report("%s(%04x:%02x:%02x.%x) Unrecoverable error detected. "
                 "Please collect any data possible and then kill the guest",
                 __func__, vdev->host.domain, vdev->host.bus,
                 vdev->host.slot, vdev->host.function);

    vm_stop(RUN_STATE_INTERNAL_ERROR);
}
0 | static int do_attach(USBDevice *dev) { USBBus *bus = usb_bus_from_device(dev); USBPort *port; if (dev->attached) { error_report("Error: tried to attach usb device %s twice\n", dev->product_desc); return -1; } if (bus->nfree == 0) { error_report("Error: tried to attach usb device %s to a bus with no free ports\n", dev->product_desc); return -1; } if (dev->port_path) { QTAILQ_FOREACH(port, &bus->free, next) { if (strcmp(port->path, dev->port_path) == 0) { break; } } if (port == NULL) { error_report("Error: usb port %s (bus %s) not found\n", dev->port_path, bus->qbus.name); return -1; } } else { port = QTAILQ_FIRST(&bus->free); } if (!(port->speedmask & dev->speedmask)) { error_report("Warning: speed mismatch trying to attach usb device %s to bus %s\n", dev->product_desc, bus->qbus.name); return -1; } dev->attached++; QTAILQ_REMOVE(&bus->free, port, next); bus->nfree--; usb_attach(port, dev); QTAILQ_INSERT_TAIL(&bus->used, port, next); bus->nused++; return 0; } | 16,310 |
0 | static void apply_dependent_coupling(AACContext * ac, SingleChannelElement * target, ChannelElement * cce, int index) { IndividualChannelStream * ics = &cce->ch[0].ics; const uint16_t * offsets = ics->swb_offset; float * dest = target->coeffs; const float * src = cce->ch[0].coeffs; int g, i, group, k, idx = 0; if(ac->m4ac.object_type == AOT_AAC_LTP) { av_log(ac->avccontext, AV_LOG_ERROR, "Dependent coupling is not supported together with LTP\n"); return; } for (g = 0; g < ics->num_window_groups; g++) { for (i = 0; i < ics->max_sfb; i++, idx++) { if (cce->ch[0].band_type[idx] != ZERO_BT) { for (group = 0; group < ics->group_len[g]; group++) { for (k = offsets[i]; k < offsets[i+1]; k++) { // XXX dsputil-ize dest[group*128+k] += cce->coup.gain[index][idx] * src[group*128+k]; } } } } dest += ics->group_len[g]*128; src += ics->group_len[g]*128; } } | 16,311 |
0 | static inline void omap_timer_update(struct omap_mpu_timer_s *timer) { int64_t expires; if (timer->enable && timer->st && timer->rate) { timer->val = timer->reset_val; /* Should skip this on clk enable */ expires = timer->time + muldiv64(timer->val << (timer->ptv + 1), ticks_per_sec, timer->rate); qemu_mod_timer(timer->timer, expires); } else qemu_del_timer(timer->timer); } | 16,312 |
/* Serialize the x86 CPU state to the migration/savevm stream 'f'.
 *
 * The field order here IS the wire format: cpu_load() must read fields
 * back in exactly this order, so do not reorder any of the puts below.
 */
void cpu_save(QEMUFile *f, void *opaque)
{
    CPUState *env = opaque;
    uint16_t fptag, fpus, fpuc, fpregs_format;
    uint32_t hflags;
    int32_t a20_mask;
    int i;

    /* General purpose registers, instruction pointer, flags. */
    for(i = 0; i < CPU_NB_REGS; i++)
        qemu_put_betls(f, &env->regs[i]);
    qemu_put_betls(f, &env->eip);
    qemu_put_betls(f, &env->eflags);
    hflags = env->hflags; /* XXX: suppress most of the redundant hflags */
    qemu_put_be32s(f, &hflags);

    /* FPU */
    fpuc = env->fpuc;
    /* Fold the top-of-stack pointer into bits 11-13 of the status word. */
    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    /* One bit per register: 1 = valid, 0 = empty (inverse of fptags[]). */
    for(i = 0; i < 8; i++) {
        fptag |= ((!env->fptags[i]) << i);
    }

    qemu_put_be16s(f, &fpuc);
    qemu_put_be16s(f, &fpus);
    qemu_put_be16s(f, &fptag);

    /* Record which on-disk FP register encoding follows:
     * 0 = 80-bit mantissa+exponent pairs, 1 = 64-bit doubles. */
#ifdef USE_X86LDOUBLE
    fpregs_format = 0;
#else
    fpregs_format = 1;
#endif
    qemu_put_be16s(f, &fpregs_format);

    for(i = 0; i < 8; i++) {
#ifdef USE_X86LDOUBLE
        {
            uint64_t mant;
            uint16_t exp;
            /* we save the real CPU data (in case of MMX usage only 'mant'
               contains the MMX register */
            cpu_get_fp80(&mant, &exp, env->fpregs[i].d);
            qemu_put_be64(f, mant);
            qemu_put_be16(f, exp);
        }
#else
        /* if we use doubles for float emulation, we save the doubles to
           avoid losing information in case of MMX usage. It can give
           problems if the image is restored on a CPU where long
           doubles are used instead. */
        qemu_put_be64(f, env->fpregs[i].mmx.MMX_Q(0));
#endif
    }

    /* Segment and descriptor-table registers. */
    for(i = 0; i < 6; i++)
        cpu_put_seg(f, &env->segs[i]);
    cpu_put_seg(f, &env->ldt);
    cpu_put_seg(f, &env->tr);
    cpu_put_seg(f, &env->gdt);
    cpu_put_seg(f, &env->idt);

    qemu_put_be32s(f, &env->sysenter_cs);
    qemu_put_be32s(f, &env->sysenter_esp);
    qemu_put_be32s(f, &env->sysenter_eip);

    /* Control registers (CR1 is not architecturally defined; skipped). */
    qemu_put_betls(f, &env->cr[0]);
    qemu_put_betls(f, &env->cr[2]);
    qemu_put_betls(f, &env->cr[3]);
    qemu_put_betls(f, &env->cr[4]);

    /* Debug registers. */
    for(i = 0; i < 8; i++)
        qemu_put_betls(f, &env->dr[i]);

    /* MMU */
    a20_mask = (int32_t) env->a20_mask;
    qemu_put_sbe32s(f, &a20_mask);

    /* XMM */
    qemu_put_be32s(f, &env->mxcsr);
    for(i = 0; i < CPU_NB_REGS; i++) {
        qemu_put_be64s(f, &env->xmm_regs[i].XMM_Q(0));
        qemu_put_be64s(f, &env->xmm_regs[i].XMM_Q(1));
    }

    /* 64-bit mode MSRs. */
#ifdef TARGET_X86_64
    qemu_put_be64s(f, &env->efer);
    qemu_put_be64s(f, &env->star);
    qemu_put_be64s(f, &env->lstar);
    qemu_put_be64s(f, &env->cstar);
    qemu_put_be64s(f, &env->fmask);
    qemu_put_be64s(f, &env->kernelgsbase);
#endif
    qemu_put_be32s(f, &env->smbase);
    qemu_put_be64s(f, &env->pat);
    qemu_put_be32s(f, &env->hflags2);

    /* SVM (nested virtualization) state. */
    qemu_put_be64s(f, &env->vm_hsave);
    qemu_put_be64s(f, &env->vm_vmcb);
    qemu_put_be64s(f, &env->tsc_offset);
    qemu_put_be64s(f, &env->intercept);
    qemu_put_be16s(f, &env->intercept_cr_read);
    qemu_put_be16s(f, &env->intercept_cr_write);
    qemu_put_be16s(f, &env->intercept_dr_read);
    qemu_put_be16s(f, &env->intercept_dr_write);
    qemu_put_be32s(f, &env->intercept_exceptions);
    qemu_put_8s(f, &env->v_tpr);
}
/* Periodic (1 s) timer callback that feeds the McBSP receive path with the
 * number of bytes accumulated at the configured sample rate, then re-arms
 * itself. */
static void omap_mcbsp_source_tick(void *opaque)
{
    struct omap_mcbsp_s *s = (struct omap_mcbsp_s *) opaque;
    /* Shift amount mapping the RCR word-length field to bytes per sample.
     * NOTE(review): the -255 entries appear to mark reserved encodings; a
     * negative shift count below would be undefined behavior — confirm
     * those RCR values cannot be selected by the guest. */
    static const int bps[8] = { 0, 1, 1, 2, 2, 2, -255, -255 };

    if (!s->rx_rate)
        return;
    /* Previous request not yet consumed: the FIFO would overflow. */
    if (s->rx_req)
        printf("%s: Rx FIFO overrun\n", __FUNCTION__);

    s->rx_req = s->rx_rate << bps[(s->rcr[0] >> 5) & 7];

    omap_mcbsp_rx_newdata(s);
    /* Fire again one second from now. */
    timer_mod(s->source_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                   NANOSECONDS_PER_SECOND);
}
/* Backend teardown hook; not implemented, so always signal failure. */
static int xen_9pfs_free(struct XenDevice *xendev)
{
    return -1;
}
0 | static void phys_sections_clear(PhysPageMap *map) { while (map->sections_nb > 0) { MemoryRegionSection *section = &map->sections[--map->sections_nb]; phys_section_destroy(section->mr); } g_free(map->sections); g_free(map->nodes); } | 16,316 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.