label (int64, 0–1) | func1 (string, lengths 23–97k) | id (int64, 0–27.3k) |
---|---|---|
1 | static int handle_utimensat(FsContext *ctx, V9fsPath *fs_path, const struct timespec *buf) { int ret; #ifdef CONFIG_UTIMENSAT int fd; struct handle_data *data = (struct handle_data *)ctx->private; fd = open_by_handle(data->mountfd, fs_path->data, O_NONBLOCK); if (fd < 0) { return fd; } ret = futimens(fd, buf); close(fd); #else ret = -1; errno = ENOSYS; #endif return ret; } | 16,692 |
1 | void object_property_add_str(Object *obj, const char *name, char *(*get)(Object *, Error **), void (*set)(Object *, const char *, Error **), Error **errp) { StringProperty *prop = g_malloc0(sizeof(*prop)); prop->get = get; prop->set = set; object_property_add(obj, name, "string", get ? property_get_str : NULL, set ? property_set_str : NULL, property_release_str, prop, errp); } | 16,694 |
1 | static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn) { int crn = (insn >> 16) & 0xf; int crm = insn & 0xf; int op1 = (insn >> 21) & 7; int op2 = (insn >> 5) & 7; int rt = (insn >> 12) & 0xf; TCGv tmp; if (arm_feature(env, ARM_FEATURE_THUMB2EE)) { if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) { /* TEECR */ if (IS_USER(s)) return 1; tmp = load_reg(s, rt); gen_helper_set_teecr(cpu_env, tmp); dead_tmp(tmp); return 0; } if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) { /* TEEHBR */ if (IS_USER(s) && (env->teecr & 1)) return 1; tmp = load_reg(s, rt); store_cpu_field(tmp, teehbr); return 0; } } fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n", op1, crn, crm, op2); return 1; } | 16,695 |
1 | static void tcp_wait_for_connect(int fd, Error *err, void *opaque) { MigrationState *s = opaque; if (fd < 0) { DPRINTF("migrate connect error: %s\n", error_get_pretty(err)); s->to_dst_file = NULL; migrate_fd_error(s); } else { DPRINTF("migrate connect success\n"); s->to_dst_file = qemu_fopen_socket(fd, "wb"); migrate_fd_connect(s); } } | 16,696 |
1 | static FlatView generate_memory_topology(MemoryRegion *mr) { FlatView view; flatview_init(&view); render_memory_region(&view, mr, 0, addrrange_make(0, UINT64_MAX)); flatview_simplify(&view); return view; } | 16,697 |
1 | static void visit_type_int32(Visitor *v, int *value, const char *name, Error **errp) { int64_t val = *value; visit_type_int(v, &val, name, errp); } | 16,699 |
1 | static inline int tcg_gen_code_common(TCGContext *s, uint8_t *gen_code_buf, int do_search_pc, const uint8_t *searched_pc) { int opc, op_index, macro_op_index; const TCGOpDef *def; unsigned int dead_iargs; const TCGArg *args; #ifdef DEBUG_DISAS if (unlikely(loglevel & CPU_LOG_TB_OP)) { fprintf(logfile, "OP:\n"); tcg_dump_ops(s, logfile); fprintf(logfile, "\n"); } #endif tcg_liveness_analysis(s); #ifdef DEBUG_DISAS if (unlikely(loglevel & CPU_LOG_TB_OP_OPT)) { fprintf(logfile, "OP after la:\n"); tcg_dump_ops(s, logfile); fprintf(logfile, "\n"); } #endif tcg_reg_alloc_start(s); s->code_buf = gen_code_buf; s->code_ptr = gen_code_buf; macro_op_index = -1; args = gen_opparam_buf; op_index = 0; for(;;) { opc = gen_opc_buf[op_index]; #ifdef CONFIG_PROFILER dyngen_table_op_count[opc]++; #endif def = &tcg_op_defs[opc]; #if 0 printf("%s: %d %d %d\n", def->name, def->nb_oargs, def->nb_iargs, def->nb_cargs); // dump_regs(s); #endif switch(opc) { case INDEX_op_mov_i32: #if TCG_TARGET_REG_BITS == 64 case INDEX_op_mov_i64: #endif dead_iargs = s->op_dead_iargs[op_index]; tcg_reg_alloc_mov(s, def, args, dead_iargs); break; case INDEX_op_nop: case INDEX_op_nop1: case INDEX_op_nop2: case INDEX_op_nop3: break; case INDEX_op_nopn: args += args[0]; goto next; case INDEX_op_discard: { TCGTemp *ts; ts = &s->temps[args[0]]; /* mark the temporary as dead */ if (ts->val_type != TEMP_VAL_CONST && !ts->fixed_reg) { if (ts->val_type == TEMP_VAL_REG) s->reg_to_temp[ts->reg] = -1; ts->val_type = TEMP_VAL_DEAD; } } break; case INDEX_op_macro_goto: macro_op_index = op_index; /* only used for exceptions */ op_index = args[0] - 1; args = gen_opparam_buf + args[1]; goto next; case INDEX_op_macro_end: macro_op_index = -1; /* only used for exceptions */ op_index = args[0] - 1; args = gen_opparam_buf + args[1]; goto next; case INDEX_op_macro_start: /* must never happen here */ tcg_abort(); case INDEX_op_set_label: tcg_reg_alloc_bb_end(s); tcg_out_label(s, args[0], (long)s->code_ptr); break; case INDEX_op_call: dead_iargs = s->op_dead_iargs[op_index]; args += tcg_reg_alloc_call(s, def, opc, args, dead_iargs); goto next; case INDEX_op_end: goto the_end; case 0 ... INDEX_op_end - 1: /* legacy dyngen ops */ #ifdef CONFIG_PROFILER { extern int64_t dyngen_old_op_count; dyngen_old_op_count++; } #endif tcg_reg_alloc_bb_end(s); if (do_search_pc) { s->code_ptr += def->copy_size; args += def->nb_args; } else { args = dyngen_op(s, opc, args); } goto next; default: /* Note: in order to speed up the code, it would be much faster to have specialized register allocator functions for some common argument patterns */ dead_iargs = s->op_dead_iargs[op_index]; tcg_reg_alloc_op(s, def, opc, args, dead_iargs); break; } args += def->nb_args; next: ; if (do_search_pc) { if (searched_pc < s->code_ptr) { if (macro_op_index >= 0) return macro_op_index; else return op_index; } } op_index++; #ifndef NDEBUG check_regs(s); #endif } the_end: return -1; } | 16,701 |
1 | static int setup_hwaccel(AVCodecContext *avctx, const enum AVPixelFormat fmt, const char *name) { AVHWAccel *hwa = find_hwaccel(avctx->codec_id, fmt); int ret = 0; if (!hwa) { av_log(avctx, AV_LOG_ERROR, "Could not find an AVHWAccel for the pixel format: %s", name); return AVERROR(ENOENT); } if (hwa->capabilities & HWACCEL_CODEC_CAP_EXPERIMENTAL && avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) { av_log(avctx, AV_LOG_WARNING, "Ignoring experimental hwaccel: %s\n", hwa->name); return AVERROR_PATCHWELCOME; } if (hwa->priv_data_size) { avctx->internal->hwaccel_priv_data = av_mallocz(hwa->priv_data_size); if (!avctx->internal->hwaccel_priv_data) return AVERROR(ENOMEM); } if (hwa->init) { ret = hwa->init(avctx); if (ret < 0) { av_freep(&avctx->internal->hwaccel_priv_data); return ret; } } avctx->hwaccel = hwa; return 0; } | 16,702 |
1 | static void rpza_decode_stream(RpzaContext *s) { int width = s->avctx->width; int stride = s->frame.linesize[0] / 2; int row_inc = stride - 4; int stream_ptr = 0; int chunk_size; unsigned char opcode; int n_blocks; unsigned short colorA = 0, colorB; unsigned short color4[4]; unsigned char index, idx; unsigned short ta, tb; unsigned short *pixels = (unsigned short *)s->frame.data[0]; int row_ptr = 0; int pixel_ptr = 0; int block_ptr; int pixel_x, pixel_y; int total_blocks; /* First byte is always 0xe1. Warn if it's different */ if (s->buf[stream_ptr] != 0xe1) av_log(s->avctx, AV_LOG_ERROR, "First chunk byte is 0x%02x instead of 0xe1\n", s->buf[stream_ptr]); /* Get chunk size, ignoring first byte */ chunk_size = AV_RB32(&s->buf[stream_ptr]) & 0x00FFFFFF; stream_ptr += 4; /* If length mismatch use size from MOV file and try to decode anyway */ if (chunk_size != s->size) av_log(s->avctx, AV_LOG_ERROR, "MOV chunk size != encoded chunk size; using MOV chunk size\n"); chunk_size = s->size; /* Number of 4x4 blocks in frame. */ total_blocks = ((s->avctx->width + 3) / 4) * ((s->avctx->height + 3) / 4); /* Process chunk data */ while (stream_ptr < chunk_size) { opcode = s->buf[stream_ptr++]; /* Get opcode */ n_blocks = (opcode & 0x1f) + 1; /* Extract block counter from opcode */ /* If opcode MSbit is 0, we need more data to decide what to do */ if ((opcode & 0x80) == 0) { colorA = (opcode << 8) | (s->buf[stream_ptr++]); opcode = 0; if ((s->buf[stream_ptr] & 0x80) != 0) { /* Must behave as opcode 110xxxxx, using colorA computed * above. Use fake opcode 0x20 to enter switch block at * the right place */ opcode = 0x20; n_blocks = 1; } } switch (opcode & 0xe0) { /* Skip blocks */ case 0x80: while (n_blocks--) { ADVANCE_BLOCK(); } break; /* Fill blocks with one color */ case 0xa0: colorA = AV_RB16 (&s->buf[stream_ptr]); stream_ptr += 2; while (n_blocks--) { block_ptr = row_ptr + pixel_ptr; for (pixel_y = 0; pixel_y < 4; pixel_y++) { for (pixel_x = 0; pixel_x < 4; pixel_x++){ pixels[block_ptr] = colorA; block_ptr++; } block_ptr += row_inc; } ADVANCE_BLOCK(); } break; /* Fill blocks with 4 colors */ case 0xc0: colorA = AV_RB16 (&s->buf[stream_ptr]); stream_ptr += 2; case 0x20: colorB = AV_RB16 (&s->buf[stream_ptr]); stream_ptr += 2; /* sort out the colors */ color4[0] = colorB; color4[1] = 0; color4[2] = 0; color4[3] = colorA; /* red components */ ta = (colorA >> 10) & 0x1F; tb = (colorB >> 10) & 0x1F; color4[1] |= ((11 * ta + 21 * tb) >> 5) << 10; color4[2] |= ((21 * ta + 11 * tb) >> 5) << 10; /* green components */ ta = (colorA >> 5) & 0x1F; tb = (colorB >> 5) & 0x1F; color4[1] |= ((11 * ta + 21 * tb) >> 5) << 5; color4[2] |= ((21 * ta + 11 * tb) >> 5) << 5; /* blue components */ ta = colorA & 0x1F; tb = colorB & 0x1F; color4[1] |= ((11 * ta + 21 * tb) >> 5); color4[2] |= ((21 * ta + 11 * tb) >> 5); while (n_blocks--) { block_ptr = row_ptr + pixel_ptr; for (pixel_y = 0; pixel_y < 4; pixel_y++) { index = s->buf[stream_ptr++]; for (pixel_x = 0; pixel_x < 4; pixel_x++){ idx = (index >> (2 * (3 - pixel_x))) & 0x03; pixels[block_ptr] = color4[idx]; block_ptr++; } block_ptr += row_inc; } ADVANCE_BLOCK(); } break; /* Fill block with 16 colors */ case 0x00: if (s->size - stream_ptr < 16) return; block_ptr = row_ptr + pixel_ptr; for (pixel_y = 0; pixel_y < 4; pixel_y++) { for (pixel_x = 0; pixel_x < 4; pixel_x++){ /* We already have color of upper left pixel */ if ((pixel_y != 0) || (pixel_x !=0)) { colorA = AV_RB16 (&s->buf[stream_ptr]); stream_ptr += 2; } pixels[block_ptr] = colorA; block_ptr++; } block_ptr += row_inc; } ADVANCE_BLOCK(); break; /* Unknown opcode */ default: av_log(s->avctx, AV_LOG_ERROR, "Unknown opcode %d in rpza chunk." " Skip remaining %d bytes of chunk data.\n", opcode, chunk_size - stream_ptr); } /* Opcode switch */ } } | 16,703 |
1 | static int mlib_YUV2RGB420_24(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY, int srcSliceH, uint8_t* dst[], int dstStride[]){ if(c->srcFormat == PIX_FMT_YUV422P){ srcStride[1] *= 2; srcStride[2] *= 2; } assert(srcStride[1] == srcStride[2]); mlib_VideoColorYUV2RGB420(dst[0]+srcSliceY*dstStride[0], src[0], src[1], src[2], c->dstW, srcSliceH, dstStride[0], srcStride[0], srcStride[1]); return srcSliceH; } | 16,705 |
1 | uint64_t ldq_tce(VIOsPAPRDevice *dev, uint64_t taddr) { uint64_t val; spapr_tce_dma_read(dev, taddr, &val, sizeof(val)); return tswap64(val); } | 16,706 |
1 | static int raw_reopen_prepare(BDRVReopenState *state, BlockReopenQueue *queue, Error **errp) { BDRVRawState *s; BDRVRawReopenState *raw_s; int ret = 0; Error *local_err = NULL; assert(state != NULL); assert(state->bs != NULL); s = state->bs->opaque; state->opaque = g_malloc0(sizeof(BDRVRawReopenState)); raw_s = state->opaque; #ifdef CONFIG_LINUX_AIO raw_s->use_aio = s->use_aio; /* we can use s->aio_ctx instead of a copy, because the use_aio flag is * valid in the 'false' condition even if aio_ctx is set, and raw_set_aio() * won't override aio_ctx if aio_ctx is non-NULL */ if (raw_set_aio(&s->aio_ctx, &raw_s->use_aio, state->flags)) { error_setg(errp, "Could not set AIO state"); return -1; } #endif if (s->type == FTYPE_FD || s->type == FTYPE_CD) { raw_s->open_flags |= O_NONBLOCK; } raw_parse_flags(state->flags, &raw_s->open_flags); raw_s->fd = -1; int fcntl_flags = O_APPEND | O_NONBLOCK; #ifdef O_NOATIME fcntl_flags |= O_NOATIME; #endif #ifdef O_ASYNC /* Not all operating systems have O_ASYNC, and those that don't * will not let us track the state into raw_s->open_flags (typically * you achieve the same effect with an ioctl, for example I_SETSIG * on Solaris). But we do not use O_ASYNC, so that's fine. */ assert((s->open_flags & O_ASYNC) == 0); #endif if ((raw_s->open_flags & ~fcntl_flags) == (s->open_flags & ~fcntl_flags)) { /* dup the original fd */ /* TODO: use qemu fcntl wrapper */ #ifdef F_DUPFD_CLOEXEC raw_s->fd = fcntl(s->fd, F_DUPFD_CLOEXEC, 0); #else raw_s->fd = dup(s->fd); if (raw_s->fd != -1) { qemu_set_cloexec(raw_s->fd); } #endif if (raw_s->fd >= 0) { ret = fcntl_setfl(raw_s->fd, raw_s->open_flags); if (ret) { qemu_close(raw_s->fd); raw_s->fd = -1; } } } /* If we cannot use fcntl, or fcntl failed, fall back to qemu_open() */ if (raw_s->fd == -1) { assert(!(raw_s->open_flags & O_CREAT)); raw_s->fd = qemu_open(state->bs->filename, raw_s->open_flags); if (raw_s->fd == -1) { error_setg_errno(errp, errno, "Could not reopen file"); ret = -1; } } /* Fail already reopen_prepare() if we can't get a working O_DIRECT * alignment with the new fd. */ if (raw_s->fd != -1) { raw_probe_alignment(state->bs, raw_s->fd, &local_err); if (local_err) { qemu_close(raw_s->fd); raw_s->fd = -1; error_propagate(errp, local_err); ret = -EINVAL; } } return ret; } | 16,707 |
1 | int net_init_dump(QemuOpts *opts, const char *name, VLANState *vlan) { int len; const char *file; char def_file[128]; assert(vlan); file = qemu_opt_get(opts, "file"); if (!file) { snprintf(def_file, sizeof(def_file), "qemu-vlan%d.pcap", vlan->id); file = def_file; } len = qemu_opt_get_size(opts, "len", 65536); return net_dump_init(vlan, "dump", name, file, len); } | 16,710 |
1 | static int blk_free(struct XenDevice *xendev) { struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev); struct ioreq *ioreq; if (blkdev->blk || blkdev->sring) { blk_disconnect(xendev); } /* Free persistent grants */ if (blkdev->feature_persistent) { g_tree_destroy(blkdev->persistent_gnts); } while (!QLIST_EMPTY(&blkdev->freelist)) { ioreq = QLIST_FIRST(&blkdev->freelist); QLIST_REMOVE(ioreq, list); qemu_iovec_destroy(&ioreq->v); g_free(ioreq); } g_free(blkdev->params); g_free(blkdev->mode); g_free(blkdev->type); g_free(blkdev->dev); g_free(blkdev->devtype); qemu_bh_delete(blkdev->bh); return 0; } | 16,711 |
1 | static void r2d_init(ram_addr_t ram_size, int vga_ram_size, const char *boot_device, const char *kernel_filename, const char *kernel_cmdline, const char *initrd_filename, const char *cpu_model) { CPUState *env; struct SH7750State *s; ram_addr_t sdram_addr, sm501_vga_ram_addr; qemu_irq *irq; PCIBus *pci; int i; if (!cpu_model) cpu_model = "SH7751R"; env = cpu_init(cpu_model); if (!env) { fprintf(stderr, "Unable to find CPU definition\n"); exit(1); } /* Allocate memory space */ sdram_addr = qemu_ram_alloc(SDRAM_SIZE); cpu_register_physical_memory(SDRAM_BASE, SDRAM_SIZE, sdram_addr); /* Register peripherals */ s = sh7750_init(env); irq = r2d_fpga_init(0x04000000, sh7750_irl(s)); pci = sh_pci_register_bus(r2d_pci_set_irq, r2d_pci_map_irq, irq, 0, 4); sm501_vga_ram_addr = qemu_ram_alloc(SM501_VRAM_SIZE); sm501_init(0x10000000, sm501_vga_ram_addr, SM501_VRAM_SIZE, serial_hds[2]); /* onboard CF (True IDE mode, Master only). */ mmio_ide_init(0x14001000, 0x1400080c, irq[CF_IDE], 1, drives_table[drive_get_index(IF_IDE, 0, 0)].bdrv, NULL); /* NIC: rtl8139 on-board, and 2 slots. */ pci_nic_init(pci, &nd_table[0], 2 << 3, "rtl8139"); for (i = 1; i < nb_nics; i++) pci_nic_init(pci, &nd_table[i], -1, "ne2k_pci"); /* Todo: register on board registers */ { int kernel_size; /* initialization which should be done by firmware */ stl_phys(SH7750_BCR1, 1<<3); /* cs3 SDRAM */ stw_phys(SH7750_BCR2, 3<<(3*2)); /* cs3 32bit */ kernel_size = load_image(kernel_filename, phys_ram_base); if (kernel_size < 0) { fprintf(stderr, "qemu: could not load kernel '%s'\n", kernel_filename); exit(1); } env->pc = SDRAM_BASE | 0xa0000000; /* Start from P2 area */ } } | 16,712 |
1 | MigrationIncomingState *migration_incoming_state_new(QEMUFile* f) { mis_current = g_malloc0(sizeof(MigrationIncomingState)); mis_current->file = f; QLIST_INIT(&mis_current->loadvm_handlers); return mis_current; } | 16,713 |
1 | double av_get_double(void *obj, const char *name, const AVOption **o_out) { int64_t intnum=1; double num=1; int den=1; av_get_number(obj, name, o_out, &num, &den, &intnum); return num*intnum/den; } | 16,714 |
1 | static void laio_cancel(BlockDriverAIOCB *blockacb) { struct qemu_laiocb *laiocb = (struct qemu_laiocb *)blockacb; struct io_event event; int ret; if (laiocb->ret != -EINPROGRESS) return; /* * Note that as of Linux 2.6.31 neither the block device code nor any * filesystem implements cancellation of AIO request. * Thus the polling loop below is the normal code path. */ ret = io_cancel(laiocb->ctx->ctx, &laiocb->iocb, &event); if (ret == 0) { laiocb->ret = -ECANCELED; return; } /* * We have to wait for the iocb to finish. * * The only way to get the iocb status update is by polling the io context. * We might be able to do this slightly more optimal by removing the * O_NONBLOCK flag. */ while (laiocb->ret == -EINPROGRESS) { qemu_laio_completion_cb(&laiocb->ctx->e); } } | 16,715 |
1 | static inline void RENAME(yuv2bgr24_1)(SwsContext *c, const uint16_t *buf0, const uint16_t *ubuf0, const uint16_t *ubuf1, const uint16_t *vbuf0, const uint16_t *vbuf1, const uint16_t *abuf0, uint8_t *dest, int dstW, int uvalpha, enum PixelFormat dstFormat, int flags, int y) { x86_reg uv_off = c->uv_off << 1; const uint16_t *buf1= buf0; //FIXME needed for RGB1/BGR1 if (uvalpha < 2048) { // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster __asm__ volatile( "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" "mov %4, %%"REG_b" \n\t" "push %%"REG_BP" \n\t" YSCALEYUV2RGB1(%%REGBP, %5, %6) "pxor %%mm7, %%mm7 \n\t" WRITEBGR24(%%REGb, 8280(%5), %%REGBP) "pop %%"REG_BP" \n\t" "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest), "a" (&c->redDither), "m"(uv_off) ); } else { __asm__ volatile( "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" "mov %4, %%"REG_b" \n\t" "push %%"REG_BP" \n\t" YSCALEYUV2RGB1b(%%REGBP, %5, %6) "pxor %%mm7, %%mm7 \n\t" WRITEBGR24(%%REGb, 8280(%5), %%REGBP) "pop %%"REG_BP" \n\t" "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest), "a" (&c->redDither), "m"(uv_off) ); } } | 16,716 |
1 | static int avi_read_idx1(AVFormatContext *s, int size) { AVIContext *avi = s->priv_data; AVIOContext *pb = s->pb; int nb_index_entries, i; AVStream *st; AVIStream *ast; unsigned int index, tag, flags, pos, len, first_packet = 1; unsigned last_pos= -1; int64_t idx1_pos, first_packet_pos = 0, data_offset = 0; nb_index_entries = size / 16; if (nb_index_entries <= 0) return -1; idx1_pos = avio_tell(pb); avio_seek(pb, avi->movi_list+4, SEEK_SET); if (avi_sync(s, 1) == 0) { first_packet_pos = avio_tell(pb) - 8; } avi->stream_index = -1; avio_seek(pb, idx1_pos, SEEK_SET); /* Read the entries and sort them in each stream component. */ for(i = 0; i < nb_index_entries; i++) { tag = avio_rl32(pb); flags = avio_rl32(pb); pos = avio_rl32(pb); len = avio_rl32(pb); av_dlog(s, "%d: tag=0x%x flags=0x%x pos=0x%x len=%d/", i, tag, flags, pos, len); index = ((tag & 0xff) - '0') * 10; index += ((tag >> 8) & 0xff) - '0'; if (index >= s->nb_streams) continue; st = s->streams[index]; ast = st->priv_data; if(first_packet && first_packet_pos && len) { data_offset = first_packet_pos - pos; first_packet = 0; } pos += data_offset; av_dlog(s, "%d cum_len=%"PRId64"\n", len, ast->cum_len); if(url_feof(pb)) return -1; if(last_pos == pos) avi->non_interleaved= 1; else if(len || !ast->sample_size) av_add_index_entry(st, pos, ast->cum_len, len, 0, (flags&AVIIF_INDEX) ? AVINDEX_KEYFRAME : 0); ast->cum_len += get_duration(ast, len); last_pos= pos; } return 0; } | 16,718 |
1 | static float quantize_band_cost_bits(struct AACEncContext *s, const float *in, const float *scaled, int size, int scale_idx, int cb, const float lambda, const float uplim, int *bits, int rtz) { return get_band_numbits(s, NULL, in, scaled, size, scale_idx, cb, lambda, uplim, bits); } | 16,719 |
1 | static void coroutine_fn stream_run(void *opaque) { StreamBlockJob *s = opaque; BlockDriverState *bs = s->common.bs; BlockDriverState *base = s->base; int64_t sector_num, end; int error = 0; int ret = 0; int n = 0; void *buf; s->common.len = bdrv_getlength(bs); if (s->common.len < 0) { block_job_completed(&s->common, s->common.len); return; } end = s->common.len >> BDRV_SECTOR_BITS; buf = qemu_blockalign(bs, STREAM_BUFFER_SIZE); /* Turn on copy-on-read for the whole block device so that guest read * requests help us make progress. Only do this when copying the entire * backing chain since the copy-on-read operation does not take base into * account. */ if (!base) { bdrv_enable_copy_on_read(bs); } for (sector_num = 0; sector_num < end; sector_num += n) { uint64_t delay_ns = 0; bool copy; wait: /* Note that even when no rate limit is applied we need to yield * with no pending I/O here so that bdrv_drain_all() returns. */ block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns); if (block_job_is_cancelled(&s->common)) { break; } ret = bdrv_is_allocated(bs, sector_num, STREAM_BUFFER_SIZE / BDRV_SECTOR_SIZE, &n); if (ret == 1) { /* Allocated in the top, no need to copy. */ copy = false; } else if (ret >= 0) { /* Copy if allocated in the intermediate images. Limit to the * known-unallocated area [sector_num, sector_num+n). */ ret = bdrv_is_allocated_above(bs->backing_hd, base, sector_num, n, &n); /* Finish early if end of backing file has been reached */ if (ret == 0 && n == 0) { n = end - sector_num; } copy = (ret == 1); } trace_stream_one_iteration(s, sector_num, n, ret); if (ret >= 0 && copy) { if (s->common.speed) { delay_ns = ratelimit_calculate_delay(&s->limit, n); if (delay_ns > 0) { goto wait; } } ret = stream_populate(bs, sector_num, n, buf); } if (ret < 0) { BlockErrorAction action = block_job_error_action(&s->common, s->common.bs, s->on_error, true, -ret); if (action == BDRV_ACTION_STOP) { n = 0; continue; } if (error == 0) { error = ret; } if (action == BDRV_ACTION_REPORT) { break; } } ret = 0; /* Publish progress */ s->common.offset += n * BDRV_SECTOR_SIZE; } if (!base) { bdrv_disable_copy_on_read(bs); } /* Do not remove the backing file if an error was there but ignored. */ ret = error; if (!block_job_is_cancelled(&s->common) && sector_num == end && ret == 0) { const char *base_id = NULL, *base_fmt = NULL; if (base) { base_id = s->backing_file_id; if (base->drv) { base_fmt = base->drv->format_name; } } ret = bdrv_change_backing_file(bs, base_id, base_fmt); close_unused_images(bs, base, base_id); } qemu_vfree(buf); block_job_completed(&s->common, ret); } | 16,720 |
0 | PCIBus *pci_apb_init(hwaddr special_base, hwaddr mem_base, qemu_irq *ivec_irqs, PCIBus **busA, PCIBus **busB, qemu_irq **pbm_irqs) { DeviceState *dev; SysBusDevice *s; PCIHostState *phb; APBState *d; IOMMUState *is; PCIDevice *pci_dev; PCIBridge *br; /* Ultrasparc PBM main bus */ dev = qdev_create(NULL, TYPE_APB); d = APB_DEVICE(dev); phb = PCI_HOST_BRIDGE(dev); phb->bus = pci_register_bus(DEVICE(phb), "pci", pci_apb_set_irq, pci_pbm_map_irq, d, &d->pci_mmio, get_system_io(), 0, 32, TYPE_PCI_BUS); qdev_init_nofail(dev); s = SYS_BUS_DEVICE(dev); /* apb_config */ sysbus_mmio_map(s, 0, special_base); /* PCI configuration space */ sysbus_mmio_map(s, 1, special_base + 0x1000000ULL); /* pci_ioport */ sysbus_mmio_map(s, 2, special_base + 0x2000000ULL); memory_region_init(&d->pci_mmio, OBJECT(s), "pci-mmio", 0x100000000ULL); memory_region_add_subregion(get_system_memory(), mem_base, &d->pci_mmio); *pbm_irqs = d->pbm_irqs; d->ivec_irqs = ivec_irqs; pci_create_simple(phb->bus, 0, "pbm-pci"); /* APB IOMMU */ is = &d->iommu; memset(is, 0, sizeof(IOMMUState)); memory_region_init_iommu(&is->iommu, sizeof(is->iommu), TYPE_APB_IOMMU_MEMORY_REGION, OBJECT(dev), "iommu-apb", UINT64_MAX); address_space_init(&is->iommu_as, MEMORY_REGION(&is->iommu), "pbm-as"); pci_setup_iommu(phb->bus, pbm_pci_dma_iommu, is); /* APB secondary busses */ pci_dev = pci_create_multifunction(phb->bus, PCI_DEVFN(1, 0), true, TYPE_PBM_PCI_BRIDGE); br = PCI_BRIDGE(pci_dev); pci_bridge_map_irq(br, "pciB", pci_apb_map_irq); qdev_init_nofail(&pci_dev->qdev); *busB = pci_bridge_get_sec_bus(br); pci_dev = pci_create_multifunction(phb->bus, PCI_DEVFN(1, 1), true, TYPE_PBM_PCI_BRIDGE); br = PCI_BRIDGE(pci_dev); pci_bridge_map_irq(br, "pciA", pci_apb_map_irq); qdev_prop_set_bit(DEVICE(pci_dev), "busA", true); qdev_init_nofail(&pci_dev->qdev); *busA = pci_bridge_get_sec_bus(br); return phb->bus; } | 16,722 |
0 | void register_module_init(void (*fn)(void), module_init_type type) { ModuleEntry *e; ModuleTypeList *l; e = qemu_mallocz(sizeof(*e)); e->init = fn; l = find_type(type); TAILQ_INSERT_TAIL(l, e, node); } | 16,725 |
0 | static void spitz_common_init(ram_addr_t ram_size, int vga_ram_size, const char *kernel_filename, const char *kernel_cmdline, const char *initrd_filename, const char *cpu_model, enum spitz_model_e model, int arm_id) { struct pxa2xx_state_s *cpu; struct scoop_info_s *scp0, *scp1 = NULL; if (!cpu_model) cpu_model = (model == terrier) ? "pxa270-c5" : "pxa270-c0"; /* Setup CPU & memory */ if (ram_size < SPITZ_RAM + SPITZ_ROM + PXA2XX_INTERNAL_SIZE) { fprintf(stderr, "This platform requires %i bytes of memory\n", SPITZ_RAM + SPITZ_ROM + PXA2XX_INTERNAL_SIZE); exit(1); } cpu = pxa270_init(spitz_binfo.ram_size, cpu_model); sl_flash_register(cpu, (model == spitz) ? FLASH_128M : FLASH_1024M); cpu_register_physical_memory(0, SPITZ_ROM, qemu_ram_alloc(SPITZ_ROM) | IO_MEM_ROM); /* Setup peripherals */ spitz_keyboard_register(cpu); spitz_ssp_attach(cpu); scp0 = scoop_init(cpu, 0, 0x10800000); if (model != akita) { scp1 = scoop_init(cpu, 1, 0x08800040); } spitz_scoop_gpio_setup(cpu, scp0, scp1); spitz_gpio_setup(cpu, (model == akita) ? 1 : 2); spitz_i2c_setup(cpu); if (model == akita) spitz_akita_i2c_setup(cpu); if (model == terrier) /* A 6.0 GB microdrive is permanently sitting in CF slot 1. */ spitz_microdrive_attach(cpu, 1); else if (model != akita) /* A 4.0 GB microdrive is permanently sitting in CF slot 0. */ spitz_microdrive_attach(cpu, 0); /* Setup initial (reset) machine state */ cpu->env->regs[15] = spitz_binfo.loader_start; spitz_binfo.kernel_filename = kernel_filename; spitz_binfo.kernel_cmdline = kernel_cmdline; spitz_binfo.initrd_filename = initrd_filename; spitz_binfo.board_id = arm_id; arm_load_kernel(cpu->env, &spitz_binfo); sl_bootparam_write(SL_PXA_PARAM_BASE); } | 16,726 |
0 | uint32_t cpu_inl(pio_addr_t addr) { uint32_t val; val = ioport_read(2, addr); trace_cpu_in(addr, val); LOG_IOPORT("inl : %04"FMT_pioaddr" %08"PRIx32"\n", addr, val); return val; } | 16,729 |
0 | static int nbd_co_request(BlockDriverState *bs, NBDRequest *request, QEMUIOVector *qiov) { NBDClientSession *client = nbd_get_client_session(bs); int ret; assert(!qiov || request->type == NBD_CMD_WRITE || request->type == NBD_CMD_READ); ret = nbd_co_send_request(bs, request, request->type == NBD_CMD_WRITE ? qiov : NULL); if (ret < 0) { return ret; } return nbd_co_receive_reply(client, request, request->type == NBD_CMD_READ ? qiov : NULL); } | 16,730 |
0 | static int posix_aio_process_queue(void *opaque) { PosixAioState *s = opaque; struct qemu_paiocb *acb, **pacb; int ret; int result = 0; int async_context_id = get_async_context_id(); for(;;) { pacb = &s->first_aio; for(;;) { acb = *pacb; if (!acb) return result; /* we're only interested in requests in the right context */ if (acb->async_context_id != async_context_id) { pacb = &acb->next; continue; } ret = qemu_paio_error(acb); if (ret == ECANCELED) { /* remove the request */ *pacb = acb->next; qemu_aio_release(acb); result = 1; } else if (ret != EINPROGRESS) { /* end of aio */ if (ret == 0) { ret = qemu_paio_return(acb); if (ret == acb->aio_nbytes) ret = 0; else ret = -EINVAL; } else { ret = -ret; } trace_paio_complete(acb, acb->common.opaque, ret); /* remove the request */ *pacb = acb->next; /* call the callback */ acb->common.cb(acb->common.opaque, ret); qemu_aio_release(acb); result = 1; break; } else { pacb = &acb->next; } } } return result; } | 16,732 |
0 | static uint32_t isa_mmio_readb (void *opaque, target_phys_addr_t addr) { return cpu_inb(addr & IOPORTS_MASK); } | 16,733 |
0 | static int vpc_create(const char *filename, int64_t total_sectors, const char *backing_file, int flags) { uint8_t buf[1024]; struct vhd_footer* footer = (struct vhd_footer*) buf; struct vhd_dyndisk_header* dyndisk_header = (struct vhd_dyndisk_header*) buf; int fd, i; uint16_t cyls; uint8_t heads; uint8_t secs_per_cyl; size_t block_size, num_bat_entries; if (backing_file != NULL) return -ENOTSUP; fd = open(filename, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, 0644); if (fd < 0) return -EIO; // Calculate matching total_size and geometry calculate_geometry(total_sectors, &cyls, &heads, &secs_per_cyl); total_sectors = (int64_t) cyls * heads * secs_per_cyl; // Prepare the Hard Disk Footer memset(buf, 0, 1024); strncpy(footer->creator, "conectix", 8); // TODO Check if "qemu" creator_app is ok for VPC strncpy(footer->creator_app, "qemu", 4); strncpy(footer->creator_os, "Wi2k", 4); footer->features = be32_to_cpu(0x02); footer->version = be32_to_cpu(0x00010000); footer->data_offset = be64_to_cpu(HEADER_SIZE); footer->timestamp = be32_to_cpu(time(NULL) - VHD_TIMESTAMP_BASE); // Version of Virtual PC 2007 footer->major = be16_to_cpu(0x0005); footer->minor =be16_to_cpu(0x0003); footer->orig_size = be64_to_cpu(total_sectors * 512); footer->size = be64_to_cpu(total_sectors * 512); footer->cyls = be16_to_cpu(cyls); footer->heads = heads; footer->secs_per_cyl = secs_per_cyl; footer->type = be32_to_cpu(VHD_DYNAMIC); // TODO uuid is missing footer->checksum = be32_to_cpu(vpc_checksum(buf, HEADER_SIZE)); // Write the footer (twice: at the beginning and at the end) block_size = 0x200000; num_bat_entries = (total_sectors + block_size / 512) / (block_size / 512); if (write(fd, buf, HEADER_SIZE) != HEADER_SIZE) return -EIO; if (lseek(fd, 1536 + ((num_bat_entries * 4 + 511) & ~511), SEEK_SET) < 0) return -EIO; if (write(fd, buf, HEADER_SIZE) != HEADER_SIZE) return -EIO; // Write the initial BAT if (lseek(fd, 3 * 512, SEEK_SET) < 0) return -EIO; memset(buf, 0xFF, 512); for (i = 0; i < (num_bat_entries * 4 + 511) / 512; i++) if (write(fd, buf, 512) != 512) return -EIO; // Prepare the Dynamic Disk Header memset(buf, 0, 1024); strncpy(dyndisk_header->magic, "cxsparse", 8); dyndisk_header->data_offset = be64_to_cpu(0xFFFFFFFF); dyndisk_header->table_offset = be64_to_cpu(3 * 512); dyndisk_header->version = be32_to_cpu(0x00010000); dyndisk_header->block_size = be32_to_cpu(block_size); dyndisk_header->max_table_entries = be32_to_cpu(num_bat_entries); dyndisk_header->checksum = be32_to_cpu(vpc_checksum(buf, 1024)); // Write the header if (lseek(fd, 512, SEEK_SET) < 0) return -EIO; if (write(fd, buf, 1024) != 1024) return -EIO; close(fd); return 0; } | 16,734 |
0 | static int usb_msd_initfn(USBDevice *dev) { MSDState *s = DO_UPCAST(MSDState, dev, dev); BlockDriverState *bs = s->conf.bs; DriveInfo *dinfo; if (!bs) { error_report("usb-msd: drive property not set"); return -1; } /* * Hack alert: this pretends to be a block device, but it's really * a SCSI bus that can serve only a single device, which it * creates automatically. But first it needs to detach from its * blockdev, or else scsi_bus_legacy_add_drive() dies when it * attaches again. * * The hack is probably a bad idea. */ bdrv_detach_dev(bs, &s->dev.qdev); s->conf.bs = NULL; if (!s->serial) { /* try to fall back to value set with legacy -drive serial=... */ dinfo = drive_get_by_blockdev(bs); if (*dinfo->serial) { s->serial = strdup(dinfo->serial); } } if (s->serial) { usb_desc_set_string(dev, STR_SERIALNUMBER, s->serial); } usb_desc_init(dev); scsi_bus_new(&s->bus, &s->dev.qdev, &usb_msd_scsi_info); s->scsi_dev = scsi_bus_legacy_add_drive(&s->bus, bs, 0, !!s->removable, s->conf.bootindex); if (!s->scsi_dev) { return -1; } s->bus.qbus.allow_hotplug = 0; usb_msd_handle_reset(dev); if (bdrv_key_required(bs)) { if (cur_mon) { monitor_read_bdrv_key_start(cur_mon, bs, usb_msd_password_cb, s); s->dev.auto_attach = 0; } else { autostart = 0; } } return 0; } | 16,736 |
0 | static int encode_superframe(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){ WMACodecContext *s = avctx->priv_data; const short *samples = data; int i, total_gain; s->block_len_bits= s->frame_len_bits; //required by non variable block len s->block_len = 1 << s->block_len_bits; apply_window_and_mdct(avctx, samples, avctx->frame_size); if (s->ms_stereo) { float a, b; int i; for(i = 0; i < s->block_len; i++) { a = s->coefs[0][i]*0.5; b = s->coefs[1][i]*0.5; s->coefs[0][i] = a + b; s->coefs[1][i] = a - b; } } if (buf_size < 2 * MAX_CODED_SUPERFRAME_SIZE) { av_log(avctx, AV_LOG_ERROR, "output buffer size is too small\n"); return AVERROR(EINVAL); } #if 1 total_gain= 128; for(i=64; i; i>>=1){ int error= encode_frame(s, s->coefs, buf, buf_size, total_gain-i); if(error<0) total_gain-= i; } #else total_gain= 90; best= encode_frame(s, s->coefs, buf, buf_size, total_gain); for(i=32; i; i>>=1){ int scoreL= encode_frame(s, s->coefs, buf, buf_size, total_gain-i); int scoreR= encode_frame(s, s->coefs, buf, buf_size, total_gain+i); av_log(NULL, AV_LOG_ERROR, "%d %d %d (%d)\n", scoreL, best, scoreR, total_gain); if(scoreL < FFMIN(best, scoreR)){ best = scoreL; total_gain -= i; }else if(scoreR < best){ best = scoreR; total_gain += i; } } #endif encode_frame(s, s->coefs, buf, buf_size, total_gain); assert((put_bits_count(&s->pb) & 7) == 0); i= s->block_align - (put_bits_count(&s->pb)+7)/8; assert(i>=0); while(i--) put_bits(&s->pb, 8, 'N'); flush_put_bits(&s->pb); return put_bits_ptr(&s->pb) - s->pb.buf; } | 16,738 |
0 | static void tgen_movcond(TCGContext *s, TCGType type, TCGCond c, TCGReg dest, TCGReg c1, TCGArg c2, int c2const, TCGReg r3) { int cc; if (facilities & FACILITY_LOAD_ON_COND) { cc = tgen_cmp(s, type, c, c1, c2, c2const, false); tcg_out_insn(s, RRF, LOCGR, dest, r3, cc); } else { c = tcg_invert_cond(c); cc = tgen_cmp(s, type, c, c1, c2, c2const, false); /* Emit: if (cc) goto over; dest = r3; over: */ tcg_out_insn(s, RI, BRC, cc, (4 + 4) >> 1); tcg_out_insn(s, RRE, LGR, dest, r3); } } | 16,739 |
0 | static void parse_numa_node(MachineState *ms, NumaNodeOptions *node, QemuOpts *opts, Error **errp) { uint16_t nodenr; uint16List *cpus = NULL; MachineClass *mc = MACHINE_GET_CLASS(ms); if (node->has_nodeid) { nodenr = node->nodeid; } else { nodenr = nb_numa_nodes; } if (nodenr >= MAX_NODES) { error_setg(errp, "Max number of NUMA nodes reached: %" PRIu16 "", nodenr); return; } if (numa_info[nodenr].present) { error_setg(errp, "Duplicate NUMA nodeid: %" PRIu16, nodenr); return; } if (!mc->cpu_index_to_instance_props) { error_report("NUMA is not supported by this machine-type"); exit(1); } for (cpus = node->cpus; cpus; cpus = cpus->next) { CpuInstanceProperties props; if (cpus->value >= max_cpus) { error_setg(errp, "CPU index (%" PRIu16 ")" " should be smaller than maxcpus (%d)", cpus->value, max_cpus); return; } props = mc->cpu_index_to_instance_props(ms, cpus->value); props.node_id = nodenr; props.has_node_id = true; machine_set_cpu_numa_node(ms, &props, &error_fatal); } if (node->has_mem && node->has_memdev) { error_setg(errp, "cannot specify both mem= and memdev="); return; } if (have_memdevs == -1) { have_memdevs = node->has_memdev; } if (node->has_memdev != have_memdevs) { error_setg(errp, "memdev option must be specified for either " "all or no nodes"); return; } if (node->has_mem) { uint64_t mem_size = node->mem; const char *mem_str = qemu_opt_get(opts, "mem"); /* Fix up legacy suffix-less format */ if (g_ascii_isdigit(mem_str[strlen(mem_str) - 1])) { mem_size <<= 20; } numa_info[nodenr].node_mem = mem_size; } if (node->has_memdev) { Object *o; o = object_resolve_path_type(node->memdev, TYPE_MEMORY_BACKEND, NULL); if (!o) { error_setg(errp, "memdev=%s is ambiguous", node->memdev); return; } object_ref(o); numa_info[nodenr].node_mem = object_property_get_uint(o, "size", NULL); numa_info[nodenr].node_memdev = MEMORY_BACKEND(o); } numa_info[nodenr].present = true; max_numa_nodeid = MAX(max_numa_nodeid, nodenr + 1); } | 16,740 |
0 | static int calculate_geometry(int64_t total_sectors, uint16_t* cyls, uint8_t* heads, uint8_t* secs_per_cyl) { uint32_t cyls_times_heads; if (total_sectors > 65535 * 16 * 255) return -EFBIG; if (total_sectors > 65535 * 16 * 63) { *secs_per_cyl = 255; *heads = 16; cyls_times_heads = total_sectors / *secs_per_cyl; } else { *secs_per_cyl = 17; cyls_times_heads = total_sectors / *secs_per_cyl; *heads = (cyls_times_heads + 1023) / 1024; if (*heads < 4) *heads = 4; if (cyls_times_heads >= (*heads * 1024) || *heads > 16) { *secs_per_cyl = 31; *heads = 16; cyls_times_heads = total_sectors / *secs_per_cyl; } if (cyls_times_heads >= (*heads * 1024)) { *secs_per_cyl = 63; *heads = 16; cyls_times_heads = total_sectors / *secs_per_cyl; } } *cyls = cyls_times_heads / *heads; return 0; } | 16,741 |
0 | static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1, uint64_t a2, bool parallel) { #if !defined(CONFIG_USER_ONLY) || defined(CONFIG_ATOMIC128) uint32_t mem_idx = cpu_mmu_index(env, false); #endif uintptr_t ra = GETPC(); uint32_t fc = extract32(env->regs[0], 0, 8); uint32_t sc = extract32(env->regs[0], 8, 8); uint64_t pl = get_address(env, 1) & -16; uint64_t svh, svl; uint32_t cc; /* Sanity check the function code and storage characteristic. */ if (fc > 1 || sc > 3) { if (!s390_has_feat(S390_FEAT_COMPARE_AND_SWAP_AND_STORE_2)) { goto spec_exception; } if (fc > 2 || sc > 4 || (fc == 2 && (r3 & 1))) { goto spec_exception; } } /* Sanity check the alignments. */ if (extract32(a1, 0, 4 << fc) || extract32(a2, 0, 1 << sc)) { goto spec_exception; } /* Sanity check writability of the store address. */ #ifndef CONFIG_USER_ONLY probe_write(env, a2, mem_idx, ra); #endif /* Note that the compare-and-swap is atomic, and the store is atomic, but the complete operation is not. Therefore we do not need to assert serial context in order to implement this. That said, restart early if we can't support either operation that is supposed to be atomic. */ if (parallel) { int mask = 0; #if !defined(CONFIG_ATOMIC64) mask = -8; #elif !defined(CONFIG_ATOMIC128) mask = -16; #endif if (((4 << fc) | (1 << sc)) & mask) { cpu_loop_exit_atomic(ENV_GET_CPU(env), ra); } } /* All loads happen before all stores. For simplicity, load the entire store value area from the parameter list. */ svh = cpu_ldq_data_ra(env, pl + 16, ra); svl = cpu_ldq_data_ra(env, pl + 24, ra); switch (fc) { case 0: { uint32_t nv = cpu_ldl_data_ra(env, pl, ra); uint32_t cv = env->regs[r3]; uint32_t ov; if (parallel) { #ifdef CONFIG_USER_ONLY uint32_t *haddr = g2h(a1); ov = atomic_cmpxchg__nocheck(haddr, cv, nv); #else TCGMemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, mem_idx); ov = helper_atomic_cmpxchgl_be_mmu(env, a1, cv, nv, oi, ra); #endif } else { ov = cpu_ldl_data_ra(env, a1, ra); cpu_stl_data_ra(env, a1, (ov == cv ? nv : ov), ra); } cc = (ov != cv); env->regs[r3] = deposit64(env->regs[r3], 32, 32, ov); } break; case 1: { uint64_t nv = cpu_ldq_data_ra(env, pl, ra); uint64_t cv = env->regs[r3]; uint64_t ov; if (parallel) { #ifdef CONFIG_ATOMIC64 # ifdef CONFIG_USER_ONLY uint64_t *haddr = g2h(a1); ov = atomic_cmpxchg__nocheck(haddr, cv, nv); # else TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN, mem_idx); ov = helper_atomic_cmpxchgq_be_mmu(env, a1, cv, nv, oi, ra); # endif #else /* Note that we asserted !parallel above. */ g_assert_not_reached(); #endif } else { ov = cpu_ldq_data_ra(env, a1, ra); cpu_stq_data_ra(env, a1, (ov == cv ? nv : ov), ra); } cc = (ov != cv); env->regs[r3] = ov; } break; case 2: { uint64_t nvh = cpu_ldq_data_ra(env, pl, ra); uint64_t nvl = cpu_ldq_data_ra(env, pl + 8, ra); Int128 nv = int128_make128(nvl, nvh); Int128 cv = int128_make128(env->regs[r3 + 1], env->regs[r3]); Int128 ov; if (parallel) { #ifdef CONFIG_ATOMIC128 TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx); ov = helper_atomic_cmpxchgo_be_mmu(env, a1, cv, nv, oi, ra); cc = !int128_eq(ov, cv); #else /* Note that we asserted !parallel above. */ g_assert_not_reached(); #endif } else { uint64_t oh = cpu_ldq_data_ra(env, a1 + 0, ra); uint64_t ol = cpu_ldq_data_ra(env, a1 + 8, ra); ov = int128_make128(ol, oh); cc = !int128_eq(ov, cv); if (cc) { nv = ov; } cpu_stq_data_ra(env, a1 + 0, int128_gethi(nv), ra); cpu_stq_data_ra(env, a1 + 8, int128_getlo(nv), ra); } env->regs[r3 + 0] = int128_gethi(ov); env->regs[r3 + 1] = int128_getlo(ov); } break; default: g_assert_not_reached(); } /* Store only if the comparison succeeded. Note that above we use a pair of 64-bit big-endian loads, so for sc < 3 we must extract the value from the most-significant bits of svh. */ if (cc == 0) { switch (sc) { case 0: cpu_stb_data_ra(env, a2, svh >> 56, ra); break; case 1: cpu_stw_data_ra(env, a2, svh >> 48, ra); break; case 2: cpu_stl_data_ra(env, a2, svh >> 32, ra); break; case 3: cpu_stq_data_ra(env, a2, svh, ra); break; case 4: if (parallel) { #ifdef CONFIG_ATOMIC128 TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx); Int128 sv = int128_make128(svl, svh); helper_atomic_sto_be_mmu(env, a2, sv, oi, ra); #else /* Note that we asserted !parallel above. */ g_assert_not_reached(); #endif } else { cpu_stq_data_ra(env, a2 + 0, svh, ra); cpu_stq_data_ra(env, a2 + 8, svl, ra); } break; default: g_assert_not_reached(); } } return cc; spec_exception: cpu_restore_state(ENV_GET_CPU(env), ra); program_interrupt(env, PGM_SPECIFICATION, 6); g_assert_not_reached(); } | 16,742 |
0 | static void test_visitor_out_native_list_int32(TestOutputVisitorData *data, const void *unused) { test_native_list(data, unused, USER_DEF_NATIVE_LIST_UNION_KIND_S32); } | 16,743 |
0 | static void blockdev_mirror_common(BlockDriverState *bs, BlockDriverState *target, bool has_replaces, const char *replaces, enum MirrorSyncMode sync, bool has_speed, int64_t speed, bool has_granularity, uint32_t granularity, bool has_buf_size, int64_t buf_size, bool has_on_source_error, BlockdevOnError on_source_error, bool has_on_target_error, BlockdevOnError on_target_error, bool has_unmap, bool unmap, Error **errp) { if (!has_speed) { speed = 0; } if (!has_on_source_error) { on_source_error = BLOCKDEV_ON_ERROR_REPORT; } if (!has_on_target_error) { on_target_error = BLOCKDEV_ON_ERROR_REPORT; } if (!has_granularity) { granularity = 0; } if (!has_buf_size) { buf_size = 0; } if (!has_unmap) { unmap = true; } if (granularity != 0 && (granularity < 512 || granularity > 1048576 * 64)) { error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "granularity", "a value in range [512B, 64MB]"); return; } if (granularity & (granularity - 1)) { error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "granularity", "power of 2"); return; } if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_MIRROR_SOURCE, errp)) { return; } if (bdrv_op_is_blocked(target, BLOCK_OP_TYPE_MIRROR_TARGET, errp)) { return; } if (target->blk) { error_setg(errp, "Cannot mirror to an attached block device"); return; } if (!bs->backing && sync == MIRROR_SYNC_MODE_TOP) { sync = MIRROR_SYNC_MODE_FULL; } /* pass the node name to replace to mirror start since it's loose coupling * and will allow to check whether the node still exist at mirror completion */ mirror_start(bs, target, has_replaces ? replaces : NULL, speed, granularity, buf_size, sync, on_source_error, on_target_error, unmap, block_job_cb, bs, errp); } | 16,745 |
0 | void helper_mtc0_index(CPUMIPSState *env, target_ulong arg1) { int num = 1; unsigned int tmp = env->tlb->nb_tlb; do { tmp >>= 1; num <<= 1; } while (tmp); env->CP0_Index = (env->CP0_Index & 0x80000000) | (arg1 & (num - 1)); } | 16,746 |
0 | static ram_addr_t kqemu_ram_alloc(ram_addr_t size) { ram_addr_t addr; if ((last_ram_offset + size) > kqemu_phys_ram_size) { fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n", (uint64_t)size, (uint64_t)kqemu_phys_ram_size); abort(); } addr = last_ram_offset; last_ram_offset = TARGET_PAGE_ALIGN(last_ram_offset + size); return addr; } | 16,747 |
0 | void do_cpu_init(X86CPU *cpu) { CPUX86State *env = &cpu->env; int sipi = env->interrupt_request & CPU_INTERRUPT_SIPI; uint64_t pat = env->pat; cpu_reset(CPU(cpu)); env->interrupt_request = sipi; env->pat = pat; apic_init_reset(env->apic_state); env->halted = !cpu_is_bsp(env); } | 16,748 |
0 | static inline int show_tags(WriterContext *wctx, AVDictionary *tags, int section_id) { AVDictionaryEntry *tag = NULL; int ret = 0; if (!tags) return 0; writer_print_section_header(wctx, section_id); while ((tag = av_dict_get(tags, "", tag, AV_DICT_IGNORE_SUFFIX))) { ret = writer_print_string(wctx, tag->key, tag->value, 0); if (ret < 0) break; } writer_print_section_footer(wctx); return ret; } | 16,749 |
0 | static void gen_dozi(DisasContext *ctx) { target_long simm = SIMM(ctx->opcode); int l1 = gen_new_label(); int l2 = gen_new_label(); tcg_gen_brcondi_tl(TCG_COND_LT, cpu_gpr[rA(ctx->opcode)], simm, l1); tcg_gen_subfi_tl(cpu_gpr[rD(ctx->opcode)], simm, cpu_gpr[rA(ctx->opcode)]); tcg_gen_br(l2); gen_set_label(l1); tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], 0); gen_set_label(l2); if (unlikely(Rc(ctx->opcode) != 0)) gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); } | 16,750 |
0 | static inline void t_gen_swapb(TCGv d, TCGv s) { TCGv t, org_s; t = tcg_temp_new(TCG_TYPE_TL); org_s = tcg_temp_new(TCG_TYPE_TL); /* d and s may refer to the same object. */ tcg_gen_mov_tl(org_s, s); tcg_gen_shli_tl(t, org_s, 8); tcg_gen_andi_tl(d, t, 0xff00ff00); tcg_gen_shri_tl(t, org_s, 8); tcg_gen_andi_tl(t, t, 0x00ff00ff); tcg_gen_or_tl(d, d, t); tcg_temp_free(t); tcg_temp_free(org_s); } | 16,751 |
1 | static inline int get_symbol(RangeCoder *c, uint8_t *state, int is_signed){ if(get_rac(c, state+0)) return 0; else{ int i, e, a; e= 0; while(get_rac(c, state+1 + e)){ //1..10 e++; } assert(e<=9); a= 1; for(i=e-1; i>=0; i--){ a += a + get_rac(c, state+22 + i); //22..31 } if(is_signed && get_rac(c, state+11 + e)) //11..21 return -a; else return a; } } | 16,754 |
1 | static int asink_query_formats(AVFilterContext *ctx) { BufferSinkContext *buf = ctx->priv; AVFilterFormats *formats = NULL; AVFilterChannelLayouts *layouts = NULL; unsigned i; int ret; CHECK_LIST_SIZE(sample_fmts) CHECK_LIST_SIZE(sample_rates) CHECK_LIST_SIZE(channel_layouts) CHECK_LIST_SIZE(channel_counts) if (buf->sample_fmts_size) { for (i = 0; i < NB_ITEMS(buf->sample_fmts); i++) if ((ret = ff_add_format(&formats, buf->sample_fmts[i])) < 0) return ret; ff_set_common_formats(ctx, formats); } if (buf->channel_layouts_size || buf->channel_counts_size || buf->all_channel_counts) { for (i = 0; i < NB_ITEMS(buf->channel_layouts); i++) if ((ret = ff_add_channel_layout(&layouts, buf->channel_layouts[i])) < 0) return ret; for (i = 0; i < NB_ITEMS(buf->channel_counts); i++) if ((ret = ff_add_channel_layout(&layouts, FF_COUNT2LAYOUT(buf->channel_counts[i]))) < 0) return ret; if (buf->all_channel_counts) { if (layouts) av_log(ctx, AV_LOG_WARNING, "Conflicting all_channel_counts and list in options\n"); else if (!(layouts = ff_all_channel_counts())) return AVERROR(ENOMEM); } ff_set_common_channel_layouts(ctx, layouts); } if (buf->sample_rates_size) { formats = NULL; for (i = 0; i < NB_ITEMS(buf->sample_rates); i++) if ((ret = ff_add_format(&formats, buf->sample_rates[i])) < 0) return ret; ff_set_common_samplerates(ctx, formats); } return 0; } | 16,756 |
1 | static int mxf_read_source_package(void *arg, AVIOContext *pb, int tag, int size, UID uid) { MXFPackage *package = arg; switch(tag) { case 0x4403: package->tracks_count = avio_rb32(pb); if (package->tracks_count >= UINT_MAX / sizeof(UID)) return -1; package->tracks_refs = av_malloc(package->tracks_count * sizeof(UID)); if (!package->tracks_refs) return -1; avio_skip(pb, 4); /* useless size of objects, always 16 according to specs */ avio_read(pb, (uint8_t *)package->tracks_refs, package->tracks_count * sizeof(UID)); break; case 0x4401: /* UMID, only get last 16 bytes */ avio_skip(pb, 16); avio_read(pb, package->package_uid, 16); break; case 0x4701: avio_read(pb, package->descriptor_ref, 16); break; } return 0; } | 16,757 |
1 | int ff_lzf_uncompress(GetByteContext *gb, uint8_t **buf, int64_t *size) { int ret = 0; uint8_t *p = *buf; int64_t len = 0; while (bytestream2_get_bytes_left(gb) > 2) { uint8_t s = bytestream2_get_byte(gb); if (s < LZF_LITERAL_MAX) { s++; if (s > *size - len) { *size += *size /2; ret = av_reallocp(buf, *size); if (ret < 0) return ret; } bytestream2_get_buffer(gb, p, s); p += s; len += s; } else { int l = 2 + (s >> 5); int off = ((s & 0x1f) << 8) + 1; if (l == LZF_LONG_BACKREF) l += bytestream2_get_byte(gb); off += bytestream2_get_byte(gb); if (off > len) return AVERROR_INVALIDDATA; if (l > *size - len) { *size += *size / 2; ret = av_reallocp(buf, *size); if (ret < 0) return ret; } av_memcpy_backptr(p, off, l); p += l; len += l; } } *size = len; return 0; } | 16,760 |
1 | static int inject_fake_duration_metadata(RTMPContext *rt) { // We need to insert the metdata packet directly after the FLV // header, i.e. we need to move all other already read data by the // size of our fake metadata packet. uint8_t* p; // Keep old flv_data pointer uint8_t* old_flv_data = rt->flv_data; // Allocate a new flv_data pointer with enough space for the additional package if (!(rt->flv_data = av_malloc(rt->flv_size + 55))) { rt->flv_data = old_flv_data; return AVERROR(ENOMEM); } // Copy FLV header memcpy(rt->flv_data, old_flv_data, 13); // Copy remaining packets memcpy(rt->flv_data + 13 + 55, old_flv_data + 13, rt->flv_size - 13); // Increase the size by the injected packet rt->flv_size += 55; // Delete the old FLV data av_free(old_flv_data); p = rt->flv_data + 13; bytestream_put_byte(&p, FLV_TAG_TYPE_META); bytestream_put_be24(&p, 40); // size of data part (sum of all parts below) bytestream_put_be24(&p, 0); // timestamp bytestream_put_be32(&p, 0); // reserved // first event name as a string bytestream_put_byte(&p, AMF_DATA_TYPE_STRING); // "onMetaData" as AMF string bytestream_put_be16(&p, 10); bytestream_put_buffer(&p, "onMetaData", 10); // mixed array (hash) with size and string/type/data tuples bytestream_put_byte(&p, AMF_DATA_TYPE_MIXEDARRAY); bytestream_put_be32(&p, 1); // metadata_count // "duration" as AMF string bytestream_put_be16(&p, 8); bytestream_put_buffer(&p, "duration", 8); bytestream_put_byte(&p, AMF_DATA_TYPE_NUMBER); bytestream_put_be64(&p, av_double2int(rt->duration)); // Finalise object bytestream_put_be16(&p, 0); // Empty string bytestream_put_byte(&p, AMF_END_OF_OBJECT); bytestream_put_be32(&p, 40); // size of data part (sum of all parts below) return 0; } | 16,761 |
1 | static int dmg_open(BlockDriverState *bs, int flags) { BDRVDMGState *s = bs->opaque; off_t info_begin,info_end,last_in_offset,last_out_offset; uint32_t count; uint32_t max_compressed_size=1,max_sectors_per_chunk=1,i; int64_t offset; bs->read_only = 1; s->n_chunks = 0; s->offsets = s->lengths = s->sectors = s->sectorcounts = NULL; /* read offset of info blocks */ offset = bdrv_getlength(bs->file); if (offset < 0) { goto fail; } offset -= 0x1d8; info_begin = read_off(bs, offset); if (info_begin == 0) { goto fail; } if (read_uint32(bs, info_begin) != 0x100) { goto fail; } count = read_uint32(bs, info_begin + 4); if (count == 0) { goto fail; } info_end = info_begin + count; offset = info_begin + 0x100; /* read offsets */ last_in_offset = last_out_offset = 0; while (offset < info_end) { uint32_t type; count = read_uint32(bs, offset); if(count==0) goto fail; offset += 4; type = read_uint32(bs, offset); if (type == 0x6d697368 && count >= 244) { int new_size, chunk_count; offset += 4; offset += 200; chunk_count = (count-204)/40; new_size = sizeof(uint64_t) * (s->n_chunks + chunk_count); s->types = g_realloc(s->types, new_size/2); s->offsets = g_realloc(s->offsets, new_size); s->lengths = g_realloc(s->lengths, new_size); s->sectors = g_realloc(s->sectors, new_size); s->sectorcounts = g_realloc(s->sectorcounts, new_size); for(i=s->n_chunks;i<s->n_chunks+chunk_count;i++) { s->types[i] = read_uint32(bs, offset); offset += 4; if(s->types[i]!=0x80000005 && s->types[i]!=1 && s->types[i]!=2) { if(s->types[i]==0xffffffff) { last_in_offset = s->offsets[i-1]+s->lengths[i-1]; last_out_offset = s->sectors[i-1]+s->sectorcounts[i-1]; } chunk_count--; i--; offset += 36; continue; } offset += 4; s->sectors[i] = last_out_offset+read_off(bs, offset); offset += 8; s->sectorcounts[i] = read_off(bs, offset); offset += 8; s->offsets[i] = last_in_offset+read_off(bs, offset); offset += 8; s->lengths[i] = read_off(bs, offset); offset += 8; if(s->lengths[i]>max_compressed_size) max_compressed_size = s->lengths[i]; if(s->sectorcounts[i]>max_sectors_per_chunk) max_sectors_per_chunk = s->sectorcounts[i]; } s->n_chunks+=chunk_count; } } /* initialize zlib engine */ s->compressed_chunk = g_malloc(max_compressed_size+1); s->uncompressed_chunk = g_malloc(512*max_sectors_per_chunk); if(inflateInit(&s->zstream) != Z_OK) goto fail; s->current_chunk = s->n_chunks; qemu_co_mutex_init(&s->lock); return 0; fail: return -1; } | 16,762 |
0 | static void idct32(int *out, int *tab) { int i, j; int *t, *t1, xr; const int *xp = costab32; for(j=31;j>=3;j-=2) tab[j] += tab[j - 2]; t = tab + 30; t1 = tab + 2; do { t[0] += t[-4]; t[1] += t[1 - 4]; t -= 4; } while (t != t1); t = tab + 28; t1 = tab + 4; do { t[0] += t[-8]; t[1] += t[1-8]; t[2] += t[2-8]; t[3] += t[3-8]; t -= 8; } while (t != t1); t = tab; t1 = tab + 32; do { t[ 3] = -t[ 3]; t[ 6] = -t[ 6]; t[11] = -t[11]; t[12] = -t[12]; t[13] = -t[13]; t[15] = -t[15]; t += 16; } while (t != t1); t = tab; t1 = tab + 8; do { int x1, x2, x3, x4; x3 = MUL(t[16], FIX(SQRT2*0.5)); x4 = t[0] - x3; x3 = t[0] + x3; x2 = MUL(-(t[24] + t[8]), FIX(SQRT2*0.5)); x1 = MUL((t[8] - x2), xp[0]); x2 = MUL((t[8] + x2), xp[1]); t[ 0] = x3 + x1; t[ 8] = x4 - x2; t[16] = x4 + x2; t[24] = x3 - x1; t++; } while (t != t1); xp += 2; t = tab; t1 = tab + 4; do { xr = MUL(t[28],xp[0]); t[28] = (t[0] - xr); t[0] = (t[0] + xr); xr = MUL(t[4],xp[1]); t[ 4] = (t[24] - xr); t[24] = (t[24] + xr); xr = MUL(t[20],xp[2]); t[20] = (t[8] - xr); t[ 8] = (t[8] + xr); xr = MUL(t[12],xp[3]); t[12] = (t[16] - xr); t[16] = (t[16] + xr); t++; } while (t != t1); xp += 4; for (i = 0; i < 4; i++) { xr = MUL(tab[30-i*4],xp[0]); tab[30-i*4] = (tab[i*4] - xr); tab[ i*4] = (tab[i*4] + xr); xr = MUL(tab[ 2+i*4],xp[1]); tab[ 2+i*4] = (tab[28-i*4] - xr); tab[28-i*4] = (tab[28-i*4] + xr); xr = MUL(tab[31-i*4],xp[0]); tab[31-i*4] = (tab[1+i*4] - xr); tab[ 1+i*4] = (tab[1+i*4] + xr); xr = MUL(tab[ 3+i*4],xp[1]); tab[ 3+i*4] = (tab[29-i*4] - xr); tab[29-i*4] = (tab[29-i*4] + xr); xp += 2; } t = tab + 30; t1 = tab + 1; do { xr = MUL(t1[0], *xp); t1[0] = (t[0] - xr); t[0] = (t[0] + xr); t -= 2; t1 += 2; xp++; } while (t >= tab); for(i=0;i<32;i++) { out[i] = tab[bitinv32[i]]; } } | 16,763 |
0 | static int ac3_decode_frame(AVCodecContext * avctx, void *data, int *data_size, const uint8_t *buf, int buf_size) { AC3DecodeContext *s = avctx->priv_data; int16_t *out_samples = (int16_t *)data; int i, blk, ch, err; /* initialize the GetBitContext with the start of valid AC-3 Frame */ if (s->input_buffer) { /* copy input buffer to decoder context to avoid reading past the end of the buffer, which can be caused by a damaged input stream. */ memcpy(s->input_buffer, buf, FFMIN(buf_size, AC3_MAX_FRAME_SIZE)); init_get_bits(&s->gbc, s->input_buffer, buf_size * 8); } else { init_get_bits(&s->gbc, buf, buf_size * 8); } /* parse the syncinfo */ err = ac3_parse_header(s); if(err) { switch(err) { case AC3_PARSE_ERROR_SYNC: av_log(avctx, AV_LOG_ERROR, "frame sync error : cannot use error concealment\n"); return -1; case AC3_PARSE_ERROR_BSID: av_log(avctx, AV_LOG_ERROR, "invalid bitstream id\n"); break; case AC3_PARSE_ERROR_SAMPLE_RATE: av_log(avctx, AV_LOG_ERROR, "invalid sample rate\n"); break; case AC3_PARSE_ERROR_FRAME_SIZE: av_log(avctx, AV_LOG_ERROR, "invalid frame size\n"); break; case AC3_PARSE_ERROR_FRAME_TYPE: av_log(avctx, AV_LOG_ERROR, "invalid frame type\n"); break; default: av_log(avctx, AV_LOG_ERROR, "invalid header\n"); break; } } /* check that reported frame size fits in input buffer */ if(s->frame_size > buf_size) { av_log(avctx, AV_LOG_ERROR, "incomplete frame\n"); return -1; } /* check for crc mismatch */ if(!err && avctx->error_resilience >= FF_ER_CAREFUL) { if(av_crc(av_crc_get_table(AV_CRC_16_ANSI), 0, &buf[2], s->frame_size-2)) { av_log(avctx, AV_LOG_ERROR, "frame CRC mismatch\n"); err = 1; } } /* if frame is ok, set audio parameters */ if (!err) { avctx->sample_rate = s->sample_rate; avctx->bit_rate = s->bit_rate; /* channel config */ s->out_channels = s->channels; s->output_mode = s->channel_mode; if(s->lfe_on) s->output_mode |= AC3_OUTPUT_LFEON; if (avctx->request_channels > 0 && avctx->request_channels <= 2 && avctx->request_channels < s->channels) { s->out_channels = avctx->request_channels; s->output_mode = avctx->request_channels == 1 ? AC3_CHMODE_MONO : AC3_CHMODE_STEREO; } avctx->channels = s->out_channels; /* set downmixing coefficients if needed */ if(s->channels != s->out_channels && !((s->output_mode & AC3_OUTPUT_LFEON) && s->fbw_channels == s->out_channels)) { set_downmix_coeffs(s); } } else if (!s->out_channels) { s->out_channels = avctx->channels; if(s->out_channels < s->channels) s->output_mode = s->out_channels == 1 ? AC3_CHMODE_MONO : AC3_CHMODE_STEREO; } /* parse the audio blocks */ for (blk = 0; blk < NB_BLOCKS; blk++) { if (!err && ac3_parse_audio_block(s, blk)) { av_log(avctx, AV_LOG_ERROR, "error parsing the audio block\n"); } for (i = 0; i < 256; i++) for (ch = 0; ch < s->out_channels; ch++) *(out_samples++) = s->int_output[ch][i]; } *data_size = NB_BLOCKS * 256 * avctx->channels * sizeof (int16_t); return s->frame_size; } | 16,764 |
1 | static void multi_serial_pci_realize(PCIDevice *dev, Error **errp) { PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(dev); PCIMultiSerialState *pci = DO_UPCAST(PCIMultiSerialState, dev, dev); SerialState *s; Error *err = NULL; int i; switch (pc->device_id) { case 0x0003: pci->ports = 2; break; case 0x0004: pci->ports = 4; break; } assert(pci->ports > 0); assert(pci->ports <= PCI_SERIAL_MAX_PORTS); pci->dev.config[PCI_CLASS_PROG] = pci->prog_if; pci->dev.config[PCI_INTERRUPT_PIN] = 0x01; memory_region_init(&pci->iobar, OBJECT(pci), "multiserial", 8 * pci->ports); pci_register_bar(&pci->dev, 0, PCI_BASE_ADDRESS_SPACE_IO, &pci->iobar); pci->irqs = qemu_allocate_irqs(multi_serial_irq_mux, pci, pci->ports); for (i = 0; i < pci->ports; i++) { s = pci->state + i; s->baudbase = 115200; serial_realize_core(s, &err); if (err != NULL) { error_propagate(errp, err); return; } s->irq = pci->irqs[i]; pci->name[i] = g_strdup_printf("uart #%d", i+1); memory_region_init_io(&s->io, OBJECT(pci), &serial_io_ops, s, pci->name[i], 8); memory_region_add_subregion(&pci->iobar, 8 * i, &s->io); } } | 16,766 |
1 | int ff_h264_decode_picture_parameter_set(H264Context *h, int bit_length){ MpegEncContext * const s = &h->s; unsigned int pps_id= get_ue_golomb(&s->gb); PPS *pps; const int qp_bd_offset = 6*(h->sps.bit_depth_luma-8); int bits_left; if(pps_id >= MAX_PPS_COUNT) { av_log(h->s.avctx, AV_LOG_ERROR, "pps_id (%d) out of range\n", pps_id); return -1; } pps= av_mallocz(sizeof(PPS)); if(pps == NULL) return -1; pps->sps_id= get_ue_golomb_31(&s->gb); if((unsigned)pps->sps_id>=MAX_SPS_COUNT || h->sps_buffers[pps->sps_id] == NULL){ av_log(h->s.avctx, AV_LOG_ERROR, "sps_id out of range\n"); goto fail; } pps->cabac= get_bits1(&s->gb); pps->pic_order_present= get_bits1(&s->gb); pps->slice_group_count= get_ue_golomb(&s->gb) + 1; if(pps->slice_group_count > 1 ){ pps->mb_slice_group_map_type= get_ue_golomb(&s->gb); av_log(h->s.avctx, AV_LOG_ERROR, "FMO not supported\n"); switch(pps->mb_slice_group_map_type){ case 0: #if 0 | for( i = 0; i <= num_slice_groups_minus1; i++ ) | | | | run_length[ i ] |1 |ue(v) | #endif break; case 2: #if 0 | for( i = 0; i < num_slice_groups_minus1; i++ ) | | | |{ | | | | top_left_mb[ i ] |1 |ue(v) | | bottom_right_mb[ i ] |1 |ue(v) | | } | | | #endif break; case 3: case 4: case 5: #if 0 | slice_group_change_direction_flag |1 |u(1) | | slice_group_change_rate_minus1 |1 |ue(v) | #endif break; case 6: #if 0 | slice_group_id_cnt_minus1 |1 |ue(v) | | for( i = 0; i <= slice_group_id_cnt_minus1; i++ | | | |) | | | | slice_group_id[ i ] |1 |u(v) | #endif break; } } pps->ref_count[0]= get_ue_golomb(&s->gb) + 1; pps->ref_count[1]= get_ue_golomb(&s->gb) + 1; if(pps->ref_count[0]-1 > 32-1 || pps->ref_count[1]-1 > 32-1){ av_log(h->s.avctx, AV_LOG_ERROR, "reference overflow (pps)\n"); goto fail; } pps->weighted_pred= get_bits1(&s->gb); pps->weighted_bipred_idc= get_bits(&s->gb, 2); pps->init_qp= get_se_golomb(&s->gb) + 26 + qp_bd_offset; pps->init_qs= get_se_golomb(&s->gb) + 26 + qp_bd_offset; pps->chroma_qp_index_offset[0]= get_se_golomb(&s->gb); pps->deblocking_filter_parameters_present= get_bits1(&s->gb); pps->constrained_intra_pred= get_bits1(&s->gb); pps->redundant_pic_cnt_present = get_bits1(&s->gb); pps->transform_8x8_mode= 0; h->dequant_coeff_pps= -1; //contents of sps/pps can change even if id doesn't, so reinit memcpy(pps->scaling_matrix4, h->sps_buffers[pps->sps_id]->scaling_matrix4, sizeof(pps->scaling_matrix4)); memcpy(pps->scaling_matrix8, h->sps_buffers[pps->sps_id]->scaling_matrix8, sizeof(pps->scaling_matrix8)); bits_left = bit_length - get_bits_count(&s->gb); if (bits_left && (bits_left > 8 || show_bits(&s->gb, bits_left) != 1 << (bits_left - 1))) { pps->transform_8x8_mode= get_bits1(&s->gb); decode_scaling_matrices(h, h->sps_buffers[pps->sps_id], pps, 0, pps->scaling_matrix4, pps->scaling_matrix8); pps->chroma_qp_index_offset[1]= get_se_golomb(&s->gb); //second_chroma_qp_index_offset } else { pps->chroma_qp_index_offset[1]= pps->chroma_qp_index_offset[0]; } build_qp_table(pps, 0, pps->chroma_qp_index_offset[0], h->sps.bit_depth_luma); build_qp_table(pps, 1, pps->chroma_qp_index_offset[1], h->sps.bit_depth_luma); if(pps->chroma_qp_index_offset[0] != pps->chroma_qp_index_offset[1]) pps->chroma_qp_diff= 1; if(s->avctx->debug&FF_DEBUG_PICT_INFO){ av_log(h->s.avctx, AV_LOG_DEBUG, "pps:%u sps:%u %s slice_groups:%d ref:%d/%d %s qp:%d/%d/%d/%d %s %s %s %s\n", pps_id, pps->sps_id, pps->cabac ? "CABAC" : "CAVLC", pps->slice_group_count, pps->ref_count[0], pps->ref_count[1], pps->weighted_pred ? "weighted" : "", pps->init_qp, pps->init_qs, pps->chroma_qp_index_offset[0], pps->chroma_qp_index_offset[1], pps->deblocking_filter_parameters_present ? "LPAR" : "", pps->constrained_intra_pred ? "CONSTR" : "", pps->redundant_pic_cnt_present ? "REDU" : "", pps->transform_8x8_mode ? "8x8DCT" : "" ); } av_free(h->pps_buffers[pps_id]); h->pps_buffers[pps_id]= pps; return 0; fail: av_free(pps); return -1; } | 16,768
1 | int spapr_rtas_device_tree_setup(void *fdt, hwaddr rtas_addr, hwaddr rtas_size) { int ret; int i; ret = fdt_add_mem_rsv(fdt, rtas_addr, rtas_size); if (ret < 0) { fprintf(stderr, "Couldn't add RTAS reserve entry: %s\n", fdt_strerror(ret)); return ret; } ret = qemu_fdt_setprop_cell(fdt, "/rtas", "linux,rtas-base", rtas_addr); if (ret < 0) { fprintf(stderr, "Couldn't add linux,rtas-base property: %s\n", fdt_strerror(ret)); return ret; } ret = qemu_fdt_setprop_cell(fdt, "/rtas", "linux,rtas-entry", rtas_addr); if (ret < 0) { fprintf(stderr, "Couldn't add linux,rtas-entry property: %s\n", fdt_strerror(ret)); return ret; } ret = qemu_fdt_setprop_cell(fdt, "/rtas", "rtas-size", rtas_size); if (ret < 0) { fprintf(stderr, "Couldn't add rtas-size property: %s\n", fdt_strerror(ret)); return ret; } for (i = 0; i < TOKEN_MAX; i++) { struct rtas_call *call = &rtas_table[i]; if (!call->name) { continue; } ret = qemu_fdt_setprop_cell(fdt, "/rtas", call->name, i + TOKEN_BASE); if (ret < 0) { fprintf(stderr, "Couldn't add rtas token for %s: %s\n", call->name, fdt_strerror(ret)); return ret; } } return 0; } | 16,769 |
1 | static int get_phys_addr_mpu(CPUARMState *env, uint32_t address, int access_type, int is_user, hwaddr *phys_ptr, int *prot) { int n; uint32_t mask; uint32_t base; *phys_ptr = address; for (n = 7; n >= 0; n--) { base = env->cp15.c6_region[n]; if ((base & 1) == 0) continue; mask = 1 << ((base >> 1) & 0x1f); /* Keep this shift separate from the above to avoid an (undefined) << 32. */ mask = (mask << 1) - 1; if (((base ^ address) & ~mask) == 0) break; } if (n < 0) return 2; if (access_type == 2) { mask = env->cp15.c5_insn; } else { mask = env->cp15.c5_data; } mask = (mask >> (n * 4)) & 0xf; switch (mask) { case 0: return 1; case 1: if (is_user) return 1; *prot = PAGE_READ | PAGE_WRITE; break; case 2: *prot = PAGE_READ; if (!is_user) *prot |= PAGE_WRITE; break; case 3: *prot = PAGE_READ | PAGE_WRITE; break; case 5: if (is_user) return 1; *prot = PAGE_READ; break; case 6: *prot = PAGE_READ; break; default: /* Bad permission. */ return 1; } *prot |= PAGE_EXEC; return 0; } | 16,770 |
1 | static int huff_build(VLC *vlc, uint8_t *len) { HuffEntry he[256]; uint32_t codes[256]; uint8_t bits[256]; uint8_t syms[256]; uint32_t code; int i; for (i = 0; i < 256; i++) { he[i].sym = 255 - i; he[i].len = len[i]; if (len[i] == 0) return AVERROR_INVALIDDATA; } AV_QSORT(he, 256, HuffEntry, huff_cmp_len); code = 1; for (i = 255; i >= 0; i--) { codes[i] = code >> (32 - he[i].len); bits[i] = he[i].len; syms[i] = he[i].sym; code += 0x80000000u >> (he[i].len - 1); } ff_free_vlc(vlc); return ff_init_vlc_sparse(vlc, FFMIN(he[255].len, 12), 256, bits, sizeof(*bits), sizeof(*bits), codes, sizeof(*codes), sizeof(*codes), syms, sizeof(*syms), sizeof(*syms), 0); } | 16,772 |
1 | static int get_cox(Jpeg2000DecoderContext *s, Jpeg2000CodingStyle *c) { uint8_t byte; if (s->buf_end - s->buf < 5) return AVERROR(EINVAL); /* nreslevels = number of resolution levels = number of decomposition level +1 */ c->nreslevels = bytestream_get_byte(&s->buf) + 1; if (c->nreslevels > JPEG2000_MAX_RESLEVELS) return AVERROR_INVALIDDATA; /* compute number of resolution levels to decode */ if (c->nreslevels < s->reduction_factor) c->nreslevels2decode = 1; else c->nreslevels2decode = c->nreslevels - s->reduction_factor; c->log2_cblk_width = bytestream_get_byte(&s->buf) + 2; // cblk width c->log2_cblk_height = bytestream_get_byte(&s->buf) + 2; // cblk height if (c->log2_cblk_width > 10 || c->log2_cblk_height > 10 || c->log2_cblk_width + c->log2_cblk_height > 12) { av_log(s->avctx, AV_LOG_ERROR, "cblk size invalid\n"); return AVERROR_INVALIDDATA; } c->cblk_style = bytestream_get_byte(&s->buf); if (c->cblk_style != 0) { // cblk style avpriv_request_sample(s->avctx, "Support for extra cblk styles"); return AVERROR_PATCHWELCOME; } c->transform = bytestream_get_byte(&s->buf); // DWT transformation type /* set integer 9/7 DWT in case of BITEXACT flag */ if ((s->avctx->flags & CODEC_FLAG_BITEXACT) && (c->transform == FF_DWT97)) c->transform = FF_DWT97_INT; if (c->csty & JPEG2000_CSTY_PREC) { int i; for (i = 0; i < c->nreslevels; i++) { byte = bytestream_get_byte(&s->buf); c->log2_prec_widths[i] = byte & 0x0F; // precinct PPx c->log2_prec_heights[i] = (byte >> 4) & 0x0F; // precinct PPy } } return 0; } | 16,773 |
1 | void qemu_clock_warp(QEMUClockType type) { int64_t deadline; /* * There are too many global variables to make the "warp" behavior * applicable to other clocks. But a clock argument removes the * need for if statements all over the place. */ if (type != QEMU_CLOCK_VIRTUAL || !use_icount) { return; } /* * If the CPUs have been sleeping, advance QEMU_CLOCK_VIRTUAL timer now. * This ensures that the deadline for the timer is computed correctly below. * This also makes sure that the insn counter is synchronized before the * CPU starts running, in case the CPU is woken by an event other than * the earliest QEMU_CLOCK_VIRTUAL timer. */ icount_warp_rt(NULL); if (!all_cpu_threads_idle() || !qemu_clock_has_timers(QEMU_CLOCK_VIRTUAL)) { timer_del(icount_warp_timer); return; } if (qtest_enabled()) { /* When testing, qtest commands advance icount. */ return; } vm_clock_warp_start = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); /* We want to use the earliest deadline from ALL vm_clocks */ deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL); /* Maintain prior (possibly buggy) behaviour where if no deadline * was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more than * INT32_MAX nanoseconds ahead, we still use INT32_MAX * nanoseconds. */ if ((deadline < 0) || (deadline > INT32_MAX)) { deadline = INT32_MAX; } if (deadline > 0) { /* * Ensure QEMU_CLOCK_VIRTUAL proceeds even when the virtual CPU goes to * sleep. Otherwise, the CPU might be waiting for a future timer * interrupt to wake it up, but the interrupt never comes because * the vCPU isn't running any insns and thus doesn't advance the * QEMU_CLOCK_VIRTUAL. * * An extreme solution for this problem would be to never let VCPUs * sleep in icount mode if there is a pending QEMU_CLOCK_VIRTUAL * timer; rather time could just advance to the next QEMU_CLOCK_VIRTUAL * event. Instead, we do stop VCPUs and only advance QEMU_CLOCK_VIRTUAL * after some "real" time, (related to the time left until the next * event) has passed. The QEMU_CLOCK_REALTIME timer will do this. * This avoids that the warps are visible externally; for example, * you will not be sending network packets continuously instead of * every 100ms. */ timer_mod(icount_warp_timer, vm_clock_warp_start + deadline); } else if (deadline == 0) { qemu_clock_notify(QEMU_CLOCK_VIRTUAL); } } | 16,774
0 | void qpci_plug_device_test(const char *driver, const char *id, uint8_t slot, const char *opts) { QDict *response; char *cmd; cmd = g_strdup_printf("{'execute': 'device_add'," " 'arguments': {" " 'driver': '%s'," " 'addr': '%d'," " %s%s" " 'id': '%s'" "}}", driver, slot, opts ? opts : "", opts ? "," : "", id); response = qmp(cmd); g_free(cmd); g_assert(response); g_assert(!qdict_haskey(response, "error")); QDECREF(response); } | 16,775 |
0 | void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx, uintptr_t retaddr) { bool ret; uint32_t fsr = 0; ARMMMUFaultInfo fi = {}; ret = arm_tlb_fill(cs, addr, is_write, mmu_idx, &fsr, &fi); if (unlikely(ret)) { ARMCPU *cpu = ARM_CPU(cs); CPUARMState *env = &cpu->env; uint32_t syn, exc; unsigned int target_el; bool same_el; if (retaddr) { /* now we have a real cpu fault */ cpu_restore_state(cs, retaddr); } target_el = exception_target_el(env); if (fi.stage2) { target_el = 2; env->cp15.hpfar_el2 = extract64(fi.s2addr, 12, 47) << 4; } same_el = arm_current_el(env) == target_el; /* AArch64 syndrome does not have an LPAE bit */ syn = fsr & ~(1 << 9); /* For insn and data aborts we assume there is no instruction syndrome * information; this is always true for exceptions reported to EL1. */ if (is_write == 2) { syn = syn_insn_abort(same_el, 0, fi.s1ptw, syn); exc = EXCP_PREFETCH_ABORT; } else { syn = syn_data_abort(same_el, 0, 0, fi.s1ptw, is_write == 1, syn); if (is_write == 1 && arm_feature(env, ARM_FEATURE_V6)) { fsr |= (1 << 11); } exc = EXCP_DATA_ABORT; } env->exception.vaddress = addr; env->exception.fsr = fsr; raise_exception(env, exc, syn, target_el); } } | 16,778 |
0 | static int local_fstat(FsContext *fs_ctx, int fd, struct stat *stbuf) { int err; err = fstat(fd, stbuf); if (err) { return err; } if (fs_ctx->fs_sm == SM_MAPPED) { /* Actual credentials are part of extended attrs */ uid_t tmp_uid; gid_t tmp_gid; mode_t tmp_mode; dev_t tmp_dev; if (fgetxattr(fd, "user.virtfs.uid", &tmp_uid, sizeof(uid_t)) > 0) { stbuf->st_uid = tmp_uid; } if (fgetxattr(fd, "user.virtfs.gid", &tmp_gid, sizeof(gid_t)) > 0) { stbuf->st_gid = tmp_gid; } if (fgetxattr(fd, "user.virtfs.mode", &tmp_mode, sizeof(mode_t)) > 0) { stbuf->st_mode = tmp_mode; } if (fgetxattr(fd, "user.virtfs.rdev", &tmp_dev, sizeof(dev_t)) > 0) { stbuf->st_rdev = tmp_dev; } } return err; } | 16,779 |
0 | static void test_visitor_in_native_list_int8(TestInputVisitorData *data, const void *unused) { test_native_list_integer_helper(data, unused, USER_DEF_NATIVE_LIST_UNION_KIND_S8); } | 16,781 |
0 | static void xen_platform_ioport_writeb(void *opaque, hwaddr addr, uint64_t val, unsigned int size) { PCIXenPlatformState *s = opaque; PCIDevice *pci_dev = PCI_DEVICE(s); switch (addr) { case 0: /* Platform flags */ platform_fixed_ioport_writeb(opaque, 0, (uint32_t)val); break; case 4: if (val == 1) { /* * SUSE unplug for Xenlinux * xen-kmp used this since xen-3.0.4, instead the official protocol * from xen-3.3+ It did an unconditional "outl(1, (ioaddr + 4));" * Pre VMDP 1.7 used 4 and 8 depending on how VMDP was configured. * If VMDP was to control both disk and LAN it would use 4. * If it controlled just disk or just LAN, it would use 8 below. */ pci_unplug_disks(pci_dev->bus); pci_unplug_nics(pci_dev->bus); } break; case 8: switch (val) { case 1: pci_unplug_disks(pci_dev->bus); break; case 2: pci_unplug_nics(pci_dev->bus); break; default: log_writeb(s, (uint32_t)val); break; } break; default: break; } } | 16,782 |
0 | void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f) { PerThreadContext *p = avctx->internal->thread_ctx; FrameThreadContext *fctx; AVFrame *dst, *tmp; FF_DISABLE_DEPRECATION_WARNINGS int can_direct_free = !(avctx->active_thread_type & FF_THREAD_FRAME) || avctx->thread_safe_callbacks || ( #if FF_API_GET_BUFFER !avctx->get_buffer && #endif avctx->get_buffer2 == avcodec_default_get_buffer2); FF_ENABLE_DEPRECATION_WARNINGS if (!f->f->data[0]) return; if (avctx->debug & FF_DEBUG_BUFFERS) av_log(avctx, AV_LOG_DEBUG, "thread_release_buffer called on pic %p\n", f); av_buffer_unref(&f->progress); f->owner = NULL; if (can_direct_free) { av_frame_unref(f->f); return; } fctx = p->parent; pthread_mutex_lock(&fctx->buffer_mutex); if (p->num_released_buffers + 1 >= INT_MAX / sizeof(*p->released_buffers)) goto fail; tmp = av_fast_realloc(p->released_buffers, &p->released_buffers_allocated, (p->num_released_buffers + 1) * sizeof(*p->released_buffers)); if (!tmp) goto fail; p->released_buffers = tmp; dst = &p->released_buffers[p->num_released_buffers]; av_frame_move_ref(dst, f->f); p->num_released_buffers++; fail: pthread_mutex_unlock(&fctx->buffer_mutex); } | 16,784 |
0 | static int count_contiguous_clusters_by_type(int nb_clusters, uint64_t *l2_table, int wanted_type) { int i; for (i = 0; i < nb_clusters; i++) { int type = qcow2_get_cluster_type(be64_to_cpu(l2_table[i])); if (type != wanted_type) { break; } } return i; } | 16,785 |
0 | static void scsi_disk_emulate_mode_select(SCSIDiskReq *r, uint8_t *inbuf) { SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); uint8_t *p = inbuf; int cmd = r->req.cmd.buf[0]; int len = r->req.cmd.xfer; int hdr_len = (cmd == MODE_SELECT ? 4 : 8); int bd_len; int pass; /* We only support PF=1, SP=0. */ if ((r->req.cmd.buf[1] & 0x11) != 0x10) { goto invalid_field; } if (len < hdr_len) { goto invalid_param_len; } bd_len = (cmd == MODE_SELECT ? p[3] : lduw_be_p(&p[6])); len -= hdr_len; p += hdr_len; if (len < bd_len) { goto invalid_param_len; } if (bd_len != 0 && bd_len != 8) { goto invalid_param; } len -= bd_len; p += bd_len; /* Ensure no change is made if there is an error! */ for (pass = 0; pass < 2; pass++) { if (mode_select_pages(r, p, len, pass == 1) < 0) { assert(pass == 0); return; } } if (!bdrv_enable_write_cache(s->qdev.conf.bs)) { /* The request is used as the AIO opaque value, so add a ref. */ scsi_req_ref(&r->req); block_acct_start(bdrv_get_stats(s->qdev.conf.bs), &r->acct, 0, BLOCK_ACCT_FLUSH); r->req.aiocb = bdrv_aio_flush(s->qdev.conf.bs, scsi_aio_complete, r); return; } scsi_req_complete(&r->req, GOOD); return; invalid_param: scsi_check_condition(r, SENSE_CODE(INVALID_PARAM)); return; invalid_param_len: scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN)); return; invalid_field: scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); } | 16,786 |
0 | int32 float32_to_int32_round_to_zero( float32 a STATUS_PARAM ) { flag aSign; int16 aExp, shiftCount; bits32 aSig; int32 z; aSig = extractFloat32Frac( a ); aExp = extractFloat32Exp( a ); aSign = extractFloat32Sign( a ); shiftCount = aExp - 0x9E; if ( 0 <= shiftCount ) { if ( a != 0xCF000000 ) { float_raise( float_flag_invalid STATUS_VAR); if ( ! aSign || ( ( aExp == 0xFF ) && aSig ) ) return 0x7FFFFFFF; } return (sbits32) 0x80000000; } else if ( aExp <= 0x7E ) { if ( aExp | aSig ) STATUS(float_exception_flags) |= float_flag_inexact; return 0; } aSig = ( aSig | 0x00800000 )<<8; z = aSig>>( - shiftCount ); if ( (bits32) ( aSig<<( shiftCount & 31 ) ) ) { STATUS(float_exception_flags) |= float_flag_inexact; } if ( aSign ) z = - z; return z; } | 16,788 |
0 | QEMUFile *qemu_popen_cmd(const char *command, const char *mode) { FILE *stdio_file; QEMUFileStdio *s; stdio_file = popen(command, mode); if (stdio_file == NULL) { return NULL; } if (mode == NULL || (mode[0] != 'r' && mode[0] != 'w') || mode[1] != 0) { fprintf(stderr, "qemu_popen: Argument validity check failed\n"); return NULL; } s = g_malloc0(sizeof(QEMUFileStdio)); s->stdio_file = stdio_file; if(mode[0] == 'r') { s->file = qemu_fopen_ops(s, &stdio_pipe_read_ops); } else { s->file = qemu_fopen_ops(s, &stdio_pipe_write_ops); } return s->file; } | 16,789 |
0 | static int openpic_load(QEMUFile* f, void *opaque, int version_id) { OpenPICState *opp = (OpenPICState *)opaque; unsigned int i; if (version_id != 1) return -EINVAL; qemu_get_be32s(f, &opp->glbc); qemu_get_be32s(f, &opp->veni); qemu_get_be32s(f, &opp->pint); qemu_get_be32s(f, &opp->spve); qemu_get_be32s(f, &opp->tifr); for (i = 0; i < opp->max_irq; i++) { qemu_get_be32s(f, &opp->src[i].ipvp); qemu_get_be32s(f, &opp->src[i].ide); qemu_get_sbe32s(f, &opp->src[i].last_cpu); qemu_get_sbe32s(f, &opp->src[i].pending); } qemu_get_be32s(f, &opp->nb_cpus); for (i = 0; i < opp->nb_cpus; i++) { qemu_get_be32s(f, &opp->dst[i].pctp); qemu_get_be32s(f, &opp->dst[i].pcsr); openpic_load_IRQ_queue(f, &opp->dst[i].raised); openpic_load_IRQ_queue(f, &opp->dst[i].servicing); } for (i = 0; i < MAX_TMR; i++) { qemu_get_be32s(f, &opp->timers[i].ticc); qemu_get_be32s(f, &opp->timers[i].tibc); } return 0; } | 16,790 |
0 | uint64_t HELPER(abs_i64)(int64_t val) { HELPER_LOG("%s: val 0x%" PRIx64 "\n", __func__, val); if (val < 0) { return -val; } else { return val; } } | 16,791 |
0 | void blk_resume_after_migration(Error **errp) { BlockBackend *blk; Error *local_err = NULL; for (blk = blk_all_next(NULL); blk; blk = blk_all_next(blk)) { if (!blk->disable_perm) { continue; } blk->disable_perm = false; blk_set_perm(blk, blk->perm, blk->shared_perm, &local_err); if (local_err) { error_propagate(errp, local_err); blk->disable_perm = true; return; } } } | 16,793 |
0 | static int vfio_pci_hot_reset(VFIOPCIDevice *vdev, bool single) { VFIOGroup *group; struct vfio_pci_hot_reset_info *info; struct vfio_pci_dependent_device *devices; struct vfio_pci_hot_reset *reset; int32_t *fds; int ret, i, count; bool multi = false; trace_vfio_pci_hot_reset(vdev->vbasedev.name, single ? "one" : "multi"); vfio_pci_pre_reset(vdev); vdev->vbasedev.needs_reset = false; info = g_malloc0(sizeof(*info)); info->argsz = sizeof(*info); ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info); if (ret && errno != ENOSPC) { ret = -errno; if (!vdev->has_pm_reset) { error_report("vfio: Cannot reset device %04x:%02x:%02x.%x, " "no available reset mechanism.", vdev->host.domain, vdev->host.bus, vdev->host.slot, vdev->host.function); } goto out_single; } count = info->count; info = g_realloc(info, sizeof(*info) + (count * sizeof(*devices))); info->argsz = sizeof(*info) + (count * sizeof(*devices)); devices = &info->devices[0]; ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info); if (ret) { ret = -errno; error_report("vfio: hot reset info failed: %m"); goto out_single; } trace_vfio_pci_hot_reset_has_dep_devices(vdev->vbasedev.name); /* Verify that we have all the groups required */ for (i = 0; i < info->count; i++) { PCIHostDeviceAddress host; VFIOPCIDevice *tmp; VFIODevice *vbasedev_iter; host.domain = devices[i].segment; host.bus = devices[i].bus; host.slot = PCI_SLOT(devices[i].devfn); host.function = PCI_FUNC(devices[i].devfn); trace_vfio_pci_hot_reset_dep_devices(host.domain, host.bus, host.slot, host.function, devices[i].group_id); if (vfio_pci_host_match(&host, &vdev->host)) { continue; } QLIST_FOREACH(group, &vfio_group_list, next) { if (group->groupid == devices[i].group_id) { break; } } if (!group) { if (!vdev->has_pm_reset) { error_report("vfio: Cannot reset device %s, " "depends on group %d which is not owned.", vdev->vbasedev.name, devices[i].group_id); } ret = -EPERM; goto out; } /* Prep dependent devices for reset and clear our marker. */ QLIST_FOREACH(vbasedev_iter, &group->device_list, next) { if (vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) { continue; } tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev); if (vfio_pci_host_match(&host, &tmp->host)) { if (single) { ret = -EINVAL; goto out_single; } vfio_pci_pre_reset(tmp); tmp->vbasedev.needs_reset = false; multi = true; break; } } } if (!single && !multi) { ret = -EINVAL; goto out_single; } /* Determine how many group fds need to be passed */ count = 0; QLIST_FOREACH(group, &vfio_group_list, next) { for (i = 0; i < info->count; i++) { if (group->groupid == devices[i].group_id) { count++; break; } } } reset = g_malloc0(sizeof(*reset) + (count * sizeof(*fds))); reset->argsz = sizeof(*reset) + (count * sizeof(*fds)); fds = &reset->group_fds[0]; /* Fill in group fds */ QLIST_FOREACH(group, &vfio_group_list, next) { for (i = 0; i < info->count; i++) { if (group->groupid == devices[i].group_id) { fds[reset->count++] = group->fd; break; } } } /* Bus reset! */ ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_PCI_HOT_RESET, reset); g_free(reset); trace_vfio_pci_hot_reset_result(vdev->vbasedev.name, ret ? "%m" : "Success"); out: /* Re-enable INTx on affected devices */ for (i = 0; i < info->count; i++) { PCIHostDeviceAddress host; VFIOPCIDevice *tmp; VFIODevice *vbasedev_iter; host.domain = devices[i].segment; host.bus = devices[i].bus; host.slot = PCI_SLOT(devices[i].devfn); host.function = PCI_FUNC(devices[i].devfn); if (vfio_pci_host_match(&host, &vdev->host)) { continue; } QLIST_FOREACH(group, &vfio_group_list, next) { if (group->groupid == devices[i].group_id) { break; } } if (!group) { break; } QLIST_FOREACH(vbasedev_iter, &group->device_list, next) { if (vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) { continue; } tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev); if (vfio_pci_host_match(&host, &tmp->host)) { vfio_pci_post_reset(tmp); break; } } } out_single: vfio_pci_post_reset(vdev); g_free(info); return ret; } | 16,796
0 | pvscsi_ring_pop_req_descr(PVSCSIRingInfo *mgr) { uint32_t ready_ptr = RS_GET_FIELD(mgr, reqProdIdx); if (ready_ptr != mgr->consumed_ptr) { uint32_t next_ready_ptr = mgr->consumed_ptr++ & mgr->txr_len_mask; uint32_t next_ready_page = next_ready_ptr / PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE; uint32_t inpage_idx = next_ready_ptr % PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE; return mgr->req_ring_pages_pa[next_ready_page] + inpage_idx * sizeof(PVSCSIRingReqDesc); } else { return 0; } } | 16,797 |
0 | int nbd_receive_negotiate(QIOChannel *ioc, const char *name, uint32_t *flags, QCryptoTLSCreds *tlscreds, const char *hostname, QIOChannel **outioc, off_t *size, Error **errp) { char buf[256]; uint64_t magic, s; int rc; TRACE("Receiving negotiation tlscreds=%p hostname=%s.", tlscreds, hostname ? hostname : "<null>"); rc = -EINVAL; if (outioc) { *outioc = NULL; } if (tlscreds && !outioc) { error_setg(errp, "Output I/O channel required for TLS"); goto fail; } if (read_sync(ioc, buf, 8) != 8) { error_setg(errp, "Failed to read data"); goto fail; } buf[8] = '\0'; if (strlen(buf) == 0) { error_setg(errp, "Server connection closed unexpectedly"); goto fail; } TRACE("Magic is %c%c%c%c%c%c%c%c", qemu_isprint(buf[0]) ? buf[0] : '.', qemu_isprint(buf[1]) ? buf[1] : '.', qemu_isprint(buf[2]) ? buf[2] : '.', qemu_isprint(buf[3]) ? buf[3] : '.', qemu_isprint(buf[4]) ? buf[4] : '.', qemu_isprint(buf[5]) ? buf[5] : '.', qemu_isprint(buf[6]) ? buf[6] : '.', qemu_isprint(buf[7]) ? buf[7] : '.'); if (memcmp(buf, "NBDMAGIC", 8) != 0) { error_setg(errp, "Invalid magic received"); goto fail; } if (read_sync(ioc, &magic, sizeof(magic)) != sizeof(magic)) { error_setg(errp, "Failed to read magic"); goto fail; } magic = be64_to_cpu(magic); TRACE("Magic is 0x%" PRIx64, magic); if (magic == NBD_OPTS_MAGIC) { uint32_t clientflags = 0; uint32_t opt; uint32_t namesize; uint16_t globalflags; uint16_t exportflags; bool fixedNewStyle = false; if (read_sync(ioc, &globalflags, sizeof(globalflags)) != sizeof(globalflags)) { error_setg(errp, "Failed to read server flags"); goto fail; } globalflags = be16_to_cpu(globalflags); *flags = globalflags << 16; TRACE("Global flags are %" PRIx32, globalflags); if (globalflags & NBD_FLAG_FIXED_NEWSTYLE) { fixedNewStyle = true; TRACE("Server supports fixed new style"); clientflags |= NBD_FLAG_C_FIXED_NEWSTYLE; } /* client requested flags */ clientflags = cpu_to_be32(clientflags); if (write_sync(ioc, &clientflags, sizeof(clientflags)) != sizeof(clientflags)) { error_setg(errp, "Failed to send clientflags field"); goto fail; } if (tlscreds) { if (fixedNewStyle) { *outioc = nbd_receive_starttls(ioc, tlscreds, hostname, errp); if (!*outioc) { goto fail; } ioc = *outioc; } else { error_setg(errp, "Server does not support STARTTLS"); goto fail; } } if (!name) { TRACE("Using default NBD export name \"\""); name = ""; } if (fixedNewStyle) { /* Check our desired export is present in the * server export list. Since NBD_OPT_EXPORT_NAME * cannot return an error message, running this * query gives us good error reporting if the * server required TLS */ if (nbd_receive_query_exports(ioc, name, errp) < 0) { goto fail; } } /* write the export name */ magic = cpu_to_be64(magic); if (write_sync(ioc, &magic, sizeof(magic)) != sizeof(magic)) { error_setg(errp, "Failed to send export name magic"); goto fail; } opt = cpu_to_be32(NBD_OPT_EXPORT_NAME); if (write_sync(ioc, &opt, sizeof(opt)) != sizeof(opt)) { error_setg(errp, "Failed to send export name option number"); goto fail; } namesize = cpu_to_be32(strlen(name)); if (write_sync(ioc, &namesize, sizeof(namesize)) != sizeof(namesize)) { error_setg(errp, "Failed to send export name length"); goto fail; } if (write_sync(ioc, (char *)name, strlen(name)) != strlen(name)) { error_setg(errp, "Failed to send export name"); goto fail; } if (read_sync(ioc, &s, sizeof(s)) != sizeof(s)) { error_setg(errp, "Failed to read export length"); goto fail; } *size = be64_to_cpu(s); TRACE("Size is %" PRIu64, *size); if (read_sync(ioc, &exportflags, sizeof(exportflags)) != sizeof(exportflags)) { error_setg(errp, "Failed to read export flags"); goto fail; } exportflags = be16_to_cpu(exportflags); *flags |= exportflags; TRACE("Export flags are %" PRIx16, exportflags); } else if (magic == NBD_CLIENT_MAGIC) { if (name) { error_setg(errp, "Server does not support export names"); goto fail; } if (tlscreds) { error_setg(errp, "Server does not support STARTTLS"); goto fail; } if (read_sync(ioc, &s, sizeof(s)) != sizeof(s)) { error_setg(errp, "Failed to read export length"); goto fail; } *size = be64_to_cpu(s); TRACE("Size is %" PRIu64, *size); if (read_sync(ioc, flags, sizeof(*flags)) != sizeof(*flags)) { error_setg(errp, "Failed to read export flags"); goto fail; } *flags = be32_to_cpu(*flags); } else { error_setg(errp, "Bad magic received"); goto fail; } if (read_sync(ioc, &buf, 124) != 124) { error_setg(errp, "Failed to read reserved block"); goto fail; } rc = 0; fail: return rc; } | 16,798
0 | SCSIRequest *scsi_req_new(SCSIDevice *d, uint32_t tag, uint32_t lun, uint8_t *buf, void *hba_private) { SCSIRequest *req; req = d->info->alloc_req(d, tag, lun, hba_private); memcpy(req->cmd.buf, buf, 16); return req; } | 16,799 |
0 | int qemu_read_password(char *buf, int buf_size) { uint8_t ch; int i, ret; printf("password: "); fflush(stdout); term_init(); i = 0; for (;;) { ret = read(0, &ch, 1); if (ret == -1) { if (errno == EAGAIN || errno == EINTR) { continue; } else { break; } } else if (ret == 0) { ret = -1; break; } else { if (ch == '\r') { ret = 0; break; } if (i < (buf_size - 1)) { buf[i++] = ch; } } } term_exit(); buf[i] = '\0'; printf("\n"); return ret; } | 16,800 |
0 | static void qemu_account_warp_timer(void) { if (!use_icount || !icount_sleep) { return; } /* Nothing to do if the VM is stopped: QEMU_CLOCK_VIRTUAL timers * do not fire, so computing the deadline does not make sense. */ if (!runstate_is_running()) { return; } /* warp clock deterministically in record/replay mode */ if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP_ACCOUNT)) { return; } timer_del(icount_warp_timer); icount_warp_rt(); } | 16,801 |
0 | int host_to_target_signal(int sig) { if (sig >= _NSIG) return sig; return host_to_target_signal_table[sig]; } | 16,802 |
0 | void process_pending_signals(CPUArchState *cpu_env) { CPUState *cpu = ENV_GET_CPU(cpu_env); int sig; abi_ulong handler; sigset_t set, old_set; target_sigset_t target_old_set; struct emulated_sigtable *k; struct target_sigaction *sa; struct sigqueue *q; TaskState *ts = cpu->opaque; if (!ts->signal_pending) return; /* FIXME: This is not threadsafe. */ k = ts->sigtab; for(sig = 1; sig <= TARGET_NSIG; sig++) { if (k->pending) goto handle_signal; k++; } /* if no signal is pending, just return */ ts->signal_pending = 0; return; handle_signal: #ifdef DEBUG_SIGNAL fprintf(stderr, "qemu: process signal %d\n", sig); #endif /* dequeue signal */ q = k->first; k->first = q->next; if (!k->first) k->pending = 0; sig = gdb_handlesig(cpu, sig); if (!sig) { sa = NULL; handler = TARGET_SIG_IGN; } else { sa = &sigact_table[sig - 1]; handler = sa->_sa_handler; } if (handler == TARGET_SIG_DFL) { /* default handler : ignore some signal. The other are job control or fatal */ if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) { kill(getpid(),SIGSTOP); } else if (sig != TARGET_SIGCHLD && sig != TARGET_SIGURG && sig != TARGET_SIGWINCH && sig != TARGET_SIGCONT) { force_sig(sig); } } else if (handler == TARGET_SIG_IGN) { /* ignore sig */ } else if (handler == TARGET_SIG_ERR) { force_sig(sig); } else { /* compute the blocked signals during the handler execution */ target_to_host_sigset(&set, &sa->sa_mask); /* SA_NODEFER indicates that the current signal should not be blocked during the handler */ if (!(sa->sa_flags & TARGET_SA_NODEFER)) sigaddset(&set, target_to_host_signal(sig)); /* block signals in the handler using Linux */ sigprocmask(SIG_BLOCK, &set, &old_set); /* save the previous blocked signal state to restore it at the end of the signal execution (see do_sigreturn) */ host_to_target_sigset_internal(&target_old_set, &old_set); /* if the CPU is in VM86 mode, we restore the 32 bit values */ #if defined(TARGET_I386) && !defined(TARGET_X86_64) { CPUX86State *env = cpu_env; if (env->eflags & VM_MASK) save_v86_state(env); } #endif /* prepare the stack frame of the virtual CPU */ #if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64) /* These targets do not have traditional signals. */ setup_rt_frame(sig, sa, &q->info, &target_old_set, cpu_env); #else if (sa->sa_flags & TARGET_SA_SIGINFO) setup_rt_frame(sig, sa, &q->info, &target_old_set, cpu_env); else setup_frame(sig, sa, &target_old_set, cpu_env); #endif if (sa->sa_flags & TARGET_SA_RESETHAND) sa->_sa_handler = TARGET_SIG_DFL; } if (q != &k->info) free_sigqueue(cpu_env, q); } | 16,803 |
0 | static void handle_stream_probing(AVStream *st) { if (st->codec->codec_id == AV_CODEC_ID_PCM_S16LE) { st->request_probe = AVPROBE_SCORE_EXTENSION; st->probe_packets = FFMIN(st->probe_packets, 14); } } | 16,805 |
0 | int avcodec_close(AVCodecContext *avctx) { /* If there is a user-supplied mutex locking routine, call it. */ if (ff_lockmgr_cb) { if ((*ff_lockmgr_cb)(&codec_mutex, AV_LOCK_OBTAIN)) return -1; } entangled_thread_counter++; if(entangled_thread_counter != 1){ av_log(avctx, AV_LOG_ERROR, "insufficient thread locking around avcodec_open/close()\n"); entangled_thread_counter--; return -1; } if (HAVE_THREADS && avctx->thread_opaque) avcodec_thread_free(avctx); if (avctx->codec->close) avctx->codec->close(avctx); avcodec_default_free_buffers(avctx); av_freep(&avctx->priv_data); avctx->codec = NULL; entangled_thread_counter--; /* Release any user-supplied mutex. */ if (ff_lockmgr_cb) { (*ff_lockmgr_cb)(&codec_mutex, AV_LOCK_RELEASE); } return 0; } | 16,806 |
0 | static int a52_decode_frame(AVCodecContext *avctx, void *data, int *data_size, uint8_t *buf, int buf_size) { AC3DecodeState *s = avctx->priv_data; uint8_t *buf_ptr; int flags, i, len; int sample_rate, bit_rate; short *out_samples = data; float level; static const int ac3_channels[8] = { 2, 1, 2, 3, 3, 4, 4, 5 }; *data_size= 0; buf_ptr = buf; while (buf_size > 0) { len = s->inbuf_ptr - s->inbuf; if (s->frame_size == 0) { /* no header seen : find one. We need at least 7 bytes to parse it */ len = HEADER_SIZE - len; if (len > buf_size) len = buf_size; memcpy(s->inbuf_ptr, buf_ptr, len); buf_ptr += len; s->inbuf_ptr += len; buf_size -= len; if ((s->inbuf_ptr - s->inbuf) == HEADER_SIZE) { len = s->a52_syncinfo(s->inbuf, &s->flags, &sample_rate, &bit_rate); if (len == 0) { /* no sync found : move by one byte (inefficient, but simple!) */ memcpy(s->inbuf, s->inbuf + 1, HEADER_SIZE - 1); s->inbuf_ptr--; } else { s->frame_size = len; /* update codec info */ avctx->sample_rate = sample_rate; s->channels = ac3_channels[s->flags & 7]; if (s->flags & A52_LFE) s->channels++; avctx->channels = s->channels; if (avctx->request_channels > 0 && avctx->request_channels <= 2 && avctx->request_channels < s->channels) { avctx->channels = avctx->request_channels; } avctx->bit_rate = bit_rate; } } } else if (len < s->frame_size) { len = s->frame_size - len; if (len > buf_size) len = buf_size; memcpy(s->inbuf_ptr, buf_ptr, len); buf_ptr += len; s->inbuf_ptr += len; buf_size -= len; } else { flags = s->flags; if (avctx->channels == 1) flags = A52_MONO; else if (avctx->channels == 2) flags = A52_STEREO; else flags |= A52_ADJUST_LEVEL; level = 1; if (s->a52_frame(s->state, s->inbuf, &flags, &level, 384)) { fail: av_log(avctx, AV_LOG_ERROR, "Error decoding frame\n"); s->inbuf_ptr = s->inbuf; s->frame_size = 0; continue; } for (i = 0; i < 6; i++) { if (s->a52_block(s->state)) goto fail; float_to_int(s->samples, out_samples + i * 256 * avctx->channels, avctx->channels); } s->inbuf_ptr = s->inbuf; s->frame_size = 0; *data_size = 6 * avctx->channels * 256 * sizeof(int16_t); break; } } return buf_ptr - buf; } | 16,807 |
0 | static int get_packetheader(NUTContext *nut, ByteIOContext *bc, int calculate_checksum) { int64_t start, size; start= url_ftell(bc) - 8; init_checksum(bc, calculate_checksum ? update_adler32 : NULL, 0); size= get_v(bc); nut->packet_start[2] = start; nut->written_packet_size= size; return size; } | 16,809 |
0 | static void rtsp_close_streams(RTSPState *rt) { int i; RTSPStream *rtsp_st; for(i=0;i<rt->nb_rtsp_streams;i++) { rtsp_st = rt->rtsp_streams[i]; if (rtsp_st) { if (rtsp_st->transport_priv) { if (rt->transport == RTSP_TRANSPORT_RDT) ff_rdt_parse_close(rtsp_st->transport_priv); else rtp_parse_close(rtsp_st->transport_priv); } if (rtsp_st->rtp_handle) url_close(rtsp_st->rtp_handle); if (rtsp_st->dynamic_handler && rtsp_st->dynamic_protocol_context) rtsp_st->dynamic_handler->close(rtsp_st->dynamic_protocol_context); } } av_free(rt->rtsp_streams); if (rt->asf_ctx) { av_close_input_stream (rt->asf_ctx); rt->asf_ctx = NULL; } av_freep(&rt->auth_b64); } | 16,810 |
0 | static uint32_t drc_isolate_logical(sPAPRDRConnector *drc) { /* if the guest is configuring a device attached to this DRC, we * should reset the configuration state at this point since it may * no longer be reliable (guest released device and needs to start * over, or unplug occurred so the FDT is no longer valid) */ g_free(drc->ccs); drc->ccs = NULL; /* * Fail any requests to ISOLATE the LMB DRC if this LMB doesn't * belong to a DIMM device that is marked for removal. * * Currently the guest userspace tool drmgr that drives the memory * hotplug/unplug will just try to remove a set of 'removable' LMBs * in response to a hot unplug request that is based on drc-count. * If the LMB being removed doesn't belong to a DIMM device that is * actually being unplugged, fail the isolation request here. */ if (spapr_drc_type(drc) == SPAPR_DR_CONNECTOR_TYPE_LMB && !drc->unplug_requested) { return RTAS_OUT_HW_ERROR; } drc->isolation_state = SPAPR_DR_ISOLATION_STATE_ISOLATED; /* if we're awaiting release, but still in an unconfigured state, * it's likely the guest is still in the process of configuring * the device and is transitioning the devices to an ISOLATED * state as a part of that process. so we only complete the * removal when this transition happens for a device in a * configured state, as suggested by the state diagram from PAPR+ * 2.7, 13.4 */ if (drc->unplug_requested) { uint32_t drc_index = spapr_drc_index(drc); if (drc->configured) { trace_spapr_drc_set_isolation_state_finalizing(drc_index); spapr_drc_detach(drc); } else { trace_spapr_drc_set_isolation_state_deferring(drc_index); } } drc->configured = false; return RTAS_OUT_SUCCESS; } | 16,811 |
0 | static void compat_free_buffer(void *opaque, uint8_t *data) { CompatReleaseBufPriv *priv = opaque; priv->avctx.release_buffer(&priv->avctx, &priv->frame); av_freep(&priv); } | 16,813 |
0 | static int aio_write_f(BlockBackend *blk, int argc, char **argv) { int nr_iov, c; int pattern = 0xcd; struct aio_ctx *ctx = g_new0(struct aio_ctx, 1); int flags = 0; ctx->blk = blk; while ((c = getopt(argc, argv, "CfqP:uz")) != -1) { switch (c) { case 'C': ctx->Cflag = true; break; case 'f': flags |= BDRV_REQ_FUA; break; case 'q': ctx->qflag = true; break; case 'u': flags |= BDRV_REQ_MAY_UNMAP; break; case 'P': pattern = parse_pattern(optarg); if (pattern < 0) { g_free(ctx); return 0; } break; case 'z': ctx->zflag = true; break; default: g_free(ctx); return qemuio_command_usage(&aio_write_cmd); } } if (optind > argc - 2) { g_free(ctx); return qemuio_command_usage(&aio_write_cmd); } if (ctx->zflag && optind != argc - 2) { printf("-z supports only a single length parameter\n"); g_free(ctx); return 0; } if ((flags & BDRV_REQ_MAY_UNMAP) && !ctx->zflag) { printf("-u requires -z to be specified\n"); g_free(ctx); return 0; } if (ctx->zflag && ctx->Pflag) { printf("-z and -P cannot be specified at the same time\n"); g_free(ctx); return 0; } ctx->offset = cvtnum(argv[optind]); if (ctx->offset < 0) { print_cvtnum_err(ctx->offset, argv[optind]); g_free(ctx); return 0; } optind++; if (ctx->zflag) { int64_t count = cvtnum(argv[optind]); if (count < 0) { print_cvtnum_err(count, argv[optind]); g_free(ctx); return 0; } ctx->qiov.size = count; blk_aio_write_zeroes(blk, ctx->offset, count, flags, aio_write_done, ctx); } else { nr_iov = argc - optind; ctx->buf = create_iovec(blk, &ctx->qiov, &argv[optind], nr_iov, pattern); if (ctx->buf == NULL) { block_acct_invalid(blk_get_stats(blk), BLOCK_ACCT_WRITE); g_free(ctx); return 0; } gettimeofday(&ctx->t1, NULL); block_acct_start(blk_get_stats(blk), &ctx->acct, ctx->qiov.size, BLOCK_ACCT_WRITE); blk_aio_pwritev(blk, ctx->offset, &ctx->qiov, flags, aio_write_done, ctx); } return 0; } | 16,814 |
0 | void *g_malloc_n(size_t nmemb, size_t size) { size_t sz; void *ptr; __coverity_negative_sink__(nmemb); __coverity_negative_sink__(size); sz = nmemb * size; ptr = __coverity_alloc__(size); __coverity_mark_as_uninitialized_buffer__(ptr); __coverity_mark_as_afm_allocated__(ptr, AFM_free); return ptr; } | 16,815 |
0 | int i2c_recv(I2CBus *bus) { I2CSlaveClass *sc; if ((QLIST_EMPTY(&bus->current_devs)) || (bus->broadcast)) { return -1; } sc = I2C_SLAVE_GET_CLASS(QLIST_FIRST(&bus->current_devs)->elt); if (sc->recv) { return sc->recv(QLIST_FIRST(&bus->current_devs)->elt); } return -1; } | 16,818 |
0 | static int spapr_vga_init(PCIBus *pci_bus) { switch (vga_interface_type) { case VGA_STD: pci_std_vga_init(pci_bus); return 1; case VGA_NONE: return 0; default: fprintf(stderr, "This vga model is not supported," "currently it only supports -vga std\n"); exit(0); break; } } | 16,819 |
0 | void async_context_pop(void) { } | 16,820 |
0 | static void thread_pool_init_one(ThreadPool *pool, AioContext *ctx) { if (!ctx) { ctx = qemu_get_aio_context(); } memset(pool, 0, sizeof(*pool)); event_notifier_init(&pool->notifier, false); pool->ctx = ctx; qemu_mutex_init(&pool->lock); qemu_cond_init(&pool->check_cancel); qemu_cond_init(&pool->worker_stopped); qemu_sem_init(&pool->sem, 0); pool->max_threads = 64; pool->new_thread_bh = aio_bh_new(ctx, spawn_thread_bh_fn, pool); QLIST_INIT(&pool->head); QTAILQ_INIT(&pool->request_list); aio_set_event_notifier(ctx, &pool->notifier, event_notifier_ready); } | 16,822 |
0 | static always_inline void gen_cmov (TCGCond inv_cond, int ra, int rb, int rc, int islit, uint8_t lit, int mask) { int l1; if (unlikely(rc == 31)) return; l1 = gen_new_label(); if (ra != 31) { if (mask) { TCGv tmp = tcg_temp_new(TCG_TYPE_I64); tcg_gen_andi_i64(tmp, cpu_ir[ra], 1); tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1); tcg_temp_free(tmp); } else tcg_gen_brcondi_i64(inv_cond, cpu_ir[ra], 0, l1); } else { /* Very uncommon case - Do not bother to optimize. */ TCGv tmp = tcg_const_i64(0); tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1); tcg_temp_free(tmp); } if (islit) tcg_gen_movi_i64(cpu_ir[rc], lit); else tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]); gen_set_label(l1); } | 16,823 |
0 | static int process_tns_coeffs(TemporalNoiseShaping *tns, float *tns_coefs_raw, int order, int w, int filt) { int i, j; int *idx = tns->coef_idx[w][filt]; float *lpc = tns->coef[w][filt]; const int iqfac_p = ((1 << (MAX_LPC_PRECISION-1)) - 0.5)/(M_PI/2.0); const int iqfac_m = ((1 << (MAX_LPC_PRECISION-1)) + 0.5)/(M_PI/2.0); float temp[TNS_MAX_ORDER] = {0.0f}, out[TNS_MAX_ORDER] = {0.0f}; /* Quantization */ for (i = 0; i < order; i++) { idx[i] = ceilf(asin(tns_coefs_raw[i])*((tns_coefs_raw[i] >= 0) ? iqfac_p : iqfac_m)); lpc[i] = 2*sin(idx[i]/((idx[i] >= 0) ? iqfac_p : iqfac_m)); } /* Trim any coeff less than 0.1f from the end */ for (i = order; i > -1; i--) { lpc[i] = (fabs(lpc[i]) > 0.1f) ? lpc[i] : 0.0f; if (lpc[i] != 0.0 ) { order = i; break; } } if (!order) return 0; /* Step up procedure, convert to LPC coeffs */ out[0] = 1.0f; for (i = 1; i <= order; i++) { for (j = 1; j < i; j++) { temp[j] = out[j] + lpc[i]*out[i-j]; } for (j = 1; j <= i; j++) { out[j] = temp[j]; } out[i] = lpc[i-1]; } memcpy(lpc, out, TNS_MAX_ORDER*sizeof(float)); return order; } | 16,824 |
0 | static inline void downmix_3f_to_stereo(float *samples) { int i; for (i = 0; i < 256; i++) { samples[i] += samples[i + 256]; samples[i + 256] = samples[i + 512]; samples[i + 512] = 0; } } | 16,825 |
0 | static int vfio_set_resample_eventfd(VFIOINTp *intp) { VFIODevice *vbasedev = &intp->vdev->vbasedev; struct vfio_irq_set *irq_set; int argsz, ret; int32_t *pfd; argsz = sizeof(*irq_set) + sizeof(*pfd); irq_set = g_malloc0(argsz); irq_set->argsz = argsz; irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_UNMASK; irq_set->index = intp->pin; irq_set->start = 0; irq_set->count = 1; pfd = (int32_t *)&irq_set->data; *pfd = event_notifier_get_fd(&intp->unmask); qemu_set_fd_handler(*pfd, NULL, NULL, NULL); ret = ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, irq_set); g_free(irq_set); if (ret < 0) { error_report("vfio: Failed to set resample eventfd: %m"); } return ret; } | 16,826 |
0 | static void test_io_channel_unix_fd_pass(void) { SocketAddress *listen_addr = g_new0(SocketAddress, 1); SocketAddress *connect_addr = g_new0(SocketAddress, 1); QIOChannel *src, *dst; int testfd; int fdsend[3]; int *fdrecv = NULL; size_t nfdrecv = 0; size_t i; char bufsend[12], bufrecv[12]; struct iovec iosend[1], iorecv[1]; #define TEST_SOCKET "test-io-channel-socket.sock" #define TEST_FILE "test-io-channel-socket.txt" testfd = open(TEST_FILE, O_RDWR|O_TRUNC|O_CREAT, 0700); g_assert(testfd != -1); fdsend[0] = testfd; fdsend[1] = testfd; fdsend[2] = testfd; listen_addr->type = SOCKET_ADDRESS_KIND_UNIX; listen_addr->u.q_unix.data = g_new0(UnixSocketAddress, 1); listen_addr->u.q_unix.data->path = g_strdup(TEST_SOCKET); connect_addr->type = SOCKET_ADDRESS_KIND_UNIX; connect_addr->u.q_unix.data = g_new0(UnixSocketAddress, 1); connect_addr->u.q_unix.data->path = g_strdup(TEST_SOCKET); test_io_channel_setup_sync(listen_addr, connect_addr, &src, &dst); memcpy(bufsend, "Hello World", G_N_ELEMENTS(bufsend)); iosend[0].iov_base = bufsend; iosend[0].iov_len = G_N_ELEMENTS(bufsend); iorecv[0].iov_base = bufrecv; iorecv[0].iov_len = G_N_ELEMENTS(bufrecv); g_assert(qio_channel_has_feature(src, QIO_CHANNEL_FEATURE_FD_PASS)); g_assert(qio_channel_has_feature(dst, QIO_CHANNEL_FEATURE_FD_PASS)); qio_channel_writev_full(src, iosend, G_N_ELEMENTS(iosend), fdsend, G_N_ELEMENTS(fdsend), &error_abort); qio_channel_readv_full(dst, iorecv, G_N_ELEMENTS(iorecv), &fdrecv, &nfdrecv, &error_abort); g_assert(nfdrecv == G_N_ELEMENTS(fdsend)); /* Each recvd FD should be different from sent FD */ for (i = 0; i < nfdrecv; i++) { g_assert_cmpint(fdrecv[i], !=, testfd); } /* Each recvd FD should be different from each other */ g_assert_cmpint(fdrecv[0], !=, fdrecv[1]); g_assert_cmpint(fdrecv[0], !=, fdrecv[2]); g_assert_cmpint(fdrecv[1], !=, fdrecv[2]); /* Check the I/O buf we sent at the same time matches */ g_assert(memcmp(bufsend, bufrecv, G_N_ELEMENTS(bufsend)) == 0); /* Write some data into the FD we received */ g_assert(write(fdrecv[0], bufsend, G_N_ELEMENTS(bufsend)) == G_N_ELEMENTS(bufsend)); /* Read data from the original FD and make sure it matches */ memset(bufrecv, 0, G_N_ELEMENTS(bufrecv)); g_assert(lseek(testfd, 0, SEEK_SET) == 0); g_assert(read(testfd, bufrecv, G_N_ELEMENTS(bufrecv)) == G_N_ELEMENTS(bufrecv)); g_assert(memcmp(bufsend, bufrecv, G_N_ELEMENTS(bufsend)) == 0); object_unref(OBJECT(src)); object_unref(OBJECT(dst)); qapi_free_SocketAddress(listen_addr); qapi_free_SocketAddress(connect_addr); unlink(TEST_SOCKET); unlink(TEST_FILE); close(testfd); for (i = 0; i < nfdrecv; i++) { close(fdrecv[i]); } g_free(fdrecv); } | 16,827 |
0 | static int raw_open_common(BlockDriverState *bs, const char *filename, int bdrv_flags, int open_flags) { BDRVRawState *s = bs->opaque; int fd, ret; ret = raw_normalize_devicepath(&filename); if (ret != 0) { return ret; } s->open_flags = open_flags | O_BINARY; s->open_flags &= ~O_ACCMODE; if (bdrv_flags & BDRV_O_RDWR) { s->open_flags |= O_RDWR; } else { s->open_flags |= O_RDONLY; } /* Use O_DSYNC for write-through caching, no flags for write-back caching, * and O_DIRECT for no caching. */ if ((bdrv_flags & BDRV_O_NOCACHE)) s->open_flags |= O_DIRECT; if (!(bdrv_flags & BDRV_O_CACHE_WB)) s->open_flags |= O_DSYNC; s->fd = -1; fd = qemu_open(filename, s->open_flags, 0644); if (fd < 0) { ret = -errno; if (ret == -EROFS) ret = -EACCES; return ret; } s->fd = fd; s->aligned_buf = NULL; if ((bdrv_flags & BDRV_O_NOCACHE)) { /* * Allocate a buffer for read/modify/write cycles. Chose the size * pessimistically as we don't know the block size yet. */ s->aligned_buf_size = 32 * MAX_BLOCKSIZE; s->aligned_buf = qemu_memalign(MAX_BLOCKSIZE, s->aligned_buf_size); if (s->aligned_buf == NULL) { goto out_close; } } /* We're falling back to POSIX AIO in some cases so init always */ if (paio_init() < 0) { goto out_free_buf; } #ifdef CONFIG_LINUX_AIO /* * Currently Linux do AIO only for files opened with O_DIRECT * specified so check NOCACHE flag too */ if ((bdrv_flags & (BDRV_O_NOCACHE|BDRV_O_NATIVE_AIO)) == (BDRV_O_NOCACHE|BDRV_O_NATIVE_AIO)) { s->aio_ctx = laio_init(); if (!s->aio_ctx) { goto out_free_buf; } s->use_aio = 1; } else #endif { #ifdef CONFIG_LINUX_AIO s->use_aio = 0; #endif } #ifdef CONFIG_XFS if (platform_test_xfs_fd(s->fd)) { s->is_xfs = 1; } #endif return 0; out_free_buf: qemu_vfree(s->aligned_buf); out_close: qemu_close(fd); return -errno; } | 16,828 |
0 | static int proxy_close(FsContext *ctx, V9fsFidOpenState *fs) { return close(fs->fd); } | 16,830 |