label
int64 0
1
| func1
stringlengths 23
97k
| id
int64 0
27.3k
|
---|---|---|
0 | static unsigned int dec_addu_r(DisasContext *dc) { TCGv t0; int size = memsize_z(dc); DIS(fprintf (logfile, "addu.%c $r%u, $r%u\n", memsize_char(size), dc->op1, dc->op2)); cris_cc_mask(dc, CC_MASK_NZVC); t0 = tcg_temp_new(TCG_TYPE_TL); /* Size can only be qi or hi. */ t_gen_zext(t0, cpu_R[dc->op1], size); cris_alu(dc, CC_OP_ADD, cpu_R[dc->op2], cpu_R[dc->op2], t0, 4); tcg_temp_free(t0); return 2; } | 18,874 |
0 | static void net_rx_packet(void *opaque, const uint8_t *buf, size_t size) { struct XenNetDev *netdev = opaque; netif_rx_request_t rxreq; RING_IDX rc, rp; void *page; if (netdev->xendev.be_state != XenbusStateConnected) return; rc = netdev->rx_ring.req_cons; rp = netdev->rx_ring.sring->req_prod; xen_rmb(); /* Ensure we see queued requests up to 'rp'. */ if (rc == rp || RING_REQUEST_CONS_OVERFLOW(&netdev->rx_ring, rc)) { xen_be_printf(&netdev->xendev, 2, "no buffer, drop packet\n"); return; } if (size > XC_PAGE_SIZE - NET_IP_ALIGN) { xen_be_printf(&netdev->xendev, 0, "packet too big (%lu > %ld)", (unsigned long)size, XC_PAGE_SIZE - NET_IP_ALIGN); return; } memcpy(&rxreq, RING_GET_REQUEST(&netdev->rx_ring, rc), sizeof(rxreq)); netdev->rx_ring.req_cons = ++rc; page = xc_gnttab_map_grant_ref(netdev->xendev.gnttabdev, netdev->xendev.dom, rxreq.gref, PROT_WRITE); if (page == NULL) { xen_be_printf(&netdev->xendev, 0, "error: rx gref dereference failed (%d)\n", rxreq.gref); net_rx_response(netdev, &rxreq, NETIF_RSP_ERROR, 0, 0, 0); return; } memcpy(page + NET_IP_ALIGN, buf, size); xc_gnttab_munmap(netdev->xendev.gnttabdev, page, 1); net_rx_response(netdev, &rxreq, NETIF_RSP_OKAY, NET_IP_ALIGN, size, 0); } | 18,875 |
0 | static void mcf_fec_receive(void *opaque, const uint8_t *buf, size_t size) { mcf_fec_state *s = (mcf_fec_state *)opaque; mcf_fec_bd bd; uint32_t flags = 0; uint32_t addr; uint32_t crc; uint32_t buf_addr; uint8_t *crc_ptr; unsigned int buf_len; DPRINTF("do_rx len %d\n", size); if (!s->rx_enabled) { fprintf(stderr, "mcf_fec_receive: Unexpected packet\n"); } /* 4 bytes for the CRC. */ size += 4; crc = cpu_to_be32(crc32(~0, buf, size)); crc_ptr = (uint8_t *)&crc; /* Huge frames are truncted. */ if (size > FEC_MAX_FRAME_SIZE) { size = FEC_MAX_FRAME_SIZE; flags |= FEC_BD_TR | FEC_BD_LG; } /* Frames larger than the user limit just set error flags. */ if (size > (s->rcr >> 16)) { flags |= FEC_BD_LG; } addr = s->rx_descriptor; while (size > 0) { mcf_fec_read_bd(&bd, addr); if ((bd.flags & FEC_BD_E) == 0) { /* No descriptors available. Bail out. */ /* FIXME: This is wrong. We should probably either save the remainder for when more RX buffers are available, or flag an error. */ fprintf(stderr, "mcf_fec: Lost end of frame\n"); break; } buf_len = (size <= s->emrbr) ? size: s->emrbr; bd.length = buf_len; size -= buf_len; DPRINTF("rx_bd %x length %d\n", addr, bd.length); /* The last 4 bytes are the CRC. */ if (size < 4) buf_len += size - 4; buf_addr = bd.data; cpu_physical_memory_write(buf_addr, buf, buf_len); buf += buf_len; if (size < 4) { cpu_physical_memory_write(buf_addr + buf_len, crc_ptr, 4 - size); crc_ptr += 4 - size; } bd.flags &= ~FEC_BD_E; if (size == 0) { /* Last buffer in frame. */ bd.flags |= flags | FEC_BD_L; DPRINTF("rx frame flags %04x\n", bd.flags); s->eir |= FEC_INT_RXF; } else { s->eir |= FEC_INT_RXB; } mcf_fec_write_bd(&bd, addr); /* Advance to the next descriptor. */ if ((bd.flags & FEC_BD_W) != 0) { addr = s->erdsr; } else { addr += 8; } } s->rx_descriptor = addr; mcf_fec_enable_rx(s); mcf_fec_update(s); } | 18,876 |
0 | static BlockDriverAIOCB *paio_submit(BlockDriverState *bs, int fd, int64_t sector_num, QEMUIOVector *qiov, int nb_sectors, BlockDriverCompletionFunc *cb, void *opaque, int type) { RawPosixAIOData *acb = g_slice_new(RawPosixAIOData); acb->bs = bs; acb->aio_type = type; acb->aio_fildes = fd; if (qiov) { acb->aio_iov = qiov->iov; acb->aio_niov = qiov->niov; } acb->aio_nbytes = nb_sectors * 512; acb->aio_offset = sector_num * 512; trace_paio_submit(acb, opaque, sector_num, nb_sectors, type); return thread_pool_submit_aio(aio_worker, acb, cb, opaque); } | 18,877 |
0 | static void fdt_add_pmu_nodes(const VirtMachineState *vms) { CPUState *cpu; ARMCPU *armcpu; uint32_t irqflags = GIC_FDT_IRQ_FLAGS_LEVEL_HI; CPU_FOREACH(cpu) { armcpu = ARM_CPU(cpu); if (!arm_feature(&armcpu->env, ARM_FEATURE_PMU) || !kvm_arm_pmu_create(cpu, PPI(VIRTUAL_PMU_IRQ))) { return; } } if (vms->gic_version == 2) { irqflags = deposit32(irqflags, GIC_FDT_IRQ_PPI_CPU_START, GIC_FDT_IRQ_PPI_CPU_WIDTH, (1 << vms->smp_cpus) - 1); } armcpu = ARM_CPU(qemu_get_cpu(0)); qemu_fdt_add_subnode(vms->fdt, "/pmu"); if (arm_feature(&armcpu->env, ARM_FEATURE_V8)) { const char compat[] = "arm,armv8-pmuv3"; qemu_fdt_setprop(vms->fdt, "/pmu", "compatible", compat, sizeof(compat)); qemu_fdt_setprop_cells(vms->fdt, "/pmu", "interrupts", GIC_FDT_IRQ_TYPE_PPI, VIRTUAL_PMU_IRQ, irqflags); } } | 18,878 |
0 | static int usb_ohci_initfn_pci(struct PCIDevice *dev) { OHCIPCIState *ohci = DO_UPCAST(OHCIPCIState, pci_dev, dev); int num_ports = 3; pci_config_set_vendor_id(ohci->pci_dev.config, PCI_VENDOR_ID_APPLE); pci_config_set_device_id(ohci->pci_dev.config, PCI_DEVICE_ID_APPLE_IPID_USB); ohci->pci_dev.config[PCI_CLASS_PROG] = 0x10; /* OHCI */ pci_config_set_class(ohci->pci_dev.config, PCI_CLASS_SERIAL_USB); /* TODO: RST# value should be 0. */ ohci->pci_dev.config[PCI_INTERRUPT_PIN] = 0x01; /* interrupt pin 1 */ usb_ohci_init(&ohci->state, &dev->qdev, num_ports, 0); ohci->state.irq = ohci->pci_dev.irq[0]; /* TODO: avoid cast below by using dev */ pci_register_bar_simple(&ohci->pci_dev, 0, 256, 0, ohci->state.mem); return 0; } | 18,879 |
0 | static int nbd_negotiate_read(QIOChannel *ioc, void *buffer, size_t size) { ssize_t ret; guint watch; assert(qemu_in_coroutine()); /* Negotiation are always in main loop. */ watch = qio_channel_add_watch(ioc, G_IO_IN, nbd_negotiate_continue, qemu_coroutine_self(), NULL); ret = read_sync(ioc, buffer, size, NULL); g_source_remove(watch); return ret; } | 18,880 |
0 | static inline void sdhci_reset_write(SDHCIState *s, uint8_t value) { switch (value) { case SDHC_RESET_ALL: DEVICE_GET_CLASS(s)->reset(DEVICE(s)); break; case SDHC_RESET_CMD: s->prnsts &= ~SDHC_CMD_INHIBIT; s->norintsts &= ~SDHC_NIS_CMDCMP; break; case SDHC_RESET_DATA: s->data_count = 0; s->prnsts &= ~(SDHC_SPACE_AVAILABLE | SDHC_DATA_AVAILABLE | SDHC_DOING_READ | SDHC_DOING_WRITE | SDHC_DATA_INHIBIT | SDHC_DAT_LINE_ACTIVE); s->blkgap &= ~(SDHC_STOP_AT_GAP_REQ | SDHC_CONTINUE_REQ); s->stopped_state = sdhc_not_stopped; s->norintsts &= ~(SDHC_NIS_WBUFRDY | SDHC_NIS_RBUFRDY | SDHC_NIS_DMA | SDHC_NIS_TRSCMP | SDHC_NIS_BLKGAP); break; } } | 18,881 |
0 | static void print_report(OutputFile *output_files, OutputStream *ost_table, int nb_ostreams, int is_last_report, int64_t timer_start) { char buf[1024]; OutputStream *ost; AVFormatContext *oc; int64_t total_size; AVCodecContext *enc; int frame_number, vid, i; double bitrate; int64_t pts = INT64_MAX; static int64_t last_time = -1; static int qp_histogram[52]; int hours, mins, secs, us; if (!is_last_report) { int64_t cur_time; /* display the report every 0.5 seconds */ cur_time = av_gettime(); if (last_time == -1) { last_time = cur_time; return; } if ((cur_time - last_time) < 500000) return; last_time = cur_time; } oc = output_files[0].ctx; total_size = avio_size(oc->pb); if(total_size<0) // FIXME improve avio_size() so it works with non seekable output too total_size= avio_tell(oc->pb); buf[0] = '\0'; vid = 0; for(i=0;i<nb_ostreams;i++) { float q = -1; ost = &ost_table[i]; enc = ost->st->codec; if (!ost->st->stream_copy && enc->coded_frame) q = enc->coded_frame->quality/(float)FF_QP2LAMBDA; if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) { snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q); } if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) { float t = (av_gettime()-timer_start) / 1000000.0; frame_number = ost->frame_number; snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3d q=%3.1f ", frame_number, (t>1)?(int)(frame_number/t+0.5) : 0, q); if(is_last_report) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L"); if(qp_hist){ int j; int qp = lrintf(q); if(qp>=0 && qp<FF_ARRAY_ELEMS(qp_histogram)) qp_histogram[qp]++; for(j=0; j<32; j++) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log(qp_histogram[j]+1)/log(2))); } if (enc->flags&CODEC_FLAG_PSNR){ int j; double error, error_sum=0; double scale, scale_sum=0; char type[3]= {'Y','U','V'}; snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR="); for(j=0; j<3; j++){ if(is_last_report){ error= enc->error[j]; scale= enc->width*enc->height*255.0*255.0*frame_number; }else{ error= enc->coded_frame->error[j]; scale= enc->width*enc->height*255.0*255.0; } if(j) scale/=4; error_sum += error; scale_sum += scale; snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], psnr(error/scale)); } snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum/scale_sum)); } vid = 1; } /* compute min output value */ pts = FFMIN(pts, av_rescale_q(ost->st->pts.val, ost->st->time_base, AV_TIME_BASE_Q)); } secs = pts / AV_TIME_BASE; us = pts % AV_TIME_BASE; mins = secs / 60; secs %= 60; hours = mins / 60; mins %= 60; bitrate = pts ? total_size * 8 / (pts / 1000.0) : 0; snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "size=%8.0fkB time=", total_size / 1024.0); snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%02d:%02d:%02d.%02d ", hours, mins, secs, (100 * us) / AV_TIME_BASE); snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "bitrate=%6.1fkbits/s", bitrate); if (nb_frames_dup || nb_frames_drop) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d", nb_frames_dup, nb_frames_drop); av_log(NULL, is_last_report ? AV_LOG_WARNING : AV_LOG_INFO, "%s \r", buf); fflush(stderr); if (is_last_report) { int64_t raw= audio_size + video_size + extra_size; av_log(NULL, AV_LOG_INFO, "\n"); av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB global headers:%1.0fkB muxing overhead %f%%\n", video_size/1024.0, audio_size/1024.0, extra_size/1024.0, 100.0*(total_size - raw)/raw ); } } | 18,882 |
0 | static int ohci_bus_start(OHCIState *ohci) { ohci->eof_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, ohci_frame_boundary, ohci); if (ohci->eof_timer == NULL) { trace_usb_ohci_bus_eof_timer_failed(ohci->name); ohci_die(ohci); return 0; } trace_usb_ohci_start(ohci->name); ohci_sof(ohci); return 1; } | 18,884 |
0 | static void arm_thistimer_write(void *opaque, target_phys_addr_t addr, uint64_t value, unsigned size) { arm_mptimer_state *s = (arm_mptimer_state *)opaque; int id = get_current_cpu(s); timerblock_write(&s->timerblock[id * 2], addr, value, size); } | 18,885 |
0 | long do_rt_sigreturn(CPUX86State *env) { abi_ulong frame_addr; struct rt_sigframe *frame; sigset_t set; int eax; frame_addr = env->regs[R_ESP] - 4; if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) goto badframe; target_to_host_sigset(&set, &frame->uc.tuc_sigmask); sigprocmask(SIG_SETMASK, &set, NULL); if (restore_sigcontext(env, &frame->uc.tuc_mcontext, &eax)) goto badframe; if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe, uc.tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT) goto badframe; unlock_user_struct(frame, frame_addr, 0); return eax; badframe: unlock_user_struct(frame, frame_addr, 0); force_sig(TARGET_SIGSEGV); return 0; } | 18,886 |
0 | int avcodec_copy_context(AVCodecContext *dest, const AVCodecContext *src) { const AVCodec *orig_codec = dest->codec; uint8_t *orig_priv_data = dest->priv_data; if (avcodec_is_open(dest)) { // check that the dest context is uninitialized av_log(dest, AV_LOG_ERROR, "Tried to copy AVCodecContext %p into already-initialized %p\n", src, dest); return AVERROR(EINVAL); } av_opt_free(dest); av_free(dest->priv_data); memcpy(dest, src, sizeof(*dest)); dest->priv_data = orig_priv_data; dest->codec = orig_codec; /* set values specific to opened codecs back to their default state */ dest->slice_offset = NULL; dest->hwaccel = NULL; dest->internal = NULL; /* reallocate values that should be allocated separately */ dest->rc_eq = NULL; dest->extradata = NULL; dest->intra_matrix = NULL; dest->inter_matrix = NULL; dest->rc_override = NULL; dest->subtitle_header = NULL; if (src->rc_eq) { dest->rc_eq = av_strdup(src->rc_eq); if (!dest->rc_eq) return AVERROR(ENOMEM); } #define alloc_and_copy_or_fail(obj, size, pad) \ if (src->obj && size > 0) { \ dest->obj = av_malloc(size + pad); \ if (!dest->obj) \ goto fail; \ memcpy(dest->obj, src->obj, size); \ if (pad) \ memset(((uint8_t *) dest->obj) + size, 0, pad); \ } alloc_and_copy_or_fail(extradata, src->extradata_size, FF_INPUT_BUFFER_PADDING_SIZE); alloc_and_copy_or_fail(intra_matrix, 64 * sizeof(int16_t), 0); alloc_and_copy_or_fail(inter_matrix, 64 * sizeof(int16_t), 0); alloc_and_copy_or_fail(rc_override, src->rc_override_count * sizeof(*src->rc_override), 0); alloc_and_copy_or_fail(subtitle_header, src->subtitle_header_size, 1); dest->subtitle_header_size = src->subtitle_header_size; #undef alloc_and_copy_or_fail return 0; fail: av_freep(&dest->rc_override); av_freep(&dest->intra_matrix); av_freep(&dest->inter_matrix); av_freep(&dest->extradata); av_freep(&dest->rc_eq); return AVERROR(ENOMEM); } | 18,887 |
0 | target_ulong helper_rdhwr_synci_step(CPUMIPSState *env) { if ((env->hflags & MIPS_HFLAG_CP0) || (env->CP0_HWREna & (1 << 1))) return env->SYNCI_Step; else do_raise_exception(env, EXCP_RI, GETPC()); return 0; } | 18,888 |
0 | static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque) { BlockDriverAIOCBCoroutine *acb = opaque; BlockDriverState *bs = acb->common.bs; acb->req.error = bdrv_co_discard(bs, acb->req.sector, acb->req.nb_sectors); acb->bh = qemu_bh_new(bdrv_co_em_bh, acb); qemu_bh_schedule(acb->bh); } | 18,889 |
0 | POWERPC_FAMILY(POWER7)(ObjectClass *oc, void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); dc->fw_name = "PowerPC,POWER7"; dc->desc = "POWER7"; dc->props = powerpc_servercpu_properties; pcc->pvr_match = ppc_pvr_match_power7; pcc->pcr_mask = PCR_COMPAT_2_05 | PCR_COMPAT_2_06; pcc->init_proc = init_proc_POWER7; pcc->check_pow = check_pow_nocheck; pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | PPC_FLOAT_FRSQRTES | PPC_FLOAT_STFIWX | PPC_FLOAT_EXT | PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_64B | PPC_64H | PPC_64BX | PPC_ALTIVEC | PPC_SEGMENT_64B | PPC_SLBI | PPC_POPCNTB | PPC_POPCNTWD; pcc->insns_flags2 = PPC2_VSX | PPC2_DFP | PPC2_DBRX | PPC2_ISA205 | PPC2_PERM_ISA206 | PPC2_DIVE_ISA206 | PPC2_ATOMIC_ISA206 | PPC2_FP_CVT_ISA206 | PPC2_FP_TST_ISA206 | PPC2_FP_CVT_S64; pcc->msr_mask = (1ull << MSR_SF) | (1ull << MSR_VR) | (1ull << MSR_VSX) | (1ull << MSR_EE) | (1ull << MSR_PR) | (1ull << MSR_FP) | (1ull << MSR_ME) | (1ull << MSR_FE0) | (1ull << MSR_SE) | (1ull << MSR_DE) | (1ull << MSR_FE1) | (1ull << MSR_IR) | (1ull << MSR_DR) | (1ull << MSR_PMM) | (1ull << MSR_RI) | (1ull << MSR_LE); pcc->mmu_model = POWERPC_MMU_2_06; #if defined(CONFIG_SOFTMMU) pcc->handle_mmu_fault = ppc_hash64_handle_mmu_fault; pcc->sps = &POWER7_POWER8_sps; #endif pcc->excp_model = POWERPC_EXCP_POWER7; pcc->bus_model = PPC_FLAGS_INPUT_POWER7; pcc->bfd_mach = bfd_mach_ppc64; pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE | POWERPC_FLAG_BE | POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK | POWERPC_FLAG_CFAR | POWERPC_FLAG_VSX; pcc->l1_dcache_size = 0x8000; pcc->l1_icache_size = 0x8000; pcc->interrupts_big_endian = ppc_cpu_interrupts_big_endian_lpcr; } | 18,890 |
0 | static void configure_rtc(QemuOpts *opts) { const char *value; value = qemu_opt_get(opts, "base"); if (value) { if (!strcmp(value, "utc")) { rtc_utc = 1; } else if (!strcmp(value, "localtime")) { rtc_utc = 0; } else { configure_rtc_date_offset(value, 0); } } value = qemu_opt_get(opts, "clock"); if (value) { if (!strcmp(value, "host")) { rtc_clock = QEMU_CLOCK_HOST; } else if (!strcmp(value, "rt")) { rtc_clock = QEMU_CLOCK_REALTIME; } else if (!strcmp(value, "vm")) { rtc_clock = QEMU_CLOCK_VIRTUAL; } else { fprintf(stderr, "qemu: invalid option value '%s'\n", value); exit(1); } } value = qemu_opt_get(opts, "driftfix"); if (value) { if (!strcmp(value, "slew")) { static GlobalProperty slew_lost_ticks[] = { { .driver = "mc146818rtc", .property = "lost_tick_policy", .value = "slew", }, { /* end of list */ } }; qdev_prop_register_global_list(slew_lost_ticks); } else if (!strcmp(value, "none")) { /* discard is default */ } else { fprintf(stderr, "qemu: invalid option value '%s'\n", value); exit(1); } } } | 18,894 |
0 | static inline void t_gen_zext(TCGv d, TCGv s, int size) { if (size == 1) tcg_gen_ext8u_i32(d, s); else if (size == 2) tcg_gen_ext16u_i32(d, s); else if (GET_TCGV(d) != GET_TCGV(s)) tcg_gen_mov_tl(d, s); } | 18,895 |
0 | static ssize_t virtio_net_receive(VLANClientState *nc, const uint8_t *buf, size_t size) { VirtIONet *n = DO_UPCAST(NICState, nc, nc)->opaque; struct virtio_net_hdr_mrg_rxbuf *mhdr = NULL; size_t hdr_len, offset, i; if (!virtio_net_can_receive(&n->nic->nc)) return -1; /* hdr_len refers to the header we supply to the guest */ hdr_len = n->mergeable_rx_bufs ? sizeof(struct virtio_net_hdr_mrg_rxbuf) : sizeof(struct virtio_net_hdr); if (!virtio_net_has_buffers(n, size + hdr_len)) return 0; if (!receive_filter(n, buf, size)) return size; offset = i = 0; while (offset < size) { VirtQueueElement elem; int len, total; struct iovec sg[VIRTQUEUE_MAX_SIZE]; total = 0; if ((i != 0 && !n->mergeable_rx_bufs) || virtqueue_pop(n->rx_vq, &elem) == 0) { if (i == 0) return -1; fprintf(stderr, "virtio-net truncating packet: " "offset %zd, size %zd, hdr_len %zd\n", offset, size, hdr_len); exit(1); } if (elem.in_num < 1) { fprintf(stderr, "virtio-net receive queue contains no in buffers\n"); exit(1); } if (!n->mergeable_rx_bufs && elem.in_sg[0].iov_len != hdr_len) { fprintf(stderr, "virtio-net header not in first element\n"); exit(1); } memcpy(&sg, &elem.in_sg[0], sizeof(sg[0]) * elem.in_num); if (i == 0) { if (n->mergeable_rx_bufs) mhdr = (struct virtio_net_hdr_mrg_rxbuf *)sg[0].iov_base; offset += receive_header(n, sg, elem.in_num, buf + offset, size - offset, hdr_len); total += hdr_len; } /* copy in packet. ugh */ len = iov_from_buf(sg, elem.in_num, buf + offset, size - offset); total += len; /* signal other side */ virtqueue_fill(n->rx_vq, &elem, total, i++); offset += len; } if (mhdr) mhdr->num_buffers = i; virtqueue_flush(n->rx_vq, i); virtio_notify(&n->vdev, n->rx_vq); return size; } | 18,896 |
0 | static InputEvent *qapi_clone_InputEvent(InputEvent *src) { QmpOutputVisitor *qov; QmpInputVisitor *qiv; Visitor *ov, *iv; QObject *obj; InputEvent *dst = NULL; qov = qmp_output_visitor_new(); ov = qmp_output_get_visitor(qov); visit_type_InputEvent(ov, NULL, &src, &error_abort); obj = qmp_output_get_qobject(qov); qmp_output_visitor_cleanup(qov); if (!obj) { return NULL; } qiv = qmp_input_visitor_new(obj, false); iv = qmp_input_get_visitor(qiv); visit_type_InputEvent(iv, NULL, &dst, &error_abort); qmp_input_visitor_cleanup(qiv); qobject_decref(obj); return dst; } | 18,897 |
0 | static void stop(DBDMA_channel *ch) { ch->regs[DBDMA_STATUS] &= cpu_to_be32(~(ACTIVE|DEAD|FLUSH)); /* the stop command does not increment command pointer */ } | 18,899 |
0 | void qmp_drive_mirror(const char *device, const char *target, bool has_format, const char *format, enum MirrorSyncMode sync, bool has_mode, enum NewImageMode mode, bool has_speed, int64_t speed, bool has_granularity, uint32_t granularity, bool has_buf_size, int64_t buf_size, bool has_on_source_error, BlockdevOnError on_source_error, bool has_on_target_error, BlockdevOnError on_target_error, Error **errp) { BlockDriverState *bs; BlockDriverState *source, *target_bs; BlockDriver *proto_drv; BlockDriver *drv = NULL; Error *local_err = NULL; int flags; uint64_t size; int ret; if (!has_speed) { speed = 0; } if (!has_on_source_error) { on_source_error = BLOCKDEV_ON_ERROR_REPORT; } if (!has_on_target_error) { on_target_error = BLOCKDEV_ON_ERROR_REPORT; } if (!has_mode) { mode = NEW_IMAGE_MODE_ABSOLUTE_PATHS; } if (!has_granularity) { granularity = 0; } if (!has_buf_size) { buf_size = DEFAULT_MIRROR_BUF_SIZE; } if (granularity != 0 && (granularity < 512 || granularity > 1048576 * 64)) { error_set(errp, QERR_INVALID_PARAMETER, device); return; } if (granularity & (granularity - 1)) { error_set(errp, QERR_INVALID_PARAMETER, device); return; } bs = bdrv_find(device); if (!bs) { error_set(errp, QERR_DEVICE_NOT_FOUND, device); return; } if (!bdrv_is_inserted(bs)) { error_set(errp, QERR_DEVICE_HAS_NO_MEDIUM, device); return; } if (!has_format) { format = mode == NEW_IMAGE_MODE_EXISTING ? NULL : bs->drv->format_name; } if (format) { drv = bdrv_find_format(format); if (!drv) { error_set(errp, QERR_INVALID_BLOCK_FORMAT, format); return; } } if (bdrv_in_use(bs)) { error_set(errp, QERR_DEVICE_IN_USE, device); return; } flags = bs->open_flags | BDRV_O_RDWR; source = bs->backing_hd; if (!source && sync == MIRROR_SYNC_MODE_TOP) { sync = MIRROR_SYNC_MODE_FULL; } proto_drv = bdrv_find_protocol(target); if (!proto_drv) { error_set(errp, QERR_INVALID_BLOCK_FORMAT, format); return; } bdrv_get_geometry(bs, &size); size *= 512; if (sync == MIRROR_SYNC_MODE_FULL && mode != NEW_IMAGE_MODE_EXISTING) { /* create new image w/o backing file */ assert(format && drv); bdrv_img_create(target, format, NULL, NULL, NULL, size, flags, &local_err, false); } else { switch (mode) { case NEW_IMAGE_MODE_EXISTING: ret = 0; break; case NEW_IMAGE_MODE_ABSOLUTE_PATHS: /* create new image with backing file */ bdrv_img_create(target, format, source->filename, source->drv->format_name, NULL, size, flags, &local_err, false); break; default: abort(); } } if (error_is_set(&local_err)) { error_propagate(errp, local_err); return; } /* Mirroring takes care of copy-on-write using the source's backing * file. */ target_bs = bdrv_new(""); ret = bdrv_open(target_bs, target, NULL, flags | BDRV_O_NO_BACKING, drv); if (ret < 0) { bdrv_delete(target_bs); error_setg_file_open(errp, -ret, target); return; } mirror_start(bs, target_bs, speed, granularity, buf_size, sync, on_source_error, on_target_error, block_job_cb, bs, &local_err); if (local_err != NULL) { bdrv_delete(target_bs); error_propagate(errp, local_err); return; } /* Grab a reference so hotplug does not delete the BlockDriverState from * underneath us. */ drive_get_ref(drive_get_by_blockdev(bs)); } | 18,900 |
0 | static void gen_delayed_conditional_jump(DisasContext * ctx) { int l1; TCGv ds; l1 = gen_new_label(); ds = tcg_temp_new(); tcg_gen_andi_i32(ds, cpu_flags, DELAY_SLOT_TRUE); tcg_gen_brcondi_i32(TCG_COND_NE, ds, 0, l1); gen_goto_tb(ctx, 1, ctx->pc + 2); gen_set_label(l1); tcg_gen_andi_i32(cpu_flags, cpu_flags, ~DELAY_SLOT_TRUE); gen_jump(ctx); } | 18,901 |
0 | static void qmp_input_type_number(Visitor *v, const char *name, double *obj, Error **errp) { QmpInputVisitor *qiv = to_qiv(v); QObject *qobj = qmp_input_get_object(qiv, name, true, errp); QInt *qint; QFloat *qfloat; if (!qobj) { return; } qint = qobject_to_qint(qobj); if (qint) { *obj = qint_get_int(qobject_to_qint(qobj)); return; } qfloat = qobject_to_qfloat(qobj); if (qfloat) { *obj = qfloat_get_double(qobject_to_qfloat(qobj)); return; } error_setg(errp, QERR_INVALID_PARAMETER_TYPE, name ? name : "null", "number"); } | 18,902 |
0 | static void test_dynamic_globalprop(void) { g_test_trap_subprocess("/qdev/properties/dynamic/global/subprocess", 0, 0); g_test_trap_assert_passed(); g_test_trap_assert_stderr_unmatched("*prop1*"); g_test_trap_assert_stderr_unmatched("*prop2*"); g_test_trap_assert_stderr("*Warning: \"-global dynamic-prop-type-bad.prop3=103\" not used\n*"); g_test_trap_assert_stderr_unmatched("*prop4*"); g_test_trap_assert_stderr("*Warning: \"-global nohotplug-type.prop5=105\" not used\n*"); g_test_trap_assert_stderr("*Warning: \"-global nondevice-type.prop6=106\" not used\n*"); g_test_trap_assert_stdout(""); } | 18,903 |
0 | static void audio_init (void) { size_t i; int done = 0; const char *drvname; VMChangeStateEntry *e; AudioState *s = &glob_audio_state; if (s->drv) { return; } QLIST_INIT (&s->hw_head_out); QLIST_INIT (&s->hw_head_in); QLIST_INIT (&s->cap_head); atexit (audio_atexit); s->ts = qemu_new_timer (vm_clock, audio_timer, s); if (!s->ts) { hw_error("Could not create audio timer\n"); } audio_process_options ("AUDIO", audio_options); s->nb_hw_voices_out = conf.fixed_out.nb_voices; s->nb_hw_voices_in = conf.fixed_in.nb_voices; if (s->nb_hw_voices_out <= 0) { dolog ("Bogus number of playback voices %d, setting to 1\n", s->nb_hw_voices_out); s->nb_hw_voices_out = 1; } if (s->nb_hw_voices_in <= 0) { dolog ("Bogus number of capture voices %d, setting to 0\n", s->nb_hw_voices_in); s->nb_hw_voices_in = 0; } { int def; drvname = audio_get_conf_str ("QEMU_AUDIO_DRV", NULL, &def); } if (drvname) { int found = 0; for (i = 0; i < ARRAY_SIZE (drvtab); i++) { if (!strcmp (drvname, drvtab[i]->name)) { done = !audio_driver_init (s, drvtab[i]); found = 1; break; } } if (!found) { dolog ("Unknown audio driver `%s'\n", drvname); dolog ("Run with -audio-help to list available drivers\n"); } } if (!done) { for (i = 0; !done && i < ARRAY_SIZE (drvtab); i++) { if (drvtab[i]->can_be_default) { done = !audio_driver_init (s, drvtab[i]); } } } if (!done) { done = !audio_driver_init (s, &no_audio_driver); if (!done) { hw_error("Could not initialize audio subsystem\n"); } else { dolog ("warning: Using timer based audio emulation\n"); } } if (conf.period.hertz <= 0) { if (conf.period.hertz < 0) { dolog ("warning: Timer period is negative - %d " "treating as zero\n", conf.period.hertz); } conf.period.ticks = 1; } else { conf.period.ticks = muldiv64 (1, get_ticks_per_sec (), conf.period.hertz); } e = qemu_add_vm_change_state_handler (audio_vm_change_state_handler, s); if (!e) { dolog ("warning: Could not register change state handler\n" "(Audio can continue looping even after stopping the VM)\n"); } QLIST_INIT (&s->card_head); vmstate_register (NULL, 0, &vmstate_audio, s); } | 18,904 |
0 | void build_memory_hotplug_aml(Aml *table, uint32_t nr_mem, uint16_t io_base, uint16_t io_len, const char *res_root, const char *event_handler_method) { int i; Aml *ifctx; Aml *method; Aml *sb_scope; Aml *mem_ctrl_dev; char *scan_path; char *mhp_res_path = g_strdup_printf("%s." MEMORY_HOTPLUG_DEVICE, res_root); mem_ctrl_dev = aml_device("%s", mhp_res_path); { Aml *crs; Aml *field; Aml *one = aml_int(1); Aml *zero = aml_int(0); Aml *ret_val = aml_local(0); Aml *slot_arg0 = aml_arg(0); Aml *slots_nr = aml_name(MEMORY_SLOTS_NUMBER); Aml *ctrl_lock = aml_name(MEMORY_SLOT_LOCK); Aml *slot_selector = aml_name(MEMORY_SLOT_SLECTOR); aml_append(mem_ctrl_dev, aml_name_decl("_HID", aml_string("PNP0A06"))); aml_append(mem_ctrl_dev, aml_name_decl("_UID", aml_string("Memory hotplug resources"))); assert(nr_mem <= ACPI_MAX_RAM_SLOTS); aml_append(mem_ctrl_dev, aml_name_decl(MEMORY_SLOTS_NUMBER, aml_int(nr_mem)) ); crs = aml_resource_template(); aml_append(crs, aml_io(AML_DECODE16, io_base, io_base, 0, io_len) ); aml_append(mem_ctrl_dev, aml_name_decl("_CRS", crs)); aml_append(mem_ctrl_dev, aml_operation_region( MEMORY_HOTPLUG_IO_REGION, AML_SYSTEM_IO, aml_int(io_base), io_len) ); field = aml_field(MEMORY_HOTPLUG_IO_REGION, AML_DWORD_ACC, AML_NOLOCK, AML_PRESERVE); aml_append(field, /* read only */ aml_named_field(MEMORY_SLOT_ADDR_LOW, 32)); aml_append(field, /* read only */ aml_named_field(MEMORY_SLOT_ADDR_HIGH, 32)); aml_append(field, /* read only */ aml_named_field(MEMORY_SLOT_SIZE_LOW, 32)); aml_append(field, /* read only */ aml_named_field(MEMORY_SLOT_SIZE_HIGH, 32)); aml_append(field, /* read only */ aml_named_field(MEMORY_SLOT_PROXIMITY, 32)); aml_append(mem_ctrl_dev, field); field = aml_field(MEMORY_HOTPLUG_IO_REGION, AML_BYTE_ACC, AML_NOLOCK, AML_WRITE_AS_ZEROS); aml_append(field, aml_reserved_field(160 /* bits, Offset(20) */)); aml_append(field, /* 1 if enabled, read only */ aml_named_field(MEMORY_SLOT_ENABLED, 1)); aml_append(field, /*(read) 1 if has a insert event. (write) 1 to clear event */ aml_named_field(MEMORY_SLOT_INSERT_EVENT, 1)); aml_append(field, /* (read) 1 if has a remove event. 
(write) 1 to clear event */ aml_named_field(MEMORY_SLOT_REMOVE_EVENT, 1)); aml_append(field, /* initiates device eject, write only */ aml_named_field(MEMORY_SLOT_EJECT, 1)); aml_append(mem_ctrl_dev, field); field = aml_field(MEMORY_HOTPLUG_IO_REGION, AML_DWORD_ACC, AML_NOLOCK, AML_PRESERVE); aml_append(field, /* DIMM selector, write only */ aml_named_field(MEMORY_SLOT_SLECTOR, 32)); aml_append(field, /* _OST event code, write only */ aml_named_field(MEMORY_SLOT_OST_EVENT, 32)); aml_append(field, /* _OST status code, write only */ aml_named_field(MEMORY_SLOT_OST_STATUS, 32)); aml_append(mem_ctrl_dev, field); method = aml_method("_STA", 0, AML_NOTSERIALIZED); ifctx = aml_if(aml_equal(slots_nr, zero)); { aml_append(ifctx, aml_return(zero)); } aml_append(method, ifctx); /* present, functioning, decoding, not shown in UI */ aml_append(method, aml_return(aml_int(0xB))); aml_append(mem_ctrl_dev, method); aml_append(mem_ctrl_dev, aml_mutex(MEMORY_SLOT_LOCK, 0)); method = aml_method(MEMORY_SLOT_SCAN_METHOD, 0, AML_NOTSERIALIZED); { Aml *else_ctx; Aml *while_ctx; Aml *idx = aml_local(0); Aml *eject_req = aml_int(3); Aml *dev_chk = aml_int(1); ifctx = aml_if(aml_equal(slots_nr, zero)); { aml_append(ifctx, aml_return(zero)); } aml_append(method, ifctx); aml_append(method, aml_store(zero, idx)); aml_append(method, aml_acquire(ctrl_lock, 0xFFFF)); /* build AML that: * loops over all slots and Notifies DIMMs with * Device Check or Eject Request notifications if * slot has corresponding status bit set and clears * slot status. */ while_ctx = aml_while(aml_lless(idx, slots_nr)); { Aml *ins_evt = aml_name(MEMORY_SLOT_INSERT_EVENT); Aml *rm_evt = aml_name(MEMORY_SLOT_REMOVE_EVENT); aml_append(while_ctx, aml_store(idx, slot_selector)); ifctx = aml_if(aml_equal(ins_evt, one)); { aml_append(ifctx, aml_call2(MEMORY_SLOT_NOTIFY_METHOD, idx, dev_chk)); aml_append(ifctx, aml_store(one, ins_evt)); } aml_append(while_ctx, ifctx); else_ctx = aml_else(); ifctx = aml_if(aml_equal(rm_evt, one)); { aml_append(ifctx, aml_call2(MEMORY_SLOT_NOTIFY_METHOD, idx, eject_req)); aml_append(ifctx, aml_store(one, rm_evt)); } aml_append(else_ctx, ifctx); aml_append(while_ctx, else_ctx); aml_append(while_ctx, aml_add(idx, one, idx)); } aml_append(method, while_ctx); aml_append(method, aml_release(ctrl_lock)); aml_append(method, aml_return(one)); } aml_append(mem_ctrl_dev, method); method = aml_method(MEMORY_SLOT_STATUS_METHOD, 1, AML_NOTSERIALIZED); { Aml *slot_enabled = aml_name(MEMORY_SLOT_ENABLED); aml_append(method, aml_store(zero, ret_val)); aml_append(method, aml_acquire(ctrl_lock, 0xFFFF)); aml_append(method, aml_store(aml_to_integer(slot_arg0), slot_selector)); ifctx = aml_if(aml_equal(slot_enabled, one)); { aml_append(ifctx, aml_store(aml_int(0xF), ret_val)); } aml_append(method, ifctx); aml_append(method, aml_release(ctrl_lock)); aml_append(method, aml_return(ret_val)); } aml_append(mem_ctrl_dev, method); method = aml_method(MEMORY_SLOT_CRS_METHOD, 1, AML_SERIALIZED); { Aml *mr64 = aml_name("MR64"); Aml *mr32 = aml_name("MR32"); Aml *crs_tmpl = aml_resource_template(); Aml *minl = aml_name("MINL"); Aml *minh = aml_name("MINH"); Aml *maxl = aml_name("MAXL"); Aml *maxh = aml_name("MAXH"); Aml *lenl = aml_name("LENL"); Aml *lenh = aml_name("LENH"); aml_append(method, aml_acquire(ctrl_lock, 0xFFFF)); aml_append(method, aml_store(aml_to_integer(slot_arg0), slot_selector)); aml_append(crs_tmpl, aml_qword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED, AML_CACHEABLE, AML_READ_WRITE, 0, 0x0, 0xFFFFFFFFFFFFFFFEULL, 0, 
0xFFFFFFFFFFFFFFFFULL)); aml_append(method, aml_name_decl("MR64", crs_tmpl)); aml_append(method, aml_create_dword_field(mr64, aml_int(14), "MINL")); aml_append(method, aml_create_dword_field(mr64, aml_int(18), "MINH")); aml_append(method, aml_create_dword_field(mr64, aml_int(38), "LENL")); aml_append(method, aml_create_dword_field(mr64, aml_int(42), "LENH")); aml_append(method, aml_create_dword_field(mr64, aml_int(22), "MAXL")); aml_append(method, aml_create_dword_field(mr64, aml_int(26), "MAXH")); aml_append(method, aml_store(aml_name(MEMORY_SLOT_ADDR_HIGH), minh)); aml_append(method, aml_store(aml_name(MEMORY_SLOT_ADDR_LOW), minl)); aml_append(method, aml_store(aml_name(MEMORY_SLOT_SIZE_HIGH), lenh)); aml_append(method, aml_store(aml_name(MEMORY_SLOT_SIZE_LOW), lenl)); /* 64-bit math: MAX = MIN + LEN - 1 */ aml_append(method, aml_add(minl, lenl, maxl)); aml_append(method, aml_add(minh, lenh, maxh)); ifctx = aml_if(aml_lless(maxl, minl)); { aml_append(ifctx, aml_add(maxh, one, maxh)); } aml_append(method, ifctx); ifctx = aml_if(aml_lless(maxl, one)); { aml_append(ifctx, aml_subtract(maxh, one, maxh)); } aml_append(method, ifctx); aml_append(method, aml_subtract(maxl, one, maxl)); /* return 32-bit _CRS if addr/size is in low mem */ /* TODO: remove it since all hotplugged DIMMs are in high mem */ ifctx = aml_if(aml_equal(maxh, zero)); { crs_tmpl = aml_resource_template(); aml_append(crs_tmpl, aml_dword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED, AML_CACHEABLE, AML_READ_WRITE, 0, 0x0, 0xFFFFFFFE, 0, 0xFFFFFFFF)); aml_append(ifctx, aml_name_decl("MR32", crs_tmpl)); aml_append(ifctx, aml_create_dword_field(mr32, aml_int(10), "MIN")); aml_append(ifctx, aml_create_dword_field(mr32, aml_int(14), "MAX")); aml_append(ifctx, aml_create_dword_field(mr32, aml_int(22), "LEN")); aml_append(ifctx, aml_store(minl, aml_name("MIN"))); aml_append(ifctx, aml_store(maxl, aml_name("MAX"))); aml_append(ifctx, aml_store(lenl, aml_name("LEN"))); aml_append(ifctx, aml_release(ctrl_lock)); aml_append(ifctx, aml_return(mr32)); } aml_append(method, ifctx); aml_append(method, aml_release(ctrl_lock)); aml_append(method, aml_return(mr64)); } aml_append(mem_ctrl_dev, method); method = aml_method(MEMORY_SLOT_PROXIMITY_METHOD, 1, AML_NOTSERIALIZED); { Aml *proximity = aml_name(MEMORY_SLOT_PROXIMITY); aml_append(method, aml_acquire(ctrl_lock, 0xFFFF)); aml_append(method, aml_store(aml_to_integer(slot_arg0), slot_selector)); aml_append(method, aml_store(proximity, ret_val)); aml_append(method, aml_release(ctrl_lock)); aml_append(method, aml_return(ret_val)); } aml_append(mem_ctrl_dev, method); method = aml_method(MEMORY_SLOT_OST_METHOD, 4, AML_NOTSERIALIZED); { Aml *ost_evt = aml_name(MEMORY_SLOT_OST_EVENT); Aml *ost_status = aml_name(MEMORY_SLOT_OST_STATUS); aml_append(method, aml_acquire(ctrl_lock, 0xFFFF)); aml_append(method, aml_store(aml_to_integer(slot_arg0), slot_selector)); aml_append(method, aml_store(aml_arg(1), ost_evt)); aml_append(method, aml_store(aml_arg(2), ost_status)); aml_append(method, aml_release(ctrl_lock)); } aml_append(mem_ctrl_dev, method); method = aml_method(MEMORY_SLOT_EJECT_METHOD, 2, AML_NOTSERIALIZED); { Aml *eject = aml_name(MEMORY_SLOT_EJECT); aml_append(method, aml_acquire(ctrl_lock, 0xFFFF)); aml_append(method, aml_store(aml_to_integer(slot_arg0), slot_selector)); aml_append(method, aml_store(one, eject)); aml_append(method, aml_release(ctrl_lock)); } aml_append(mem_ctrl_dev, method); } aml_append(table, mem_ctrl_dev); sb_scope = aml_scope("_SB"); /* build memory devices */ for (i = 
0; i < nr_mem; i++) { Aml *dev; char *s; dev = aml_device("MP%02X", i); aml_append(dev, aml_name_decl("_UID", aml_string("0x%02X", i))); aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0C80"))); method = aml_method("_CRS", 0, AML_NOTSERIALIZED); s = g_strdup_printf("%s.%s", mhp_res_path, MEMORY_SLOT_CRS_METHOD); aml_append(method, aml_return(aml_call1(s, aml_name("_UID")))); g_free(s); aml_append(dev, method); method = aml_method("_STA", 0, AML_NOTSERIALIZED); s = g_strdup_printf("%s.%s", mhp_res_path, MEMORY_SLOT_STATUS_METHOD); aml_append(method, aml_return(aml_call1(s, aml_name("_UID")))); g_free(s); aml_append(dev, method); method = aml_method("_PXM", 0, AML_NOTSERIALIZED); s = g_strdup_printf("%s.%s", mhp_res_path, MEMORY_SLOT_PROXIMITY_METHOD); aml_append(method, aml_return(aml_call1(s, aml_name("_UID")))); g_free(s); aml_append(dev, method); method = aml_method("_OST", 3, AML_NOTSERIALIZED); s = g_strdup_printf("%s.%s", mhp_res_path, MEMORY_SLOT_OST_METHOD); aml_append(method, aml_return(aml_call4( s, aml_name("_UID"), aml_arg(0), aml_arg(1), aml_arg(2) ))); g_free(s); aml_append(dev, method); method = aml_method("_EJ0", 1, AML_NOTSERIALIZED); s = g_strdup_printf("%s.%s", mhp_res_path, MEMORY_SLOT_EJECT_METHOD); aml_append(method, aml_return(aml_call2( s, aml_name("_UID"), aml_arg(0)))); g_free(s); aml_append(dev, method); aml_append(sb_scope, dev); } /* build Method(MEMORY_SLOT_NOTIFY_METHOD, 2) { * If (LEqual(Arg0, 0x00)) {Notify(MP00, Arg1)} ... } */ method = aml_method(MEMORY_SLOT_NOTIFY_METHOD, 2, AML_NOTSERIALIZED); for (i = 0; i < nr_mem; i++) { ifctx = aml_if(aml_equal(aml_arg(0), aml_int(i))); aml_append(ifctx, aml_notify(aml_name("MP%.02X", i), aml_arg(1)) ); aml_append(method, ifctx); } aml_append(sb_scope, method); aml_append(table, sb_scope); method = aml_method(event_handler_method, 0, AML_NOTSERIALIZED); scan_path = g_strdup_printf("%s.%s", mhp_res_path, MEMORY_SLOT_SCAN_METHOD); aml_append(method, aml_call0(scan_path)); g_free(scan_path); aml_append(table, method); g_free(mhp_res_path); } | 18,906 |
0 | char *g_strdup_printf(const char *format, ...) { char ch, *s; size_t len; __coverity_string_null_sink__(format); __coverity_string_size_sink__(format); ch = *format; s = __coverity_alloc_nosize__(); __coverity_writeall__(s); __coverity_mark_as_afm_allocated__(s, AFM_free); return s; } | 18,907 |
0 | static int check_features_against_host(x86_def_t *guest_def) { x86_def_t host_def; uint32_t mask; int rv, i; struct model_features_t ft[] = { {&guest_def->features, &host_def.features, ~0, feature_name, 0x00000000}, {&guest_def->ext_features, &host_def.ext_features, ~CPUID_EXT_HYPERVISOR, ext_feature_name, 0x00000001}, {&guest_def->ext2_features, &host_def.ext2_features, ~PPRO_FEATURES, ext2_feature_name, 0x80000000}, {&guest_def->ext3_features, &host_def.ext3_features, ~CPUID_EXT3_SVM, ext3_feature_name, 0x80000001}}; cpu_x86_fill_host(&host_def); for (rv = 0, i = 0; i < ARRAY_SIZE(ft); ++i) for (mask = 1; mask; mask <<= 1) if (ft[i].check_feat & mask && *ft[i].guest_feat & mask && !(*ft[i].host_feat & mask)) { unavailable_host_feature(&ft[i], mask); rv = 1; } return rv; } | 18,908 |
0 | static int rkmpp_retrieve_frame(AVCodecContext *avctx, AVFrame *frame) { RKMPPDecodeContext *rk_context = avctx->priv_data; RKMPPDecoder *decoder = (RKMPPDecoder *)rk_context->decoder_ref->data; RKMPPFrameContext *framecontext = NULL; AVBufferRef *framecontextref = NULL; int ret; MppFrame mppframe = NULL; MppBuffer buffer = NULL; AVDRMFrameDescriptor *desc = NULL; AVDRMLayerDescriptor *layer = NULL; int retrycount = 0; int mode; MppFrameFormat mppformat; uint32_t drmformat; // on start of decoding, MPP can return -1, which is supposed to be expected // this is due to some internal MPP init which is not completed, that will // only happen in the first few frames queries, but should not be interpreted // as an error, Therefore we need to retry a couple times when we get -1 // in order to let it time to complete it's init, then we sleep a bit between retries. retry_get_frame: ret = decoder->mpi->decode_get_frame(decoder->ctx, &mppframe); if (ret != MPP_OK && ret != MPP_ERR_TIMEOUT && !decoder->first_frame) { if (retrycount < 5) { av_log(avctx, AV_LOG_DEBUG, "Failed to get a frame, retrying (code = %d, retrycount = %d)\n", ret, retrycount); usleep(10000); retrycount++; goto retry_get_frame; } else { av_log(avctx, AV_LOG_ERROR, "Failed to get a frame from MPP (code = %d)\n", ret); goto fail; } } if (mppframe) { // Check wether we have a special frame or not if (mpp_frame_get_info_change(mppframe)) { AVHWFramesContext *hwframes; av_log(avctx, AV_LOG_INFO, "Decoder noticed an info change (%dx%d), format=%d\n", (int)mpp_frame_get_width(mppframe), (int)mpp_frame_get_height(mppframe), (int)mpp_frame_get_fmt(mppframe)); avctx->width = mpp_frame_get_width(mppframe); avctx->height = mpp_frame_get_height(mppframe); decoder->mpi->control(decoder->ctx, MPP_DEC_SET_INFO_CHANGE_READY, NULL); decoder->first_frame = 1; av_buffer_unref(&decoder->frames_ref); decoder->frames_ref = av_hwframe_ctx_alloc(decoder->device_ref); if (!decoder->frames_ref) { ret = AVERROR(ENOMEM); goto fail; } mppformat = mpp_frame_get_fmt(mppframe); drmformat = rkmpp_get_frameformat(mppformat); hwframes = (AVHWFramesContext*)decoder->frames_ref->data; hwframes->format = AV_PIX_FMT_DRM_PRIME; hwframes->sw_format = drmformat == DRM_FORMAT_NV12 ? 
AV_PIX_FMT_NV12 : AV_PIX_FMT_NONE; hwframes->width = avctx->width; hwframes->height = avctx->height; ret = av_hwframe_ctx_init(decoder->frames_ref); if (ret < 0) goto fail; // here decoder is fully initialized, we need to feed it again with data ret = AVERROR(EAGAIN); goto fail; } else if (mpp_frame_get_eos(mppframe)) { av_log(avctx, AV_LOG_DEBUG, "Received a EOS frame.\n"); decoder->eos_reached = 1; ret = AVERROR_EOF; goto fail; } else if (mpp_frame_get_discard(mppframe)) { av_log(avctx, AV_LOG_DEBUG, "Received a discard frame.\n"); ret = AVERROR(EAGAIN); goto fail; } else if (mpp_frame_get_errinfo(mppframe)) { av_log(avctx, AV_LOG_ERROR, "Received a errinfo frame.\n"); ret = AVERROR_UNKNOWN; goto fail; } // here we should have a valid frame av_log(avctx, AV_LOG_DEBUG, "Received a frame.\n"); // setup general frame fields frame->format = AV_PIX_FMT_DRM_PRIME; frame->width = mpp_frame_get_width(mppframe); frame->height = mpp_frame_get_height(mppframe); frame->pts = mpp_frame_get_pts(mppframe); frame->color_range = mpp_frame_get_color_range(mppframe); frame->color_primaries = mpp_frame_get_color_primaries(mppframe); frame->color_trc = mpp_frame_get_color_trc(mppframe); frame->colorspace = mpp_frame_get_colorspace(mppframe); mode = mpp_frame_get_mode(mppframe); frame->interlaced_frame = ((mode & MPP_FRAME_FLAG_FIELD_ORDER_MASK) == MPP_FRAME_FLAG_DEINTERLACED); frame->top_field_first = ((mode & MPP_FRAME_FLAG_FIELD_ORDER_MASK) == MPP_FRAME_FLAG_TOP_FIRST); mppformat = mpp_frame_get_fmt(mppframe); drmformat = rkmpp_get_frameformat(mppformat); // now setup the frame buffer info buffer = mpp_frame_get_buffer(mppframe); if (buffer) { desc = av_mallocz(sizeof(AVDRMFrameDescriptor)); if (!desc) { ret = AVERROR(ENOMEM); goto fail; } desc->nb_objects = 1; desc->objects[0].fd = mpp_buffer_get_fd(buffer); desc->objects[0].size = mpp_buffer_get_size(buffer); desc->nb_layers = 1; layer = &desc->layers[0]; layer->format = drmformat; layer->nb_planes = 2; layer->planes[0].object_index = 0; layer->planes[0].offset = 0; layer->planes[0].pitch = mpp_frame_get_hor_stride(mppframe); layer->planes[1].object_index = 0; layer->planes[1].offset = layer->planes[0].pitch * mpp_frame_get_ver_stride(mppframe); layer->planes[1].pitch = layer->planes[0].pitch; // we also allocate a struct in buf[0] that will allow to hold additionnal information // for releasing properly MPP frames and decoder framecontextref = av_buffer_allocz(sizeof(*framecontext)); if (!framecontextref) { ret = AVERROR(ENOMEM); goto fail; } // MPP decoder needs to be closed only when all frames have been released. 
framecontext = (RKMPPFrameContext *)framecontextref->data; framecontext->decoder_ref = av_buffer_ref(rk_context->decoder_ref); framecontext->frame = mppframe; frame->data[0] = (uint8_t *)desc; frame->buf[0] = av_buffer_create((uint8_t *)desc, sizeof(*desc), rkmpp_release_frame, framecontextref, AV_BUFFER_FLAG_READONLY); if (!frame->buf[0]) { ret = AVERROR(ENOMEM); goto fail; } frame->hw_frames_ctx = av_buffer_ref(decoder->frames_ref); if (!frame->hw_frames_ctx) { ret = AVERROR(ENOMEM); goto fail; } decoder->first_frame = 0; return 0; } else { av_log(avctx, AV_LOG_ERROR, "Failed to retrieve the frame buffer, frame is dropped (code = %d)\n", ret); mpp_frame_deinit(&mppframe); } } else if (decoder->eos_reached) { return AVERROR_EOF; } else if (ret == MPP_ERR_TIMEOUT) { av_log(avctx, AV_LOG_DEBUG, "Timeout when trying to get a frame from MPP\n"); } return AVERROR(EAGAIN); fail: if (mppframe) mpp_frame_deinit(&mppframe); if (framecontext) av_buffer_unref(&framecontext->decoder_ref); if (framecontextref) av_buffer_unref(&framecontextref); if (desc) av_free(desc); return ret; } | 18,909 |
0 | START_TEST(qfloat_from_double_test) { QFloat *qf; const double value = -42.23423; qf = qfloat_from_double(value); fail_unless(qf != NULL); fail_unless(qf->value == value); fail_unless(qf->base.refcnt == 1); fail_unless(qobject_type(QOBJECT(qf)) == QTYPE_QFLOAT); // destroy doesn't exit yet g_free(qf); } | 18,910 |
0 | static int vnc_start_vencrypt_handshake(struct VncState *vs) { int ret; if ((ret = gnutls_handshake(vs->tls.session)) < 0) { if (!gnutls_error_is_fatal(ret)) { VNC_DEBUG("Handshake interrupted (blocking)\n"); if (!gnutls_record_get_direction(vs->tls.session)) qemu_set_fd_handler(vs->csock, vnc_tls_handshake_io, NULL, vs); else qemu_set_fd_handler(vs->csock, NULL, vnc_tls_handshake_io, vs); return 0; } VNC_DEBUG("Handshake failed %s\n", gnutls_strerror(ret)); vnc_client_error(vs); return -1; } if (vs->vd->tls.x509verify) { if (vnc_tls_validate_certificate(vs) < 0) { VNC_DEBUG("Client verification failed\n"); vnc_client_error(vs); return -1; } else { VNC_DEBUG("Client verification passed\n"); } } VNC_DEBUG("Handshake done, switching to TLS data mode\n"); qemu_set_fd_handler2(vs->csock, NULL, vnc_client_read, vnc_client_write, vs); start_auth_vencrypt_subauth(vs); return 0; } | 18,911 |
0 | static void spapr_powerdown_req(Notifier *n, void *opaque) { sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine()); struct rtas_error_log *hdr; struct rtas_event_log_v6 *v6hdr; struct rtas_event_log_v6_maina *maina; struct rtas_event_log_v6_mainb *mainb; struct rtas_event_log_v6_epow *epow; struct epow_log_full *new_epow; new_epow = g_malloc0(sizeof(*new_epow)); hdr = &new_epow->hdr; v6hdr = &new_epow->v6hdr; maina = &new_epow->maina; mainb = &new_epow->mainb; epow = &new_epow->epow; hdr->summary = cpu_to_be32(RTAS_LOG_VERSION_6 | RTAS_LOG_SEVERITY_EVENT | RTAS_LOG_DISPOSITION_NOT_RECOVERED | RTAS_LOG_OPTIONAL_PART_PRESENT | RTAS_LOG_TYPE_EPOW); hdr->extended_length = cpu_to_be32(sizeof(*new_epow) - sizeof(new_epow->hdr)); spapr_init_v6hdr(v6hdr); spapr_init_maina(maina, 3 /* Main-A, Main-B and EPOW */); mainb->hdr.section_id = cpu_to_be16(RTAS_LOG_V6_SECTION_ID_MAINB); mainb->hdr.section_length = cpu_to_be16(sizeof(*mainb)); /* FIXME: section version, subtype and creator id? */ mainb->subsystem_id = 0xa0; /* External environment */ mainb->event_severity = 0x00; /* Informational / non-error */ mainb->event_subtype = 0xd0; /* Normal shutdown */ epow->hdr.section_id = cpu_to_be16(RTAS_LOG_V6_SECTION_ID_EPOW); epow->hdr.section_length = cpu_to_be16(sizeof(*epow)); epow->hdr.section_version = 2; /* includes extended modifier */ /* FIXME: section subtype and creator id? */ epow->sensor_value = RTAS_LOG_V6_EPOW_ACTION_SYSTEM_SHUTDOWN; epow->event_modifier = RTAS_LOG_V6_EPOW_MODIFIER_NORMAL; epow->extended_modifier = RTAS_LOG_V6_EPOW_XMODIFIER_PARTITION_SPECIFIC; rtas_event_log_queue(RTAS_LOG_TYPE_EPOW, new_epow); qemu_irq_pulse(xics_get_qirq(XICS_FABRIC(spapr), rtas_event_log_to_irq(spapr, RTAS_LOG_TYPE_EPOW))); } | 18,912 |
0 | static int bdrv_open_common(BlockDriverState *bs, BlockDriverState *file, QDict *options, int flags, BlockDriver *drv, Error **errp) { int ret, open_flags; const char *filename; const char *node_name = NULL; Error *local_err = NULL; assert(drv != NULL); assert(bs->file == NULL); assert(options != NULL && bs->options != options); if (file != NULL) { filename = file->filename; } else { filename = qdict_get_try_str(options, "filename"); } if (drv->bdrv_needs_filename && !filename) { error_setg(errp, "The '%s' block driver requires a file name", drv->format_name); return -EINVAL; } trace_bdrv_open_common(bs, filename ?: "", flags, drv->format_name); node_name = qdict_get_try_str(options, "node-name"); bdrv_assign_node_name(bs, node_name, &local_err); if (local_err) { error_propagate(errp, local_err); return -EINVAL; } qdict_del(options, "node-name"); /* bdrv_open() with directly using a protocol as drv. This layer is already * opened, so assign it to bs (while file becomes a closed BlockDriverState) * and return immediately. */ if (file != NULL && drv->bdrv_file_open) { bdrv_swap(file, bs); return 0; } bs->open_flags = flags; bs->guest_block_size = 512; bs->request_alignment = 512; bs->zero_beyond_eof = true; open_flags = bdrv_open_flags(bs, flags); bs->read_only = !(open_flags & BDRV_O_RDWR); if (use_bdrv_whitelist && !bdrv_is_whitelisted(drv, bs->read_only)) { error_setg(errp, !bs->read_only && bdrv_is_whitelisted(drv, true) ? "Driver '%s' can only be used for read-only devices" : "Driver '%s' is not whitelisted", drv->format_name); return -ENOTSUP; } assert(bs->copy_on_read == 0); /* bdrv_new() and bdrv_close() make it so */ if (flags & BDRV_O_COPY_ON_READ) { if (!bs->read_only) { bdrv_enable_copy_on_read(bs); } else { error_setg(errp, "Can't use copy-on-read on read-only device"); return -EINVAL; } } if (filename != NULL) { pstrcpy(bs->filename, sizeof(bs->filename), filename); } else { bs->filename[0] = '\0'; } pstrcpy(bs->exact_filename, sizeof(bs->exact_filename), bs->filename); bs->drv = drv; bs->opaque = g_malloc0(drv->instance_size); bs->enable_write_cache = !!(flags & BDRV_O_CACHE_WB); /* Open the image, either directly or using a protocol */ if (drv->bdrv_file_open) { assert(file == NULL); assert(!drv->bdrv_needs_filename || filename != NULL); ret = drv->bdrv_file_open(bs, options, open_flags, &local_err); } else { if (file == NULL) { error_setg(errp, "Can't use '%s' as a block driver for the " "protocol level", drv->format_name); ret = -EINVAL; goto free_and_fail; } bs->file = file; ret = drv->bdrv_open(bs, options, open_flags, &local_err); } if (ret < 0) { if (local_err) { error_propagate(errp, local_err); } else if (bs->filename[0]) { error_setg_errno(errp, -ret, "Could not open '%s'", bs->filename); } else { error_setg_errno(errp, -ret, "Could not open image"); } goto free_and_fail; } if (bs->encrypted) { error_report("Encrypted images are deprecated"); error_printf("Support for them will be removed in a future release.\n" "You can use 'qemu-img convert' to convert your image" " to an unencrypted one.\n"); } ret = refresh_total_sectors(bs, bs->total_sectors); if (ret < 0) { error_setg_errno(errp, -ret, "Could not refresh total sector count"); goto free_and_fail; } bdrv_refresh_limits(bs, &local_err); if (local_err) { error_propagate(errp, local_err); ret = -EINVAL; goto free_and_fail; } assert(bdrv_opt_mem_align(bs) != 0); assert(bdrv_min_mem_align(bs) != 0); assert((bs->request_alignment != 0) || bs->sg); return 0; free_and_fail: bs->file = NULL; g_free(bs->opaque); 
bs->opaque = NULL; bs->drv = NULL; return ret; } | 18,913 |
0 | static uint32_t bonito_spciconf_readl(void *opaque, target_phys_addr_t addr) { PCIBonitoState *s = opaque; PCIDevice *d = PCI_DEVICE(s); PCIHostState *phb = PCI_HOST_BRIDGE(s->pcihost); uint32_t pciaddr; uint16_t status; DPRINTF("bonito_spciconf_readl "TARGET_FMT_plx"\n", addr); assert((addr & 0x3) == 0); pciaddr = bonito_sbridge_pciaddr(s, addr); if (pciaddr == 0xffffffff) { return 0xffffffff; } /* set the pci address in s->config_reg */ phb->config_reg = (pciaddr) | (1u << 31); /* clear PCI_STATUS_REC_MASTER_ABORT and PCI_STATUS_REC_TARGET_ABORT */ status = pci_get_word(d->config + PCI_STATUS); status &= ~(PCI_STATUS_REC_MASTER_ABORT | PCI_STATUS_REC_TARGET_ABORT); pci_set_word(d->config + PCI_STATUS, status); return pci_data_read(phb->bus, phb->config_reg, 4); } | 18,914 |
0 | static VirtIOSerialPort *find_port_by_name(char *name) { VirtIOSerial *vser; QLIST_FOREACH(vser, &vserdevices.devices, next) { VirtIOSerialPort *port; QTAILQ_FOREACH(port, &vser->ports, next) { if (!strcmp(port->name, name)) { return port; } } } return NULL; } | 18,917 |
0 | void op_mtc0_status (void) { uint32_t val, old; uint32_t mask = env->Status_rw_bitmask; /* No reverse endianness, no MDMX/DSP, no 64bit ops implemented. */ val = T0 & mask; old = env->CP0_Status; if (!(val & (1 << CP0St_EXL)) && !(val & (1 << CP0St_ERL)) && !(env->hflags & MIPS_HFLAG_DM) && (val & (1 << CP0St_UM))) env->hflags |= MIPS_HFLAG_UM; #ifdef TARGET_MIPS64 if ((env->hflags & MIPS_HFLAG_UM) && !(val & (1 << CP0St_PX)) && !(val & (1 << CP0St_UX))) env->hflags &= ~MIPS_HFLAG_64; #endif env->CP0_Status = (env->CP0_Status & ~mask) | val; if (loglevel & CPU_LOG_EXEC) CALL_FROM_TB2(do_mtc0_status_debug, old, val); CALL_FROM_TB1(cpu_mips_update_irq, env); RETURN(); } | 18,918 |
0 | static inline uint32_t ldl_phys_internal(target_phys_addr_t addr, enum device_endian endian) { uint8_t *ptr; uint32_t val; MemoryRegionSection *section; section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS); if (!(memory_region_is_ram(section->mr) || memory_region_is_romd(section->mr))) { /* I/O case */ addr = memory_region_section_addr(section, addr); val = io_mem_read(section->mr, addr, 4); #if defined(TARGET_WORDS_BIGENDIAN) if (endian == DEVICE_LITTLE_ENDIAN) { val = bswap32(val); } #else if (endian == DEVICE_BIG_ENDIAN) { val = bswap32(val); } #endif } else { /* RAM case */ ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK) + memory_region_section_addr(section, addr)); switch (endian) { case DEVICE_LITTLE_ENDIAN: val = ldl_le_p(ptr); break; case DEVICE_BIG_ENDIAN: val = ldl_be_p(ptr); break; default: val = ldl_p(ptr); break; } } return val; } | 18,919 |
0 | static int vtd_irte_get(IntelIOMMUState *iommu, uint16_t index, VTD_IRTE *entry) { dma_addr_t addr = 0x00; addr = iommu->intr_root + index * sizeof(*entry); if (dma_memory_read(&address_space_memory, addr, entry, sizeof(*entry))) { VTD_DPRINTF(GENERAL, "error: fail to access IR root at 0x%"PRIx64 " + %"PRIu16, iommu->intr_root, index); return -VTD_FR_IR_ROOT_INVAL; } if (!entry->present) { VTD_DPRINTF(GENERAL, "error: present flag not set in IRTE" " entry index %u value 0x%"PRIx64 " 0x%"PRIx64, index, le64_to_cpu(entry->data[1]), le64_to_cpu(entry->data[0])); return -VTD_FR_IR_ENTRY_P; } if (entry->__reserved_0 || entry->__reserved_1 || \ entry->__reserved_2) { VTD_DPRINTF(GENERAL, "error: IRTE entry index %"PRIu16 " reserved fields non-zero: 0x%"PRIx64 " 0x%"PRIx64, index, le64_to_cpu(entry->data[1]), le64_to_cpu(entry->data[0])); return -VTD_FR_IR_IRTE_RSVD; } /* * TODO: Check Source-ID corresponds to SVT (Source Validation * Type) bits */ return 0; } | 18,920 |
0 | static CharDriverState *qemu_chr_open_pty(void) { struct termios tty; int master_fd, slave_fd; if (openpty(&master_fd, &slave_fd, NULL, NULL, NULL) < 0) { return NULL; } /* Set raw attributes on the pty. */ cfmakeraw(&tty); tcsetattr(slave_fd, TCSAFLUSH, &tty); fprintf(stderr, "char device redirected to %s\n", ptsname(master_fd)); return qemu_chr_open_fd(master_fd, master_fd); } | 18,921 |
0 | static int xen_pt_long_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry, uint32_t *value, uint32_t valid_mask) { XenPTRegInfo *reg = cfg_entry->reg; uint32_t valid_emu_mask = 0; /* emulate long register */ valid_emu_mask = reg->emu_mask & valid_mask; *value = XEN_PT_MERGE_VALUE(*value, cfg_entry->data, ~valid_emu_mask); return 0; } | 18,922 |
0 | static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond, TCGReg dest, TCGReg c1, TCGArg c2, int c2const) { int cc; switch (cond) { case TCG_COND_GTU: case TCG_COND_GT: do_greater: /* The result of a compare has CC=2 for GT and CC=3 unused. ADD LOGICAL WITH CARRY considers (CC & 2) the carry bit. */ tgen_cmp(s, type, cond, c1, c2, c2const, true); tcg_out_movi(s, type, dest, 0); tcg_out_insn(s, RRE, ALCGR, dest, dest); return; case TCG_COND_GEU: do_geu: /* We need "real" carry semantics, so use SUBTRACT LOGICAL instead of COMPARE LOGICAL. This needs an extra move. */ tcg_out_mov(s, type, TCG_TMP0, c1); if (c2const) { tcg_out_movi(s, TCG_TYPE_I64, dest, 0); if (type == TCG_TYPE_I32) { tcg_out_insn(s, RIL, SLFI, TCG_TMP0, c2); } else { tcg_out_insn(s, RIL, SLGFI, TCG_TMP0, c2); } } else { if (type == TCG_TYPE_I32) { tcg_out_insn(s, RR, SLR, TCG_TMP0, c2); } else { tcg_out_insn(s, RRE, SLGR, TCG_TMP0, c2); } tcg_out_movi(s, TCG_TYPE_I64, dest, 0); } tcg_out_insn(s, RRE, ALCGR, dest, dest); return; case TCG_COND_LEU: case TCG_COND_LTU: case TCG_COND_LT: /* Swap operands so that we can use GEU/GTU/GT. */ if (c2const) { tcg_out_movi(s, type, TCG_TMP0, c2); c2 = c1; c2const = 0; c1 = TCG_TMP0; } else { TCGReg t = c1; c1 = c2; c2 = t; } if (cond == TCG_COND_LEU) { goto do_geu; } cond = tcg_swap_cond(cond); goto do_greater; case TCG_COND_NE: /* X != 0 is X > 0. */ if (c2const && c2 == 0) { cond = TCG_COND_GTU; goto do_greater; } break; case TCG_COND_EQ: /* X == 0 is X <= 0 is 0 >= X. */ if (c2const && c2 == 0) { tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, 0); c2 = c1; c2const = 0; c1 = TCG_TMP0; goto do_geu; } break; default: break; } cc = tgen_cmp(s, type, cond, c1, c2, c2const, false); if (facilities & FACILITY_LOAD_ON_COND) { /* Emit: d = 0, t = 1, d = (cc ? t : d). */ tcg_out_movi(s, TCG_TYPE_I64, dest, 0); tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, 1); tcg_out_insn(s, RRF, LOCGR, dest, TCG_TMP0, cc); } else { /* Emit: d = 1; if (cc) goto over; d = 0; over: */ tcg_out_movi(s, type, dest, 1); tcg_out_insn(s, RI, BRC, cc, (4 + 4) >> 1); tcg_out_movi(s, type, dest, 0); } } | 18,923 |
0 | static CharDriverState *qemu_chr_open_pty(const char *id, ChardevReturn *ret) { CharDriverState *chr; PtyCharDriver *s; int master_fd, slave_fd; char pty_name[PATH_MAX]; master_fd = qemu_openpty_raw(&slave_fd, pty_name); if (master_fd < 0) { return NULL; } close(slave_fd); chr = g_malloc0(sizeof(CharDriverState)); chr->filename = g_strdup_printf("pty:%s", pty_name); ret->pty = g_strdup(pty_name); ret->has_pty = true; fprintf(stderr, "char device redirected to %s (label %s)\n", pty_name, id); s = g_malloc0(sizeof(PtyCharDriver)); chr->opaque = s; chr->chr_write = pty_chr_write; chr->chr_update_read_handler = pty_chr_update_read_handler; chr->chr_close = pty_chr_close; chr->chr_add_watch = pty_chr_add_watch; chr->explicit_be_open = true; s->fd = io_channel_from_fd(master_fd); s->timer_tag = 0; return chr; } | 18,924 |
0 | static target_ulong h_set_dabr(PowerPCCPU *cpu, sPAPRMachineState *spapr, target_ulong opcode, target_ulong *args) { /* FIXME: actually implement this */ return H_HARDWARE; } | 18,925 |
0 | static uint32_t arm_timer_read(void *opaque, target_phys_addr_t offset) { arm_timer_state *s = (arm_timer_state *)opaque; switch (offset >> 2) { case 0: /* TimerLoad */ case 6: /* TimerBGLoad */ return s->limit; case 1: /* TimerValue */ return ptimer_get_count(s->timer); case 2: /* TimerControl */ return s->control; case 4: /* TimerRIS */ return s->int_level; case 5: /* TimerMIS */ if ((s->control & TIMER_CTRL_IE) == 0) return 0; return s->int_level; default: hw_error("%s: Bad offset %x\n", __func__, (int)offset); return 0; } } | 18,926 |
0 | char *string_output_get_string(StringOutputVisitor *sov) { char *string = g_string_free(sov->string, false); sov->string = NULL; return string; } | 18,927 |
0 | static void css_init(void) { QTAILQ_INIT(&channel_subsys.pending_crws); channel_subsys.sei_pending = false; channel_subsys.do_crw_mchk = true; channel_subsys.crws_lost = false; channel_subsys.chnmon_active = false; QTAILQ_INIT(&channel_subsys.io_adapters); QTAILQ_INIT(&channel_subsys.indicator_addresses); } | 18,928 |
0 | static void test_validate_struct(TestInputVisitorData *data, const void *unused) { TestStruct *p = NULL; Visitor *v; v = validate_test_init(data, "{ 'integer': -42, 'boolean': true, 'string': 'foo' }"); visit_type_TestStruct(v, NULL, &p, &error_abort); g_free(p->string); g_free(p); } | 18,929 |
0 | static uint32_t ecc_mem_readw(void *opaque, target_phys_addr_t addr) { printf("ECC: Unsupported read 0x" TARGET_FMT_plx " 0000\n", addr); return 0; } | 18,932 |
0 | static av_cold int libssh_authentication(LIBSSHContext *libssh, const char *user, const char *password) { int authorized = 0; int auth_methods; if (user) ssh_options_set(libssh->session, SSH_OPTIONS_USER, user); if (ssh_userauth_none(libssh->session, NULL) == SSH_AUTH_SUCCESS) return 0; auth_methods = ssh_userauth_list(libssh->session, NULL); if (auth_methods & SSH_AUTH_METHOD_PUBLICKEY) { if (libssh->priv_key) { ssh_string pub_key; ssh_private_key priv_key; int type; if (!ssh_try_publickey_from_file(libssh->session, libssh->priv_key, &pub_key, &type)) { priv_key = privatekey_from_file(libssh->session, libssh->priv_key, type, password); if (ssh_userauth_pubkey(libssh->session, NULL, pub_key, priv_key) == SSH_AUTH_SUCCESS) { av_log(libssh, AV_LOG_DEBUG, "Authentication successful with selected private key.\n"); authorized = 1; } } else { av_log(libssh, AV_LOG_DEBUG, "Invalid key is provided.\n"); return AVERROR(EACCES); } } else if (ssh_userauth_autopubkey(libssh->session, password) == SSH_AUTH_SUCCESS) { av_log(libssh, AV_LOG_DEBUG, "Authentication successful with auto selected key.\n"); authorized = 1; } } if (!authorized && (auth_methods & SSH_AUTH_METHOD_PASSWORD)) { if (ssh_userauth_password(libssh->session, NULL, password) == SSH_AUTH_SUCCESS) { av_log(libssh, AV_LOG_DEBUG, "Authentication successful with password.\n"); authorized = 1; } } if (!authorized) { av_log(libssh, AV_LOG_ERROR, "Authentication failed.\n"); return AVERROR(EACCES); } return 0; } | 18,933 |
0 | int gif_write(ByteIOContext *pb, AVImageInfo *info) { gif_image_write_header(pb, info->width, info->height, (uint32_t *)info->pict.data[1]); gif_image_write_image(pb, 0, 0, info->width, info->height, info->pict.data[0], info->pict.linesize[0], PIX_FMT_PAL8); put_byte(pb, 0x3b); put_flush_packet(pb); return 0; } | 18,934 |
0 | void monitor_init(CharDriverState *chr, int flags) { static int is_first_init = 1; Monitor *mon; if (is_first_init) { key_timer = qemu_new_timer(vm_clock, release_keys, NULL); is_first_init = 0; } mon = qemu_mallocz(sizeof(*mon)); mon->chr = chr; mon->flags = flags; if (flags & MONITOR_USE_READLINE) { mon->rs = readline_init(mon, monitor_find_completion); monitor_read_command(mon, 0); } qemu_chr_add_handlers(chr, monitor_can_read, monitor_read, monitor_event, mon); LIST_INSERT_HEAD(&mon_list, mon, entry); if (!cur_mon || (flags & MONITOR_IS_DEFAULT)) cur_mon = mon; } | 18,935 |
0 | static void dsound_write_sample (HWVoiceOut *hw, uint8_t *dst, int dst_len) { int src_len1 = dst_len; int src_len2 = 0; int pos = hw->rpos + dst_len; st_sample_t *src1 = hw->mix_buf + hw->rpos; st_sample_t *src2 = NULL; if (pos > hw->samples) { src_len1 = hw->samples - hw->rpos; src2 = hw->mix_buf; src_len2 = dst_len - src_len1; pos = src_len2; } if (src_len1) { hw->clip (dst, src1, src_len1); } if (src_len2) { dst = advance (dst, src_len1 << hw->info.shift); hw->clip (dst, src2, src_len2); } hw->rpos = pos % hw->samples; } | 18,936 |
0 | static void n8x0_init(QEMUMachineInitArgs *args, struct arm_boot_info *binfo, int model) { MemoryRegion *sysmem = get_system_memory(); struct n800_s *s = (struct n800_s *) g_malloc0(sizeof(*s)); int sdram_size = binfo->ram_size; s->mpu = omap2420_mpu_init(sysmem, sdram_size, args->cpu_model); /* Setup peripherals * * Believed external peripherals layout in the N810: * (spi bus 1) * tsc2005 * lcd_mipid * (spi bus 2) * Conexant cx3110x (WLAN) * optional: pc2400m (WiMAX) * (i2c bus 0) * TLV320AIC33 (audio codec) * TCM825x (camera by Toshiba) * lp5521 (clever LEDs) * tsl2563 (light sensor, hwmon, model 7, rev. 0) * lm8323 (keypad, manf 00, rev 04) * (i2c bus 1) * tmp105 (temperature sensor, hwmon) * menelaus (pm) * (somewhere on i2c - maybe N800-only) * tea5761 (FM tuner) * (serial 0) * GPS * (some serial port) * csr41814 (Bluetooth) */ n8x0_gpio_setup(s); n8x0_nand_setup(s); n8x0_i2c_setup(s); if (model == 800) n800_tsc_kbd_setup(s); else if (model == 810) { n810_tsc_setup(s); n810_kbd_setup(s); } n8x0_spi_setup(s); n8x0_dss_setup(s); n8x0_cbus_setup(s); n8x0_uart_setup(s); if (usb_enabled(false)) { n8x0_usb_setup(s); } if (args->kernel_filename) { /* Or at the linux loader. */ binfo->kernel_filename = args->kernel_filename; binfo->kernel_cmdline = args->kernel_cmdline; binfo->initrd_filename = args->initrd_filename; arm_load_kernel(s->mpu->cpu, binfo); qemu_register_reset(n8x0_boot_init, s); } if (option_rom[0].name && (args->boot_device[0] == 'n' || !args->kernel_filename)) { uint8_t nolo_tags[0x10000]; /* No, wait, better start at the ROM. */ s->mpu->cpu->env.regs[15] = OMAP2_Q2_BASE + 0x400000; /* This is intended for loading the `secondary.bin' program from * Nokia images (the NOLO bootloader). The entry point seems * to be at OMAP2_Q2_BASE + 0x400000. * * The `2nd.bin' files contain some kind of earlier boot code and * for them the entry point needs to be set to OMAP2_SRAM_BASE. * * The code above is for loading the `zImage' file from Nokia * images. */ load_image_targphys(option_rom[0].name, OMAP2_Q2_BASE + 0x400000, sdram_size - 0x400000); n800_setup_nolo_tags(nolo_tags); cpu_physical_memory_write(OMAP2_SRAM_BASE, nolo_tags, 0x10000); } } | 18,937 |
0 | build_fadt(GArray *table_data, GArray *linker, unsigned dsdt) { AcpiFadtDescriptorRev5_1 *fadt = acpi_data_push(table_data, sizeof(*fadt)); /* Hardware Reduced = 1 and use PSCI 0.2+ and with HVC */ fadt->flags = cpu_to_le32(1 << ACPI_FADT_F_HW_REDUCED_ACPI); fadt->arm_boot_flags = cpu_to_le16((1 << ACPI_FADT_ARM_USE_PSCI_G_0_2) | (1 << ACPI_FADT_ARM_PSCI_USE_HVC)); /* ACPI v5.1 (fadt->revision.fadt->minor_revision) */ fadt->minor_revision = 0x1; fadt->dsdt = cpu_to_le32(dsdt); /* DSDT address to be filled by Guest linker */ bios_linker_loader_add_pointer(linker, ACPI_BUILD_TABLE_FILE, ACPI_BUILD_TABLE_FILE, table_data, &fadt->dsdt, sizeof fadt->dsdt); build_header(linker, table_data, (void *)fadt, "FACP", sizeof(*fadt), 5, NULL, NULL); } | 18,938 |
0 | static int config_input_overlay(AVFilterLink *inlink) { AVFilterContext *ctx = inlink->dst; OverlayContext *over = inlink->dst->priv; char *expr; double var_values[VAR_VARS_NB], res; int ret; const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format); av_image_fill_max_pixsteps(over->overlay_pix_step, NULL, pix_desc); /* Finish the configuration by evaluating the expressions now when both inputs are configured. */ var_values[VAR_MAIN_W ] = var_values[VAR_MW] = ctx->inputs[MAIN ]->w; var_values[VAR_MAIN_H ] = var_values[VAR_MH] = ctx->inputs[MAIN ]->h; var_values[VAR_OVERLAY_W] = var_values[VAR_OW] = ctx->inputs[OVERLAY]->w; var_values[VAR_OVERLAY_H] = var_values[VAR_OH] = ctx->inputs[OVERLAY]->h; if ((ret = av_expr_parse_and_eval(&res, (expr = over->x_expr), var_names, var_values, NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0) goto fail; over->x = res; if ((ret = av_expr_parse_and_eval(&res, (expr = over->y_expr), var_names, var_values, NULL, NULL, NULL, NULL, NULL, 0, ctx))) goto fail; over->y = res; /* x may depend on y */ if ((ret = av_expr_parse_and_eval(&res, (expr = over->x_expr), var_names, var_values, NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0) goto fail; over->x = res; over->overlay_is_packed_rgb = ff_fill_rgba_map(over->overlay_rgba_map, inlink->format) >= 0; over->overlay_has_alpha = ff_fmt_is_in(inlink->format, alpha_pix_fmts); av_log(ctx, AV_LOG_VERBOSE, "main w:%d h:%d fmt:%s overlay x:%d y:%d w:%d h:%d fmt:%s\n", ctx->inputs[MAIN]->w, ctx->inputs[MAIN]->h, av_get_pix_fmt_name(ctx->inputs[MAIN]->format), over->x, over->y, ctx->inputs[OVERLAY]->w, ctx->inputs[OVERLAY]->h, av_get_pix_fmt_name(ctx->inputs[OVERLAY]->format)); if (over->x < 0 || over->y < 0 || over->x + var_values[VAR_OVERLAY_W] > var_values[VAR_MAIN_W] || over->y + var_values[VAR_OVERLAY_H] > var_values[VAR_MAIN_H]) { av_log(ctx, AV_LOG_ERROR, "Overlay area (%d,%d)<->(%d,%d) not within the main area (0,0)<->(%d,%d) or zero-sized\n", over->x, over->y, (int)(over->x + var_values[VAR_OVERLAY_W]), (int)(over->y + var_values[VAR_OVERLAY_H]), (int)var_values[VAR_MAIN_W], (int)var_values[VAR_MAIN_H]); return AVERROR(EINVAL); } return 0; fail: av_log(NULL, AV_LOG_ERROR, "Error when evaluating the expression '%s'\n", expr); return ret; } | 18,939 |
0 | static int dvdsub_decode(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { DVDSubContext *ctx = avctx->priv_data; const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; AVSubtitle *sub = data; int is_menu; if (ctx->buf_size) { int ret = append_to_cached_buf(avctx, buf, buf_size); if (ret < 0) { *data_size = 0; return ret; } buf = ctx->buf; buf_size = ctx->buf_size; } is_menu = decode_dvd_subtitles(ctx, sub, buf, buf_size); if (is_menu == AVERROR(EAGAIN)) { *data_size = 0; return append_to_cached_buf(avctx, buf, buf_size); } if (is_menu < 0) { no_subtitle: reset_rects(sub); *data_size = 0; return buf_size; } if (!is_menu && find_smallest_bounding_rectangle(sub) == 0) goto no_subtitle; if (ctx->forced_subs_only && !(sub->rects[0]->flags & AV_SUBTITLE_FLAG_FORCED)) goto no_subtitle; #if defined(DEBUG) { char ppm_name[32]; snprintf(ppm_name, sizeof(ppm_name), "/tmp/%05d.ppm", ctx->sub_id++); ff_dlog(NULL, "start=%d ms end =%d ms\n", sub->start_display_time, sub->end_display_time); ppm_save(ppm_name, sub->rects[0]->pict.data[0], sub->rects[0]->w, sub->rects[0]->h, (uint32_t*) sub->rects[0]->pict.data[1]); } #endif ctx->buf_size = 0; *data_size = 1; return buf_size; } | 18,940 |
0 | static int mov_write_moov_tag(ByteIOContext *pb, MOVContext *mov, AVFormatContext *s) { int i; offset_t pos = url_ftell(pb); put_be32(pb, 0); /* size placeholder*/ put_tag(pb, "moov"); mov->timescale = globalTimescale; for (i=0; i<MAX_STREAMS; i++) { if(mov->tracks[i].entry <= 0) continue; if(mov->tracks[i].enc->codec_type == CODEC_TYPE_VIDEO) { mov->tracks[i].timescale = mov->tracks[i].enc->time_base.den; mov->tracks[i].sampleDuration = mov->tracks[i].enc->time_base.num; } else if(mov->tracks[i].enc->codec_type == CODEC_TYPE_AUDIO) { mov->tracks[i].timescale = mov->tracks[i].enc->sample_rate; mov->tracks[i].sampleDuration = mov->tracks[i].enc->frame_size; } mov->tracks[i].trackDuration = (int64_t)mov->tracks[i].sampleCount * mov->tracks[i].sampleDuration; mov->tracks[i].time = mov->time; mov->tracks[i].trackID = i+1; } mov_write_mvhd_tag(pb, mov); //mov_write_iods_tag(pb, mov); for (i=0; i<MAX_STREAMS; i++) { if(mov->tracks[i].entry > 0) { mov_write_trak_tag(pb, &(mov->tracks[i])); } } if (mov->mode == MODE_PSP) mov_write_uuidusmt_tag(pb, s); else mov_write_udta_tag(pb, mov, s); return updateSize(pb, pos); } | 18,941 |
1 | int vncws_decode_frame(Buffer *input, uint8_t **payload, size_t *payload_size, size_t *frame_size) { unsigned char opcode = 0, fin = 0, has_mask = 0; size_t header_size = 0; uint32_t *payload32; WsHeader *header = (WsHeader *)input->buffer; WsMask mask; int i; if (input->offset < WS_HEAD_MIN_LEN + 4) { /* header not complete */ return 0; } fin = (header->b0 & 0x80) >> 7; opcode = header->b0 & 0x0f; has_mask = (header->b1 & 0x80) >> 7; *payload_size = header->b1 & 0x7f; if (opcode == WS_OPCODE_CLOSE) { /* disconnect */ return -1; } /* Websocket frame sanity check: * * Websocket fragmentation is not supported. * * All websockets frames sent by a client have to be masked. * * Only binary encoding is supported. */ if (!fin || !has_mask || opcode != WS_OPCODE_BINARY_FRAME) { VNC_DEBUG("Received faulty/unsupported Websocket frame\n"); return -2; } if (*payload_size < 126) { header_size = 6; mask = header->u.m; } else if (*payload_size == 126 && input->offset >= 8) { *payload_size = be16_to_cpu(header->u.s16.l16); header_size = 8; mask = header->u.s16.m16; } else if (*payload_size == 127 && input->offset >= 14) { *payload_size = be64_to_cpu(header->u.s64.l64); header_size = 14; mask = header->u.s64.m64; } else { /* header not complete */ return 0; } *frame_size = header_size + *payload_size; if (input->offset < *frame_size) { /* frame not complete */ return 0; } *payload = input->buffer + header_size; /* unmask frame */ /* process 1 frame (32 bit op) */ payload32 = (uint32_t *)(*payload); for (i = 0; i < *payload_size / 4; i++) { payload32[i] ^= mask.u; } /* process the remaining bytes (if any) */ for (i *= 4; i < *payload_size; i++) { (*payload)[i] ^= mask.c[i % 4]; } return 1; } | 18,944 |
1 | static void send_framebuffer_update_raw(VncState *vs, int x, int y, int w, int h) { int i; uint8_t *row; row = ds_get_data(vs->ds) + y * ds_get_linesize(vs->ds) + x * ds_get_bytes_per_pixel(vs->ds); for (i = 0; i < h; i++) { vs->write_pixels(vs, row, w * ds_get_bytes_per_pixel(vs->ds)); row += ds_get_linesize(vs->ds); } } | 18,945 |
1 | static int alloc_frame_buffer(MpegEncContext *s, Picture *pic) { int r; if (s->avctx->hwaccel) { assert(!pic->f.hwaccel_picture_private); if (s->avctx->hwaccel->priv_data_size) { pic->f.hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size); if (!pic->f.hwaccel_picture_private) { av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n"); return -1; } } } if (s->codec_id != AV_CODEC_ID_WMV3IMAGE && s->codec_id != AV_CODEC_ID_VC1IMAGE && s->codec_id != AV_CODEC_ID_MSS2) r = ff_thread_get_buffer(s->avctx, &pic->f); else r = avcodec_default_get_buffer(s->avctx, &pic->f); if (r < 0 || !pic->f.type || !pic->f.data[0]) { av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %p)\n", r, pic->f.type, pic->f.data[0]); av_freep(&pic->f.hwaccel_picture_private); return -1; } if (s->linesize && (s->linesize != pic->f.linesize[0] || s->uvlinesize != pic->f.linesize[1])) { av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (stride changed)\n"); free_frame_buffer(s, pic); return -1; } if (pic->f.linesize[1] != pic->f.linesize[2]) { av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (uv stride mismatch)\n"); free_frame_buffer(s, pic); return -1; } return 0; } | 18,947 |
1 | static void pflash_write (pflash_t *pfl, target_ulong offset, uint32_t value, int width) { target_ulong boff; uint8_t *p; uint8_t cmd; /* WARNING: when the memory area is in ROMD mode, the offset is a ram offset, not a physical address */ if (pfl->wcycle == 0) offset -= (target_ulong)(long)pfl->storage; else offset -= pfl->base; cmd = value; DPRINTF("%s: offset " TARGET_FMT_lx " %08x %d\n", __func__, offset, value, width); if (pfl->cmd != 0xA0 && cmd == 0xF0) { DPRINTF("%s: flash reset asked (%02x %02x)\n", __func__, pfl->cmd, cmd); goto reset_flash; } /* Set the device in I/O access mode */ cpu_register_physical_memory(pfl->base, pfl->total_len, pfl->fl_mem); boff = offset & (pfl->sector_len - 1); if (pfl->width == 2) boff = boff >> 1; else if (pfl->width == 4) boff = boff >> 2; switch (pfl->wcycle) { case 0: /* We're in read mode */ check_unlock0: if (boff == 0x55 && cmd == 0x98) { enter_CFI_mode: /* Enter CFI query mode */ pfl->wcycle = 7; pfl->cmd = 0x98; return; } if (boff != 0x555 || cmd != 0xAA) { DPRINTF("%s: unlock0 failed " TARGET_FMT_lx " %02x %04x\n", __func__, boff, cmd, 0x555); goto reset_flash; } DPRINTF("%s: unlock sequence started\n", __func__); break; case 1: /* We started an unlock sequence */ check_unlock1: if (boff != 0x2AA || cmd != 0x55) { DPRINTF("%s: unlock1 failed " TARGET_FMT_lx " %02x\n", __func__, boff, cmd); goto reset_flash; } DPRINTF("%s: unlock sequence done\n", __func__); break; case 2: /* We finished an unlock sequence */ if (!pfl->bypass && boff != 0x555) { DPRINTF("%s: command failed " TARGET_FMT_lx " %02x\n", __func__, boff, cmd); goto reset_flash; } switch (cmd) { case 0x20: pfl->bypass = 1; goto do_bypass; case 0x80: case 0x90: case 0xA0: pfl->cmd = cmd; DPRINTF("%s: starting command %02x\n", __func__, cmd); break; default: DPRINTF("%s: unknown command %02x\n", __func__, cmd); goto reset_flash; } break; case 3: switch (pfl->cmd) { case 0x80: /* We need another unlock sequence */ goto check_unlock0; case 0xA0: DPRINTF("%s: write data offset " TARGET_FMT_lx " %08x %d\n", __func__, offset, value, width); p = pfl->storage; switch (width) { case 1: p[offset] &= value; pflash_update(pfl, offset, 1); break; case 2: #if defined(TARGET_WORDS_BIGENDIAN) p[offset] &= value >> 8; p[offset + 1] &= value; #else p[offset] &= value; p[offset + 1] &= value >> 8; #endif pflash_update(pfl, offset, 2); break; case 4: #if defined(TARGET_WORDS_BIGENDIAN) p[offset] &= value >> 24; p[offset + 1] &= value >> 16; p[offset + 2] &= value >> 8; p[offset + 3] &= value; #else p[offset] &= value; p[offset + 1] &= value >> 8; p[offset + 2] &= value >> 16; p[offset + 3] &= value >> 24; #endif pflash_update(pfl, offset, 4); break; } pfl->status = 0x00 | ~(value & 0x80); /* Let's pretend write is immediate */ if (pfl->bypass) goto do_bypass; goto reset_flash; case 0x90: if (pfl->bypass && cmd == 0x00) { /* Unlock bypass reset */ goto reset_flash; } /* We can enter CFI query mode from autoselect mode */ if (boff == 0x55 && cmd == 0x98) goto enter_CFI_mode; /* No break here */ default: DPRINTF("%s: invalid write for command %02x\n", __func__, pfl->cmd); goto reset_flash; } case 4: switch (pfl->cmd) { case 0xA0: /* Ignore writes while flash data write is occuring */ /* As we suppose write is immediate, this should never happen */ return; case 0x80: goto check_unlock1; default: /* Should never happen */ DPRINTF("%s: invalid command state %02x (wc 4)\n", __func__, pfl->cmd); goto reset_flash; } break; case 5: switch (cmd) { case 0x10: if (boff != 0x555) { DPRINTF("%s: chip erase: invalid address " TARGET_FMT_lx "\n", __func__, offset); goto reset_flash; } /* Chip erase */ DPRINTF("%s: start chip erase\n", __func__); memset(pfl->storage, 0xFF, pfl->total_len); pfl->status = 0x00; pflash_update(pfl, 0, pfl->total_len); /* Let's wait 5 seconds before chip erase is done */ qemu_mod_timer(pfl->timer, qemu_get_clock(vm_clock) + (ticks_per_sec * 5)); break; case 0x30: /* Sector erase */ p = pfl->storage; offset &= ~(pfl->sector_len - 1); DPRINTF("%s: start sector erase at " TARGET_FMT_lx "\n", __func__, offset); memset(p + offset, 0xFF, pfl->sector_len); pflash_update(pfl, offset, pfl->sector_len); pfl->status = 0x00; /* Let's wait 1/2 second before sector erase is done */ qemu_mod_timer(pfl->timer, qemu_get_clock(vm_clock) + (ticks_per_sec / 2)); break; default: DPRINTF("%s: invalid command %02x (wc 5)\n", __func__, cmd); goto reset_flash; } pfl->cmd = cmd; break; case 6: switch (pfl->cmd) { case 0x10: /* Ignore writes during chip erase */ return; case 0x30: /* Ignore writes during sector erase */ return; default: /* Should never happen */ DPRINTF("%s: invalid command state %02x (wc 6)\n", __func__, pfl->cmd); goto reset_flash; } break; case 7: /* Special value for CFI queries */ DPRINTF("%s: invalid write in CFI query mode\n", __func__); goto reset_flash; default: /* Should never happen */ DPRINTF("%s: invalid write state (wc 7)\n", __func__); goto reset_flash; } pfl->wcycle++; return; /* Reset flash */ reset_flash: if (pfl->wcycle != 0) { cpu_register_physical_memory(pfl->base, pfl->total_len, pfl->off | IO_MEM_ROMD | pfl->fl_mem); } pfl->bypass = 0; pfl->wcycle = 0; pfl->cmd = 0; return; do_bypass: pfl->wcycle = 2; pfl->cmd = 0; return; } | 18,948
1 | void cpu_loop(CPUPPCState *env) { CPUState *cs = CPU(ppc_env_get_cpu(env)); target_siginfo_t info; int trapnr; target_ulong ret; for(;;) { cpu_exec_start(cs); trapnr = cpu_ppc_exec(cs); cpu_exec_end(cs); switch(trapnr) { case POWERPC_EXCP_NONE: /* Just go on */ break; case POWERPC_EXCP_CRITICAL: /* Critical input */ cpu_abort(cs, "Critical interrupt while in user mode. " "Aborting\n"); break; case POWERPC_EXCP_MCHECK: /* Machine check exception */ cpu_abort(cs, "Machine check exception while in user mode. " "Aborting\n"); break; case POWERPC_EXCP_DSI: /* Data storage exception */ EXCP_DUMP(env, "Invalid data memory access: 0x" TARGET_FMT_lx "\n", env->spr[SPR_DAR]); /* XXX: check this. Seems bugged */ switch (env->error_code & 0xFF000000) { case 0x40000000: info.si_signo = TARGET_SIGSEGV; info.si_errno = 0; info.si_code = TARGET_SEGV_MAPERR; break; case 0x04000000: info.si_signo = TARGET_SIGILL; info.si_errno = 0; info.si_code = TARGET_ILL_ILLADR; break; case 0x08000000: info.si_signo = TARGET_SIGSEGV; info.si_errno = 0; info.si_code = TARGET_SEGV_ACCERR; break; default: /* Let's send a regular segfault... */ EXCP_DUMP(env, "Invalid segfault errno (%02x)\n", env->error_code); info.si_signo = TARGET_SIGSEGV; info.si_errno = 0; info.si_code = TARGET_SEGV_MAPERR; break; } info._sifields._sigfault._addr = env->nip; queue_signal(env, info.si_signo, &info); break; case POWERPC_EXCP_ISI: /* Instruction storage exception */ EXCP_DUMP(env, "Invalid instruction fetch: 0x\n" TARGET_FMT_lx "\n", env->spr[SPR_SRR0]); /* XXX: check this */ switch (env->error_code & 0xFF000000) { case 0x40000000: info.si_signo = TARGET_SIGSEGV; info.si_errno = 0; info.si_code = TARGET_SEGV_MAPERR; break; case 0x10000000: case 0x08000000: info.si_signo = TARGET_SIGSEGV; info.si_errno = 0; info.si_code = TARGET_SEGV_ACCERR; break; default: /* Let's send a regular segfault... */ EXCP_DUMP(env, "Invalid segfault errno (%02x)\n", env->error_code); info.si_signo = TARGET_SIGSEGV; info.si_errno = 0; info.si_code = TARGET_SEGV_MAPERR; break; } info._sifields._sigfault._addr = env->nip - 4; queue_signal(env, info.si_signo, &info); break; case POWERPC_EXCP_EXTERNAL: /* External input */ cpu_abort(cs, "External interrupt while in user mode. " "Aborting\n"); break; case POWERPC_EXCP_ALIGN: /* Alignment exception */ EXCP_DUMP(env, "Unaligned memory access\n"); /* XXX: check this */ info.si_signo = TARGET_SIGBUS; info.si_errno = 0; info.si_code = TARGET_BUS_ADRALN; info._sifields._sigfault._addr = env->nip; queue_signal(env, info.si_signo, &info); break; case POWERPC_EXCP_PROGRAM: /* Program exception */ /* XXX: check this */ switch (env->error_code & ~0xF) { case POWERPC_EXCP_FP: EXCP_DUMP(env, "Floating point program exception\n"); info.si_signo = TARGET_SIGFPE; info.si_errno = 0; switch (env->error_code & 0xF) { case POWERPC_EXCP_FP_OX: info.si_code = TARGET_FPE_FLTOVF; break; case POWERPC_EXCP_FP_UX: info.si_code = TARGET_FPE_FLTUND; break; case POWERPC_EXCP_FP_ZX: case POWERPC_EXCP_FP_VXZDZ: info.si_code = TARGET_FPE_FLTDIV; break; case POWERPC_EXCP_FP_XX: info.si_code = TARGET_FPE_FLTRES; break; case POWERPC_EXCP_FP_VXSOFT: info.si_code = TARGET_FPE_FLTINV; break; case POWERPC_EXCP_FP_VXSNAN: case POWERPC_EXCP_FP_VXISI: case POWERPC_EXCP_FP_VXIDI: case POWERPC_EXCP_FP_VXIMZ: case POWERPC_EXCP_FP_VXVC: case POWERPC_EXCP_FP_VXSQRT: case POWERPC_EXCP_FP_VXCVI: info.si_code = TARGET_FPE_FLTSUB; break; default: EXCP_DUMP(env, "Unknown floating point exception (%02x)\n", env->error_code); break; } break; case POWERPC_EXCP_INVAL: EXCP_DUMP(env, "Invalid instruction\n"); info.si_signo = TARGET_SIGILL; info.si_errno = 0; switch (env->error_code & 0xF) { case POWERPC_EXCP_INVAL_INVAL: info.si_code = TARGET_ILL_ILLOPC; break; case POWERPC_EXCP_INVAL_LSWX: info.si_code = TARGET_ILL_ILLOPN; break; case POWERPC_EXCP_INVAL_SPR: info.si_code = TARGET_ILL_PRVREG; break; case POWERPC_EXCP_INVAL_FP: info.si_code = TARGET_ILL_COPROC; break; default: EXCP_DUMP(env, "Unknown invalid operation (%02x)\n", env->error_code & 0xF); info.si_code = TARGET_ILL_ILLADR; break; } break; case POWERPC_EXCP_PRIV: EXCP_DUMP(env, "Privilege violation\n"); info.si_signo = TARGET_SIGILL; info.si_errno = 0; switch (env->error_code & 0xF) { case POWERPC_EXCP_PRIV_OPC: info.si_code = TARGET_ILL_PRVOPC; break; case POWERPC_EXCP_PRIV_REG: info.si_code = TARGET_ILL_PRVREG; break; default: EXCP_DUMP(env, "Unknown privilege violation (%02x)\n", env->error_code & 0xF); info.si_code = TARGET_ILL_PRVOPC; break; } break; case POWERPC_EXCP_TRAP: cpu_abort(cs, "Tried to call a TRAP\n"); break; default: /* Should not happen ! */ cpu_abort(cs, "Unknown program exception (%02x)\n", env->error_code); break; } info._sifields._sigfault._addr = env->nip - 4; queue_signal(env, info.si_signo, &info); break; case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */ EXCP_DUMP(env, "No floating point allowed\n"); info.si_signo = TARGET_SIGILL; info.si_errno = 0; info.si_code = TARGET_ILL_COPROC; info._sifields._sigfault._addr = env->nip - 4; queue_signal(env, info.si_signo, &info); break; case POWERPC_EXCP_SYSCALL: /* System call exception */ cpu_abort(cs, "Syscall exception while in user mode. " "Aborting\n"); break; case POWERPC_EXCP_APU: /* Auxiliary processor unavailable */ EXCP_DUMP(env, "No APU instruction allowed\n"); info.si_signo = TARGET_SIGILL; info.si_errno = 0; info.si_code = TARGET_ILL_COPROC; info._sifields._sigfault._addr = env->nip - 4; queue_signal(env, info.si_signo, &info); break; case POWERPC_EXCP_DECR: /* Decrementer exception */ cpu_abort(cs, "Decrementer interrupt while in user mode. " "Aborting\n"); break; case POWERPC_EXCP_FIT: /* Fixed-interval timer interrupt */ cpu_abort(cs, "Fix interval timer interrupt while in user mode. " "Aborting\n"); break; case POWERPC_EXCP_WDT: /* Watchdog timer interrupt */ cpu_abort(cs, "Watchdog timer interrupt while in user mode. " "Aborting\n"); break; case POWERPC_EXCP_DTLB: /* Data TLB error */ cpu_abort(cs, "Data TLB exception while in user mode. " "Aborting\n"); break; case POWERPC_EXCP_ITLB: /* Instruction TLB error */ cpu_abort(cs, "Instruction TLB exception while in user mode. " "Aborting\n"); break; case POWERPC_EXCP_SPEU: /* SPE/embedded floating-point unavail. */ EXCP_DUMP(env, "No SPE/floating-point instruction allowed\n"); info.si_signo = TARGET_SIGILL; info.si_errno = 0; info.si_code = TARGET_ILL_COPROC; info._sifields._sigfault._addr = env->nip - 4; queue_signal(env, info.si_signo, &info); break; case POWERPC_EXCP_EFPDI: /* Embedded floating-point data IRQ */ cpu_abort(cs, "Embedded floating-point data IRQ not handled\n"); break; case POWERPC_EXCP_EFPRI: /* Embedded floating-point round IRQ */ cpu_abort(cs, "Embedded floating-point round IRQ not handled\n"); break; case POWERPC_EXCP_EPERFM: /* Embedded performance monitor IRQ */ cpu_abort(cs, "Performance monitor exception not handled\n"); break; case POWERPC_EXCP_DOORI: /* Embedded doorbell interrupt */ cpu_abort(cs, "Doorbell interrupt while in user mode. " "Aborting\n"); break; case POWERPC_EXCP_DOORCI: /* Embedded doorbell critical interrupt */ cpu_abort(cs, "Doorbell critical interrupt while in user mode. " "Aborting\n"); break; case POWERPC_EXCP_RESET: /* System reset exception */ cpu_abort(cs, "Reset interrupt while in user mode. " "Aborting\n"); break; case POWERPC_EXCP_DSEG: /* Data segment exception */ cpu_abort(cs, "Data segment exception while in user mode. " "Aborting\n"); break; case POWERPC_EXCP_ISEG: /* Instruction segment exception */ cpu_abort(cs, "Instruction segment exception " "while in user mode. Aborting\n"); break; /* PowerPC 64 with hypervisor mode support */ case POWERPC_EXCP_HDECR: /* Hypervisor decrementer exception */ cpu_abort(cs, "Hypervisor decrementer interrupt " "while in user mode. Aborting\n"); break; case POWERPC_EXCP_TRACE: /* Trace exception */ /* Nothing to do: * we use this exception to emulate step-by-step execution mode. */ break; /* PowerPC 64 with hypervisor mode support */ case POWERPC_EXCP_HDSI: /* Hypervisor data storage exception */ cpu_abort(cs, "Hypervisor data storage exception " "while in user mode. Aborting\n"); break; case POWERPC_EXCP_HISI: /* Hypervisor instruction storage excp */ cpu_abort(cs, "Hypervisor instruction storage exception " "while in user mode. Aborting\n"); break; case POWERPC_EXCP_HDSEG: /* Hypervisor data segment exception */ cpu_abort(cs, "Hypervisor data segment exception " "while in user mode. Aborting\n"); break; case POWERPC_EXCP_HISEG: /* Hypervisor instruction segment excp */ cpu_abort(cs, "Hypervisor instruction segment exception " "while in user mode. Aborting\n"); break; case POWERPC_EXCP_VPU: /* Vector unavailable exception */ EXCP_DUMP(env, "No Altivec instructions allowed\n"); info.si_signo = TARGET_SIGILL; info.si_errno = 0; info.si_code = TARGET_ILL_COPROC; info._sifields._sigfault._addr = env->nip - 4; queue_signal(env, info.si_signo, &info); break; case POWERPC_EXCP_PIT: /* Programmable interval timer IRQ */ cpu_abort(cs, "Programmable interval timer interrupt " "while in user mode. Aborting\n"); break; case POWERPC_EXCP_IO: /* IO error exception */ cpu_abort(cs, "IO error exception while in user mode. " "Aborting\n"); break; case POWERPC_EXCP_RUNM: /* Run mode exception */ cpu_abort(cs, "Run mode exception while in user mode. " "Aborting\n"); break; case POWERPC_EXCP_EMUL: /* Emulation trap exception */ cpu_abort(cs, "Emulation trap exception not handled\n"); break; case POWERPC_EXCP_IFTLB: /* Instruction fetch TLB error */ cpu_abort(cs, "Instruction fetch TLB exception " "while in user-mode. Aborting"); break; case POWERPC_EXCP_DLTLB: /* Data load TLB miss */ cpu_abort(cs, "Data load TLB exception while in user-mode. " "Aborting"); break; case POWERPC_EXCP_DSTLB: /* Data store TLB miss */ cpu_abort(cs, "Data store TLB exception while in user-mode. " "Aborting"); break; case POWERPC_EXCP_FPA: /* Floating-point assist exception */ cpu_abort(cs, "Floating-point assist exception not handled\n"); break; case POWERPC_EXCP_IABR: /* Instruction address breakpoint */ cpu_abort(cs, "Instruction address breakpoint exception " "not handled\n"); break; case POWERPC_EXCP_SMI: /* System management interrupt */ cpu_abort(cs, "System management interrupt while in user mode. " "Aborting\n"); break; case POWERPC_EXCP_THERM: /* Thermal interrupt */ cpu_abort(cs, "Thermal interrupt interrupt while in user mode. " "Aborting\n"); break; case POWERPC_EXCP_PERFM: /* Embedded performance monitor IRQ */ cpu_abort(cs, "Performance monitor exception not handled\n"); break; case POWERPC_EXCP_VPUA: /* Vector assist exception */ cpu_abort(cs, "Vector assist exception not handled\n"); break; case POWERPC_EXCP_SOFTP: /* Soft patch exception */ cpu_abort(cs, "Soft patch exception not handled\n"); break; case POWERPC_EXCP_MAINT: /* Maintenance exception */ cpu_abort(cs, "Maintenance exception while in user mode. " "Aborting\n"); break; case POWERPC_EXCP_STOP: /* stop translation */ /* We did invalidate the instruction cache. Go on */ break; case POWERPC_EXCP_BRANCH: /* branch instruction: */ /* We just stopped because of a branch. Go on */ break; case POWERPC_EXCP_SYSCALL_USER: /* system call in user-mode emulation */ /* WARNING: * PPC ABI uses overflow flag in cr0 to signal an error * in syscalls. */ env->crf[0] &= ~0x1; ret = do_syscall(env, env->gpr[0], env->gpr[3], env->gpr[4], env->gpr[5], env->gpr[6], env->gpr[7], env->gpr[8], 0, 0); if (ret == -TARGET_ERESTARTSYS) { env->nip -= 4; break; } if (ret == (target_ulong)(-TARGET_QEMU_ESIGRETURN)) { /* Returning from a successful sigreturn syscall. Avoid corrupting register state. */ break; } if (ret > (target_ulong)(-515)) { env->crf[0] |= 0x1; ret = -ret; } env->gpr[3] = ret; break; case POWERPC_EXCP_STCX: if (do_store_exclusive(env)) { info.si_signo = TARGET_SIGSEGV; info.si_errno = 0; info.si_code = TARGET_SEGV_MAPERR; info._sifields._sigfault._addr = env->nip; queue_signal(env, info.si_signo, &info); } break; case EXCP_DEBUG: { int sig; sig = gdb_handlesig(cs, TARGET_SIGTRAP); if (sig) { info.si_signo = sig; info.si_errno = 0; info.si_code = TARGET_TRAP_BRKPT; queue_signal(env, info.si_signo, &info); } } break; case EXCP_INTERRUPT: /* just indicate that signals should be handled asap */ break; default: cpu_abort(cs, "Unknown exception 0x%d. Aborting\n", trapnr); break; } process_pending_signals(env); } } | 18,949
1 | static int check_video_codec_tag(int codec_tag) { if (codec_tag <= 0 || codec_tag > 15) { return AVERROR(ENOSYS); } else return 0; } | 18,950 |
1 | static int announce_self_create(uint8_t *buf, uint8_t *mac_addr) { uint32_t magic = EXPERIMENTAL_MAGIC; uint16_t proto = htons(ETH_P_EXPERIMENTAL); /* FIXME: should we send a different packet (arp/rarp/ping)? */ memset(buf, 0, 64); memset(buf, 0xff, 6); /* h_dst */ memcpy(buf + 6, mac_addr, 6); /* h_src */ memcpy(buf + 12, &proto, 2); /* h_proto */ memcpy(buf + 14, &magic, 4); /* magic */ return 64; /* len */ } | 18,951 |
1 | static int slirp_hostfwd(SlirpState *s, const char *redir_str, int legacy_format, Error **errp) { struct in_addr host_addr = { .s_addr = INADDR_ANY }; struct in_addr guest_addr = { .s_addr = 0 }; int host_port, guest_port; const char *p; char buf[256]; int is_udp; char *end; p = redir_str; if (!p || get_str_sep(buf, sizeof(buf), &p, ':') < 0) { goto fail_syntax; } if (!strcmp(buf, "tcp") || buf[0] == '\0') { is_udp = 0; } else if (!strcmp(buf, "udp")) { is_udp = 1; } else { goto fail_syntax; } if (!legacy_format) { if (get_str_sep(buf, sizeof(buf), &p, ':') < 0) { goto fail_syntax; } if (buf[0] != '\0' && !inet_aton(buf, &host_addr)) { goto fail_syntax; } } if (get_str_sep(buf, sizeof(buf), &p, legacy_format ? ':' : '-') < 0) { goto fail_syntax; } host_port = strtol(buf, &end, 0); if (*end != '\0' || host_port < 0 || host_port > 65535) { goto fail_syntax; } if (get_str_sep(buf, sizeof(buf), &p, ':') < 0) { goto fail_syntax; } if (buf[0] != '\0' && !inet_aton(buf, &guest_addr)) { goto fail_syntax; } guest_port = strtol(p, &end, 0); if (*end != '\0' || guest_port < 1 || guest_port > 65535) { goto fail_syntax; } if (slirp_add_hostfwd(s->slirp, is_udp, host_addr, host_port, guest_addr, guest_port) < 0) { error_setg(errp, "Could not set up host forwarding rule '%s'", redir_str); return -1; } return 0; fail_syntax: error_setg(errp, "Invalid host forwarding rule '%s'", redir_str); return -1; } | 18,952 |
1 | void RENAME(interleaveBytes)(uint8_t *src1, uint8_t *src2, uint8_t *dest, long width, long height, long src1Stride, long src2Stride, long dstStride){ long h; for(h=0; h < height; h++) { long w; #ifdef HAVE_MMX #ifdef HAVE_SSE2 asm( "xor %%"REG_a", %%"REG_a" \n\t" "1: \n\t" PREFETCH" 64(%1, %%"REG_a") \n\t" PREFETCH" 64(%2, %%"REG_a") \n\t" "movdqa (%1, %%"REG_a"), %%xmm0 \n\t" "movdqa (%1, %%"REG_a"), %%xmm1 \n\t" "movdqa (%2, %%"REG_a"), %%xmm2 \n\t" "punpcklbw %%xmm2, %%xmm0 \n\t" "punpckhbw %%xmm2, %%xmm1 \n\t" "movntdq %%xmm0, (%0, %%"REG_a", 2)\n\t" "movntdq %%xmm1, 16(%0, %%"REG_a", 2)\n\t" "add $16, %%"REG_a" \n\t" "cmp %3, %%"REG_a" \n\t" " jb 1b \n\t" ::"r"(dest), "r"(src1), "r"(src2), "r" (width-15) : "memory", "%"REG_a"" ); #else asm( "xor %%"REG_a", %%"REG_a" \n\t" "1: \n\t" PREFETCH" 64(%1, %%"REG_a") \n\t" PREFETCH" 64(%2, %%"REG_a") \n\t" "movq (%1, %%"REG_a"), %%mm0 \n\t" "movq 8(%1, %%"REG_a"), %%mm2 \n\t" "movq %%mm0, %%mm1 \n\t" "movq %%mm2, %%mm3 \n\t" "movq (%2, %%"REG_a"), %%mm4 \n\t" "movq 8(%2, %%"REG_a"), %%mm5 \n\t" "punpcklbw %%mm4, %%mm0 \n\t" "punpckhbw %%mm4, %%mm1 \n\t" "punpcklbw %%mm5, %%mm2 \n\t" "punpckhbw %%mm5, %%mm3 \n\t" MOVNTQ" %%mm0, (%0, %%"REG_a", 2)\n\t" MOVNTQ" %%mm1, 8(%0, %%"REG_a", 2)\n\t" MOVNTQ" %%mm2, 16(%0, %%"REG_a", 2)\n\t" MOVNTQ" %%mm3, 24(%0, %%"REG_a", 2)\n\t" "add $16, %%"REG_a" \n\t" "cmp %3, %%"REG_a" \n\t" " jb 1b \n\t" ::"r"(dest), "r"(src1), "r"(src2), "r" (width-15) : "memory", "%"REG_a ); #endif for(w= (width&(~15)); w < width; w++) { dest[2*w+0] = src1[w]; dest[2*w+1] = src2[w]; } #else for(w=0; w < width; w++) { dest[2*w+0] = src1[w]; dest[2*w+1] = src2[w]; } #endif dest += dstStride; src1 += src1Stride; src2 += src2Stride; } #ifdef HAVE_MMX asm( EMMS" \n\t" SFENCE" \n\t" ::: "memory" ); #endif } | 18,953 |
1 | static inline void RENAME(yuv2rgb555_1)(SwsContext *c, const uint16_t *buf0, const uint16_t *ubuf0, const uint16_t *ubuf1, const uint16_t *vbuf0, const uint16_t *vbuf1, const uint16_t *abuf0, uint8_t *dest, int dstW, int uvalpha, enum PixelFormat dstFormat, int flags, int y) { x86_reg uv_off = c->uv_off << 1; const uint16_t *buf1= buf0; //FIXME needed for RGB1/BGR1 if (uvalpha < 2048) { // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster __asm__ volatile( "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" "mov %4, %%"REG_b" \n\t" "push %%"REG_BP" \n\t" YSCALEYUV2RGB1(%%REGBP, %5, %6) "pxor %%mm7, %%mm7 \n\t" /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */ #ifdef DITHER1XBPP "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t" "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t" "paddusb "RED_DITHER"(%5), %%mm5 \n\t" #endif WRITERGB15(%%REGb, 8280(%5), %%REGBP) "pop %%"REG_BP" \n\t" "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest), "a" (&c->redDither), "m"(uv_off) ); } else { __asm__ volatile( "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" "mov %4, %%"REG_b" \n\t" "push %%"REG_BP" \n\t" YSCALEYUV2RGB1b(%%REGBP, %5, %6) "pxor %%mm7, %%mm7 \n\t" /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */ #ifdef DITHER1XBPP "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t" "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t" "paddusb "RED_DITHER"(%5), %%mm5 \n\t" #endif WRITERGB15(%%REGb, 8280(%5), %%REGBP) "pop %%"REG_BP" \n\t" "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest), "a" (&c->redDither), "m"(uv_off) ); } } | 18,954 |
1 | static int vhdx_open(BlockDriverState *bs, QDict *options, int flags, Error **errp) { BDRVVHDXState *s = bs->opaque; int ret = 0; uint32_t i; uint64_t signature; bool log_flushed = false; s->bat = NULL; s->first_visible_write = true; qemu_co_mutex_init(&s->lock); QLIST_INIT(&s->regions); /* validate the file signature */ ret = bdrv_pread(bs->file, 0, &signature, sizeof(uint64_t)); if (ret < 0) { goto fail; } if (memcmp(&signature, "vhdxfile", 8)) { ret = -EINVAL; goto fail; } /* This is used for any header updates, for the file_write_guid. * The spec dictates that a new value should be used for the first * header update */ vhdx_guid_generate(&s->session_guid); ret = vhdx_parse_header(bs, s); if (ret < 0) { goto fail; } ret = vhdx_parse_log(bs, s, &log_flushed); if (ret < 0) { goto fail; } ret = vhdx_open_region_tables(bs, s); if (ret < 0) { goto fail; } ret = vhdx_parse_metadata(bs, s); if (ret < 0) { goto fail; } s->block_size = s->params.block_size; /* the VHDX spec dictates that virtual_disk_size is always a multiple of * logical_sector_size */ bs->total_sectors = s->virtual_disk_size >> s->logical_sector_size_bits; vhdx_calc_bat_entries(s); s->bat_offset = s->bat_rt.file_offset; if (s->bat_entries > s->bat_rt.length / sizeof(VHDXBatEntry)) { /* BAT allocation is not large enough for all entries */ ret = -EINVAL; goto fail; } /* s->bat is freed in vhdx_close() */ s->bat = qemu_blockalign(bs, s->bat_rt.length); ret = bdrv_pread(bs->file, s->bat_offset, s->bat, s->bat_rt.length); if (ret < 0) { goto fail; } uint64_t payblocks = s->chunk_ratio; /* endian convert, and verify populated BAT field file offsets against * region table and log entries */ for (i = 0; i < s->bat_entries; i++) { le64_to_cpus(&s->bat[i]); if (payblocks--) { /* payload bat entries */ if ((s->bat[i] & VHDX_BAT_STATE_BIT_MASK) == PAYLOAD_BLOCK_FULLY_PRESENT) { ret = vhdx_region_check(s, s->bat[i] & VHDX_BAT_FILE_OFF_MASK, s->block_size); if (ret < 0) { goto fail; } } } else { payblocks = s->chunk_ratio; /* Once differencing files are supported, verify sector bitmap * blocks here */ } } if (flags & BDRV_O_RDWR) { ret = vhdx_update_headers(bs, s, false, NULL); if (ret < 0) { goto fail; } } /* TODO: differencing files */ /* Disable migration when VHDX images are used */ error_set(&s->migration_blocker, QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED, "vhdx", bs->device_name, "live migration"); migrate_add_blocker(s->migration_blocker); return 0; fail: vhdx_close(bs); return ret; } | 18,956 |
1 | FFAMediaCodec* ff_AMediaCodec_createEncoderByType(const char *mime) { JNIEnv *env = NULL; FFAMediaCodec *codec = NULL; jstring mime_type = NULL; codec = av_mallocz(sizeof(FFAMediaCodec)); if (!codec) { return NULL; } codec->class = &amediacodec_class; env = ff_jni_get_env(codec); if (!env) { av_freep(&codec); return NULL; } if (ff_jni_init_jfields(env, &codec->jfields, jni_amediacodec_mapping, 1, codec) < 0) { goto fail; } mime_type = ff_jni_utf_chars_to_jstring(env, mime, codec); if (!mime_type) { goto fail; } codec->object = (*env)->CallStaticObjectMethod(env, codec->jfields.mediacodec_class, codec->jfields.create_encoder_by_type_id, mime_type); if (ff_jni_exception_check(env, 1, codec) < 0) { goto fail; } codec->object = (*env)->NewGlobalRef(env, codec->object); if (!codec->object) { goto fail; } if (codec_init_static_fields(codec) < 0) { goto fail; } if (codec->jfields.get_input_buffer_id && codec->jfields.get_output_buffer_id) { codec->has_get_i_o_buffer = 1; } return codec; fail: ff_jni_reset_jfields(env, &codec->jfields, jni_amediacodec_mapping, 1, codec); if (mime_type) { (*env)->DeleteLocalRef(env, mime_type); } av_freep(&codec); return NULL; } | 18,957 |
1 | static int mov_write_uuidprof_tag(AVIOContext *pb, AVFormatContext *s) { AVStream *video_st = s->streams[0]; AVCodecParameters *video_par = s->streams[0]->codecpar; AVCodecParameters *audio_par = s->streams[1]->codecpar; int audio_rate = audio_par->sample_rate; int64_t frame_rate = (video_st->avg_frame_rate.num * 0x10000LL) / video_st->avg_frame_rate.den; int audio_kbitrate = audio_par->bit_rate / 1000; int video_kbitrate = FFMIN(video_par->bit_rate / 1000, 800 - audio_kbitrate); if (frame_rate < 0 || frame_rate > INT32_MAX) { av_log(s, AV_LOG_ERROR, "Frame rate %f outside supported range\n", frame_rate / (double)0x10000); return AVERROR(EINVAL); } avio_wb32(pb, 0x94); /* size */ ffio_wfourcc(pb, "uuid"); ffio_wfourcc(pb, "PROF"); avio_wb32(pb, 0x21d24fce); /* 96 bit UUID */ avio_wb32(pb, 0xbb88695c); avio_wb32(pb, 0xfac9c740); avio_wb32(pb, 0x0); /* ? */ avio_wb32(pb, 0x3); /* 3 sections ? */ avio_wb32(pb, 0x14); /* size */ ffio_wfourcc(pb, "FPRF"); avio_wb32(pb, 0x0); /* ? */ avio_wb32(pb, 0x0); /* ? */ avio_wb32(pb, 0x0); /* ? */ avio_wb32(pb, 0x2c); /* size */ ffio_wfourcc(pb, "APRF"); /* audio */ avio_wb32(pb, 0x0); avio_wb32(pb, 0x2); /* TrackID */ ffio_wfourcc(pb, "mp4a"); avio_wb32(pb, 0x20f); avio_wb32(pb, 0x0); avio_wb32(pb, audio_kbitrate); avio_wb32(pb, audio_kbitrate); avio_wb32(pb, audio_rate); avio_wb32(pb, audio_par->channels); avio_wb32(pb, 0x34); /* size */ ffio_wfourcc(pb, "VPRF"); /* video */ avio_wb32(pb, 0x0); avio_wb32(pb, 0x1); /* TrackID */ if (video_par->codec_id == AV_CODEC_ID_H264) { ffio_wfourcc(pb, "avc1"); avio_wb16(pb, 0x014D); avio_wb16(pb, 0x0015); } else { ffio_wfourcc(pb, "mp4v"); avio_wb16(pb, 0x0000); avio_wb16(pb, 0x0103); } avio_wb32(pb, 0x0); avio_wb32(pb, video_kbitrate); avio_wb32(pb, video_kbitrate); avio_wb32(pb, frame_rate); avio_wb32(pb, frame_rate); avio_wb16(pb, video_par->width); avio_wb16(pb, video_par->height); avio_wb32(pb, 0x010001); /* ? */ return 0; } | 18,958 |
1 | void av_thread_message_queue_free(AVThreadMessageQueue **mq) { #if HAVE_THREADS if (*mq) { av_thread_message_flush(*mq); av_fifo_freep(&(*mq)->fifo); pthread_cond_destroy(&(*mq)->cond); pthread_mutex_destroy(&(*mq)->lock); av_freep(mq); } #endif } | 18,960 |
1 | static void vhdx_parse_header(BlockDriverState *bs, BDRVVHDXState *s, Error **errp) { int ret; VHDXHeader *header1; VHDXHeader *header2; bool h1_valid = false; bool h2_valid = false; uint64_t h1_seq = 0; uint64_t h2_seq = 0; uint8_t *buffer; /* header1 & header2 are freed in vhdx_close() */ header1 = qemu_blockalign(bs, sizeof(VHDXHeader)); header2 = qemu_blockalign(bs, sizeof(VHDXHeader)); buffer = qemu_blockalign(bs, VHDX_HEADER_SIZE); s->headers[0] = header1; s->headers[1] = header2; /* We have to read the whole VHDX_HEADER_SIZE instead of * sizeof(VHDXHeader), because the checksum is over the whole * region */ ret = bdrv_pread(bs->file, VHDX_HEADER1_OFFSET, buffer, VHDX_HEADER_SIZE); if (ret < 0) { goto fail; } /* copy over just the relevant portion that we need */ memcpy(header1, buffer, sizeof(VHDXHeader)); vhdx_header_le_import(header1); if (vhdx_checksum_is_valid(buffer, VHDX_HEADER_SIZE, 4) && !memcmp(&header1->signature, "head", 4) && header1->version == 1) { h1_seq = header1->sequence_number; h1_valid = true; } ret = bdrv_pread(bs->file, VHDX_HEADER2_OFFSET, buffer, VHDX_HEADER_SIZE); if (ret < 0) { goto fail; } /* copy over just the relevant portion that we need */ memcpy(header2, buffer, sizeof(VHDXHeader)); vhdx_header_le_import(header2); if (vhdx_checksum_is_valid(buffer, VHDX_HEADER_SIZE, 4) && !memcmp(&header2->signature, "head", 4) && header2->version == 1) { h2_seq = header2->sequence_number; h2_valid = true; } /* If there is only 1 valid header (or no valid headers), we * don't care what the sequence numbers are */ if (h1_valid && !h2_valid) { s->curr_header = 0; } else if (!h1_valid && h2_valid) { s->curr_header = 1; } else if (!h1_valid && !h2_valid) { goto fail; } else { /* If both headers are valid, then we choose the active one by the * highest sequence number. If the sequence numbers are equal, that is * invalid */ if (h1_seq > h2_seq) { s->curr_header = 0; } else if (h2_seq > h1_seq) { s->curr_header = 1; } else { goto fail; } } vhdx_region_register(s, s->headers[s->curr_header]->log_offset, s->headers[s->curr_header]->log_length); goto exit; fail: error_setg_errno(errp, -ret, "No valid VHDX header found"); qemu_vfree(header1); qemu_vfree(header2); s->headers[0] = NULL; s->headers[1] = NULL; exit: qemu_vfree(buffer); } | 18,961 |
1 | int ff_interleave_add_packet(AVFormatContext *s, AVPacket *pkt, int (*compare)(AVFormatContext *, AVPacket *, AVPacket *)) { int ret; AVPacketList **next_point, *this_pktl; AVStream *st = s->streams[pkt->stream_index]; int chunked = s->max_chunk_size || s->max_chunk_duration; this_pktl = av_mallocz(sizeof(AVPacketList)); if (!this_pktl) return AVERROR(ENOMEM); if ((pkt->flags & AV_PKT_FLAG_UNCODED_FRAME)) { av_assert0(pkt->size == UNCODED_FRAME_PACKET_SIZE); av_assert0(((AVFrame *)pkt->data)->buf); } if ((ret = av_packet_ref(&this_pktl->pkt, pkt)) < 0) { av_free(this_pktl); return ret; } if (s->streams[pkt->stream_index]->last_in_packet_buffer) { next_point = &(st->last_in_packet_buffer->next); } else { next_point = &s->internal->packet_buffer; } if (chunked) { uint64_t max= av_rescale_q_rnd(s->max_chunk_duration, AV_TIME_BASE_Q, st->time_base, AV_ROUND_UP); st->interleaver_chunk_size += pkt->size; st->interleaver_chunk_duration += pkt->duration; if ( (s->max_chunk_size && st->interleaver_chunk_size > s->max_chunk_size) || (max && st->interleaver_chunk_duration > max)) { st->interleaver_chunk_size = 0; this_pktl->pkt.flags |= CHUNK_START; if (max && st->interleaver_chunk_duration > max) { int64_t syncoffset = (st->codec->codec_type == AVMEDIA_TYPE_VIDEO)*max/2; int64_t syncto = av_rescale(pkt->dts + syncoffset, 1, max)*max - syncoffset; st->interleaver_chunk_duration += (pkt->dts - syncto)/8 - max; } else st->interleaver_chunk_duration = 0; } } if (*next_point) { if (chunked && !(this_pktl->pkt.flags & CHUNK_START)) goto next_non_null; if (compare(s, &s->internal->packet_buffer_end->pkt, pkt)) { while ( *next_point && ((chunked && !((*next_point)->pkt.flags&CHUNK_START)) || !compare(s, &(*next_point)->pkt, pkt))) next_point = &(*next_point)->next; if (*next_point) goto next_non_null; } else { next_point = &(s->internal->packet_buffer_end->next); } } av_assert1(!*next_point); s->internal->packet_buffer_end = this_pktl; next_non_null: this_pktl->next = *next_point; s->streams[pkt->stream_index]->last_in_packet_buffer = *next_point = this_pktl; av_packet_unref(pkt); return 0; } | 18,962 |
1 | static void x8_init_block_index(MpegEncContext *s){ //FIXME maybe merge with ff_* //not s->linesize as this would be wrong for field pics //not that IntraX8 has interlacing support ;) const int linesize = s->current_picture.f.linesize[0]; const int uvlinesize = s->current_picture.f.linesize[1]; s->dest[0] = s->current_picture.f.data[0]; s->dest[1] = s->current_picture.f.data[1]; s->dest[2] = s->current_picture.f.data[2]; s->dest[0] += s->mb_y * linesize << 3; s->dest[1] += ( s->mb_y&(~1) ) * uvlinesize << 2;//chroma blocks are on add rows s->dest[2] += ( s->mb_y&(~1) ) * uvlinesize << 2; } | 18,964 |
1 | static int qemu_rdma_search_ram_block(RDMAContext *rdma, uint64_t block_offset, uint64_t offset, uint64_t length, uint64_t *block_index, uint64_t *chunk_index) { uint64_t current_addr = block_offset + offset; RDMALocalBlock *block = g_hash_table_lookup(rdma->blockmap, (void *) block_offset); assert(block); assert(current_addr >= block->offset); assert((current_addr + length) <= (block->offset + block->length)); *block_index = block->index; *chunk_index = ram_chunk_index(block->local_host_addr, block->local_host_addr + (current_addr - block->offset)); return 0; } | 18,966 |
1 | static int decode_update_thread_context(AVCodecContext *dst, const AVCodecContext *src) { H264Context *h = dst->priv_data, *h1 = src->priv_data; int inited = h->context_initialized, err = 0; int context_reinitialized = 0; int i, ret; if (dst == src) return 0; if (inited && (h->width != h1->width || h->height != h1->height || h->mb_width != h1->mb_width || h->mb_height != h1->mb_height || h->sps.bit_depth_luma != h1->sps.bit_depth_luma || h->sps.chroma_format_idc != h1->sps.chroma_format_idc || h->sps.colorspace != h1->sps.colorspace)) { /* set bits_per_raw_sample to the previous value. the check for changed * bit depth in h264_set_parameter_from_sps() uses it and sets it to * the current value */ h->avctx->bits_per_raw_sample = h->sps.bit_depth_luma; av_freep(&h->bipred_scratchpad); h->width = h1->width; h->height = h1->height; h->mb_height = h1->mb_height; h->mb_width = h1->mb_width; h->mb_num = h1->mb_num; h->mb_stride = h1->mb_stride; h->b_stride = h1->b_stride; // SPS/PPS copy_parameter_set((void **)h->sps_buffers, (void **)h1->sps_buffers, MAX_SPS_COUNT, sizeof(SPS)); h->sps = h1->sps; copy_parameter_set((void **)h->pps_buffers, (void **)h1->pps_buffers, MAX_PPS_COUNT, sizeof(PPS)); h->pps = h1->pps; if ((err = h264_slice_header_init(h, 1)) < 0) { av_log(h->avctx, AV_LOG_ERROR, "h264_slice_header_init() failed"); return err; } context_reinitialized = 1; #if 0 h264_set_parameter_from_sps(h); //Note we set context_reinitialized which will cause h264_set_parameter_from_sps to be reexecuted h->cur_chroma_format_idc = h1->cur_chroma_format_idc; #endif } /* update linesize on resize for h264. The h264 decoder doesn't * necessarily call ff_MPV_frame_start in the new thread */ h->linesize = h1->linesize; h->uvlinesize = h1->uvlinesize; /* copy block_offset since frame_start may not be called */ memcpy(h->block_offset, h1->block_offset, sizeof(h->block_offset)); if (!inited) { for (i = 0; i < MAX_SPS_COUNT; i++) av_freep(h->sps_buffers + i); for (i = 0; i < MAX_PPS_COUNT; i++) av_freep(h->pps_buffers + i); memcpy(h, h1, offsetof(H264Context, intra_pcm_ptr)); memcpy(&h->cabac, &h1->cabac, sizeof(H264Context) - offsetof(H264Context, cabac)); av_assert0((void*)&h->cabac == &h->mb_padding + 1); memset(h->sps_buffers, 0, sizeof(h->sps_buffers)); memset(h->pps_buffers, 0, sizeof(h->pps_buffers)); memset(&h->er, 0, sizeof(h->er)); memset(&h->me, 0, sizeof(h->me)); memset(&h->mb, 0, sizeof(h->mb)); memset(&h->mb_luma_dc, 0, sizeof(h->mb_luma_dc)); memset(&h->mb_padding, 0, sizeof(h->mb_padding)); h->avctx = dst; h->DPB = NULL; h->qscale_table_pool = NULL; h->mb_type_pool = NULL; h->ref_index_pool = NULL; h->motion_val_pool = NULL; if (h1->context_initialized) { h->context_initialized = 0; memset(&h->cur_pic, 0, sizeof(h->cur_pic)); avcodec_get_frame_defaults(&h->cur_pic.f); h->cur_pic.tf.f = &h->cur_pic.f; ret = ff_h264_alloc_tables(h); if (ret < 0) { av_log(dst, AV_LOG_ERROR, "Could not allocate memory for h264\n"); return ret; } ret = context_init(h); if (ret < 0) { av_log(dst, AV_LOG_ERROR, "context_init() failed.\n"); return ret; } } for (i = 0; i < 2; i++) { h->rbsp_buffer[i] = NULL; h->rbsp_buffer_size[i] = 0; } h->bipred_scratchpad = NULL; h->edge_emu_buffer = NULL; h->thread_context[0] = h; h->context_initialized = h1->context_initialized; } h->avctx->coded_height = h1->avctx->coded_height; h->avctx->coded_width = h1->avctx->coded_width; h->avctx->width = h1->avctx->width; h->avctx->height = h1->avctx->height; h->coded_picture_number = h1->coded_picture_number; h->first_field = h1->first_field; h->picture_structure = h1->picture_structure; h->qscale = h1->qscale; h->droppable = h1->droppable; h->data_partitioning = h1->data_partitioning; h->low_delay = h1->low_delay; for (i = 0; h->DPB && i < MAX_PICTURE_COUNT; i++) { unref_picture(h, &h->DPB[i]); if (h1->DPB[i].f.data[0] && (ret = ref_picture(h, &h->DPB[i], &h1->DPB[i])) < 0) return ret; } h->cur_pic_ptr = REBASE_PICTURE(h1->cur_pic_ptr, h, h1); unref_picture(h, &h->cur_pic); if (h1->cur_pic.f.buf[0] && (ret = ref_picture(h, &h->cur_pic, &h1->cur_pic)) < 0) return ret; h->workaround_bugs = h1->workaround_bugs; h->low_delay = h1->low_delay; h->droppable = h1->droppable; // extradata/NAL handling h->is_avc = h1->is_avc; // SPS/PPS copy_parameter_set((void **)h->sps_buffers, (void **)h1->sps_buffers, MAX_SPS_COUNT, sizeof(SPS)); h->sps = h1->sps; copy_parameter_set((void **)h->pps_buffers, (void **)h1->pps_buffers, MAX_PPS_COUNT, sizeof(PPS)); h->pps = h1->pps; // Dequantization matrices // FIXME these are big - can they be only copied when PPS changes? copy_fields(h, h1, dequant4_buffer, dequant4_coeff); for (i = 0; i < 6; i++) h->dequant4_coeff[i] = h->dequant4_buffer[0] + (h1->dequant4_coeff[i] - h1->dequant4_buffer[0]); for (i = 0; i < 6; i++) h->dequant8_coeff[i] = h->dequant8_buffer[0] + (h1->dequant8_coeff[i] - h1->dequant8_buffer[0]); h->dequant_coeff_pps = h1->dequant_coeff_pps; // POC timing copy_fields(h, h1, poc_lsb, redundant_pic_count); // reference lists copy_fields(h, h1, short_ref, cabac_init_idc); copy_picture_range(h->short_ref, h1->short_ref, 32, h, h1); copy_picture_range(h->long_ref, h1->long_ref, 32, h, h1); copy_picture_range(h->delayed_pic, h1->delayed_pic, MAX_DELAYED_PIC_COUNT + 2, h, h1); h->sync = h1->sync; if (context_reinitialized) h264_set_parameter_from_sps(h); if (!h->cur_pic_ptr) return 0; if (!h->droppable) { err = ff_h264_execute_ref_pic_marking(h, h->mmco, h->mmco_index); h->prev_poc_msb = h->poc_msb; h->prev_poc_lsb = h->poc_lsb; } h->prev_frame_num_offset = h->frame_num_offset; h->prev_frame_num = h->frame_num; h->outputed_poc = h->next_outputed_poc; return err; } | 18,967
1 | static bool use_exit_tb(DisasContext *ctx) { return ((ctx->base.tb->cflags & CF_LAST_IO) || ctx->base.singlestep_enabled || singlestep); } | 18,968 |
1 | static int register_insn (opc_handler_t **ppc_opcodes, opcode_t *insn) { if (insn->opc2 != 0xFF) { if (insn->opc3 != 0xFF) { if (register_dblind_insn(ppc_opcodes, insn->opc1, insn->opc2, insn->opc3, &insn->handler) < 0) return -1; } else { if (register_ind_insn(ppc_opcodes, insn->opc1, insn->opc2, &insn->handler) < 0) return -1; } } else { if (register_direct_insn(ppc_opcodes, insn->opc1, &insn->handler) < 0) return -1; } return 0; } | 18,970 |
1 | static ssize_t stellaris_enet_receive(NetClientState *nc, const uint8_t *buf, size_t size) { stellaris_enet_state *s = qemu_get_nic_opaque(nc); int n; uint8_t *p; uint32_t crc; if ((s->rctl & SE_RCTL_RXEN) == 0) return -1; if (s->np >= 31) { return 0; } DPRINTF("Received packet len=%zu\n", size); n = s->next_packet + s->np; if (n >= 31) n -= 31; s->np++; s->rx[n].len = size + 6; p = s->rx[n].data; *(p++) = (size + 6); *(p++) = (size + 6) >> 8; memcpy (p, buf, size); p += size; crc = crc32(~0, buf, size); *(p++) = crc; *(p++) = crc >> 8; *(p++) = crc >> 16; *(p++) = crc >> 24; /* Clear the remaining bytes in the last word. */ if ((size & 3) != 2) { memset(p, 0, (6 - size) & 3); } s->ris |= SE_INT_RX; stellaris_enet_update(s); return size; } | 18,971 |
1 | int ff_mp4_read_dec_config_descr(AVFormatContext *fc, AVStream *st, AVIOContext *pb) { int len, tag; int object_type_id = avio_r8(pb); avio_r8(pb); /* stream type */ avio_rb24(pb); /* buffer size db */ avio_rb32(pb); /* max bitrate */ avio_rb32(pb); /* avg bitrate */ if(avcodec_is_open(st->codec)) { av_log(fc, AV_LOG_DEBUG, "codec open in read_dec_config_descr\n"); return -1; } st->codec->codec_id= ff_codec_get_id(ff_mp4_obj_type, object_type_id); av_dlog(fc, "esds object type id 0x%02x\n", object_type_id); len = ff_mp4_read_descr(fc, pb, &tag); if (tag == MP4DecSpecificDescrTag) { av_dlog(fc, "Specific MPEG4 header len=%d\n", len); if (!len || (uint64_t)len > (1<<30)) return -1; av_free(st->codec->extradata); if (ff_alloc_extradata(st->codec, len)) return AVERROR(ENOMEM); avio_read(pb, st->codec->extradata, len); if (st->codec->codec_id == AV_CODEC_ID_AAC) { MPEG4AudioConfig cfg = {0}; avpriv_mpeg4audio_get_config(&cfg, st->codec->extradata, st->codec->extradata_size * 8, 1); st->codec->channels = cfg.channels; if (cfg.object_type == 29 && cfg.sampling_index < 3) // old mp3on4 st->codec->sample_rate = avpriv_mpa_freq_tab[cfg.sampling_index]; else if (cfg.ext_sample_rate) st->codec->sample_rate = cfg.ext_sample_rate; else st->codec->sample_rate = cfg.sample_rate; av_dlog(fc, "mp4a config channels %d obj %d ext obj %d " "sample rate %d ext sample rate %d\n", st->codec->channels, cfg.object_type, cfg.ext_object_type, cfg.sample_rate, cfg.ext_sample_rate); if (!(st->codec->codec_id = ff_codec_get_id(mp4_audio_types, cfg.object_type))) st->codec->codec_id = AV_CODEC_ID_AAC; } } return 0; } | 18,972 |
1 | static int swr_convert_internal(struct SwrContext *s, AudioData *out, int out_count, AudioData *in , int in_count){ AudioData *postin, *midbuf, *preout; int ret/*, in_max*/; AudioData preout_tmp, midbuf_tmp; if(s->full_convert){ av_assert0(!s->resample); swri_audio_convert(s->full_convert, out, in, in_count); return out_count; } // in_max= out_count*(int64_t)s->in_sample_rate / s->out_sample_rate + resample_filter_taps; // in_count= FFMIN(in_count, in_in + 2 - s->hist_buffer_count); if((ret=swri_realloc_audio(&s->postin, in_count))<0) return ret; if(s->resample_first){ av_assert0(s->midbuf.ch_count == s->used_ch_count); if((ret=swri_realloc_audio(&s->midbuf, out_count))<0) return ret; }else{ av_assert0(s->midbuf.ch_count == s->out.ch_count); if((ret=swri_realloc_audio(&s->midbuf, in_count))<0) return ret; } if((ret=swri_realloc_audio(&s->preout, out_count))<0) return ret; postin= &s->postin; midbuf_tmp= s->midbuf; midbuf= &midbuf_tmp; preout_tmp= s->preout; preout= &preout_tmp; if(s->int_sample_fmt == s-> in_sample_fmt && s->in.planar && !s->channel_map) postin= in; if(s->resample_first ? !s->resample : !s->rematrix) midbuf= postin; if(s->resample_first ? !s->rematrix : !s->resample) preout= midbuf; if(s->int_sample_fmt == s->out_sample_fmt && s->out.planar && !(s->out_sample_fmt==AV_SAMPLE_FMT_S32P && (s->dither.output_sample_bits&31))){ if(preout==in){ out_count= FFMIN(out_count, in_count); //TODO check at the end if this is needed or redundant av_assert0(s->in.planar); //we only support planar internally so it has to be, we support copying non planar though copy(out, in, out_count); return out_count; } else if(preout==postin) preout= midbuf= postin= out; else if(preout==midbuf) preout= midbuf= out; else preout= out; } if(in != postin){ swri_audio_convert(s->in_convert, postin, in, in_count); } if(s->resample_first){ if(postin != midbuf) out_count= resample(s, midbuf, out_count, postin, in_count); if(midbuf != preout) swri_rematrix(s, preout, midbuf, out_count, preout==out); }else{ if(postin != midbuf) swri_rematrix(s, midbuf, postin, in_count, midbuf==out); if(midbuf != preout) out_count= resample(s, preout, out_count, midbuf, in_count); } if(preout != out && out_count){ AudioData *conv_src = preout; if(s->dither.method){ int ch; int dither_count= FFMAX(out_count, 1<<16); if (preout == in) { conv_src = &s->dither.temp; if((ret=swri_realloc_audio(&s->dither.temp, dither_count))<0) return ret; } if((ret=swri_realloc_audio(&s->dither.noise, dither_count))<0) return ret; if(ret) for(ch=0; ch<s->dither.noise.ch_count; ch++) if((ret=swri_get_dither(s, s->dither.noise.ch[ch], s->dither.noise.count, 12345678913579<<ch, s->dither.noise.fmt))<0) return ret; av_assert0(s->dither.noise.ch_count == preout->ch_count); if(s->dither.noise_pos + out_count > s->dither.noise.count) s->dither.noise_pos = 0; if (s->dither.method < SWR_DITHER_NS){ if (s->mix_2_1_simd) { int len1= out_count&~15; int off = len1 * preout->bps; if(len1) for(ch=0; ch<preout->ch_count; ch++) s->mix_2_1_simd(conv_src->ch[ch], preout->ch[ch], s->dither.noise.ch[ch] + s->dither.noise.bps * s->dither.noise_pos, s->native_simd_one, 0, 0, len1); if(out_count != len1) for(ch=0; ch<preout->ch_count; ch++) s->mix_2_1_f(conv_src->ch[ch] + off, preout->ch[ch] + off, s->dither.noise.ch[ch] + s->dither.noise.bps * s->dither.noise_pos + off + len1, s->native_one, 0, 0, out_count - len1); } else { for(ch=0; ch<preout->ch_count; ch++) s->mix_2_1_f(conv_src->ch[ch], preout->ch[ch], s->dither.noise.ch[ch] + s->dither.noise.bps * 
s->dither.noise_pos, s->native_one, 0, 0, out_count); } } else { switch(s->int_sample_fmt) { case AV_SAMPLE_FMT_S16P :swri_noise_shaping_int16(s, conv_src, preout, &s->dither.noise, out_count); break; case AV_SAMPLE_FMT_S32P :swri_noise_shaping_int32(s, conv_src, preout, &s->dither.noise, out_count); break; case AV_SAMPLE_FMT_FLTP :swri_noise_shaping_float(s, conv_src, preout, &s->dither.noise, out_count); break; case AV_SAMPLE_FMT_DBLP :swri_noise_shaping_double(s,conv_src, preout, &s->dither.noise, out_count); break; } } s->dither.noise_pos += out_count; } //FIXME packed doesn't need more than 1 chan here! swri_audio_convert(s->out_convert, out, conv_src, out_count); } return out_count; } | 18,973 |
0 | int ff_probe_input_buffer(ByteIOContext **pb, AVInputFormat **fmt, const char *filename, void *logctx, unsigned int offset, unsigned int max_probe_size) { AVProbeData pd = { filename ? filename : "", NULL, -offset }; unsigned char *buf = NULL; int ret = 0, probe_size; if (!max_probe_size) { max_probe_size = PROBE_BUF_MAX; } else if (max_probe_size > PROBE_BUF_MAX) { max_probe_size = PROBE_BUF_MAX; } else if (max_probe_size < PROBE_BUF_MIN) { return AVERROR(EINVAL); } if (offset >= max_probe_size) { return AVERROR(EINVAL); } for(probe_size= PROBE_BUF_MIN; probe_size<=max_probe_size && !*fmt && ret >= 0; probe_size<<=1){ int ret, score = probe_size < max_probe_size ? AVPROBE_SCORE_MAX/4 : 0; int buf_offset = (probe_size == PROBE_BUF_MIN) ? 0 : probe_size>>1; if (probe_size < offset) { continue; } /* read probe data */ buf = av_realloc(buf, probe_size + AVPROBE_PADDING_SIZE); if ((ret = get_buffer(*pb, buf + buf_offset, probe_size - buf_offset)) < 0) { /* fail if error was not end of file, otherwise, lower score */ if (ret != AVERROR_EOF) { av_free(buf); return ret; } score = 0; ret = 0; /* error was end of file, nothing read */ } pd.buf_size += ret; pd.buf = &buf[offset]; memset(pd.buf + pd.buf_size, 0, AVPROBE_PADDING_SIZE); /* guess file format */ *fmt = av_probe_input_format2(&pd, 1, &score); if(*fmt){ if(score <= AVPROBE_SCORE_MAX/4){ //this can only be true in the last iteration av_log(logctx, AV_LOG_WARNING, "Format detected only with low score of %d, misdetection possible!\n", score); }else av_log(logctx, AV_LOG_DEBUG, "Probed with size=%d and score=%d\n", probe_size, score); } } av_free(buf); if (url_fseek(*pb, 0, SEEK_SET) < 0) { url_fclose(*pb); if (url_fopen(pb, filename, URL_RDONLY) < 0) return AVERROR(EIO); } return 0; } | 18,974 |
0 | static int metasound_read_bitstream(AVCodecContext *avctx, TwinVQContext *tctx, const uint8_t *buf, int buf_size) { TwinVQFrameData *bits = &tctx->bits; const TwinVQModeTab *mtab = tctx->mtab; int channels = tctx->avctx->channels; int sub; GetBitContext gb; int i, j, k; if (buf_size * 8 < avctx->bit_rate * mtab->size / avctx->sample_rate) { av_log(avctx, AV_LOG_ERROR, "Frame too small (%d bytes). Truncated file?\n", buf_size); return AVERROR(EINVAL); } init_get_bits(&gb, buf, buf_size * 8); bits->window_type = get_bits(&gb, TWINVQ_WINDOW_TYPE_BITS); if (bits->window_type > 8) { av_log(avctx, AV_LOG_ERROR, "Invalid window type, broken sample?\n"); return AVERROR_INVALIDDATA; } bits->ftype = ff_twinvq_wtype_to_ftype_table[tctx->bits.window_type]; sub = mtab->fmode[bits->ftype].sub; if (bits->ftype != TWINVQ_FT_SHORT) get_bits(&gb, 2); read_cb_data(tctx, &gb, bits->main_coeffs, bits->ftype); for (i = 0; i < channels; i++) for (j = 0; j < sub; j++) for (k = 0; k < mtab->fmode[bits->ftype].bark_n_coef; k++) bits->bark1[i][j][k] = get_bits(&gb, mtab->fmode[bits->ftype].bark_n_bit); for (i = 0; i < channels; i++) for (j = 0; j < sub; j++) bits->bark_use_hist[i][j] = get_bits1(&gb); if (bits->ftype == TWINVQ_FT_LONG) { for (i = 0; i < channels; i++) bits->gain_bits[i] = get_bits(&gb, TWINVQ_GAIN_BITS); } else { for (i = 0; i < channels; i++) { bits->gain_bits[i] = get_bits(&gb, TWINVQ_GAIN_BITS); for (j = 0; j < sub; j++) bits->sub_gain_bits[i * sub + j] = get_bits(&gb, TWINVQ_SUB_GAIN_BITS); } } for (i = 0; i < channels; i++) { bits->lpc_hist_idx[i] = get_bits(&gb, mtab->lsp_bit0); bits->lpc_idx1[i] = get_bits(&gb, mtab->lsp_bit1); for (j = 0; j < mtab->lsp_split; j++) bits->lpc_idx2[i][j] = get_bits(&gb, mtab->lsp_bit2); } if (bits->ftype == TWINVQ_FT_LONG) { read_cb_data(tctx, &gb, bits->ppc_coeffs, 3); for (i = 0; i < channels; i++) { bits->p_coef[i] = get_bits(&gb, mtab->ppc_period_bit); bits->g_coef[i] = get_bits(&gb, mtab->pgain_bit); } } return 0; } | 18,975 |
0 | static inline void yuv2packedXinC(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize, int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize, uint8_t *dest, int dstW, int y) { int i; switch(c->dstFormat) { case IMGFMT_RGB32: case IMGFMT_BGR32: YSCALE_YUV_2_RGBX_C(uint32_t) ((uint32_t*)dest)[i2+0]= r[Y1] + g[Y1] + b[Y1]; ((uint32_t*)dest)[i2+1]= r[Y2] + g[Y2] + b[Y2]; } break; case IMGFMT_RGB24: YSCALE_YUV_2_RGBX_C(uint8_t) ((uint8_t*)dest)[0]= r[Y1]; ((uint8_t*)dest)[1]= g[Y1]; ((uint8_t*)dest)[2]= b[Y1]; ((uint8_t*)dest)[3]= r[Y2]; ((uint8_t*)dest)[4]= g[Y2]; ((uint8_t*)dest)[5]= b[Y2]; ((uint8_t*)dest)+=6; } | 18,976 |
0 | static int protocol_client_vencrypt_init(VncState *vs, uint8_t *data, size_t len) { if (data[0] != 0 || data[1] != 2) { VNC_DEBUG("Unsupported VeNCrypt protocol %d.%d\n", (int)data[0], (int)data[1]); vnc_write_u8(vs, 1); /* Reject version */ vnc_flush(vs); vnc_client_error(vs); } else { VNC_DEBUG("Sending allowed auth %d\n", vs->vd->subauth); vnc_write_u8(vs, 0); /* Accept version */ vnc_write_u8(vs, 1); /* Number of sub-auths */ vnc_write_u32(vs, vs->vd->subauth); /* The supported auth */ vnc_flush(vs); vnc_read_when(vs, protocol_client_vencrypt_auth, 4); } return 0; } | 18,977 |
0 | static int qemu_rdma_exchange_send(RDMAContext *rdma, RDMAControlHeader *head, uint8_t *data, RDMAControlHeader *resp, int *resp_idx, int (*callback)(RDMAContext *rdma)) { int ret = 0; /* * Wait until the dest is ready before attempting to deliver the message * by waiting for a READY message. */ if (rdma->control_ready_expected) { RDMAControlHeader resp; ret = qemu_rdma_exchange_get_response(rdma, &resp, RDMA_CONTROL_READY, RDMA_WRID_READY); if (ret < 0) { return ret; } } /* * If the user is expecting a response, post a WR in anticipation of it. */ if (resp) { ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_DATA); if (ret) { error_report("rdma migration: error posting" " extra control recv for anticipated result!"); return ret; } } /* * Post a WR to replace the one we just consumed for the READY message. */ ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_READY); if (ret) { error_report("rdma migration: error posting first control recv!"); return ret; } /* * Deliver the control message that was requested. */ ret = qemu_rdma_post_send_control(rdma, data, head); if (ret < 0) { error_report("Failed to send control buffer!"); return ret; } /* * If we're expecting a response, block and wait for it. */ if (resp) { if (callback) { trace_qemu_rdma_exchange_send_issue_callback(); ret = callback(rdma); if (ret < 0) { return ret; } } trace_qemu_rdma_exchange_send_waiting(control_desc[resp->type]); ret = qemu_rdma_exchange_get_response(rdma, resp, resp->type, RDMA_WRID_DATA); if (ret < 0) { return ret; } qemu_rdma_move_header(rdma, RDMA_WRID_DATA, resp); if (resp_idx) { *resp_idx = RDMA_WRID_DATA; } trace_qemu_rdma_exchange_send_received(control_desc[resp->type]); } rdma->control_ready_expected = 1; return 0; } | 18,978 |
0 | static void isa_ne2000_set_bootindex(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { ISANE2000State *isa = ISA_NE2000(obj); NE2000State *s = &isa->ne2000; int32_t boot_index; Error *local_err = NULL; visit_type_int32(v, name, &boot_index, &local_err); if (local_err) { goto out; } /* check whether bootindex is present in fw_boot_order list */ check_boot_index(boot_index, &local_err); if (local_err) { goto out; } /* change bootindex to a new one */ s->c.bootindex = boot_index; out: if (local_err) { error_propagate(errp, local_err); } } | 18,979 |
0 | static void dynticks_rearm_timer(struct qemu_alarm_timer *t) { timer_t host_timer = (timer_t)(long)t->priv; struct itimerspec timeout; int64_t nearest_delta_us = INT64_MAX; int64_t current_us; if (!active_timers[QEMU_TIMER_REALTIME] && !active_timers[QEMU_TIMER_VIRTUAL]) return; nearest_delta_us = qemu_next_deadline_dyntick(); /* check whether a timer is already running */ if (timer_gettime(host_timer, &timeout)) { perror("gettime"); fprintf(stderr, "Internal timer error: aborting\n"); exit(1); } current_us = timeout.it_value.tv_sec * 1000000 + timeout.it_value.tv_nsec/1000; if (current_us && current_us <= nearest_delta_us) return; timeout.it_interval.tv_sec = 0; timeout.it_interval.tv_nsec = 0; /* 0 for one-shot timer */ timeout.it_value.tv_sec = nearest_delta_us / 1000000; timeout.it_value.tv_nsec = (nearest_delta_us % 1000000) * 1000; if (timer_settime(host_timer, 0 /* RELATIVE */, &timeout, NULL)) { perror("settime"); fprintf(stderr, "Internal timer error: aborting\n"); exit(1); } } | 18,980 |
0 | static int64_t coroutine_fn bdrv_co_get_block_status_above(BlockDriverState *bs, BlockDriverState *base, int64_t sector_num, int nb_sectors, int *pnum) { BlockDriverState *p; int64_t ret = 0; assert(bs != base); for (p = bs; p != base; p = backing_bs(p)) { ret = bdrv_co_get_block_status(p, sector_num, nb_sectors, pnum); if (ret < 0 || ret & BDRV_BLOCK_ALLOCATED) { break; } /* [sector_num, pnum] unallocated on this layer, which could be only * the first part of [sector_num, nb_sectors]. */ nb_sectors = MIN(nb_sectors, *pnum); } return ret; } | 18,981 |
0 | timer_write(void *opaque, hwaddr addr, uint64_t val64, unsigned int size) { struct timerblock *t = opaque; struct xlx_timer *xt; unsigned int timer; uint32_t value = val64; addr >>= 2; timer = timer_from_addr(addr); xt = &t->timers[timer]; D(fprintf(stderr, "%s addr=%x val=%x (timer=%d off=%d)\n", __func__, addr * 4, value, timer, addr & 3)); /* Further decoding to address a specific timers reg. */ addr &= 3; switch (addr) { case R_TCSR: if (value & TCSR_TINT) value &= ~TCSR_TINT; xt->regs[addr] = value; if (value & TCSR_ENT) timer_enable(xt); break; default: if (addr < ARRAY_SIZE(xt->regs)) xt->regs[addr] = value; break; } timer_update_irq(t); } | 18,982 |
0 | int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options) { int i, count, ret, read_size, j; AVStream *st; AVPacket pkt1, *pkt; int64_t old_offset = avio_tell(ic->pb); int orig_nb_streams = ic->nb_streams; // new streams might appear, no options for those for(i=0;i<ic->nb_streams;i++) { AVCodec *codec; AVDictionary *thread_opt = NULL; st = ic->streams[i]; if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO || st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) { /* if(!st->time_base.num) st->time_base= */ if(!st->codec->time_base.num) st->codec->time_base= st->time_base; } //only for the split stuff if (!st->parser && !(ic->flags & AVFMT_FLAG_NOPARSE)) { st->parser = av_parser_init(st->codec->codec_id); if(st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser){ st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES; } } assert(!st->codec->codec); codec = avcodec_find_decoder(st->codec->codec_id); /* force thread count to 1 since the h264 decoder will not extract SPS * and PPS to extradata during multi-threaded decoding */ av_dict_set(options ? &options[i] : &thread_opt, "threads", "1", 0); /* Ensure that subtitle_header is properly set. */ if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE && codec && !st->codec->codec) avcodec_open2(st->codec, codec, options ? &options[i] : &thread_opt); //try to just open decoders, in case this is enough to get parameters if(!has_codec_parameters(st->codec)){ if (codec && !st->codec->codec) avcodec_open2(st->codec, codec, options ? &options[i] : &thread_opt); } if (!options) av_dict_free(&thread_opt); } for (i=0; i<ic->nb_streams; i++) { ic->streams[i]->info->last_dts = AV_NOPTS_VALUE; } count = 0; read_size = 0; for(;;) { if (ff_check_interrupt(&ic->interrupt_callback)){ ret= AVERROR_EXIT; av_log(ic, AV_LOG_DEBUG, "interrupted\n"); break; } /* check if one codec still needs to be handled */ for(i=0;i<ic->nb_streams;i++) { int fps_analyze_framecount = 20; st = ic->streams[i]; if (!has_codec_parameters(st->codec)) break; /* if the timebase is coarse (like the usual millisecond precision of mkv), we need to analyze more frames to reliably arrive at the correct fps */ if (av_q2d(st->time_base) > 0.0005) fps_analyze_framecount *= 2; if (ic->fps_probe_size >= 0) fps_analyze_framecount = ic->fps_probe_size; /* variable fps and no guess at the real fps */ if( tb_unreliable(st->codec) && !(st->r_frame_rate.num && st->avg_frame_rate.num) && st->info->duration_count < fps_analyze_framecount && st->codec->codec_type == AVMEDIA_TYPE_VIDEO) break; if(st->parser && st->parser->parser->split && !st->codec->extradata) break; if(st->first_dts == AV_NOPTS_VALUE) break; } if (i == ic->nb_streams) { /* NOTE: if the format has no header, then we need to read some packets to get most of the streams, so we cannot stop here */ if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) { /* if we found the info for all the codecs, we can stop */ ret = count; av_log(ic, AV_LOG_DEBUG, "All info found\n"); break; } } /* we did not get all the codec info, but we read too much data */ if (read_size >= ic->probesize) { ret = count; av_log(ic, AV_LOG_DEBUG, "Probe buffer size limit %d reached\n", ic->probesize); break; } /* NOTE: a new stream can be added there if no header in file (AVFMTCTX_NOHEADER) */ ret = read_frame_internal(ic, &pkt1); if (ret == AVERROR(EAGAIN)) continue; if (ret < 0) { /* EOF or error*/ AVPacket empty_pkt = { 0 }; int err; av_init_packet(&empty_pkt); ret = -1; /* we could not have all the codec parameters before EOF */ for(i=0;i<ic->nb_streams;i++) { st = 
ic->streams[i]; /* flush the decoders */ do { err = try_decode_frame(st, &empty_pkt, (options && i < orig_nb_streams) ? &options[i] : NULL); } while (err > 0 && !has_codec_parameters(st->codec)); if (err < 0) { av_log(ic, AV_LOG_WARNING, "decoding for stream %d failed\n", st->index); } else if (!has_codec_parameters(st->codec)){ char buf[256]; avcodec_string(buf, sizeof(buf), st->codec, 0); av_log(ic, AV_LOG_WARNING, "Could not find codec parameters (%s)\n", buf); } else { ret = 0; } } break; } pkt= add_to_pktbuf(&ic->packet_buffer, &pkt1, &ic->packet_buffer_end); if ((ret = av_dup_packet(pkt)) < 0) goto find_stream_info_err; read_size += pkt->size; st = ic->streams[pkt->stream_index]; if (st->codec_info_nb_frames>1) { if (st->time_base.den > 0 && av_rescale_q(st->info->codec_info_duration, st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration) { av_log(ic, AV_LOG_WARNING, "max_analyze_duration reached\n"); break; } st->info->codec_info_duration += pkt->duration; } { int64_t last = st->info->last_dts; if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && pkt->dts > last){ int64_t duration= pkt->dts - last; double dur= duration * av_q2d(st->time_base); // if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO) // av_log(NULL, AV_LOG_ERROR, "%f\n", dur); if (st->info->duration_count < 2) memset(st->info->duration_error, 0, sizeof(st->info->duration_error)); for (i=1; i<FF_ARRAY_ELEMS(st->info->duration_error); i++) { int framerate= get_std_framerate(i); int ticks= lrintf(dur*framerate/(1001*12)); double error = dur - (double)ticks*1001*12 / framerate; st->info->duration_error[i] += error*error; } st->info->duration_count++; // ignore the first 4 values, they might have some random jitter if (st->info->duration_count > 3) st->info->duration_gcd = av_gcd(st->info->duration_gcd, duration); } if (last == AV_NOPTS_VALUE || st->info->duration_count <= 1) st->info->last_dts = pkt->dts; } if(st->parser && st->parser->parser->split && !st->codec->extradata){ int i= st->parser->parser->split(st->codec, pkt->data, pkt->size); if (i > 0 && i < FF_MAX_EXTRADATA_SIZE) { st->codec->extradata_size= i; st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE); if (!st->codec->extradata) return AVERROR(ENOMEM); memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size); memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE); } } /* if still no information, we try to open the codec and to decompress the frame. We try to avoid that in most cases as it takes longer and uses more memory. For MPEG-4, we need to decompress for QuickTime. If CODEC_CAP_CHANNEL_CONF is set this will force decoding of at least one frame of codec data, this makes sure the codec initializes the channel configuration and does not only trust the values from the container. */ try_decode_frame(st, pkt, (options && i < orig_nb_streams ) ? 
&options[i] : NULL); st->codec_info_nb_frames++; count++; } // close codecs which were opened in try_decode_frame() for(i=0;i<ic->nb_streams;i++) { st = ic->streams[i]; if(st->codec->codec) avcodec_close(st->codec); } for(i=0;i<ic->nb_streams;i++) { st = ic->streams[i]; if (st->codec_info_nb_frames>2 && !st->avg_frame_rate.num && st->info->codec_info_duration) av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den, (st->codec_info_nb_frames-2)*(int64_t)st->time_base.den, st->info->codec_info_duration*(int64_t)st->time_base.num, 60000); if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) { // the check for tb_unreliable() is not completely correct, since this is not about handling // a unreliable/inexact time base, but a time base that is finer than necessary, as e.g. // ipmovie.c produces. if (tb_unreliable(st->codec) && st->info->duration_count > 15 && st->info->duration_gcd > 1 && !st->r_frame_rate.num) av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, st->time_base.den, st->time_base.num * st->info->duration_gcd, INT_MAX); if (st->info->duration_count && !st->r_frame_rate.num && tb_unreliable(st->codec) /*&& //FIXME we should not special-case MPEG-2, but this needs testing with non-MPEG-2 ... st->time_base.num*duration_sum[i]/st->info->duration_count*101LL > st->time_base.den*/){ int num = 0; double best_error= 2*av_q2d(st->time_base); best_error = best_error*best_error*st->info->duration_count*1000*12*30; for (j=1; j<FF_ARRAY_ELEMS(st->info->duration_error); j++) { double error = st->info->duration_error[j] * get_std_framerate(j); // if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO) // av_log(NULL, AV_LOG_ERROR, "%f %f\n", get_std_framerate(j) / 12.0/1001, error); if(error < best_error){ best_error= error; num = get_std_framerate(j); } } // do not increase frame rate by more than 1 % in order to match a standard rate. 
if (num && (!st->r_frame_rate.num || (double)num/(12*1001) < 1.01 * av_q2d(st->r_frame_rate))) av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, 12*1001, INT_MAX); } if (!st->r_frame_rate.num){ if( st->codec->time_base.den * (int64_t)st->time_base.num <= st->codec->time_base.num * st->codec->ticks_per_frame * (int64_t)st->time_base.den){ st->r_frame_rate.num = st->codec->time_base.den; st->r_frame_rate.den = st->codec->time_base.num * st->codec->ticks_per_frame; }else{ st->r_frame_rate.num = st->time_base.den; st->r_frame_rate.den = st->time_base.num; } } }else if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO) { if(!st->codec->bits_per_coded_sample) st->codec->bits_per_coded_sample= av_get_bits_per_sample(st->codec->codec_id); // set stream disposition based on audio service type switch (st->codec->audio_service_type) { case AV_AUDIO_SERVICE_TYPE_EFFECTS: st->disposition = AV_DISPOSITION_CLEAN_EFFECTS; break; case AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED: st->disposition = AV_DISPOSITION_VISUAL_IMPAIRED; break; case AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED: st->disposition = AV_DISPOSITION_HEARING_IMPAIRED; break; case AV_AUDIO_SERVICE_TYPE_COMMENTARY: st->disposition = AV_DISPOSITION_COMMENT; break; case AV_AUDIO_SERVICE_TYPE_KARAOKE: st->disposition = AV_DISPOSITION_KARAOKE; break; } } } estimate_timings(ic, old_offset); compute_chapters_end(ic); #if 0 /* correct DTS for B-frame streams with no timestamps */ for(i=0;i<ic->nb_streams;i++) { st = ic->streams[i]; if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) { if(b-frames){ ppktl = &ic->packet_buffer; while(ppkt1){ if(ppkt1->stream_index != i) continue; if(ppkt1->pkt->dts < 0) break; if(ppkt1->pkt->pts != AV_NOPTS_VALUE) break; ppkt1->pkt->dts -= delta; ppkt1= ppkt1->next; } if(ppkt1) continue; st->cur_dts -= delta; } } } #endif find_stream_info_err: for (i=0; i < ic->nb_streams; i++) { if (ic->streams[i]->codec) ic->streams[i]->codec->thread_count = 0; av_freep(&ic->streams[i]->info); } return ret; } | 18,983 |
0 | static void dcr_write_sdram (void *opaque, int dcrn, uint32_t val) { ppc4xx_sdram_t *sdram; sdram = opaque; switch (dcrn) { case SDRAM0_CFGADDR: sdram->addr = val; break; case SDRAM0_CFGDATA: switch (sdram->addr) { case 0x00: /* SDRAM_BESR0 */ sdram->besr0 &= ~val; break; case 0x08: /* SDRAM_BESR1 */ sdram->besr1 &= ~val; break; case 0x10: /* SDRAM_BEAR */ sdram->bear = val; break; case 0x20: /* SDRAM_CFG */ val &= 0xFFE00000; if (!(sdram->cfg & 0x80000000) && (val & 0x80000000)) { #ifdef DEBUG_SDRAM printf("%s: enable SDRAM controller\n", __func__); #endif /* validate all RAM mappings */ sdram_map_bcr(sdram); sdram->status &= ~0x80000000; } else if ((sdram->cfg & 0x80000000) && !(val & 0x80000000)) { #ifdef DEBUG_SDRAM printf("%s: disable SDRAM controller\n", __func__); #endif /* invalidate all RAM mappings */ sdram_unmap_bcr(sdram); sdram->status |= 0x80000000; } if (!(sdram->cfg & 0x40000000) && (val & 0x40000000)) sdram->status |= 0x40000000; else if ((sdram->cfg & 0x40000000) && !(val & 0x40000000)) sdram->status &= ~0x40000000; sdram->cfg = val; break; case 0x24: /* SDRAM_STATUS */ /* Read-only register */ break; case 0x30: /* SDRAM_RTR */ sdram->rtr = val & 0x3FF80000; break; case 0x34: /* SDRAM_PMIT */ sdram->pmit = (val & 0xF8000000) | 0x07C00000; break; case 0x40: /* SDRAM_B0CR */ sdram_set_bcr(&sdram->bcr[0], val, sdram->cfg & 0x80000000); break; case 0x44: /* SDRAM_B1CR */ sdram_set_bcr(&sdram->bcr[1], val, sdram->cfg & 0x80000000); break; case 0x48: /* SDRAM_B2CR */ sdram_set_bcr(&sdram->bcr[2], val, sdram->cfg & 0x80000000); break; case 0x4C: /* SDRAM_B3CR */ sdram_set_bcr(&sdram->bcr[3], val, sdram->cfg & 0x80000000); break; case 0x80: /* SDRAM_TR */ sdram->tr = val & 0x018FC01F; break; case 0x94: /* SDRAM_ECCCFG */ sdram->ecccfg = val & 0x00F00000; break; case 0x98: /* SDRAM_ECCESR */ val &= 0xFFF0F000; if (sdram->eccesr == 0 && val != 0) qemu_irq_raise(sdram->irq); else if (sdram->eccesr != 0 && val == 0) qemu_irq_lower(sdram->irq); sdram->eccesr = val; break; default: /* Error */ break; } break; } } | 18,984 |
0 | void object_property_add_alias(Object *obj, const char *name, Object *target_obj, const char *target_name, Error **errp) { AliasProperty *prop; ObjectProperty *target_prop; target_prop = object_property_find(target_obj, target_name, errp); if (!target_prop) { return; } prop = g_malloc(sizeof(*prop)); prop->target_obj = target_obj; prop->target_name = target_name; object_property_add(obj, name, target_prop->type, property_get_alias, property_set_alias, property_release_alias, prop, errp); } | 18,985 |
0 | static inline int tcg_global_reg_new_internal(TCGType type, int reg, const char *name) { TCGContext *s = &tcg_ctx; TCGTemp *ts; int idx; #if TCG_TARGET_REG_BITS == 32 if (type != TCG_TYPE_I32) tcg_abort(); #endif if (tcg_regset_test_reg(s->reserved_regs, reg)) tcg_abort(); idx = s->nb_globals; tcg_temp_alloc(s, s->nb_globals + 1); ts = &s->temps[s->nb_globals]; ts->base_type = type; ts->type = type; ts->fixed_reg = 1; ts->reg = reg; ts->name = name; s->nb_globals++; tcg_regset_set_reg(s->reserved_regs, reg); return idx; } | 18,986 |
0 | static SocketAddressLegacy *sd_socket_address(const char *path, const char *host, const char *port) { SocketAddressLegacy *addr = g_new0(SocketAddressLegacy, 1); if (path) { addr->type = SOCKET_ADDRESS_LEGACY_KIND_UNIX; addr->u.q_unix.data = g_new0(UnixSocketAddress, 1); addr->u.q_unix.data->path = g_strdup(path); } else { addr->type = SOCKET_ADDRESS_LEGACY_KIND_INET; addr->u.inet.data = g_new0(InetSocketAddress, 1); addr->u.inet.data->host = g_strdup(host ?: SD_DEFAULT_ADDR); addr->u.inet.data->port = g_strdup(port ?: stringify(SD_DEFAULT_PORT)); } return addr; } | 18,987 |
0 | void tcg_target_init(TCGContext *s) { tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff); #if defined(__sparc_v9__) && !defined(__sparc_v8plus__) tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffffffff); #endif tcg_regset_set32(tcg_target_call_clobber_regs, 0, (1 << TCG_REG_G1) | (1 << TCG_REG_G2) | (1 << TCG_REG_G3) | (1 << TCG_REG_G4) | (1 << TCG_REG_G5) | (1 << TCG_REG_G6) | (1 << TCG_REG_G7) | (1 << TCG_REG_O0) | (1 << TCG_REG_O1) | (1 << TCG_REG_O2) | (1 << TCG_REG_O3) | (1 << TCG_REG_O4) | (1 << TCG_REG_O5) | (1 << TCG_REG_O7)); tcg_regset_clear(s->reserved_regs); tcg_regset_set_reg(s->reserved_regs, TCG_REG_G0); #if defined(__sparc_v9__) && !defined(__sparc_v8plus__) tcg_regset_set_reg(s->reserved_regs, TCG_REG_I4); // for internal use #endif tcg_regset_set_reg(s->reserved_regs, TCG_REG_I5); // for internal use tcg_regset_set_reg(s->reserved_regs, TCG_REG_I6); tcg_regset_set_reg(s->reserved_regs, TCG_REG_I7); tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6); tcg_regset_set_reg(s->reserved_regs, TCG_REG_O7); tcg_add_target_add_op_defs(sparc_op_defs); } | 18,989 |
0 | int kvm_has_sync_mmu(void) { return kvm_check_extension(kvm_state, KVM_CAP_SYNC_MMU); } | 18,991 |
0 | static void cirrus_do_copy(CirrusVGAState *s, int dst, int src, int w, int h) { int sx = 0, sy = 0; int dx = 0, dy = 0; int depth = 0; int notify = 0; /* make sure to only copy if it's a plain copy ROP */ if (*s->cirrus_rop == cirrus_bitblt_rop_fwd_src || *s->cirrus_rop == cirrus_bitblt_rop_bkwd_src) { int width, height; depth = s->vga.get_bpp(&s->vga) / 8; s->vga.get_resolution(&s->vga, &width, &height); /* extra x, y */ sx = (src % ABS(s->cirrus_blt_srcpitch)) / depth; sy = (src / ABS(s->cirrus_blt_srcpitch)); dx = (dst % ABS(s->cirrus_blt_dstpitch)) / depth; dy = (dst / ABS(s->cirrus_blt_dstpitch)); /* normalize width */ w /= depth; /* if we're doing a backward copy, we have to adjust our x/y to be the upper left corner (instead of the lower right corner) */ if (s->cirrus_blt_dstpitch < 0) { sx -= (s->cirrus_blt_width / depth) - 1; dx -= (s->cirrus_blt_width / depth) - 1; sy -= s->cirrus_blt_height - 1; dy -= s->cirrus_blt_height - 1; } /* are we in the visible portion of memory? */ if (sx >= 0 && sy >= 0 && dx >= 0 && dy >= 0 && (sx + w) <= width && (sy + h) <= height && (dx + w) <= width && (dy + h) <= height) { notify = 1; } } /* we have to flush all pending changes so that the copy is generated at the appropriate moment in time */ if (notify) graphic_hw_update(s->vga.con); (*s->cirrus_rop) (s, s->vga.vram_ptr + (s->cirrus_blt_dstaddr & s->cirrus_addr_mask), s->vga.vram_ptr + (s->cirrus_blt_srcaddr & s->cirrus_addr_mask), s->cirrus_blt_dstpitch, s->cirrus_blt_srcpitch, s->cirrus_blt_width, s->cirrus_blt_height); if (notify) { qemu_console_copy(s->vga.con, sx, sy, dx, dy, s->cirrus_blt_width / depth, s->cirrus_blt_height); } /* we don't have to notify the display that this portion has changed since qemu_console_copy implies this */ cirrus_invalidate_region(s, s->cirrus_blt_dstaddr, s->cirrus_blt_dstpitch, s->cirrus_blt_width, s->cirrus_blt_height); } | 18,992 |
0 | static uint64_t pic_ioport_read(void *opaque, target_phys_addr_t addr, unsigned size) { PICCommonState *s = opaque; int ret; if (s->poll) { ret = pic_get_irq(s); if (ret >= 0) { pic_intack(s, ret); ret |= 0x80; } else { ret = 0; } s->poll = 0; } else { if (addr == 0) { if (s->read_reg_select) { ret = s->isr; } else { ret = s->irr; } } else { ret = s->imr; } } DPRINTF("read: addr=0x%02x val=0x%02x\n", addr, ret); return ret; } | 18,993 |