label: int64 (range 0 – 1)
func1: string (lengths 23 – 97k)
id: int64 (range 0 – 27.3k)
0
static int init_event_facility(SCLPEventFacility *event_facility) { DeviceState *sdev = DEVICE(event_facility); DeviceState *quiesce; /* Spawn a new bus for SCLP events */ qbus_create_inplace(&event_facility->sbus, sizeof(event_facility->sbus), TYPE_SCLP_EVENTS_BUS, sdev, NULL); quiesce = qdev_create(&event_facility->sbus.qbus, "sclpquiesce"); if (!quiesce) { return -1; } qdev_init_nofail(quiesce); object_initialize(&cpu_hotplug, sizeof(cpu_hotplug), TYPE_SCLP_CPU_HOTPLUG); qdev_set_parent_bus(DEVICE(&cpu_hotplug), BUS(&event_facility->sbus)); object_property_set_bool(OBJECT(&cpu_hotplug), true, "realized", NULL); return 0; }
23,737
0
static ssize_t qio_channel_websock_writev(QIOChannel *ioc, const struct iovec *iov, size_t niov, int *fds, size_t nfds, Error **errp) { QIOChannelWebsock *wioc = QIO_CHANNEL_WEBSOCK(ioc); size_t i; ssize_t done = 0; ssize_t ret; if (wioc->io_err) { *errp = error_copy(wioc->io_err); return -1; } if (wioc->io_eof) { error_setg(errp, "%s", "Broken pipe"); return -1; } for (i = 0; i < niov; i++) { size_t want = iov[i].iov_len; if ((want + wioc->rawoutput.offset) > QIO_CHANNEL_WEBSOCK_MAX_BUFFER) { want = (QIO_CHANNEL_WEBSOCK_MAX_BUFFER - wioc->rawoutput.offset); } if (want == 0) { goto done; } buffer_reserve(&wioc->rawoutput, want); buffer_append(&wioc->rawoutput, iov[i].iov_base, want); done += want; if (want < iov[i].iov_len) { break; } } done: ret = qio_channel_websock_write_wire(wioc, errp); if (ret < 0 && ret != QIO_CHANNEL_ERR_BLOCK) { qio_channel_websock_unset_watch(wioc); return -1; } qio_channel_websock_set_watch(wioc); if (done == 0) { return QIO_CHANNEL_ERR_BLOCK; } return done; }
23,738
0
static int virtio_blk_load(QEMUFile *f, void *opaque, int version_id) { VirtIOBlock *s = opaque; if (version_id != 2) return -EINVAL; virtio_load(&s->vdev, f); while (qemu_get_sbyte(f)) { VirtIOBlockReq *req = virtio_blk_alloc_request(s); qemu_get_buffer(f, (unsigned char*)&req->elem, sizeof(req->elem)); req->next = s->rq; s->rq = req->next; } return 0; }
23,739
0
bool timerlist_has_timers(QEMUTimerList *timer_list) { return !!timer_list->active_timers; }
23,740
0
int inet_connect_opts(QemuOpts *opts, Error **errp) { struct addrinfo ai,*res,*e; const char *addr; const char *port; char uaddr[INET6_ADDRSTRLEN+1]; char uport[33]; int sock,rc; bool block; memset(&ai,0, sizeof(ai)); ai.ai_flags = AI_CANONNAME | AI_ADDRCONFIG; ai.ai_family = PF_UNSPEC; ai.ai_socktype = SOCK_STREAM; addr = qemu_opt_get(opts, "host"); port = qemu_opt_get(opts, "port"); block = qemu_opt_get_bool(opts, "block", 0); if (addr == NULL || port == NULL) { fprintf(stderr, "inet_connect: host and/or port not specified\n"); error_set(errp, QERR_SOCKET_CREATE_FAILED); return -1; } if (qemu_opt_get_bool(opts, "ipv4", 0)) ai.ai_family = PF_INET; if (qemu_opt_get_bool(opts, "ipv6", 0)) ai.ai_family = PF_INET6; /* lookup */ if (0 != (rc = getaddrinfo(addr, port, &ai, &res))) { fprintf(stderr,"getaddrinfo(%s,%s): %s\n", addr, port, gai_strerror(rc)); error_set(errp, QERR_SOCKET_CREATE_FAILED); return -1; } for (e = res; e != NULL; e = e->ai_next) { if (getnameinfo((struct sockaddr*)e->ai_addr,e->ai_addrlen, uaddr,INET6_ADDRSTRLEN,uport,32, NI_NUMERICHOST | NI_NUMERICSERV) != 0) { fprintf(stderr,"%s: getnameinfo: oops\n", __FUNCTION__); continue; } sock = qemu_socket(e->ai_family, e->ai_socktype, e->ai_protocol); if (sock < 0) { fprintf(stderr,"%s: socket(%s): %s\n", __FUNCTION__, inet_strfamily(e->ai_family), strerror(errno)); continue; } setsockopt(sock,SOL_SOCKET,SO_REUSEADDR,(void*)&on,sizeof(on)); if (!block) { socket_set_nonblock(sock); } /* connect to peer */ do { rc = 0; if (connect(sock, e->ai_addr, e->ai_addrlen) < 0) { rc = -socket_error(); } } while (rc == -EINTR); #ifdef _WIN32 if (!block && (rc == -EINPROGRESS || rc == -EWOULDBLOCK || rc == -WSAEALREADY)) { #else if (!block && (rc == -EINPROGRESS)) { #endif error_set(errp, QERR_SOCKET_CONNECT_IN_PROGRESS); } else if (rc < 0) { if (NULL == e->ai_next) fprintf(stderr, "%s: connect(%s,%s,%s,%s): %s\n", __FUNCTION__, inet_strfamily(e->ai_family), e->ai_canonname, uaddr, uport, strerror(errno)); closesocket(sock); continue; } freeaddrinfo(res); return sock; } error_set(errp, QERR_SOCKET_CONNECT_FAILED); freeaddrinfo(res); return -1; }
23,741
0
static void ppc_cpu_do_nmi_on_cpu(CPUState *cs, run_on_cpu_data arg) { cpu_synchronize_state(cs); ppc_cpu_do_system_reset(cs); }
23,742
0
void HELPER(stby_e)(CPUHPPAState *env, target_ulong addr, target_ulong val) { uintptr_t ra = GETPC(); switch (addr & 3) { case 3: /* The 3 byte store must appear atomic. */ if (parallel_cpus) { atomic_store_3(env, addr - 3, val, 0xffffff00u, ra); } else { cpu_stw_data_ra(env, addr - 3, val >> 16, ra); cpu_stb_data_ra(env, addr - 1, val >> 8, ra); } break; case 2: cpu_stw_data_ra(env, addr - 2, val >> 16, ra); break; case 1: cpu_stb_data_ra(env, addr - 1, val >> 24, ra); break; default: /* Nothing is stored, but protection is checked and the cacheline is marked dirty. */ #ifndef CONFIG_USER_ONLY probe_write(env, addr, cpu_mmu_index(env, 0), ra); #endif break; } }
23,743
0
static int rv10_decode_packet(AVCodecContext *avctx, uint8_t *buf, int buf_size) { MpegEncContext *s = avctx->priv_data; int i, mb_count, mb_pos, left; init_get_bits(&s->gb, buf, buf_size*8); #if 0 for(i=0; i<buf_size*8 && i<100; i++) printf("%d", get_bits1(&s->gb)); printf("\n"); return 0; #endif if(s->codec_id ==CODEC_ID_RV10) mb_count = rv10_decode_picture_header(s); else mb_count = rv20_decode_picture_header(s); if (mb_count < 0) { av_log(s->avctx, AV_LOG_ERROR, "HEADER ERROR\n"); return -1; } if (s->mb_x >= s->mb_width || s->mb_y >= s->mb_height) { av_log(s->avctx, AV_LOG_ERROR, "POS ERROR %d %d\n", s->mb_x, s->mb_y); return -1; } mb_pos = s->mb_y * s->mb_width + s->mb_x; left = s->mb_width * s->mb_height - mb_pos; if (mb_count > left) { av_log(s->avctx, AV_LOG_ERROR, "COUNT ERROR\n"); return -1; } //if(s->pict_type == P_TYPE) return 0; if (s->mb_x == 0 && s->mb_y == 0) { if(MPV_frame_start(s, avctx) < 0) return -1; } #ifdef DEBUG printf("qscale=%d\n", s->qscale); #endif /* default quantization values */ if(s->codec_id== CODEC_ID_RV10){ if(s->mb_y==0) s->first_slice_line=1; }else{ s->first_slice_line=1; s->resync_mb_x= s->mb_x; s->resync_mb_y= s->mb_y; } if(s->h263_aic){ s->y_dc_scale_table= s->c_dc_scale_table= ff_aic_dc_scale_table; }else{ s->y_dc_scale_table= s->c_dc_scale_table= ff_mpeg1_dc_scale_table; } s->y_dc_scale= s->y_dc_scale_table[ s->qscale ]; s->c_dc_scale= s->c_dc_scale_table[ s->qscale ]; s->rv10_first_dc_coded[0] = 0; s->rv10_first_dc_coded[1] = 0; s->rv10_first_dc_coded[2] = 0; s->block_wrap[0]= s->block_wrap[1]= s->block_wrap[2]= s->block_wrap[3]= s->mb_width*2 + 2; s->block_wrap[4]= s->block_wrap[5]= s->mb_width + 2; ff_init_block_index(s); /* decode each macroblock */ for(i=0;i<mb_count;i++) { int ret; ff_update_block_index(s); #ifdef DEBUG printf("**mb x=%d y=%d\n", s->mb_x, s->mb_y); #endif s->dsp.clear_blocks(s->block[0]); s->mv_dir = MV_DIR_FORWARD; s->mv_type = MV_TYPE_16X16; ret=ff_h263_decode_mb(s, s->block); if (ret == SLICE_ERROR) { av_log(s->avctx, AV_LOG_ERROR, "ERROR at MB %d %d\n", s->mb_x, s->mb_y); return -1; } ff_h263_update_motion_val(s); MPV_decode_mb(s, s->block); if (++s->mb_x == s->mb_width) { s->mb_x = 0; s->mb_y++; ff_init_block_index(s); } if(s->mb_x == s->resync_mb_x) s->first_slice_line=0; if(ret == SLICE_END) break; } return buf_size; }
23,745
0
uint32_t net_checksum_add_cont(int len, uint8_t *buf, int seq) { uint32_t sum = 0; int i; for (i = seq; i < seq + len; i++) { if (i & 1) { sum += (uint32_t)buf[i - seq]; } else { sum += (uint32_t)buf[i - seq] << 8; } } return sum; }
23,746
0
static void write_dump_header(DumpState *s, Error **errp) { Error *local_err = NULL; if (s->dump_info.d_class == ELFCLASS32) { create_header32(s, &local_err); } else { create_header64(s, &local_err); } if (local_err) { error_propagate(errp, local_err); } }
23,748
0
static int parse_short_name(BDRVVVFATState* s, long_file_name* lfn, direntry_t* direntry) { int i, j; if (!is_short_name(direntry)) return 1; for (j = 7; j >= 0 && direntry->name[j] == ' '; j--); for (i = 0; i <= j; i++) { if (direntry->name[i] <= ' ' || direntry->name[i] > 0x7f) return -1; else if (s->downcase_short_names) lfn->name[i] = qemu_tolower(direntry->name[i]); else lfn->name[i] = direntry->name[i]; } for (j = 2; j >= 0 && direntry->extension[j] == ' '; j--); if (j >= 0) { lfn->name[i++] = '.'; lfn->name[i + j + 1] = '\0'; for (;j >= 0; j--) { if (direntry->extension[j] <= ' ' || direntry->extension[j] > 0x7f) return -2; else if (s->downcase_short_names) lfn->name[i + j] = qemu_tolower(direntry->extension[j]); else lfn->name[i + j] = direntry->extension[j]; } } else lfn->name[i + j + 1] = '\0'; lfn->len = strlen((char*)lfn->name); return 0; }
23,749
0
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb) { CPUARMState *env = cs->env_ptr; ARMCPU *cpu = arm_env_get_cpu(env); DisasContext dc1, *dc = &dc1; target_ulong pc_start; target_ulong next_page_start; int num_insns; int max_insns; bool end_of_page; /* generate intermediate code */ /* The A64 decoder has its own top level loop, because it doesn't need * the A32/T32 complexity to do with conditional execution/IT blocks/etc. */ if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) { gen_intermediate_code_a64(cs, tb); return; } pc_start = tb->pc; dc->tb = tb; dc->is_jmp = DISAS_NEXT; dc->pc = pc_start; dc->singlestep_enabled = cs->singlestep_enabled; dc->condjmp = 0; dc->aarch64 = 0; /* If we are coming from secure EL0 in a system with a 32-bit EL3, then * there is no secure EL1, so we route exceptions to EL3. */ dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3); dc->thumb = ARM_TBFLAG_THUMB(tb->flags); dc->sctlr_b = ARM_TBFLAG_SCTLR_B(tb->flags); dc->be_data = ARM_TBFLAG_BE_DATA(tb->flags) ? MO_BE : MO_LE; dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1; dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4; dc->mmu_idx = core_to_arm_mmu_idx(env, ARM_TBFLAG_MMUIDX(tb->flags)); dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx); #if !defined(CONFIG_USER_ONLY) dc->user = (dc->current_el == 0); #endif dc->ns = ARM_TBFLAG_NS(tb->flags); dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(tb->flags); dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags); dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags); dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags); dc->c15_cpar = ARM_TBFLAG_XSCALE_CPAR(tb->flags); dc->v7m_handler_mode = ARM_TBFLAG_HANDLER(tb->flags); dc->cp_regs = cpu->cp_regs; dc->features = env->features; /* Single step state. The code-generation logic here is: * SS_ACTIVE == 0: * generate code with no special handling for single-stepping (except * that anything that can make us go to SS_ACTIVE == 1 must end the TB; * this happens anyway because those changes are all system register or * PSTATE writes). * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending) * emit code for one insn * emit code to clear PSTATE.SS * emit code to generate software step exception for completed step * end TB (as usual for having generated an exception) * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending) * emit code to generate a software step exception * end the TB */ dc->ss_active = ARM_TBFLAG_SS_ACTIVE(tb->flags); dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(tb->flags); dc->is_ldex = false; dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */ cpu_F0s = tcg_temp_new_i32(); cpu_F1s = tcg_temp_new_i32(); cpu_F0d = tcg_temp_new_i64(); cpu_F1d = tcg_temp_new_i64(); cpu_V0 = cpu_F0d; cpu_V1 = cpu_F1d; /* FIXME: cpu_M0 can probably be the same as cpu_V0. */ cpu_M0 = tcg_temp_new_i64(); next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE; num_insns = 0; max_insns = tb->cflags & CF_COUNT_MASK; if (max_insns == 0) { max_insns = CF_COUNT_MASK; } if (max_insns > TCG_MAX_INSNS) { max_insns = TCG_MAX_INSNS; } gen_tb_start(tb); tcg_clear_temp_count(); /* A note on handling of the condexec (IT) bits: * * We want to avoid the overhead of having to write the updated condexec * bits back to the CPUARMState for every instruction in an IT block. So: * (1) if the condexec bits are not already zero then we write * zero back into the CPUARMState now. This avoids complications trying * to do it at the end of the block. 
(For example if we don't do this * it's hard to identify whether we can safely skip writing condexec * at the end of the TB, which we definitely want to do for the case * where a TB doesn't do anything with the IT state at all.) * (2) if we are going to leave the TB then we call gen_set_condexec() * which will write the correct value into CPUARMState if zero is wrong. * This is done both for leaving the TB at the end, and for leaving * it because of an exception we know will happen, which is done in * gen_exception_insn(). The latter is necessary because we need to * leave the TB with the PC/IT state just prior to execution of the * instruction which caused the exception. * (3) if we leave the TB unexpectedly (eg a data abort on a load) * then the CPUARMState will be wrong and we need to reset it. * This is handled in the same way as restoration of the * PC in these situations; we save the value of the condexec bits * for each PC via tcg_gen_insn_start(), and restore_state_to_opc() * then uses this to restore them after an exception. * * Note that there are no instructions which can read the condexec * bits, and none which can write non-static values to them, so * we don't need to care about whether CPUARMState is correct in the * middle of a TB. */ /* Reset the conditional execution bits immediately. This avoids complications trying to do it at the end of the block. */ if (dc->condexec_mask || dc->condexec_cond) { TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_movi_i32(tmp, 0); store_cpu_field(tmp, condexec_bits); } do { dc->insn_start_idx = tcg_op_buf_count(); tcg_gen_insn_start(dc->pc, (dc->condexec_cond << 4) | (dc->condexec_mask >> 1), 0); num_insns++; #ifdef CONFIG_USER_ONLY /* Intercept jump to the magic kernel page. */ if (dc->pc >= 0xffff0000) { /* We always get here via a jump, so know we are not in a conditional execution block. */ gen_exception_internal(EXCP_KERNEL_TRAP); dc->is_jmp = DISAS_NORETURN; break; } #endif if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) { CPUBreakpoint *bp; QTAILQ_FOREACH(bp, &cs->breakpoints, entry) { if (bp->pc == dc->pc) { if (bp->flags & BP_CPU) { gen_set_condexec(dc); gen_set_pc_im(dc, dc->pc); gen_helper_check_breakpoints(cpu_env); /* End the TB early; it's likely not going to be executed */ dc->is_jmp = DISAS_UPDATE; } else { gen_exception_internal_insn(dc, 0, EXCP_DEBUG); /* The address covered by the breakpoint must be included in [tb->pc, tb->pc + tb->size) in order to for it to be properly cleared -- thus we increment the PC here so that the logic setting tb->size below does the right thing. */ /* TODO: Advance PC by correct instruction length to * avoid disassembler error messages */ dc->pc += 2; goto done_generating; } break; } } } if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) { gen_io_start(); } if (dc->ss_active && !dc->pstate_ss) { /* Singlestep state is Active-pending. * If we're in this state at the start of a TB then either * a) we just took an exception to an EL which is being debugged * and this is the first insn in the exception handler * b) debug exceptions were masked and we just unmasked them * without changing EL (eg by clearing PSTATE.D) * In either case we're going to take a swstep exception in the * "did not step an insn" case, and so the syndrome ISV and EX * bits should be zero. 
*/ assert(num_insns == 1); gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0), default_exception_el(dc)); goto done_generating; } if (dc->thumb) { disas_thumb_insn(env, dc); if (dc->condexec_mask) { dc->condexec_cond = (dc->condexec_cond & 0xe) | ((dc->condexec_mask >> 4) & 1); dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f; if (dc->condexec_mask == 0) { dc->condexec_cond = 0; } } } else { unsigned int insn = arm_ldl_code(env, dc->pc, dc->sctlr_b); dc->pc += 4; disas_arm_insn(dc, insn); } if (dc->condjmp && !dc->is_jmp) { gen_set_label(dc->condlabel); dc->condjmp = 0; } if (tcg_check_temp_count()) { fprintf(stderr, "TCG temporary leak before "TARGET_FMT_lx"\n", dc->pc); } /* Translation stops when a conditional branch is encountered. * Otherwise the subsequent code could get translated several times. * Also stop translation when a page boundary is reached. This * ensures prefetch aborts occur at the right place. */ /* We want to stop the TB if the next insn starts in a new page, * or if it spans between this page and the next. This means that * if we're looking at the last halfword in the page we need to * see if it's a 16-bit Thumb insn (which will fit in this TB) * or a 32-bit Thumb insn (which won't). * This is to avoid generating a silly TB with a single 16-bit insn * in it at the end of this page (which would execute correctly * but isn't very efficient). */ end_of_page = (dc->pc >= next_page_start) || ((dc->pc >= next_page_start - 3) && insn_crosses_page(env, dc)); } while (!dc->is_jmp && !tcg_op_buf_full() && !is_singlestepping(dc) && !singlestep && !end_of_page && num_insns < max_insns); if (tb->cflags & CF_LAST_IO) { if (dc->condjmp) { /* FIXME: This can theoretically happen with self-modifying code. */ cpu_abort(cs, "IO on conditional branch instruction"); } gen_io_end(); } /* At this stage dc->condjmp will only be set when the skipped instruction was a conditional branch or trap, and the PC has already been written. */ gen_set_condexec(dc); if (dc->is_jmp == DISAS_BX_EXCRET) { /* Exception return branches need some special case code at the * end of the TB, which is complex enough that it has to * handle the single-step vs not and the condition-failed * insn codepath itself. */ gen_bx_excret_final_code(dc); } else if (unlikely(is_singlestepping(dc))) { /* Unconditional and "condition passed" instruction codepath. */ switch (dc->is_jmp) { case DISAS_SWI: gen_ss_advance(dc); gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb), default_exception_el(dc)); break; case DISAS_HVC: gen_ss_advance(dc); gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2); break; case DISAS_SMC: gen_ss_advance(dc); gen_exception(EXCP_SMC, syn_aa32_smc(), 3); break; case DISAS_NEXT: case DISAS_UPDATE: gen_set_pc_im(dc, dc->pc); /* fall through */ default: /* FIXME: Single stepping a WFI insn will not halt the CPU. */ gen_singlestep_exception(dc); break; case DISAS_NORETURN: break; } } else { /* While branches must always occur at the end of an IT block, there are a few other things that can cause us to terminate the TB in the middle of an IT block: - Exception generating instructions (bkpt, swi, undefined). - Page boundaries. - Hardware watchpoints. Hardware breakpoints have already been handled and skip this code. 
*/ switch(dc->is_jmp) { case DISAS_NEXT: gen_goto_tb(dc, 1, dc->pc); break; case DISAS_JUMP: gen_goto_ptr(); break; case DISAS_UPDATE: gen_set_pc_im(dc, dc->pc); /* fall through */ default: /* indicate that the hash table must be used to find the next TB */ tcg_gen_exit_tb(0); break; case DISAS_NORETURN: /* nothing more to generate */ break; case DISAS_WFI: gen_helper_wfi(cpu_env); /* The helper doesn't necessarily throw an exception, but we * must go back to the main loop to check for interrupts anyway. */ tcg_gen_exit_tb(0); break; case DISAS_WFE: gen_helper_wfe(cpu_env); break; case DISAS_YIELD: gen_helper_yield(cpu_env); break; case DISAS_SWI: gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb), default_exception_el(dc)); break; case DISAS_HVC: gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2); break; case DISAS_SMC: gen_exception(EXCP_SMC, syn_aa32_smc(), 3); break; } } if (dc->condjmp) { /* "Condition failed" instruction codepath for the branch/trap insn */ gen_set_label(dc->condlabel); gen_set_condexec(dc); if (unlikely(is_singlestepping(dc))) { gen_set_pc_im(dc, dc->pc); gen_singlestep_exception(dc); } else { gen_goto_tb(dc, 1, dc->pc); } } done_generating: gen_tb_end(tb, num_insns); #ifdef DEBUG_DISAS if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM) && qemu_log_in_addr_range(pc_start)) { qemu_log_lock(); qemu_log("----------------\n"); qemu_log("IN: %s\n", lookup_symbol(pc_start)); log_target_disas(cs, pc_start, dc->pc - pc_start, dc->thumb | (dc->sctlr_b << 1)); qemu_log("\n"); qemu_log_unlock(); } #endif tb->size = dc->pc - pc_start; tb->icount = num_insns; }
23,750
0
static void tgen_brcond(TCGContext *s, TCGType type, TCGCond c, TCGReg r1, TCGArg c2, int c2const, int labelno) { int cc; if (facilities & FACILITY_GEN_INST_EXT) { bool is_unsigned = is_unsigned_cond(c); bool in_range; S390Opcode opc; cc = tcg_cond_to_s390_cond[c]; if (!c2const) { opc = (type == TCG_TYPE_I32 ? (is_unsigned ? RIE_CLRJ : RIE_CRJ) : (is_unsigned ? RIE_CLGRJ : RIE_CGRJ)); tgen_compare_branch(s, opc, cc, r1, c2, labelno); return; } /* COMPARE IMMEDIATE AND BRANCH RELATIVE has an 8-bit immediate field. If the immediate we've been given does not fit that range, we'll fall back to separate compare and branch instructions using the larger comparison range afforded by COMPARE IMMEDIATE. */ if (type == TCG_TYPE_I32) { if (is_unsigned) { opc = RIE_CLIJ; in_range = (uint32_t)c2 == (uint8_t)c2; } else { opc = RIE_CIJ; in_range = (int32_t)c2 == (int8_t)c2; } } else { if (is_unsigned) { opc = RIE_CLGIJ; in_range = (uint64_t)c2 == (uint8_t)c2; } else { opc = RIE_CGIJ; in_range = (int64_t)c2 == (int8_t)c2; } } if (in_range) { tgen_compare_imm_branch(s, opc, cc, r1, c2, labelno); return; } } cc = tgen_cmp(s, type, c, r1, c2, c2const); tgen_branch(s, cc, labelno); }
23,752
0
void mips_malta_init (ram_addr_t ram_size, const char *boot_device, const char *kernel_filename, const char *kernel_cmdline, const char *initrd_filename, const char *cpu_model) { char *filename; pflash_t *fl; MemoryRegion *system_memory = get_system_memory(); MemoryRegion *ram = g_new(MemoryRegion, 1); MemoryRegion *bios, *bios_alias = g_new(MemoryRegion, 1); target_long bios_size; int64_t kernel_entry; PCIBus *pci_bus; ISABus *isa_bus; CPUState *env; qemu_irq *isa_irq; qemu_irq *cpu_exit_irq; int piix4_devfn; i2c_bus *smbus; int i; DriveInfo *dinfo; DriveInfo *hd[MAX_IDE_BUS * MAX_IDE_DEVS]; DriveInfo *fd[MAX_FD]; int fl_idx = 0; int fl_sectors = 0; int be; DeviceState *dev = qdev_create(NULL, "mips-malta"); MaltaState *s = DO_UPCAST(MaltaState, busdev.qdev, dev); qdev_init_nofail(dev); /* Make sure the first 3 serial ports are associated with a device. */ for(i = 0; i < 3; i++) { if (!serial_hds[i]) { char label[32]; snprintf(label, sizeof(label), "serial%d", i); serial_hds[i] = qemu_chr_new(label, "null", NULL); } } /* init CPUs */ if (cpu_model == NULL) { #ifdef TARGET_MIPS64 cpu_model = "20Kc"; #else cpu_model = "24Kf"; #endif } for (i = 0; i < smp_cpus; i++) { env = cpu_init(cpu_model); if (!env) { fprintf(stderr, "Unable to find CPU definition\n"); exit(1); } /* Init internal devices */ cpu_mips_irq_init_cpu(env); cpu_mips_clock_init(env); qemu_register_reset(main_cpu_reset, env); } env = first_cpu; /* allocate RAM */ if (ram_size > (256 << 20)) { fprintf(stderr, "qemu: Too much memory for this machine: %d MB, maximum 256 MB\n", ((unsigned int)ram_size / (1 << 20))); exit(1); } memory_region_init_ram(ram, "mips_malta.ram", ram_size); vmstate_register_ram_global(ram); memory_region_add_subregion(system_memory, 0, ram); #ifdef TARGET_WORDS_BIGENDIAN be = 1; #else be = 0; #endif /* FPGA */ malta_fpga_init(system_memory, 0x1f000000LL, env->irq[2], serial_hds[2]); /* Load firmware in flash / BIOS unless we boot directly into a kernel. */ if (kernel_filename) { /* Write a small bootloader to the flash location. */ bios = g_new(MemoryRegion, 1); memory_region_init_ram(bios, "mips_malta.bios", BIOS_SIZE); vmstate_register_ram_global(bios); memory_region_set_readonly(bios, true); memory_region_init_alias(bios_alias, "bios.1fc", bios, 0, BIOS_SIZE); /* Map the bios at two physical locations, as on the real board. */ memory_region_add_subregion(system_memory, 0x1e000000LL, bios); memory_region_add_subregion(system_memory, 0x1fc00000LL, bios_alias); loaderparams.ram_size = ram_size; loaderparams.kernel_filename = kernel_filename; loaderparams.kernel_cmdline = kernel_cmdline; loaderparams.initrd_filename = initrd_filename; kernel_entry = load_kernel(); write_bootloader(env, memory_region_get_ram_ptr(bios), kernel_entry); } else { dinfo = drive_get(IF_PFLASH, 0, fl_idx); if (dinfo) { /* Load firmware from flash. */ bios_size = 0x400000; fl_sectors = bios_size >> 16; #ifdef DEBUG_BOARD_INIT printf("Register parallel flash %d size " TARGET_FMT_lx " at " "addr %08llx '%s' %x\n", fl_idx, bios_size, 0x1e000000LL, bdrv_get_device_name(dinfo->bdrv), fl_sectors); #endif fl = pflash_cfi01_register(0x1e000000LL, NULL, "mips_malta.bios", BIOS_SIZE, dinfo->bdrv, 65536, fl_sectors, 4, 0x0000, 0x0000, 0x0000, 0x0000, be); bios = pflash_cfi01_get_memory(fl); /* Map the bios at two physical locations, as on the real board. 
*/ memory_region_init_alias(bios_alias, "bios.1fc", bios, 0, BIOS_SIZE); memory_region_add_subregion(system_memory, 0x1fc00000LL, bios_alias); fl_idx++; } else { bios = g_new(MemoryRegion, 1); memory_region_init_ram(bios, "mips_malta.bios", BIOS_SIZE); vmstate_register_ram_global(bios); memory_region_set_readonly(bios, true); memory_region_init_alias(bios_alias, "bios.1fc", bios, 0, BIOS_SIZE); /* Map the bios at two physical locations, as on the real board. */ memory_region_add_subregion(system_memory, 0x1e000000LL, bios); memory_region_add_subregion(system_memory, 0x1fc00000LL, bios_alias); /* Load a BIOS image. */ if (bios_name == NULL) bios_name = BIOS_FILENAME; filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); if (filename) { bios_size = load_image_targphys(filename, 0x1fc00000LL, BIOS_SIZE); g_free(filename); } else { bios_size = -1; } if ((bios_size < 0 || bios_size > BIOS_SIZE) && !kernel_filename) { fprintf(stderr, "qemu: Could not load MIPS bios '%s', and no -kernel argument was specified\n", bios_name); exit(1); } } /* In little endian mode the 32bit words in the bios are swapped, a neat trick which allows bi-endian firmware. */ #ifndef TARGET_WORDS_BIGENDIAN { uint32_t *addr = memory_region_get_ram_ptr(bios); uint32_t *end = addr + bios_size; while (addr < end) { bswap32s(addr); addr++; } } #endif } /* Board ID = 0x420 (Malta Board with CoreLV) XXX: theoretically 0x1e000010 should map to flash and 0x1fc00010 should map to the board ID. */ stl_p(memory_region_get_ram_ptr(bios) + 0x10, 0x00000420); /* Init internal devices */ cpu_mips_irq_init_cpu(env); cpu_mips_clock_init(env); /* * We have a circular dependency problem: pci_bus depends on isa_irq, * isa_irq is provided by i8259, i8259 depends on ISA, ISA depends * on piix4, and piix4 depends on pci_bus. To stop the cycle we have * qemu_irq_proxy() adds an extra bit of indirection, allowing us * to resolve the isa_irq -> i8259 dependency after i8259 is initialized. */ isa_irq = qemu_irq_proxy(&s->i8259, 16); /* Northbridge */ pci_bus = gt64120_register(isa_irq); /* Southbridge */ ide_drive_get(hd, MAX_IDE_BUS); piix4_devfn = piix4_init(pci_bus, &isa_bus, 80); /* Interrupt controller */ /* The 8259 is attached to the MIPS CPU INT0 pin, ie interrupt 2 */ s->i8259 = i8259_init(isa_bus, env->irq[2]); isa_bus_irqs(isa_bus, s->i8259); pci_piix4_ide_init(pci_bus, hd, piix4_devfn + 1); usb_uhci_piix4_init(pci_bus, piix4_devfn + 2); smbus = piix4_pm_init(pci_bus, piix4_devfn + 3, 0x1100, isa_get_irq(NULL, 9), NULL, NULL, 0); /* TODO: Populate SPD eeprom data. */ smbus_eeprom_init(smbus, 8, NULL, 0); pit = pit_init(isa_bus, 0x40, 0); cpu_exit_irq = qemu_allocate_irqs(cpu_request_exit, NULL, 1); DMA_init(0, cpu_exit_irq); /* Super I/O */ isa_create_simple(isa_bus, "i8042"); rtc_init(isa_bus, 2000, NULL); serial_isa_init(isa_bus, 0, serial_hds[0]); serial_isa_init(isa_bus, 1, serial_hds[1]); if (parallel_hds[0]) parallel_init(isa_bus, 0, parallel_hds[0]); for(i = 0; i < MAX_FD; i++) { fd[i] = drive_get(IF_FLOPPY, 0, i); } fdctrl_init_isa(isa_bus, fd); /* Sound card */ audio_init(isa_bus, pci_bus); /* Network card */ network_init(); /* Optional PCI video card */ if (cirrus_vga_enabled) { pci_cirrus_vga_init(pci_bus); } else if (vmsvga_enabled) { if (!pci_vmsvga_init(pci_bus)) { fprintf(stderr, "Warning: vmware_vga not available," " using standard VGA instead\n"); pci_vga_init(pci_bus); } } else if (std_vga_enabled) { pci_vga_init(pci_bus); } }
23,753
0
void qmp_guest_set_time(bool has_time, int64_t time_ns, Error **errp) { Error *local_err = NULL; SYSTEMTIME ts; FILETIME tf; LONGLONG time; if (!has_time) { /* Unfortunately, Windows libraries don't provide an easy way to access * RTC yet: * * https://msdn.microsoft.com/en-us/library/aa908981.aspx */ error_setg(errp, "Time argument is required on this platform"); return; } /* Validate time passed by user. */ if (time_ns < 0 || time_ns / 100 > INT64_MAX - W32_FT_OFFSET) { error_setg(errp, "Time %" PRId64 "is invalid", time_ns); return; } time = time_ns / 100 + W32_FT_OFFSET; tf.dwLowDateTime = (DWORD) time; tf.dwHighDateTime = (DWORD) (time >> 32); if (!FileTimeToSystemTime(&tf, &ts)) { error_setg(errp, "Failed to convert system time %d", (int)GetLastError()); return; } acquire_privilege(SE_SYSTEMTIME_NAME, &local_err); if (local_err) { error_propagate(errp, local_err); return; } if (!SetSystemTime(&ts)) { error_setg(errp, "Failed to set time to guest: %d", (int)GetLastError()); return; } }
23,754
0
static uint32_t qpi_mem_readb(void *opaque, target_phys_addr_t addr) { return 0; }
23,755
0
struct MUSBState *musb_init(qemu_irq *irqs) { MUSBState *s = g_malloc0(sizeof(*s)); int i; s->irqs = irqs; s->faddr = 0x00; s->power = MGC_M_POWER_HSENAB; s->tx_intr = 0x0000; s->rx_intr = 0x0000; s->tx_mask = 0xffff; s->rx_mask = 0xffff; s->intr = 0x00; s->mask = 0x06; s->idx = 0; /* TODO: _DW */ s->ep[0].config = MGC_M_CONFIGDATA_SOFTCONE | MGC_M_CONFIGDATA_DYNFIFO; for (i = 0; i < 16; i ++) { s->ep[i].fifosize = 64; s->ep[i].maxp[0] = 0x40; s->ep[i].maxp[1] = 0x40; s->ep[i].musb = s; s->ep[i].epnum = i; usb_packet_init(&s->ep[i].packey[0].p); usb_packet_init(&s->ep[i].packey[1].p); } usb_bus_new(&s->bus, &musb_bus_ops, NULL /* FIXME */); usb_register_port(&s->bus, &s->port, s, 0, &musb_port_ops, USB_SPEED_MASK_LOW | USB_SPEED_MASK_FULL); return s; }
23,757
0
static void RENAME(extract_even)(const uint8_t *src, uint8_t *dst, x86_reg count) { dst += count; src += 2*count; count= - count; #if COMPILE_TEMPLATE_MMX if(count <= -16) { count += 15; __asm__ volatile( "pcmpeqw %%mm7, %%mm7 \n\t" "psrlw $8, %%mm7 \n\t" "1: \n\t" "movq -30(%1, %0, 2), %%mm0 \n\t" "movq -22(%1, %0, 2), %%mm1 \n\t" "movq -14(%1, %0, 2), %%mm2 \n\t" "movq -6(%1, %0, 2), %%mm3 \n\t" "pand %%mm7, %%mm0 \n\t" "pand %%mm7, %%mm1 \n\t" "pand %%mm7, %%mm2 \n\t" "pand %%mm7, %%mm3 \n\t" "packuswb %%mm1, %%mm0 \n\t" "packuswb %%mm3, %%mm2 \n\t" MOVNTQ" %%mm0,-15(%2, %0) \n\t" MOVNTQ" %%mm2,- 7(%2, %0) \n\t" "add $16, %0 \n\t" " js 1b \n\t" : "+r"(count) : "r"(src), "r"(dst) ); count -= 15; } #endif while(count<0) { dst[count]= src[2*count]; count++; } }
23,759
0
static void intra_predict_horiz_16x16_msa(uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride) { uint32_t row; uint8_t inp0, inp1, inp2, inp3; v16u8 src0, src1, src2, src3; for (row = 4; row--;) { inp0 = src[0]; src += src_stride; inp1 = src[0]; src += src_stride; inp2 = src[0]; src += src_stride; inp3 = src[0]; src += src_stride; src0 = (v16u8) __msa_fill_b(inp0); src1 = (v16u8) __msa_fill_b(inp1); src2 = (v16u8) __msa_fill_b(inp2); src3 = (v16u8) __msa_fill_b(inp3); ST_UB4(src0, src1, src2, src3, dst, dst_stride); dst += (4 * dst_stride); } }
23,760
1
void pc_cmos_init(ram_addr_t ram_size, ram_addr_t above_4g_mem_size, const char *boot_device, ISADevice *floppy, BusState *idebus0, BusState *idebus1, ISADevice *s) { int val, nb, i; FDriveType fd_type[2] = { FDRIVE_DRV_NONE, FDRIVE_DRV_NONE }; static pc_cmos_init_late_arg arg; /* various important CMOS locations needed by PC/Bochs bios */ /* memory size */ val = 640; /* base memory in K */ rtc_set_memory(s, 0x15, val); rtc_set_memory(s, 0x16, val >> 8); val = (ram_size / 1024) - 1024; if (val > 65535) val = 65535; rtc_set_memory(s, 0x17, val); rtc_set_memory(s, 0x18, val >> 8); rtc_set_memory(s, 0x30, val); rtc_set_memory(s, 0x31, val >> 8); if (above_4g_mem_size) { rtc_set_memory(s, 0x5b, (unsigned int)above_4g_mem_size >> 16); rtc_set_memory(s, 0x5c, (unsigned int)above_4g_mem_size >> 24); rtc_set_memory(s, 0x5d, (uint64_t)above_4g_mem_size >> 32); } if (ram_size > (16 * 1024 * 1024)) val = (ram_size / 65536) - ((16 * 1024 * 1024) / 65536); else val = 0; if (val > 65535) val = 65535; rtc_set_memory(s, 0x34, val); rtc_set_memory(s, 0x35, val >> 8); /* set the number of CPU */ rtc_set_memory(s, 0x5f, smp_cpus - 1); /* set boot devices, and disable floppy signature check if requested */ if (set_boot_dev(s, boot_device, fd_bootchk)) { exit(1); } /* floppy type */ if (floppy) { for (i = 0; i < 2; i++) { fd_type[i] = isa_fdc_get_drive_type(floppy, i); } } val = (cmos_get_fd_drive_type(fd_type[0]) << 4) | cmos_get_fd_drive_type(fd_type[1]); rtc_set_memory(s, 0x10, val); val = 0; nb = 0; if (fd_type[0] < FDRIVE_DRV_NONE) { nb++; } if (fd_type[1] < FDRIVE_DRV_NONE) { nb++; } switch (nb) { case 0: break; case 1: val |= 0x01; /* 1 drive, ready for boot */ break; case 2: val |= 0x41; /* 2 drives, ready for boot */ break; } val |= 0x02; /* FPU is there */ val |= 0x04; /* PS/2 mouse installed */ rtc_set_memory(s, REG_EQUIPMENT_BYTE, val); /* hard drives */ arg.rtc_state = s; arg.idebus[0] = idebus0; arg.idebus[1] = idebus1; qemu_register_reset(pc_cmos_init_late, &arg); }
23,763
0
int read_file(const char *filename, char **bufptr, size_t *size) { FILE *f = fopen(filename, "rb"); if (!f) { av_log(NULL, AV_LOG_ERROR, "Cannot read file '%s': %s\n", filename, strerror(errno)); return AVERROR(errno); } fseek(f, 0, SEEK_END); *size = ftell(f); fseek(f, 0, SEEK_SET); *bufptr = av_malloc(*size + 1); if (!*bufptr) { av_log(NULL, AV_LOG_ERROR, "Could not allocate file buffer\n"); fclose(f); return AVERROR(ENOMEM); } fread(*bufptr, 1, *size, f); (*bufptr)[*size++] = '\0'; fclose(f); return 0; }
23,764
1
int64_t ff_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts, int64_t pos_min, int64_t pos_max, int64_t pos_limit, int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret, int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t )) { int64_t pos, ts; int64_t start_pos, filesize; int no_change; av_dlog(s, "gen_seek: %d %s\n", stream_index, av_ts2str(target_ts)); if(ts_min == AV_NOPTS_VALUE){ pos_min = s->data_offset; ts_min = ff_read_timestamp(s, stream_index, &pos_min, INT64_MAX, read_timestamp); if (ts_min == AV_NOPTS_VALUE) return -1; } if(ts_min >= target_ts){ *ts_ret= ts_min; return pos_min; } if(ts_max == AV_NOPTS_VALUE){ int step= 1024; filesize = avio_size(s->pb); pos_max = filesize - 1; do{ pos_max = FFMAX(0, pos_max - step); ts_max = ff_read_timestamp(s, stream_index, &pos_max, pos_max + step, read_timestamp); step += step; }while(ts_max == AV_NOPTS_VALUE && pos_max > 0); if (ts_max == AV_NOPTS_VALUE) return -1; for(;;){ int64_t tmp_pos= pos_max + 1; int64_t tmp_ts= ff_read_timestamp(s, stream_index, &tmp_pos, INT64_MAX, read_timestamp); if(tmp_ts == AV_NOPTS_VALUE) break; ts_max= tmp_ts; pos_max= tmp_pos; if(tmp_pos >= filesize) break; } pos_limit= pos_max; } if(ts_max <= target_ts){ *ts_ret= ts_max; return pos_max; } if(ts_min > ts_max){ return -1; }else if(ts_min == ts_max){ pos_limit= pos_min; } no_change=0; while (pos_min < pos_limit) { av_dlog(s, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%s dts_max=%s\n", pos_min, pos_max, av_ts2str(ts_min), av_ts2str(ts_max)); assert(pos_limit <= pos_max); if(no_change==0){ int64_t approximate_keyframe_distance= pos_max - pos_limit; // interpolate position (better than dichotomy) pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min) + pos_min - approximate_keyframe_distance; }else if(no_change==1){ // bisection, if interpolation failed to change min or max pos last time pos = (pos_min + pos_limit)>>1; }else{ /* linear search if bisection failed, can only happen if there are very few or no keyframes between min/max */ pos=pos_min; } if(pos <= pos_min) pos= pos_min + 1; else if(pos > pos_limit) pos= pos_limit; start_pos= pos; ts = ff_read_timestamp(s, stream_index, &pos, INT64_MAX, read_timestamp); //may pass pos_limit instead of -1 if(pos == pos_max) no_change++; else no_change=0; av_dlog(s, "%"PRId64" %"PRId64" %"PRId64" / %s %s %s target:%s limit:%"PRId64" start:%"PRId64" noc:%d\n", pos_min, pos, pos_max, av_ts2str(ts_min), av_ts2str(ts), av_ts2str(ts_max), av_ts2str(target_ts), pos_limit, start_pos, no_change); if(ts == AV_NOPTS_VALUE){ av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n"); return -1; } assert(ts != AV_NOPTS_VALUE); if (target_ts <= ts) { pos_limit = start_pos - 1; pos_max = pos; ts_max = ts; } if (target_ts >= ts) { pos_min = pos; ts_min = ts; } } pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max; ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max; #if 0 pos_min = pos; ts_min = ff_read_timestamp(s, stream_index, &pos_min, INT64_MAX, read_timestamp); pos_min++; ts_max = ff_read_timestamp(s, stream_index, &pos_min, INT64_MAX, read_timestamp); av_dlog(s, "pos=0x%"PRIx64" %s<=%s<=%s\n", pos, av_ts2str(ts_min), av_ts2str(target_ts), av_ts2str(ts_max)); #endif *ts_ret= ts; return pos; }
23,765
1
struct GuestFileRead *qmp_guest_file_read(int64_t handle, bool has_count, int64_t count, Error **errp) { GuestFileHandle *gfh = guest_file_handle_find(handle, errp); GuestFileRead *read_data = NULL; guchar *buf; FILE *fh; size_t read_count; if (!gfh) { if (!has_count) { count = QGA_READ_COUNT_DEFAULT; } else if (count < 0) { error_setg(errp, "value '%" PRId64 "' is invalid for argument count", count); fh = gfh->fh; buf = g_malloc0(count+1); read_count = fread(buf, 1, count, fh); if (ferror(fh)) { error_setg_errno(errp, errno, "failed to read file"); slog("guest-file-read failed, handle: %" PRId64, handle); } else { buf[read_count] = 0; read_data = g_new0(GuestFileRead, 1); read_data->count = read_count; read_data->eof = feof(fh); if (read_count) { read_data->buf_b64 = g_base64_encode(buf, read_count); gfh->state = RW_STATE_READING; g_free(buf); clearerr(fh); return read_data;
23,766
1
static inline int get_amv(Mpeg4DecContext *ctx, int n) { MpegEncContext *s = &ctx->m; int x, y, mb_v, sum, dx, dy, shift; int len = 1 << (s->f_code + 4); const int a = s->sprite_warping_accuracy; if (s->workaround_bugs & FF_BUG_AMV) len >>= s->quarter_sample; if (s->real_sprite_warping_points == 1) { if (ctx->divx_version == 500 && ctx->divx_build == 413) sum = s->sprite_offset[0][n] / (1 << (a - s->quarter_sample)); else sum = RSHIFT(s->sprite_offset[0][n] << s->quarter_sample, a); } else { dx = s->sprite_delta[n][0]; dy = s->sprite_delta[n][1]; shift = ctx->sprite_shift[0]; if (n) dy -= 1 << (shift + a + 1); else dx -= 1 << (shift + a + 1); mb_v = s->sprite_offset[0][n] + dx * s->mb_x * 16 + dy * s->mb_y * 16; sum = 0; for (y = 0; y < 16; y++) { int v; v = mb_v + dy * y; // FIXME optimize for (x = 0; x < 16; x++) { sum += v >> shift; v += dx; } } sum = RSHIFT(sum, a + 8 - s->quarter_sample); } if (sum < -len) sum = -len; else if (sum >= len) sum = len - 1; return sum; }
23,767
1
void qemu_aio_wait_start(void) { }
23,768
1
int64_t cpu_get_ticks(void) { if (use_icount) { return cpu_get_icount(); } if (!timers_state.cpu_ticks_enabled) { return timers_state.cpu_ticks_offset; } else { int64_t ticks; ticks = cpu_get_real_ticks(); if (timers_state.cpu_ticks_prev > ticks) { /* Note: non increasing ticks may happen if the host uses software suspend */ timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks; } timers_state.cpu_ticks_prev = ticks; return ticks + timers_state.cpu_ticks_offset; } }
23,770
1
static void gen_rev16(TCGv var) { TCGv tmp = new_tmp(); tcg_gen_shri_i32(tmp, var, 8); tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff); tcg_gen_shli_i32(var, var, 8); tcg_gen_andi_i32(var, var, 0xff00ff00); tcg_gen_or_i32(var, var, tmp); dead_tmp(tmp); }
23,773
1
static void ready_codebook(vorbis_enc_codebook *cb) { int i; ff_vorbis_len2vlc(cb->lens, cb->codewords, cb->nentries); if (!cb->lookup) { cb->pow2 = cb->dimentions = NULL; } else { int vals = cb_lookup_vals(cb->lookup, cb->ndimentions, cb->nentries); cb->dimentions = av_malloc(sizeof(float) * cb->nentries * cb->ndimentions); cb->pow2 = av_mallocz(sizeof(float) * cb->nentries); for (i = 0; i < cb->nentries; i++) { float last = 0; int j; int div = 1; for (j = 0; j < cb->ndimentions; j++) { int off; if (cb->lookup == 1) off = (i / div) % vals; // lookup type 1 else off = i * cb->ndimentions + j; // lookup type 2 cb->dimentions[i * cb->ndimentions + j] = last + cb->min + cb->quantlist[off] * cb->delta; if (cb->seq_p) last = cb->dimentions[i * cb->ndimentions + j]; cb->pow2[i] += cb->dimentions[i * cb->ndimentions + j] * cb->dimentions[i * cb->ndimentions + j]; div *= vals; } cb->pow2[i] /= 2.; } } }
23,775
1
static int adx_decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr, AVPacket *avpkt) { int buf_size = avpkt->size; ADXContext *c = avctx->priv_data; int16_t *samples; const uint8_t *buf = avpkt->data; int num_blocks, ch, ret; if (c->eof) { *got_frame_ptr = 0; return buf_size; } if(AV_RB16(buf) == 0x8000){ int header_size; if ((ret = avpriv_adx_decode_header(avctx, buf, buf_size, &header_size, c->coeff)) < 0) { av_log(avctx, AV_LOG_ERROR, "error parsing ADX header\n"); } c->channels = avctx->channels; if(buf_size < header_size) buf += header_size; buf_size -= header_size; } /* calculate number of blocks in the packet */ num_blocks = buf_size / (BLOCK_SIZE * c->channels); /* if the packet is not an even multiple of BLOCK_SIZE, check for an EOF packet */ if (!num_blocks || buf_size % (BLOCK_SIZE * avctx->channels)) { if (buf_size >= 4 && (AV_RB16(buf) & 0x8000)) { c->eof = 1; *got_frame_ptr = 0; return avpkt->size; } } /* get output buffer */ c->frame.nb_samples = num_blocks * BLOCK_SAMPLES; if ((ret = avctx->get_buffer(avctx, &c->frame)) < 0) { av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); return ret; } samples = (int16_t *)c->frame.data[0]; while (num_blocks--) { for (ch = 0; ch < c->channels; ch++) { if (adx_decode(c, samples + ch, buf, ch)) { c->eof = 1; buf = avpkt->data + avpkt->size; break; } buf_size -= BLOCK_SIZE; buf += BLOCK_SIZE; } samples += BLOCK_SAMPLES * c->channels; } *got_frame_ptr = 1; *(AVFrame *)data = c->frame; return buf - avpkt->data; }
23,776
1
static void spapr_dt_ov5_platform_support(void *fdt, int chosen) { PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu); char val[2 * 4] = { 23, 0x00, /* Xive mode: 0 = legacy (as in ISA 2.7), 1 = Exploitation */ 24, 0x00, /* Hash/Radix, filled in below. */ 25, 0x00, /* Hash options: Segment Tables == no, GTSE == no. */ 26, 0x40, /* Radix options: GTSE == yes. */ }; if (kvm_enabled()) { if (kvmppc_has_cap_mmu_radix() && kvmppc_has_cap_mmu_hash_v3()) { val[3] = 0x80; /* OV5_MMU_BOTH */ } else if (kvmppc_has_cap_mmu_radix()) { val[3] = 0x40; /* OV5_MMU_RADIX_300 */ } else { val[3] = 0x00; /* Hash */ } } else { if (first_ppc_cpu->env.mmu_model & POWERPC_MMU_V3) { /* V3 MMU supports both hash and radix (with dynamic switching) */ val[3] = 0xC0; } else { /* Otherwise we can only do hash */ val[3] = 0x00; } } _FDT(fdt_setprop(fdt, chosen, "ibm,arch-vec-5-platform-support", val, sizeof(val))); }
23,777
1
vmstate_get_subsection(const VMStateSubsection *sub, char *idstr) { while (sub && sub->needed) { if (strcmp(idstr, sub->vmsd->name) == 0) { return sub->vmsd; } sub++; } return NULL; }
23,778
1
static int gif_read_image(GifState *s, AVFrame *frame) { int left, top, width, height, bits_per_pixel, code_size, flags; int is_interleaved, has_local_palette, y, pass, y1, linesize, pal_size; uint32_t *ptr, *pal, *px, *pr, *ptr1; int ret; uint8_t *idx; /* At least 9 bytes of Image Descriptor. */ if (bytestream2_get_bytes_left(&s->gb) < 9) return AVERROR_INVALIDDATA; left = bytestream2_get_le16u(&s->gb); top = bytestream2_get_le16u(&s->gb); width = bytestream2_get_le16u(&s->gb); height = bytestream2_get_le16u(&s->gb); flags = bytestream2_get_byteu(&s->gb); is_interleaved = flags & 0x40; has_local_palette = flags & 0x80; bits_per_pixel = (flags & 0x07) + 1; av_dlog(s->avctx, "image x=%d y=%d w=%d h=%d\n", left, top, width, height); if (has_local_palette) { pal_size = 1 << bits_per_pixel; if (bytestream2_get_bytes_left(&s->gb) < pal_size * 3) return AVERROR_INVALIDDATA; gif_read_palette(s, s->local_palette, pal_size); pal = s->local_palette; } else { if (!s->has_global_palette) { av_log(s->avctx, AV_LOG_ERROR, "picture doesn't have either global or local palette.\n"); return AVERROR_INVALIDDATA; } pal = s->global_palette; } if (s->keyframe) { if (s->transparent_color_index == -1 && s->has_global_palette) { /* transparency wasn't set before the first frame, fill with background color */ gif_fill(frame, s->bg_color); } else { /* otherwise fill with transparent color. * this is necessary since by default picture filled with 0x80808080. */ gif_fill(frame, s->trans_color); } } /* verify that all the image is inside the screen dimensions */ if (left + width > s->screen_width || top + height > s->screen_height) { av_log(s->avctx, AV_LOG_ERROR, "image is outside the screen dimensions.\n"); return AVERROR_INVALIDDATA; } if (width <= 0 || height <= 0) { av_log(s->avctx, AV_LOG_ERROR, "Invalid image dimensions.\n"); return AVERROR_INVALIDDATA; } /* process disposal method */ if (s->gce_prev_disposal == GCE_DISPOSAL_BACKGROUND) { gif_fill_rect(frame, s->stored_bg_color, s->gce_l, s->gce_t, s->gce_w, s->gce_h); } else if (s->gce_prev_disposal == GCE_DISPOSAL_RESTORE) { gif_copy_img_rect(s->stored_img, (uint32_t *)frame->data[0], frame->linesize[0] / sizeof(uint32_t), s->gce_l, s->gce_t, s->gce_w, s->gce_h); } s->gce_prev_disposal = s->gce_disposal; if (s->gce_disposal != GCE_DISPOSAL_NONE) { s->gce_l = left; s->gce_t = top; s->gce_w = width; s->gce_h = height; if (s->gce_disposal == GCE_DISPOSAL_BACKGROUND) { if (s->transparent_color_index >= 0) s->stored_bg_color = s->trans_color; else s->stored_bg_color = s->bg_color; } else if (s->gce_disposal == GCE_DISPOSAL_RESTORE) { av_fast_malloc(&s->stored_img, &s->stored_img_size, frame->linesize[0] * frame->height); if (!s->stored_img) return AVERROR(ENOMEM); gif_copy_img_rect((uint32_t *)frame->data[0], s->stored_img, frame->linesize[0] / sizeof(uint32_t), left, top, width, height); } } /* Expect at least 2 bytes: 1 for lzw code size and 1 for block size. 
*/ if (bytestream2_get_bytes_left(&s->gb) < 2) return AVERROR_INVALIDDATA; /* now get the image data */ code_size = bytestream2_get_byteu(&s->gb); if ((ret = ff_lzw_decode_init(s->lzw, code_size, s->gb.buffer, bytestream2_get_bytes_left(&s->gb), FF_LZW_GIF)) < 0) { av_log(s->avctx, AV_LOG_ERROR, "LZW init failed\n"); return ret; } /* read all the image */ linesize = frame->linesize[0] / sizeof(uint32_t); ptr1 = (uint32_t *)frame->data[0] + top * linesize + left; ptr = ptr1; pass = 0; y1 = 0; for (y = 0; y < height; y++) { int count = ff_lzw_decode(s->lzw, s->idx_line, width); if (count != width) { if (count) av_log(s->avctx, AV_LOG_ERROR, "LZW decode failed\n"); goto decode_tail; } pr = ptr + width; for (px = ptr, idx = s->idx_line; px < pr; px++, idx++) { if (*idx != s->transparent_color_index) *px = pal[*idx]; } if (is_interleaved) { switch(pass) { default: case 0: case 1: y1 += 8; ptr += linesize * 8; if (y1 >= height) { y1 = pass ? 2 : 4; ptr = ptr1 + linesize * y1; pass++; } break; case 2: y1 += 4; ptr += linesize * 4; if (y1 >= height) { y1 = 1; ptr = ptr1 + linesize; pass++; } break; case 3: y1 += 2; ptr += linesize * 2; break; } } else { ptr += linesize; } } decode_tail: /* read the garbage data until end marker is found */ ff_lzw_decode_tail(s->lzw); /* Graphic Control Extension's scope is single frame. * Remove its influence. */ s->transparent_color_index = -1; s->gce_disposal = GCE_DISPOSAL_NONE; return 0; }
23,780
1
static int decode_slice_thread(AVCodecContext *avctx, void *arg, int jobnr, int threadnr) { ProresContext *ctx = avctx->priv_data; SliceContext *slice = &ctx->slices[jobnr]; const uint8_t *buf = slice->data; AVFrame *pic = ctx->frame; int i, hdr_size, qscale, log2_chroma_blocks_per_mb; int luma_stride, chroma_stride; int y_data_size, u_data_size, v_data_size, a_data_size; uint8_t *dest_y, *dest_u, *dest_v, *dest_a; int16_t qmat_luma_scaled[64]; int16_t qmat_chroma_scaled[64]; int mb_x_shift; slice->ret = -1; //av_log(avctx, AV_LOG_INFO, "slice %d mb width %d mb x %d y %d\n", // jobnr, slice->mb_count, slice->mb_x, slice->mb_y); // slice header hdr_size = buf[0] >> 3; qscale = av_clip(buf[1], 1, 224); qscale = qscale > 128 ? qscale - 96 << 2: qscale; y_data_size = AV_RB16(buf + 2); u_data_size = AV_RB16(buf + 4); v_data_size = slice->data_size - y_data_size - u_data_size - hdr_size; if (hdr_size > 7) v_data_size = AV_RB16(buf + 6); a_data_size = slice->data_size - y_data_size - u_data_size - v_data_size - hdr_size; if (y_data_size < 0 || u_data_size < 0 || v_data_size < 0 || hdr_size+y_data_size+u_data_size+v_data_size > slice->data_size){ av_log(avctx, AV_LOG_ERROR, "invalid plane data size\n"); return -1; } buf += hdr_size; for (i = 0; i < 64; i++) { qmat_luma_scaled [i] = ctx->qmat_luma [i] * qscale; qmat_chroma_scaled[i] = ctx->qmat_chroma[i] * qscale; } if (ctx->frame_type == 0) { luma_stride = pic->linesize[0]; chroma_stride = pic->linesize[1]; } else { luma_stride = pic->linesize[0] << 1; chroma_stride = pic->linesize[1] << 1; } if (avctx->pix_fmt == AV_PIX_FMT_YUV444P10 || avctx->pix_fmt == AV_PIX_FMT_YUVA444P10) { mb_x_shift = 5; log2_chroma_blocks_per_mb = 2; } else { mb_x_shift = 4; log2_chroma_blocks_per_mb = 1; } dest_y = pic->data[0] + (slice->mb_y << 4) * luma_stride + (slice->mb_x << 5); dest_u = pic->data[1] + (slice->mb_y << 4) * chroma_stride + (slice->mb_x << mb_x_shift); dest_v = pic->data[2] + (slice->mb_y << 4) * chroma_stride + (slice->mb_x << mb_x_shift); dest_a = pic->data[3] + (slice->mb_y << 4) * luma_stride + (slice->mb_x << 5); if (ctx->frame_type && ctx->first_field ^ ctx->frame->top_field_first) { dest_y += pic->linesize[0]; dest_u += pic->linesize[1]; dest_v += pic->linesize[2]; dest_a += pic->linesize[3]; } decode_slice_luma(avctx, slice, (uint16_t*)dest_y, luma_stride, buf, y_data_size, qmat_luma_scaled); if (!(avctx->flags & CODEC_FLAG_GRAY)) { decode_slice_chroma(avctx, slice, (uint16_t*)dest_u, chroma_stride, buf + y_data_size, u_data_size, qmat_chroma_scaled, log2_chroma_blocks_per_mb); decode_slice_chroma(avctx, slice, (uint16_t*)dest_v, chroma_stride, buf + y_data_size + u_data_size, v_data_size, qmat_chroma_scaled, log2_chroma_blocks_per_mb); } /* decode alpha plane if available */ if (ctx->alpha_info && dest_a && a_data_size) decode_slice_alpha(ctx, (uint16_t*)dest_a, luma_stride, buf + y_data_size + u_data_size + v_data_size, a_data_size, slice->mb_count); slice->ret = 0; return 0; }
23,781
1
static int img_rebase(int argc, char **argv) { BlockDriverState *bs, *bs_old_backing, *bs_new_backing; BlockDriver *old_backing_drv, *new_backing_drv; char *filename; const char *fmt, *out_basefmt, *out_baseimg; int c, flags, ret; int unsafe = 0; /* Parse commandline parameters */ fmt = NULL; out_baseimg = NULL; out_basefmt = NULL; for(;;) { c = getopt(argc, argv, "uhf:F:b:"); if (c == -1) break; switch(c) { case 'h': help(); return 0; case 'f': fmt = optarg; break; case 'F': out_basefmt = optarg; break; case 'b': out_baseimg = optarg; break; case 'u': unsafe = 1; break; } } if ((optind >= argc) || !out_baseimg) help(); filename = argv[optind++]; /* * Open the images. * * Ignore the old backing file for unsafe rebase in case we want to correct * the reference to a renamed or moved backing file. */ flags = BDRV_O_FLAGS | BDRV_O_RDWR | (unsafe ? BDRV_O_NO_BACKING : 0); bs = bdrv_new_open(filename, fmt, flags); /* Find the right drivers for the backing files */ old_backing_drv = NULL; new_backing_drv = NULL; if (!unsafe && bs->backing_format[0] != '\0') { old_backing_drv = bdrv_find_format(bs->backing_format); if (old_backing_drv == NULL) { error("Invalid format name: '%s'", bs->backing_format); } } if (out_basefmt != NULL) { new_backing_drv = bdrv_find_format(out_basefmt); if (new_backing_drv == NULL) { error("Invalid format name: '%s'", out_basefmt); } } /* For safe rebasing we need to compare old and new backing file */ if (unsafe) { /* Make the compiler happy */ bs_old_backing = NULL; bs_new_backing = NULL; } else { char backing_name[1024]; bs_old_backing = bdrv_new("old_backing"); bdrv_get_backing_filename(bs, backing_name, sizeof(backing_name)); if (bdrv_open(bs_old_backing, backing_name, BDRV_O_FLAGS, old_backing_drv)) { error("Could not open old backing file '%s'", backing_name); return -1; } bs_new_backing = bdrv_new("new_backing"); if (bdrv_open(bs_new_backing, out_baseimg, BDRV_O_FLAGS | BDRV_O_RDWR, new_backing_drv)) { error("Could not open new backing file '%s'", out_baseimg); return -1; } } /* * Check each unallocated cluster in the COW file. If it is unallocated, * accesses go to the backing file. We must therefore compare this cluster * in the old and new backing file, and if they differ we need to copy it * from the old backing file into the COW file. * * If qemu-img crashes during this step, no harm is done. The content of * the image is the same as the original one at any time. */ if (!unsafe) { uint64_t num_sectors; uint64_t sector; int n, n1; uint8_t * buf_old; uint8_t * buf_new; buf_old = qemu_malloc(IO_BUF_SIZE); buf_new = qemu_malloc(IO_BUF_SIZE); bdrv_get_geometry(bs, &num_sectors); for (sector = 0; sector < num_sectors; sector += n) { /* How many sectors can we handle with the next read? 
*/ if (sector + (IO_BUF_SIZE / 512) <= num_sectors) { n = (IO_BUF_SIZE / 512); } else { n = num_sectors - sector; } /* If the cluster is allocated, we don't need to take action */ if (bdrv_is_allocated(bs, sector, n, &n1)) { n = n1; continue; } /* Read old and new backing file */ if (bdrv_read(bs_old_backing, sector, buf_old, n) < 0) { error("error while reading from old backing file"); } if (bdrv_read(bs_new_backing, sector, buf_new, n) < 0) { error("error while reading from new backing file"); } /* If they differ, we need to write to the COW file */ uint64_t written = 0; while (written < n) { int pnum; if (compare_sectors(buf_old + written * 512, buf_new + written * 512, n - written, &pnum)) { ret = bdrv_write(bs, sector + written, buf_old + written * 512, pnum); if (ret < 0) { error("Error while writing to COW image: %s", strerror(-ret)); } } written += pnum; } } qemu_free(buf_old); qemu_free(buf_new); } /* * Change the backing file. All clusters that are different from the old * backing file are overwritten in the COW file now, so the visible content * doesn't change when we switch the backing file. */ ret = bdrv_change_backing_file(bs, out_baseimg, out_basefmt); if (ret == -ENOSPC) { error("Could not change the backing file to '%s': No space left in " "the file header", out_baseimg); } else if (ret < 0) { error("Could not change the backing file to '%s': %s", out_baseimg, strerror(-ret)); } /* * TODO At this point it is possible to check if any clusters that are * allocated in the COW file are the same in the backing file. If so, they * could be dropped from the COW file. Don't do this before switching the * backing file, in case of a crash this would lead to corruption. */ /* Cleanup */ if (!unsafe) { bdrv_delete(bs_old_backing); bdrv_delete(bs_new_backing); } bdrv_delete(bs); return 0; }
23,782
1
static void flush_buffered(AVFormatContext *s1, int last) { RTPMuxContext *s = s1->priv_data; if (s->buf_ptr != s->buf) { // If only sending one single NAL unit, skip the aggregation framing if (s->buffered_nals == 1) ff_rtp_send_data(s1, s->buf + 4, s->buf_ptr - s->buf - 4, last); else ff_rtp_send_data(s1, s->buf, s->buf_ptr - s->buf, last); } s->buf_ptr = s->buf; s->buffered_nals = 0; }
23,783
1
static int vp8_lossy_decode_frame(AVCodecContext *avctx, AVFrame *p, int *got_frame, uint8_t *data_start, unsigned int data_size) { WebPContext *s = avctx->priv_data; AVPacket pkt; int ret; if (!s->initialized) { ff_vp8_decode_init(avctx); s->initialized = 1; } avctx->pix_fmt = s->has_alpha ? AV_PIX_FMT_YUVA420P : AV_PIX_FMT_YUV420P; s->lossless = 0; if (data_size > INT_MAX) { av_log(avctx, AV_LOG_ERROR, "unsupported chunk size\n"); return AVERROR_PATCHWELCOME; } av_init_packet(&pkt); pkt.data = data_start; pkt.size = data_size; ret = ff_vp8_decode_frame(avctx, p, got_frame, &pkt); if (ret < 0) return ret; update_canvas_size(avctx, avctx->width, avctx->height); if (s->has_alpha) { ret = vp8_lossy_decode_alpha(avctx, p, s->alpha_data, s->alpha_data_size); if (ret < 0) return ret; } return ret; }
23,784
1
static void s390_ccw_realize(S390CCWDevice *cdev, char *sysfsdev, Error **errp) { CcwDevice *ccw_dev = CCW_DEVICE(cdev); CCWDeviceClass *ck = CCW_DEVICE_GET_CLASS(ccw_dev); DeviceState *parent = DEVICE(ccw_dev); BusState *qbus = qdev_get_parent_bus(parent); VirtualCssBus *cbus = VIRTUAL_CSS_BUS(qbus); SubchDev *sch; int ret; Error *err = NULL; s390_ccw_get_dev_info(cdev, sysfsdev, &err); if (err) { goto out_err_propagate; } sch = css_create_sch(ccw_dev->devno, false, cbus->squash_mcss, &err); if (!sch) { goto out_mdevid_free; } sch->driver_data = cdev; sch->do_subchannel_work = do_subchannel_work_passthrough; ccw_dev->sch = sch; ret = css_sch_build_schib(sch, &cdev->hostid); if (ret) { error_setg_errno(&err, -ret, "%s: Failed to build initial schib", __func__); goto out_err; } ck->realize(ccw_dev, &err); if (err) { goto out_err; } css_generate_sch_crws(sch->cssid, sch->ssid, sch->schid, parent->hotplugged, 1); return; out_err: css_subch_assign(sch->cssid, sch->ssid, sch->schid, sch->devno, NULL); ccw_dev->sch = NULL; g_free(sch); out_mdevid_free: g_free(cdev->mdevid); out_err_propagate: error_propagate(errp, err); }
23,785
1
static void build_pci_bus_end(PCIBus *bus, void *bus_state) { AcpiBuildPciBusHotplugState *child = bus_state; AcpiBuildPciBusHotplugState *parent = child->parent; GArray *bus_table = build_alloc_array(); DECLARE_BITMAP(slot_hotplug_enable, PCI_SLOT_MAX); DECLARE_BITMAP(slot_device_present, PCI_SLOT_MAX); DECLARE_BITMAP(slot_device_system, PCI_SLOT_MAX); DECLARE_BITMAP(slot_device_vga, PCI_SLOT_MAX); DECLARE_BITMAP(slot_device_qxl, PCI_SLOT_MAX); uint8_t op; int i; QObject *bsel; GArray *method; bool bus_hotplug_support = false; if (bus->parent_dev) { op = 0x82; /* DeviceOp */ build_append_nameseg(bus_table, "S%.02X_", bus->parent_dev->devfn); build_append_byte(bus_table, 0x08); /* NameOp */ build_append_nameseg(bus_table, "_SUN"); build_append_value(bus_table, PCI_SLOT(bus->parent_dev->devfn), 1); build_append_byte(bus_table, 0x08); /* NameOp */ build_append_nameseg(bus_table, "_ADR"); build_append_value(bus_table, (PCI_SLOT(bus->parent_dev->devfn) << 16) | PCI_FUNC(bus->parent_dev->devfn), 4); } else { op = 0x10; /* ScopeOp */; build_append_nameseg(bus_table, "PCI0"); } bsel = object_property_get_qobject(OBJECT(bus), ACPI_PCIHP_PROP_BSEL, NULL); if (bsel) { build_append_byte(bus_table, 0x08); /* NameOp */ build_append_nameseg(bus_table, "BSEL"); build_append_int(bus_table, qint_get_int(qobject_to_qint(bsel))); memset(slot_hotplug_enable, 0xff, sizeof slot_hotplug_enable); } else { /* No bsel - no slots are hot-pluggable */ memset(slot_hotplug_enable, 0x00, sizeof slot_hotplug_enable); } memset(slot_device_present, 0x00, sizeof slot_device_present); memset(slot_device_system, 0x00, sizeof slot_device_present); memset(slot_device_vga, 0x00, sizeof slot_device_vga); memset(slot_device_qxl, 0x00, sizeof slot_device_qxl); for (i = 0; i < ARRAY_SIZE(bus->devices); i += PCI_FUNC_MAX) { DeviceClass *dc; PCIDeviceClass *pc; PCIDevice *pdev = bus->devices[i]; int slot = PCI_SLOT(i); if (!pdev) { continue; } set_bit(slot, slot_device_present); pc = PCI_DEVICE_GET_CLASS(pdev); dc = DEVICE_GET_CLASS(pdev); if (pc->class_id == PCI_CLASS_BRIDGE_ISA) { set_bit(slot, slot_device_system); } if (pc->class_id == PCI_CLASS_DISPLAY_VGA) { set_bit(slot, slot_device_vga); if (object_dynamic_cast(OBJECT(pdev), "qxl-vga")) { set_bit(slot, slot_device_qxl); } } if (!dc->hotpluggable || pc->is_bridge) { clear_bit(slot, slot_hotplug_enable); } } /* Append Device object for each slot */ for (i = 0; i < PCI_SLOT_MAX; i++) { bool can_eject = test_bit(i, slot_hotplug_enable); bool present = test_bit(i, slot_device_present); bool vga = test_bit(i, slot_device_vga); bool qxl = test_bit(i, slot_device_qxl); bool system = test_bit(i, slot_device_system); if (can_eject) { void *pcihp = acpi_data_push(bus_table, ACPI_PCIHP_SIZEOF); memcpy(pcihp, ACPI_PCIHP_AML, ACPI_PCIHP_SIZEOF); patch_pcihp(i, pcihp); bus_hotplug_support = true; } else if (qxl) { void *pcihp = acpi_data_push(bus_table, ACPI_PCIQXL_SIZEOF); memcpy(pcihp, ACPI_PCIQXL_AML, ACPI_PCIQXL_SIZEOF); patch_pciqxl(i, pcihp); } else if (vga) { void *pcihp = acpi_data_push(bus_table, ACPI_PCIVGA_SIZEOF); memcpy(pcihp, ACPI_PCIVGA_AML, ACPI_PCIVGA_SIZEOF); patch_pcivga(i, pcihp); } else if (system) { /* Nothing to do: system devices are in DSDT. 
*/ } else if (present) { void *pcihp = acpi_data_push(bus_table, ACPI_PCINOHP_SIZEOF); memcpy(pcihp, ACPI_PCINOHP_AML, ACPI_PCINOHP_SIZEOF); patch_pcinohp(i, pcihp); } } if (bsel) { method = build_alloc_method("DVNT", 2); for (i = 0; i < PCI_SLOT_MAX; i++) { GArray *notify; uint8_t op; if (!test_bit(i, slot_hotplug_enable)) { continue; } notify = build_alloc_array(); op = 0xA0; /* IfOp */ build_append_byte(notify, 0x7B); /* AndOp */ build_append_byte(notify, 0x68); /* Arg0Op */ build_append_int(notify, 0x1 << i); build_append_byte(notify, 0x00); /* NullName */ build_append_byte(notify, 0x86); /* NotifyOp */ build_append_nameseg(notify, "S%.02X_", PCI_DEVFN(i, 0)); build_append_byte(notify, 0x69); /* Arg1Op */ /* Pack it up */ build_package(notify, op, 0); build_append_array(method, notify); build_free_array(notify); } build_append_and_cleanup_method(bus_table, method); } /* Append PCNT method to notify about events on local and child buses. * Add unconditionally for root since DSDT expects it. */ if (bus_hotplug_support || child->notify_table->len || !bus->parent_dev) { method = build_alloc_method("PCNT", 0); /* If bus supports hotplug select it and notify about local events */ if (bsel) { build_append_byte(method, 0x70); /* StoreOp */ build_append_int(method, qint_get_int(qobject_to_qint(bsel))); build_append_nameseg(method, "BNUM"); build_append_nameseg(method, "DVNT"); build_append_nameseg(method, "PCIU"); build_append_int(method, 1); /* Device Check */ build_append_nameseg(method, "DVNT"); build_append_nameseg(method, "PCID"); build_append_int(method, 3); /* Eject Request */ } /* Notify about child bus events in any case */ build_append_array(method, child->notify_table); build_append_and_cleanup_method(bus_table, method); /* Append description of child buses */ build_append_array(bus_table, child->device_table); /* Pack it up */ if (bus->parent_dev) { build_extop_package(bus_table, op); } else { build_package(bus_table, op, 0); } /* Append our bus description to parent table */ build_append_array(parent->device_table, bus_table); /* Also tell parent how to notify us, invoking PCNT method. * At the moment this is not needed for root as we have a single root. */ if (bus->parent_dev) { build_append_byte(parent->notify_table, '^'); /* ParentPrefixChar */ build_append_byte(parent->notify_table, 0x2E); /* DualNamePrefix */ build_append_nameseg(parent->notify_table, "S%.02X_", bus->parent_dev->devfn); build_append_nameseg(parent->notify_table, "PCNT"); } } build_free_array(bus_table); build_pci_bus_state_cleanup(child); g_free(child); }
23,787
1
void memory_region_sync_dirty_bitmap(MemoryRegion *mr) { FlatRange *fr; FOR_EACH_FLAT_RANGE(fr, &address_space_memory.current_map) { if (fr->mr == mr) { MEMORY_LISTENER_UPDATE_REGION(fr, &address_space_memory, Forward, log_sync); } } }
23,788
1
static void coroutine_fn resend_aioreq(BDRVSheepdogState *s, AIOReq *aio_req) { SheepdogAIOCB *acb = aio_req->aiocb; bool create = false; /* check whether this request becomes a CoW one */ if (acb->aiocb_type == AIOCB_WRITE_UDATA && is_data_obj(aio_req->oid)) { int idx = data_oid_to_idx(aio_req->oid); if (is_data_obj_writable(&s->inode, idx)) { goto out; } if (check_simultaneous_create(s, aio_req)) { return; } if (s->inode.data_vdi_id[idx]) { aio_req->base_oid = vid_to_data_oid(s->inode.data_vdi_id[idx], idx); aio_req->flags |= SD_FLAG_CMD_COW; } create = true; } out: if (is_data_obj(aio_req->oid)) { add_aio_request(s, aio_req, acb->qiov->iov, acb->qiov->niov, create, acb->aiocb_type); } else { struct iovec iov; iov.iov_base = &s->inode; iov.iov_len = sizeof(s->inode); add_aio_request(s, aio_req, &iov, 1, false, AIOCB_WRITE_UDATA); } }
23,789
1
static void *do_data_decompress(void *opaque) { DecompressParam *param = opaque; unsigned long pagesize; while (!quit_decomp_thread) { qemu_mutex_lock(&param->mutex); while (!param->start && !quit_decomp_thread) { qemu_cond_wait(&param->cond, &param->mutex); pagesize = TARGET_PAGE_SIZE; if (!quit_decomp_thread) { /* uncompress() can fail in some cases, especially * when the page is dirtied while it is being compressed; that is * not a problem, because the dirty page will be retransferred * and uncompress() won't break the data in other pages. */ uncompress((Bytef *)param->des, &pagesize, (const Bytef *)param->compbuf, param->len); } param->start = false; } qemu_mutex_unlock(&param->mutex); } return NULL; }
23,790
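The decompress worker in the entry above sleeps on a condition variable under a mutex and re-checks its wake-up predicate in a loop. The general shape of such a worker, sketched with plain pthreads instead of QEMU's qemu_mutex/qemu_cond wrappers (struct worker_param and its fields are invented for the illustration):

#include <pthread.h>
#include <stdbool.h>

struct worker_param {
    pthread_mutex_t mutex;
    pthread_cond_t cond;
    bool start;   /* set by the producer, which then signals cond */
    bool quit;    /* set when the thread should exit */
};

static void *worker(void *opaque)
{
    struct worker_param *p = opaque;

    pthread_mutex_lock(&p->mutex);
    while (!p->quit) {
        while (!p->start && !p->quit) {
            /* pthread_cond_wait atomically releases the mutex while sleeping
             * and re-acquires it before returning, so the predicate can be
             * re-checked safely. */
            pthread_cond_wait(&p->cond, &p->mutex);
        }
        if (p->start) {
            /* ... perform one unit of work here ... */
            p->start = false;
        }
    }
    pthread_mutex_unlock(&p->mutex);
    return NULL;
}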
1
static int v4l2_send_frame(AVCodecContext *avctx, const AVFrame *frame) { V4L2m2mContext *s = avctx->priv_data; V4L2Context *const output = &s->output; return ff_v4l2_context_enqueue_frame(output, frame); }
23,791
0
static int vp8_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { VP8Context *s = avctx->priv_data; int ret, mb_x, mb_y, i, y, referenced; enum AVDiscard skip_thresh; AVFrame *curframe; if ((ret = decode_frame_header(s, avpkt->data, avpkt->size)) < 0) return ret; referenced = s->update_last || s->update_golden == VP56_FRAME_CURRENT || s->update_altref == VP56_FRAME_CURRENT; skip_thresh = !referenced ? AVDISCARD_NONREF : !s->keyframe ? AVDISCARD_NONKEY : AVDISCARD_ALL; if (avctx->skip_frame >= skip_thresh) { s->invisible = 1; goto skip_decode; } for (i = 0; i < 4; i++) if (&s->frames[i] != s->framep[VP56_FRAME_PREVIOUS] && &s->frames[i] != s->framep[VP56_FRAME_GOLDEN] && &s->frames[i] != s->framep[VP56_FRAME_GOLDEN2]) { curframe = s->framep[VP56_FRAME_CURRENT] = &s->frames[i]; break; } if (curframe->data[0]) avctx->release_buffer(avctx, curframe); curframe->key_frame = s->keyframe; curframe->pict_type = s->keyframe ? FF_I_TYPE : FF_P_TYPE; curframe->reference = referenced ? 3 : 0; if ((ret = avctx->get_buffer(avctx, curframe))) { av_log(avctx, AV_LOG_ERROR, "get_buffer() failed!\n"); return ret; } // Given that arithmetic probabilities are updated every frame, it's quite likely // that the values we have on a random interframe are complete junk if we didn't // start decode on a keyframe. So just don't display anything rather than junk. if (!s->keyframe && (!s->framep[VP56_FRAME_PREVIOUS] || !s->framep[VP56_FRAME_GOLDEN] || !s->framep[VP56_FRAME_GOLDEN2])) { av_log(avctx, AV_LOG_WARNING, "Discarding interframe without a prior keyframe!\n"); return AVERROR_INVALIDDATA; } s->linesize = curframe->linesize[0]; s->uvlinesize = curframe->linesize[1]; if (!s->edge_emu_buffer) s->edge_emu_buffer = av_malloc(21*s->linesize); memset(s->top_nnz, 0, s->mb_width*sizeof(*s->top_nnz)); // top edge of 127 for intra prediction if (!(avctx->flags & CODEC_FLAG_EMU_EDGE)) { memset(curframe->data[0] - s->linesize -1, 127, s->linesize +1); memset(curframe->data[1] - s->uvlinesize-1, 127, s->uvlinesize+1); memset(curframe->data[2] - s->uvlinesize-1, 127, s->uvlinesize+1); } for (mb_y = 0; mb_y < s->mb_height; mb_y++) { VP56RangeCoder *c = &s->coeff_partition[mb_y & (s->num_coeff_partitions-1)]; VP8Macroblock *mb = s->macroblocks + mb_y*s->mb_stride; uint8_t *intra4x4 = s->intra4x4_pred_mode + 4*mb_y*s->b4_stride; uint8_t *dst[3] = { curframe->data[0] + 16*mb_y*s->linesize, curframe->data[1] + 8*mb_y*s->uvlinesize, curframe->data[2] + 8*mb_y*s->uvlinesize }; memset(s->left_nnz, 0, sizeof(s->left_nnz)); // left edge of 129 for intra prediction if (!(avctx->flags & CODEC_FLAG_EMU_EDGE)) for (i = 0; i < 3; i++) for (y = 0; y < 16>>!!i; y++) dst[i][y*curframe->linesize[i]-1] = 129; for (mb_x = 0; mb_x < s->mb_width; mb_x++) { decode_mb_mode(s, mb, mb_x, mb_y, intra4x4 + 4*mb_x); if (!mb->skip) decode_mb_coeffs(s, c, mb, s->top_nnz[mb_x], s->left_nnz); else { AV_ZERO128(s->non_zero_count_cache); // luma AV_ZERO64(s->non_zero_count_cache[4]); // chroma } if (mb->mode <= MODE_I4x4) { intra_predict(s, dst, mb, intra4x4 + 4*mb_x, mb_x, mb_y); memset(mb->bmv, 0, sizeof(mb->bmv)); } else { inter_predict(s, dst, mb, mb_x, mb_y); } if (!mb->skip) { idct_mb(s, dst[0], dst[1], dst[2], mb); } else { AV_ZERO64(s->left_nnz); AV_WN64(s->top_nnz[mb_x], 0); // array of 9, so unaligned // Reset DC block predictors if they would exist if the mb had coefficients if (mb->mode != MODE_I4x4 && mb->mode != VP8_MVMODE_SPLIT) { s->left_nnz[8] = 0; s->top_nnz[mb_x][8] = 0; } } dst[0] += 16; dst[1] += 8; dst[2] += 
8; mb++; } if (mb_y && s->filter.level && avctx->skip_loop_filter < skip_thresh) { if (s->filter.simple) filter_mb_row_simple(s, mb_y-1); else filter_mb_row(s, mb_y-1); } } if (s->filter.level && avctx->skip_loop_filter < skip_thresh) { if (s->filter.simple) filter_mb_row_simple(s, mb_y-1); else filter_mb_row(s, mb_y-1); } skip_decode: // if future frames don't use the updated probabilities, // reset them to the values we saved if (!s->update_probabilities) s->prob[0] = s->prob[1]; // check if golden and altref are swapped if (s->update_altref == VP56_FRAME_GOLDEN && s->update_golden == VP56_FRAME_GOLDEN2) FFSWAP(AVFrame *, s->framep[VP56_FRAME_GOLDEN], s->framep[VP56_FRAME_GOLDEN2]); else { if (s->update_altref != VP56_FRAME_NONE) s->framep[VP56_FRAME_GOLDEN2] = s->framep[s->update_altref]; if (s->update_golden != VP56_FRAME_NONE) s->framep[VP56_FRAME_GOLDEN] = s->framep[s->update_golden]; } if (s->update_last) // move cur->prev s->framep[VP56_FRAME_PREVIOUS] = s->framep[VP56_FRAME_CURRENT]; // release no longer referenced frames for (i = 0; i < 4; i++) if (s->frames[i].data[0] && &s->frames[i] != s->framep[VP56_FRAME_CURRENT] && &s->frames[i] != s->framep[VP56_FRAME_PREVIOUS] && &s->frames[i] != s->framep[VP56_FRAME_GOLDEN] && &s->frames[i] != s->framep[VP56_FRAME_GOLDEN2]) avctx->release_buffer(avctx, &s->frames[i]); if (!s->invisible) { *(AVFrame*)data = *s->framep[VP56_FRAME_CURRENT]; *data_size = sizeof(AVFrame); } return avpkt->size; }
23,792
0
void ff_init_vscale_pfn(SwsContext *c, yuv2planar1_fn yuv2plane1, yuv2planarX_fn yuv2planeX, yuv2interleavedX_fn yuv2nv12cX, yuv2packed1_fn yuv2packed1, yuv2packed2_fn yuv2packed2, yuv2packedX_fn yuv2packedX, yuv2anyX_fn yuv2anyX, int use_mmx) { VScalerContext *lumCtx = NULL; VScalerContext *chrCtx = NULL; int idx = c->numDesc - (c->is_internal_gamma ? 2 : 1); //FIXME avoid hardcoding indexes if (isPlanarYUV(c->dstFormat) || (isGray(c->dstFormat) && !isALPHA(c->dstFormat))) { if (!isGray(c->dstFormat)) { chrCtx = c->desc[idx].instance; chrCtx->filter[0] = use_mmx ? (int16_t*)c->chrMmxFilter : c->vChrFilter; chrCtx->filter_size = c->vChrFilterSize; chrCtx->filter_pos = c->vChrFilterPos; chrCtx->isMMX = use_mmx; --idx; if (yuv2nv12cX) chrCtx->pfn = yuv2nv12cX; else if (c->vChrFilterSize == 1) chrCtx->pfn = yuv2plane1; else chrCtx->pfn = yuv2planeX; } lumCtx = c->desc[idx].instance; lumCtx->filter[0] = use_mmx ? (int16_t*)c->lumMmxFilter : c->vLumFilter; lumCtx->filter[1] = use_mmx ? (int16_t*)c->alpMmxFilter : c->vLumFilter; lumCtx->filter_size = c->vLumFilterSize; lumCtx->filter_pos = c->vLumFilterPos; lumCtx->isMMX = use_mmx; if (c->vLumFilterSize == 1) lumCtx->pfn = yuv2plane1; else lumCtx->pfn = yuv2planeX; } else { lumCtx = c->desc[idx].instance; chrCtx = &lumCtx[1]; lumCtx->filter[0] = c->vLumFilter; lumCtx->filter_size = c->vLumFilterSize; lumCtx->filter_pos = c->vLumFilterPos; chrCtx->filter[0] = c->vChrFilter; chrCtx->filter_size = c->vChrFilterSize; chrCtx->filter_pos = c->vChrFilterPos; lumCtx->isMMX = use_mmx; chrCtx->isMMX = use_mmx; if (yuv2packedX) { if (c->yuv2packed1 && c->vLumFilterSize == 1 && c->vChrFilterSize <= 2) lumCtx->pfn = yuv2packed1; else if (c->yuv2packed2 && c->vLumFilterSize == 2 && c->vChrFilterSize == 2) lumCtx->pfn = yuv2packed2; else lumCtx->pfn = yuv2packedX; } else lumCtx->pfn = yuv2anyX; } }
23,793
0
static int flv_read_packet(AVFormatContext *s, AVPacket *pkt) { int ret, i, type, size, pts, flags, is_audio, next, pos; AVStream *st = NULL; for(;;){ pos = url_ftell(s->pb); url_fskip(s->pb, 4); /* size of previous packet */ type = get_byte(s->pb); size = get_be24(s->pb); pts = get_be24(s->pb); pts |= get_byte(s->pb) << 24; // av_log(s, AV_LOG_DEBUG, "type:%d, size:%d, pts:%d\n", type, size, pts); if (url_feof(s->pb)) return AVERROR(EIO); url_fskip(s->pb, 3); /* stream id, always 0 */ flags = 0; if(size == 0) continue; next= size + url_ftell(s->pb); if (type == FLV_TAG_TYPE_AUDIO) { is_audio=1; flags = get_byte(s->pb); } else if (type == FLV_TAG_TYPE_VIDEO) { is_audio=0; flags = get_byte(s->pb); } else { if (type == FLV_TAG_TYPE_META && size > 13+1+4) flv_read_metabody(s, next); else /* skip packet */ av_log(s, AV_LOG_ERROR, "skipping flv packet: type %d, size %d, flags %d\n", type, size, flags); url_fseek(s->pb, next, SEEK_SET); continue; } /* now find stream */ for(i=0;i<s->nb_streams;i++) { st = s->streams[i]; if (st->id == is_audio) break; } if(i == s->nb_streams){ av_log(NULL, AV_LOG_ERROR, "invalid stream\n"); st= create_stream(s, is_audio); s->ctx_flags &= ~AVFMTCTX_NOHEADER; } // av_log(NULL, AV_LOG_DEBUG, "%d %X %d \n", is_audio, flags, st->discard); if( (st->discard >= AVDISCARD_NONKEY && !((flags & FLV_VIDEO_FRAMETYPE_MASK) == FLV_FRAME_KEY || is_audio)) ||(st->discard >= AVDISCARD_BIDIR && ((flags & FLV_VIDEO_FRAMETYPE_MASK) == FLV_FRAME_DISP_INTER && !is_audio)) || st->discard >= AVDISCARD_ALL ){ url_fseek(s->pb, next, SEEK_SET); continue; } if ((flags & FLV_VIDEO_FRAMETYPE_MASK) == FLV_FRAME_KEY) av_add_index_entry(st, pos, pts, size, 0, AVINDEX_KEYFRAME); break; } // if not streamed and no duration from metadata then seek to end to find the duration from the timestamps if(!url_is_streamed(s->pb) && s->duration==AV_NOPTS_VALUE){ int size; const int pos= url_ftell(s->pb); const int fsize= url_fsize(s->pb); url_fseek(s->pb, fsize-4, SEEK_SET); size= get_be32(s->pb); url_fseek(s->pb, fsize-3-size, SEEK_SET); if(size == get_be24(s->pb) + 11){ s->duration= get_be24(s->pb) * (int64_t)AV_TIME_BASE / 1000; } url_fseek(s->pb, pos, SEEK_SET); } if(is_audio){ if(!st->codec->sample_rate || !st->codec->bits_per_sample || (!st->codec->codec_id && !st->codec->codec_tag)) { st->codec->channels = (flags & FLV_AUDIO_CHANNEL_MASK) == FLV_STEREO ? 2 : 1; if((flags & FLV_AUDIO_CODECID_MASK) == FLV_CODECID_NELLYMOSER_8HZ_MONO) st->codec->sample_rate= 8000; else st->codec->sample_rate = (44100 << ((flags & FLV_AUDIO_SAMPLERATE_MASK) >> FLV_AUDIO_SAMPLERATE_OFFSET) >> 3); st->codec->bits_per_sample = (flags & FLV_AUDIO_SAMPLESIZE_MASK) ? 16 : 8; flv_set_audio_codec(s, st, flags & FLV_AUDIO_CODECID_MASK); } }else{ size -= flv_set_video_codec(s, st, flags & FLV_VIDEO_CODECID_MASK); } ret= av_get_packet(s->pb, pkt, size - 1); if (ret <= 0) { return AVERROR(EIO); } /* note: we need to modify the packet size here to handle the last packet */ pkt->size = ret; pkt->pts = pts; pkt->stream_index = st->index; if (is_audio || ((flags & FLV_VIDEO_FRAMETYPE_MASK) == FLV_FRAME_KEY)) pkt->flags |= PKT_FLAG_KEY; return ret; }
23,794
0
static inline void fill_caches(H264Context *h, int mb_type, int for_deblock){ MpegEncContext * const s = &h->s; const int mb_xy= s->mb_x + s->mb_y*s->mb_stride; int topleft_xy, top_xy, topright_xy, left_xy[2]; int topleft_type, top_type, topright_type, left_type[2]; int left_block[4]; int i; //wow what a mess, why didnt they simplify the interlacing&intra stuff, i cant imagine that these complex rules are worth it if(h->sps.mb_aff){ //FIXME topleft_xy = 0; /* avoid warning */ top_xy = 0; /* avoid warning */ topright_xy = 0; /* avoid warning */ }else{ topleft_xy = mb_xy-1 - s->mb_stride; top_xy = mb_xy - s->mb_stride; topright_xy= mb_xy+1 - s->mb_stride; left_xy[0] = mb_xy-1; left_xy[1] = mb_xy-1; left_block[0]= 0; left_block[1]= 1; left_block[2]= 2; left_block[3]= 3; } if(for_deblock){ topleft_type = h->slice_table[topleft_xy ] < 255 ? s->current_picture.mb_type[topleft_xy] : 0; top_type = h->slice_table[top_xy ] < 255 ? s->current_picture.mb_type[top_xy] : 0; topright_type= h->slice_table[topright_xy] < 255 ? s->current_picture.mb_type[topright_xy]: 0; left_type[0] = h->slice_table[left_xy[0] ] < 255 ? s->current_picture.mb_type[left_xy[0]] : 0; left_type[1] = h->slice_table[left_xy[1] ] < 255 ? s->current_picture.mb_type[left_xy[1]] : 0; }else{ topleft_type = h->slice_table[topleft_xy ] == h->slice_num ? s->current_picture.mb_type[topleft_xy] : 0; top_type = h->slice_table[top_xy ] == h->slice_num ? s->current_picture.mb_type[top_xy] : 0; topright_type= h->slice_table[topright_xy] == h->slice_num ? s->current_picture.mb_type[topright_xy]: 0; left_type[0] = h->slice_table[left_xy[0] ] == h->slice_num ? s->current_picture.mb_type[left_xy[0]] : 0; left_type[1] = h->slice_table[left_xy[1] ] == h->slice_num ? s->current_picture.mb_type[left_xy[1]] : 0; } if(IS_INTRA(mb_type)){ h->topleft_samples_available= h->top_samples_available= h->left_samples_available= 0xFFFF; h->topright_samples_available= 0xEEEA; if(!IS_INTRA(top_type) && (top_type==0 || h->pps.constrained_intra_pred)){ h->topleft_samples_available= 0xB3FF; h->top_samples_available= 0x33FF; h->topright_samples_available= 0x26EA; } for(i=0; i<2; i++){ if(!IS_INTRA(left_type[i]) && (left_type[i]==0 || h->pps.constrained_intra_pred)){ h->topleft_samples_available&= 0xDF5F; h->left_samples_available&= 0x5F5F; } } if(!IS_INTRA(topleft_type) && (topleft_type==0 || h->pps.constrained_intra_pred)) h->topleft_samples_available&= 0x7FFF; if(!IS_INTRA(topright_type) && (topright_type==0 || h->pps.constrained_intra_pred)) h->topright_samples_available&= 0xFBFF; if(IS_INTRA4x4(mb_type)){ if(IS_INTRA4x4(top_type)){ h->intra4x4_pred_mode_cache[4+8*0]= h->intra4x4_pred_mode[top_xy][4]; h->intra4x4_pred_mode_cache[5+8*0]= h->intra4x4_pred_mode[top_xy][5]; h->intra4x4_pred_mode_cache[6+8*0]= h->intra4x4_pred_mode[top_xy][6]; h->intra4x4_pred_mode_cache[7+8*0]= h->intra4x4_pred_mode[top_xy][3]; }else{ int pred; if(!top_type || (IS_INTER(top_type) && h->pps.constrained_intra_pred)) pred= -1; else{ pred= 2; } h->intra4x4_pred_mode_cache[4+8*0]= h->intra4x4_pred_mode_cache[5+8*0]= h->intra4x4_pred_mode_cache[6+8*0]= h->intra4x4_pred_mode_cache[7+8*0]= pred; } for(i=0; i<2; i++){ if(IS_INTRA4x4(left_type[i])){ h->intra4x4_pred_mode_cache[3+8*1 + 2*8*i]= h->intra4x4_pred_mode[left_xy[i]][left_block[0+2*i]]; h->intra4x4_pred_mode_cache[3+8*2 + 2*8*i]= h->intra4x4_pred_mode[left_xy[i]][left_block[1+2*i]]; }else{ int pred; if(!left_type[i] || (IS_INTER(left_type[i]) && h->pps.constrained_intra_pred)) pred= -1; else{ pred= 2; } h->intra4x4_pred_mode_cache[3+8*1 
+ 2*8*i]= h->intra4x4_pred_mode_cache[3+8*2 + 2*8*i]= pred; } } } } /* 0 . T T. T T T T 1 L . .L . . . . 2 L . .L . . . . 3 . T TL . . . . 4 L . .L . . . . 5 L . .. . . . . */ //FIXME constraint_intra_pred & partitioning & nnz (lets hope this is just a typo in the spec) if(top_type){ h->non_zero_count_cache[4+8*0]= h->non_zero_count[top_xy][0]; h->non_zero_count_cache[5+8*0]= h->non_zero_count[top_xy][1]; h->non_zero_count_cache[6+8*0]= h->non_zero_count[top_xy][2]; h->non_zero_count_cache[7+8*0]= h->non_zero_count[top_xy][3]; h->non_zero_count_cache[1+8*0]= h->non_zero_count[top_xy][7]; h->non_zero_count_cache[2+8*0]= h->non_zero_count[top_xy][8]; h->non_zero_count_cache[1+8*3]= h->non_zero_count[top_xy][10]; h->non_zero_count_cache[2+8*3]= h->non_zero_count[top_xy][11]; h->top_cbp= h->cbp_table[top_xy]; }else{ h->non_zero_count_cache[4+8*0]= h->non_zero_count_cache[5+8*0]= h->non_zero_count_cache[6+8*0]= h->non_zero_count_cache[7+8*0]= h->non_zero_count_cache[1+8*0]= h->non_zero_count_cache[2+8*0]= h->non_zero_count_cache[1+8*3]= h->non_zero_count_cache[2+8*3]= h->pps.cabac && !IS_INTRA(mb_type) ? 0 : 64; if(IS_INTRA(mb_type)) h->top_cbp= 0x1C0; else h->top_cbp= 0; } if(left_type[0]){ h->non_zero_count_cache[3+8*1]= h->non_zero_count[left_xy[0]][6]; h->non_zero_count_cache[3+8*2]= h->non_zero_count[left_xy[0]][5]; h->non_zero_count_cache[0+8*1]= h->non_zero_count[left_xy[0]][9]; //FIXME left_block h->non_zero_count_cache[0+8*4]= h->non_zero_count[left_xy[0]][12]; h->left_cbp= h->cbp_table[left_xy[0]]; //FIXME interlacing }else{ h->non_zero_count_cache[3+8*1]= h->non_zero_count_cache[3+8*2]= h->non_zero_count_cache[0+8*1]= h->non_zero_count_cache[0+8*4]= h->pps.cabac && !IS_INTRA(mb_type) ? 0 : 64; if(IS_INTRA(mb_type)) h->left_cbp= 0x1C0;//FIXME interlacing else h->left_cbp= 0; } if(left_type[1]){ h->non_zero_count_cache[3+8*3]= h->non_zero_count[left_xy[1]][4]; h->non_zero_count_cache[3+8*4]= h->non_zero_count[left_xy[1]][3]; h->non_zero_count_cache[0+8*2]= h->non_zero_count[left_xy[1]][8]; h->non_zero_count_cache[0+8*5]= h->non_zero_count[left_xy[1]][11]; }else{ h->non_zero_count_cache[3+8*3]= h->non_zero_count_cache[3+8*4]= h->non_zero_count_cache[0+8*2]= h->non_zero_count_cache[0+8*5]= h->pps.cabac && !IS_INTRA(mb_type) ? 0 : 64; } #if 1 //FIXME direct mb can skip much of this if(IS_INTER(mb_type) || (IS_DIRECT(mb_type) && h->direct_spatial_mv_pred)){ int list; for(list=0; list<2; list++){ if((!IS_8X8(mb_type)) && !USES_LIST(mb_type, list) && !IS_DIRECT(mb_type)){ /*if(!h->mv_cache_clean[list]){ memset(h->mv_cache [list], 0, 8*5*2*sizeof(int16_t)); //FIXME clean only input? clean at all? memset(h->ref_cache[list], PART_NOT_AVAILABLE, 8*5*sizeof(int8_t)); h->mv_cache_clean[list]= 1; }*/ continue; } h->mv_cache_clean[list]= 0; if(IS_INTER(topleft_type)){ const int b_xy = h->mb2b_xy[topleft_xy] + 3 + 3*h->b_stride; const int b8_xy= h->mb2b8_xy[topleft_xy] + 1 + h->b8_stride; *(uint32_t*)h->mv_cache[list][scan8[0] - 1 - 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy]; h->ref_cache[list][scan8[0] - 1 - 1*8]= s->current_picture.ref_index[list][b8_xy]; }else{ *(uint32_t*)h->mv_cache[list][scan8[0] - 1 - 1*8]= 0; h->ref_cache[list][scan8[0] - 1 - 1*8]= topleft_type ? 
LIST_NOT_USED : PART_NOT_AVAILABLE; } if(IS_INTER(top_type)){ const int b_xy= h->mb2b_xy[top_xy] + 3*h->b_stride; const int b8_xy= h->mb2b8_xy[top_xy] + h->b8_stride; *(uint32_t*)h->mv_cache[list][scan8[0] + 0 - 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + 0]; *(uint32_t*)h->mv_cache[list][scan8[0] + 1 - 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + 1]; *(uint32_t*)h->mv_cache[list][scan8[0] + 2 - 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + 2]; *(uint32_t*)h->mv_cache[list][scan8[0] + 3 - 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + 3]; h->ref_cache[list][scan8[0] + 0 - 1*8]= h->ref_cache[list][scan8[0] + 1 - 1*8]= s->current_picture.ref_index[list][b8_xy + 0]; h->ref_cache[list][scan8[0] + 2 - 1*8]= h->ref_cache[list][scan8[0] + 3 - 1*8]= s->current_picture.ref_index[list][b8_xy + 1]; }else{ *(uint32_t*)h->mv_cache [list][scan8[0] + 0 - 1*8]= *(uint32_t*)h->mv_cache [list][scan8[0] + 1 - 1*8]= *(uint32_t*)h->mv_cache [list][scan8[0] + 2 - 1*8]= *(uint32_t*)h->mv_cache [list][scan8[0] + 3 - 1*8]= 0; *(uint32_t*)&h->ref_cache[list][scan8[0] + 0 - 1*8]= ((top_type ? LIST_NOT_USED : PART_NOT_AVAILABLE)&0xFF)*0x01010101; } if(IS_INTER(topright_type)){ const int b_xy= h->mb2b_xy[topright_xy] + 3*h->b_stride; const int b8_xy= h->mb2b8_xy[topright_xy] + h->b8_stride; *(uint32_t*)h->mv_cache[list][scan8[0] + 4 - 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy]; h->ref_cache[list][scan8[0] + 4 - 1*8]= s->current_picture.ref_index[list][b8_xy]; }else{ *(uint32_t*)h->mv_cache [list][scan8[0] + 4 - 1*8]= 0; h->ref_cache[list][scan8[0] + 4 - 1*8]= topright_type ? LIST_NOT_USED : PART_NOT_AVAILABLE; } //FIXME unify cleanup or sth if(IS_INTER(left_type[0])){ const int b_xy= h->mb2b_xy[left_xy[0]] + 3; const int b8_xy= h->mb2b8_xy[left_xy[0]] + 1; *(uint32_t*)h->mv_cache[list][scan8[0] - 1 + 0*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + h->b_stride*left_block[0]]; *(uint32_t*)h->mv_cache[list][scan8[0] - 1 + 1*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + h->b_stride*left_block[1]]; h->ref_cache[list][scan8[0] - 1 + 0*8]= h->ref_cache[list][scan8[0] - 1 + 1*8]= s->current_picture.ref_index[list][b8_xy + h->b8_stride*(left_block[0]>>1)]; }else{ *(uint32_t*)h->mv_cache [list][scan8[0] - 1 + 0*8]= *(uint32_t*)h->mv_cache [list][scan8[0] - 1 + 1*8]= 0; h->ref_cache[list][scan8[0] - 1 + 0*8]= h->ref_cache[list][scan8[0] - 1 + 1*8]= left_type[0] ? LIST_NOT_USED : PART_NOT_AVAILABLE; } if(IS_INTER(left_type[1])){ const int b_xy= h->mb2b_xy[left_xy[1]] + 3; const int b8_xy= h->mb2b8_xy[left_xy[1]] + 1; *(uint32_t*)h->mv_cache[list][scan8[0] - 1 + 2*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + h->b_stride*left_block[2]]; *(uint32_t*)h->mv_cache[list][scan8[0] - 1 + 3*8]= *(uint32_t*)s->current_picture.motion_val[list][b_xy + h->b_stride*left_block[3]]; h->ref_cache[list][scan8[0] - 1 + 2*8]= h->ref_cache[list][scan8[0] - 1 + 3*8]= s->current_picture.ref_index[list][b8_xy + h->b8_stride*(left_block[2]>>1)]; }else{ *(uint32_t*)h->mv_cache [list][scan8[0] - 1 + 2*8]= *(uint32_t*)h->mv_cache [list][scan8[0] - 1 + 3*8]= 0; h->ref_cache[list][scan8[0] - 1 + 2*8]= h->ref_cache[list][scan8[0] - 1 + 3*8]= left_type[0] ? 
LIST_NOT_USED : PART_NOT_AVAILABLE; } if(for_deblock) continue; h->ref_cache[list][scan8[5 ]+1] = h->ref_cache[list][scan8[7 ]+1] = h->ref_cache[list][scan8[13]+1] = //FIXME remove past 3 (init somewher else) h->ref_cache[list][scan8[4 ]] = h->ref_cache[list][scan8[12]] = PART_NOT_AVAILABLE; *(uint32_t*)h->mv_cache [list][scan8[5 ]+1]= *(uint32_t*)h->mv_cache [list][scan8[7 ]+1]= *(uint32_t*)h->mv_cache [list][scan8[13]+1]= //FIXME remove past 3 (init somewher else) *(uint32_t*)h->mv_cache [list][scan8[4 ]]= *(uint32_t*)h->mv_cache [list][scan8[12]]= 0; if( h->pps.cabac ) { /* XXX beurk, Load mvd */ if(IS_INTER(topleft_type)){ const int b_xy = h->mb2b_xy[topleft_xy] + 3 + 3*h->b_stride; *(uint32_t*)h->mvd_cache[list][scan8[0] - 1 - 1*8]= *(uint32_t*)h->mvd_table[list][b_xy]; }else{ *(uint32_t*)h->mvd_cache[list][scan8[0] - 1 - 1*8]= 0; } if(IS_INTER(top_type)){ const int b_xy= h->mb2b_xy[top_xy] + 3*h->b_stride; *(uint32_t*)h->mvd_cache[list][scan8[0] + 0 - 1*8]= *(uint32_t*)h->mvd_table[list][b_xy + 0]; *(uint32_t*)h->mvd_cache[list][scan8[0] + 1 - 1*8]= *(uint32_t*)h->mvd_table[list][b_xy + 1]; *(uint32_t*)h->mvd_cache[list][scan8[0] + 2 - 1*8]= *(uint32_t*)h->mvd_table[list][b_xy + 2]; *(uint32_t*)h->mvd_cache[list][scan8[0] + 3 - 1*8]= *(uint32_t*)h->mvd_table[list][b_xy + 3]; }else{ *(uint32_t*)h->mvd_cache [list][scan8[0] + 0 - 1*8]= *(uint32_t*)h->mvd_cache [list][scan8[0] + 1 - 1*8]= *(uint32_t*)h->mvd_cache [list][scan8[0] + 2 - 1*8]= *(uint32_t*)h->mvd_cache [list][scan8[0] + 3 - 1*8]= 0; } if(IS_INTER(left_type[0])){ const int b_xy= h->mb2b_xy[left_xy[0]] + 3; *(uint32_t*)h->mvd_cache[list][scan8[0] - 1 + 0*8]= *(uint32_t*)h->mvd_table[list][b_xy + h->b_stride*left_block[0]]; *(uint32_t*)h->mvd_cache[list][scan8[0] - 1 + 1*8]= *(uint32_t*)h->mvd_table[list][b_xy + h->b_stride*left_block[1]]; }else{ *(uint32_t*)h->mvd_cache [list][scan8[0] - 1 + 0*8]= *(uint32_t*)h->mvd_cache [list][scan8[0] - 1 + 1*8]= 0; } if(IS_INTER(left_type[1])){ const int b_xy= h->mb2b_xy[left_xy[1]] + 3; *(uint32_t*)h->mvd_cache[list][scan8[0] - 1 + 2*8]= *(uint32_t*)h->mvd_table[list][b_xy + h->b_stride*left_block[2]]; *(uint32_t*)h->mvd_cache[list][scan8[0] - 1 + 3*8]= *(uint32_t*)h->mvd_table[list][b_xy + h->b_stride*left_block[3]]; }else{ *(uint32_t*)h->mvd_cache [list][scan8[0] - 1 + 2*8]= *(uint32_t*)h->mvd_cache [list][scan8[0] - 1 + 3*8]= 0; } *(uint32_t*)h->mvd_cache [list][scan8[5 ]+1]= *(uint32_t*)h->mvd_cache [list][scan8[7 ]+1]= *(uint32_t*)h->mvd_cache [list][scan8[13]+1]= //FIXME remove past 3 (init somewher else) *(uint32_t*)h->mvd_cache [list][scan8[4 ]]= *(uint32_t*)h->mvd_cache [list][scan8[12]]= 0; if(h->slice_type == B_TYPE){ fill_rectangle(&h->direct_cache[scan8[0]], 4, 4, 8, 0, 1); if(IS_DIRECT(top_type)){ *(uint32_t*)&h->direct_cache[scan8[0] - 1*8]= 0x01010101; }else if(IS_8X8(top_type)){ int b8_xy = h->mb2b8_xy[top_xy] + h->b8_stride; h->direct_cache[scan8[0] + 0 - 1*8]= h->direct_table[b8_xy]; h->direct_cache[scan8[0] + 2 - 1*8]= h->direct_table[b8_xy + 1]; }else{ *(uint32_t*)&h->direct_cache[scan8[0] - 1*8]= 0; } //FIXME interlacing if(IS_DIRECT(left_type[0])){ h->direct_cache[scan8[0] - 1 + 0*8]= h->direct_cache[scan8[0] - 1 + 2*8]= 1; }else if(IS_8X8(left_type[0])){ int b8_xy = h->mb2b8_xy[left_xy[0]] + 1; h->direct_cache[scan8[0] - 1 + 0*8]= h->direct_table[b8_xy]; h->direct_cache[scan8[0] - 1 + 2*8]= h->direct_table[b8_xy + h->b8_stride]; }else{ h->direct_cache[scan8[0] - 1 + 0*8]= h->direct_cache[scan8[0] - 1 + 2*8]= 0; } } } } } #endif }
23,795
0
static int update_dimensions(VP8Context *s, int width, int height) { int i; if (avcodec_check_dimensions(s->avctx, width, height)) return AVERROR_INVALIDDATA; vp8_decode_flush(s->avctx); avcodec_set_dimensions(s->avctx, width, height); s->mb_width = (s->avctx->coded_width +15) / 16; s->mb_height = (s->avctx->coded_height+15) / 16; // we allocate a border around the top/left of intra4x4 modes // this is 4 blocks for intra4x4 to keep 4-byte alignment for fill_rectangle s->mb_stride = s->mb_width+1; s->b4_stride = 4*s->mb_stride; s->macroblocks_base = av_mallocz(s->mb_stride*(s->mb_height+1)*sizeof(*s->macroblocks)); s->intra4x4_pred_mode_base = av_mallocz(s->b4_stride*(4*s->mb_height+1)); s->top_nnz = av_mallocz(s->mb_width*sizeof(*s->top_nnz)); if (!s->macroblocks_base || !s->intra4x4_pred_mode_base || !s->top_nnz) return AVERROR(ENOMEM); s->macroblocks = s->macroblocks_base + 1 + s->mb_stride; s->intra4x4_pred_mode = s->intra4x4_pred_mode_base + 4 + s->b4_stride; memset(s->intra4x4_pred_mode_base, DC_PRED, s->b4_stride); for (i = 0; i < 4*s->mb_height; i++) s->intra4x4_pred_mode[i*s->b4_stride-1] = DC_PRED; return 0; }
23,796
0
static void predictor_decompress_fir_adapt(int32_t *error_buffer, int32_t *buffer_out, int output_size, int readsamplesize, int16_t *predictor_coef_table, int predictor_coef_num, int predictor_quantitization) { int i; /* first sample always copies */ *buffer_out = *error_buffer; if (!predictor_coef_num) { if (output_size <= 1) return; memcpy(&buffer_out[1], &error_buffer[1], (output_size - 1) * sizeof(*buffer_out)); return; } if (predictor_coef_num == 31) { /* simple 1st-order prediction */ if (output_size <= 1) return; for (i = 1; i < output_size; i++) { buffer_out[i] = sign_extend(buffer_out[i - 1] + error_buffer[i], readsamplesize); } return; } /* read warm-up samples */ for (i = 0; i < predictor_coef_num; i++) { buffer_out[i + 1] = sign_extend(buffer_out[i] + error_buffer[i + 1], readsamplesize); } /* NOTE: 4 and 8 are very common cases that could be optimized. */ /* general case */ for (i = predictor_coef_num; i < output_size - 1; i++) { int j; int val = 0; int error_val = error_buffer[i + 1]; int error_sign; int d = buffer_out[i - predictor_coef_num]; for (j = 0; j < predictor_coef_num; j++) { val += (buffer_out[i - j] - d) * predictor_coef_table[j]; } val = (val + (1 << (predictor_quantitization - 1))) >> predictor_quantitization; val += d + error_val; buffer_out[i + 1] = sign_extend(val, readsamplesize); /* adapt LPC coefficients */ error_sign = sign_only(error_val); if (error_sign) { for (j = predictor_coef_num - 1; j >= 0 && error_val * error_sign > 0; j--) { int sign; val = d - buffer_out[i - j]; sign = sign_only(val) * error_sign; predictor_coef_table[j] -= sign; val *= sign; error_val -= ((val >> predictor_quantitization) * (predictor_coef_num - j)); } } } }
23,798
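Every predicted sample in the entry above is clamped with sign_extend(value, readsamplesize). A portable way to sign-extend the low bits of a word, shown as a sketch (the decoder's own helper may differ; this version assumes 1 <= bits <= 31):

#include <stdint.h>

static inline int32_t sign_extend_bits(uint32_t val, unsigned bits)
{
    uint32_t sign = 1u << (bits - 1);

    val &= (1u << bits) - 1;                /* keep only the low 'bits' bits */
    return (int32_t)((val ^ sign) - sign);  /* fold the sign bit back in */
}

For example, sign_extend_bits(0xFFu, 8) evaluates to -1, which is what the warm-up sample loop expects for a negative 8-bit residual.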
1
static int alloc_audio_output_buf(AVCodecContext *dec, AVCodecContext *enc, int nb_samples, int *buf_linesize) { int64_t audio_buf_samples; int audio_buf_size; /* calculate required number of samples to allocate */ audio_buf_samples = ((int64_t)nb_samples * enc->sample_rate + dec->sample_rate) / dec->sample_rate; audio_buf_samples = 4 * audio_buf_samples + 10000; // safety factors for resampling audio_buf_samples = FFMAX(audio_buf_samples, enc->frame_size); if (audio_buf_samples > INT_MAX) return AVERROR(EINVAL); audio_buf_size = av_samples_get_buffer_size(buf_linesize, enc->channels, audio_buf_samples, enc->sample_fmt, 0); if (audio_buf_size < 0) return audio_buf_size; av_fast_malloc(&audio_buf, &allocated_audio_buf_size, audio_buf_size); if (!audio_buf) return AVERROR(ENOMEM); return 0; }
23,799
1
struct omap_mmc_s *omap_mmc_init(hwaddr base, MemoryRegion *sysmem, BlockBackend *blk, qemu_irq irq, qemu_irq dma[], omap_clk clk) { struct omap_mmc_s *s = (struct omap_mmc_s *) g_malloc0(sizeof(struct omap_mmc_s)); s->irq = irq; s->dma = dma; s->clk = clk; s->lines = 1; /* TODO: needs to be settable per-board */ s->rev = 1; omap_mmc_reset(s); memory_region_init_io(&s->iomem, NULL, &omap_mmc_ops, s, "omap.mmc", 0x800); memory_region_add_subregion(sysmem, base, &s->iomem); /* Instantiate the storage */ s->card = sd_init(blk, false); if (s->card == NULL) { exit(1); } return s; }
23,800
1
static int nut_write_header(AVFormatContext * avf) { NUTContext * priv = avf->priv_data; AVIOContext * bc = avf->pb; nut_muxer_opts_tt mopts = { .output = { .priv = bc, .write = av_write, }, .alloc = { av_malloc, av_realloc, av_free }, .write_index = 1, .realtime_stream = 0, .max_distance = 32768, .fti = NULL, }; nut_stream_header_tt * s; int i; priv->s = s = av_mallocz((avf->nb_streams + 1) * sizeof*s); for (i = 0; i < avf->nb_streams; i++) { AVCodecContext * codec = avf->streams[i]->codec; int j; int fourcc = 0; int num, denom, ssize; s[i].type = codec->codec_type == AVMEDIA_TYPE_VIDEO ? NUT_VIDEO_CLASS : NUT_AUDIO_CLASS; if (codec->codec_tag) fourcc = codec->codec_tag; else fourcc = ff_codec_get_tag(nut_tags, codec->codec_id); if (!fourcc) { if (codec->codec_type == AVMEDIA_TYPE_VIDEO) fourcc = ff_codec_get_tag(ff_codec_bmp_tags, codec->codec_id); if (codec->codec_type == AVMEDIA_TYPE_AUDIO) fourcc = ff_codec_get_tag(ff_codec_wav_tags, codec->codec_id); } s[i].fourcc_len = 4; s[i].fourcc = av_malloc(s[i].fourcc_len); for (j = 0; j < s[i].fourcc_len; j++) s[i].fourcc[j] = (fourcc >> (j*8)) & 0xFF; ff_parse_specific_params(codec, &num, &ssize, &denom); avpriv_set_pts_info(avf->streams[i], 60, denom, num); s[i].time_base.num = denom; s[i].time_base.den = num; s[i].fixed_fps = 0; s[i].decode_delay = codec->has_b_frames; s[i].codec_specific_len = codec->extradata_size; s[i].codec_specific = codec->extradata; if (codec->codec_type == AVMEDIA_TYPE_VIDEO) { s[i].width = codec->width; s[i].height = codec->height; s[i].sample_width = 0; s[i].sample_height = 0; s[i].colorspace_type = 0; } else { s[i].samplerate_num = codec->sample_rate; s[i].samplerate_denom = 1; s[i].channel_count = codec->channels; } } s[avf->nb_streams].type = -1; priv->nut = nut_muxer_init(&mopts, s, NULL); return 0; }
23,801
0
static inline int RENAME(yuv420_rgb16)(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY, int srcSliceH, uint8_t* dst[], int dstStride[]){ int y, h_size; if(c->srcFormat == PIX_FMT_YUV422P){ srcStride[1] *= 2; srcStride[2] *= 2; } h_size= (c->dstW+7)&~7; if(h_size*2 > dstStride[0]) h_size-=8; __asm__ __volatile__ ("pxor %mm4, %mm4;" /* zero mm4 */ ); //printf("%X %X %X %X %X %X %X %X %X %X\n", (int)&c->redDither, (int)&b5Dither, (int)src[0], (int)src[1], (int)src[2], (int)dst[0], //srcStride[0],srcStride[1],srcStride[2],dstStride[0]); for (y= 0; y<srcSliceH; y++ ) { uint8_t *_image = dst[0] + (y+srcSliceY)*dstStride[0]; uint8_t *_py = src[0] + y*srcStride[0]; uint8_t *_pu = src[1] + (y>>1)*srcStride[1]; uint8_t *_pv = src[2] + (y>>1)*srcStride[2]; long index= -h_size/2; b5Dither= dither8[y&1]; g6Dither= dither4[y&1]; g5Dither= dither8[y&1]; r5Dither= dither8[(y+1)&1]; /* this mmx assembly code deals with SINGLE scan line at a time, it convert 8 pixels in each iteration */ __asm__ __volatile__ ( /* load data for start of next scan line */ "movd (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */ "movd (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */ "movq (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */ // ".balign 16 \n\t" "1: \n\t" /* no speed diference on my p3@500 with prefetch, * if it is faster for anyone with -benchmark then tell me PREFETCH" 64(%0) \n\t" PREFETCH" 64(%1) \n\t" PREFETCH" 64(%2) \n\t" */ YUV2RGB #ifdef DITHER1XBPP "paddusb "MANGLE(b5Dither)", %%mm0;" "paddusb "MANGLE(g6Dither)", %%mm2;" "paddusb "MANGLE(r5Dither)", %%mm1;" #endif /* mask unneeded bits off */ "pand "MANGLE(mmx_redmask)", %%mm0;" /* b7b6b5b4 b3_0_0_0 b7b6b5b4 b3_0_0_0 */ "pand "MANGLE(mmx_grnmask)", %%mm2;" /* g7g6g5g4 g3g2_0_0 g7g6g5g4 g3g2_0_0 */ "pand "MANGLE(mmx_redmask)", %%mm1;" /* r7r6r5r4 r3_0_0_0 r7r6r5r4 r3_0_0_0 */ "psrlw $3,%%mm0;" /* 0_0_0_b7 b6b5b4b3 0_0_0_b7 b6b5b4b3 */ "pxor %%mm4, %%mm4;" /* zero mm4 */ "movq %%mm0, %%mm5;" /* Copy B7-B0 */ "movq %%mm2, %%mm7;" /* Copy G7-G0 */ /* convert rgb24 plane to rgb16 pack for pixel 0-3 */ "punpcklbw %%mm4, %%mm2;" /* 0_0_0_0 0_0_0_0 g7g6g5g4 g3g2_0_0 */ "punpcklbw %%mm1, %%mm0;" /* r7r6r5r4 r3_0_0_0 0_0_0_b7 b6b5b4b3 */ "psllw $3, %%mm2;" /* 0_0_0_0 0_g7g6g5 g4g3g2_0 0_0_0_0 */ "por %%mm2, %%mm0;" /* r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3 */ "movq 8 (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */ MOVNTQ " %%mm0, (%1);" /* store pixel 0-3 */ /* convert rgb24 plane to rgb16 pack for pixel 0-3 */ "punpckhbw %%mm4, %%mm7;" /* 0_0_0_0 0_0_0_0 g7g6g5g4 g3g2_0_0 */ "punpckhbw %%mm1, %%mm5;" /* r7r6r5r4 r3_0_0_0 0_0_0_b7 b6b5b4b3 */ "psllw $3, %%mm7;" /* 0_0_0_0 0_g7g6g5 g4g3g2_0 0_0_0_0 */ "movd 4 (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */ "por %%mm7, %%mm5;" /* r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3 */ "movd 4 (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */ MOVNTQ " %%mm5, 8 (%1);" /* store pixel 4-7 */ "add $16, %1 \n\t" "add $4, %0 \n\t" " js 1b \n\t" : "+r" (index), "+r" (_image) : "r" (_pu - index), "r" (_pv - index), "r"(&c->redDither), "r" (_py - 2*index) ); } __asm__ __volatile__ (EMMS); return srcSliceH; }
23,802
1
static void pc_machine_class_init(ObjectClass *oc, void *data) { MachineClass *mc = MACHINE_CLASS(oc); PCMachineClass *pcmc = PC_MACHINE_CLASS(oc); HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc); pcmc->inter_dimm_gap = true; pcmc->get_hotplug_handler = mc->get_hotplug_handler; mc->get_hotplug_handler = pc_get_hotpug_handler; mc->cpu_index_to_socket_id = pc_cpu_index_to_socket_id; mc->default_boot_order = "cad"; mc->hot_add_cpu = pc_hot_add_cpu; mc->max_cpus = 255; mc->reset = pc_machine_reset; hc->plug = pc_machine_device_plug_cb; hc->unplug_request = pc_machine_device_unplug_request_cb; hc->unplug = pc_machine_device_unplug_cb; }
23,803
1
static void test_none(void) { struct qdist dist; char *pr; qdist_init(&dist); g_assert(isnan(qdist_avg(&dist))); g_assert(isnan(qdist_xmin(&dist))); g_assert(isnan(qdist_xmax(&dist))); pr = qdist_pr_plain(&dist, 0); g_assert(pr == NULL); pr = qdist_pr_plain(&dist, 2); g_assert(pr == NULL); qdist_destroy(&dist); }
23,804
1
void process_pending_signals(CPUArchState *cpu_env) { CPUState *cpu = ENV_GET_CPU(cpu_env); int sig; TaskState *ts = cpu->opaque; if (!ts->signal_pending) return; /* FIXME: This is not threadsafe. */ for(sig = 1; sig <= TARGET_NSIG; sig++) { if (ts->sigtab[sig - 1].pending) { handle_pending_signal(cpu_env, sig); return; } } /* if no signal is pending, just return */ ts->signal_pending = 0; return; }
23,805
1
ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name, ram_addr_t size, void *host) { RAMBlock *new_block, *block; size = TARGET_PAGE_ALIGN(size); new_block = qemu_mallocz(sizeof(*new_block)); if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) { char *id = dev->parent_bus->info->get_dev_path(dev); if (id) { snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id); qemu_free(id); pstrcat(new_block->idstr, sizeof(new_block->idstr), name); QLIST_FOREACH(block, &ram_list.blocks, next) { if (!strcmp(block->idstr, new_block->idstr)) { fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n", new_block->idstr); new_block->offset = find_ram_offset(size); if (host) { new_block->host = host; new_block->flags |= RAM_PREALLOC_MASK; } else { if (mem_path) { #if defined (__linux__) && !defined(TARGET_S390X) new_block->host = file_ram_alloc(new_block, size, mem_path); if (!new_block->host) { new_block->host = qemu_vmalloc(size); qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE); #else fprintf(stderr, "-mem-path option unsupported\n"); exit(1); #endif } else { #if defined(TARGET_S390X) && defined(CONFIG_KVM) /* S390 KVM requires the topmost vma of the RAM to be smaller than an system defined value, which is at least 256GB. Larger systems have larger values. We put the guest between the end of data segment (system break) and this value. We use 32GB as a base to have enough room for the system break to grow. */ new_block->host = mmap((void*)0x800000000, size, PROT_EXEC|PROT_READ|PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0); #else if (xen_mapcache_enabled()) { xen_ram_alloc(new_block->offset, size); } else { new_block->host = qemu_vmalloc(size); #endif qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE); new_block->length = size; QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next); ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty, last_ram_offset() >> TARGET_PAGE_BITS); memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS), 0xff, size >> TARGET_PAGE_BITS); if (kvm_enabled()) kvm_setup_guest_memory(new_block->host, size); return new_block->offset;
23,807
1
char *qmp_memchar_read(const char *device, int64_t size, bool has_format, enum DataFormat format, Error **errp) { CharDriverState *chr; uint8_t *read_data; size_t count; char *data; chr = qemu_chr_find(device); if (!chr) { error_setg(errp, "Device '%s' not found", device); return NULL; } if (qemu_is_chr(chr, "memory")) { error_setg(errp,"%s is not memory char device", device); return NULL; } if (size <= 0) { error_setg(errp, "size must be greater than zero"); return NULL; } count = qemu_chr_cirmem_count(chr); if (count == 0) { return g_strdup(""); } size = size > count ? count : size; read_data = g_malloc0(size + 1); cirmem_chr_read(chr, read_data, size); if (has_format && (format == DATA_FORMAT_BASE64)) { data = g_base64_encode(read_data, size); } else { data = (char *)read_data; } return data; }
23,808
1
static void xbzrle_cache_zero_page(ram_addr_t current_addr) { if (ram_bulk_stage || !migrate_use_xbzrle()) { return; } /* We don't care if this fails to allocate a new cache page * as long as it updates an old one */ cache_insert(XBZRLE.cache, current_addr, ZERO_TARGET_PAGE); }
23,810
1
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc) { /* Flush the whole TB as this will not have race conditions * even if we don't have proper locking yet. * Ideally we would just invalidate the TBs for the * specified PC. */ tb_flush(cpu); }
23,811
1
static void vfio_bar_quirk_teardown(VFIODevice *vdev, int nr) { VFIOBAR *bar = &vdev->bars[nr]; while (!QLIST_EMPTY(&bar->quirks)) { VFIOQuirk *quirk = QLIST_FIRST(&bar->quirks); memory_region_del_subregion(&bar->mem, &quirk->mem); QLIST_REMOVE(quirk, next); g_free(quirk); } }
23,812
0
static void spapr_populate_pci_devices_dt(PCIBus *bus, PCIDevice *pdev, void *opaque) { PCIBus *sec_bus; sPAPRFDT *p = opaque; int offset; sPAPRFDT s_fdt; uint32_t drc_index = spapr_phb_get_pci_drc_index(p->sphb, pdev); offset = spapr_create_pci_child_dt(p->sphb, pdev, drc_index, NULL, p->fdt, p->node_off); if (!offset) { error_report("Failed to create pci child device tree node"); return; } if ((pci_default_read_config(pdev, PCI_HEADER_TYPE, 1) != PCI_HEADER_TYPE_BRIDGE)) { return; } sec_bus = pci_bridge_get_sec_bus(PCI_BRIDGE(pdev)); if (!sec_bus) { return; } s_fdt.fdt = p->fdt; s_fdt.node_off = offset; s_fdt.sphb = p->sphb; pci_for_each_device(sec_bus, pci_bus_num(sec_bus), spapr_populate_pci_devices_dt, &s_fdt); }
23,813
0
static void fifo_trigger_update(void *opaque) { CadenceUARTState *s = opaque; s->r[R_CISR] |= UART_INTR_TIMEOUT; uart_update_status(s); }
23,814
0
const char *qemu_get_version(void) { return qemu_version; }
23,815
0
static int v9fs_synth_truncate(FsContext *ctx, V9fsPath *path, off_t offset) { errno = ENOSYS; return -1; }
23,817
0
int pit_get_mode(PITState *pit, int channel) { PITChannelState *s = &pit->channels[channel]; return s->mode; }
23,818
0
START_TEST(unterminated_sq_string) { QObject *obj = qobject_from_json("'abc"); fail_unless(obj == NULL); }
23,819
0
static void release_drive(Object *obj, const char *name, void *opaque) { DeviceState *dev = DEVICE(obj); Property *prop = opaque; BlockDriverState **ptr = qdev_get_prop_ptr(dev, prop); if (*ptr) { bdrv_detach_dev(*ptr, dev); blockdev_auto_del(*ptr); } }
23,820
0
size_t av_cpu_max_align(void) { int flags = av_get_cpu_flags(); if (flags & AV_CPU_FLAG_AVX) return 32; if (flags & (AV_CPU_FLAG_ALTIVEC | AV_CPU_FLAG_SSE | AV_CPU_FLAG_NEON)) return 16; return 8; }
23,821
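A caller would normally feed the value returned by av_cpu_max_align() into an aligned allocator so that SIMD loads on the buffer stay aligned. A minimal sketch using POSIX posix_memalign rather than FFmpeg's own allocator (alloc_simd_buffer is an invented name):

#include <stdlib.h>

static void *alloc_simd_buffer(size_t align, size_t size)
{
    void *p = NULL;

    if (align < sizeof(void *)) {
        align = sizeof(void *);   /* posix_memalign requires at least pointer alignment */
    }
    if (posix_memalign(&p, align, size) != 0) {
        return NULL;
    }
    return p;   /* free() releases it as usual */
}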
0
void store_booke_tsr (CPUState *env, target_ulong val) { LOG_TB("%s: val " TARGET_FMT_lx "\n", __func__, val); env->spr[SPR_40x_TSR] &= ~(val & 0xFC000000); if (val & 0x80000000) ppc_set_irq(env, PPC_INTERRUPT_PIT, 0); }
23,823
0
static ssize_t local_readlink(FsContext *fs_ctx, V9fsPath *fs_path, char *buf, size_t bufsz) { ssize_t tsize = -1; char buffer[PATH_MAX]; char *path = fs_path->data; if ((fs_ctx->export_flags & V9FS_SM_MAPPED) || (fs_ctx->export_flags & V9FS_SM_MAPPED_FILE)) { int fd; fd = open(rpath(fs_ctx, path, buffer), O_RDONLY | O_NOFOLLOW); if (fd == -1) { return -1; } do { tsize = read(fd, (void *)buf, bufsz); } while (tsize == -1 && errno == EINTR); close(fd); return tsize; } else if ((fs_ctx->export_flags & V9FS_SM_PASSTHROUGH) || (fs_ctx->export_flags & V9FS_SM_NONE)) { tsize = readlink(rpath(fs_ctx, path, buffer), buf, bufsz); } return tsize; }
23,824
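The mapped branch in the entry above wraps read() in a retry loop so that a signal arriving mid-call is not reported as an error. The same pattern as a small standalone helper (read_retry_eintr is an invented name):

#include <errno.h>
#include <unistd.h>

static ssize_t read_retry_eintr(int fd, void *buf, size_t count)
{
    ssize_t n;

    do {
        n = read(fd, buf, count);
    } while (n == -1 && errno == EINTR);  /* interrupted before any data: retry */
    return n;
}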
0
static void pc_dimm_unplug(HotplugHandler *hotplug_dev, DeviceState *dev, Error **errp) { PCMachineState *pcms = PC_MACHINE(hotplug_dev); PCDIMMDevice *dimm = PC_DIMM(dev); PCDIMMDeviceClass *ddc = PC_DIMM_GET_CLASS(dimm); MemoryRegion *mr = ddc->get_memory_region(dimm); HotplugHandlerClass *hhc; Error *local_err = NULL; if (object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM)) { error_setg(&local_err, "nvdimm device hot unplug is not supported yet."); goto out; } hhc = HOTPLUG_HANDLER_GET_CLASS(pcms->acpi_dev); hhc->unplug(HOTPLUG_HANDLER(pcms->acpi_dev), dev, &local_err); if (local_err) { goto out; } pc_dimm_memory_unplug(dev, &pcms->hotplug_memory, mr); object_unparent(OBJECT(dev)); out: error_propagate(errp, local_err); }
23,825
0
static void pflash_cfi02_realize(DeviceState *dev, Error **errp) { pflash_t *pfl = CFI_PFLASH02(dev); uint32_t chip_len; int ret; Error *local_err = NULL; chip_len = pfl->sector_len * pfl->nb_blocs; /* XXX: to be fixed */ #if 0 if (total_len != (8 * 1024 * 1024) && total_len != (16 * 1024 * 1024) && total_len != (32 * 1024 * 1024) && total_len != (64 * 1024 * 1024)) return NULL; #endif memory_region_init_rom_device(&pfl->orig_mem, OBJECT(pfl), pfl->be ? &pflash_cfi02_ops_be : &pflash_cfi02_ops_le, pfl, pfl->name, chip_len, &local_err); if (local_err) { error_propagate(errp, local_err); return; } vmstate_register_ram(&pfl->orig_mem, DEVICE(pfl)); pfl->storage = memory_region_get_ram_ptr(&pfl->orig_mem); pfl->chip_len = chip_len; if (pfl->bs) { /* read the initial flash content */ ret = bdrv_read(pfl->bs, 0, pfl->storage, chip_len >> 9); if (ret < 0) { vmstate_unregister_ram(&pfl->orig_mem, DEVICE(pfl)); error_setg(errp, "failed to read the initial flash content"); return; } } pflash_setup_mappings(pfl); pfl->rom_mode = 1; sysbus_init_mmio(SYS_BUS_DEVICE(dev), &pfl->mem); if (pfl->bs) { pfl->ro = bdrv_is_read_only(pfl->bs); } else { pfl->ro = 0; } pfl->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, pflash_timer, pfl); pfl->wcycle = 0; pfl->cmd = 0; pfl->status = 0; /* Hardcoded CFI table (mostly from SG29 Spansion flash) */ pfl->cfi_len = 0x52; /* Standard "QRY" string */ pfl->cfi_table[0x10] = 'Q'; pfl->cfi_table[0x11] = 'R'; pfl->cfi_table[0x12] = 'Y'; /* Command set (AMD/Fujitsu) */ pfl->cfi_table[0x13] = 0x02; pfl->cfi_table[0x14] = 0x00; /* Primary extended table address */ pfl->cfi_table[0x15] = 0x31; pfl->cfi_table[0x16] = 0x00; /* Alternate command set (none) */ pfl->cfi_table[0x17] = 0x00; pfl->cfi_table[0x18] = 0x00; /* Alternate extended table (none) */ pfl->cfi_table[0x19] = 0x00; pfl->cfi_table[0x1A] = 0x00; /* Vcc min */ pfl->cfi_table[0x1B] = 0x27; /* Vcc max */ pfl->cfi_table[0x1C] = 0x36; /* Vpp min (no Vpp pin) */ pfl->cfi_table[0x1D] = 0x00; /* Vpp max (no Vpp pin) */ pfl->cfi_table[0x1E] = 0x00; /* Reserved */ pfl->cfi_table[0x1F] = 0x07; /* Timeout for min size buffer write (NA) */ pfl->cfi_table[0x20] = 0x00; /* Typical timeout for block erase (512 ms) */ pfl->cfi_table[0x21] = 0x09; /* Typical timeout for full chip erase (4096 ms) */ pfl->cfi_table[0x22] = 0x0C; /* Reserved */ pfl->cfi_table[0x23] = 0x01; /* Max timeout for buffer write (NA) */ pfl->cfi_table[0x24] = 0x00; /* Max timeout for block erase */ pfl->cfi_table[0x25] = 0x0A; /* Max timeout for chip erase */ pfl->cfi_table[0x26] = 0x0D; /* Device size */ pfl->cfi_table[0x27] = ctz32(chip_len); /* Flash device interface (8 & 16 bits) */ pfl->cfi_table[0x28] = 0x02; pfl->cfi_table[0x29] = 0x00; /* Max number of bytes in multi-bytes write */ /* XXX: disable buffered write as it's not supported */ // pfl->cfi_table[0x2A] = 0x05; pfl->cfi_table[0x2A] = 0x00; pfl->cfi_table[0x2B] = 0x00; /* Number of erase block regions (uniform) */ pfl->cfi_table[0x2C] = 0x01; /* Erase block region 1 */ pfl->cfi_table[0x2D] = pfl->nb_blocs - 1; pfl->cfi_table[0x2E] = (pfl->nb_blocs - 1) >> 8; pfl->cfi_table[0x2F] = pfl->sector_len >> 8; pfl->cfi_table[0x30] = pfl->sector_len >> 16; /* Extended */ pfl->cfi_table[0x31] = 'P'; pfl->cfi_table[0x32] = 'R'; pfl->cfi_table[0x33] = 'I'; pfl->cfi_table[0x34] = '1'; pfl->cfi_table[0x35] = '0'; pfl->cfi_table[0x36] = 0x00; pfl->cfi_table[0x37] = 0x00; pfl->cfi_table[0x38] = 0x00; pfl->cfi_table[0x39] = 0x00; pfl->cfi_table[0x3a] = 0x00; pfl->cfi_table[0x3b] = 0x00; pfl->cfi_table[0x3c] = 0x00; }
23,826
0
static inline void gen_op_arith_divw(DisasContext *ctx, TCGv ret, TCGv arg1, TCGv arg2, int sign, int compute_ov) { TCGLabel *l1 = gen_new_label(); TCGLabel *l2 = gen_new_label(); TCGv_i32 t0 = tcg_temp_local_new_i32(); TCGv_i32 t1 = tcg_temp_local_new_i32(); tcg_gen_trunc_tl_i32(t0, arg1); tcg_gen_trunc_tl_i32(t1, arg2); tcg_gen_brcondi_i32(TCG_COND_EQ, t1, 0, l1); if (sign) { TCGLabel *l3 = gen_new_label(); tcg_gen_brcondi_i32(TCG_COND_NE, t1, -1, l3); tcg_gen_brcondi_i32(TCG_COND_EQ, t0, INT32_MIN, l1); gen_set_label(l3); tcg_gen_div_i32(t0, t0, t1); } else { tcg_gen_divu_i32(t0, t0, t1); } if (compute_ov) { tcg_gen_movi_tl(cpu_ov, 0); } tcg_gen_br(l2); gen_set_label(l1); if (sign) { tcg_gen_sari_i32(t0, t0, 31); } else { tcg_gen_movi_i32(t0, 0); } if (compute_ov) { tcg_gen_movi_tl(cpu_ov, 1); tcg_gen_movi_tl(cpu_so, 1); } gen_set_label(l2); tcg_gen_extu_i32_tl(ret, t0); tcg_temp_free_i32(t0); tcg_temp_free_i32(t1); if (unlikely(Rc(ctx->opcode) != 0)) gen_set_Rc0(ctx, ret); }
23,827
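For the signed path, the generated code above branches around the two inputs a host integer divide cannot handle: a zero divisor and INT32_MIN divided by -1, which overflows. The equivalent guard written as ordinary C (div32_guarded is an invented name; the fallback value mirrors the sari/movi sequence in the snippet):

#include <limits.h>
#include <stdint.h>

static int32_t div32_guarded(int32_t num, int32_t den, int *overflow)
{
    if (den == 0 || (num == INT32_MIN && den == -1)) {
        *overflow = 1;
        /* Sign-propagate the dividend, as the arg1 >> 31 fallback above does. */
        return num < 0 ? -1 : 0;
    }
    *overflow = 0;
    return num / den;
}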
0
static void omap_prcm_apll_update(struct omap_prcm_s *s) { int mode[2]; mode[0] = (s->clken[9] >> 6) & 3; s->apll_lock[0] = (mode[0] == 3); mode[1] = (s->clken[9] >> 2) & 3; s->apll_lock[1] = (mode[1] == 3); /* TODO: update clocks */ if (mode[0] == 1 || mode[0] == 2 || mode[1] == 1 || mode[1] == 2) fprintf(stderr, "%s: bad EN_54M_PLL or bad EN_96M_PLL\n", __FUNCTION__); }
23,828
0
static uint64_t musicpal_gpio_read(void *opaque, target_phys_addr_t offset, unsigned size) { musicpal_gpio_state *s = opaque; switch (offset) { case MP_GPIO_OE_HI: /* used for LCD brightness control */ return s->lcd_brightness & MP_OE_LCD_BRIGHTNESS; case MP_GPIO_OUT_LO: return s->out_state & 0xFFFF; case MP_GPIO_OUT_HI: return s->out_state >> 16; case MP_GPIO_IN_LO: return s->in_state & 0xFFFF; case MP_GPIO_IN_HI: return s->in_state >> 16; case MP_GPIO_IER_LO: return s->ier & 0xFFFF; case MP_GPIO_IER_HI: return s->ier >> 16; case MP_GPIO_IMR_LO: return s->imr & 0xFFFF; case MP_GPIO_IMR_HI: return s->imr >> 16; case MP_GPIO_ISR_LO: return s->isr & 0xFFFF; case MP_GPIO_ISR_HI: return s->isr >> 16; default: return 0; } }
23,829
0
static void test_sanity(void) { AHCIQState *ahci; ahci = ahci_boot(); ahci_shutdown(ahci); }
23,830
0
static int qcow2_create(const char *filename, QemuOpts *opts, Error **errp) { char *backing_file = NULL; char *backing_fmt = NULL; char *buf = NULL; uint64_t size = 0; int flags = 0; size_t cluster_size = DEFAULT_CLUSTER_SIZE; PreallocMode prealloc; int version = 3; uint64_t refcount_bits = 16; int refcount_order; Error *local_err = NULL; int ret; /* Read out options */ size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0), BDRV_SECTOR_SIZE); backing_file = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FILE); backing_fmt = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FMT); if (qemu_opt_get_bool_del(opts, BLOCK_OPT_ENCRYPT, false)) { flags |= BLOCK_FLAG_ENCRYPT; } cluster_size = qemu_opt_get_size_del(opts, BLOCK_OPT_CLUSTER_SIZE, DEFAULT_CLUSTER_SIZE); buf = qemu_opt_get_del(opts, BLOCK_OPT_PREALLOC); prealloc = qapi_enum_parse(PreallocMode_lookup, buf, PREALLOC_MODE__MAX, PREALLOC_MODE_OFF, &local_err); if (local_err) { error_propagate(errp, local_err); ret = -EINVAL; goto finish; } g_free(buf); buf = qemu_opt_get_del(opts, BLOCK_OPT_COMPAT_LEVEL); if (!buf) { /* keep the default */ } else if (!strcmp(buf, "0.10")) { version = 2; } else if (!strcmp(buf, "1.1")) { version = 3; } else { error_setg(errp, "Invalid compatibility level: '%s'", buf); ret = -EINVAL; goto finish; } if (qemu_opt_get_bool_del(opts, BLOCK_OPT_LAZY_REFCOUNTS, false)) { flags |= BLOCK_FLAG_LAZY_REFCOUNTS; } if (backing_file && prealloc != PREALLOC_MODE_OFF) { error_setg(errp, "Backing file and preallocation cannot be used at " "the same time"); ret = -EINVAL; goto finish; } if (version < 3 && (flags & BLOCK_FLAG_LAZY_REFCOUNTS)) { error_setg(errp, "Lazy refcounts only supported with compatibility " "level 1.1 and above (use compat=1.1 or greater)"); ret = -EINVAL; goto finish; } refcount_bits = qemu_opt_get_number_del(opts, BLOCK_OPT_REFCOUNT_BITS, refcount_bits); if (refcount_bits > 64 || !is_power_of_2(refcount_bits)) { error_setg(errp, "Refcount width must be a power of two and may not " "exceed 64 bits"); ret = -EINVAL; goto finish; } if (version < 3 && refcount_bits != 16) { error_setg(errp, "Different refcount widths than 16 bits require " "compatibility level 1.1 or above (use compat=1.1 or " "greater)"); ret = -EINVAL; goto finish; } refcount_order = ctz32(refcount_bits); ret = qcow2_create2(filename, size, backing_file, backing_fmt, flags, cluster_size, prealloc, opts, version, refcount_order, &local_err); if (local_err) { error_propagate(errp, local_err); } finish: g_free(backing_file); g_free(backing_fmt); g_free(buf); return ret; }
23,831
0
static void h261_loop_filter_c(uint8_t *src, int stride){ int x,y,xy,yz; int temp[64]; for(x=0; x<8; x++){ temp[x ] = 4*src[x ]; temp[x + 7*8] = 4*src[x + 7*stride]; } for(y=1; y<7; y++){ for(x=0; x<8; x++){ xy = y * stride + x; yz = y * 8 + x; temp[yz] = src[xy - stride] + 2*src[xy] + src[xy + stride]; } } for(y=0; y<8; y++){ src[ y*stride] = (temp[ y*8] + 2)>>2; src[7+y*stride] = (temp[7+y*8] + 2)>>2; for(x=1; x<7; x++){ xy = y * stride + x; yz = y * 8 + x; src[xy] = (temp[yz-1] + 2*temp[yz] + temp[yz+1] + 8)>>4; } } }
23,832
0
int ff_rtsp_connect(AVFormatContext *s) { RTSPState *rt = s->priv_data; char host[1024], path[1024], tcpname[1024], cmd[2048], auth[128]; char *option_list, *option, *filename; int port, err, tcp_fd; RTSPMessageHeader reply1 = {}, *reply = &reply1; int lower_transport_mask = 0; char real_challenge[64]; struct sockaddr_storage peer; socklen_t peer_len = sizeof(peer); if (!ff_network_init()) return AVERROR(EIO); redirect: rt->control_transport = RTSP_MODE_PLAIN; /* extract hostname and port */ av_url_split(NULL, 0, auth, sizeof(auth), host, sizeof(host), &port, path, sizeof(path), s->filename); if (*auth) { av_strlcpy(rt->auth, auth, sizeof(rt->auth)); } if (port < 0) port = RTSP_DEFAULT_PORT; /* search for options */ option_list = strrchr(path, '?'); if (option_list) { /* Strip out the RTSP specific options, write out the rest of * the options back into the same string. */ filename = option_list; while (option_list) { /* move the option pointer */ option = ++option_list; option_list = strchr(option_list, '&'); if (option_list) *option_list = 0; /* handle the options */ if (!strcmp(option, "udp")) { lower_transport_mask |= (1<< RTSP_LOWER_TRANSPORT_UDP); } else if (!strcmp(option, "multicast")) { lower_transport_mask |= (1<< RTSP_LOWER_TRANSPORT_UDP_MULTICAST); } else if (!strcmp(option, "tcp")) { lower_transport_mask |= (1<< RTSP_LOWER_TRANSPORT_TCP); } else if(!strcmp(option, "http")) { lower_transport_mask |= (1<< RTSP_LOWER_TRANSPORT_TCP); rt->control_transport = RTSP_MODE_TUNNEL; } else { /* Write options back into the buffer, using memmove instead * of strcpy since the strings may overlap. */ int len = strlen(option); memmove(++filename, option, len); filename += len; if (option_list) *filename = '&'; } } *filename = 0; } if (!lower_transport_mask) lower_transport_mask = (1 << RTSP_LOWER_TRANSPORT_NB) - 1; if (s->oformat) { /* Only UDP or TCP - UDP multicast isn't supported. */ lower_transport_mask &= (1 << RTSP_LOWER_TRANSPORT_UDP) | (1 << RTSP_LOWER_TRANSPORT_TCP); if (!lower_transport_mask || rt->control_transport == RTSP_MODE_TUNNEL) { av_log(s, AV_LOG_ERROR, "Unsupported lower transport method, " "only UDP and TCP are supported for output.\n"); err = AVERROR(EINVAL); goto fail; } } /* Construct the URI used in request; this is similar to s->filename, * but with authentication credentials removed and RTSP specific options * stripped out. */ ff_url_join(rt->control_uri, sizeof(rt->control_uri), "rtsp", NULL, host, port, "%s", path); if (rt->control_transport == RTSP_MODE_TUNNEL) { /* set up initial handshake for tunneling */ char httpname[1024]; char sessioncookie[17]; char headers[1024]; ff_url_join(httpname, sizeof(httpname), "http", auth, host, port, "%s", path); snprintf(sessioncookie, sizeof(sessioncookie), "%08x%08x", av_get_random_seed(), av_get_random_seed()); /* GET requests */ if (url_alloc(&rt->rtsp_hd, httpname, URL_RDONLY) < 0) { err = AVERROR(EIO); goto fail; } /* generate GET headers */ snprintf(headers, sizeof(headers), "x-sessioncookie: %s\r\n" "Accept: application/x-rtsp-tunnelled\r\n" "Pragma: no-cache\r\n" "Cache-Control: no-cache\r\n", sessioncookie); ff_http_set_headers(rt->rtsp_hd, headers); /* complete the connection */ if (url_connect(rt->rtsp_hd)) { err = AVERROR(EIO); goto fail; } /* POST requests */ if (url_alloc(&rt->rtsp_hd_out, httpname, URL_WRONLY) < 0 ) { err = AVERROR(EIO); goto fail; } /* generate POST headers */ snprintf(headers, sizeof(headers), "x-sessioncookie: %s\r\n" "Content-Type: application/x-rtsp-tunnelled\r\n" "Pragma: no-cache\r\n" "Cache-Control: no-cache\r\n" "Content-Length: 32767\r\n" "Expires: Sun, 9 Jan 1972 00:00:00 GMT\r\n", sessioncookie); ff_http_set_headers(rt->rtsp_hd_out, headers); ff_http_set_chunked_transfer_encoding(rt->rtsp_hd_out, 0); /* Initialize the authentication state for the POST session. The HTTP * protocol implementation doesn't properly handle multi-pass * authentication for POST requests, since it would require one of * the following: * - implementing Expect: 100-continue, which many HTTP servers * don't support anyway, even less the RTSP servers that do HTTP * tunneling * - sending the whole POST data until getting a 401 reply specifying * what authentication method to use, then resending all that data * - waiting for potential 401 replies directly after sending the * POST header (waiting for some unspecified time) * Therefore, we copy the full auth state, which works for both basic * and digest. (For digest, we would have to synchronize the nonce * count variable between the two sessions, if we'd do more requests * with the original session, though.) */ ff_http_init_auth_state(rt->rtsp_hd_out, rt->rtsp_hd); /* complete the connection */ if (url_connect(rt->rtsp_hd_out)) { err = AVERROR(EIO); goto fail; } } else { /* open the tcp connection */ ff_url_join(tcpname, sizeof(tcpname), "tcp", NULL, host, port, NULL); if (url_open(&rt->rtsp_hd, tcpname, URL_RDWR) < 0) { err = AVERROR(EIO); goto fail; } rt->rtsp_hd_out = rt->rtsp_hd; } rt->seq = 0; tcp_fd = url_get_file_handle(rt->rtsp_hd); if (!getpeername(tcp_fd, (struct sockaddr*) &peer, &peer_len)) { getnameinfo((struct sockaddr*) &peer, peer_len, host, sizeof(host), NULL, 0, NI_NUMERICHOST); } /* request options supported by the server; this also detects server * type */ for (rt->server_type = RTSP_SERVER_RTP;;) { cmd[0] = 0; if (rt->server_type == RTSP_SERVER_REAL) av_strlcat(cmd, /** * The following entries are required for proper * streaming from a Realmedia server. They are * interdependent in some way although we currently * don't quite understand how. Values were copied * from mplayer SVN r23589. * @param CompanyID is a 16-byte ID in base64 * @param ClientChallenge is a 16-byte ID in hex */ "ClientChallenge: 9e26d33f2984236010ef6253fb1887f7\r\n" "PlayerStarttime: [28/03/2003:22:50:23 00:00]\r\n" "CompanyID: KnKV4M4I/B2FjJ1TToLycw==\r\n" "GUID: 00000000-0000-0000-0000-000000000000\r\n", sizeof(cmd)); ff_rtsp_send_cmd(s, "OPTIONS", rt->control_uri, cmd, reply, NULL); if (reply->status_code != RTSP_STATUS_OK) { err = AVERROR_INVALIDDATA; goto fail; } /* detect server type if not standard-compliant RTP */ if (rt->server_type != RTSP_SERVER_REAL && reply->real_challenge[0]) { rt->server_type = RTSP_SERVER_REAL; continue; } else if (!strncasecmp(reply->server, "WMServer/", 9)) { rt->server_type = RTSP_SERVER_WMS; } else if (rt->server_type == RTSP_SERVER_REAL) strcpy(real_challenge, reply->real_challenge); break; } if (s->iformat) err = rtsp_setup_input_streams(s, reply); else err = rtsp_setup_output_streams(s, host); if (err) goto fail; do { int lower_transport = ff_log2_tab[lower_transport_mask & ~(lower_transport_mask - 1)]; err = make_setup_request(s, host, port, lower_transport, rt->server_type == RTSP_SERVER_REAL ? real_challenge : NULL); if (err < 0) goto fail; lower_transport_mask &= ~(1 << lower_transport); if (lower_transport_mask == 0 && err == 1) { err = FF_NETERROR(EPROTONOSUPPORT); goto fail; } } while (err); rt->state = RTSP_STATE_IDLE; rt->seek_timestamp = 0; /* default is to start stream at position zero */ return 0; fail: ff_rtsp_close_streams(s); ff_rtsp_close_connections(s); if (reply->status_code >=300 && reply->status_code < 400 && s->iformat) { av_strlcpy(s->filename, reply->location, sizeof(s->filename)); av_log(s, AV_LOG_INFO, "Status %d: Redirecting to %s\n", reply->status_code, s->filename); goto redirect; } ff_network_close(); return err; }
23,833
1
static void bdrv_query_info(BlockBackend *blk, BlockInfo **p_info, Error **errp) { BlockInfo *info = g_malloc0(sizeof(*info)); BlockDriverState *bs = blk_bs(blk); BlockDriverState *bs0; ImageInfo **p_image_info; Error *local_err = NULL; info->device = g_strdup(blk_name(blk)); info->type = g_strdup("unknown"); info->locked = blk_dev_is_medium_locked(blk); info->removable = blk_dev_has_removable_media(blk); if (blk_dev_has_removable_media(blk)) { info->has_tray_open = true; info->tray_open = blk_dev_is_tray_open(blk); } if (bdrv_iostatus_is_enabled(bs)) { info->has_io_status = true; info->io_status = bs->iostatus; } if (!QLIST_EMPTY(&bs->dirty_bitmaps)) { info->has_dirty_bitmaps = true; info->dirty_bitmaps = bdrv_query_dirty_bitmaps(bs); } if (bs->drv) { info->has_inserted = true; info->inserted = bdrv_block_device_info(bs); bs0 = bs; p_image_info = &info->inserted->image; while (1) { bdrv_query_image_info(bs0, p_image_info, &local_err); if (local_err) { error_propagate(errp, local_err); goto err; } if (bs0->drv && bs0->backing_hd) { bs0 = bs0->backing_hd; (*p_image_info)->has_backing_image = true; p_image_info = &((*p_image_info)->backing_image); } else { break; } } } *p_info = info; return; err: qapi_free_BlockInfo(info); }
23,834
1
static int ipvideo_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; IpvideoContext *s = avctx->priv_data; AVFrame *frame = data; int ret; if (buf_size < 2) return AVERROR_INVALIDDATA; /* decoding map contains 4 bits of information per 8x8 block */ s->decoding_map_size = AV_RL16(avpkt->data); /* compressed buffer needs to be large enough to at least hold an entire * decoding map */ if (buf_size < s->decoding_map_size + 2) return buf_size; if (av_packet_get_side_data(avpkt, AV_PKT_DATA_PARAM_CHANGE, NULL)) { av_frame_unref(s->last_frame); av_frame_unref(s->second_last_frame); } s->decoding_map = buf + 2; bytestream2_init(&s->stream_ptr, buf + 2 + s->decoding_map_size, buf_size - s->decoding_map_size); if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0) return ret; if (!s->is_16bpp) { int size; const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, &size); if (pal && size == AVPALETTE_SIZE) { frame->palette_has_changed = 1; memcpy(s->pal, pal, AVPALETTE_SIZE); } else if (pal) { av_log(avctx, AV_LOG_ERROR, "Palette size %d is wrong\n", size); } } ipvideo_decode_opcodes(s, frame); *got_frame = 1; /* shuffle frames */ av_frame_unref(s->second_last_frame); FFSWAP(AVFrame*, s->second_last_frame, s->last_frame); if ((ret = av_frame_ref(s->last_frame, frame)) < 0) return ret; /* report that the buffer was completely consumed */ return buf_size; }
23,835
0
static int process_ipmovie_chunk(IPMVEContext *s, AVIOContext *pb, AVPacket *pkt) { unsigned char chunk_preamble[CHUNK_PREAMBLE_SIZE]; int chunk_type; int chunk_size; unsigned char opcode_preamble[OPCODE_PREAMBLE_SIZE]; unsigned char opcode_type; unsigned char opcode_version; int opcode_size; unsigned char scratch[1024]; int i, j; int first_color, last_color; int audio_flags; unsigned char r, g, b; unsigned int width, height; /* see if there are any pending packets */ chunk_type = load_ipmovie_packet(s, pb, pkt); if (chunk_type != CHUNK_DONE) return chunk_type; /* read the next chunk, wherever the file happens to be pointing */ if (avio_feof(pb)) return CHUNK_EOF; if (avio_read(pb, chunk_preamble, CHUNK_PREAMBLE_SIZE) != CHUNK_PREAMBLE_SIZE) return CHUNK_BAD; chunk_size = AV_RL16(&chunk_preamble[0]); chunk_type = AV_RL16(&chunk_preamble[2]); av_log(s->avf, AV_LOG_TRACE, "chunk type 0x%04X, 0x%04X bytes: ", chunk_type, chunk_size); switch (chunk_type) { case CHUNK_INIT_AUDIO: av_log(s->avf, AV_LOG_TRACE, "initialize audio\n"); break; case CHUNK_AUDIO_ONLY: av_log(s->avf, AV_LOG_TRACE, "audio only\n"); break; case CHUNK_INIT_VIDEO: av_log(s->avf, AV_LOG_TRACE, "initialize video\n"); break; case CHUNK_VIDEO: av_log(s->avf, AV_LOG_TRACE, "video (and audio)\n"); break; case CHUNK_SHUTDOWN: av_log(s->avf, AV_LOG_TRACE, "shutdown\n"); break; case CHUNK_END: av_log(s->avf, AV_LOG_TRACE, "end\n"); break; default: av_log(s->avf, AV_LOG_TRACE, "invalid chunk\n"); chunk_type = CHUNK_BAD; break; } while ((chunk_size > 0) && (chunk_type != CHUNK_BAD)) { /* read the next chunk, wherever the file happens to be pointing */ if (avio_feof(pb)) { chunk_type = CHUNK_EOF; break; } if (avio_read(pb, opcode_preamble, CHUNK_PREAMBLE_SIZE) != CHUNK_PREAMBLE_SIZE) { chunk_type = CHUNK_BAD; break; } opcode_size = AV_RL16(&opcode_preamble[0]); opcode_type = opcode_preamble[2]; opcode_version = opcode_preamble[3]; chunk_size -= OPCODE_PREAMBLE_SIZE; chunk_size -= opcode_size; if (chunk_size < 0) { av_log(s->avf, AV_LOG_TRACE, "chunk_size countdown just went negative\n"); chunk_type = CHUNK_BAD; break; } av_log(s->avf, AV_LOG_TRACE, " opcode type %02X, version %d, 0x%04X bytes: ", opcode_type, opcode_version, opcode_size); switch (opcode_type) { case OPCODE_END_OF_STREAM: av_log(s->avf, AV_LOG_TRACE, "end of stream\n"); avio_skip(pb, opcode_size); break; case OPCODE_END_OF_CHUNK: av_log(s->avf, AV_LOG_TRACE, "end of chunk\n"); avio_skip(pb, opcode_size); break; case OPCODE_CREATE_TIMER: av_log(s->avf, AV_LOG_TRACE, "create timer\n"); if ((opcode_version > 0) || (opcode_size != 6)) { av_log(s->avf, AV_LOG_TRACE, "bad create_timer opcode\n"); chunk_type = CHUNK_BAD; break; } if (avio_read(pb, scratch, opcode_size) != opcode_size) { chunk_type = CHUNK_BAD; break; } s->frame_pts_inc = ((uint64_t)AV_RL32(&scratch[0])) * AV_RL16(&scratch[4]); break; case OPCODE_INIT_AUDIO_BUFFERS: av_log(s->avf, AV_LOG_TRACE, "initialize audio buffers\n"); if (opcode_version > 1 || opcode_size > 10 || opcode_size < 6) { av_log(s->avf, AV_LOG_TRACE, "bad init_audio_buffers opcode\n"); chunk_type = CHUNK_BAD; break; } if (avio_read(pb, scratch, opcode_size) != opcode_size) { chunk_type = CHUNK_BAD; break; } s->audio_sample_rate = AV_RL16(&scratch[4]); audio_flags = AV_RL16(&scratch[2]); /* bit 0 of the flags: 0 = mono, 1 = stereo */ s->audio_channels = (audio_flags & 1) + 1; /* bit 1 of the flags: 0 = 8 bit, 1 = 16 bit */ s->audio_bits = (((audio_flags >> 1) & 1) + 1) * 8; /* bit 2 indicates compressed audio in version 1 opcode */ if ((opcode_version == 1) && (audio_flags & 0x4)) s->audio_type = AV_CODEC_ID_INTERPLAY_DPCM; else if (s->audio_bits == 16) s->audio_type = AV_CODEC_ID_PCM_S16LE; else s->audio_type = AV_CODEC_ID_PCM_U8; av_log(s->avf, AV_LOG_TRACE, "audio: %d bits, %d Hz, %s, %s format\n", s->audio_bits, s->audio_sample_rate, (s->audio_channels == 2) ? "stereo" : "mono", (s->audio_type == AV_CODEC_ID_INTERPLAY_DPCM) ? "Interplay audio" : "PCM"); break; case OPCODE_START_STOP_AUDIO: av_log(s->avf, AV_LOG_TRACE, "start/stop audio\n"); avio_skip(pb, opcode_size); break; case OPCODE_INIT_VIDEO_BUFFERS: av_log(s->avf, AV_LOG_TRACE, "initialize video buffers\n"); if ((opcode_version > 2) || (opcode_size > 8) || opcode_size < 4 || opcode_version == 2 && opcode_size < 8 ) { av_log(s->avf, AV_LOG_TRACE, "bad init_video_buffers opcode\n"); chunk_type = CHUNK_BAD; break; } if (avio_read(pb, scratch, opcode_size) != opcode_size) { chunk_type = CHUNK_BAD; break; } width = AV_RL16(&scratch[0]) * 8; height = AV_RL16(&scratch[2]) * 8; if (width != s->video_width) { s->video_width = width; s->changed++; } if (height != s->video_height) { s->video_height = height; s->changed++; } if (opcode_version < 2 || !AV_RL16(&scratch[6])) { s->video_bpp = 8; } else { s->video_bpp = 16; } av_log(s->avf, AV_LOG_TRACE, "video resolution: %d x %d\n", s->video_width, s->video_height); break; case OPCODE_UNKNOWN_06: case OPCODE_UNKNOWN_0E: case OPCODE_UNKNOWN_10: case OPCODE_UNKNOWN_12: case OPCODE_UNKNOWN_13: case OPCODE_UNKNOWN_14: case OPCODE_UNKNOWN_15: av_log(s->avf, AV_LOG_TRACE, "unknown (but documented) opcode %02X\n", opcode_type); avio_skip(pb, opcode_size); break; case OPCODE_SEND_BUFFER: av_log(s->avf, AV_LOG_TRACE, "send buffer\n"); avio_skip(pb, opcode_size); s->send_buffer = 1; break; case OPCODE_AUDIO_FRAME: av_log(s->avf, AV_LOG_TRACE, "audio frame\n"); /* log position and move on for now */ s->audio_chunk_offset = avio_tell(pb); s->audio_chunk_size = opcode_size; avio_skip(pb, opcode_size); break; case OPCODE_SILENCE_FRAME: av_log(s->avf, AV_LOG_TRACE, "silence frame\n"); avio_skip(pb, opcode_size); break; case OPCODE_INIT_VIDEO_MODE: av_log(s->avf, AV_LOG_TRACE, "initialize video mode\n"); avio_skip(pb, opcode_size); break; case OPCODE_CREATE_GRADIENT: av_log(s->avf, AV_LOG_TRACE, "create gradient\n"); avio_skip(pb, opcode_size); break; case OPCODE_SET_PALETTE: av_log(s->avf, AV_LOG_TRACE, "set palette\n"); /* check for the logical maximum palette size * (3 * 256 + 4 bytes) */ if (opcode_size > 0x304 || opcode_size < 4) { av_log(s->avf, AV_LOG_TRACE, "demux_ipmovie: set_palette opcode with invalid size\n"); chunk_type = CHUNK_BAD; break; } if (avio_read(pb, scratch, opcode_size) != opcode_size) { chunk_type = CHUNK_BAD; break; } /* load the palette into internal data structure */ first_color = AV_RL16(&scratch[0]); last_color = first_color + AV_RL16(&scratch[2]) - 1; /* sanity check (since they are 16 bit values) */ if ( (first_color > 0xFF) || (last_color > 0xFF) || (last_color - first_color + 1)*3 + 4 > opcode_size) { av_log(s->avf, AV_LOG_TRACE, "demux_ipmovie: set_palette indexes out of range (%d -> %d)\n", first_color, last_color); chunk_type = CHUNK_BAD; break; } j = 4; /* offset of first palette data */ for (i = first_color; i <= last_color; i++) { /* the palette is stored as a 6-bit VGA palette, thus each * component is shifted up to a 8-bit range */ r = scratch[j++] * 4; g = scratch[j++] * 4; b = scratch[j++] * 4; s->palette[i] = (0xFFU << 24) | (r << 16) | (g << 8) | (b); s->palette[i] |= s->palette[i] >> 6 & 0x30303; } s->has_palette = 1; break; case OPCODE_SET_PALETTE_COMPRESSED: av_log(s->avf, AV_LOG_TRACE, "set palette compressed\n"); avio_skip(pb, opcode_size); break; case OPCODE_SET_DECODING_MAP: av_log(s->avf, AV_LOG_TRACE, "set decoding map\n"); /* log position and move on for now */ s->decode_map_chunk_offset = avio_tell(pb); s->decode_map_chunk_size = opcode_size; avio_skip(pb, opcode_size); break; case OPCODE_VIDEO_DATA_11: av_log(s->avf, AV_LOG_TRACE, "set video data\n"); s->frame_format = 0x11; /* log position and move on for now */ s->video_chunk_offset = avio_tell(pb); s->video_chunk_size = opcode_size; avio_skip(pb, opcode_size); break; default: av_log(s->avf, AV_LOG_TRACE, "*** unknown opcode type\n"); chunk_type = CHUNK_BAD; break; } } if (s->avf->nb_streams == 1 && s->audio_type) init_audio(s->avf); /* make a note of where the stream is sitting */ s->next_chunk_offset = avio_tell(pb); /* dispatch the first of any pending packets */ if ((chunk_type == CHUNK_VIDEO) || (chunk_type == CHUNK_AUDIO_ONLY)) chunk_type = load_ipmovie_packet(s, pb, pkt); return chunk_type; }
23,836
1
static OutputStream *new_video_stream(OptionsContext *o, AVFormatContext *oc, int source_index) { AVStream *st; OutputStream *ost; AVCodecContext *video_enc; char *frame_rate = NULL, *frame_aspect_ratio = NULL; ost = new_output_stream(o, oc, AVMEDIA_TYPE_VIDEO, source_index); st = ost->st; video_enc = ost->enc_ctx; MATCH_PER_STREAM_OPT(frame_rates, str, frame_rate, oc, st); if (frame_rate && av_parse_video_rate(&ost->frame_rate, frame_rate) < 0) { av_log(NULL, AV_LOG_FATAL, "Invalid framerate value: %s\n", frame_rate); exit_program(1); } if (frame_rate && video_sync_method == VSYNC_PASSTHROUGH) av_log(NULL, AV_LOG_ERROR, "Using -vsync 0 and -r can produce invalid output files\n"); MATCH_PER_STREAM_OPT(frame_aspect_ratios, str, frame_aspect_ratio, oc, st); if (frame_aspect_ratio) { AVRational q; if (av_parse_ratio(&q, frame_aspect_ratio, 255, 0, NULL) < 0 || q.num <= 0 || q.den <= 0) { av_log(NULL, AV_LOG_FATAL, "Invalid aspect ratio: %s\n", frame_aspect_ratio); exit_program(1); } ost->frame_aspect_ratio = q; } MATCH_PER_STREAM_OPT(filter_scripts, str, ost->filters_script, oc, st); MATCH_PER_STREAM_OPT(filters, str, ost->filters, oc, st); if (!ost->stream_copy) { const char *p = NULL; char *frame_size = NULL; char *frame_pix_fmt = NULL; char *intra_matrix = NULL, *inter_matrix = NULL; char *chroma_intra_matrix = NULL; int do_pass = 0; int i; MATCH_PER_STREAM_OPT(frame_sizes, str, frame_size, oc, st); if (frame_size && av_parse_video_size(&video_enc->width, &video_enc->height, frame_size) < 0) { av_log(NULL, AV_LOG_FATAL, "Invalid frame size: %s.\n", frame_size); exit_program(1); } video_enc->bits_per_raw_sample = frame_bits_per_raw_sample; MATCH_PER_STREAM_OPT(frame_pix_fmts, str, frame_pix_fmt, oc, st); if (frame_pix_fmt && *frame_pix_fmt == '+') { ost->keep_pix_fmt = 1; if (!*++frame_pix_fmt) frame_pix_fmt = NULL; } if (frame_pix_fmt && (video_enc->pix_fmt = av_get_pix_fmt(frame_pix_fmt)) == AV_PIX_FMT_NONE) { av_log(NULL, AV_LOG_FATAL, "Unknown pixel format requested: %s.\n", frame_pix_fmt); exit_program(1); } st->sample_aspect_ratio = video_enc->sample_aspect_ratio; if (intra_only) video_enc->gop_size = 0; MATCH_PER_STREAM_OPT(intra_matrices, str, intra_matrix, oc, st); if (intra_matrix) { if (!(video_enc->intra_matrix = av_mallocz(sizeof(*video_enc->intra_matrix) * 64))) { av_log(NULL, AV_LOG_FATAL, "Could not allocate memory for intra matrix.\n"); exit_program(1); } parse_matrix_coeffs(video_enc->intra_matrix, intra_matrix); } MATCH_PER_STREAM_OPT(chroma_intra_matrices, str, chroma_intra_matrix, oc, st); if (chroma_intra_matrix) { uint16_t *p = av_mallocz(sizeof(*video_enc->chroma_intra_matrix) * 64); if (!p) { av_log(NULL, AV_LOG_FATAL, "Could not allocate memory for intra matrix.\n"); exit_program(1); } av_codec_set_chroma_intra_matrix(video_enc, p); parse_matrix_coeffs(p, chroma_intra_matrix); } MATCH_PER_STREAM_OPT(inter_matrices, str, inter_matrix, oc, st); if (inter_matrix) { if (!(video_enc->inter_matrix = av_mallocz(sizeof(*video_enc->inter_matrix) * 64))) { av_log(NULL, AV_LOG_FATAL, "Could not allocate memory for inter matrix.\n"); exit_program(1); } parse_matrix_coeffs(video_enc->inter_matrix, inter_matrix); } MATCH_PER_STREAM_OPT(rc_overrides, str, p, oc, st); for (i = 0; p; i++) { int start, end, q; int e = sscanf(p, "%d,%d,%d", &start, &end, &q); if (e != 3) { av_log(NULL, AV_LOG_FATAL, "error parsing rc_override\n"); exit_program(1); } /* FIXME realloc failure */ video_enc->rc_override = av_realloc_array(video_enc->rc_override, i + 1, sizeof(RcOverride)); video_enc->rc_override[i].start_frame = start; video_enc->rc_override[i].end_frame = end; if (q > 0) { video_enc->rc_override[i].qscale = q; video_enc->rc_override[i].quality_factor = 1.0; } else { video_enc->rc_override[i].qscale = 0; video_enc->rc_override[i].quality_factor = -q/100.0; } p = strchr(p, '/'); if (p) p++; } video_enc->rc_override_count = i; if (do_psnr) video_enc->flags|= CODEC_FLAG_PSNR; /* two pass mode */ MATCH_PER_STREAM_OPT(pass, i, do_pass, oc, st); if (do_pass) { if (do_pass & 1) { video_enc->flags |= CODEC_FLAG_PASS1; av_dict_set(&ost->encoder_opts, "flags", "+pass1", AV_DICT_APPEND); } if (do_pass & 2) { video_enc->flags |= CODEC_FLAG_PASS2; av_dict_set(&ost->encoder_opts, "flags", "+pass2", AV_DICT_APPEND); } } MATCH_PER_STREAM_OPT(passlogfiles, str, ost->logfile_prefix, oc, st); if (ost->logfile_prefix && !(ost->logfile_prefix = av_strdup(ost->logfile_prefix))) exit_program(1); MATCH_PER_STREAM_OPT(forced_key_frames, str, ost->forced_keyframes, oc, st); if (ost->forced_keyframes) ost->forced_keyframes = av_strdup(ost->forced_keyframes); MATCH_PER_STREAM_OPT(force_fps, i, ost->force_fps, oc, st); ost->top_field_first = -1; MATCH_PER_STREAM_OPT(top_field_first, i, ost->top_field_first, oc, st); ost->avfilter = get_ost_filters(o, oc, ost); if (!ost->avfilter) exit_program(1); } else { MATCH_PER_STREAM_OPT(copy_initial_nonkeyframes, i, ost->copy_initial_nonkeyframes, oc ,st); } if (ost->stream_copy) check_streamcopy_filters(o, oc, ost, AVMEDIA_TYPE_VIDEO); return ost; }
23,837
1
static void put_float64(QEMUFile *f, void *pv, size_t size) { uint64_t *v = pv; qemu_put_be64(f, float64_val(*v)); }
23,838
1
static void pci_host_config_write(void *opaque, target_phys_addr_t addr, uint64_t val, unsigned len) { PCIHostState *s = opaque; PCI_DPRINTF("%s addr " TARGET_FMT_plx " len %d val %"PRIx64"\n", __func__, addr, len, val); s->config_reg = val; }
23,839
1
static void qobject_input_type_number(Visitor *v, const char *name, double *obj, Error **errp) { QObjectInputVisitor *qiv = to_qiv(v); QObject *qobj = qobject_input_get_object(qiv, name, true, errp); QInt *qint; QFloat *qfloat; if (!qobj) { return; } qint = qobject_to_qint(qobj); if (qint) { *obj = qint_get_int(qobject_to_qint(qobj)); return; } qfloat = qobject_to_qfloat(qobj); if (qfloat) { *obj = qfloat_get_double(qobject_to_qfloat(qobj)); return; } error_setg(errp, QERR_INVALID_PARAMETER_TYPE, name ? name : "null", "number"); }
23,842
1
int av_lockmgr_register(int (*cb)(void **mutex, enum AVLockOp op)) { if (lockmgr_cb) { // There is no good way to rollback a failure to destroy the // mutex, so we ignore failures. lockmgr_cb(&codec_mutex, AV_LOCK_DESTROY); lockmgr_cb(&avformat_mutex, AV_LOCK_DESTROY); lockmgr_cb = NULL; codec_mutex = NULL; avformat_mutex = NULL; } if (cb) { void *new_codec_mutex = NULL; void *new_avformat_mutex = NULL; int err; if (err = cb(&new_codec_mutex, AV_LOCK_CREATE)) { return err > 0 ? AVERROR_UNKNOWN : err; } if (err = cb(&new_avformat_mutex, AV_LOCK_CREATE)) { // Ignore failures to destroy the newly created mutex. cb(&new_codec_mutex, AV_LOCK_DESTROY); return err > 0 ? AVERROR_UNKNOWN : err; } lockmgr_cb = cb; codec_mutex = new_codec_mutex; avformat_mutex = new_avformat_mutex; } return 0; }
23,843
1
static void compute_stereo(MPADecodeContext *s, GranuleDef *g0, GranuleDef *g1) { int i, j, k, l; int sf_max, sf, len, non_zero_found; INTFLOAT (*is_tab)[16], *tab0, *tab1, tmp0, tmp1, v1, v2; int non_zero_found_short[3]; /* intensity stereo */ if (s->mode_ext & MODE_EXT_I_STEREO) { if (!s->lsf) { is_tab = is_table; sf_max = 7; } else { is_tab = is_table_lsf[g1->scalefac_compress & 1]; sf_max = 16; } tab0 = g0->sb_hybrid + 576; tab1 = g1->sb_hybrid + 576; non_zero_found_short[0] = 0; non_zero_found_short[1] = 0; non_zero_found_short[2] = 0; k = (13 - g1->short_start) * 3 + g1->long_end - 3; for (i = 12; i >= g1->short_start; i--) { /* for last band, use previous scale factor */ if (i != 11) k -= 3; len = band_size_short[s->sample_rate_index][i]; for (l = 2; l >= 0; l--) { tab0 -= len; tab1 -= len; if (!non_zero_found_short[l]) { /* test if non zero band. if so, stop doing i-stereo */ for (j = 0; j < len; j++) { if (tab1[j] != 0) { non_zero_found_short[l] = 1; goto found1; } } sf = g1->scale_factors[k + l]; if (sf >= sf_max) goto found1; v1 = is_tab[0][sf]; v2 = is_tab[1][sf]; for (j = 0; j < len; j++) { tmp0 = tab0[j]; tab0[j] = MULLx(tmp0, v1, FRAC_BITS); tab1[j] = MULLx(tmp0, v2, FRAC_BITS); } } else { found1: if (s->mode_ext & MODE_EXT_MS_STEREO) { /* lower part of the spectrum : do ms stereo if enabled */ for (j = 0; j < len; j++) { tmp0 = tab0[j]; tmp1 = tab1[j]; tab0[j] = MULLx(tmp0 + tmp1, ISQRT2, FRAC_BITS); tab1[j] = MULLx(tmp0 - tmp1, ISQRT2, FRAC_BITS); } } } } } non_zero_found = non_zero_found_short[0] | non_zero_found_short[1] | non_zero_found_short[2]; for (i = g1->long_end - 1;i >= 0;i--) { len = band_size_long[s->sample_rate_index][i]; tab0 -= len; tab1 -= len; /* test if non zero band. if so, stop doing i-stereo */ if (!non_zero_found) { for (j = 0; j < len; j++) { if (tab1[j] != 0) { non_zero_found = 1; goto found2; } } /* for last band, use previous scale factor */ k = (i == 21) ? 20 : i; sf = g1->scale_factors[k]; if (sf >= sf_max) goto found2; v1 = is_tab[0][sf]; v2 = is_tab[1][sf]; for (j = 0; j < len; j++) { tmp0 = tab0[j]; tab0[j] = MULLx(tmp0, v1, FRAC_BITS); tab1[j] = MULLx(tmp0, v2, FRAC_BITS); } } else { found2: if (s->mode_ext & MODE_EXT_MS_STEREO) { /* lower part of the spectrum : do ms stereo if enabled */ for (j = 0; j < len; j++) { tmp0 = tab0[j]; tmp1 = tab1[j]; tab0[j] = MULLx(tmp0 + tmp1, ISQRT2, FRAC_BITS); tab1[j] = MULLx(tmp0 - tmp1, ISQRT2, FRAC_BITS); } } } } } else if (s->mode_ext & MODE_EXT_MS_STEREO) { /* ms stereo ONLY */ /* NOTE: the 1/sqrt(2) normalization factor is included in the global gain */ #if USE_FLOATS s->fdsp->butterflies_float(g0->sb_hybrid, g1->sb_hybrid, 576); #else tab0 = g0->sb_hybrid; tab1 = g1->sb_hybrid; for (i = 0; i < 576; i++) { tmp0 = tab0[i]; tmp1 = tab1[i]; tab0[i] = tmp0 + tmp1; tab1[i] = tmp0 - tmp1; } #endif } }
23,844
1
static MemoryRegion *nvdimm_get_memory_region(PCDIMMDevice *dimm) { NVDIMMDevice *nvdimm = NVDIMM(dimm); return &nvdimm->nvdimm_mr; }
23,845
0
static void usb_serial_realize(USBDevice *dev, Error **errp) { USBSerialState *s = DO_UPCAST(USBSerialState, dev, dev); usb_desc_create_serial(dev); usb_desc_init(dev); dev->auto_attach = 0; if (!s->cs) { error_setg(errp, "Property chardev is required"); return; } qemu_chr_add_handlers(s->cs, usb_serial_can_read, usb_serial_read, usb_serial_event, s); usb_serial_handle_reset(dev); if (s->cs->be_open && !dev->attached) { usb_device_attach(dev, errp); } }
23,846
0
static void tmu2_write(void *opaque, target_phys_addr_t addr, uint64_t value, unsigned size) { MilkymistTMU2State *s = opaque; trace_milkymist_tmu2_memory_write(addr, value); addr >>= 2; switch (addr) { case R_CTL: s->regs[addr] = value; if (value & CTL_START_BUSY) { tmu2_start(s); } break; case R_BRIGHTNESS: case R_HMESHLAST: case R_VMESHLAST: case R_CHROMAKEY: case R_VERTICESADDR: case R_TEXFBUF: case R_TEXHRES: case R_TEXVRES: case R_TEXHMASK: case R_TEXVMASK: case R_DSTFBUF: case R_DSTHRES: case R_DSTVRES: case R_DSTHOFFSET: case R_DSTVOFFSET: case R_DSTSQUAREW: case R_DSTSQUAREH: case R_ALPHA: s->regs[addr] = value; break; default: error_report("milkymist_tmu2: write access to unknown register 0x" TARGET_FMT_plx, addr << 2); break; } tmu2_check_registers(s); }
23,848
0
static int SocketAddress_to_str(char *dest, int max_len, const char *prefix, SocketAddress *addr, bool is_listen, bool is_telnet) { switch (addr->type) { case SOCKET_ADDRESS_KIND_INET: return snprintf(dest, max_len, "%s%s:%s:%s%s", prefix, is_telnet ? "telnet" : "tcp", addr->u.inet->host, addr->u.inet->port, is_listen ? ",server" : ""); break; case SOCKET_ADDRESS_KIND_UNIX: return snprintf(dest, max_len, "%sunix:%s%s", prefix, addr->u.q_unix->path, is_listen ? ",server" : ""); break; case SOCKET_ADDRESS_KIND_FD: return snprintf(dest, max_len, "%sfd:%s%s", prefix, addr->u.fd->str, is_listen ? ",server" : ""); break; default: abort(); } }
23,850
0
static int blkverify_open(BlockDriverState *bs, const char *filename, int flags) { BDRVBlkverifyState *s = bs->opaque; int ret; char *raw, *c; /* Parse the blkverify: prefix */ if (strncmp(filename, "blkverify:", strlen("blkverify:"))) { return -EINVAL; } filename += strlen("blkverify:"); /* Parse the raw image filename */ c = strchr(filename, ':'); if (c == NULL) { return -EINVAL; } raw = strdup(filename); raw[c - filename] = '\0'; ret = bdrv_file_open(&bs->file, raw, flags); free(raw); if (ret < 0) { return ret; } filename = c + 1; /* Open the test file */ s->test_file = bdrv_new(""); ret = bdrv_open(s->test_file, filename, flags, NULL); if (ret < 0) { bdrv_delete(s->test_file); s->test_file = NULL; return ret; } return 0; }
23,851
0
static int32_t scsi_disk_dma_command(SCSIRequest *req, uint8_t *buf) { SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); uint32_t len; uint8_t command; command = buf[0]; if (s->tray_open || !bdrv_is_inserted(s->qdev.conf.bs)) { scsi_check_condition(r, SENSE_CODE(NO_MEDIUM)); return 0; } len = scsi_data_cdb_length(r->req.cmd.buf); switch (command) { case READ_6: case READ_10: case READ_12: case READ_16: DPRINTF("Read (sector %" PRId64 ", count %u)\n", r->req.cmd.lba, len); if (r->req.cmd.buf[1] & 0xe0) { goto illegal_request; } if (!check_lba_range(s, r->req.cmd.lba, len)) { goto illegal_lba; } r->sector = r->req.cmd.lba * (s->qdev.blocksize / 512); r->sector_count = len * (s->qdev.blocksize / 512); break; case WRITE_6: case WRITE_10: case WRITE_12: case WRITE_16: case WRITE_VERIFY_10: case WRITE_VERIFY_12: case WRITE_VERIFY_16: if (bdrv_is_read_only(s->qdev.conf.bs)) { scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED)); return 0; } DPRINTF("Write %s(sector %" PRId64 ", count %u)\n", (command & 0xe) == 0xe ? "And Verify " : "", r->req.cmd.lba, len); if (r->req.cmd.buf[1] & 0xe0) { goto illegal_request; } if (!check_lba_range(s, r->req.cmd.lba, len)) { goto illegal_lba; } r->sector = r->req.cmd.lba * (s->qdev.blocksize / 512); r->sector_count = len * (s->qdev.blocksize / 512); break; default: abort(); illegal_request: scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); return 0; illegal_lba: scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE)); return 0; } if (r->sector_count == 0) { scsi_req_complete(&r->req, GOOD); } assert(r->iov.iov_len == 0); if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { return -r->sector_count * 512; } else { return r->sector_count * 512; } }
23,852
0
static void core_begin(MemoryListener *listener) { destroy_all_mappings(); phys_sections_clear(); phys_map.ptr = PHYS_MAP_NODE_NIL; phys_section_unassigned = dummy_section(&io_mem_unassigned); phys_section_notdirty = dummy_section(&io_mem_notdirty); phys_section_rom = dummy_section(&io_mem_rom); phys_section_watch = dummy_section(&io_mem_watch); }
23,853
0
START_TEST(qlist_new_test) { QList *qlist; qlist = qlist_new(); fail_unless(qlist != NULL); fail_unless(qlist->base.refcnt == 1); fail_unless(qobject_type(QOBJECT(qlist)) == QTYPE_QLIST); // destroy doesn't exist yet g_free(qlist); }
23,856
0
static void do_info(int argc, const char **argv) { term_cmd_t *cmd; const char *item; if (argc < 2) goto help; item = argv[1]; for(cmd = info_cmds; cmd->name != NULL; cmd++) { if (compare_cmd(argv[1], cmd->name)) goto found; } help: help_cmd(argv[0]); return; found: cmd->handler(argc, argv); }
23,857
0
static void scsi_command_complete(void *opaque, int ret) { int status; SCSIGenericReq *r = (SCSIGenericReq *)opaque; r->req.aiocb = NULL; if (r->req.io_canceled) { goto done; } if (r->io_header.driver_status & SG_ERR_DRIVER_SENSE) { r->req.sense_len = r->io_header.sb_len_wr; } if (ret != 0) { switch (ret) { case -EDOM: status = TASK_SET_FULL; break; case -ENOMEM: status = CHECK_CONDITION; scsi_req_build_sense(&r->req, SENSE_CODE(TARGET_FAILURE)); break; default: status = CHECK_CONDITION; scsi_req_build_sense(&r->req, SENSE_CODE(IO_ERROR)); break; } } else { if (r->io_header.host_status == SG_ERR_DID_NO_CONNECT || r->io_header.host_status == SG_ERR_DID_BUS_BUSY || r->io_header.host_status == SG_ERR_DID_TIME_OUT || (r->io_header.driver_status & SG_ERR_DRIVER_TIMEOUT)) { status = BUSY; BADF("Driver Timeout\n"); } else if (r->io_header.host_status) { status = CHECK_CONDITION; scsi_req_build_sense(&r->req, SENSE_CODE(I_T_NEXUS_LOSS)); } else if (r->io_header.status) { status = r->io_header.status; } else if (r->io_header.driver_status & SG_ERR_DRIVER_SENSE) { status = CHECK_CONDITION; } else { status = GOOD; } } DPRINTF("Command complete 0x%p tag=0x%x status=%d\n", r, r->req.tag, status); scsi_req_complete(&r->req, status); done: if (!r->req.io_canceled) { scsi_req_unref(&r->req); } }
23,858
0
static bool iasl_installed(void) { gchar *out = NULL, *out_err = NULL; bool ret; /* pass 'out' and 'out_err' in order to be redirected */ ret = g_spawn_command_line_sync("iasl", &out, &out_err, NULL, NULL); if (out_err) { ret = ret && (out_err[0] == '\0'); g_free(out_err); } if (out) { g_free(out); } return ret; }
23,859
0
static void qxl_blit(PCIQXLDevice *qxl, QXLRect *rect) { DisplaySurface *surface = qemu_console_surface(qxl->vga.con); uint8_t *dst = surface_data(surface); uint8_t *src; int len, i; if (is_buffer_shared(surface)) { return; } if (!qxl->guest_primary.data) { trace_qxl_render_blit_guest_primary_initialized(); qxl->guest_primary.data = memory_region_get_ram_ptr(&qxl->vga.vram); } trace_qxl_render_blit(qxl->guest_primary.qxl_stride, rect->left, rect->right, rect->top, rect->bottom); src = qxl->guest_primary.data; if (qxl->guest_primary.qxl_stride < 0) { /* qxl surface is upside down, walk src scanlines * in reverse order to flip it */ src += (qxl->guest_primary.surface.height - rect->top - 1) * qxl->guest_primary.abs_stride; } else { src += rect->top * qxl->guest_primary.abs_stride; } dst += rect->top * qxl->guest_primary.abs_stride; src += rect->left * qxl->guest_primary.bytes_pp; dst += rect->left * qxl->guest_primary.bytes_pp; len = (rect->right - rect->left) * qxl->guest_primary.bytes_pp; for (i = rect->top; i < rect->bottom; i++) { memcpy(dst, src, len); dst += qxl->guest_primary.abs_stride; src += qxl->guest_primary.qxl_stride; } }
23,860