label (int64, 0–1) | func1 (string, length 23–97k) | id (int64, 0–27.3k) |
---|---|---|
0 | static inline uint64_t vtd_get_slpte_addr(uint64_t slpte) { return slpte & VTD_SL_PT_BASE_ADDR_MASK(VTD_HOST_ADDRESS_WIDTH); } | 18,237 |
0 | static void open_eth_cleanup(NetClientState *nc) { } | 18,239 |
0 | void HELPER(wsr_lbeg)(uint32_t v) { if (env->sregs[LBEG] != v) { tb_invalidate_phys_page_range( env->sregs[LEND] - 1, env->sregs[LEND], 0); env->sregs[LBEG] = v; } } | 18,240 |
0 | void mips_cpu_do_interrupt(CPUState *cs) { #if !defined(CONFIG_USER_ONLY) MIPSCPU *cpu = MIPS_CPU(cs); CPUMIPSState *env = &cpu->env; bool update_badinstr = 0; target_ulong offset; int cause = -1; const char *name; if (qemu_loglevel_mask(CPU_LOG_INT) && cs->exception_index != EXCP_EXT_INTERRUPT) { if (cs->exception_index < 0 || cs->exception_index > EXCP_LAST) { name = "unknown"; } else { name = excp_names[cs->exception_index]; } qemu_log("%s enter: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx " %s exception\n", __func__, env->active_tc.PC, env->CP0_EPC, name); } if (cs->exception_index == EXCP_EXT_INTERRUPT && (env->hflags & MIPS_HFLAG_DM)) { cs->exception_index = EXCP_DINT; } offset = 0x180; switch (cs->exception_index) { case EXCP_DSS: env->CP0_Debug |= 1 << CP0DB_DSS; /* Debug single step cannot be raised inside a delay slot and resume will always occur on the next instruction (but we assume the pc has always been updated during code translation). */ env->CP0_DEPC = env->active_tc.PC | !!(env->hflags & MIPS_HFLAG_M16); goto enter_debug_mode; case EXCP_DINT: env->CP0_Debug |= 1 << CP0DB_DINT; goto set_DEPC; case EXCP_DIB: env->CP0_Debug |= 1 << CP0DB_DIB; goto set_DEPC; case EXCP_DBp: env->CP0_Debug |= 1 << CP0DB_DBp; goto set_DEPC; case EXCP_DDBS: env->CP0_Debug |= 1 << CP0DB_DDBS; goto set_DEPC; case EXCP_DDBL: env->CP0_Debug |= 1 << CP0DB_DDBL; set_DEPC: env->CP0_DEPC = exception_resume_pc(env); env->hflags &= ~MIPS_HFLAG_BMASK; enter_debug_mode: if (env->insn_flags & ISA_MIPS3) { env->hflags |= MIPS_HFLAG_64; if (!(env->insn_flags & ISA_MIPS64R6) || env->CP0_Status & (1 << CP0St_KX)) { env->hflags &= ~MIPS_HFLAG_AWRAP; } } env->hflags |= MIPS_HFLAG_DM | MIPS_HFLAG_CP0; env->hflags &= ~(MIPS_HFLAG_KSU); /* EJTAG probe trap enable is not implemented... */ if (!(env->CP0_Status & (1 << CP0St_EXL))) env->CP0_Cause &= ~(1U << CP0Ca_BD); env->active_tc.PC = (int32_t)0xBFC00480; set_hflags_for_handler(env); break; case EXCP_RESET: cpu_reset(CPU(cpu)); break; case EXCP_SRESET: env->CP0_Status |= (1 << CP0St_SR); memset(env->CP0_WatchLo, 0, sizeof(env->CP0_WatchLo)); goto set_error_EPC; case EXCP_NMI: env->CP0_Status |= (1 << CP0St_NMI); set_error_EPC: env->CP0_ErrorEPC = exception_resume_pc(env); env->hflags &= ~MIPS_HFLAG_BMASK; env->CP0_Status |= (1 << CP0St_ERL) | (1 << CP0St_BEV); if (env->insn_flags & ISA_MIPS3) { env->hflags |= MIPS_HFLAG_64; if (!(env->insn_flags & ISA_MIPS64R6) || env->CP0_Status & (1 << CP0St_KX)) { env->hflags &= ~MIPS_HFLAG_AWRAP; } } env->hflags |= MIPS_HFLAG_CP0; env->hflags &= ~(MIPS_HFLAG_KSU); if (!(env->CP0_Status & (1 << CP0St_EXL))) env->CP0_Cause &= ~(1U << CP0Ca_BD); env->active_tc.PC = (int32_t)0xBFC00000; set_hflags_for_handler(env); break; case EXCP_EXT_INTERRUPT: cause = 0; if (env->CP0_Cause & (1 << CP0Ca_IV)) { uint32_t spacing = (env->CP0_IntCtl >> CP0IntCtl_VS) & 0x1f; if ((env->CP0_Status & (1 << CP0St_BEV)) || spacing == 0) { offset = 0x200; } else { uint32_t vector = 0; uint32_t pending = (env->CP0_Cause & CP0Ca_IP_mask) >> CP0Ca_IP; if (env->CP0_Config3 & (1 << CP0C3_VEIC)) { /* For VEIC mode, the external interrupt controller feeds * the vector through the CP0Cause IP lines. */ vector = pending; } else { /* Vectored Interrupts * Mask with Status.IM7-IM0 to get enabled interrupts. */ pending &= (env->CP0_Status >> CP0St_IM) & 0xff; /* Find the highest-priority interrupt. 
*/ while (pending >>= 1) { vector++; } } offset = 0x200 + (vector * (spacing << 5)); } } goto set_EPC; case EXCP_LTLBL: cause = 1; update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL); goto set_EPC; case EXCP_TLBL: cause = 2; update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL); if ((env->error_code & EXCP_TLB_NOMATCH) && !(env->CP0_Status & (1 << CP0St_EXL))) { #if defined(TARGET_MIPS64) int R = env->CP0_BadVAddr >> 62; int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0; int SX = (env->CP0_Status & (1 << CP0St_SX)) != 0; int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0; if (((R == 0 && UX) || (R == 1 && SX) || (R == 3 && KX)) && (!(env->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F)))) offset = 0x080; else #endif offset = 0x000; } goto set_EPC; case EXCP_TLBS: cause = 3; update_badinstr = 1; if ((env->error_code & EXCP_TLB_NOMATCH) && !(env->CP0_Status & (1 << CP0St_EXL))) { #if defined(TARGET_MIPS64) int R = env->CP0_BadVAddr >> 62; int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0; int SX = (env->CP0_Status & (1 << CP0St_SX)) != 0; int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0; if (((R == 0 && UX) || (R == 1 && SX) || (R == 3 && KX)) && (!(env->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F)))) offset = 0x080; else #endif offset = 0x000; } goto set_EPC; case EXCP_AdEL: cause = 4; update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL); goto set_EPC; case EXCP_AdES: cause = 5; update_badinstr = 1; goto set_EPC; case EXCP_IBE: cause = 6; goto set_EPC; case EXCP_DBE: cause = 7; goto set_EPC; case EXCP_SYSCALL: cause = 8; update_badinstr = 1; goto set_EPC; case EXCP_BREAK: cause = 9; update_badinstr = 1; goto set_EPC; case EXCP_RI: cause = 10; update_badinstr = 1; goto set_EPC; case EXCP_CpU: cause = 11; update_badinstr = 1; env->CP0_Cause = (env->CP0_Cause & ~(0x3 << CP0Ca_CE)) | (env->error_code << CP0Ca_CE); goto set_EPC; case EXCP_OVERFLOW: cause = 12; update_badinstr = 1; goto set_EPC; case EXCP_TRAP: cause = 13; update_badinstr = 1; goto set_EPC; case EXCP_MSAFPE: cause = 14; update_badinstr = 1; goto set_EPC; case EXCP_FPE: cause = 15; update_badinstr = 1; goto set_EPC; case EXCP_C2E: cause = 18; goto set_EPC; case EXCP_TLBRI: cause = 19; update_badinstr = 1; goto set_EPC; case EXCP_TLBXI: cause = 20; goto set_EPC; case EXCP_MSADIS: cause = 21; update_badinstr = 1; goto set_EPC; case EXCP_MDMX: cause = 22; goto set_EPC; case EXCP_DWATCH: cause = 23; /* XXX: TODO: manage deferred watch exceptions */ goto set_EPC; case EXCP_MCHECK: cause = 24; goto set_EPC; case EXCP_THREAD: cause = 25; goto set_EPC; case EXCP_DSPDIS: cause = 26; goto set_EPC; case EXCP_CACHE: cause = 30; if (env->CP0_Status & (1 << CP0St_BEV)) { offset = 0x100; } else { offset = 0x20000100; } set_EPC: if (!(env->CP0_Status & (1 << CP0St_EXL))) { env->CP0_EPC = exception_resume_pc(env); if (update_badinstr) { set_badinstr_registers(env); } if (env->hflags & MIPS_HFLAG_BMASK) { env->CP0_Cause |= (1U << CP0Ca_BD); } else { env->CP0_Cause &= ~(1U << CP0Ca_BD); } env->CP0_Status |= (1 << CP0St_EXL); if (env->insn_flags & ISA_MIPS3) { env->hflags |= MIPS_HFLAG_64; if (!(env->insn_flags & ISA_MIPS64R6) || env->CP0_Status & (1 << CP0St_KX)) { env->hflags &= ~MIPS_HFLAG_AWRAP; } } env->hflags |= MIPS_HFLAG_CP0; env->hflags &= ~(MIPS_HFLAG_KSU); } env->hflags &= ~MIPS_HFLAG_BMASK; if (env->CP0_Status & (1 << CP0St_BEV)) { env->active_tc.PC = (int32_t)0xBFC00200; } else { env->active_tc.PC = (int32_t)(env->CP0_EBase & ~0x3ff); } env->active_tc.PC += offset; set_hflags_for_handler(env); env->CP0_Cause = 
(env->CP0_Cause & ~(0x1f << CP0Ca_EC)) | (cause << CP0Ca_EC); break; default: abort(); } if (qemu_loglevel_mask(CPU_LOG_INT) && cs->exception_index != EXCP_EXT_INTERRUPT) { qemu_log("%s: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx " cause %d\n" " S %08x C %08x A " TARGET_FMT_lx " D " TARGET_FMT_lx "\n", __func__, env->active_tc.PC, env->CP0_EPC, cause, env->CP0_Status, env->CP0_Cause, env->CP0_BadVAddr, env->CP0_DEPC); } #endif cs->exception_index = EXCP_NONE; } | 18,241 |
0 | uri_resolve_relative (const char *uri, const char * base) { char *val = NULL; int ret; int ix; int pos = 0; int nbslash = 0; int len; URI *ref = NULL; URI *bas = NULL; char *bptr, *uptr, *vptr; int remove_path = 0; if ((uri == NULL) || (*uri == 0)) return NULL; /* * First parse URI into a standard form */ ref = uri_new (); /* If URI not already in "relative" form */ if (uri[0] != '.') { ret = uri_parse_into (ref, uri); if (ret != 0) goto done; /* Error in URI, return NULL */ } else ref->path = g_strdup(uri); /* * Next parse base into the same standard form */ if ((base == NULL) || (*base == 0)) { val = g_strdup (uri); goto done; } bas = uri_new (); if (base[0] != '.') { ret = uri_parse_into (bas, base); if (ret != 0) goto done; /* Error in base, return NULL */ } else bas->path = g_strdup(base); /* * If the scheme / server on the URI differs from the base, * just return the URI */ if ((ref->scheme != NULL) && ((bas->scheme == NULL) || (strcmp (bas->scheme, ref->scheme)) || (strcmp (bas->server, ref->server)))) { val = g_strdup (uri); goto done; } if (!strcmp(bas->path, ref->path)) { val = g_strdup(""); goto done; } if (bas->path == NULL) { val = g_strdup(ref->path); goto done; } if (ref->path == NULL) { ref->path = (char *) "/"; remove_path = 1; } /* * At this point (at last!) we can compare the two paths * * First we take care of the special case where either of the * two path components may be missing (bug 316224) */ if (bas->path == NULL) { if (ref->path != NULL) { uptr = ref->path; if (*uptr == '/') uptr++; /* exception characters from uri_to_string */ val = uri_string_escape(uptr, "/;&=+$,"); } goto done; } bptr = bas->path; if (ref->path == NULL) { for (ix = 0; bptr[ix] != 0; ix++) { if (bptr[ix] == '/') nbslash++; } uptr = NULL; len = 1; /* this is for a string terminator only */ } else { /* * Next we compare the two strings and find where they first differ */ if ((ref->path[pos] == '.') && (ref->path[pos+1] == '/')) pos += 2; if ((*bptr == '.') && (bptr[1] == '/')) bptr += 2; else if ((*bptr == '/') && (ref->path[pos] != '/')) bptr++; while ((bptr[pos] == ref->path[pos]) && (bptr[pos] != 0)) pos++; if (bptr[pos] == ref->path[pos]) { val = g_strdup(""); goto done; /* (I can't imagine why anyone would do this) */ } /* * In URI, "back up" to the last '/' encountered. 
This will be the * beginning of the "unique" suffix of URI */ ix = pos; if ((ref->path[ix] == '/') && (ix > 0)) ix--; else if ((ref->path[ix] == 0) && (ix > 1) && (ref->path[ix - 1] == '/')) ix -= 2; for (; ix > 0; ix--) { if (ref->path[ix] == '/') break; } if (ix == 0) { uptr = ref->path; } else { ix++; uptr = &ref->path[ix]; } /* * In base, count the number of '/' from the differing point */ if (bptr[pos] != ref->path[pos]) {/* check for trivial URI == base */ for (; bptr[ix] != 0; ix++) { if (bptr[ix] == '/') nbslash++; } } len = strlen (uptr) + 1; } if (nbslash == 0) { if (uptr != NULL) /* exception characters from uri_to_string */ val = uri_string_escape(uptr, "/;&=+$,"); goto done; } /* * Allocate just enough space for the returned string - * length of the remainder of the URI, plus enough space * for the "../" groups, plus one for the terminator */ val = g_malloc (len + 3 * nbslash); vptr = val; /* * Put in as many "../" as needed */ for (; nbslash>0; nbslash--) { *vptr++ = '.'; *vptr++ = '.'; *vptr++ = '/'; } /* * Finish up with the end of the URI */ if (uptr != NULL) { if ((vptr > val) && (len > 0) && (uptr[0] == '/') && (vptr[-1] == '/')) { memcpy (vptr, uptr + 1, len - 1); vptr[len - 2] = 0; } else { memcpy (vptr, uptr, len); vptr[len - 1] = 0; } } else { vptr[len - 1] = 0; } /* escape the freshly-built path */ vptr = val; /* exception characters from uri_to_string */ val = uri_string_escape(vptr, "/;&=+$,"); g_free(vptr); done: /* * Free the working variables */ if (remove_path != 0) ref->path = NULL; if (ref != NULL) uri_free (ref); if (bas != NULL) uri_free (bas); return val; } | 18,242 |
0 | static uint64_t omap_sti_fifo_read(void *opaque, target_phys_addr_t addr, unsigned size) { OMAP_BAD_REG(addr); return 0; } | 18,243 |
0 | static inline uint64_t hpet_calculate_diff(HPETTimer *t, uint64_t current) { if (t->config & HPET_TN_32BIT) { uint32_t diff, cmp; cmp = (uint32_t)t->cmp; diff = cmp - (uint32_t)current; diff = (int32_t)diff > 0 ? diff : (uint32_t)0; return (uint64_t)diff; } else { uint64_t diff, cmp; cmp = t->cmp; diff = cmp - current; diff = (int64_t)diff > 0 ? diff : (uint64_t)0; return diff; } } | 18,244 |
0 | static int mpc8_decode_frame(AVCodecContext * avctx, void *data, int *got_frame_ptr, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; MPCContext *c = avctx->priv_data; GetBitContext gb2, *gb = &gb2; int i, j, k, ch, cnt, res, t; Band *bands = c->bands; int off; int maxband, keyframe; int last[2]; /* get output buffer */ c->frame.nb_samples = MPC_FRAME_SIZE; if ((res = avctx->get_buffer(avctx, &c->frame)) < 0) { av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); return res; } keyframe = c->cur_frame == 0; if(keyframe){ memset(c->Q, 0, sizeof(c->Q)); c->last_bits_used = 0; } init_get_bits(gb, buf, buf_size * 8); skip_bits(gb, c->last_bits_used & 7); if(keyframe) maxband = mpc8_get_mod_golomb(gb, c->maxbands + 1); else{ maxband = c->last_max_band + get_vlc2(gb, band_vlc.table, MPC8_BANDS_BITS, 2); if(maxband > 32) maxband -= 33; } if(maxband > c->maxbands + 1 || maxband >= BANDS) { av_log(avctx, AV_LOG_ERROR, "maxband %d too large\n",maxband); return AVERROR_INVALIDDATA; } c->last_max_band = maxband; /* read subband indexes */ if(maxband){ last[0] = last[1] = 0; for(i = maxband - 1; i >= 0; i--){ for(ch = 0; ch < 2; ch++){ last[ch] = get_vlc2(gb, res_vlc[last[ch] > 2].table, MPC8_RES_BITS, 2) + last[ch]; if(last[ch] > 15) last[ch] -= 17; bands[i].res[ch] = last[ch]; } } if(c->MSS){ int mask; cnt = 0; for(i = 0; i < maxband; i++) if(bands[i].res[0] || bands[i].res[1]) cnt++; t = mpc8_get_mod_golomb(gb, cnt); mask = mpc8_get_mask(gb, cnt, t); for(i = maxband - 1; i >= 0; i--) if(bands[i].res[0] || bands[i].res[1]){ bands[i].msf = mask & 1; mask >>= 1; } } } for(i = maxband; i < c->maxbands; i++) bands[i].res[0] = bands[i].res[1] = 0; if(keyframe){ for(i = 0; i < 32; i++) c->oldDSCF[0][i] = c->oldDSCF[1][i] = 1; } for(i = 0; i < maxband; i++){ if(bands[i].res[0] || bands[i].res[1]){ cnt = !!bands[i].res[0] + !!bands[i].res[1] - 1; if(cnt >= 0){ t = get_vlc2(gb, scfi_vlc[cnt].table, scfi_vlc[cnt].bits, 1); if(bands[i].res[0]) bands[i].scfi[0] = t >> (2 * cnt); if(bands[i].res[1]) bands[i].scfi[1] = t & 3; } } } for(i = 0; i < maxband; i++){ for(ch = 0; ch < 2; ch++){ if(!bands[i].res[ch]) continue; if(c->oldDSCF[ch][i]){ bands[i].scf_idx[ch][0] = get_bits(gb, 7) - 6; c->oldDSCF[ch][i] = 0; }else{ t = get_vlc2(gb, dscf_vlc[1].table, MPC8_DSCF1_BITS, 2); if(t == 64) t += get_bits(gb, 6); bands[i].scf_idx[ch][0] = ((bands[i].scf_idx[ch][2] + t - 25) & 0x7F) - 6; } for(j = 0; j < 2; j++){ if((bands[i].scfi[ch] << j) & 2) bands[i].scf_idx[ch][j + 1] = bands[i].scf_idx[ch][j]; else{ t = get_vlc2(gb, dscf_vlc[0].table, MPC8_DSCF0_BITS, 2); if(t == 31) t = 64 + get_bits(gb, 6); bands[i].scf_idx[ch][j + 1] = ((bands[i].scf_idx[ch][j] + t - 25) & 0x7F) - 6; } } } } for(i = 0, off = 0; i < maxband; i++, off += SAMPLES_PER_BAND){ for(ch = 0; ch < 2; ch++){ res = bands[i].res[ch]; switch(res){ case -1: for(j = 0; j < SAMPLES_PER_BAND; j++) c->Q[ch][off + j] = (av_lfg_get(&c->rnd) & 0x3FC) - 510; break; case 0: break; case 1: for(j = 0; j < SAMPLES_PER_BAND; j += SAMPLES_PER_BAND / 2){ cnt = get_vlc2(gb, q1_vlc.table, MPC8_Q1_BITS, 2); t = mpc8_get_mask(gb, 18, cnt); for(k = 0; k < SAMPLES_PER_BAND / 2; k++, t <<= 1) c->Q[ch][off + j + k] = (t & 0x20000) ? 
(get_bits1(gb) << 1) - 1 : 0; } break; case 2: cnt = 6;//2*mpc8_thres[res] for(j = 0; j < SAMPLES_PER_BAND; j += 3){ t = get_vlc2(gb, q2_vlc[cnt > 3].table, MPC8_Q2_BITS, 2); c->Q[ch][off + j + 0] = mpc8_idx50[t]; c->Q[ch][off + j + 1] = mpc8_idx51[t]; c->Q[ch][off + j + 2] = mpc8_idx52[t]; cnt = (cnt >> 1) + mpc8_huffq2[t]; } break; case 3: case 4: for(j = 0; j < SAMPLES_PER_BAND; j += 2){ t = get_vlc2(gb, q3_vlc[res - 3].table, MPC8_Q3_BITS, 2) + q3_offsets[res - 3]; c->Q[ch][off + j + 1] = t >> 4; c->Q[ch][off + j + 0] = (t & 8) ? (t & 0xF) - 16 : (t & 0xF); } break; case 5: case 6: case 7: case 8: cnt = 2 * mpc8_thres[res]; for(j = 0; j < SAMPLES_PER_BAND; j++){ t = get_vlc2(gb, quant_vlc[res - 5][cnt > mpc8_thres[res]].table, quant_vlc[res - 5][cnt > mpc8_thres[res]].bits, 2) + quant_offsets[res - 5]; c->Q[ch][off + j] = t; cnt = (cnt >> 1) + FFABS(c->Q[ch][off + j]); } break; default: for(j = 0; j < SAMPLES_PER_BAND; j++){ c->Q[ch][off + j] = get_vlc2(gb, q9up_vlc.table, MPC8_Q9UP_BITS, 2); if(res != 9){ c->Q[ch][off + j] <<= res - 9; c->Q[ch][off + j] |= get_bits(gb, res - 9); } c->Q[ch][off + j] -= (1 << (res - 2)) - 1; } } } } ff_mpc_dequantize_and_synth(c, maxband - 1, c->frame.data[0], avctx->channels); c->cur_frame++; c->last_bits_used = get_bits_count(gb); if(c->cur_frame >= c->frames) c->cur_frame = 0; *got_frame_ptr = 1; *(AVFrame *)data = c->frame; return c->cur_frame ? c->last_bits_used >> 3 : buf_size; } | 18,245 |
0 | int configure_accelerator(MachineState *ms) { const char *p; char buf[10]; int ret; bool accel_initialised = false; bool init_failed = false; AccelClass *acc = NULL; p = qemu_opt_get(qemu_get_machine_opts(), "accel"); if (p == NULL) { /* Use the default "accelerator", tcg */ p = "tcg"; } while (!accel_initialised && *p != '\0') { if (*p == ':') { p++; } p = get_opt_name(buf, sizeof(buf), p, ':'); acc = accel_find(buf); if (!acc) { fprintf(stderr, "\"%s\" accelerator not found.\n", buf); continue; } if (acc->available && !acc->available()) { printf("%s not supported for this target\n", acc->name); continue; } ret = accel_init_machine(acc, ms); if (ret < 0) { init_failed = true; fprintf(stderr, "failed to initialize %s: %s\n", acc->name, strerror(-ret)); } else { accel_initialised = true; } } if (!accel_initialised) { if (!init_failed) { fprintf(stderr, "No accelerator found!\n"); } exit(1); } if (init_failed) { fprintf(stderr, "Back to %s accelerator.\n", acc->name); } return !accel_initialised; } | 18,246 |
0 | int bdrv_snapshot_goto(BlockDriverState *bs, const char *snapshot_id, Error **errp) { BlockDriver *drv = bs->drv; int ret, open_ret; int64_t len; if (!drv) { error_setg(errp, "Block driver is closed"); return -ENOMEDIUM; } len = bdrv_getlength(bs); if (len < 0) { error_setg_errno(errp, -len, "Cannot get block device size"); return len; } /* We should set all bits in all enabled dirty bitmaps, because dirty * bitmaps reflect active state of disk and snapshot switch operation * actually dirties active state. * TODO: It may make sense not to set all bits but analyze block status of * current state and destination snapshot and do not set bits corresponding * to both-zero or both-unallocated areas. */ bdrv_set_dirty(bs, 0, len); if (drv->bdrv_snapshot_goto) { ret = drv->bdrv_snapshot_goto(bs, snapshot_id); if (ret < 0) { error_setg_errno(errp, -ret, "Failed to load snapshot"); } return ret; } if (bs->file) { BlockDriverState *file; QDict *options = qdict_clone_shallow(bs->options); QDict *file_options; Error *local_err = NULL; file = bs->file->bs; /* Prevent it from getting deleted when detached from bs */ bdrv_ref(file); qdict_extract_subqdict(options, &file_options, "file."); QDECREF(file_options); qdict_put_str(options, "file", bdrv_get_node_name(file)); drv->bdrv_close(bs); bdrv_unref_child(bs, bs->file); bs->file = NULL; ret = bdrv_snapshot_goto(file, snapshot_id, errp); open_ret = drv->bdrv_open(bs, options, bs->open_flags, &local_err); QDECREF(options); if (open_ret < 0) { bdrv_unref(file); bs->drv = NULL; /* A bdrv_snapshot_goto() error takes precedence */ error_propagate(errp, local_err); return ret < 0 ? ret : open_ret; } assert(bs->file->bs == file); bdrv_unref(file); return ret; } error_setg(errp, "Block driver does not support snapshots"); return -ENOTSUP; } | 18,247 |
0 | static void test_qemu_strtoull_negative(void) { const char *str = " \t -321"; char f = 'X'; const char *endptr = &f; uint64_t res = 999; int err; err = qemu_strtoull(str, &endptr, 0, &res); g_assert_cmpint(err, ==, 0); g_assert_cmpint(res, ==, -321); g_assert(endptr == str + strlen(str)); } | 18,248 |
0 | static void omap_clkdsp_write(void *opaque, target_phys_addr_t addr, uint64_t value, unsigned size) { struct omap_mpu_state_s *s = (struct omap_mpu_state_s *) opaque; uint16_t diff; if (size != 2) { return omap_badwidth_write16(opaque, addr, value); } switch (addr) { case 0x04: /* DSP_IDLECT1 */ diff = s->clkm.dsp_idlect1 ^ value; s->clkm.dsp_idlect1 = value & 0x01f7; omap_clkdsp_idlect1_update(s, diff, value); break; case 0x08: /* DSP_IDLECT2 */ s->clkm.dsp_idlect2 = value & 0x0037; diff = s->clkm.dsp_idlect1 ^ value; omap_clkdsp_idlect2_update(s, diff, value); break; case 0x14: /* DSP_RSTCT2 */ s->clkm.dsp_rstct2 = value & 0x0001; break; case 0x18: /* DSP_SYSST */ s->clkm.cold_start &= value & 0x3f; break; default: OMAP_BAD_REG(addr); } } | 18,249 |
0 | uint64_t helper_frsqrte (uint64_t arg) { CPU_DoubleU fone, farg; fone.ll = 0x3FF0000000000000ULL; /* 1.0 */ farg.ll = arg; if (unlikely(float64_is_signaling_nan(farg.d))) { /* sNaN reciprocal square root */ farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN); } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) { /* Reciprocal square root of a negative nonzero number */ farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT); } else if (likely(isnormal(farg.d))) { farg.d = float64_sqrt(farg.d, &env->fp_status); farg.d = float32_div(fone.d, farg.d, &env->fp_status); } else { if (farg.ll == 0x8000000000000000ULL) { farg.ll = 0xFFF0000000000000ULL; } else if (farg.ll == 0x0000000000000000ULL) { farg.ll = 0x7FF0000000000000ULL; } else if (float64_is_nan(farg.d)) { farg.ll |= 0x000FFFFFFFFFFFFFULL; } else if (float64_is_neg(farg.d)) { farg.ll = 0x7FF8000000000000ULL; } else { farg.ll = 0x0000000000000000ULL; } } return farg.ll; } | 18,250 |
0 | static void gen_compute_compact_branch(DisasContext *ctx, uint32_t opc, int rs, int rt, int32_t offset) { int bcond_compute = 0; TCGv t0 = tcg_temp_new(); TCGv t1 = tcg_temp_new(); if (ctx->hflags & MIPS_HFLAG_BMASK) { #ifdef MIPS_DEBUG_DISAS LOG_DISAS("Branch in delay / forbidden slot at PC 0x" TARGET_FMT_lx "\n", ctx->pc); #endif generate_exception(ctx, EXCP_RI); goto out; } /* Load needed operands and calculate btarget */ switch (opc) { /* compact branch */ case OPC_BOVC: /* OPC_BEQZALC, OPC_BEQC */ case OPC_BNVC: /* OPC_BNEZALC, OPC_BNEC */ gen_load_gpr(t0, rs); gen_load_gpr(t1, rt); bcond_compute = 1; ctx->btarget = addr_add(ctx, ctx->pc + 4, offset); if (rs <= rt && rs == 0) { /* OPC_BEQZALC, OPC_BNEZALC */ tcg_gen_movi_tl(cpu_gpr[31], ctx->pc + 4); } break; case OPC_BLEZC: /* OPC_BGEZC, OPC_BGEC */ case OPC_BGTZC: /* OPC_BLTZC, OPC_BLTC */ gen_load_gpr(t0, rs); gen_load_gpr(t1, rt); bcond_compute = 1; ctx->btarget = addr_add(ctx, ctx->pc + 4, offset); break; case OPC_BLEZALC: /* OPC_BGEZALC, OPC_BGEUC */ case OPC_BGTZALC: /* OPC_BLTZALC, OPC_BLTUC */ if (rs == 0 || rs == rt) { /* OPC_BLEZALC, OPC_BGEZALC */ /* OPC_BGTZALC, OPC_BLTZALC */ tcg_gen_movi_tl(cpu_gpr[31], ctx->pc + 4); } gen_load_gpr(t0, rs); gen_load_gpr(t1, rt); bcond_compute = 1; ctx->btarget = addr_add(ctx, ctx->pc + 4, offset); break; case OPC_BC: case OPC_BALC: ctx->btarget = addr_add(ctx, ctx->pc + 4, offset); break; case OPC_BEQZC: case OPC_BNEZC: if (rs != 0) { /* OPC_BEQZC, OPC_BNEZC */ gen_load_gpr(t0, rs); bcond_compute = 1; ctx->btarget = addr_add(ctx, ctx->pc + 4, offset); } else { /* OPC_JIC, OPC_JIALC */ TCGv tbase = tcg_temp_new(); TCGv toffset = tcg_temp_new(); gen_load_gpr(tbase, rt); tcg_gen_movi_tl(toffset, offset); gen_op_addr_add(ctx, btarget, tbase, toffset); tcg_temp_free(tbase); tcg_temp_free(toffset); } break; default: MIPS_INVAL("Compact branch/jump"); generate_exception(ctx, EXCP_RI); goto out; } if (bcond_compute == 0) { /* Uncoditional compact branch */ switch (opc) { case OPC_JIALC: tcg_gen_movi_tl(cpu_gpr[31], ctx->pc + 4); /* Fallthrough */ case OPC_JIC: ctx->hflags |= MIPS_HFLAG_BR; break; case OPC_BALC: tcg_gen_movi_tl(cpu_gpr[31], ctx->pc + 4); /* Fallthrough */ case OPC_BC: ctx->hflags |= MIPS_HFLAG_B; break; default: MIPS_INVAL("Compact branch/jump"); generate_exception(ctx, EXCP_RI); goto out; } /* Generating branch here as compact branches don't have delay slot */ gen_branch(ctx, 4); } else { /* Conditional compact branch */ int fs = gen_new_label(); save_cpu_state(ctx, 0); switch (opc) { case OPC_BLEZALC: /* OPC_BGEZALC, OPC_BGEUC */ if (rs == 0 && rt != 0) { /* OPC_BLEZALC */ tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_LE), t1, 0, fs); } else if (rs != 0 && rt != 0 && rs == rt) { /* OPC_BGEZALC */ tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_GE), t1, 0, fs); } else { /* OPC_BGEUC */ tcg_gen_brcond_tl(tcg_invert_cond(TCG_COND_GEU), t0, t1, fs); } break; case OPC_BGTZALC: /* OPC_BLTZALC, OPC_BLTUC */ if (rs == 0 && rt != 0) { /* OPC_BGTZALC */ tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_GT), t1, 0, fs); } else if (rs != 0 && rt != 0 && rs == rt) { /* OPC_BLTZALC */ tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_LT), t1, 0, fs); } else { /* OPC_BLTUC */ tcg_gen_brcond_tl(tcg_invert_cond(TCG_COND_LTU), t0, t1, fs); } break; case OPC_BLEZC: /* OPC_BGEZC, OPC_BGEC */ if (rs == 0 && rt != 0) { /* OPC_BLEZC */ tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_LE), t1, 0, fs); } else if (rs != 0 && rt != 0 && rs == rt) { /* OPC_BGEZC */ tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_GE), t1, 0, 
fs); } else { /* OPC_BGEC */ tcg_gen_brcond_tl(tcg_invert_cond(TCG_COND_GE), t0, t1, fs); } break; case OPC_BGTZC: /* OPC_BLTZC, OPC_BLTC */ if (rs == 0 && rt != 0) { /* OPC_BGTZC */ tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_GT), t1, 0, fs); } else if (rs != 0 && rt != 0 && rs == rt) { /* OPC_BLTZC */ tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_LT), t1, 0, fs); } else { /* OPC_BLTC */ tcg_gen_brcond_tl(tcg_invert_cond(TCG_COND_LT), t0, t1, fs); } break; case OPC_BOVC: /* OPC_BEQZALC, OPC_BEQC */ case OPC_BNVC: /* OPC_BNEZALC, OPC_BNEC */ if (rs >= rt) { /* OPC_BOVC, OPC_BNVC */ TCGv t2 = tcg_temp_new(); TCGv t3 = tcg_temp_new(); TCGv t4 = tcg_temp_new(); TCGv input_overflow = tcg_temp_new(); gen_load_gpr(t0, rs); gen_load_gpr(t1, rt); tcg_gen_ext32s_tl(t2, t0); tcg_gen_setcond_tl(TCG_COND_NE, input_overflow, t2, t0); tcg_gen_ext32s_tl(t3, t1); tcg_gen_setcond_tl(TCG_COND_NE, t4, t3, t1); tcg_gen_or_tl(input_overflow, input_overflow, t4); tcg_gen_add_tl(t4, t2, t3); tcg_gen_ext32s_tl(t4, t4); tcg_gen_xor_tl(t2, t2, t3); tcg_gen_xor_tl(t3, t4, t3); tcg_gen_andc_tl(t2, t3, t2); tcg_gen_setcondi_tl(TCG_COND_LT, t4, t2, 0); tcg_gen_or_tl(t4, t4, input_overflow); if (opc == OPC_BOVC) { /* OPC_BOVC */ tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_NE), t4, 0, fs); } else { /* OPC_BNVC */ tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_EQ), t4, 0, fs); } tcg_temp_free(input_overflow); tcg_temp_free(t4); tcg_temp_free(t3); tcg_temp_free(t2); } else if (rs < rt && rs == 0) { /* OPC_BEQZALC, OPC_BNEZALC */ if (opc == OPC_BEQZALC) { /* OPC_BEQZALC */ tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_EQ), t1, 0, fs); } else { /* OPC_BNEZALC */ tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_NE), t1, 0, fs); } } else { /* OPC_BEQC, OPC_BNEC */ if (opc == OPC_BEQC) { /* OPC_BEQC */ tcg_gen_brcond_tl(tcg_invert_cond(TCG_COND_EQ), t0, t1, fs); } else { /* OPC_BNEC */ tcg_gen_brcond_tl(tcg_invert_cond(TCG_COND_NE), t0, t1, fs); } } break; case OPC_BEQZC: tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_EQ), t0, 0, fs); break; case OPC_BNEZC: tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_NE), t0, 0, fs); break; default: MIPS_INVAL("Compact conditional branch/jump"); generate_exception(ctx, EXCP_RI); goto out; } /* Generating branch here as compact branches don't have delay slot */ gen_goto_tb(ctx, 1, ctx->btarget); gen_set_label(fs); ctx->hflags |= MIPS_HFLAG_FBNSLOT; MIPS_DEBUG("Compact conditional branch"); } out: tcg_temp_free(t0); tcg_temp_free(t1); } | 18,251 |
0 | int qemu_chr_fe_get_msgfd(CharDriverState *s) { int fd; return (qemu_chr_fe_get_msgfds(s, &fd, 1) == 1) ? fd : -1; } | 18,252 |
0 | static void vnc_client_write_locked(void *opaque) { VncState *vs = opaque; #ifdef CONFIG_VNC_SASL if (vs->sasl.conn && vs->sasl.runSSF && !vs->sasl.waitWriteSSF) { vnc_client_write_sasl(vs); } else #endif /* CONFIG_VNC_SASL */ { #ifdef CONFIG_VNC_WS if (vs->encode_ws) { vnc_client_write_ws(vs); } else #endif /* CONFIG_VNC_WS */ { vnc_client_write_plain(vs); } } } | 18,253 |
0 | iscsi_aio_write16_cb(struct iscsi_context *iscsi, int status, void *command_data, void *opaque) { IscsiAIOCB *acb = opaque; trace_iscsi_aio_write16_cb(iscsi, status, acb, acb->canceled); g_free(acb->buf); acb->buf = NULL; if (acb->canceled != 0) { return; } acb->status = 0; if (status < 0) { error_report("Failed to write16 data to iSCSI lun. %s", iscsi_get_error(iscsi)); acb->status = -EIO; } iscsi_schedule_bh(acb); } | 18,255 |
0 | static void spr_write_40x_sler (void *opaque, int sprn) { DisasContext *ctx = opaque; gen_op_store_40x_sler(); /* We must stop the translation as we may have changed * some regions endianness */ RET_STOP(ctx); } | 18,256 |
0 | static void sclp_execute(SCLPDevice *sclp, SCCB *sccb, uint32_t code) { SCLPDeviceClass *sclp_c = SCLP_GET_CLASS(sclp); SCLPEventFacility *ef = sclp->event_facility; SCLPEventFacilityClass *efc = EVENT_FACILITY_GET_CLASS(ef); switch (code & SCLP_CMD_CODE_MASK) { case SCLP_CMDW_READ_SCP_INFO: case SCLP_CMDW_READ_SCP_INFO_FORCED: sclp_c->read_SCP_info(sclp, sccb); break; case SCLP_CMDW_READ_CPU_INFO: sclp_c->read_cpu_info(sclp, sccb); break; case SCLP_READ_STORAGE_ELEMENT_INFO: if (code & 0xff00) { sclp_c->read_storage_element1_info(sclp, sccb); } else { sclp_c->read_storage_element0_info(sclp, sccb); } break; case SCLP_ATTACH_STORAGE_ELEMENT: sclp_c->attach_storage_element(sclp, sccb, (code & 0xff00) >> 8); break; case SCLP_ASSIGN_STORAGE: sclp_c->assign_storage(sclp, sccb); break; case SCLP_UNASSIGN_STORAGE: sclp_c->unassign_storage(sclp, sccb); break; case SCLP_CMDW_CONFIGURE_PCI: s390_pci_sclp_configure(sccb); break; case SCLP_CMDW_DECONFIGURE_PCI: s390_pci_sclp_deconfigure(sccb); break; default: efc->command_handler(ef, sccb, code); break; } } | 18,258 |
0 | static void smbios_build_type_0_fields(const char *t) { char buf[1024]; unsigned char major, minor; if (get_param_value(buf, sizeof(buf), "vendor", t)) smbios_add_field(0, offsetof(struct smbios_type_0, vendor_str), buf, strlen(buf) + 1); if (get_param_value(buf, sizeof(buf), "version", t)) smbios_add_field(0, offsetof(struct smbios_type_0, bios_version_str), buf, strlen(buf) + 1); if (get_param_value(buf, sizeof(buf), "date", t)) smbios_add_field(0, offsetof(struct smbios_type_0, bios_release_date_str), buf, strlen(buf) + 1); if (get_param_value(buf, sizeof(buf), "release", t)) { sscanf(buf, "%hhu.%hhu", &major, &minor); smbios_add_field(0, offsetof(struct smbios_type_0, system_bios_major_release), &major, 1); smbios_add_field(0, offsetof(struct smbios_type_0, system_bios_minor_release), &minor, 1); } } | 18,259 |
0 | truncate_f(int argc, char **argv) { int64_t offset; int ret; offset = cvtnum(argv[1]); if (offset < 0) { printf("non-numeric truncate argument -- %s\n", argv[1]); return 0; } ret = bdrv_truncate(bs, offset); if (ret < 0) { printf("truncate: %s", strerror(ret)); return 0; } return 0; } | 18,260 |
0 | static inline void *host_from_stream_offset(QEMUFile *f, ram_addr_t offset, int flags) { static RAMBlock *block = NULL; char id[256]; uint8_t len; if (flags & RAM_SAVE_FLAG_CONTINUE) { if (!block || block->max_length <= offset) { error_report("Ack, bad migration stream!"); return NULL; } return block->host + offset; } len = qemu_get_byte(f); qemu_get_buffer(f, (uint8_t *)id, len); id[len] = 0; QLIST_FOREACH_RCU(block, &ram_list.blocks, next) { if (!strncmp(id, block->idstr, sizeof(id)) && block->max_length > offset) { return block->host + offset; } } error_report("Can't find block %s!", id); return NULL; } | 18,261 |
0 | int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar) { CPUS390XState *env = &cpu->env; uint8_t oc, dmaas; uint32_t fh; ZpciFib fib; S390PCIBusDevice *pbdev; uint64_t cc = ZPCI_PCI_LS_OK; if (env->psw.mask & PSW_MASK_PSTATE) { program_interrupt(env, PGM_PRIVILEGED, 6); return 0; } oc = env->regs[r1] & 0xff; dmaas = (env->regs[r1] >> 16) & 0xff; fh = env->regs[r1] >> 32; if (fiba & 0x7) { program_interrupt(env, PGM_SPECIFICATION, 6); return 0; } pbdev = s390_pci_find_dev_by_fh(fh); if (!pbdev || !(pbdev->fh & FH_MASK_ENABLE)) { DPRINTF("mpcifc no pci dev fh 0x%x\n", fh); setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE); return 0; } if (s390_cpu_virt_mem_read(cpu, fiba, ar, (uint8_t *)&fib, sizeof(fib))) { return 0; } if (fib.fmt != 0) { program_interrupt(env, PGM_OPERAND, 6); return 0; } switch (oc) { case ZPCI_MOD_FC_REG_INT: if (pbdev->summary_ind) { cc = ZPCI_PCI_LS_ERR; s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE); } else if (reg_irqs(env, pbdev, fib)) { cc = ZPCI_PCI_LS_ERR; s390_set_status_code(env, r1, ZPCI_MOD_ST_RES_NOT_AVAIL); } break; case ZPCI_MOD_FC_DEREG_INT: if (!pbdev->summary_ind) { cc = ZPCI_PCI_LS_ERR; s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE); } else { pci_dereg_irqs(pbdev); } break; case ZPCI_MOD_FC_REG_IOAT: if (dmaas != 0) { cc = ZPCI_PCI_LS_ERR; s390_set_status_code(env, r1, ZPCI_MOD_ST_DMAAS_INVAL); } else if (pbdev->iommu_enabled) { cc = ZPCI_PCI_LS_ERR; s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE); } else if (reg_ioat(env, pbdev, fib)) { cc = ZPCI_PCI_LS_ERR; s390_set_status_code(env, r1, ZPCI_MOD_ST_INSUF_RES); } break; case ZPCI_MOD_FC_DEREG_IOAT: if (dmaas != 0) { cc = ZPCI_PCI_LS_ERR; s390_set_status_code(env, r1, ZPCI_MOD_ST_DMAAS_INVAL); } else if (!pbdev->iommu_enabled) { cc = ZPCI_PCI_LS_ERR; s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE); } else { pci_dereg_ioat(pbdev); } break; case ZPCI_MOD_FC_REREG_IOAT: if (dmaas != 0) { cc = ZPCI_PCI_LS_ERR; s390_set_status_code(env, r1, ZPCI_MOD_ST_DMAAS_INVAL); } else if (!pbdev->iommu_enabled) { cc = ZPCI_PCI_LS_ERR; s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE); } else { pci_dereg_ioat(pbdev); if (reg_ioat(env, pbdev, fib)) { cc = ZPCI_PCI_LS_ERR; s390_set_status_code(env, r1, ZPCI_MOD_ST_INSUF_RES); } } break; case ZPCI_MOD_FC_RESET_ERROR: pbdev->error_state = false; pbdev->lgstg_blocked = false; break; case ZPCI_MOD_FC_RESET_BLOCK: pbdev->lgstg_blocked = false; break; case ZPCI_MOD_FC_SET_MEASURE: pbdev->fmb_addr = ldq_p(&fib.fmb_addr); break; default: program_interrupt(&cpu->env, PGM_OPERAND, 6); cc = ZPCI_PCI_LS_ERR; } setcc(cpu, cc); return 0; } | 18,262 |
0 | static void s390_cpu_plug(HotplugHandler *hotplug_dev, DeviceState *dev, Error **errp) { gchar *name; S390CPU *cpu = S390_CPU(dev); CPUState *cs = CPU(dev); name = g_strdup_printf("cpu[%i]", cpu->env.cpu_num); object_property_set_link(OBJECT(hotplug_dev), OBJECT(cs), name, errp); g_free(name); } | 18,263 |
0 | static void gen_maskg(DisasContext *ctx) { int l1 = gen_new_label(); TCGv t0 = tcg_temp_new(); TCGv t1 = tcg_temp_new(); TCGv t2 = tcg_temp_new(); TCGv t3 = tcg_temp_new(); tcg_gen_movi_tl(t3, 0xFFFFFFFF); tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1F); tcg_gen_andi_tl(t1, cpu_gpr[rS(ctx->opcode)], 0x1F); tcg_gen_addi_tl(t2, t0, 1); tcg_gen_shr_tl(t2, t3, t2); tcg_gen_shr_tl(t3, t3, t1); tcg_gen_xor_tl(cpu_gpr[rA(ctx->opcode)], t2, t3); tcg_gen_brcond_tl(TCG_COND_GE, t0, t1, l1); tcg_gen_neg_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); gen_set_label(l1); tcg_temp_free(t0); tcg_temp_free(t1); tcg_temp_free(t2); tcg_temp_free(t3); if (unlikely(Rc(ctx->opcode) != 0)) gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); } | 18,264 |
0 | uint64_t qcow2_get_cluster_offset(BlockDriverState *bs, uint64_t offset, int *num) { BDRVQcowState *s = bs->opaque; unsigned int l1_index, l2_index; uint64_t l2_offset, *l2_table, cluster_offset; int l1_bits, c; unsigned int index_in_cluster, nb_clusters; uint64_t nb_available, nb_needed; index_in_cluster = (offset >> 9) & (s->cluster_sectors - 1); nb_needed = *num + index_in_cluster; l1_bits = s->l2_bits + s->cluster_bits; /* compute how many bytes there are between the offset and * the end of the l1 entry */ nb_available = (1ULL << l1_bits) - (offset & ((1ULL << l1_bits) - 1)); /* compute the number of available sectors */ nb_available = (nb_available >> 9) + index_in_cluster; if (nb_needed > nb_available) { nb_needed = nb_available; } cluster_offset = 0; /* seek the the l2 offset in the l1 table */ l1_index = offset >> l1_bits; if (l1_index >= s->l1_size) goto out; l2_offset = s->l1_table[l1_index]; /* seek the l2 table of the given l2 offset */ if (!l2_offset) goto out; /* load the l2 table in memory */ l2_offset &= ~QCOW_OFLAG_COPIED; l2_table = l2_load(bs, l2_offset); if (l2_table == NULL) return 0; /* find the cluster offset for the given disk offset */ l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1); cluster_offset = be64_to_cpu(l2_table[l2_index]); nb_clusters = size_to_clusters(s, nb_needed << 9); if (!cluster_offset) { /* how many empty clusters ? */ c = count_contiguous_free_clusters(nb_clusters, &l2_table[l2_index]); } else { /* how many allocated clusters ? */ c = count_contiguous_clusters(nb_clusters, s->cluster_size, &l2_table[l2_index], 0, QCOW_OFLAG_COPIED); } nb_available = (c * s->cluster_sectors); out: if (nb_available > nb_needed) nb_available = nb_needed; *num = nb_available - index_in_cluster; return cluster_offset & ~QCOW_OFLAG_COPIED; } | 18,265 |
0 | static bool lowprot_enabled(const CPUS390XState *env) { if (!(env->cregs[0] & CR0_LOWPROT)) { return false; } if (!(env->psw.mask & PSW_MASK_DAT)) { return true; } /* Check the private-space control bit */ switch (env->psw.mask & PSW_MASK_ASC) { case PSW_ASC_PRIMARY: return !(env->cregs[1] & _ASCE_PRIVATE_SPACE); case PSW_ASC_SECONDARY: return !(env->cregs[7] & _ASCE_PRIVATE_SPACE); case PSW_ASC_HOME: return !(env->cregs[13] & _ASCE_PRIVATE_SPACE); default: /* We don't support access register mode */ error_report("unsupported addressing mode"); exit(1); } } | 18,266 |
0 | static int net_slirp_init(VLANState *vlan, const char *model, const char *name, int restricted, const char *ip) { if (slirp_in_use) { /* slirp only supports a single instance so far */ return -1; } if (!slirp_inited) { slirp_inited = 1; slirp_init(restricted, ip); while (slirp_redirs) { struct slirp_config_str *config = slirp_redirs; slirp_redirection(NULL, config->str); slirp_redirs = config->next; qemu_free(config); } #ifndef _WIN32 if (slirp_smb_export) { slirp_smb(slirp_smb_export); } #endif } slirp_vc = qemu_new_vlan_client(vlan, model, name, NULL, slirp_receive, NULL, net_slirp_cleanup, NULL); slirp_vc->info_str[0] = '\0'; slirp_in_use = 1; return 0; } | 18,269 |
0 | static uint64_t hpdmc_read(void *opaque, target_phys_addr_t addr, unsigned size) { MilkymistHpdmcState *s = opaque; uint32_t r = 0; addr >>= 2; switch (addr) { case R_SYSTEM: case R_BYPASS: case R_TIMING: case R_IODELAY: r = s->regs[addr]; break; default: error_report("milkymist_hpdmc: read access to unknown register 0x" TARGET_FMT_plx, addr << 2); break; } trace_milkymist_hpdmc_memory_read(addr << 2, r); return r; } | 18,270 |
0 | int qemu_init_main_loop(Error **errp) { int ret; GSource *src; Error *local_error = NULL; init_clocks(); ret = qemu_signal_init(); if (ret) { return ret; } qemu_aio_context = aio_context_new(&local_error); if (!qemu_aio_context) { error_propagate(errp, local_error); return -EMFILE; } qemu_notify_bh = qemu_bh_new(notify_event_cb, NULL); gpollfds = g_array_new(FALSE, FALSE, sizeof(GPollFD)); src = aio_get_g_source(qemu_aio_context); g_source_set_name(src, "aio-context"); g_source_attach(src, NULL); g_source_unref(src); src = iohandler_get_g_source(); g_source_set_name(src, "io-handler"); g_source_attach(src, NULL); g_source_unref(src); return 0; } | 18,271 |
0 | static int os_host_main_loop_wait(uint32_t timeout) { int ret; glib_select_fill(&nfds, &rfds, &wfds, &xfds, &timeout); if (timeout > 0) { qemu_mutex_unlock_iothread(); } /* We'll eventually drop fd_set completely. But for now we still have * *_fill() and *_poll() functions that use rfds/wfds/xfds. */ gpollfds_from_select(); ret = g_poll((GPollFD *)gpollfds->data, gpollfds->len, timeout); gpollfds_to_select(ret); if (timeout > 0) { qemu_mutex_lock_iothread(); } glib_select_poll(&rfds, &wfds, &xfds, (ret < 0)); return ret; } | 18,273 |
0 | static int cris_mmu_segmented_addr(int seg, uint32_t rw_mm_cfg) { return (1 << seg) & rw_mm_cfg; } | 18,274 |
0 | static uint64_t pfpu_read(void *opaque, target_phys_addr_t addr, unsigned size) { MilkymistPFPUState *s = opaque; uint32_t r = 0; addr >>= 2; switch (addr) { case R_CTL: case R_MESHBASE: case R_HMESHLAST: case R_VMESHLAST: case R_CODEPAGE: case R_VERTICES: case R_COLLISIONS: case R_STRAYWRITES: case R_LASTDMA: case R_PC: case R_DREGBASE: case R_CODEBASE: r = s->regs[addr]; break; case GPR_BEGIN ... GPR_END: r = s->gp_regs[addr - GPR_BEGIN]; break; case MICROCODE_BEGIN ... MICROCODE_END: r = s->microcode[get_microcode_address(s, addr)]; break; default: error_report("milkymist_pfpu: read access to unknown register 0x" TARGET_FMT_plx, addr << 2); break; } trace_milkymist_pfpu_memory_read(addr << 2, r); return r; } | 18,275 |
0 | static void qdev_reset(void *opaque) { DeviceState *dev = opaque; if (dev->info->reset) dev->info->reset(dev); } | 18,276 |
0 | static void disas_cond_b_imm(DisasContext *s, uint32_t insn) { unsigned int cond; uint64_t addr; if ((insn & (1 << 4)) || (insn & (1 << 24))) { unallocated_encoding(s); return; } addr = s->pc + sextract32(insn, 5, 19) * 4 - 4; cond = extract32(insn, 0, 4); if (cond < 0x0e) { /* genuinely conditional branches */ int label_match = gen_new_label(); arm_gen_test_cc(cond, label_match); gen_goto_tb(s, 0, s->pc); gen_set_label(label_match); gen_goto_tb(s, 1, addr); } else { /* 0xe and 0xf are both "always" conditions */ gen_goto_tb(s, 0, addr); } } | 18,277 |
0 | void tcg_gen_atomic_cmpxchg_i64(TCGv_i64 retv, TCGv addr, TCGv_i64 cmpv, TCGv_i64 newv, TCGArg idx, TCGMemOp memop) { memop = tcg_canonicalize_memop(memop, 1, 0); if (!parallel_cpus) { TCGv_i64 t1 = tcg_temp_new_i64(); TCGv_i64 t2 = tcg_temp_new_i64(); tcg_gen_ext_i64(t2, cmpv, memop & MO_SIZE); tcg_gen_qemu_ld_i64(t1, addr, idx, memop & ~MO_SIGN); tcg_gen_movcond_i64(TCG_COND_EQ, t2, t1, t2, newv, t1); tcg_gen_qemu_st_i64(t2, addr, idx, memop); tcg_temp_free_i64(t2); if (memop & MO_SIGN) { tcg_gen_ext_i64(retv, t1, memop); } else { tcg_gen_mov_i64(retv, t1); } tcg_temp_free_i64(t1); } else if ((memop & MO_SIZE) == MO_64) { #ifdef CONFIG_ATOMIC64 gen_atomic_cx_i64 gen; gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)]; tcg_debug_assert(gen != NULL); #ifdef CONFIG_SOFTMMU { TCGv_i32 oi = tcg_const_i32(make_memop_idx(memop, idx)); gen(retv, tcg_ctx.tcg_env, addr, cmpv, newv, oi); tcg_temp_free_i32(oi); } #else gen(retv, tcg_ctx.tcg_env, addr, cmpv, newv); #endif #else gen_helper_exit_atomic(tcg_ctx.tcg_env); /* Produce a result, so that we have a well-formed opcode stream with respect to uses of the result in the (dead) code following. */ tcg_gen_movi_i64(retv, 0); #endif /* CONFIG_ATOMIC64 */ } else { TCGv_i32 c32 = tcg_temp_new_i32(); TCGv_i32 n32 = tcg_temp_new_i32(); TCGv_i32 r32 = tcg_temp_new_i32(); tcg_gen_extrl_i64_i32(c32, cmpv); tcg_gen_extrl_i64_i32(n32, newv); tcg_gen_atomic_cmpxchg_i32(r32, addr, c32, n32, idx, memop & ~MO_SIGN); tcg_temp_free_i32(c32); tcg_temp_free_i32(n32); tcg_gen_extu_i32_i64(retv, r32); tcg_temp_free_i32(r32); if (memop & MO_SIGN) { tcg_gen_ext_i64(retv, retv, memop); } } } | 18,280 |
0 | static void scsi_disk_unit_attention_reported(SCSIDevice *dev) { SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); if (s->media_changed) { s->media_changed = false; s->qdev.unit_attention = SENSE_CODE(MEDIUM_CHANGED); } } | 18,281 |
1 | static void lsi_command_complete(SCSIBus *bus, int reason, uint32_t tag, uint32_t arg) { LSIState *s = DO_UPCAST(LSIState, dev.qdev, bus->qbus.parent); int out; out = (s->sstat1 & PHASE_MASK) == PHASE_DO; if (reason == SCSI_REASON_DONE) { DPRINTF("Command complete status=%d\n", (int)arg); s->status = arg; s->command_complete = 2; if (s->waiting && s->dbc != 0) { /* Raise phase mismatch for short transfers. */ lsi_bad_phase(s, out, PHASE_ST); } else { lsi_set_phase(s, PHASE_ST); } qemu_free(s->current); s->current = NULL; lsi_resume_script(s); return; } if (s->waiting == 1 || !s->current || tag != s->current->tag || (lsi_irq_on_rsl(s) && !(s->scntl1 & LSI_SCNTL1_CON))) { if (lsi_queue_tag(s, tag, arg)) return; } /* host adapter (re)connected */ DPRINTF("Data ready tag=0x%x len=%d\n", tag, arg); s->current->dma_len = arg; s->command_complete = 1; if (!s->waiting) return; if (s->waiting == 1 || s->dbc == 0) { lsi_resume_script(s); } else { lsi_do_dma(s, out); } } | 18,284 |
1 | static void xenstore_record_dm_state(struct xs_handle *xs, const char *state) { char path[50]; if (xs == NULL) { fprintf(stderr, "xenstore connection not initialized\n"); exit(1); } snprintf(path, sizeof (path), "/local/domain/0/device-model/%u/state", xen_domid); if (!xs_write(xs, XBT_NULL, path, state, strlen(state))) { fprintf(stderr, "error recording dm state\n"); exit(1); } } | 18,286 |
1 | static void *ff_realloc_static(void *ptr, unsigned int size) { int i; if(!ptr) return av_mallocz_static(size); /* Look for the old ptr */ for(i = 0; i < last_static; i++) { if(array_static[i] == ptr) { array_static[i] = av_realloc(array_static[i], size); return array_static[i]; } } return NULL; } | 18,287 |
1 | static int vhdx_log_read_desc(BlockDriverState *bs, BDRVVHDXState *s, VHDXLogEntries *log, VHDXLogDescEntries **buffer, bool convert_endian) { int ret = 0; uint32_t desc_sectors; uint32_t sectors_read; VHDXLogEntryHeader hdr; VHDXLogDescEntries *desc_entries = NULL; VHDXLogDescriptor desc; int i; assert(*buffer == NULL); ret = vhdx_log_peek_hdr(bs, log, &hdr); if (ret < 0) { goto exit; } if (vhdx_log_hdr_is_valid(log, &hdr, s) == false) { ret = -EINVAL; goto exit; } desc_sectors = vhdx_compute_desc_sectors(hdr.descriptor_count); desc_entries = qemu_blockalign(bs, desc_sectors * VHDX_LOG_SECTOR_SIZE); ret = vhdx_log_read_sectors(bs, log, §ors_read, desc_entries, desc_sectors, false); if (ret < 0) { goto free_and_exit; } if (sectors_read != desc_sectors) { ret = -EINVAL; goto free_and_exit; } /* put in proper endianness, and validate each desc */ for (i = 0; i < hdr.descriptor_count; i++) { desc = desc_entries->desc[i]; vhdx_log_desc_le_import(&desc); if (convert_endian) { desc_entries->desc[i] = desc; } if (vhdx_log_desc_is_valid(&desc, &hdr) == false) { ret = -EINVAL; goto free_and_exit; } } if (convert_endian) { desc_entries->hdr = hdr; } *buffer = desc_entries; goto exit; free_and_exit: qemu_vfree(desc_entries); exit: return ret; } | 18,288 |
1 | static int irq_cpu_hotplug_init(SCLPEvent *event) { irq_cpu_hotplug = *qemu_allocate_irqs(trigger_signal, event, 1); return 0; } | 18,289 |
0 | static int escape124_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { int buf_size = avpkt->size; Escape124Context *s = avctx->priv_data; AVFrame *frame = data; GetBitContext gb; unsigned frame_flags, frame_size; unsigned i; unsigned superblock_index, cb_index = 1, superblock_col_index = 0, superblocks_per_row = avctx->width / 8, skip = -1; uint16_t* old_frame_data, *new_frame_data; unsigned old_stride, new_stride; int ret; if ((ret = init_get_bits8(&gb, avpkt->data, avpkt->size)) < 0) return ret; // This call also guards the potential depth reads for the // codebook unpacking. if (!can_safely_read(&gb, 64)) return -1; frame_flags = get_bits_long(&gb, 32); frame_size = get_bits_long(&gb, 32); // Leave last frame unchanged // FIXME: Is this necessary? I haven't seen it in any real samples if (!(frame_flags & 0x114) || !(frame_flags & 0x7800000)) { if (!s->frame.data[0]) return AVERROR_INVALIDDATA; av_log(avctx, AV_LOG_DEBUG, "Skipping frame\n"); *got_frame = 1; if ((ret = av_frame_ref(frame, &s->frame)) < 0) return ret; return frame_size; } for (i = 0; i < 3; i++) { if (frame_flags & (1 << (17 + i))) { unsigned cb_depth, cb_size; if (i == 2) { // This codebook can be cut off at places other than // powers of 2, leaving some of the entries undefined. cb_size = get_bits_long(&gb, 20); cb_depth = av_log2(cb_size - 1) + 1; } else { cb_depth = get_bits(&gb, 4); if (i == 0) { // This is the most basic codebook: pow(2,depth) entries // for a depth-length key cb_size = 1 << cb_depth; } else { // This codebook varies per superblock // FIXME: I don't think this handles integer overflow // properly cb_size = s->num_superblocks << cb_depth; } } av_free(s->codebooks[i].blocks); s->codebooks[i] = unpack_codebook(&gb, cb_depth, cb_size); if (!s->codebooks[i].blocks) return -1; } } if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0) return ret; new_frame_data = (uint16_t*)frame->data[0]; new_stride = frame->linesize[0] / 2; old_frame_data = (uint16_t*)s->frame.data[0]; old_stride = s->frame.linesize[0] / 2; for (superblock_index = 0; superblock_index < s->num_superblocks; superblock_index++) { MacroBlock mb; SuperBlock sb; unsigned multi_mask = 0; if (skip == -1) { // Note that this call will make us skip the rest of the blocks // if the frame prematurely ends skip = decode_skip_count(&gb); } if (skip) { copy_superblock(new_frame_data, new_stride, old_frame_data, old_stride); } else { copy_superblock(sb.pixels, 8, old_frame_data, old_stride); while (can_safely_read(&gb, 1) && !get_bits1(&gb)) { unsigned mask; mb = decode_macroblock(s, &gb, &cb_index, superblock_index); mask = get_bits(&gb, 16); multi_mask |= mask; for (i = 0; i < 16; i++) { if (mask & mask_matrix[i]) { insert_mb_into_sb(&sb, mb, i); } } } if (can_safely_read(&gb, 1) && !get_bits1(&gb)) { unsigned inv_mask = get_bits(&gb, 4); for (i = 0; i < 4; i++) { if (inv_mask & (1 << i)) { multi_mask ^= 0xF << i*4; } else { multi_mask ^= get_bits(&gb, 4) << i*4; } } for (i = 0; i < 16; i++) { if (multi_mask & mask_matrix[i]) { if (!can_safely_read(&gb, 1)) break; mb = decode_macroblock(s, &gb, &cb_index, superblock_index); insert_mb_into_sb(&sb, mb, i); } } } else if (frame_flags & (1 << 16)) { while (can_safely_read(&gb, 1) && !get_bits1(&gb)) { mb = decode_macroblock(s, &gb, &cb_index, superblock_index); insert_mb_into_sb(&sb, mb, get_bits(&gb, 4)); } } copy_superblock(new_frame_data, new_stride, sb.pixels, 8); } superblock_col_index++; new_frame_data += 8; if (old_frame_data) old_frame_data += 
8; if (superblock_col_index == superblocks_per_row) { new_frame_data += new_stride * 8 - superblocks_per_row * 8; if (old_frame_data) old_frame_data += old_stride * 8 - superblocks_per_row * 8; superblock_col_index = 0; } skip--; } av_log(avctx, AV_LOG_DEBUG, "Escape sizes: %i, %i, %i\n", frame_size, buf_size, get_bits_count(&gb) / 8); av_frame_unref(&s->frame); if ((ret = av_frame_ref(&s->frame, frame)) < 0) return ret; *got_frame = 1; return frame_size; } | 18,291 |
0 | static int adx_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf0 = avpkt->data; int buf_size = avpkt->size; ADXContext *c = avctx->priv_data; int16_t *samples = data; const uint8_t *buf = buf0; int rest = buf_size; if (!c->header_parsed) { int hdrsize = adx_decode_header(avctx, buf, rest); if (!hdrsize) return -1; c->header_parsed = 1; buf += hdrsize; rest -= hdrsize; } /* 18 bytes of data are expanded into 32*2 bytes of audio, so guard against buffer overflows */ if (rest / 18 > *data_size / 64) rest = (*data_size / 64) * 18; if (c->in_temp) { int copysize = 18 * avctx->channels - c->in_temp; memcpy(c->dec_temp + c->in_temp, buf, copysize); rest -= copysize; buf += copysize; if (avctx->channels == 1) { adx_decode(samples, c->dec_temp, c->prev); samples += 32; } else { adx_decode_stereo(samples, c->dec_temp, c->prev); samples += 32*2; } } if (avctx->channels == 1) { while (rest >= 18) { adx_decode(samples, buf, c->prev); rest -= 18; buf += 18; samples += 32; } } else { while (rest >= 18 * 2) { adx_decode_stereo(samples, buf, c->prev); rest -= 18 * 2; buf += 18 * 2; samples += 32 * 2; } } c->in_temp = rest; if (rest) { memcpy(c->dec_temp, buf, rest); buf += rest; } *data_size = (uint8_t*)samples - (uint8_t*)data; return buf - buf0; } | 18,292 |
1 | static int vc1_decode_i_block_adv(VC1Context *v, DCTELEM block[64], int n, int coded, int codingset, int mquant) { GetBitContext *gb = &v->s.gb; MpegEncContext *s = &v->s; int dc_pred_dir = 0; /* Direction of the DC prediction used */ int run_diff, i; int16_t *dc_val; int16_t *ac_val, *ac_val2; int dcdiff; int a_avail = v->a_avail, c_avail = v->c_avail; int use_pred = s->ac_pred; int scale; int q1, q2 = 0; int mb_pos = s->mb_x + s->mb_y * s->mb_stride; /* Get DC differential */ if (n < 4) { dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3); } else { dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3); } if (dcdiff < 0){ av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n"); return -1; } if (dcdiff) { if (dcdiff == 119 /* ESC index value */) { /* TODO: Optimize */ if (mquant == 1) dcdiff = get_bits(gb, 10); else if (mquant == 2) dcdiff = get_bits(gb, 9); else dcdiff = get_bits(gb, 8); } else { if (mquant == 1) dcdiff = (dcdiff<<2) + get_bits(gb, 2) - 3; else if (mquant == 2) dcdiff = (dcdiff<<1) + get_bits(gb, 1) - 1; } if (get_bits(gb, 1)) dcdiff = -dcdiff; } /* Prediction */ dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, v->a_avail, v->c_avail, &dc_val, &dc_pred_dir); *dc_val = dcdiff; /* Store the quantized DC coeff, used for prediction */ if (n < 4) { block[0] = dcdiff * s->y_dc_scale; } else { block[0] = dcdiff * s->c_dc_scale; } /* Skip ? */ run_diff = 0; i = 0; //AC Decoding i = 1; /* check if AC is needed at all and adjust direction if needed */ if(!a_avail) dc_pred_dir = 1; if(!c_avail) dc_pred_dir = 0; if(!a_avail && !c_avail) use_pred = 0; ac_val = s->ac_val[0][0] + s->block_index[n] * 16; ac_val2 = ac_val; scale = mquant * 2 + v->halfpq; if(dc_pred_dir) //left ac_val -= 16; else //top ac_val -= 16 * s->block_wrap[n]; q1 = s->current_picture.qscale_table[mb_pos]; if(dc_pred_dir && c_avail) q2 = s->current_picture.qscale_table[mb_pos - 1]; if(!dc_pred_dir && a_avail) q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride]; if(n && n<4) q2 = q1; if(coded) { int last = 0, skip, value; const int8_t *zz_table; int k; if(v->s.ac_pred) { if(!dc_pred_dir) zz_table = vc1_horizontal_zz; else zz_table = vc1_vertical_zz; } else zz_table = vc1_normal_zz; while (!last) { vc1_decode_ac_coeff(v, &last, &skip, &value, codingset); i += skip; if(i > 63) break; block[zz_table[i++]] = value; } /* apply AC prediction if needed */ if(use_pred) { /* scale predictors if needed*/ if(q2 && q1!=q2) { q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1; q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1; if(dc_pred_dir) { //left for(k = 1; k < 8; k++) block[k << 3] += (ac_val[k] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18; } else { //top for(k = 1; k < 8; k++) block[k] += (ac_val[k + 8] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18; } } else { if(dc_pred_dir) { //left for(k = 1; k < 8; k++) block[k << 3] += ac_val[k]; } else { //top for(k = 1; k < 8; k++) block[k] += ac_val[k + 8]; } } } /* save AC coeffs for further prediction */ for(k = 1; k < 8; k++) { ac_val2[k] = block[k << 3]; ac_val2[k + 8] = block[k]; } /* scale AC coeffs */ for(k = 1; k < 64; k++) if(block[k]) { block[k] *= scale; if(!v->pquantizer) block[k] += (block[k] < 0) ? -mquant : mquant; } if(use_pred) i = 63; } else { // no AC coeffs int k; memset(ac_val2, 0, 16 * 2); if(dc_pred_dir) {//left if(use_pred) { memcpy(ac_val2, ac_val, 8 * 2); if(q2 && q1!=q2) { q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1; q2 = q2 * 2 + ((q2 == v->pq) ? 
v->halfpq : 0) - 1; for(k = 1; k < 8; k++) ac_val2[k] = (ac_val2[k] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18; } } } else {//top if(use_pred) { memcpy(ac_val2 + 8, ac_val + 8, 8 * 2); if(q2 && q1!=q2) { q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1; q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1; for(k = 1; k < 8; k++) ac_val2[k + 8] = (ac_val2[k + 8] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18; } } } /* apply AC prediction if needed */ if(use_pred) { if(dc_pred_dir) { //left for(k = 1; k < 8; k++) { block[k << 3] = ac_val2[k] * scale; if(!v->pquantizer && block[k << 3]) block[k << 3] += (block[k << 3] < 0) ? -mquant : mquant; } } else { //top for(k = 1; k < 8; k++) { block[k] = ac_val2[k + 8] * scale; if(!v->pquantizer && block[k]) block[k] += (block[k] < 0) ? -mquant : mquant; } } i = 63; } } s->block_last_index[n] = i; return 0; } | 18,293 |
1 | int qcow2_alloc_clusters_at(BlockDriverState *bs, uint64_t offset, int nb_clusters) { BDRVQcowState *s = bs->opaque; uint64_t cluster_index; uint64_t old_free_cluster_index; int i, refcount, ret; /* Check how many clusters there are free */ cluster_index = offset >> s->cluster_bits; for(i = 0; i < nb_clusters; i++) { refcount = get_refcount(bs, cluster_index++); if (refcount < 0) { return refcount; } else if (refcount != 0) { break; } } /* And then allocate them */ ret = update_refcount(bs, offset, i << s->cluster_bits, 1); if (ret < 0) { return ret; } return i; } | 18,294 |
1 | static void oledate_to_iso8601(char *buf, int buf_size, int64_t value) { time_t t = 631112400LL + 86400*av_int2dbl(value); strftime(buf, buf_size, "%Y-%m-%d %H:%M:%S", gmtime(&t)); } | 18,295 |
1 | const char *get_register_name_32(unsigned int reg) { if (reg > CPU_NB_REGS32) { return NULL; } return x86_reg_info_32[reg].name; } | 18,296 |
1 | struct pxa2xx_state_s *pxa255_init(unsigned int sdram_size, DisplayState *ds) { struct pxa2xx_state_s *s; struct pxa2xx_ssp_s *ssp; int iomemtype, i; s = (struct pxa2xx_state_s *) qemu_mallocz(sizeof(struct pxa2xx_state_s)); s->env = cpu_init(); cpu_arm_set_model(s->env, "pxa255"); register_savevm("cpu", 0, 0, cpu_save, cpu_load, s->env); /* SDRAM & Internal Memory Storage */ cpu_register_physical_memory(PXA2XX_SDRAM_BASE, sdram_size, qemu_ram_alloc(sdram_size) | IO_MEM_RAM); cpu_register_physical_memory(PXA2XX_INTERNAL_BASE, PXA2XX_INTERNAL_SIZE, qemu_ram_alloc(PXA2XX_INTERNAL_SIZE) | IO_MEM_RAM); s->pic = pxa2xx_pic_init(0x40d00000, s->env); s->dma = pxa255_dma_init(0x40000000, s->pic[PXA2XX_PIC_DMA]); pxa25x_timer_init(0x40a00000, &s->pic[PXA2XX_PIC_OST_0]); s->gpio = pxa2xx_gpio_init(0x40e00000, s->env, s->pic, 85); s->mmc = pxa2xx_mmci_init(0x41100000, s->pic[PXA2XX_PIC_MMC], s->dma); for (i = 0; pxa255_serial[i].io_base; i ++) if (serial_hds[i]) serial_mm_init(pxa255_serial[i].io_base, 2, s->pic[pxa255_serial[i].irqn], serial_hds[i], 1); else break; if (serial_hds[i]) s->fir = pxa2xx_fir_init(0x40800000, s->pic[PXA2XX_PIC_ICP], s->dma, serial_hds[i]); if (ds) s->lcd = pxa2xx_lcdc_init(0x44000000, s->pic[PXA2XX_PIC_LCD], ds); s->cm_base = 0x41300000; s->cm_regs[CCCR >> 4] = 0x02000210; /* 416.0 MHz */ s->clkcfg = 0x00000009; /* Turbo mode active */ iomemtype = cpu_register_io_memory(0, pxa2xx_cm_readfn, pxa2xx_cm_writefn, s); cpu_register_physical_memory(s->cm_base, 0xfff, iomemtype); register_savevm("pxa2xx_cm", 0, 0, pxa2xx_cm_save, pxa2xx_cm_load, s); cpu_arm_set_cp_io(s->env, 14, pxa2xx_cp14_read, pxa2xx_cp14_write, s); s->mm_base = 0x48000000; s->mm_regs[MDMRS >> 2] = 0x00020002; s->mm_regs[MDREFR >> 2] = 0x03ca4000; s->mm_regs[MECR >> 2] = 0x00000001; /* Two PC Card sockets */ iomemtype = cpu_register_io_memory(0, pxa2xx_mm_readfn, pxa2xx_mm_writefn, s); cpu_register_physical_memory(s->mm_base, 0xfff, iomemtype); register_savevm("pxa2xx_mm", 0, 0, pxa2xx_mm_save, pxa2xx_mm_load, s); s->pm_base = 0x40f00000; iomemtype = cpu_register_io_memory(0, pxa2xx_pm_readfn, pxa2xx_pm_writefn, s); cpu_register_physical_memory(s->pm_base, 0xff, iomemtype); register_savevm("pxa2xx_pm", 0, 0, pxa2xx_pm_save, pxa2xx_pm_load, s); for (i = 0; pxa255_ssp[i].io_base; i ++); s->ssp = (struct pxa2xx_ssp_s **) qemu_mallocz(sizeof(struct pxa2xx_ssp_s *) * i); ssp = (struct pxa2xx_ssp_s *) qemu_mallocz(sizeof(struct pxa2xx_ssp_s) * i); for (i = 0; pxa255_ssp[i].io_base; i ++) { s->ssp[i] = &ssp[i]; ssp[i].base = pxa255_ssp[i].io_base; ssp[i].irq = s->pic[pxa255_ssp[i].irqn]; iomemtype = cpu_register_io_memory(0, pxa2xx_ssp_readfn, pxa2xx_ssp_writefn, &ssp[i]); cpu_register_physical_memory(ssp[i].base, 0xfff, iomemtype); register_savevm("pxa2xx_ssp", i, 0, pxa2xx_ssp_save, pxa2xx_ssp_load, s); } if (usb_enabled) { usb_ohci_init_pxa(0x4c000000, 3, -1, s->pic[PXA2XX_PIC_USBH1]); } s->pcmcia[0] = pxa2xx_pcmcia_init(0x20000000); s->pcmcia[1] = pxa2xx_pcmcia_init(0x30000000); s->rtc_base = 0x40900000; iomemtype = cpu_register_io_memory(0, pxa2xx_rtc_readfn, pxa2xx_rtc_writefn, s); cpu_register_physical_memory(s->rtc_base, 0xfff, iomemtype); pxa2xx_rtc_init(s); register_savevm("pxa2xx_rtc", 0, 0, pxa2xx_rtc_save, pxa2xx_rtc_load, s); s->i2c[0] = pxa2xx_i2c_init(0x40301600, s->pic[PXA2XX_PIC_I2C], 0xffff); s->i2c[1] = pxa2xx_i2c_init(0x40f00100, s->pic[PXA2XX_PIC_PWRI2C], 0xff); s->i2s = pxa2xx_i2s_init(0x40400000, s->pic[PXA2XX_PIC_I2S], s->dma); /* GPIO1 resets the processor */ /* The handler can be 
overriden by board-specific code */ pxa2xx_gpio_handler_set(s->gpio, 1, pxa2xx_reset, s); return s; } | 18,297 |
0 | int av_read_packet(AVFormatContext *s, AVPacket *pkt) { int ret, i; AVStream *st; for(;;){ AVPacketList *pktl = s->raw_packet_buffer; if (pktl) { *pkt = pktl->pkt; if(s->streams[pkt->stream_index]->codec->codec_id != CODEC_ID_PROBE || !s->streams[pkt->stream_index]->probe_packets){ s->raw_packet_buffer = pktl->next; av_free(pktl); return 0; } } av_init_packet(pkt); ret= s->iformat->read_packet(s, pkt); if (ret < 0) { if (!pktl || ret == AVERROR(EAGAIN)) return ret; for (i = 0; i < s->nb_streams; i++) s->streams[i]->probe_packets = 0; continue; } st= s->streams[pkt->stream_index]; switch(st->codec->codec_type){ case CODEC_TYPE_VIDEO: if(s->video_codec_id) st->codec->codec_id= s->video_codec_id; break; case CODEC_TYPE_AUDIO: if(s->audio_codec_id) st->codec->codec_id= s->audio_codec_id; break; case CODEC_TYPE_SUBTITLE: if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id; break; } if(!pktl && (st->codec->codec_id != CODEC_ID_PROBE || !st->probe_packets)) return ret; add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end); if(st->codec->codec_id == CODEC_ID_PROBE){ AVProbeData *pd = &st->probe_data; --st->probe_packets; pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE); memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size); pd->buf_size += pkt->size; memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE); if(av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){ set_codec_from_probe_data(st, pd, 1); if(st->codec->codec_id != CODEC_ID_PROBE){ pd->buf_size=0; av_freep(&pd->buf); } } } } } | 18,299 |
1 | void nbd_export_close(NBDExport *exp) { NBDClient *client, *next; nbd_export_get(exp); QTAILQ_FOREACH_SAFE(client, &exp->clients, next, next) { client_close(client); } nbd_export_set_name(exp, NULL); nbd_export_put(exp); if (exp->blk) { blk_remove_aio_context_notifier(exp->blk, blk_aio_attached, blk_aio_detach, exp); blk_unref(exp->blk); exp->blk = NULL; } } | 18,301 |
1 | void iothread_stop(IOThread *iothread) { if (!iothread->ctx || iothread->stopping) { return; } iothread->stopping = true; aio_notify(iothread->ctx); if (atomic_read(&iothread->main_loop)) { g_main_loop_quit(iothread->main_loop); } qemu_thread_join(&iothread->thread); } | 18,302 |
1 | void qmp_stop(Error **errp) { vm_stop(RUN_STATE_PAUSED); } | 18,303 |
0 | const char *postproc_configuration(void) { return FFMPEG_CONFIGURATION; } | 18,305 |
0 | static void qdm2_decode_fft_packets(QDM2Context *q) { int i, j, min, max, value, type, unknown_flag; GetBitContext gb; if (q->sub_packet_list_B[0].packet == NULL) return; /* reset minimum indexes for FFT coefficients */ q->fft_coefs_index = 0; for (i = 0; i < 5; i++) q->fft_coefs_min_index[i] = -1; /* process subpackets ordered by type, largest type first */ for (i = 0, max = 256; i < q->sub_packets_B; i++) { QDM2SubPacket *packet = NULL; /* find subpacket with largest type less than max */ for (j = 0, min = 0; j < q->sub_packets_B; j++) { value = q->sub_packet_list_B[j].packet->type; if (value > min && value < max) { min = value; packet = q->sub_packet_list_B[j].packet; } } max = min; /* check for errors (?) */ if (!packet) return; if (i == 0 && (packet->type < 16 || packet->type >= 48 || fft_subpackets[packet->type - 16])) return; /* decode FFT tones */ init_get_bits(&gb, packet->data, packet->size * 8); if (packet->type >= 32 && packet->type < 48 && !fft_subpackets[packet->type - 16]) unknown_flag = 1; else unknown_flag = 0; type = packet->type; if ((type >= 17 && type < 24) || (type >= 33 && type < 40)) { int duration = q->sub_sampling + 5 - (type & 15); if (duration >= 0 && duration < 4) qdm2_fft_decode_tones(q, duration, &gb, unknown_flag); } else if (type == 31) { for (j = 0; j < 4; j++) qdm2_fft_decode_tones(q, j, &gb, unknown_flag); } else if (type == 46) { for (j = 0; j < 6; j++) q->fft_level_exp[j] = get_bits(&gb, 6); for (j = 0; j < 4; j++) qdm2_fft_decode_tones(q, j, &gb, unknown_flag); } } // Loop on B packets /* calculate maximum indexes for FFT coefficients */ for (i = 0, j = -1; i < 5; i++) if (q->fft_coefs_min_index[i] >= 0) { if (j >= 0) q->fft_coefs_max_index[j] = q->fft_coefs_min_index[i]; j = i; } if (j >= 0) q->fft_coefs_max_index[j] = q->fft_coefs_index; } | 18,306 |
0 | void ff_avg_h264_qpel16_mc22_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride) { avc_luma_mid_and_aver_dst_16x16_msa(src - (2 * stride) - 2, stride, dst, stride); } | 18,307 |
0 | int avfilter_open(AVFilterContext **filter_ctx, AVFilter *filter, const char *inst_name) { AVFilterContext *ret; *filter_ctx = NULL; if (!filter) return AVERROR(EINVAL); ret = av_mallocz(sizeof(AVFilterContext)); ret->av_class = &avfilter_class; ret->filter = filter; ret->name = inst_name ? av_strdup(inst_name) : NULL; if (filter->priv_size) ret->priv = av_mallocz(filter->priv_size); ret->input_count = pad_count(filter->inputs); if (ret->input_count) { ret->input_pads = av_malloc(sizeof(AVFilterPad) * ret->input_count); memcpy(ret->input_pads, filter->inputs, sizeof(AVFilterPad) * ret->input_count); ret->inputs = av_mallocz(sizeof(AVFilterLink*) * ret->input_count); } ret->output_count = pad_count(filter->outputs); if (ret->output_count) { ret->output_pads = av_malloc(sizeof(AVFilterPad) * ret->output_count); memcpy(ret->output_pads, filter->outputs, sizeof(AVFilterPad) * ret->output_count); ret->outputs = av_mallocz(sizeof(AVFilterLink*) * ret->output_count); } *filter_ctx = ret; return 0; } | 18,308 |
1 | static void test_panic(void) { uint8_t val; QDict *response, *data; val = inb(0x505); g_assert_cmpuint(val, ==, 1); outb(0x505, 0x1); response = qmp_receive(); g_assert(qdict_haskey(response, "event")); g_assert_cmpstr(qdict_get_str(response, "event"), ==, "GUEST_PANICKED"); g_assert(qdict_haskey(response, "data")); data = qdict_get_qdict(response, "data"); g_assert(qdict_haskey(data, "action")); g_assert_cmpstr(qdict_get_str(data, "action"), ==, "pause"); } | 18,309 |
1 | static void spr_write_decr(DisasContext *ctx, int sprn, int gprn) { if (ctx->tb->cflags & CF_USE_ICOUNT) { gen_io_start(); } gen_helper_store_decr(cpu_env, cpu_gpr[gprn]); if (ctx->tb->cflags & CF_USE_ICOUNT) { gen_io_end(); gen_stop_exception(ctx); } } | 18,310 |
1 | PPC_OP(b_T1) { regs->nip = T1 & ~3; RETURN(); } | 18,312 |
1 | static int make_cdt15_entry(int p1, int p2, int16_t *cdt) { int r, b, lo; b = cdt[p2]; r = cdt[p1] * 1024; lo = b + r; return (lo + (lo * (1 << 16))) * 2; } | 18,313 |
1 | static int ogg_build_flac_headers(AVCodecContext *avctx, OGGStreamContext *oggstream, int bitexact) { const char *vendor = bitexact ? "ffmpeg" : LIBAVFORMAT_IDENT; enum FLACExtradataFormat format; uint8_t *streaminfo; uint8_t *p; if (!ff_flac_is_extradata_valid(avctx, &format, &streaminfo)) return -1; oggstream->header_len[0] = 51; oggstream->header[0] = av_mallocz(51); // per ogg flac specs p = oggstream->header[0]; bytestream_put_byte(&p, 0x7F); bytestream_put_buffer(&p, "FLAC", 4); bytestream_put_byte(&p, 1); // major version bytestream_put_byte(&p, 0); // minor version bytestream_put_be16(&p, 1); // headers packets without this one bytestream_put_buffer(&p, "fLaC", 4); bytestream_put_byte(&p, 0x00); // streaminfo bytestream_put_be24(&p, 34); bytestream_put_buffer(&p, streaminfo, FLAC_STREAMINFO_SIZE); oggstream->header_len[1] = 1+3+4+strlen(vendor)+4; oggstream->header[1] = av_mallocz(oggstream->header_len[1]); p = oggstream->header[1]; bytestream_put_byte(&p, 0x84); // last metadata block and vorbis comment bytestream_put_be24(&p, oggstream->header_len[1] - 4); bytestream_put_le32(&p, strlen(vendor)); bytestream_put_buffer(&p, vendor, strlen(vendor)); bytestream_put_le32(&p, 0); // user comment list length return 0; } | 18,314 |
1 | void qmp_netdev_del(const char *id, Error **errp) { NetClientState *nc; nc = qemu_find_netdev(id); if (!nc) { error_set(errp, QERR_DEVICE_NOT_FOUND, id); return; } qemu_del_net_client(nc); qemu_opts_del(qemu_opts_find(qemu_find_opts_err("netdev", errp), id)); } | 18,315 |
1 | int qemu_savevm_state_iterate(QEMUFile *f) { SaveStateEntry *se; int ret = 1; trace_savevm_state_iterate(); QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { if (!se->ops || !se->ops->save_live_iterate) { continue; } if (se->ops && se->ops->is_active) { if (!se->ops->is_active(se->opaque)) { continue; } } if (qemu_file_rate_limit(f)) { return 0; } trace_savevm_section_start(se->idstr, se->section_id); save_section_header(f, se, QEMU_VM_SECTION_PART); ret = se->ops->save_live_iterate(f, se->opaque); trace_savevm_section_end(se->idstr, se->section_id, ret); if (ret < 0) { qemu_file_set_error(f, ret); } if (ret <= 0) { /* Do not proceed to the next vmstate before this one reported completion of the current stage. This serializes the migration and reduces the probability that a faster changing state is synchronized over and over again. */ break; } } return ret; } | 18,316 |
1 | yuv2rgba64_2_c_template(SwsContext *c, const int32_t *buf[2], const int32_t *ubuf[2], const int32_t *vbuf[2], const int32_t *abuf[2], uint16_t *dest, int dstW, int yalpha, int uvalpha, int y, enum AVPixelFormat target, int hasAlpha) { const int32_t *buf0 = buf[0], *buf1 = buf[1], *ubuf0 = ubuf[0], *ubuf1 = ubuf[1], *vbuf0 = vbuf[0], *vbuf1 = vbuf[1], *abuf0 = hasAlpha ? abuf[0] : NULL, *abuf1 = hasAlpha ? abuf[1] : NULL; int yalpha1 = 4096 - yalpha; int uvalpha1 = 4096 - uvalpha; int i; for (i = 0; i < ((dstW + 1) >> 1); i++) { int Y1 = (buf0[i * 2] * yalpha1 + buf1[i * 2] * yalpha) >> 14; int Y2 = (buf0[i * 2 + 1] * yalpha1 + buf1[i * 2 + 1] * yalpha) >> 14; int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha + (-128 << 23)) >> 14; int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha + (-128 << 23)) >> 14; int A1, A2; int R, G, B; Y1 -= c->yuv2rgb_y_offset; Y2 -= c->yuv2rgb_y_offset; Y1 *= c->yuv2rgb_y_coeff; Y2 *= c->yuv2rgb_y_coeff; Y1 += 1 << 13; Y2 += 1 << 13; R = V * c->yuv2rgb_v2r_coeff; G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff; B = U * c->yuv2rgb_u2b_coeff; if (hasAlpha) { A1 = (abuf0[i * 2 ] * yalpha1 + abuf1[i * 2 ] * yalpha) >> 1; A2 = (abuf0[i * 2 + 1] * yalpha1 + abuf1[i * 2 + 1] * yalpha) >> 1; A1 += 1 << 13; A2 += 1 << 13; } output_pixel(&dest[0], av_clip_uintp2(B_R + Y1, 30) >> 14); output_pixel(&dest[1], av_clip_uintp2( G + Y1, 30) >> 14); output_pixel(&dest[2], av_clip_uintp2(R_B + Y1, 30) >> 14); output_pixel(&dest[3], av_clip_uintp2(A1 , 30) >> 14); output_pixel(&dest[4], av_clip_uintp2(B_R + Y2, 30) >> 14); output_pixel(&dest[5], av_clip_uintp2( G + Y2, 30) >> 14); output_pixel(&dest[6], av_clip_uintp2(R_B + Y2, 30) >> 14); output_pixel(&dest[7], av_clip_uintp2(A2 , 30) >> 14); dest += 8; } } | 18,317 |
1 | static int ogg_restore(AVFormatContext *s) { struct ogg *ogg = s->priv_data; AVIOContext *bc = s->pb; struct ogg_state *ost = ogg->state; int i, err; if (!ost) return 0; ogg->state = ost->next; for (i = 0; i < ogg->nstreams; i++) av_freep(&ogg->streams[i].buf); avio_seek(bc, ost->pos, SEEK_SET); ogg->page_pos = -1; ogg->curidx = ost->curidx; ogg->nstreams = ost->nstreams; if ((err = av_reallocp_array(&ogg->streams, ogg->nstreams, sizeof(*ogg->streams))) < 0) { ogg->nstreams = 0; return err; } else memcpy(ogg->streams, ost->streams, ost->nstreams * sizeof(*ogg->streams)); av_free(ost); return 0; } | 18,318 |
1 | static int commit_bitstream_and_slice_buffer(AVCodecContext *avctx, DECODER_BUFFER_DESC *bs, DECODER_BUFFER_DESC *sc) { const H264Context *h = avctx->priv_data; const unsigned mb_count = h->mb_width * h->mb_height; AVDXVAContext *ctx = avctx->hwaccel_context; const H264Picture *current_picture = h->cur_pic_ptr; struct dxva2_picture_context *ctx_pic = current_picture->hwaccel_picture_private; DXVA_Slice_H264_Short *slice = NULL; void *dxva_data_ptr; uint8_t *dxva_data, *current, *end; unsigned dxva_size; void *slice_data; unsigned slice_size; unsigned padding; unsigned i; unsigned type; /* Create an annex B bitstream buffer with only slice NAL and finalize slice */ #if CONFIG_D3D11VA if (avctx->pix_fmt == AV_PIX_FMT_D3D11VA_VLD) { type = D3D11_VIDEO_DECODER_BUFFER_BITSTREAM; if (FAILED(ID3D11VideoContext_GetDecoderBuffer(D3D11VA_CONTEXT(ctx)->video_context, D3D11VA_CONTEXT(ctx)->decoder, type, &dxva_size, &dxva_data_ptr))) return -1; } #endif #if CONFIG_DXVA2 if (avctx->pix_fmt == AV_PIX_FMT_DXVA2_VLD) { type = DXVA2_BitStreamDateBufferType; if (FAILED(IDirectXVideoDecoder_GetBuffer(DXVA2_CONTEXT(ctx)->decoder, type, &dxva_data_ptr, &dxva_size))) return -1; } #endif dxva_data = dxva_data_ptr; current = dxva_data; end = dxva_data + dxva_size; for (i = 0; i < ctx_pic->slice_count; i++) { static const uint8_t start_code[] = { 0, 0, 1 }; static const unsigned start_code_size = sizeof(start_code); unsigned position, size; assert(offsetof(DXVA_Slice_H264_Short, BSNALunitDataLocation) == offsetof(DXVA_Slice_H264_Long, BSNALunitDataLocation)); assert(offsetof(DXVA_Slice_H264_Short, SliceBytesInBuffer) == offsetof(DXVA_Slice_H264_Long, SliceBytesInBuffer)); if (is_slice_short(avctx, ctx)) slice = &ctx_pic->slice_short[i]; else slice = (DXVA_Slice_H264_Short*)&ctx_pic->slice_long[i]; position = slice->BSNALunitDataLocation; size = slice->SliceBytesInBuffer; if (start_code_size + size > end - current) { av_log(avctx, AV_LOG_ERROR, "Failed to build bitstream"); break; } slice->BSNALunitDataLocation = current - dxva_data; slice->SliceBytesInBuffer = start_code_size + size; if (!is_slice_short(avctx, ctx)) { DXVA_Slice_H264_Long *slice_long = (DXVA_Slice_H264_Long*)slice; if (i < ctx_pic->slice_count - 1) slice_long->NumMbsForSlice = slice_long[1].first_mb_in_slice - slice_long[0].first_mb_in_slice; else slice_long->NumMbsForSlice = mb_count - slice_long->first_mb_in_slice; } memcpy(current, start_code, start_code_size); current += start_code_size; memcpy(current, &ctx_pic->bitstream[position], size); current += size; } padding = FFMIN(128 - ((current - dxva_data) & 127), end - current); if (slice && padding > 0) { memset(current, 0, padding); current += padding; slice->SliceBytesInBuffer += padding; } #if CONFIG_D3D11VA if (avctx->pix_fmt == AV_PIX_FMT_D3D11VA_VLD) if (FAILED(ID3D11VideoContext_ReleaseDecoderBuffer(D3D11VA_CONTEXT(ctx)->video_context, D3D11VA_CONTEXT(ctx)->decoder, type))) return -1; #endif #if CONFIG_DXVA2 if (avctx->pix_fmt == AV_PIX_FMT_DXVA2_VLD) if (FAILED(IDirectXVideoDecoder_ReleaseBuffer(DXVA2_CONTEXT(ctx)->decoder, type))) return -1; #endif if (i < ctx_pic->slice_count) return -1; #if CONFIG_D3D11VA if (avctx->pix_fmt == AV_PIX_FMT_D3D11VA_VLD) { D3D11_VIDEO_DECODER_BUFFER_DESC *dsc11 = bs; memset(dsc11, 0, sizeof(*dsc11)); dsc11->BufferType = type; dsc11->DataSize = current - dxva_data; dsc11->NumMBsInBuffer = mb_count; type = D3D11_VIDEO_DECODER_BUFFER_SLICE_CONTROL; } #endif #if CONFIG_DXVA2 if (avctx->pix_fmt == AV_PIX_FMT_DXVA2_VLD) { DXVA2_DecodeBufferDesc *dsc2 = bs; 
memset(dsc2, 0, sizeof(*dsc2)); dsc2->CompressedBufferType = type; dsc2->DataSize = current - dxva_data; dsc2->NumMBsInBuffer = mb_count; type = DXVA2_SliceControlBufferType; } #endif if (is_slice_short(avctx, ctx)) { slice_data = ctx_pic->slice_short; slice_size = ctx_pic->slice_count * sizeof(*ctx_pic->slice_short); } else { slice_data = ctx_pic->slice_long; slice_size = ctx_pic->slice_count * sizeof(*ctx_pic->slice_long); } assert((bs->DataSize & 127) == 0); return ff_dxva2_commit_buffer(avctx, ctx, sc, type, slice_data, slice_size, mb_count); } | 18,319 |
0 | static int qtrle_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { QtrleContext *s = avctx->priv_data; int header, start_line; int height, row_ptr; int has_palette = 0; int ret; bytestream2_init(&s->g, avpkt->data, avpkt->size); if ((ret = ff_reget_buffer(avctx, s->frame)) < 0) return ret; /* check if this frame is even supposed to change */ if (avpkt->size < 8) goto done; /* start after the chunk size */ bytestream2_seek(&s->g, 4, SEEK_SET); /* fetch the header */ header = bytestream2_get_be16(&s->g); /* if a header is present, fetch additional decoding parameters */ if (header & 0x0008) { if (avpkt->size < 14) goto done; start_line = bytestream2_get_be16(&s->g); bytestream2_skip(&s->g, 2); height = bytestream2_get_be16(&s->g); bytestream2_skip(&s->g, 2); if (height > s->avctx->height - start_line) goto done; } else { start_line = 0; height = s->avctx->height; } row_ptr = s->frame->linesize[0] * start_line; switch (avctx->bits_per_coded_sample) { case 1: case 33: qtrle_decode_1bpp(s, row_ptr, height); has_palette = 1; break; case 2: case 34: qtrle_decode_2n4bpp(s, row_ptr, height, 2); has_palette = 1; break; case 4: case 36: qtrle_decode_2n4bpp(s, row_ptr, height, 4); has_palette = 1; break; case 8: case 40: qtrle_decode_8bpp(s, row_ptr, height); has_palette = 1; break; case 16: qtrle_decode_16bpp(s, row_ptr, height); break; case 24: qtrle_decode_24bpp(s, row_ptr, height); break; case 32: qtrle_decode_32bpp(s, row_ptr, height); break; default: av_log (s->avctx, AV_LOG_ERROR, "Unsupported colorspace: %d bits/sample?\n", avctx->bits_per_coded_sample); break; } if(has_palette) { const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL); if (pal) { s->frame->palette_has_changed = 1; memcpy(s->pal, pal, AVPALETTE_SIZE); } /* make the palette available on the way out */ memcpy(s->frame->data[1], s->pal, AVPALETTE_SIZE); } done: if ((ret = av_frame_ref(data, s->frame)) < 0) return ret; *got_frame = 1; /* always report that the buffer was completely consumed */ return avpkt->size; } | 18,321 |
0 | int ffio_open2_wrapper(struct AVFormatContext *s, AVIOContext **pb, const char *url, int flags, const AVIOInterruptCB *int_cb, AVDictionary **options) { return avio_open2(pb, url, flags, int_cb, options); } | 18,322 |
1 | static void rbd_finish_aiocb(rbd_completion_t c, RADOSCB *rcb) { int ret; rcb->ret = rbd_aio_get_return_value(c); rbd_aio_release(c); ret = qemu_rbd_send_pipe(rcb->s, rcb); if (ret < 0) { error_report("failed writing to acb->s->fds"); g_free(rcb); } } | 18,325 |
1 | static void pcihotplug_write(void *opaque, uint32_t addr, uint32_t val) { struct pci_status *g = opaque; switch (addr) { case PCI_BASE: g->up = val; break; case PCI_BASE + 4: g->down = val; break; } PIIX4_DPRINTF("pcihotplug write %x <== %d\n", addr, val); } | 18,327 |
1 | static void put_pixels_clamped2_c(const DCTELEM *block, uint8_t *restrict pixels, int line_size) { int i; uint8_t *cm = ff_cropTbl + MAX_NEG_CROP; /* read the pixels */ for(i=0;i<2;i++) { pixels[0] = cm[block[0]]; pixels[1] = cm[block[1]]; pixels += line_size; block += 8; } } | 18,330 |
1 | bool qemu_file_is_writable(QEMUFile *f) { return f->ops->writev_buffer || f->ops->put_buffer; } | 18,331 |
1 | static int encode_thread(AVCodecContext *c, void *arg){ MpegEncContext *s= *(void**)arg; int mb_x, mb_y, pdif = 0; int chr_h= 16>>s->chroma_y_shift; int i, j; MpegEncContext best_s, backup_s; uint8_t bit_buf[2][MAX_MB_BYTES]; uint8_t bit_buf2[2][MAX_MB_BYTES]; uint8_t bit_buf_tex[2][MAX_MB_BYTES]; PutBitContext pb[2], pb2[2], tex_pb[2]; for(i=0; i<2; i++){ init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES); init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES); init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES); } s->last_bits= put_bits_count(&s->pb); s->mv_bits=0; s->misc_bits=0; s->i_tex_bits=0; s->p_tex_bits=0; s->i_count=0; s->f_count=0; s->b_count=0; s->skip_count=0; for(i=0; i<3; i++){ /* init last dc values */ /* note: quant matrix value (8) is implied here */ s->last_dc[i] = 128 << s->intra_dc_precision; s->current_picture.f.error[i] = 0; } s->mb_skip_run = 0; memset(s->last_mv, 0, sizeof(s->last_mv)); s->last_mv_dir = 0; switch(s->codec_id){ case AV_CODEC_ID_H263: case AV_CODEC_ID_H263P: case AV_CODEC_ID_FLV1: if (CONFIG_H263_ENCODER) s->gob_index = ff_h263_get_gob_height(s); break; case AV_CODEC_ID_MPEG4: if(CONFIG_MPEG4_ENCODER && s->partitioned_frame) ff_mpeg4_init_partitions(s); break; } s->resync_mb_x=0; s->resync_mb_y=0; s->first_slice_line = 1; s->ptr_lastgob = s->pb.buf; for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) { s->mb_x=0; s->mb_y= mb_y; ff_set_qscale(s, s->qscale); ff_init_block_index(s); for(mb_x=0; mb_x < s->mb_width; mb_x++) { int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this int mb_type= s->mb_type[xy]; // int d; int dmin= INT_MAX; int dir; if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){ av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n"); return -1; } if(s->data_partitioning){ if( s->pb2 .buf_end - s->pb2 .buf - (put_bits_count(&s-> pb2)>>3) < MAX_MB_BYTES || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){ av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n"); return -1; } } s->mb_x = mb_x; s->mb_y = mb_y; // moved into loop, can get changed by H.261 ff_update_block_index(s); if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){ ff_h261_reorder_mb_index(s); xy= s->mb_y*s->mb_stride + s->mb_x; mb_type= s->mb_type[xy]; } /* write gob / video packet header */ if(s->rtp_mode){ int current_packet_size, is_gob_start; current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf); is_gob_start= s->avctx->rtp_payload_size && current_packet_size >= s->avctx->rtp_payload_size && mb_y + mb_x>0; if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1; switch(s->codec_id){ case AV_CODEC_ID_H263: case AV_CODEC_ID_H263P: if(!s->h263_slice_structured) if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0; break; case AV_CODEC_ID_MPEG2VIDEO: if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1; case AV_CODEC_ID_MPEG1VIDEO: if(s->mb_skip_run) is_gob_start=0; break; } if(is_gob_start){ if(s->start_mb_y != mb_y || mb_x!=0){ write_slice_end(s); if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){ ff_mpeg4_init_partitions(s); } } assert((put_bits_count(&s->pb)&7) == 0); current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob; if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) { int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y; int d = 100 / s->error_rate; if(r % d == 0){ current_packet_size=0; s->pb.buf_ptr= s->ptr_lastgob; assert(put_bits_ptr(&s->pb) == s->ptr_lastgob); } 
} if (s->avctx->rtp_callback){ int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x; s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb); } update_mb_info(s, 1); switch(s->codec_id){ case AV_CODEC_ID_MPEG4: if (CONFIG_MPEG4_ENCODER) { ff_mpeg4_encode_video_packet_header(s); ff_mpeg4_clean_buffers(s); } break; case AV_CODEC_ID_MPEG1VIDEO: case AV_CODEC_ID_MPEG2VIDEO: if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) { ff_mpeg1_encode_slice_header(s); ff_mpeg1_clean_buffers(s); } break; case AV_CODEC_ID_H263: case AV_CODEC_ID_H263P: if (CONFIG_H263_ENCODER) ff_h263_encode_gob_header(s, mb_y); break; } if(s->flags&CODEC_FLAG_PASS1){ int bits= put_bits_count(&s->pb); s->misc_bits+= bits - s->last_bits; s->last_bits= bits; } s->ptr_lastgob += current_packet_size; s->first_slice_line=1; s->resync_mb_x=mb_x; s->resync_mb_y=mb_y; } } if( (s->resync_mb_x == s->mb_x) && s->resync_mb_y+1 == s->mb_y){ s->first_slice_line=0; } s->mb_skipped=0; s->dquant=0; //only for QP_RD update_mb_info(s, 0); if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD int next_block=0; int pb_bits_count, pb2_bits_count, tex_pb_bits_count; copy_context_before_encode(&backup_s, s, -1); backup_s.pb= s->pb; best_s.data_partitioning= s->data_partitioning; best_s.partitioned_frame= s->partitioned_frame; if(s->data_partitioning){ backup_s.pb2= s->pb2; backup_s.tex_pb= s->tex_pb; } if(mb_type&CANDIDATE_MB_TYPE_INTER){ s->mv_dir = MV_DIR_FORWARD; s->mv_type = MV_TYPE_16X16; s->mb_intra= 0; s->mv[0][0][0] = s->p_mv_table[xy][0]; s->mv[0][0][1] = s->p_mv_table[xy][1]; encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb, &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]); } if(mb_type&CANDIDATE_MB_TYPE_INTER_I){ s->mv_dir = MV_DIR_FORWARD; s->mv_type = MV_TYPE_FIELD; s->mb_intra= 0; for(i=0; i<2; i++){ j= s->field_select[0][i] = s->p_field_select_table[i][xy]; s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0]; s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1]; } encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb, &dmin, &next_block, 0, 0); } if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){ s->mv_dir = MV_DIR_FORWARD; s->mv_type = MV_TYPE_16X16; s->mb_intra= 0; s->mv[0][0][0] = 0; s->mv[0][0][1] = 0; encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb, &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]); } if(mb_type&CANDIDATE_MB_TYPE_INTER4V){ s->mv_dir = MV_DIR_FORWARD; s->mv_type = MV_TYPE_8X8; s->mb_intra= 0; for(i=0; i<4; i++){ s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0]; s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1]; } encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb, &dmin, &next_block, 0, 0); } if(mb_type&CANDIDATE_MB_TYPE_FORWARD){ s->mv_dir = MV_DIR_FORWARD; s->mv_type = MV_TYPE_16X16; s->mb_intra= 0; s->mv[0][0][0] = s->b_forw_mv_table[xy][0]; s->mv[0][0][1] = s->b_forw_mv_table[xy][1]; encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb, &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]); } if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){ s->mv_dir = MV_DIR_BACKWARD; s->mv_type = MV_TYPE_16X16; s->mb_intra= 0; s->mv[1][0][0] = s->b_back_mv_table[xy][0]; s->mv[1][0][1] = s->b_back_mv_table[xy][1]; encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb, &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]); 
} if(mb_type&CANDIDATE_MB_TYPE_BIDIR){ s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD; s->mv_type = MV_TYPE_16X16; s->mb_intra= 0; s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0]; s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1]; s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0]; s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1]; encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb, &dmin, &next_block, 0, 0); } if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){ s->mv_dir = MV_DIR_FORWARD; s->mv_type = MV_TYPE_FIELD; s->mb_intra= 0; for(i=0; i<2; i++){ j= s->field_select[0][i] = s->b_field_select_table[0][i][xy]; s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0]; s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1]; } encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb, &dmin, &next_block, 0, 0); } if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){ s->mv_dir = MV_DIR_BACKWARD; s->mv_type = MV_TYPE_FIELD; s->mb_intra= 0; for(i=0; i<2; i++){ j= s->field_select[1][i] = s->b_field_select_table[1][i][xy]; s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0]; s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1]; } encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb, &dmin, &next_block, 0, 0); } if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){ s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD; s->mv_type = MV_TYPE_FIELD; s->mb_intra= 0; for(dir=0; dir<2; dir++){ for(i=0; i<2; i++){ j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy]; s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0]; s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1]; } } encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb, &dmin, &next_block, 0, 0); } if(mb_type&CANDIDATE_MB_TYPE_INTRA){ s->mv_dir = 0; s->mv_type = MV_TYPE_16X16; s->mb_intra= 1; s->mv[0][0][0] = 0; s->mv[0][0][1] = 0; encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb, &dmin, &next_block, 0, 0); if(s->h263_pred || s->h263_aic){ if(best_s.mb_intra) s->mbintra_table[mb_x + mb_y*s->mb_stride]=1; else ff_clean_intra_table_entries(s); //old mode? } } if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) { if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD const int last_qp= backup_s.qscale; int qpi, qp, dc[6]; int16_t ac[6][16]; const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0; static const int dquant_tab[4]={-1,1,-2,2}; assert(backup_s.dquant == 0); //FIXME intra s->mv_dir= best_s.mv_dir; s->mv_type = MV_TYPE_16X16; s->mb_intra= best_s.mb_intra; s->mv[0][0][0] = best_s.mv[0][0][0]; s->mv[0][0][1] = best_s.mv[0][0][1]; s->mv[1][0][0] = best_s.mv[1][0][0]; s->mv[1][0][1] = best_s.mv[1][0][1]; qpi = s->pict_type == AV_PICTURE_TYPE_B ? 
2 : 0; for(; qpi<4; qpi++){ int dquant= dquant_tab[qpi]; qp= last_qp + dquant; if(qp < s->avctx->qmin || qp > s->avctx->qmax) continue; backup_s.dquant= dquant; if(s->mb_intra && s->dc_val[0]){ for(i=0; i<6; i++){ dc[i]= s->dc_val[0][ s->block_index[i] ]; memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16); } } encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb, &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]); if(best_s.qscale != qp){ if(s->mb_intra && s->dc_val[0]){ for(i=0; i<6; i++){ s->dc_val[0][ s->block_index[i] ]= dc[i]; memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16); } } } } } } if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){ int mx= s->b_direct_mv_table[xy][0]; int my= s->b_direct_mv_table[xy][1]; backup_s.dquant = 0; s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT; s->mb_intra= 0; ff_mpeg4_set_direct_mv(s, mx, my); encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb, &dmin, &next_block, mx, my); } if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){ backup_s.dquant = 0; s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT; s->mb_intra= 0; ff_mpeg4_set_direct_mv(s, 0, 0); encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb, &dmin, &next_block, 0, 0); } if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) { int coded=0; for(i=0; i<6; i++) coded |= s->block_last_index[i]; if(coded){ int mx,my; memcpy(s->mv, best_s.mv, sizeof(s->mv)); if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){ mx=my=0; //FIXME find the one we actually used ff_mpeg4_set_direct_mv(s, mx, my); }else if(best_s.mv_dir&MV_DIR_BACKWARD){ mx= s->mv[1][0][0]; my= s->mv[1][0][1]; }else{ mx= s->mv[0][0][0]; my= s->mv[0][0][1]; } s->mv_dir= best_s.mv_dir; s->mv_type = best_s.mv_type; s->mb_intra= 0; /* s->mv[0][0][0] = best_s.mv[0][0][0]; s->mv[0][0][1] = best_s.mv[0][0][1]; s->mv[1][0][0] = best_s.mv[1][0][0]; s->mv[1][0][1] = best_s.mv[1][0][1];*/ backup_s.dquant= 0; s->skipdct=1; encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb, &dmin, &next_block, mx, my); s->skipdct=0; } } s->current_picture.qscale_table[xy] = best_s.qscale; copy_context_after_encode(s, &best_s, -1); pb_bits_count= put_bits_count(&s->pb); flush_put_bits(&s->pb); avpriv_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count); s->pb= backup_s.pb; if(s->data_partitioning){ pb2_bits_count= put_bits_count(&s->pb2); flush_put_bits(&s->pb2); avpriv_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count); s->pb2= backup_s.pb2; tex_pb_bits_count= put_bits_count(&s->tex_pb); flush_put_bits(&s->tex_pb); avpriv_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count); s->tex_pb= backup_s.tex_pb; } s->last_bits= put_bits_count(&s->pb); if (CONFIG_H263_ENCODER && s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B) ff_h263_update_motion_val(s); if(next_block==0){ //FIXME 16 vs linesize16 s->hdsp.put_pixels_tab[0][0](s->dest[0], s->rd_scratchpad , s->linesize ,16); s->hdsp.put_pixels_tab[1][0](s->dest[1], s->rd_scratchpad + 16*s->linesize , s->uvlinesize, 8); s->hdsp.put_pixels_tab[1][0](s->dest[2], s->rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8); } if(s->avctx->mb_decision == FF_MB_DECISION_BITS) ff_MPV_decode_mb(s, s->block); } else { int motion_x = 0, motion_y = 0; s->mv_type=MV_TYPE_16X16; // only one MB-Type possible switch(mb_type){ case CANDIDATE_MB_TYPE_INTRA: 
s->mv_dir = 0; s->mb_intra= 1; motion_x= s->mv[0][0][0] = 0; motion_y= s->mv[0][0][1] = 0; break; case CANDIDATE_MB_TYPE_INTER: s->mv_dir = MV_DIR_FORWARD; s->mb_intra= 0; motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0]; motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1]; break; case CANDIDATE_MB_TYPE_INTER_I: s->mv_dir = MV_DIR_FORWARD; s->mv_type = MV_TYPE_FIELD; s->mb_intra= 0; for(i=0; i<2; i++){ j= s->field_select[0][i] = s->p_field_select_table[i][xy]; s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0]; s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1]; } break; case CANDIDATE_MB_TYPE_INTER4V: s->mv_dir = MV_DIR_FORWARD; s->mv_type = MV_TYPE_8X8; s->mb_intra= 0; for(i=0; i<4; i++){ s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0]; s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1]; } break; case CANDIDATE_MB_TYPE_DIRECT: if (CONFIG_MPEG4_ENCODER) { s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT; s->mb_intra= 0; motion_x=s->b_direct_mv_table[xy][0]; motion_y=s->b_direct_mv_table[xy][1]; ff_mpeg4_set_direct_mv(s, motion_x, motion_y); } break; case CANDIDATE_MB_TYPE_DIRECT0: if (CONFIG_MPEG4_ENCODER) { s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT; s->mb_intra= 0; ff_mpeg4_set_direct_mv(s, 0, 0); } break; case CANDIDATE_MB_TYPE_BIDIR: s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD; s->mb_intra= 0; s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0]; s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1]; s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0]; s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1]; break; case CANDIDATE_MB_TYPE_BACKWARD: s->mv_dir = MV_DIR_BACKWARD; s->mb_intra= 0; motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0]; motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1]; break; case CANDIDATE_MB_TYPE_FORWARD: s->mv_dir = MV_DIR_FORWARD; s->mb_intra= 0; motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0]; motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1]; break; case CANDIDATE_MB_TYPE_FORWARD_I: s->mv_dir = MV_DIR_FORWARD; s->mv_type = MV_TYPE_FIELD; s->mb_intra= 0; for(i=0; i<2; i++){ j= s->field_select[0][i] = s->b_field_select_table[0][i][xy]; s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0]; s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1]; } break; case CANDIDATE_MB_TYPE_BACKWARD_I: s->mv_dir = MV_DIR_BACKWARD; s->mv_type = MV_TYPE_FIELD; s->mb_intra= 0; for(i=0; i<2; i++){ j= s->field_select[1][i] = s->b_field_select_table[1][i][xy]; s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0]; s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1]; } break; case CANDIDATE_MB_TYPE_BIDIR_I: s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD; s->mv_type = MV_TYPE_FIELD; s->mb_intra= 0; for(dir=0; dir<2; dir++){ for(i=0; i<2; i++){ j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy]; s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0]; s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1]; } } break; default: av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n"); } encode_mb(s, motion_x, motion_y); // RAL: Update last macroblock type s->last_mv_dir = s->mv_dir; if (CONFIG_H263_ENCODER && s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B) ff_h263_update_motion_val(s); ff_MPV_decode_mb(s, s->block); } /* clean the MV table in IPS frames for direct mode in B frames */ if(s->mb_intra /* && I,P,S_TYPE */){ s->p_mv_table[xy][0]=0; s->p_mv_table[xy][1]=0; } if(s->flags&CODEC_FLAG_PSNR){ int w= 16; int h= 16; if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16; if(s->mb_y*16 + 16 > s->height) h= 
s->height- s->mb_y*16; s->current_picture.f.error[0] += sse( s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize); s->current_picture.f.error[1] += sse( s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h, s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize); s->current_picture.f.error[2] += sse( s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h, s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize); } if(s->loop_filter){ if(CONFIG_H263_ENCODER && s->out_format == FMT_H263) ff_h263_loop_filter(s); } av_dlog(s->avctx, "MB %d %d bits\n", s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb)); } } //not beautiful here but we must write it before flushing so it has to be here if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I) ff_msmpeg4_encode_ext_header(s); write_slice_end(s); /* Send the last GOB if RTP */ if (s->avctx->rtp_callback) { int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x; pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob; /* Call the RTP callback to send the last GOB */ emms_c(); s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb); } return 0; } | 18,332 |
1 | static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num, int nb_sectors) { return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE, nb_sectors * BDRV_SECTOR_SIZE); | 18,333 |
1 | int oggvorbis_init_encoder(vorbis_info *vi, AVCodecContext *avccontext) { if(avccontext->coded_frame->quality) /* VBR requested */ return vorbis_encode_init_vbr(vi, avccontext->channels, avccontext->sample_rate, (float)avccontext->coded_frame->quality / 1000) ; return vorbis_encode_init(vi, avccontext->channels, avccontext->sample_rate, -1, avccontext->bit_rate, -1) ; } | 18,334 |
1 | static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { ARMCPU *cpu = arm_env_get_cpu(env); int timeridx = ri->crm & 1; uint32_t oldval = env->cp15.c14_timer[timeridx].ctl; env->cp15.c14_timer[timeridx].ctl = value & 3; if ((oldval ^ value) & 1) { /* Enable toggled */ gt_recalc_timer(cpu, timeridx); } else if ((oldval & value) & 2) { /* IMASK toggled: don't need to recalculate, * just set the interrupt line based on ISTATUS */ qemu_set_irq(cpu->gt_timer_outputs[timeridx], (oldval & 4) && (value & 2)); } } | 18,335 |
1 | static av_cold int vp8_decode_free(AVCodecContext *avctx) { vp8_decode_flush_impl(avctx, 0, 1); release_queued_segmaps(avctx->priv_data, 1); return 0; } | 18,336 |
0 | static int a64_write_header(AVFormatContext *s) { AVCodecContext *avctx = s->streams[0]->codec; uint8_t header[5] = { 0x00, //load 0x40, //address 0x00, //mode 0x00, //charset_lifetime (multi only) 0x00 //fps in 50/fps; }; if (avctx->extradata_size < 4) { av_log(s, AV_LOG_ERROR, "Missing extradata\n"); return AVERROR(EINVAL); } switch (avctx->codec->id) { case AV_CODEC_ID_A64_MULTI: header[2] = 0x00; header[3] = AV_RB32(avctx->extradata+0); header[4] = 2; break; case AV_CODEC_ID_A64_MULTI5: header[2] = 0x01; header[3] = AV_RB32(avctx->extradata+0); header[4] = 3; break; default: return AVERROR(EINVAL); } avio_write(s->pb, header, 2); return 0; } | 18,337 |
0 | static int64_t seek_to_sector(BlockDriverState *bs, int64_t sector_num) { BDRVBochsState *s = bs->opaque; uint64_t offset = sector_num * 512; uint64_t extent_index, extent_offset, bitmap_offset; char bitmap_entry; // seek to sector extent_index = offset / s->extent_size; extent_offset = (offset % s->extent_size) / 512; if (s->catalog_bitmap[extent_index] == 0xffffffff) { return -1; /* not allocated */ } bitmap_offset = s->data_offset + (512 * (uint64_t) s->catalog_bitmap[extent_index] * (s->extent_blocks + s->bitmap_blocks)); /* read in bitmap for current extent */ if (bdrv_pread(bs->file, bitmap_offset + (extent_offset / 8), &bitmap_entry, 1) != 1) { return -1; } if (!((bitmap_entry >> (extent_offset % 8)) & 1)) { return -1; /* not allocated */ } return bitmap_offset + (512 * (s->bitmap_blocks + extent_offset)); } | 18,338 |
0 | static void slirp_bootp_save(QEMUFile *f, Slirp *slirp) { int i; for (i = 0; i < NB_BOOTP_CLIENTS; i++) { qemu_put_be16(f, slirp->bootp_clients[i].allocated); qemu_put_buffer(f, slirp->bootp_clients[i].macaddr, 6); } } | 18,339 |
0 | int qemu_opts_id_wellformed(const char *id) { int i; if (!qemu_isalpha(id[0])) { return 0; } for (i = 1; id[i]; i++) { if (!qemu_isalnum(id[i]) && !strchr("-._", id[i])) { return 0; } } return 1; } | 18,340 |
0 | ssize_t slirp_send(struct socket *so, const void *buf, size_t len, int flags) { if (so->s == -1 && so->extra) { qemu_chr_fe_write(so->extra, buf, len); return len; } return send(so->s, buf, len, flags); } | 18,341 |
0 | int nbd_init(int fd, int csock, uint32_t flags, off_t size, size_t blocksize) { TRACE("Setting NBD socket"); if (ioctl(fd, NBD_SET_SOCK, csock) < 0) { int serrno = errno; LOG("Failed to set NBD socket"); errno = serrno; return -1; } TRACE("Setting block size to %lu", (unsigned long)blocksize); if (ioctl(fd, NBD_SET_BLKSIZE, blocksize) < 0) { int serrno = errno; LOG("Failed setting NBD block size"); errno = serrno; return -1; } TRACE("Setting size to %zd block(s)", (size_t)(size / blocksize)); if (ioctl(fd, NBD_SET_SIZE_BLOCKS, size / blocksize) < 0) { int serrno = errno; LOG("Failed setting size (in blocks)"); errno = serrno; return -1; } if (flags & NBD_FLAG_READ_ONLY) { int read_only = 1; TRACE("Setting readonly attribute"); if (ioctl(fd, BLKROSET, (unsigned long) &read_only) < 0) { int serrno = errno; LOG("Failed setting read-only attribute"); errno = serrno; return -1; } } if (ioctl(fd, NBD_SET_FLAGS, flags) < 0 && errno != ENOTTY) { int serrno = errno; LOG("Failed setting flags"); errno = serrno; return -1; } TRACE("Negotiation ended"); return 0; } | 18,343 |
0 | static void rgb24_to_yuvj420p(AVPicture *dst, AVPicture *src, int width, int height) { int wrap, wrap3, width2; int r, g, b, r1, g1, b1, w; uint8_t *lum, *cb, *cr; const uint8_t *p; lum = dst->data[0]; cb = dst->data[1]; cr = dst->data[2]; width2 = (width + 1) >> 1; wrap = dst->linesize[0]; wrap3 = src->linesize[0]; p = src->data[0]; for(;height>=2;height -= 2) { for(w = width; w >= 2; w -= 2) { RGB_IN(r, g, b, p); r1 = r; g1 = g; b1 = b; lum[0] = RGB_TO_Y(r, g, b); RGB_IN(r, g, b, p + BPP); r1 += r; g1 += g; b1 += b; lum[1] = RGB_TO_Y(r, g, b); p += wrap3; lum += wrap; RGB_IN(r, g, b, p); r1 += r; g1 += g; b1 += b; lum[0] = RGB_TO_Y(r, g, b); RGB_IN(r, g, b, p + BPP); r1 += r; g1 += g; b1 += b; lum[1] = RGB_TO_Y(r, g, b); cb[0] = RGB_TO_U(r1, g1, b1, 2); cr[0] = RGB_TO_V(r1, g1, b1, 2); cb++; cr++; p += -wrap3 + 2 * BPP; lum += -wrap + 2; } if (w) { RGB_IN(r, g, b, p); r1 = r; g1 = g; b1 = b; lum[0] = RGB_TO_Y(r, g, b); p += wrap3; lum += wrap; RGB_IN(r, g, b, p); r1 += r; g1 += g; b1 += b; lum[0] = RGB_TO_Y(r, g, b); cb[0] = RGB_TO_U(r1, g1, b1, 1); cr[0] = RGB_TO_V(r1, g1, b1, 1); cb++; cr++; p += -wrap3 + BPP; lum += -wrap + 1; } p += wrap3 + (wrap3 - width * BPP); lum += wrap + (wrap - width); cb += dst->linesize[1] - width2; cr += dst->linesize[2] - width2; } /* handle odd height */ if (height) { for(w = width; w >= 2; w -= 2) { RGB_IN(r, g, b, p); r1 = r; g1 = g; b1 = b; lum[0] = RGB_TO_Y(r, g, b); RGB_IN(r, g, b, p + BPP); r1 += r; g1 += g; b1 += b; lum[1] = RGB_TO_Y(r, g, b); cb[0] = RGB_TO_U(r1, g1, b1, 1); cr[0] = RGB_TO_V(r1, g1, b1, 1); cb++; cr++; p += 2 * BPP; lum += 2; } if (w) { RGB_IN(r, g, b, p); lum[0] = RGB_TO_Y(r, g, b); cb[0] = RGB_TO_U(r, g, b, 0); cr[0] = RGB_TO_V(r, g, b, 0); } } } | 18,346 |
0 | static void do_multiwrite(BlockDriverState *bs, BlockRequest *blkreq, int num_writes) { int i, ret; ret = bdrv_aio_multiwrite(bs, blkreq, num_writes); if (ret != 0) { for (i = 0; i < num_writes; i++) { if (blkreq[i].error) { virtio_blk_rw_complete(blkreq[i].opaque, -EIO); } } } } | 18,348 |
0 | static void qjson_register_types(void) { type_register_static(&qjson_type_info); } | 18,350 |
0 | void css_generate_sch_crws(uint8_t cssid, uint8_t ssid, uint16_t schid, int hotplugged, int add) { uint8_t guest_cssid; bool chain_crw; if (add && !hotplugged) { return; } if (channel_subsys.max_cssid == 0) { /* Default cssid shows up as 0. */ guest_cssid = (cssid == channel_subsys.default_cssid) ? 0 : cssid; } else { /* Show real cssid to the guest. */ guest_cssid = cssid; } /* * Only notify for higher subchannel sets/channel subsystems if the * guest has enabled it. */ if ((ssid > channel_subsys.max_ssid) || (guest_cssid > channel_subsys.max_cssid) || ((channel_subsys.max_cssid == 0) && (cssid != channel_subsys.default_cssid))) { return; } chain_crw = (channel_subsys.max_ssid > 0) || (channel_subsys.max_cssid > 0); css_queue_crw(CRW_RSC_SUBCH, CRW_ERC_IPI, chain_crw ? 1 : 0, schid); if (chain_crw) { css_queue_crw(CRW_RSC_SUBCH, CRW_ERC_IPI, 0, (guest_cssid << 8) | (ssid << 4)); } /* RW_ERC_IPI --> clear pending interrupts */ css_clear_io_interrupt(css_do_build_subchannel_id(cssid, ssid), schid); } | 18,352 |
0 | static void term_insert_char(int ch) { if (term_cmd_buf_index < TERM_CMD_BUF_SIZE) { memmove(term_cmd_buf + term_cmd_buf_index + 1, term_cmd_buf + term_cmd_buf_index, term_cmd_buf_size - term_cmd_buf_index); term_cmd_buf[term_cmd_buf_index] = ch; term_cmd_buf_size++; term_cmd_buf_index++; } } | 18,354 |
0 | static void spapr_phb_add_pci_device(sPAPRDRConnector *drc, sPAPRPHBState *phb, PCIDevice *pdev, Error **errp) { sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc); DeviceState *dev = DEVICE(pdev); int drc_index = drck->get_index(drc); void *fdt = NULL; int fdt_start_offset = 0, fdt_size; if (dev->hotplugged) { fdt = create_device_tree(&fdt_size); fdt_start_offset = spapr_create_pci_child_dt(phb, pdev, drc_index, NULL, fdt, 0); if (!fdt_start_offset) { error_setg(errp, "Failed to create pci child device tree node"); goto out; } } drck->attach(drc, DEVICE(pdev), fdt, fdt_start_offset, !dev->hotplugged, errp); out: if (*errp) { g_free(fdt); } } | 18,355 |
0 | static void decode_scaling_matrices(H264Context *h, SPS *sps, PPS *pps, int is_sps, uint8_t(*scaling_matrix4)[16], uint8_t(*scaling_matrix8)[64]) { int fallback_sps = !is_sps && sps->scaling_matrix_present; const uint8_t *fallback[4] = { fallback_sps ? sps->scaling_matrix4[0] : default_scaling4[0], fallback_sps ? sps->scaling_matrix4[3] : default_scaling4[1], fallback_sps ? sps->scaling_matrix8[0] : default_scaling8[0], fallback_sps ? sps->scaling_matrix8[3] : default_scaling8[1] }; if (get_bits1(&h->gb)) { sps->scaling_matrix_present |= is_sps; decode_scaling_list(h, scaling_matrix4[0], 16, default_scaling4[0], fallback[0]); // Intra, Y decode_scaling_list(h, scaling_matrix4[1], 16, default_scaling4[0], scaling_matrix4[0]); // Intra, Cr decode_scaling_list(h, scaling_matrix4[2], 16, default_scaling4[0], scaling_matrix4[1]); // Intra, Cb decode_scaling_list(h, scaling_matrix4[3], 16, default_scaling4[1], fallback[1]); // Inter, Y decode_scaling_list(h, scaling_matrix4[4], 16, default_scaling4[1], scaling_matrix4[3]); // Inter, Cr decode_scaling_list(h, scaling_matrix4[5], 16, default_scaling4[1], scaling_matrix4[4]); // Inter, Cb if (is_sps || pps->transform_8x8_mode) { decode_scaling_list(h, scaling_matrix8[0], 64, default_scaling8[0], fallback[2]); // Intra, Y if (sps->chroma_format_idc == 3) { decode_scaling_list(h, scaling_matrix8[1], 64, default_scaling8[0], scaling_matrix8[0]); // Intra, Cr decode_scaling_list(h, scaling_matrix8[2], 64, default_scaling8[0], scaling_matrix8[1]); // Intra, Cb } decode_scaling_list(h, scaling_matrix8[3], 64, default_scaling8[1], fallback[3]); // Inter, Y if (sps->chroma_format_idc == 3) { decode_scaling_list(h, scaling_matrix8[4], 64, default_scaling8[1], scaling_matrix8[3]); // Inter, Cr decode_scaling_list(h, scaling_matrix8[5], 64, default_scaling8[1], scaling_matrix8[4]); // Inter, Cb } } } } | 18,357 |
0 | static void macio_nvram_writeb(void *opaque, target_phys_addr_t addr, uint64_t value, unsigned size) { MacIONVRAMState *s = opaque; addr = (addr >> s->it_shift) & (s->size - 1); s->data[addr] = value; NVR_DPRINTF("writeb addr %04x val %x\n", (int)addr, value); } | 18,358 |
0 | static void fill_prstatus(struct target_elf_prstatus *prstatus, const TaskState *ts, int signr) { (void) memset(prstatus, 0, sizeof (*prstatus)); prstatus->pr_info.si_signo = prstatus->pr_cursig = signr; prstatus->pr_pid = ts->ts_tid; prstatus->pr_ppid = getppid(); prstatus->pr_pgrp = getpgrp(); prstatus->pr_sid = getsid(0); #ifdef BSWAP_NEEDED bswap_prstatus(prstatus); #endif } | 18,359 |
0 | static int usb_host_read_file(char *line, size_t line_size, const char *device_file, const char *device_name) { FILE *f; int ret = 0; char filename[PATH_MAX]; snprintf(filename, PATH_MAX, device_file, device_name); f = fopen(filename, "r"); if (f) { fgets(line, line_size, f); fclose(f); ret = 1; } else { term_printf("husb: could not open %s\n", filename); } return ret; } | 18,360 |
0 | static uint32_t sdhci_read_dataport(SDHCIState *s, unsigned size) { uint32_t value = 0; int i; /* first check that a valid data exists in host controller input buffer */ if ((s->prnsts & SDHC_DATA_AVAILABLE) == 0) { ERRPRINT("Trying to read from empty buffer\n"); return 0; } for (i = 0; i < size; i++) { value |= s->fifo_buffer[s->data_count] << i * 8; s->data_count++; /* check if we've read all valid data (blksize bytes) from buffer */ if ((s->data_count) >= (s->blksize & 0x0fff)) { DPRINT_L2("All %u bytes of data have been read from input buffer\n", s->data_count); s->prnsts &= ~SDHC_DATA_AVAILABLE; /* no more data in a buffer */ s->data_count = 0; /* next buff read must start at position [0] */ if (s->trnmod & SDHC_TRNS_BLK_CNT_EN) { s->blkcnt--; } /* if that was the last block of data */ if ((s->trnmod & SDHC_TRNS_MULTI) == 0 || ((s->trnmod & SDHC_TRNS_BLK_CNT_EN) && (s->blkcnt == 0)) || /* stop at gap request */ (s->stopped_state == sdhc_gap_read && !(s->prnsts & SDHC_DAT_LINE_ACTIVE))) { SDHCI_GET_CLASS(s)->end_data_transfer(s); } else { /* if there are more data, read next block from card */ SDHCI_GET_CLASS(s)->read_block_from_card(s); } break; } } return value; } | 18,362 |
0 | static void pc_fw_add_pflash_drv(void) { QemuOpts *opts; QEMUMachine *machine; char *filename; if (bios_name == NULL) { bios_name = BIOS_FILENAME; } filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); opts = drive_add(IF_PFLASH, -1, filename, "readonly=on"); g_free(filename); if (opts == NULL) { return; } machine = find_default_machine(); if (machine == NULL) { return; } if (!drive_init(opts, machine->use_scsi)) { qemu_opts_del(opts); } } | 18,363 |
0 | static void escc_mem_write(void *opaque, target_phys_addr_t addr, uint64_t val, unsigned size) { SerialState *serial = opaque; ChannelState *s; uint32_t saddr; int newreg, channel; val &= 0xff; saddr = (addr >> serial->it_shift) & 1; channel = (addr >> (serial->it_shift + 1)) & 1; s = &serial->chn[channel]; switch (saddr) { case SERIAL_CTRL: trace_escc_mem_writeb_ctrl(CHN_C(s), s->reg, val & 0xff); newreg = 0; switch (s->reg) { case W_CMD: newreg = val & CMD_PTR_MASK; val &= CMD_CMD_MASK; switch (val) { case CMD_HI: newreg |= CMD_HI; break; case CMD_CLR_TXINT: clr_txint(s); break; case CMD_CLR_IUS: if (s->rxint_under_svc) { s->rxint_under_svc = 0; if (s->txint) { set_txint(s); } } else if (s->txint_under_svc) { s->txint_under_svc = 0; } escc_update_irq(s); break; default: break; } break; case W_INTR ... W_RXCTRL: case W_SYNC1 ... W_TXBUF: case W_MISC1 ... W_CLOCK: case W_MISC2 ... W_EXTINT: s->wregs[s->reg] = val; break; case W_TXCTRL1: case W_TXCTRL2: s->wregs[s->reg] = val; escc_update_parameters(s); break; case W_BRGLO: case W_BRGHI: s->wregs[s->reg] = val; s->rregs[s->reg] = val; escc_update_parameters(s); break; case W_MINTR: switch (val & MINTR_RST_MASK) { case 0: default: break; case MINTR_RST_B: escc_reset_chn(&serial->chn[0]); return; case MINTR_RST_A: escc_reset_chn(&serial->chn[1]); return; case MINTR_RST_ALL: escc_reset(&serial->busdev.qdev); return; } break; default: break; } if (s->reg == 0) s->reg = newreg; else s->reg = 0; break; case SERIAL_DATA: trace_escc_mem_writeb_data(CHN_C(s), val); s->tx = val; if (s->wregs[W_TXCTRL2] & TXCTRL2_TXEN) { // tx enabled if (s->chr) qemu_chr_fe_write(s->chr, &s->tx, 1); else if (s->type == kbd && !s->disabled) { handle_kbd_command(s, val); } } s->rregs[R_STATUS] |= STATUS_TXEMPTY; // Tx buffer empty s->rregs[R_SPEC] |= SPEC_ALLSENT; // All sent set_txint(s); break; default: break; } } | 18,364 |
0 | int ff_cbs_write_packet(CodedBitstreamContext *ctx, AVPacket *pkt, CodedBitstreamFragment *frag) { int err; err = ff_cbs_write_fragment_data(ctx, frag); if (err < 0) return err; av_new_packet(pkt, frag->data_size); if (err < 0) return err; memcpy(pkt->data, frag->data, frag->data_size); pkt->size = frag->data_size; return 0; } | 18,365 |
0 | static av_cold int sunrast_encode_close(AVCodecContext *avctx) { av_frame_free(&avctx->coded_frame); return 0; } | 18,367 |
0 | static void RENAME(yadif_filter_line)(uint8_t *dst, uint8_t *prev, uint8_t *cur, uint8_t *next, int w, int prefs, int mrefs, int parity, int mode) { DECLARE_ALIGNED(16, uint8_t, tmp0)[16]; DECLARE_ALIGNED(16, uint8_t, tmp1)[16]; DECLARE_ALIGNED(16, uint8_t, tmp2)[16]; DECLARE_ALIGNED(16, uint8_t, tmp3)[16]; int x; #define FILTER\ for(x=0; x<w; x+=STEP){\ __asm__ volatile(\ "pxor "MM"7, "MM"7 \n\t"\ LOAD("(%[cur],%[mrefs])", MM"0") /* c = cur[x-refs] */\ LOAD("(%[cur],%[prefs])", MM"1") /* e = cur[x+refs] */\ LOAD("(%["prev2"])", MM"2") /* prev2[x] */\ LOAD("(%["next2"])", MM"3") /* next2[x] */\ MOVQ" "MM"3, "MM"4 \n\t"\ "paddw "MM"2, "MM"3 \n\t"\ "psraw $1, "MM"3 \n\t" /* d = (prev2[x] + next2[x])>>1 */\ MOVQ" "MM"0, %[tmp0] \n\t" /* c */\ MOVQ" "MM"3, %[tmp1] \n\t" /* d */\ MOVQ" "MM"1, %[tmp2] \n\t" /* e */\ "psubw "MM"4, "MM"2 \n\t"\ PABS( MM"4", MM"2") /* temporal_diff0 */\ LOAD("(%[prev],%[mrefs])", MM"3") /* prev[x-refs] */\ LOAD("(%[prev],%[prefs])", MM"4") /* prev[x+refs] */\ "psubw "MM"0, "MM"3 \n\t"\ "psubw "MM"1, "MM"4 \n\t"\ PABS( MM"5", MM"3")\ PABS( MM"5", MM"4")\ "paddw "MM"4, "MM"3 \n\t" /* temporal_diff1 */\ "psrlw $1, "MM"2 \n\t"\ "psrlw $1, "MM"3 \n\t"\ "pmaxsw "MM"3, "MM"2 \n\t"\ LOAD("(%[next],%[mrefs])", MM"3") /* next[x-refs] */\ LOAD("(%[next],%[prefs])", MM"4") /* next[x+refs] */\ "psubw "MM"0, "MM"3 \n\t"\ "psubw "MM"1, "MM"4 \n\t"\ PABS( MM"5", MM"3")\ PABS( MM"5", MM"4")\ "paddw "MM"4, "MM"3 \n\t" /* temporal_diff2 */\ "psrlw $1, "MM"3 \n\t"\ "pmaxsw "MM"3, "MM"2 \n\t"\ MOVQ" "MM"2, %[tmp3] \n\t" /* diff */\ \ "paddw "MM"0, "MM"1 \n\t"\ "paddw "MM"0, "MM"0 \n\t"\ "psubw "MM"1, "MM"0 \n\t"\ "psrlw $1, "MM"1 \n\t" /* spatial_pred */\ PABS( MM"2", MM"0") /* ABS(c-e) */\ \ MOVQU" -1(%[cur],%[mrefs]), "MM"2 \n\t" /* cur[x-refs-1] */\ MOVQU" -1(%[cur],%[prefs]), "MM"3 \n\t" /* cur[x+refs-1] */\ MOVQ" "MM"2, "MM"4 \n\t"\ "psubusb "MM"3, "MM"2 \n\t"\ "psubusb "MM"4, "MM"3 \n\t"\ "pmaxub "MM"3, "MM"2 \n\t"\ PSHUF(MM"3", MM"2") \ "punpcklbw "MM"7, "MM"2 \n\t" /* ABS(cur[x-refs-1] - cur[x+refs-1]) */\ "punpcklbw "MM"7, "MM"3 \n\t" /* ABS(cur[x-refs+1] - cur[x+refs+1]) */\ "paddw "MM"2, "MM"0 \n\t"\ "paddw "MM"3, "MM"0 \n\t"\ "psubw "MANGLE(pw_1)", "MM"0 \n\t" /* spatial_score */\ \ CHECK(-2,0)\ CHECK1\ CHECK(-3,1)\ CHECK2\ CHECK(0,-2)\ CHECK1\ CHECK(1,-3)\ CHECK2\ \ /* if(p->mode<2) ... 
*/\ MOVQ" %[tmp3], "MM"6 \n\t" /* diff */\ "cmpl $2, %[mode] \n\t"\ "jge 1f \n\t"\ LOAD("(%["prev2"],%[mrefs],2)", MM"2") /* prev2[x-2*refs] */\ LOAD("(%["next2"],%[mrefs],2)", MM"4") /* next2[x-2*refs] */\ LOAD("(%["prev2"],%[prefs],2)", MM"3") /* prev2[x+2*refs] */\ LOAD("(%["next2"],%[prefs],2)", MM"5") /* next2[x+2*refs] */\ "paddw "MM"4, "MM"2 \n\t"\ "paddw "MM"5, "MM"3 \n\t"\ "psrlw $1, "MM"2 \n\t" /* b */\ "psrlw $1, "MM"3 \n\t" /* f */\ MOVQ" %[tmp0], "MM"4 \n\t" /* c */\ MOVQ" %[tmp1], "MM"5 \n\t" /* d */\ MOVQ" %[tmp2], "MM"7 \n\t" /* e */\ "psubw "MM"4, "MM"2 \n\t" /* b-c */\ "psubw "MM"7, "MM"3 \n\t" /* f-e */\ MOVQ" "MM"5, "MM"0 \n\t"\ "psubw "MM"4, "MM"5 \n\t" /* d-c */\ "psubw "MM"7, "MM"0 \n\t" /* d-e */\ MOVQ" "MM"2, "MM"4 \n\t"\ "pminsw "MM"3, "MM"2 \n\t"\ "pmaxsw "MM"4, "MM"3 \n\t"\ "pmaxsw "MM"5, "MM"2 \n\t"\ "pminsw "MM"5, "MM"3 \n\t"\ "pmaxsw "MM"0, "MM"2 \n\t" /* max */\ "pminsw "MM"0, "MM"3 \n\t" /* min */\ "pxor "MM"4, "MM"4 \n\t"\ "pmaxsw "MM"3, "MM"6 \n\t"\ "psubw "MM"2, "MM"4 \n\t" /* -max */\ "pmaxsw "MM"4, "MM"6 \n\t" /* diff= MAX3(diff, min, -max); */\ "1: \n\t"\ \ MOVQ" %[tmp1], "MM"2 \n\t" /* d */\ MOVQ" "MM"2, "MM"3 \n\t"\ "psubw "MM"6, "MM"2 \n\t" /* d-diff */\ "paddw "MM"6, "MM"3 \n\t" /* d+diff */\ "pmaxsw "MM"2, "MM"1 \n\t"\ "pminsw "MM"3, "MM"1 \n\t" /* d = clip(spatial_pred, d-diff, d+diff); */\ "packuswb "MM"1, "MM"1 \n\t"\ \ :[tmp0]"=m"(tmp0),\ [tmp1]"=m"(tmp1),\ [tmp2]"=m"(tmp2),\ [tmp3]"=m"(tmp3)\ :[prev] "r"(prev),\ [cur] "r"(cur),\ [next] "r"(next),\ [prefs]"r"((x86_reg)prefs),\ [mrefs]"r"((x86_reg)mrefs),\ [mode] "g"(mode)\ );\ __asm__ volatile(MOV" "MM"1, %0" :"=m"(*dst));\ dst += STEP;\ prev+= STEP;\ cur += STEP;\ next+= STEP;\ } if (parity) { #define prev2 "prev" #define next2 "cur" FILTER #undef prev2 #undef next2 } else { #define prev2 "cur" #define next2 "next" FILTER #undef prev2 #undef next2 } } | 18,368 |