label (int64, 0-1) | func1 (stringlengths, 23-97k) | id (int64, 0-27.3k) |
---|---|---|
0 | ssize_t nbd_wr_syncv(QIOChannel *ioc, struct iovec *iov, size_t niov, size_t length, bool do_read, Error **errp) { ssize_t done = 0; struct iovec *local_iov = g_new(struct iovec, niov); struct iovec *local_iov_head = local_iov; unsigned int nlocal_iov = niov; nlocal_iov = iov_copy(local_iov, nlocal_iov, iov, niov, 0, length); while (nlocal_iov > 0) { ssize_t len; if (do_read) { len = qio_channel_readv(ioc, local_iov, nlocal_iov, errp); } else { len = qio_channel_writev(ioc, local_iov, nlocal_iov, errp); } if (len == QIO_CHANNEL_ERR_BLOCK) { /* errp should not be set */ assert(qemu_in_coroutine()); qio_channel_yield(ioc, do_read ? G_IO_IN : G_IO_OUT); continue; } if (len < 0) { done = -EIO; goto cleanup; } if (do_read && len == 0) { break; } iov_discard_front(&local_iov, &nlocal_iov, len); done += len; } cleanup: g_free(local_iov_head); return done; } | 21,363 |
0 | void qemu_init_vcpu(void *_env) { CPUState *env = _env; if (kvm_enabled()) kvm_init_vcpu(env); env->nr_cores = smp_cores; env->nr_threads = smp_threads; return; } | 21,364 |
0 | void kvm_inject_x86_mce(CPUState *cenv, int bank, uint64_t status, uint64_t mcg_status, uint64_t addr, uint64_t misc, int flag) { #ifdef KVM_CAP_MCE struct kvm_x86_mce mce = { .bank = bank, .status = status, .mcg_status = mcg_status, .addr = addr, .misc = misc, }; if (flag & MCE_BROADCAST) { kvm_mce_broadcast_rest(cenv); } kvm_inject_x86_mce_on(cenv, &mce, flag); #else /* !KVM_CAP_MCE*/ if (flag & ABORT_ON_ERROR) { abort(); } #endif /* !KVM_CAP_MCE*/ } | 21,365 |
0 | readv_f(int argc, char **argv) { struct timeval t1, t2; int Cflag = 0, qflag = 0, vflag = 0; int c, cnt; char *buf; int64_t offset; int total; int nr_iov; QEMUIOVector qiov; int pattern = 0; int Pflag = 0; while ((c = getopt(argc, argv, "CP:qv")) != EOF) { switch (c) { case 'C': Cflag = 1; break; case 'P': Pflag = 1; pattern = atoi(optarg); break; case 'q': qflag = 1; break; case 'v': vflag = 1; break; default: return command_usage(&readv_cmd); } } if (optind > argc - 2) return command_usage(&readv_cmd); offset = cvtnum(argv[optind]); if (offset < 0) { printf("non-numeric length argument -- %s\n", argv[optind]); return 0; } optind++; if (offset & 0x1ff) { printf("offset %lld is not sector aligned\n", (long long)offset); return 0; } nr_iov = argc - optind; buf = create_iovec(&qiov, &argv[optind], nr_iov, 0xab); gettimeofday(&t1, NULL); cnt = do_aio_readv(&qiov, offset, &total); gettimeofday(&t2, NULL); if (cnt < 0) { printf("readv failed: %s\n", strerror(-cnt)); goto out; } if (Pflag) { void* cmp_buf = malloc(qiov.size); memset(cmp_buf, pattern, qiov.size); if (memcmp(buf, cmp_buf, qiov.size)) { printf("Pattern verification failed at offset %lld, " "%zd bytes\n", (long long) offset, qiov.size); } free(cmp_buf); } if (qflag) goto out; if (vflag) dump_buffer(buf, offset, qiov.size); /* Finally, report back -- -C gives a parsable format */ t2 = tsub(t2, t1); print_report("read", &t2, offset, qiov.size, total, cnt, Cflag); out: qemu_io_free(buf); return 0; } | 21,366 |
0 | static int init_directories(BDRVVVFATState* s, const char *dirname, int heads, int secs) { bootsector_t* bootsector; mapping_t* mapping; unsigned int i; unsigned int cluster; memset(&(s->first_sectors[0]),0,0x40*0x200); s->cluster_size=s->sectors_per_cluster*0x200; s->cluster_buffer=g_malloc(s->cluster_size); /* * The formula: sc = spf+1+spf*spc*(512*8/fat_type), * where sc is sector_count, * spf is sectors_per_fat, * spc is sectors_per_clusters, and * fat_type = 12, 16 or 32. */ i = 1+s->sectors_per_cluster*0x200*8/s->fat_type; s->sectors_per_fat=(s->sector_count+i)/i; /* round up */ array_init(&(s->mapping),sizeof(mapping_t)); array_init(&(s->directory),sizeof(direntry_t)); /* add volume label */ { direntry_t* entry=array_get_next(&(s->directory)); entry->attributes=0x28; /* archive | volume label */ memcpy(entry->name,"QEMU VVF",8); memcpy(entry->extension,"AT ",3); } /* Now build FAT, and write back information into directory */ init_fat(s); s->faked_sectors=s->first_sectors_number+s->sectors_per_fat*2; s->cluster_count=sector2cluster(s, s->sector_count); mapping = array_get_next(&(s->mapping)); mapping->begin = 0; mapping->dir_index = 0; mapping->info.dir.parent_mapping_index = -1; mapping->first_mapping_index = -1; mapping->path = g_strdup(dirname); i = strlen(mapping->path); if (i > 0 && mapping->path[i - 1] == '/') mapping->path[i - 1] = '\0'; mapping->mode = MODE_DIRECTORY; mapping->read_only = 0; s->path = mapping->path; for (i = 0, cluster = 0; i < s->mapping.next; i++) { /* MS-DOS expects the FAT to be 0 for the root directory * (except for the media byte). */ /* LATER TODO: still true for FAT32? */ int fix_fat = (i != 0); mapping = array_get(&(s->mapping), i); if (mapping->mode & MODE_DIRECTORY) { mapping->begin = cluster; if(read_directory(s, i)) { fprintf(stderr, "Could not read directory %s\n", mapping->path); return -1; } mapping = array_get(&(s->mapping), i); } else { assert(mapping->mode == MODE_UNDEFINED); mapping->mode=MODE_NORMAL; mapping->begin = cluster; if (mapping->end > 0) { direntry_t* direntry = array_get(&(s->directory), mapping->dir_index); mapping->end = cluster + 1 + (mapping->end-1)/s->cluster_size; set_begin_of_direntry(direntry, mapping->begin); } else { mapping->end = cluster + 1; fix_fat = 0; } } assert(mapping->begin < mapping->end); /* next free cluster */ cluster = mapping->end; if(cluster > s->cluster_count) { fprintf(stderr,"Directory does not fit in FAT%d (capacity %.2f MB)\n", s->fat_type, s->sector_count / 2000.0); return -EINVAL; } /* fix fat for entry */ if (fix_fat) { int j; for(j = mapping->begin; j < mapping->end - 1; j++) fat_set(s, j, j+1); fat_set(s, mapping->end - 1, s->max_fat_value); } } mapping = array_get(&(s->mapping), 0); s->sectors_of_root_directory = mapping->end * s->sectors_per_cluster; s->last_cluster_of_root_directory = mapping->end; /* the FAT signature */ fat_set(s,0,s->max_fat_value); fat_set(s,1,s->max_fat_value); s->current_mapping = NULL; bootsector=(bootsector_t*)(s->first_sectors+(s->first_sectors_number-1)*0x200); bootsector->jump[0]=0xeb; bootsector->jump[1]=0x3e; bootsector->jump[2]=0x90; memcpy(bootsector->name,"QEMU ",8); bootsector->sector_size=cpu_to_le16(0x200); bootsector->sectors_per_cluster=s->sectors_per_cluster; bootsector->reserved_sectors=cpu_to_le16(1); bootsector->number_of_fats=0x2; /* number of FATs */ bootsector->root_entries=cpu_to_le16(s->sectors_of_root_directory*0x10); bootsector->total_sectors16=s->sector_count>0xffff?0:cpu_to_le16(s->sector_count); bootsector->media_type=(s->first_sectors_number>1?0xf8:0xf0); /* media descriptor (f8=hd, f0=3.5 fd)*/ s->fat.pointer[0] = bootsector->media_type; bootsector->sectors_per_fat=cpu_to_le16(s->sectors_per_fat); bootsector->sectors_per_track = cpu_to_le16(secs); bootsector->number_of_heads = cpu_to_le16(heads); bootsector->hidden_sectors=cpu_to_le32(s->first_sectors_number==1?0:0x3f); bootsector->total_sectors=cpu_to_le32(s->sector_count>0xffff?s->sector_count:0); /* LATER TODO: if FAT32, this is wrong */ bootsector->u.fat16.drive_number=s->first_sectors_number==1?0:0x80; /* fda=0, hda=0x80 */ bootsector->u.fat16.current_head=0; bootsector->u.fat16.signature=0x29; bootsector->u.fat16.id=cpu_to_le32(0xfabe1afd); memcpy(bootsector->u.fat16.volume_label,"QEMU VVFAT ",11); memcpy(bootsector->fat_type,(s->fat_type==12?"FAT12 ":s->fat_type==16?"FAT16 ":"FAT32 "),8); bootsector->magic[0]=0x55; bootsector->magic[1]=0xaa; return 0; } | 21,367 |
0 | int kvm_vcpu_ioctl(CPUState *env, int type, ...) { int ret; void *arg; va_list ap; va_start(ap, type); arg = va_arg(ap, void *); va_end(ap); ret = ioctl(env->kvm_fd, type, arg); if (ret == -1) ret = -errno; return ret; } | 21,368 |
0 | ssize_t qemu_put_compression_data(QEMUFile *f, const uint8_t *p, size_t size, int level) { ssize_t blen = IO_BUF_SIZE - f->buf_index - sizeof(int32_t); if (blen < compressBound(size)) { return 0; } if (compress2(f->buf + f->buf_index + sizeof(int32_t), (uLongf *)&blen, (Bytef *)p, size, level) != Z_OK) { error_report("Compress Failed!"); return 0; } qemu_put_be32(f, blen); f->buf_index += blen; return blen + sizeof(int32_t); } | 21,370 |
0 | static inline void vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y, int mvn, int r_x, int r_y, uint8_t* is_intra, int dir) { MpegEncContext *s = &v->s; int xy, wrap, off = 0; int A[2], B[2], C[2]; int px = 0, py = 0; int a_valid = 0, b_valid = 0, c_valid = 0; int field_a, field_b, field_c; // 0: same, 1: opposit int total_valid, num_samefield, num_oppfield; int pos_c, pos_b, n_adj; wrap = s->b8_stride; xy = s->block_index[n]; if (s->mb_intra) { s->mv[0][n][0] = s->current_picture.motion_val[0][xy][0] = 0; s->mv[0][n][1] = s->current_picture.motion_val[0][xy][1] = 0; s->current_picture.motion_val[1][xy][0] = 0; s->current_picture.motion_val[1][xy][1] = 0; if (mvn == 1) { /* duplicate motion data for 1-MV block */ s->current_picture.motion_val[0][xy + 1][0] = 0; s->current_picture.motion_val[0][xy + 1][1] = 0; s->current_picture.motion_val[0][xy + wrap][0] = 0; s->current_picture.motion_val[0][xy + wrap][1] = 0; s->current_picture.motion_val[0][xy + wrap + 1][0] = 0; s->current_picture.motion_val[0][xy + wrap + 1][1] = 0; v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0; s->current_picture.motion_val[1][xy + 1][0] = 0; s->current_picture.motion_val[1][xy + 1][1] = 0; s->current_picture.motion_val[1][xy + wrap][0] = 0; s->current_picture.motion_val[1][xy + wrap][1] = 0; s->current_picture.motion_val[1][xy + wrap + 1][0] = 0; s->current_picture.motion_val[1][xy + wrap + 1][1] = 0; } return; } off = ((n == 0) || (n == 1)) ? 1 : -1; /* predict A */ if (s->mb_x || (n == 1) || (n == 3)) { if ((v->blk_mv_type[xy]) // current block (MB) has a field MV || (!v->blk_mv_type[xy] && !v->blk_mv_type[xy - 1])) { // or both have frame MV A[0] = s->current_picture.motion_val[dir][xy - 1][0]; A[1] = s->current_picture.motion_val[dir][xy - 1][1]; a_valid = 1; } else { // current block has frame mv and cand. has field MV (so average) A[0] = (s->current_picture.motion_val[dir][xy - 1][0] + s->current_picture.motion_val[dir][xy - 1 + off * wrap][0] + 1) >> 1; A[1] = (s->current_picture.motion_val[dir][xy - 1][1] + s->current_picture.motion_val[dir][xy - 1 + off * wrap][1] + 1) >> 1; a_valid = 1; } if (!(n & 1) && v->is_intra[s->mb_x - 1]) { a_valid = 0; A[0] = A[1] = 0; } } else A[0] = A[1] = 0; /* Predict B and C */ B[0] = B[1] = C[0] = C[1] = 0; if (n == 0 || n == 1 || v->blk_mv_type[xy]) { if (!s->first_slice_line) { if (!v->is_intra[s->mb_x - s->mb_stride]) { b_valid = 1; n_adj = n | 2; pos_b = s->block_index[n_adj] - 2 * wrap; if (v->blk_mv_type[pos_b] && v->blk_mv_type[xy]) { n_adj = (n & 2) | (n & 1); } B[0] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap][0]; B[1] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap][1]; if (v->blk_mv_type[pos_b] && !v->blk_mv_type[xy]) { B[0] = (B[0] + s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap][0] + 1) >> 1; B[1] = (B[1] + s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap][1] + 1) >> 1; } } if (s->mb_width > 1) { if (!v->is_intra[s->mb_x - s->mb_stride + 1]) { c_valid = 1; n_adj = 2; pos_c = s->block_index[2] - 2 * wrap + 2; if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) { n_adj = n & 2; } C[0] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap + 2][0]; C[1] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap + 2][1]; if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) { C[0] = (1 + C[0] + (s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap + 2][0])) >> 1; C[1] = (1 + C[1] + (s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap + 2][1])) >> 1; } if (s->mb_x == s->mb_width - 1) { if (!v->is_intra[s->mb_x - s->mb_stride - 1]) { c_valid = 1; n_adj = 3; pos_c = s->block_index[3] - 2 * wrap - 2; if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) { n_adj = n | 1; } C[0] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap - 2][0]; C[1] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap - 2][1]; if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) { C[0] = (1 + C[0] + s->current_picture.motion_val[dir][s->block_index[1] - 2 * wrap - 2][0]) >> 1; C[1] = (1 + C[1] + s->current_picture.motion_val[dir][s->block_index[1] - 2 * wrap - 2][1]) >> 1; } } else c_valid = 0; } } } } } else { pos_b = s->block_index[1]; b_valid = 1; B[0] = s->current_picture.motion_val[dir][pos_b][0]; B[1] = s->current_picture.motion_val[dir][pos_b][1]; pos_c = s->block_index[0]; c_valid = 1; C[0] = s->current_picture.motion_val[dir][pos_c][0]; C[1] = s->current_picture.motion_val[dir][pos_c][1]; } total_valid = a_valid + b_valid + c_valid; // check if predictor A is out of bounds if (!s->mb_x && !(n == 1 || n == 3)) { A[0] = A[1] = 0; } // check if predictor B is out of bounds if ((s->first_slice_line && v->blk_mv_type[xy]) || (s->first_slice_line && !(n & 2))) { B[0] = B[1] = C[0] = C[1] = 0; } if (!v->blk_mv_type[xy]) { if (s->mb_width == 1) { px = B[0]; py = B[1]; } else { if (total_valid >= 2) { px = mid_pred(A[0], B[0], C[0]); py = mid_pred(A[1], B[1], C[1]); } else if (total_valid) { if (a_valid) { px = A[0]; py = A[1]; } else if (b_valid) { px = B[0]; py = B[1]; } else if (c_valid) { px = C[0]; py = C[1]; } else av_assert2(0); } } } else { if (a_valid) field_a = (A[1] & 4) ? 1 : 0; else field_a = 0; if (b_valid) field_b = (B[1] & 4) ? 1 : 0; else field_b = 0; if (c_valid) field_c = (C[1] & 4) ? 1 : 0; else field_c = 0; num_oppfield = field_a + field_b + field_c; num_samefield = total_valid - num_oppfield; if (total_valid == 3) { if ((num_samefield == 3) || (num_oppfield == 3)) { px = mid_pred(A[0], B[0], C[0]); py = mid_pred(A[1], B[1], C[1]); } else if (num_samefield >= num_oppfield) { /* take one MV from same field set depending on priority the check for B may not be necessary */ px = !field_a ? A[0] : B[0]; py = !field_a ? A[1] : B[1]; } else { px = field_a ? A[0] : B[0]; py = field_a ? A[1] : B[1]; } } else if (total_valid == 2) { if (num_samefield >= num_oppfield) { if (!field_a && a_valid) { px = A[0]; py = A[1]; } else if (!field_b && b_valid) { px = B[0]; py = B[1]; } else if (c_valid) { px = C[0]; py = C[1]; } else px = py = 0; } else { if (field_a && a_valid) { px = A[0]; py = A[1]; } else if (field_b && b_valid) { px = B[0]; py = B[1]; } else if (c_valid) { px = C[0]; py = C[1]; } } } else if (total_valid == 1) { px = (a_valid) ? A[0] : ((b_valid) ? B[0] : C[0]); py = (a_valid) ? A[1] : ((b_valid) ? B[1] : C[1]); } } /* store MV using signed modulus of MV range defined in 4.11 */ s->mv[dir][n][0] = s->current_picture.motion_val[dir][xy][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x; s->mv[dir][n][1] = s->current_picture.motion_val[dir][xy][1] = ((py + dmv_y + r_y) & ((r_y << 1) - 1)) - r_y; if (mvn == 1) { /* duplicate motion data for 1-MV block */ s->current_picture.motion_val[dir][xy + 1 ][0] = s->current_picture.motion_val[dir][xy][0]; s->current_picture.motion_val[dir][xy + 1 ][1] = s->current_picture.motion_val[dir][xy][1]; s->current_picture.motion_val[dir][xy + wrap ][0] = s->current_picture.motion_val[dir][xy][0]; s->current_picture.motion_val[dir][xy + wrap ][1] = s->current_picture.motion_val[dir][xy][1]; s->current_picture.motion_val[dir][xy + wrap + 1][0] = s->current_picture.motion_val[dir][xy][0]; s->current_picture.motion_val[dir][xy + wrap + 1][1] = s->current_picture.motion_val[dir][xy][1]; } else if (mvn == 2) { /* duplicate motion data for 2-Field MV block */ s->current_picture.motion_val[dir][xy + 1][0] = s->current_picture.motion_val[dir][xy][0]; s->current_picture.motion_val[dir][xy + 1][1] = s->current_picture.motion_val[dir][xy][1]; s->mv[dir][n + 1][0] = s->mv[dir][n][0]; s->mv[dir][n + 1][1] = s->mv[dir][n][1]; } } | 21,371 |
1 | static void free_buffers(VP8Context *s) { int i; if (s->thread_data) for (i = 0; i < MAX_THREADS; i++) { av_freep(&s->thread_data[i].filter_strength); av_freep(&s->thread_data[i].edge_emu_buffer); } av_freep(&s->thread_data); av_freep(&s->macroblocks_base); av_freep(&s->intra4x4_pred_mode_top); av_freep(&s->top_nnz); av_freep(&s->top_border); s->macroblocks = NULL; } | 21,373 |
1 | static void intel_hda_realize(PCIDevice *pci, Error **errp) { IntelHDAState *d = INTEL_HDA(pci); uint8_t *conf = d->pci.config; d->name = object_get_typename(OBJECT(d)); pci_config_set_interrupt_pin(conf, 1); /* HDCTL off 0x40 bit 0 selects signaling mode (1-HDA, 0 - Ac97) 18.1.19 */ conf[0x40] = 0x01; memory_region_init_io(&d->mmio, OBJECT(d), &intel_hda_mmio_ops, d, "intel-hda", 0x4000); pci_register_bar(&d->pci, 0, 0, &d->mmio); if (d->msi != ON_OFF_AUTO_OFF) { /* TODO check for errors */ msi_init(&d->pci, d->old_msi_addr ? 0x50 : 0x60, 1, true, false); } hda_codec_bus_init(DEVICE(pci), &d->codecs, sizeof(d->codecs), intel_hda_response, intel_hda_xfer); } | 21,375 |
1 | static void vnc_dpy_copy(DisplayChangeListener *dcl, int src_x, int src_y, int dst_x, int dst_y, int w, int h) { VncDisplay *vd = container_of(dcl, VncDisplay, dcl); VncState *vs, *vn; uint8_t *src_row; uint8_t *dst_row; int i, x, y, pitch, inc, w_lim, s; int cmp_bytes; vnc_refresh_server_surface(vd); QTAILQ_FOREACH_SAFE(vs, &vd->clients, next, vn) { if (vnc_has_feature(vs, VNC_FEATURE_COPYRECT)) { vs->force_update = 1; vnc_update_client(vs, 1, true); /* vs might be free()ed here */ /* do bitblit op on the local surface too */ pitch = vnc_server_fb_stride(vd); src_row = vnc_server_fb_ptr(vd, src_x, src_y); dst_row = vnc_server_fb_ptr(vd, dst_x, dst_y); y = dst_y; inc = 1; if (dst_y > src_y) { /* copy backwards */ src_row += pitch * (h-1); dst_row += pitch * (h-1); pitch = -pitch; y = dst_y + h - 1; inc = -1; w_lim = w - (VNC_DIRTY_PIXELS_PER_BIT - (dst_x % VNC_DIRTY_PIXELS_PER_BIT)); if (w_lim < 0) { w_lim = w; } else { w_lim = w - (w_lim % VNC_DIRTY_PIXELS_PER_BIT); for (i = 0; i < h; i++) { for (x = 0; x <= w_lim; x += s, src_row += cmp_bytes, dst_row += cmp_bytes) { if (x == w_lim) { if ((s = w - w_lim) == 0) break; } else if (!x) { s = (VNC_DIRTY_PIXELS_PER_BIT - (dst_x % VNC_DIRTY_PIXELS_PER_BIT)); s = MIN(s, w_lim); } else { s = VNC_DIRTY_PIXELS_PER_BIT; cmp_bytes = s * VNC_SERVER_FB_BYTES; if (memcmp(src_row, dst_row, cmp_bytes) == 0) continue; memmove(dst_row, src_row, cmp_bytes); QTAILQ_FOREACH(vs, &vd->clients, next) { if (!vnc_has_feature(vs, VNC_FEATURE_COPYRECT)) { set_bit(((x + dst_x) / VNC_DIRTY_PIXELS_PER_BIT), vs->dirty[y]); src_row += pitch - w * VNC_SERVER_FB_BYTES; dst_row += pitch - w * VNC_SERVER_FB_BYTES; y += inc; QTAILQ_FOREACH(vs, &vd->clients, next) { if (vnc_has_feature(vs, VNC_FEATURE_COPYRECT)) { vnc_copy(vs, src_x, src_y, dst_x, dst_y, w, h); | 21,376 |
1 | static int write_refcount_block_entries(BlockDriverState *bs, int64_t refcount_block_offset, int first_index, int last_index) { BDRVQcowState *s = bs->opaque; size_t size; int ret; if (cache_refcount_updates) { first_index &= ~(REFCOUNTS_PER_SECTOR - 1); last_index = (last_index + REFCOUNTS_PER_SECTOR) & ~(REFCOUNTS_PER_SECTOR - 1); size = (last_index - first_index) << REFCOUNT_SHIFT; BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_UPDATE_PART); ret = bdrv_pwrite(bs->file, refcount_block_offset + (first_index << REFCOUNT_SHIFT), &s->refcount_block_cache[first_index], size); if (ret < 0) { return ret; | 21,377 |
1 | static int qemu_rdma_source_init(RDMAContext *rdma, Error **errp, bool pin_all) { int ret, idx; Error *local_err = NULL, **temp = &local_err; /* * Will be validated against destination's actual capabilities * after the connect() completes. */ rdma->pin_all = pin_all; ret = qemu_rdma_resolve_host(rdma, temp); if (ret) { goto err_rdma_source_init; } ret = qemu_rdma_alloc_pd_cq(rdma); if (ret) { ERROR(temp, "rdma migration: error allocating pd and cq! Your mlock()" " limits may be too low. Please check $ ulimit -a # and " "search for 'ulimit -l' in the output"); goto err_rdma_source_init; } ret = qemu_rdma_alloc_qp(rdma); if (ret) { ERROR(temp, "rdma migration: error allocating qp!"); goto err_rdma_source_init; } ret = qemu_rdma_init_ram_blocks(rdma); if (ret) { ERROR(temp, "rdma migration: error initializing ram blocks!"); goto err_rdma_source_init; } for (idx = 0; idx < RDMA_WRID_MAX; idx++) { ret = qemu_rdma_reg_control(rdma, idx); if (ret) { ERROR(temp, "rdma migration: error registering %d control!", idx); goto err_rdma_source_init; } } return 0; err_rdma_source_init: error_propagate(errp, local_err); qemu_rdma_cleanup(rdma); return -1; } | 21,378 |
1 | static int curl_open(BlockDriverState *bs, QDict *options, int flags, Error **errp) { BDRVCURLState *s = bs->opaque; CURLState *state = NULL; QemuOpts *opts; Error *local_err = NULL; const char *file; double d; static int inited = 0; if (flags & BDRV_O_RDWR) { error_setg(errp, "curl block device does not support writes"); return -EROFS; } opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort); qemu_opts_absorb_qdict(opts, options, &local_err); if (local_err) { error_propagate(errp, local_err); goto out_noclean; } s->readahead_size = qemu_opt_get_size(opts, CURL_BLOCK_OPT_READAHEAD, READ_AHEAD_DEFAULT); if ((s->readahead_size & 0x1ff) != 0) { error_setg(errp, "HTTP_READAHEAD_SIZE %zd is not a multiple of 512", s->readahead_size); goto out_noclean; } file = qemu_opt_get(opts, CURL_BLOCK_OPT_URL); if (file == NULL) { error_setg(errp, "curl block driver requires an 'url' option"); goto out_noclean; } if (!inited) { curl_global_init(CURL_GLOBAL_ALL); inited = 1; } DPRINTF("CURL: Opening %s\n", file); s->url = g_strdup(file); state = curl_init_state(s); if (!state) goto out_noclean; // Get file size s->accept_range = false; curl_easy_setopt(state->curl, CURLOPT_NOBODY, 1); curl_easy_setopt(state->curl, CURLOPT_HEADERFUNCTION, curl_header_cb); curl_easy_setopt(state->curl, CURLOPT_HEADERDATA, s); if (curl_easy_perform(state->curl)) goto out; curl_easy_getinfo(state->curl, CURLINFO_CONTENT_LENGTH_DOWNLOAD, &d); if (d) s->len = (size_t)d; else if(!s->len) goto out; if ((!strncasecmp(s->url, "http://", strlen("http://")) || !strncasecmp(s->url, "https://", strlen("https://"))) && !s->accept_range) { pstrcpy(state->errmsg, CURL_ERROR_SIZE, "Server does not support 'range' (byte ranges)."); goto out; } DPRINTF("CURL: Size = %zd\n", s->len); curl_clean_state(state); curl_easy_cleanup(state->curl); state->curl = NULL; aio_timer_init(bdrv_get_aio_context(bs), &s->timer, QEMU_CLOCK_REALTIME, SCALE_NS, curl_multi_timeout_do, s); // Now we know the file exists and its size, so let's // initialize the multi interface! s->multi = curl_multi_init(); curl_multi_setopt(s->multi, CURLMOPT_SOCKETFUNCTION, curl_sock_cb); #ifdef NEED_CURL_TIMER_CALLBACK curl_multi_setopt(s->multi, CURLMOPT_TIMERDATA, s); curl_multi_setopt(s->multi, CURLMOPT_TIMERFUNCTION, curl_timer_cb); #endif qemu_opts_del(opts); return 0; out: error_setg(errp, "CURL: Error opening file: %s", state->errmsg); curl_easy_cleanup(state->curl); state->curl = NULL; out_noclean: g_free(s->url); qemu_opts_del(opts); return -EINVAL; } | 21,379 |
1 | static int compute_pkt_fields2(AVFormatContext *s, AVStream *st, AVPacket *pkt) { int delay = FFMAX(st->codec->has_b_frames, st->codec->max_b_frames > 0); int num, den, frame_size, i; av_dlog(s, "compute_pkt_fields2: pts:%s dts:%s cur_dts:%s b:%d size:%d st:%d\n", av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->cur_dts), delay, pkt->size, pkt->stream_index); /* duration field */ if (pkt->duration == 0) { ff_compute_frame_duration(&num, &den, st, NULL, pkt); if (den && num) { pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den * st->codec->ticks_per_frame, den * (int64_t)st->time_base.num); if (pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay == 0) pkt->pts = pkt->dts; //XXX/FIXME this is a temporary hack until all encoders output pts if ((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay) { static int warned; if (!warned) { av_log(s, AV_LOG_WARNING, "Encoder did not produce proper pts, making some up.\n"); warned = 1; pkt->dts = // pkt->pts= st->cur_dts; pkt->pts = st->pts.val; //calculate dts from pts if (pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY) { st->pts_buffer[0] = pkt->pts; for (i = 1; i < delay + 1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++) st->pts_buffer[i] = pkt->pts + (i - delay - 1) * pkt->duration; for (i = 0; i<delay && st->pts_buffer[i] > st->pts_buffer[i + 1]; i++) FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i + 1]); pkt->dts = st->pts_buffer[0]; if (st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && ((!(s->oformat->flags & AVFMT_TS_NONSTRICT) && st->cur_dts >= pkt->dts) || st->cur_dts > pkt->dts)) { av_log(s, AV_LOG_ERROR, "Application provided invalid, non monotonically increasing dts to muxer in stream %d: %s >= %s\n", st->index, av_ts2str(st->cur_dts), av_ts2str(pkt->dts)); return AVERROR(EINVAL); if (pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts) { av_log(s, AV_LOG_ERROR, "pts (%s) < dts (%s) in stream %d\n", av_ts2str(pkt->pts), av_ts2str(pkt->dts), st->index); return AVERROR(EINVAL); av_dlog(s, "av_write_frame: pts2:%s dts2:%s\n", av_ts2str(pkt->pts), av_ts2str(pkt->dts)); st->cur_dts = pkt->dts; st->pts.val = pkt->dts; /* update pts */ switch (st->codec->codec_type) { case AVMEDIA_TYPE_AUDIO: frame_size = (pkt->flags & AV_PKT_FLAG_UNCODED_FRAME) ? ((AVFrame *)pkt->data)->nb_samples : ff_get_audio_frame_size(st->codec, pkt->size, 1); /* HACK/FIXME, we skip the initial 0 size packets as they are most * likely equal to the encoder delay, but it would be better if we * had the real timestamps from the encoder */ if (frame_size >= 0 && (pkt->size || st->pts.num != st->pts.den >> 1 || st->pts.val)) { frac_add(&st->pts, (int64_t)st->time_base.den * frame_size); break; case AVMEDIA_TYPE_VIDEO: frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num); break; default: break; return 0; | 21,380 |
1 | int qemu_get_fd(QEMUFile *f) { if (f->ops->get_fd) { return f->ops->get_fd(f->opaque); } return -1; } | 21,381 |
1 | static int bdrv_open_common(BlockDriverState *bs, BlockDriverState *file, QDict *options, int flags, BlockDriver *drv, Error **errp) { int ret, open_flags; const char *filename; const char *node_name = NULL; Error *local_err = NULL; assert(drv != NULL); assert(bs->file == NULL); assert(options != NULL && bs->options != options); if (file != NULL) { filename = file->filename; } else { filename = qdict_get_try_str(options, "filename"); if (drv->bdrv_needs_filename && !filename) { error_setg(errp, "The '%s' block driver requires a file name", drv->format_name); return -EINVAL; trace_bdrv_open_common(bs, filename ?: "", flags, drv->format_name); node_name = qdict_get_try_str(options, "node-name"); bdrv_assign_node_name(bs, node_name, &local_err); if (local_err) { error_propagate(errp, local_err); return -EINVAL; qdict_del(options, "node-name"); /* bdrv_open() with directly using a protocol as drv. This layer is already * opened, so assign it to bs (while file becomes a closed BlockDriverState) * and return immediately. */ if (file != NULL && drv->bdrv_file_open) { bdrv_swap(file, bs); return 0; bs->open_flags = flags; bs->guest_block_size = 512; bs->request_alignment = 512; bs->zero_beyond_eof = true; open_flags = bdrv_open_flags(bs, flags); bs->read_only = !(open_flags & BDRV_O_RDWR); if (use_bdrv_whitelist && !bdrv_is_whitelisted(drv, bs->read_only)) { error_setg(errp, !bs->read_only && bdrv_is_whitelisted(drv, true) ? "Driver '%s' can only be used for read-only devices" : "Driver '%s' is not whitelisted", drv->format_name); return -ENOTSUP; assert(bs->copy_on_read == 0); /* bdrv_new() and bdrv_close() make it so */ if (flags & BDRV_O_COPY_ON_READ) { if (!bs->read_only) { bdrv_enable_copy_on_read(bs); } else { error_setg(errp, "Can't use copy-on-read on read-only device"); return -EINVAL; if (filename != NULL) { pstrcpy(bs->filename, sizeof(bs->filename), filename); } else { bs->filename[0] = '\0'; pstrcpy(bs->exact_filename, sizeof(bs->exact_filename), bs->filename); bs->drv = drv; bs->opaque = g_malloc0(drv->instance_size); bs->enable_write_cache = !!(flags & BDRV_O_CACHE_WB); /* Open the image, either directly or using a protocol */ if (drv->bdrv_file_open) { assert(file == NULL); assert(!drv->bdrv_needs_filename || filename != NULL); ret = drv->bdrv_file_open(bs, options, open_flags, &local_err); } else { if (file == NULL) { error_setg(errp, "Can't use '%s' as a block driver for the " "protocol level", drv->format_name); ret = -EINVAL; goto free_and_fail; bs->file = file; ret = drv->bdrv_open(bs, options, open_flags, &local_err); if (ret < 0) { if (local_err) { error_propagate(errp, local_err); } else if (bs->filename[0]) { error_setg_errno(errp, -ret, "Could not open '%s'", bs->filename); } else { error_setg_errno(errp, -ret, "Could not open image"); goto free_and_fail; ret = refresh_total_sectors(bs, bs->total_sectors); if (ret < 0) { error_setg_errno(errp, -ret, "Could not refresh total sector count"); goto free_and_fail; bdrv_refresh_limits(bs, &local_err); if (local_err) { error_propagate(errp, local_err); ret = -EINVAL; goto free_and_fail; assert(bdrv_opt_mem_align(bs) != 0); assert((bs->request_alignment != 0) || bs->sg); return 0; free_and_fail: bs->file = NULL; g_free(bs->opaque); bs->opaque = NULL; bs->drv = NULL; return ret; | 21,382 |
1 | static int virtio_serial_load(QEMUFile *f, void *opaque, int version_id) { VirtIOSerial *s = opaque; VirtIOSerialPort *port; uint32_t max_nr_ports, nr_active_ports, ports_map; unsigned int i; if (version_id > 3) { /* The virtio device */ virtio_load(&s->vdev, f); if (version_id < 2) { return 0; /* The config space */ qemu_get_be16s(f, &s->config.cols); qemu_get_be16s(f, &s->config.rows); qemu_get_be32s(f, &max_nr_ports); if (max_nr_ports > s->config.max_nr_ports) { /* Source could have had more ports than us. Fail migration. */ for (i = 0; i < (max_nr_ports + 31) / 32; i++) { qemu_get_be32s(f, &ports_map); if (ports_map != s->ports_map[i]) { /* * Ports active on source and destination don't * match. Fail migration. */ qemu_get_be32s(f, &nr_active_ports); /* Items in struct VirtIOSerialPort */ for (i = 0; i < nr_active_ports; i++) { uint32_t id; bool host_connected; id = qemu_get_be32(f); port = find_port_by_id(s, id); port->guest_connected = qemu_get_byte(f); host_connected = qemu_get_byte(f); if (host_connected != port->host_connected) { /* * We have to let the guest know of the host connection * status change */ send_control_event(port, VIRTIO_CONSOLE_PORT_OPEN, port->host_connected); if (version_id > 2) { uint32_t elem_popped; qemu_get_be32s(f, &elem_popped); if (elem_popped) { qemu_get_be32s(f, &port->iov_idx); qemu_get_be64s(f, &port->iov_offset); qemu_get_buffer(f, (unsigned char *)&port->elem, sizeof(port->elem)); virtqueue_map_sg(port->elem.in_sg, port->elem.in_addr, port->elem.in_num, 1); virtqueue_map_sg(port->elem.out_sg, port->elem.out_addr, port->elem.out_num, 1); /* * Port was throttled on source machine. Let's * unthrottle it here so data starts flowing again. */ virtio_serial_throttle_port(port, false); return 0; | 21,384 |
1 | static int cdg_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; int ret; uint8_t command, inst; uint8_t cdg_data[CDG_DATA_SIZE]; AVFrame *frame = data; CDGraphicsContext *cc = avctx->priv_data; if (buf_size < CDG_MINIMUM_PKT_SIZE) { av_log(avctx, AV_LOG_ERROR, "buffer too small for decoder\n"); return AVERROR(EINVAL); } if (buf_size > CDG_HEADER_SIZE + CDG_DATA_SIZE) { av_log(avctx, AV_LOG_ERROR, "buffer too big for decoder\n"); return AVERROR(EINVAL); } if ((ret = ff_reget_buffer(avctx, cc->frame)) < 0) return ret; if (!avctx->frame_number) { memset(cc->frame->data[0], 0, cc->frame->linesize[0] * avctx->height); memset(cc->frame->data[1], 0, AVPALETTE_SIZE); } command = bytestream_get_byte(&buf); inst = bytestream_get_byte(&buf); inst &= CDG_MASK; buf += 2; /// skipping 2 unneeded bytes if (buf_size > CDG_HEADER_SIZE) bytestream_get_buffer(&buf, cdg_data, buf_size - CDG_HEADER_SIZE); if ((command & CDG_MASK) == CDG_COMMAND) { switch (inst) { case CDG_INST_MEMORY_PRESET: if (!(cdg_data[1] & 0x0F)) memset(cc->frame->data[0], cdg_data[0] & 0x0F, cc->frame->linesize[0] * CDG_FULL_HEIGHT); break; case CDG_INST_LOAD_PAL_LO: case CDG_INST_LOAD_PAL_HIGH: if (buf_size - CDG_HEADER_SIZE < CDG_DATA_SIZE) { av_log(avctx, AV_LOG_ERROR, "buffer too small for loading palette\n"); return AVERROR(EINVAL); } cdg_load_palette(cc, cdg_data, inst == CDG_INST_LOAD_PAL_LO); break; case CDG_INST_BORDER_PRESET: cdg_border_preset(cc, cdg_data); break; case CDG_INST_TILE_BLOCK_XOR: case CDG_INST_TILE_BLOCK: if (buf_size - CDG_HEADER_SIZE < CDG_DATA_SIZE) { av_log(avctx, AV_LOG_ERROR, "buffer too small for drawing tile\n"); return AVERROR(EINVAL); } ret = cdg_tile_block(cc, cdg_data, inst == CDG_INST_TILE_BLOCK_XOR); if (ret) { av_log(avctx, AV_LOG_ERROR, "tile is out of range\n"); return ret; } break; case CDG_INST_SCROLL_PRESET: case CDG_INST_SCROLL_COPY: if (buf_size - CDG_HEADER_SIZE < CDG_MINIMUM_SCROLL_SIZE) { av_log(avctx, AV_LOG_ERROR, "buffer too small for scrolling\n"); return AVERROR(EINVAL); } if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0) return ret; cdg_scroll(cc, cdg_data, frame, inst == CDG_INST_SCROLL_COPY); av_frame_unref(cc->frame); ret = av_frame_ref(cc->frame, frame); if (ret < 0) return ret; break; default: break; } if (!frame->data[0]) { ret = av_frame_ref(frame, cc->frame); if (ret < 0) return ret; } *got_frame = 1; } else { *got_frame = 0; buf_size = 0; } return buf_size; } | 21,388 |
1 | static int nut_write_header(AVFormatContext *s) { NUTContext *nut = s->priv_data; ByteIOContext *bc = &s->pb; AVCodecContext *codec; int i, j, tmp_time, tmp_flags,tmp_stream, tmp_mul, tmp_size, tmp_fields; nut->avf= s; nut->stream = av_mallocz(sizeof(StreamContext)*s->nb_streams); put_buffer(bc, ID_STRING, strlen(ID_STRING)); put_byte(bc, 0); nut->packet_start[2]= url_ftell(bc); /* main header */ put_be64(bc, MAIN_STARTCODE); put_packetheader(nut, bc, 120+5*256, 1); put_v(bc, 2); /* version */ put_v(bc, s->nb_streams); put_v(bc, MAX_DISTANCE); put_v(bc, MAX_SHORT_DISTANCE); put_v(bc, nut->rate_num=1); put_v(bc, nut->rate_den=2); put_v(bc, nut->short_startcode=0x4EFE79); build_frame_code(s); assert(nut->frame_code['N'].flags == FLAG_INVALID); tmp_time= tmp_flags= tmp_stream= tmp_mul= tmp_size= /*tmp_res=*/ INT_MAX; for(i=0; i<256;){ tmp_fields=0; tmp_size= 0; if(tmp_time != nut->frame_code[i].timestamp_delta) tmp_fields=1; if(tmp_mul != nut->frame_code[i].size_mul ) tmp_fields=2; if(tmp_stream != nut->frame_code[i].stream_id_plus1) tmp_fields=3; if(tmp_size != nut->frame_code[i].size_lsb ) tmp_fields=4; // if(tmp_res != nut->frame_code[i].res ) tmp_fields=5; tmp_time = nut->frame_code[i].timestamp_delta; tmp_flags = nut->frame_code[i].flags; tmp_stream= nut->frame_code[i].stream_id_plus1; tmp_mul = nut->frame_code[i].size_mul; tmp_size = nut->frame_code[i].size_lsb; // tmp_res = nut->frame_code[i].res; for(j=0; i<256; j++,i++){ if(nut->frame_code[i].timestamp_delta != tmp_time ) break; if(nut->frame_code[i].flags != tmp_flags ) break; if(nut->frame_code[i].stream_id_plus1 != tmp_stream) break; if(nut->frame_code[i].size_mul != tmp_mul ) break; if(nut->frame_code[i].size_lsb != tmp_size+j) break; // if(nut->frame_code[i].res != tmp_res ) break; } if(j != tmp_mul - tmp_size) tmp_fields=6; put_v(bc, tmp_flags); put_v(bc, tmp_fields); if(tmp_fields>0) put_s(bc, tmp_time); if(tmp_fields>1) put_v(bc, tmp_mul); if(tmp_fields>2) put_v(bc, tmp_stream); if(tmp_fields>3) put_v(bc, tmp_size); if(tmp_fields>4) put_v(bc, 0 /*tmp_res*/); if(tmp_fields>5) put_v(bc, j); } update_packetheader(nut, bc, 0, 1); /* stream headers */ for (i = 0; i < s->nb_streams; i++) { int nom, denom, gcd; codec = &s->streams[i]->codec; put_be64(bc, STREAM_STARTCODE); put_packetheader(nut, bc, 120 + codec->extradata_size, 1); put_v(bc, i /*s->streams[i]->index*/); put_v(bc, (codec->codec_type == CODEC_TYPE_AUDIO) ? 32 : 0); if (codec->codec_tag) put_vb(bc, codec->codec_tag); else if (codec->codec_type == CODEC_TYPE_VIDEO) { put_vb(bc, codec_get_bmp_tag(codec->codec_id)); } else if (codec->codec_type == CODEC_TYPE_AUDIO) { put_vb(bc, codec_get_wav_tag(codec->codec_id)); } else put_vb(bc, 0); if (codec->codec_type == CODEC_TYPE_VIDEO) { nom = codec->time_base.den; denom = codec->time_base.num; } else { nom = codec->sample_rate; if(codec->frame_size>0) denom= codec->frame_size; else denom= 1; //unlucky } gcd= ff_gcd(nom, denom); nom /= gcd; denom /= gcd; nut->stream[i].rate_num= nom; nut->stream[i].rate_den= denom; av_set_pts_info(s->streams[i], 60, denom, nom); put_v(bc, codec->bit_rate); put_vb(bc, 0); /* no language code */ put_v(bc, nom); put_v(bc, denom); if(nom / denom < 1000) nut->stream[i].msb_timestamp_shift = 7; else nut->stream[i].msb_timestamp_shift = 14; put_v(bc, nut->stream[i].msb_timestamp_shift); put_v(bc, codec->has_b_frames); put_byte(bc, 0); /* flags: 0x1 - fixed_fps, 0x2 - index_present */ if(codec->extradata_size){ put_v(bc, 1); put_v(bc, codec->extradata_size); put_buffer(bc, codec->extradata, codec->extradata_size); } put_v(bc, 0); /* end of codec specific headers */ switch(codec->codec_type) { case CODEC_TYPE_AUDIO: put_v(bc, codec->sample_rate); put_v(bc, 1); put_v(bc, codec->channels); break; case CODEC_TYPE_VIDEO: put_v(bc, codec->width); put_v(bc, codec->height); put_v(bc, codec->sample_aspect_ratio.num); put_v(bc, codec->sample_aspect_ratio.den); put_v(bc, 0); /* csp type -- unknown */ break; default: break; } update_packetheader(nut, bc, 0, 1); } /* info header */ put_be64(bc, INFO_STARTCODE); put_packetheader(nut, bc, 30+strlen(s->author)+strlen(s->title)+ strlen(s->comment)+strlen(s->copyright)+strlen(LIBAVFORMAT_IDENT), 1); if (s->author[0]) { put_v(bc, 9); /* type */ put_str(bc, s->author); } if (s->title[0]) { put_v(bc, 10); /* type */ put_str(bc, s->title); } if (s->comment[0]) { put_v(bc, 11); /* type */ put_str(bc, s->comment); } if (s->copyright[0]) { put_v(bc, 12); /* type */ put_str(bc, s->copyright); } /* encoder */ if(!(s->streams[0]->codec.flags & CODEC_FLAG_BITEXACT)){ put_v(bc, 13); /* type */ put_str(bc, LIBAVFORMAT_IDENT); } put_v(bc, 0); /* eof info */ update_packetheader(nut, bc, 0, 1); put_flush_packet(bc); return 0; } | 21,391 |
1 | static inline int IRQ_testbit(IRQ_queue_t *q, int n_IRQ) { return test_bit(q->queue, n_IRQ); } | 21,393 |
1 | static int vmdk_write_compressed(BlockDriverState *bs, int64_t sector_num, const uint8_t *buf, int nb_sectors) { BDRVVmdkState *s = bs->opaque; if (s->num_extents == 1 && s->extents[0].compressed) { Coroutine *co; AioContext *aio_context = bdrv_get_aio_context(bs); VmdkWriteCompressedCo data = { .bs = bs, .sector_num = sector_num, .buf = buf, .nb_sectors = nb_sectors, .ret = -EINPROGRESS, }; co = qemu_coroutine_create(vmdk_co_write_compressed); qemu_coroutine_enter(co, &data); while (data.ret == -EINPROGRESS) { aio_poll(aio_context, true); } return data.ret; } else { return -ENOTSUP; } } | 21,394 |
1 | static int tight_compress_data(VncState *vs, int stream_id, size_t bytes, int level, int strategy) { z_streamp zstream = &vs->tight.stream[stream_id]; int previous_out; if (bytes < VNC_TIGHT_MIN_TO_COMPRESS) { vnc_write(vs, vs->tight.tight.buffer, vs->tight.tight.offset); return bytes; } if (tight_init_stream(vs, stream_id, level, strategy)) { return -1; } /* reserve memory in output buffer */ buffer_reserve(&vs->tight.zlib, bytes + 64); /* set pointers */ zstream->next_in = vs->tight.tight.buffer; zstream->avail_in = vs->tight.tight.offset; zstream->next_out = vs->tight.zlib.buffer + vs->tight.zlib.offset; zstream->avail_out = vs->tight.zlib.capacity - vs->tight.zlib.offset; zstream->data_type = Z_BINARY; previous_out = zstream->total_out; /* start encoding */ if (deflate(zstream, Z_SYNC_FLUSH) != Z_OK) { fprintf(stderr, "VNC: error during tight compression\n"); return -1; } vs->tight.zlib.offset = vs->tight.zlib.capacity - zstream->avail_out; bytes = zstream->total_out - previous_out; tight_send_compact_size(vs, bytes); vnc_write(vs, vs->tight.zlib.buffer, bytes); buffer_reset(&vs->tight.zlib); return bytes; } | 21,395 |
1 | static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count) { tb_lock(); /* If it is already been done on request of another CPU, * just retry. */ if (tcg_ctx.tb_ctx.tb_flush_count != tb_flush_count.host_int) { goto done; } #if defined(DEBUG_TB_FLUSH) printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n", (unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer), tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.tb_ctx.nb_tbs > 0 ? ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)) / tcg_ctx.tb_ctx.nb_tbs : 0); #endif if ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer) > tcg_ctx.code_gen_buffer_size) { cpu_abort(cpu, "Internal error: code buffer overflow\n"); } CPU_FOREACH(cpu) { int i; for (i = 0; i < TB_JMP_CACHE_SIZE; ++i) { atomic_set(&cpu->tb_jmp_cache[i], NULL); } } tcg_ctx.tb_ctx.nb_tbs = 0; qht_reset_size(&tcg_ctx.tb_ctx.htable, CODE_GEN_HTABLE_SIZE); page_flush_tb(); tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer; /* XXX: flush processor icache at this point if cache flush is expensive */ atomic_mb_set(&tcg_ctx.tb_ctx.tb_flush_count, tcg_ctx.tb_ctx.tb_flush_count + 1); done: tb_unlock(); } | 21,396 |
1 | int v9fs_co_st_gen(V9fsPDU *pdu, V9fsPath *path, mode_t st_mode, V9fsStatDotl *v9stat) { int err = 0; V9fsState *s = pdu->s; if (v9fs_request_cancelled(pdu)) { return -EINTR; if (s->ctx.exops.get_st_gen) { v9fs_path_read_lock(s); v9fs_co_run_in_worker( { err = s->ctx.exops.get_st_gen(&s->ctx, path, st_mode, &v9stat->st_gen); if (err < 0) { err = -errno; }); v9fs_path_unlock(s); return err; | 21,397 |
0 | int avpicture_fill(AVPicture *picture, uint8_t *ptr, enum AVPixelFormat pix_fmt, int width, int height) { int ret; if ((ret = av_image_check_size(width, height, 0, NULL)) < 0) return ret; if ((ret = av_image_fill_linesizes(picture->linesize, pix_fmt, width)) < 0) return ret; return av_image_fill_pointers(picture->data, pix_fmt, height, ptr, picture->linesize); } | 21,398 |
1 | static int bdrv_open_driver(BlockDriverState *bs, BlockDriver *drv, const char *node_name, QDict *options, int open_flags, Error **errp) { Error *local_err = NULL; int ret; bdrv_assign_node_name(bs, node_name, &local_err); if (local_err) { error_propagate(errp, local_err); return -EINVAL; } bs->drv = drv; bs->read_only = !(bs->open_flags & BDRV_O_RDWR); bs->opaque = g_malloc0(drv->instance_size); if (drv->bdrv_file_open) { assert(!drv->bdrv_needs_filename || bs->filename[0]); ret = drv->bdrv_file_open(bs, options, open_flags, &local_err); } else if (drv->bdrv_open) { ret = drv->bdrv_open(bs, options, open_flags, &local_err); } else { ret = 0; } if (ret < 0) { if (local_err) { error_propagate(errp, local_err); } else if (bs->filename[0]) { error_setg_errno(errp, -ret, "Could not open '%s'", bs->filename); } else { error_setg_errno(errp, -ret, "Could not open image"); } goto free_and_fail; } ret = refresh_total_sectors(bs, bs->total_sectors); if (ret < 0) { error_setg_errno(errp, -ret, "Could not refresh total sector count"); goto free_and_fail; } bdrv_refresh_limits(bs, &local_err); if (local_err) { error_propagate(errp, local_err); ret = -EINVAL; goto free_and_fail; } assert(bdrv_opt_mem_align(bs) != 0); assert(bdrv_min_mem_align(bs) != 0); assert(is_power_of_2(bs->bl.request_alignment)); return 0; free_and_fail: /* FIXME Close bs first if already opened*/ g_free(bs->opaque); bs->opaque = NULL; bs->drv = NULL; return ret; } | 21,400 |
1 | static int set_params(AVFilterContext *ctx, const char *params) { Frei0rContext *frei0r = ctx->priv; int i; for (i = 0; i < frei0r->plugin_info.num_params; i++) { f0r_param_info_t info; char *param; int ret; frei0r->get_param_info(&info, i); if (*params) { if (!(param = av_get_token(¶ms, "|"))) return AVERROR(ENOMEM); params++; /* skip ':' */ ret = set_param(ctx, info, i, param); av_free(param); if (ret < 0) return ret; } av_log(ctx, AV_LOG_VERBOSE, "idx:%d name:'%s' type:%s explanation:'%s' ", i, info.name, info.type == F0R_PARAM_BOOL ? "bool" : info.type == F0R_PARAM_DOUBLE ? "double" : info.type == F0R_PARAM_COLOR ? "color" : info.type == F0R_PARAM_POSITION ? "position" : info.type == F0R_PARAM_STRING ? "string" : "unknown", info.explanation); #ifdef DEBUG av_log(ctx, AV_LOG_DEBUG, "value:"); switch (info.type) { void *v; double d; char s[128]; f0r_param_color_t col; f0r_param_position_t pos; case F0R_PARAM_BOOL: v = &d; frei0r->get_param_value(frei0r->instance, v, i); av_log(ctx, AV_LOG_DEBUG, "%s", d >= 0.5 && d <= 1.0 ? "y" : "n"); break; case F0R_PARAM_DOUBLE: v = &d; frei0r->get_param_value(frei0r->instance, v, i); av_log(ctx, AV_LOG_DEBUG, "%f", d); break; case F0R_PARAM_COLOR: v = &col; frei0r->get_param_value(frei0r->instance, v, i); av_log(ctx, AV_LOG_DEBUG, "%f/%f/%f", col.r, col.g, col.b); break; case F0R_PARAM_POSITION: v = &pos; frei0r->get_param_value(frei0r->instance, v, i); av_log(ctx, AV_LOG_DEBUG, "%f/%f", pos.x, pos.y); break; default: /* F0R_PARAM_STRING */ v = s; frei0r->get_param_value(frei0r->instance, v, i); av_log(ctx, AV_LOG_DEBUG, "'%s'\n", s); break; } #endif av_log(ctx, AV_LOG_VERBOSE, "\n"); } } | 21,401 |
1 | void do_load_fpscr (void) { /* The 32 MSB of the target fpr are undefined. * They'll be zero... */ union { float64 d; struct { uint32_t u[2]; } s; } u; int i; #ifdef WORDS_BIGENDIAN #define WORD0 0 #define WORD1 1 #else #define WORD0 1 #define WORD1 0 #endif u.s.u[WORD0] = 0; u.s.u[WORD1] = 0; for (i = 0; i < 8; i++) u.s.u[WORD1] |= env->fpscr[i] << (4 * i); FT0 = u.d; } | 21,402 |
0 | static int prepare_sdp_description(FFStream *stream, uint8_t **pbuffer, struct in_addr my_ip) { AVFormatContext *avc; AVStream *avs = NULL; AVOutputFormat *rtp_format = av_guess_format("rtp", NULL, NULL); AVDictionaryEntry *entry = av_dict_get(stream->metadata, "title", NULL, 0); int i; avc = avformat_alloc_context(); if (avc == NULL || !rtp_format) { return -1; } avc->oformat = rtp_format; av_dict_set(&avc->metadata, "title", entry ? entry->value : "No Title", 0); avc->nb_streams = stream->nb_streams; if (stream->is_multicast) { snprintf(avc->filename, 1024, "rtp://%s:%d?multicast=1?ttl=%d", inet_ntoa(stream->multicast_ip), stream->multicast_port, stream->multicast_ttl); } else { snprintf(avc->filename, 1024, "rtp://0.0.0.0"); } if (avc->nb_streams >= INT_MAX/sizeof(*avc->streams) || !(avc->streams = av_malloc(avc->nb_streams * sizeof(*avc->streams)))) goto sdp_done; if (avc->nb_streams >= INT_MAX/sizeof(*avs) || !(avs = av_malloc(avc->nb_streams * sizeof(*avs)))) goto sdp_done; for(i = 0; i < stream->nb_streams; i++) { avc->streams[i] = &avs[i]; avc->streams[i]->codec = stream->streams[i]->codec; } *pbuffer = av_mallocz(2048); av_sdp_create(&avc, 1, *pbuffer, 2048); sdp_done: av_free(avc->streams); av_dict_free(&avc->metadata); av_free(avc); av_free(avs); return strlen(*pbuffer); } | 21,403 |
1 | static void decode_init_vlc(H264Context *h){ static int done = 0; if (!done) { int i; done = 1; init_vlc(&chroma_dc_coeff_token_vlc, CHROMA_DC_COEFF_TOKEN_VLC_BITS, 4*5, &chroma_dc_coeff_token_len [0], 1, 1, &chroma_dc_coeff_token_bits[0], 1, 1); for(i=0; i<4; i++){ init_vlc(&coeff_token_vlc[i], COEFF_TOKEN_VLC_BITS, 4*17, &coeff_token_len [i][0], 1, 1, &coeff_token_bits[i][0], 1, 1); } for(i=0; i<3; i++){ init_vlc(&chroma_dc_total_zeros_vlc[i], CHROMA_DC_TOTAL_ZEROS_VLC_BITS, 4, &chroma_dc_total_zeros_len [i][0], 1, 1, &chroma_dc_total_zeros_bits[i][0], 1, 1); } for(i=0; i<15; i++){ init_vlc(&total_zeros_vlc[i], TOTAL_ZEROS_VLC_BITS, 16, &total_zeros_len [i][0], 1, 1, &total_zeros_bits[i][0], 1, 1); } for(i=0; i<6; i++){ init_vlc(&run_vlc[i], RUN_VLC_BITS, 7, &run_len [i][0], 1, 1, &run_bits[i][0], 1, 1); } init_vlc(&run7_vlc, RUN7_VLC_BITS, 16, &run_len [6][0], 1, 1, &run_bits[6][0], 1, 1); } } | 21,404 |
1 | int av_buffersink_poll_frame(AVFilterContext *ctx) { BufferSinkContext *buf = ctx->priv; AVFilterLink *inlink = ctx->inputs[0]; av_assert0(!strcmp(ctx->filter->name, "buffersink") || !strcmp(ctx->filter->name, "abuffersink")); return av_fifo_size(buf->fifo)/sizeof(AVFilterBufferRef *) + ff_poll_frame(inlink); } | 21,405 |
1 | int ff_h263_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; MpegEncContext *s = avctx->priv_data; int ret; AVFrame *pict = data; #ifdef PRINT_FRAME_TIME uint64_t time= rdtsc(); #endif s->flags= avctx->flags; s->flags2= avctx->flags2; /* no supplementary picture */ if (buf_size == 0) { /* special case for last picture */ if (s->low_delay==0 && s->next_picture_ptr) { *pict = s->next_picture_ptr->f; s->next_picture_ptr= NULL; *data_size = sizeof(AVFrame); } return 0; } if(s->flags&CODEC_FLAG_TRUNCATED){ int next; if(CONFIG_MPEG4_DECODER && s->codec_id==CODEC_ID_MPEG4){ next= ff_mpeg4_find_frame_end(&s->parse_context, buf, buf_size); }else if(CONFIG_H263_DECODER && s->codec_id==CODEC_ID_H263){ next= ff_h263_find_frame_end(&s->parse_context, buf, buf_size); }else{ av_log(s->avctx, AV_LOG_ERROR, "this codec does not support truncated bitstreams\n"); return -1; } if( ff_combine_frame(&s->parse_context, next, (const uint8_t **)&buf, &buf_size) < 0 ) return buf_size; } retry: if(s->bitstream_buffer_size && (s->divx_packed || buf_size<20)){ //divx 5.01+/xvid frame reorder init_get_bits(&s->gb, s->bitstream_buffer, s->bitstream_buffer_size*8); }else init_get_bits(&s->gb, buf, buf_size*8); s->bitstream_buffer_size=0; if (!s->context_initialized) { if (ff_MPV_common_init(s) < 0) //we need the idct permutaton for reading a custom matrix return -1; } /* We need to set current_picture_ptr before reading the header, * otherwise we cannot store anyting in there */ if (s->current_picture_ptr == NULL || s->current_picture_ptr->f.data[0]) { int i= ff_find_unused_picture(s, 0); if (i < 0) return i; s->current_picture_ptr= &s->picture[i]; } /* let's go :-) */ if (CONFIG_WMV2_DECODER && s->msmpeg4_version==5) { ret= ff_wmv2_decode_picture_header(s); } else if (CONFIG_MSMPEG4_DECODER && s->msmpeg4_version) { ret = ff_msmpeg4_decode_picture_header(s); } else if (CONFIG_MPEG4_DECODER && s->h263_pred) { if(s->avctx->extradata_size && s->picture_number==0){ GetBitContext gb; init_get_bits(&gb, s->avctx->extradata, s->avctx->extradata_size*8); ret = ff_mpeg4_decode_picture_header(s, &gb); } ret = ff_mpeg4_decode_picture_header(s, &s->gb); } else if (CONFIG_H263I_DECODER && s->codec_id == CODEC_ID_H263I) { ret = ff_intel_h263_decode_picture_header(s); } else if (CONFIG_FLV_DECODER && s->h263_flv) { ret = ff_flv_decode_picture_header(s); } else { ret = ff_h263_decode_picture_header(s); } if(ret==FRAME_SKIPPED) return get_consumed_bytes(s, buf_size); /* skip if the header was thrashed */ if (ret < 0){ av_log(s->avctx, AV_LOG_ERROR, "header damaged\n"); return -1; } avctx->has_b_frames= !s->low_delay; if(s->xvid_build==-1 && s->divx_version==-1 && s->lavc_build==-1){ if(s->stream_codec_tag == AV_RL32("XVID") || s->codec_tag == AV_RL32("XVID") || s->codec_tag == AV_RL32("XVIX") || s->codec_tag == AV_RL32("RMP4") || s->codec_tag == AV_RL32("SIPP") ) s->xvid_build= 0; #if 0 if(s->codec_tag == AV_RL32("DIVX") && s->vo_type==0 && s->vol_control_parameters==1 && s->padding_bug_score > 0 && s->low_delay) // XVID with modified fourcc s->xvid_build= 0; #endif } if(s->xvid_build==-1 && s->divx_version==-1 && s->lavc_build==-1){ if(s->codec_tag == AV_RL32("DIVX") && s->vo_type==0 && s->vol_control_parameters==0) s->divx_version= 400; //divx 4 } if(s->xvid_build>=0 && s->divx_version>=0){ s->divx_version= s->divx_build= -1; } if(s->workaround_bugs&FF_BUG_AUTODETECT){ if(s->codec_tag == AV_RL32("XVIX")) s->workaround_bugs|= 
FF_BUG_XVID_ILACE; if(s->codec_tag == AV_RL32("UMP4")){ s->workaround_bugs|= FF_BUG_UMP4; } if(s->divx_version>=500 && s->divx_build<1814){ s->workaround_bugs|= FF_BUG_QPEL_CHROMA; } if(s->divx_version>502 && s->divx_build<1814){ s->workaround_bugs|= FF_BUG_QPEL_CHROMA2; } if(s->xvid_build<=3U) s->padding_bug_score= 256*256*256*64; if(s->xvid_build<=1U) s->workaround_bugs|= FF_BUG_QPEL_CHROMA; if(s->xvid_build<=12U) s->workaround_bugs|= FF_BUG_EDGE; if(s->xvid_build<=32U) s->workaround_bugs|= FF_BUG_DC_CLIP; #define SET_QPEL_FUNC(postfix1, postfix2) \ s->dsp.put_ ## postfix1 = ff_put_ ## postfix2;\ s->dsp.put_no_rnd_ ## postfix1 = ff_put_no_rnd_ ## postfix2;\ s->dsp.avg_ ## postfix1 = ff_avg_ ## postfix2; if(s->lavc_build<4653U) s->workaround_bugs|= FF_BUG_STD_QPEL; if(s->lavc_build<4655U) s->workaround_bugs|= FF_BUG_DIRECT_BLOCKSIZE; if(s->lavc_build<4670U){ s->workaround_bugs|= FF_BUG_EDGE; } if(s->lavc_build<=4712U) s->workaround_bugs|= FF_BUG_DC_CLIP; if(s->divx_version>=0) s->workaround_bugs|= FF_BUG_DIRECT_BLOCKSIZE; //printf("padding_bug_score: %d\n", s->padding_bug_score); if(s->divx_version==501 && s->divx_build==20020416) s->padding_bug_score= 256*256*256*64; if(s->divx_version<500U){ s->workaround_bugs|= FF_BUG_EDGE; } if(s->divx_version>=0) s->workaround_bugs|= FF_BUG_HPEL_CHROMA; #if 0 if(s->divx_version==500) s->padding_bug_score= 256*256*256*64; /* very ugly XVID padding bug detection FIXME/XXX solve this differently * Let us hope this at least works. */ if( s->resync_marker==0 && s->data_partitioning==0 && s->divx_version==-1 && s->codec_id==CODEC_ID_MPEG4 && s->vo_type==0) s->workaround_bugs|= FF_BUG_NO_PADDING; if(s->lavc_build<4609U) //FIXME not sure about the version num but a 4609 file seems ok s->workaround_bugs|= FF_BUG_NO_PADDING; #endif } if(s->workaround_bugs& FF_BUG_STD_QPEL){ SET_QPEL_FUNC(qpel_pixels_tab[0][ 5], qpel16_mc11_old_c) SET_QPEL_FUNC(qpel_pixels_tab[0][ 7], qpel16_mc31_old_c) SET_QPEL_FUNC(qpel_pixels_tab[0][ 9], qpel16_mc12_old_c) SET_QPEL_FUNC(qpel_pixels_tab[0][11], qpel16_mc32_old_c) SET_QPEL_FUNC(qpel_pixels_tab[0][13], qpel16_mc13_old_c) SET_QPEL_FUNC(qpel_pixels_tab[0][15], qpel16_mc33_old_c) SET_QPEL_FUNC(qpel_pixels_tab[1][ 5], qpel8_mc11_old_c) SET_QPEL_FUNC(qpel_pixels_tab[1][ 7], qpel8_mc31_old_c) SET_QPEL_FUNC(qpel_pixels_tab[1][ 9], qpel8_mc12_old_c) SET_QPEL_FUNC(qpel_pixels_tab[1][11], qpel8_mc32_old_c) SET_QPEL_FUNC(qpel_pixels_tab[1][13], qpel8_mc13_old_c) SET_QPEL_FUNC(qpel_pixels_tab[1][15], qpel8_mc33_old_c) } if(avctx->debug & FF_DEBUG_BUGS) av_log(s->avctx, AV_LOG_DEBUG, "bugs: %X lavc_build:%d xvid_build:%d divx_version:%d divx_build:%d %s\n", s->workaround_bugs, s->lavc_build, s->xvid_build, s->divx_version, s->divx_build, s->divx_packed ? "p" : ""); #if HAVE_MMX if (s->codec_id == CODEC_ID_MPEG4 && s->xvid_build>=0 && avctx->idct_algo == FF_IDCT_AUTO && (av_get_cpu_flags() & AV_CPU_FLAG_MMX)) { avctx->idct_algo= FF_IDCT_XVIDMMX; avctx->coded_width= 0; // force reinit // ff_dsputil_init(&s->dsp, avctx); s->picture_number=0; } #endif /* After H263 & mpeg4 header decode we have the height, width,*/ /* and other parameters. 
So then we could init the picture */ /* FIXME: By the way H263 decoder is evolving it should have */ /* an H263EncContext */ if ( s->width != avctx->coded_width || s->height != avctx->coded_height) { /* H.263 could change picture size any time */ ParseContext pc= s->parse_context; //FIXME move these demuxng hack to avformat s->parse_context.buffer=0; ff_MPV_common_end(s); s->parse_context= pc; } if (!s->context_initialized) { avcodec_set_dimensions(avctx, s->width, s->height); goto retry; } if((s->codec_id==CODEC_ID_H263 || s->codec_id==CODEC_ID_H263P || s->codec_id == CODEC_ID_H263I)) s->gob_index = ff_h263_get_gob_height(s); // for skipping the frame s->current_picture.f.pict_type = s->pict_type; s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I; /* skip B-frames if we don't have reference frames */ if(s->last_picture_ptr==NULL && (s->pict_type==AV_PICTURE_TYPE_B || s->dropable)) return get_consumed_bytes(s, buf_size); if( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type==AV_PICTURE_TYPE_B) || (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type!=AV_PICTURE_TYPE_I) || avctx->skip_frame >= AVDISCARD_ALL) return get_consumed_bytes(s, buf_size); if(s->next_p_frame_damaged){ if(s->pict_type==AV_PICTURE_TYPE_B) return get_consumed_bytes(s, buf_size); else s->next_p_frame_damaged=0; } if((s->avctx->flags2 & CODEC_FLAG2_FAST) && s->pict_type==AV_PICTURE_TYPE_B){ s->me.qpel_put= s->dsp.put_2tap_qpel_pixels_tab; s->me.qpel_avg= s->dsp.avg_2tap_qpel_pixels_tab; }else if((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){ s->me.qpel_put= s->dsp.put_qpel_pixels_tab; s->me.qpel_avg= s->dsp.avg_qpel_pixels_tab; }else{ s->me.qpel_put= s->dsp.put_no_rnd_qpel_pixels_tab; s->me.qpel_avg= s->dsp.avg_qpel_pixels_tab; } if(ff_MPV_frame_start(s, avctx) < 0) return -1; if (!s->divx_packed) ff_thread_finish_setup(avctx); if (CONFIG_MPEG4_VDPAU_DECODER && (s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU)) { ff_vdpau_mpeg4_decode_picture(s, s->gb.buffer, s->gb.buffer_end - s->gb.buffer); goto frame_end; } if (avctx->hwaccel) { if (avctx->hwaccel->start_frame(avctx, s->gb.buffer, s->gb.buffer_end - s->gb.buffer) < 0) return -1; } ff_er_frame_start(s); //the second part of the wmv2 header contains the MB skip bits which are stored in current_picture->mb_type //which is not available before ff_MPV_frame_start() if (CONFIG_WMV2_DECODER && s->msmpeg4_version==5){ ret = ff_wmv2_decode_secondary_picture_header(s); if(ret<0) return ret; if(ret==1) goto intrax8_decoded; } /* decode each macroblock */ s->mb_x=0; s->mb_y=0; ret = decode_slice(s); while(s->mb_y<s->mb_height){ if(s->msmpeg4_version){ if(s->slice_height==0 || s->mb_x!=0 || (s->mb_y%s->slice_height)!=0 || get_bits_count(&s->gb) > s->gb.size_in_bits) break; }else{ int prev_x=s->mb_x, prev_y=s->mb_y; if(ff_h263_resync(s)<0) break; if (prev_y * s->mb_width + prev_x < s->mb_y * s->mb_width + s->mb_x) s->error_occurred = 1; } if(s->msmpeg4_version<4 && s->h263_pred) ff_mpeg4_clean_buffers(s); if (decode_slice(s) < 0) ret = AVERROR_INVALIDDATA; } if (s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type==AV_PICTURE_TYPE_I) if(!CONFIG_MSMPEG4_DECODER || ff_msmpeg4_decode_ext_header(s, buf_size) < 0){ s->error_status_table[s->mb_num-1]= ER_MB_ERROR; } assert(s->bitstream_buffer_size==0); frame_end: /* divx 5.01+ bistream reorder stuff */ if(s->codec_id==CODEC_ID_MPEG4 && s->divx_packed){ int current_pos= get_bits_count(&s->gb)>>3; int startcode_found=0; if(buf_size - current_pos > 5){ int i; for(i=current_pos; i<buf_size-3; 
i++){ if(buf[i]==0 && buf[i+1]==0 && buf[i+2]==1 && buf[i+3]==0xB6){ startcode_found=1; break; } } } if(s->gb.buffer == s->bitstream_buffer && buf_size>7 && s->xvid_build>=0){ //xvid style startcode_found=1; current_pos=0; } if(startcode_found){ av_fast_malloc( &s->bitstream_buffer, &s->allocated_bitstream_buffer_size, buf_size - current_pos + FF_INPUT_BUFFER_PADDING_SIZE); if (!s->bitstream_buffer) return AVERROR(ENOMEM); memcpy(s->bitstream_buffer, buf + current_pos, buf_size - current_pos); s->bitstream_buffer_size= buf_size - current_pos; } } intrax8_decoded: ff_er_frame_end(s); if (avctx->hwaccel) { if (avctx->hwaccel->end_frame(avctx) < 0) return -1; } ff_MPV_frame_end(s); assert(s->current_picture.f.pict_type == s->current_picture_ptr->f.pict_type); assert(s->current_picture.f.pict_type == s->pict_type); if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) { *pict = s->current_picture_ptr->f; } else if (s->last_picture_ptr != NULL) { *pict = s->last_picture_ptr->f; } if(s->last_picture_ptr || s->low_delay){ *data_size = sizeof(AVFrame); ff_print_debug_info(s, pict); } #ifdef PRINT_FRAME_TIME av_log(avctx, AV_LOG_DEBUG, "%"PRId64"\n", rdtsc()-time); #endif return (ret && (avctx->err_recognition & AV_EF_EXPLODE))?ret:get_consumed_bytes(s, buf_size); } | 21,406 |
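For reference, here is a minimal standalone sketch of the start-code scan performed in the DivX packed-bitstream branch above. The helper name find_vop_startcode is invented for illustration; only the 00 00 01 B6 pattern comes from the source.

#include <stdint.h>

/* Hypothetical helper (not in the source): return the offset of the next
 * 00 00 01 B6 VOP start code at or after 'pos', or -1 if none is found. */
static int find_vop_startcode(const uint8_t *buf, int buf_size, int pos)
{
    int i;
    for (i = pos; i < buf_size - 3; i++) {
        if (buf[i] == 0 && buf[i + 1] == 0 && buf[i + 2] == 1 &&
            buf[i + 3] == 0xB6)
            return i;
    }
    return -1;
}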
1 | static void mmap_release_buffer(AVPacket *pkt) { struct v4l2_buffer buf; int res, fd; struct buff_data *buf_descriptor = pkt->priv; memset(&buf, 0, sizeof(struct v4l2_buffer)); buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; buf.memory = V4L2_MEMORY_MMAP; buf.index = buf_descriptor->index; fd = buf_descriptor->fd; av_free(buf_descriptor); res = ioctl (fd, VIDIOC_QBUF, &buf); if (res < 0) { av_log(NULL, AV_LOG_ERROR, "ioctl(VIDIOC_QBUF)\n"); } pkt->data = NULL; pkt->size = 0; } | 21,408 |
1 | static int configure_video_filters(FilterGraph *fg) { InputStream *ist = fg->inputs[0]->ist; OutputStream *ost = fg->outputs[0]->ost; AVFilterContext *in_filter, *out_filter, *filter; AVCodecContext *codec = ost->st->codec; AVBufferSinkParams *buffersink_params = av_buffersink_params_alloc(); char *pix_fmts; AVRational sample_aspect_ratio; char args[255]; int ret; avfilter_graph_free(&fg->graph); fg->graph = avfilter_graph_alloc(); if (!fg->graph) return AVERROR(ENOMEM); if (ist->st->sample_aspect_ratio.num) { sample_aspect_ratio = ist->st->sample_aspect_ratio; } else sample_aspect_ratio = ist->st->codec->sample_aspect_ratio; snprintf(args, 255, "%d:%d:%d:%d:%d:%d:%d:flags=%d", ist->st->codec->width, ist->st->codec->height, ist->st->codec->pix_fmt, 1, AV_TIME_BASE, sample_aspect_ratio.num, sample_aspect_ratio.den, SWS_BILINEAR + ((ist->st->codec->flags&CODEC_FLAG_BITEXACT) ? SWS_BITEXACT:0)); ret = avfilter_graph_create_filter(&fg->inputs[0]->filter, avfilter_get_by_name("buffer"), "src", args, NULL, fg->graph); if (ret < 0) return ret; #if FF_API_OLD_VSINK_API ret = avfilter_graph_create_filter(&fg->outputs[0]->filter, avfilter_get_by_name("buffersink"), "out", NULL, NULL, fg->graph); #else ret = avfilter_graph_create_filter(&fg->outputs[0]->filter, avfilter_get_by_name("buffersink"), "out", NULL, buffersink_params, fg->graph); #endif av_freep(&buffersink_params); if (ret < 0) return ret; in_filter = fg->inputs[0]->filter; out_filter = fg->outputs[0]->filter; if (codec->width || codec->height) { snprintf(args, 255, "%d:%d:flags=0x%X", codec->width, codec->height, (unsigned)ost->sws_flags); if ((ret = avfilter_graph_create_filter(&filter, avfilter_get_by_name("scale"), NULL, args, NULL, fg->graph)) < 0) return ret; if ((ret = avfilter_link(in_filter, 0, filter, 0)) < 0) return ret; in_filter = filter; } if ((pix_fmts = choose_pixel_fmts(ost))) { if ((ret = avfilter_graph_create_filter(&filter, avfilter_get_by_name("format"), "format", pix_fmts, NULL, fg->graph)) < 0) return ret; if ((ret = avfilter_link(filter, 0, out_filter, 0)) < 0) return ret; out_filter = filter; av_freep(&pix_fmts); } snprintf(args, sizeof(args), "flags=0x%X", (unsigned)ost->sws_flags); fg->graph->scale_sws_opts = av_strdup(args); if (ost->avfilter) { AVFilterInOut *outputs = avfilter_inout_alloc(); AVFilterInOut *inputs = avfilter_inout_alloc(); outputs->name = av_strdup("in"); outputs->filter_ctx = in_filter; outputs->pad_idx = 0; outputs->next = NULL; inputs->name = av_strdup("out"); inputs->filter_ctx = out_filter; inputs->pad_idx = 0; inputs->next = NULL; if ((ret = avfilter_graph_parse(fg->graph, ost->avfilter, &inputs, &outputs, NULL)) < 0) return ret; av_freep(&ost->avfilter); } else { if ((ret = avfilter_link(in_filter, 0, out_filter, 0)) < 0) return ret; } if (ost->keep_pix_fmt) avfilter_graph_set_auto_convert(fg->graph, AVFILTER_AUTO_CONVERT_NONE); if ((ret = avfilter_graph_config(fg->graph, NULL)) < 0) return ret; ost->filter = fg->outputs[0]; return 0; } | 21,409 |
0 | static inline void RET_STOP (DisasContext *ctx) { gen_update_nip(ctx, ctx->nip); ctx->exception = EXCP_MTMSR; } | 21,410 |
0 | void qdev_property_add_legacy(DeviceState *dev, Property *prop, Error **errp) { gchar *type; type = g_strdup_printf("legacy<%s>", prop->info->name); qdev_property_add(dev, prop->name, type, qdev_get_legacy_property, qdev_set_legacy_property, NULL, prop, errp); g_free(type); } | 21,412 |
0 | uint64_t helper_fmadd(CPUPPCState *env, uint64_t arg1, uint64_t arg2, uint64_t arg3) { CPU_DoubleU farg1, farg2, farg3; farg1.ll = arg1; farg2.ll = arg2; farg3.ll = arg3; if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) || (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) { /* Multiplication of zero by infinity */ farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ); } else { if (unlikely(float64_is_signaling_nan(farg1.d) || float64_is_signaling_nan(farg2.d) || float64_is_signaling_nan(farg3.d))) { /* sNaN operation */ fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN); } /* This is the way the PowerPC specification defines it */ float128 ft0_128, ft1_128; ft0_128 = float64_to_float128(farg1.d, &env->fp_status); ft1_128 = float64_to_float128(farg2.d, &env->fp_status); ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status); if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) && float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) { /* Magnitude subtraction of infinities */ farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI); } else { ft1_128 = float64_to_float128(farg3.d, &env->fp_status); ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status); farg1.d = float128_to_float64(ft0_128, &env->fp_status); } } return farg1.ll; } | 21,414 |
0 | static void tcg_out_andi64(TCGContext *s, TCGReg dst, TCGReg src, uint64_t c) { int mb, me; assert(TCG_TARGET_REG_BITS == 64); if (mask64_operand(c, &mb, &me)) { if (mb == 0) { tcg_out_rld(s, RLDICR, dst, src, 0, me); } else { tcg_out_rld(s, RLDICL, dst, src, 0, mb); } } else if ((c & 0xffff) == c) { tcg_out32(s, ANDI | SAI(src, dst, c)); return; } else if ((c & 0xffff0000) == c) { tcg_out32(s, ANDIS | SAI(src, dst, c >> 16)); return; } else { tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R0, c); tcg_out32(s, AND | SAB(src, dst, TCG_REG_R0)); } } | 21,415 |
0 | static void eth_cleanup(NetClientState *nc) { ETRAXFSEthState *eth = qemu_get_nic_opaque(nc); /* Disconnect the client. */ eth->dma_out->client.push = NULL; eth->dma_out->client.opaque = NULL; eth->dma_in->client.opaque = NULL; eth->dma_in->client.pull = NULL; g_free(eth); } | 21,416 |
0 | long do_sigreturn(CPUCRISState *env) { struct target_signal_frame *frame; abi_ulong frame_addr; target_sigset_t target_set; sigset_t set; int i; frame_addr = env->regs[R_SP]; /* Make sure the guest isn't playing games. */ if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1)) goto badframe; /* Restore blocked signals */ if (__get_user(target_set.sig[0], &frame->sc.oldmask)) goto badframe; for(i = 1; i < TARGET_NSIG_WORDS; i++) { if (__get_user(target_set.sig[i], &frame->extramask[i - 1])) goto badframe; } target_to_host_sigset_internal(&set, &target_set); sigprocmask(SIG_SETMASK, &set, NULL); restore_sigcontext(&frame->sc, env); unlock_user_struct(frame, frame_addr, 0); return env->regs[10]; badframe: unlock_user_struct(frame, frame_addr, 0); force_sig(TARGET_SIGSEGV); } | 21,418 |
0 | static inline int epoll_events_from_pfd(int pfd_events) { return (pfd_events & G_IO_IN ? EPOLLIN : 0) | (pfd_events & G_IO_OUT ? EPOLLOUT : 0) | (pfd_events & G_IO_HUP ? EPOLLHUP : 0) | (pfd_events & G_IO_ERR ? EPOLLERR : 0); } | 21,419 |
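For symmetry, a hedged sketch of the reverse mapping (epoll bits back to GLib poll flags). The function name is mine, and it assumes the same four event kinds are the only ones of interest.

#include <sys/epoll.h>
#include <glib.h>

/* Assumed inverse of epoll_events_from_pfd() above. */
static inline int pfd_events_from_epoll(int epoll_events)
{
    return (epoll_events & EPOLLIN ? G_IO_IN : 0) |
           (epoll_events & EPOLLOUT ? G_IO_OUT : 0) |
           (epoll_events & EPOLLHUP ? G_IO_HUP : 0) |
           (epoll_events & EPOLLERR ? G_IO_ERR : 0);
}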
0 | static int mov_write_moov_tag(ByteIOContext *pb, MOVContext *mov) { int pos, i; pos = url_ftell(pb); put_be32(pb, 0); /* size placeholder*/ put_tag(pb, "moov"); mov->timescale = globalTimescale; for (i=0; i<MAX_STREAMS; i++) { if(mov->tracks[i].entry <= 0) continue; if(mov->tracks[i].enc->codec_type == CODEC_TYPE_VIDEO) { mov->tracks[i].timescale = mov->tracks[i].enc->frame_rate; mov->tracks[i].sampleDuration = mov->tracks[i].enc->frame_rate_base; } else if(mov->tracks[i].enc->codec_type == CODEC_TYPE_AUDIO) { /* If AMR, track timescale = 8000, AMR_WB = 16000 */ if(mov->tracks[i].enc->codec_id == CODEC_ID_AMR_NB) { mov->tracks[i].sampleDuration = 160; // Bytes per chunk mov->tracks[i].timescale = 8000; } else { mov->tracks[i].timescale = mov->tracks[i].enc->sample_rate; mov->tracks[i].sampleDuration = mov->tracks[i].enc->frame_size; } } mov->tracks[i].trackDuration = mov->tracks[i].sampleCount * mov->tracks[i].sampleDuration; mov->tracks[i].time = mov->time; mov->tracks[i].trackID = i+1; } mov_write_mvhd_tag(pb, mov); //mov_write_iods_tag(pb, mov); for (i=0; i<MAX_STREAMS; i++) { if(mov->tracks[i].entry > 0) { mov_write_trak_tag(pb, &(mov->tracks[i])); } } return updateSize(pb, pos); } | 21,420 |
0 | static void mips_cps_realize(DeviceState *dev, Error **errp) { MIPSCPSState *s = MIPS_CPS(dev); CPUMIPSState *env; MIPSCPU *cpu; int i; Error *err = NULL; target_ulong gcr_base; bool itu_present = false; for (i = 0; i < s->num_vp; i++) { cpu = cpu_mips_init(s->cpu_model); if (cpu == NULL) { error_setg(errp, "%s: CPU initialization failed\n", __func__); return; } /* Init internal devices */ cpu_mips_irq_init_cpu(cpu); cpu_mips_clock_init(cpu); env = &cpu->env; if (cpu_mips_itu_supported(env)) { itu_present = true; /* Attach ITC Tag to the VP */ env->itc_tag = mips_itu_get_tag_region(&s->itu); } qemu_register_reset(main_cpu_reset, cpu); } cpu = MIPS_CPU(first_cpu); env = &cpu->env; /* Inter-Thread Communication Unit */ if (itu_present) { object_initialize(&s->itu, sizeof(s->itu), TYPE_MIPS_ITU); qdev_set_parent_bus(DEVICE(&s->itu), sysbus_get_default()); object_property_set_int(OBJECT(&s->itu), 16, "num-fifo", &err); object_property_set_int(OBJECT(&s->itu), 16, "num-semaphores", &err); object_property_set_bool(OBJECT(&s->itu), true, "realized", &err); if (err != NULL) { error_propagate(errp, err); return; } memory_region_add_subregion(&s->container, 0, sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->itu), 0)); } /* Cluster Power Controller */ object_initialize(&s->cpc, sizeof(s->cpc), TYPE_MIPS_CPC); qdev_set_parent_bus(DEVICE(&s->cpc), sysbus_get_default()); object_property_set_int(OBJECT(&s->cpc), s->num_vp, "num-vp", &err); object_property_set_int(OBJECT(&s->cpc), 1, "vp-start-running", &err); object_property_set_bool(OBJECT(&s->cpc), true, "realized", &err); if (err != NULL) { error_propagate(errp, err); return; } memory_region_add_subregion(&s->container, 0, sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->cpc), 0)); /* Global Interrupt Controller */ object_initialize(&s->gic, sizeof(s->gic), TYPE_MIPS_GIC); qdev_set_parent_bus(DEVICE(&s->gic), sysbus_get_default()); object_property_set_int(OBJECT(&s->gic), s->num_vp, "num-vp", &err); object_property_set_int(OBJECT(&s->gic), 128, "num-irq", &err); object_property_set_bool(OBJECT(&s->gic), true, "realized", &err); if (err != NULL) { error_propagate(errp, err); return; } memory_region_add_subregion(&s->container, 0, sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->gic), 0)); /* Global Configuration Registers */ gcr_base = env->CP0_CMGCRBase << 4; object_initialize(&s->gcr, sizeof(s->gcr), TYPE_MIPS_GCR); qdev_set_parent_bus(DEVICE(&s->gcr), sysbus_get_default()); object_property_set_int(OBJECT(&s->gcr), s->num_vp, "num-vp", &err); object_property_set_int(OBJECT(&s->gcr), 0x800, "gcr-rev", &err); object_property_set_int(OBJECT(&s->gcr), gcr_base, "gcr-base", &err); object_property_set_link(OBJECT(&s->gcr), OBJECT(&s->gic.mr), "gic", &err); object_property_set_link(OBJECT(&s->gcr), OBJECT(&s->cpc.mr), "cpc", &err); object_property_set_bool(OBJECT(&s->gcr), true, "realized", &err); if (err != NULL) { error_propagate(errp, err); return; } memory_region_add_subregion(&s->container, gcr_base, sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->gcr), 0)); } | 21,421 |
0 | void cpu_loop(CPUM68KState *env) { CPUState *cs = CPU(m68k_env_get_cpu(env)); int trapnr; unsigned int n; target_siginfo_t info; TaskState *ts = cs->opaque; for(;;) { cpu_exec_start(cs); trapnr = cpu_m68k_exec(cs); cpu_exec_end(cs); switch(trapnr) { case EXCP_ILLEGAL: { if (ts->sim_syscalls) { uint16_t nr; get_user_u16(nr, env->pc + 2); env->pc += 4; do_m68k_simcall(env, nr); } else { goto do_sigill; } } break; case EXCP_HALT_INSN: /* Semihosting syscall. */ env->pc += 4; do_m68k_semihosting(env, env->dregs[0]); break; case EXCP_LINEA: case EXCP_LINEF: case EXCP_UNSUPPORTED: do_sigill: info.si_signo = TARGET_SIGILL; info.si_errno = 0; info.si_code = TARGET_ILL_ILLOPN; info._sifields._sigfault._addr = env->pc; queue_signal(env, info.si_signo, &info); break; case EXCP_TRAP0: { ts->sim_syscalls = 0; n = env->dregs[0]; env->pc += 2; env->dregs[0] = do_syscall(env, n, env->dregs[1], env->dregs[2], env->dregs[3], env->dregs[4], env->dregs[5], env->aregs[0], 0, 0); } break; case EXCP_INTERRUPT: /* just indicate that signals should be handled asap */ break; case EXCP_ACCESS: { info.si_signo = TARGET_SIGSEGV; info.si_errno = 0; /* XXX: check env->error_code */ info.si_code = TARGET_SEGV_MAPERR; info._sifields._sigfault._addr = env->mmu.ar; queue_signal(env, info.si_signo, &info); } break; case EXCP_DEBUG: { int sig; sig = gdb_handlesig(cs, TARGET_SIGTRAP); if (sig) { info.si_signo = sig; info.si_errno = 0; info.si_code = TARGET_TRAP_BRKPT; queue_signal(env, info.si_signo, &info); } } break; default: fprintf(stderr, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr); cpu_dump_state(cs, stderr, fprintf, 0); abort(); } process_pending_signals(env); } } | 21,422 |
0 | void usb_attach(USBPort *port, USBDevice *dev) { if (dev != NULL) { /* attach */ if (port->dev) { usb_attach(port, NULL); } dev->port = port; port->dev = dev; port->ops->attach(port); usb_send_msg(dev, USB_MSG_ATTACH); } else { /* detach */ dev = port->dev; port->ops->detach(port); if (dev) { usb_send_msg(dev, USB_MSG_DETACH); dev->port = NULL; port->dev = NULL; } } } | 21,423 |
0 | static void qerror_set_data(QError *qerr, const char *fmt, va_list *va) { QObject *obj; obj = qobject_from_jsonv(fmt, va); if (!obj) { qerror_abort(qerr, "invalid format '%s'", fmt); } if (qobject_type(obj) != QTYPE_QDICT) { qerror_abort(qerr, "error format is not a QDict '%s'", fmt); } qerr->error = qobject_to_qdict(obj); obj = qdict_get(qerr->error, "class"); if (!obj) { qerror_abort(qerr, "missing 'class' key in '%s'", fmt); } if (qobject_type(obj) != QTYPE_QSTRING) { qerror_abort(qerr, "'class' key value should be a QString"); } obj = qdict_get(qerr->error, "data"); if (!obj) { qerror_abort(qerr, "missing 'data' key in '%s'", fmt); } if (qobject_type(obj) != QTYPE_QDICT) { qerror_abort(qerr, "'data' key value should be a QDICT"); } } | 21,424 |
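The checks above pin down the shape a qerror format string must produce. Below is a minimal sketch of that contract as a standalone predicate, assuming QEMU's qobject/qdict headers are available; the helper name and the example class name are illustrative only.

/* Hedged sketch: the shape qerror_set_data() requires of a built QDict,
 * e.g. one parsed from "{ 'class': 'DeviceNotFound', 'data': { 'device': %s } }". */
static bool qerror_shape_ok(QDict *err)
{
    QObject *cls = qdict_get(err, "class");
    QObject *data = qdict_get(err, "data");

    return cls && qobject_type(cls) == QTYPE_QSTRING &&
           data && qobject_type(data) == QTYPE_QDICT;
}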
0 | void udp_emu(struct socket *so, struct mbuf *m) { struct sockaddr_in addr; int addrlen = sizeof(addr); #ifdef EMULATE_TALK CTL_MSG_OLD *omsg; CTL_MSG *nmsg; char buff[sizeof(CTL_MSG)]; u_char type; struct talk_request { struct talk_request *next; struct socket *udp_so; struct socket *tcp_so; } *req; static struct talk_request *req_tbl = 0; #endif struct cu_header { uint16_t d_family; // destination family uint16_t d_port; // destination port uint32_t d_addr; // destination address uint16_t s_family; // source family uint16_t s_port; // source port uint32_t so_addr; // source address uint32_t seqn; // sequence number uint16_t message; // message uint16_t data_type; // data type uint16_t pkt_len; // packet length } *cu_head; switch(so->so_emu) { #ifdef EMULATE_TALK case EMU_TALK: case EMU_NTALK: /* * Talk emulation. We always change the ctl_addr to get * some answers from the daemon. When an ANNOUNCE comes, * we send LEAVE_INVITE to the local daemons. Also when a * DELETE comes, we send copies to the local daemons. */ if (getsockname(so->s, (struct sockaddr *)&addr, &addrlen) < 0) return; #define IS_OLD (so->so_emu == EMU_TALK) #define COPY_MSG(dest, src) { dest->type = src->type; \ dest->id_num = src->id_num; \ dest->pid = src->pid; \ dest->addr = src->addr; \ dest->ctl_addr = src->ctl_addr; \ memcpy(&dest->l_name, &src->l_name, NAME_SIZE_OLD); \ memcpy(&dest->r_name, &src->r_name, NAME_SIZE_OLD); \ memcpy(&dest->r_tty, &src->r_tty, TTY_SIZE); } #define OTOSIN(ptr, field) ((struct sockaddr_in *)&ptr->field) /* old_sockaddr to sockaddr_in */ if (IS_OLD) { /* old talk */ omsg = mtod(m, CTL_MSG_OLD*); nmsg = (CTL_MSG *) buff; type = omsg->type; OTOSIN(omsg, ctl_addr)->sin_port = addr.sin_port; OTOSIN(omsg, ctl_addr)->sin_addr = our_addr; strncpy(omsg->l_name, getlogin(), NAME_SIZE_OLD); } else { /* new talk */ omsg = (CTL_MSG_OLD *) buff; nmsg = mtod(m, CTL_MSG *); type = nmsg->type; OTOSIN(nmsg, ctl_addr)->sin_port = addr.sin_port; OTOSIN(nmsg, ctl_addr)->sin_addr = our_addr; strncpy(nmsg->l_name, getlogin(), NAME_SIZE_OLD); } if (type == LOOK_UP) return; /* for LOOK_UP this is enough */ if (IS_OLD) { /* make a copy of the message */ COPY_MSG(nmsg, omsg); nmsg->vers = 1; nmsg->answer = 0; } else COPY_MSG(omsg, nmsg); /* * If it is an ANNOUNCE message, we go through the * request table to see if a tcp port has already * been redirected for this socket. If not, we solisten() * a new socket and add this entry to the table. * The port number of the tcp socket and our IP * are put to the addr field of the message structures. * Then a LEAVE_INVITE is sent to both local daemon * ports, 517 and 518. This is why we have two copies * of the message, one in old talk and one in new talk * format.
*/ if (type == ANNOUNCE) { int s; u_short temp_port; for(req = req_tbl; req; req = req->next) if (so == req->udp_so) break; /* found it */ if (!req) { /* no entry for so, create new */ req = (struct talk_request *) malloc(sizeof(struct talk_request)); req->udp_so = so; req->tcp_so = solisten(0, OTOSIN(omsg, addr)->sin_addr.s_addr, OTOSIN(omsg, addr)->sin_port, SS_FACCEPTONCE); req->next = req_tbl; req_tbl = req; } /* replace port number in addr field */ addrlen = sizeof(addr); getsockname(req->tcp_so->s, (struct sockaddr *) &addr, &addrlen); OTOSIN(omsg, addr)->sin_port = addr.sin_port; OTOSIN(omsg, addr)->sin_addr = our_addr; OTOSIN(nmsg, addr)->sin_port = addr.sin_port; OTOSIN(nmsg, addr)->sin_addr = our_addr; /* send LEAVE_INVITEs */ temp_port = OTOSIN(omsg, ctl_addr)->sin_port; OTOSIN(omsg, ctl_addr)->sin_port = 0; OTOSIN(nmsg, ctl_addr)->sin_port = 0; omsg->type = nmsg->type = LEAVE_INVITE; s = socket(AF_INET, SOCK_DGRAM, IPPROTO_IP); addr.sin_addr = our_addr; addr.sin_family = AF_INET; addr.sin_port = htons(517); sendto(s, (char *)omsg, sizeof(*omsg), 0, (struct sockaddr *)&addr, sizeof(addr)); addr.sin_port = htons(518); sendto(s, (char *)nmsg, sizeof(*nmsg), 0, (struct sockaddr *) &addr, sizeof(addr)); closesocket(s); omsg->type = nmsg->type = ANNOUNCE; OTOSIN(omsg, ctl_addr)->sin_port = temp_port; OTOSIN(nmsg, ctl_addr)->sin_port = temp_port; } /* * If it is a DELETE message, we send a copy to the * local daemons. Then we delete the entry corresponding * to our socket from the request table. */ if (type == DELETE) { struct talk_request *temp_req, *req_next; int s; u_short temp_port; temp_port = OTOSIN(omsg, ctl_addr)->sin_port; OTOSIN(omsg, ctl_addr)->sin_port = 0; OTOSIN(nmsg, ctl_addr)->sin_port = 0; s = socket(AF_INET, SOCK_DGRAM, IPPROTO_IP); addr.sin_addr = our_addr; addr.sin_family = AF_INET; addr.sin_port = htons(517); sendto(s, (char *)omsg, sizeof(*omsg), 0, (struct sockaddr *)&addr, sizeof(addr)); addr.sin_port = htons(518); sendto(s, (char *)nmsg, sizeof(*nmsg), 0, (struct sockaddr *)&addr, sizeof(addr)); closesocket(s); OTOSIN(omsg, ctl_addr)->sin_port = temp_port; OTOSIN(nmsg, ctl_addr)->sin_port = temp_port; /* delete table entry */ if (so == req_tbl->udp_so) { temp_req = req_tbl; req_tbl = req_tbl->next; free(temp_req); } else { temp_req = req_tbl; for(req = req_tbl->next; req; req = req_next) { req_next = req->next; if (so == req->udp_so) { temp_req->next = req_next; free(req); break; } else { temp_req = req; } } } } return; #endif case EMU_CUSEEME: /* * Cu-SeeMe emulation. * Hopefully the packet is more than 16 bytes long. We don't * do any other tests, just replace the address and port * fields. */ if (m->m_len >= sizeof (*cu_head)) { if (getsockname(so->s, (struct sockaddr *)&addr, &addrlen) < 0) return; cu_head = mtod(m, struct cu_header *); cu_head->s_port = addr.sin_port; cu_head->so_addr = our_addr.s_addr; } return; } } | 21,425 |
0 | static ssize_t usbnet_receive(VLANClientState *nc, const uint8_t *buf, size_t size) { USBNetState *s = DO_UPCAST(NICState, nc, nc)->opaque; struct rndis_packet_msg_type *msg; if (s->rndis) { msg = (struct rndis_packet_msg_type *) s->in_buf; if (s->rndis_state != RNDIS_DATA_INITIALIZED) return -1; if (size + sizeof(struct rndis_packet_msg_type) > sizeof(s->in_buf)) return -1; memset(msg, 0, sizeof(struct rndis_packet_msg_type)); msg->MessageType = cpu_to_le32(RNDIS_PACKET_MSG); msg->MessageLength = cpu_to_le32(size + sizeof(struct rndis_packet_msg_type)); msg->DataOffset = cpu_to_le32(sizeof(struct rndis_packet_msg_type) - 8); msg->DataLength = cpu_to_le32(size); /* msg->OOBDataOffset; * msg->OOBDataLength; * msg->NumOOBDataElements; * msg->PerPacketInfoOffset; * msg->PerPacketInfoLength; * msg->VcHandle; * msg->Reserved; */ memcpy(msg + 1, buf, size); s->in_len = size + sizeof(struct rndis_packet_msg_type); } else { if (size > sizeof(s->in_buf)) return -1; memcpy(s->in_buf, buf, size); s->in_len = size; } s->in_ptr = 0; return size; } | 21,426 |
0 | static void mpcore_priv_map_setup(mpcore_priv_state *s) { int i; SysBusDevice *gicbusdev = sysbus_from_qdev(s->gic); SysBusDevice *busdev = sysbus_from_qdev(s->mptimer); memory_region_init(&s->container, "mpcore-priv-container", 0x2000); memory_region_init_io(&s->iomem, &mpcore_scu_ops, s, "mpcore-scu", 0x100); memory_region_add_subregion(&s->container, 0, &s->iomem); /* GIC CPU interfaces: "current CPU" at 0x100, then specific CPUs * at 0x200, 0x300... */ for (i = 0; i < (s->num_cpu + 1); i++) { target_phys_addr_t offset = 0x100 + (i * 0x100); memory_region_add_subregion(&s->container, offset, sysbus_mmio_get_region(gicbusdev, i + 1)); } /* Add the regions for timer and watchdog for "current CPU" and * for each specific CPU. */ for (i = 0; i < (s->num_cpu + 1) * 2; i++) { /* Timers at 0x600, 0x700, ...; watchdogs at 0x620, 0x720, ... */ target_phys_addr_t offset = 0x600 + (i >> 1) * 0x100 + (i & 1) * 0x20; memory_region_add_subregion(&s->container, offset, sysbus_mmio_get_region(busdev, i)); } memory_region_add_subregion(&s->container, 0x1000, sysbus_mmio_get_region(gicbusdev, 0)); /* Wire up the interrupt from each watchdog and timer. * For each core the timer is PPI 29 and the watchdog PPI 30. */ for (i = 0; i < s->num_cpu; i++) { int ppibase = (s->num_irq - 32) + i * 32; sysbus_connect_irq(busdev, i * 2, qdev_get_gpio_in(s->gic, ppibase + 29)); sysbus_connect_irq(busdev, i * 2 + 1, qdev_get_gpio_in(s->gic, ppibase + 30)); } } | 21,428 |
0 | static int proxy_readdir_r(FsContext *ctx, V9fsFidOpenState *fs, struct dirent *entry, struct dirent **result) { return readdir_r(fs->dir, entry, result); } | 21,429 |
0 | int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl) { const SPS *sps; const PPS *pps; unsigned int first_mb_in_slice; unsigned int pps_id; int ret; unsigned int slice_type, tmp, i, j; int last_pic_structure, last_pic_droppable; int needs_reinit = 0; int field_pic_flag, bottom_field_flag; int frame_num, droppable, picture_structure; int mb_aff_frame = 0; h->qpel_put = h->h264qpel.put_h264_qpel_pixels_tab; h->qpel_avg = h->h264qpel.avg_h264_qpel_pixels_tab; first_mb_in_slice = get_ue_golomb(&sl->gb); if (first_mb_in_slice == 0) { // FIXME better field boundary detection if (h->current_slice && h->cur_pic_ptr && FIELD_PICTURE(h)) { ff_h264_field_end(h, sl, 1); } h->current_slice = 0; if (!h->first_field) { if (h->cur_pic_ptr && !h->droppable) { ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, h->picture_structure == PICT_BOTTOM_FIELD); } h->cur_pic_ptr = NULL; } } slice_type = get_ue_golomb_31(&sl->gb); if (slice_type > 9) { av_log(h->avctx, AV_LOG_ERROR, "slice type %d too large at %d\n", slice_type, first_mb_in_slice); return AVERROR_INVALIDDATA; } if (slice_type > 4) { slice_type -= 5; sl->slice_type_fixed = 1; } else sl->slice_type_fixed = 0; slice_type = ff_h264_golomb_to_pict_type[slice_type]; sl->slice_type = slice_type; sl->slice_type_nos = slice_type & 3; if (h->nal_unit_type == NAL_IDR_SLICE && sl->slice_type_nos != AV_PICTURE_TYPE_I) { av_log(h->avctx, AV_LOG_ERROR, "A non-intra slice in an IDR NAL unit.\n"); return AVERROR_INVALIDDATA; } // to make a few old functions happy, it's wrong though if (!h->setup_finished) h->pict_type = sl->slice_type; pps_id = get_ue_golomb(&sl->gb); if (pps_id >= MAX_PPS_COUNT) { av_log(h->avctx, AV_LOG_ERROR, "pps_id %u out of range\n", pps_id); return AVERROR_INVALIDDATA; } if (!h->ps.pps_list[pps_id]) { av_log(h->avctx, AV_LOG_ERROR, "non-existing PPS %u referenced\n", pps_id); return AVERROR_INVALIDDATA; } if (!h->setup_finished) { h->ps.pps = (const PPS*)h->ps.pps_list[pps_id]->data; } else if (h->ps.pps != (const PPS*)h->ps.pps_list[pps_id]->data) { av_log(h->avctx, AV_LOG_ERROR, "PPS changed between slices\n"); return AVERROR_INVALIDDATA; } if (!h->ps.sps_list[h->ps.pps->sps_id]) { av_log(h->avctx, AV_LOG_ERROR, "non-existing SPS %u referenced\n", h->ps.pps->sps_id); return AVERROR_INVALIDDATA; } if (h->ps.sps != (const SPS*)h->ps.sps_list[h->ps.pps->sps_id]->data) { h->ps.sps = (SPS*)h->ps.sps_list[h->ps.pps->sps_id]->data; if (h->bit_depth_luma != h->ps.sps->bit_depth_luma || h->chroma_format_idc != h->ps.sps->chroma_format_idc) needs_reinit = 1; if (h->flags & AV_CODEC_FLAG_LOW_DELAY || (h->ps.sps->bitstream_restriction_flag && !h->ps.sps->num_reorder_frames)) { if (h->avctx->has_b_frames > 1 || h->delayed_pic[0]) av_log(h->avctx, AV_LOG_WARNING, "Delayed frames seen. 
" "Reenabling low delay requires a codec flush.\n"); else h->low_delay = 1; } if (h->avctx->has_b_frames < 2) h->avctx->has_b_frames = !h->low_delay; } pps = h->ps.pps; sps = h->ps.sps; if (!h->setup_finished) { h->avctx->profile = ff_h264_get_profile(sps); h->avctx->level = sps->level_idc; h->avctx->refs = sps->ref_frame_count; if (h->mb_width != sps->mb_width || h->mb_height != sps->mb_height * (2 - sps->frame_mbs_only_flag)) needs_reinit = 1; h->mb_width = sps->mb_width; h->mb_height = sps->mb_height * (2 - sps->frame_mbs_only_flag); h->mb_num = h->mb_width * h->mb_height; h->mb_stride = h->mb_width + 1; h->b_stride = h->mb_width * 4; h->chroma_y_shift = sps->chroma_format_idc <= 1; // 400 uses yuv420p h->width = 16 * h->mb_width; h->height = 16 * h->mb_height; ret = init_dimensions(h); if (ret < 0) return ret; if (sps->video_signal_type_present_flag) { h->avctx->color_range = sps->full_range ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG; if (sps->colour_description_present_flag) { if (h->avctx->colorspace != sps->colorspace) needs_reinit = 1; h->avctx->color_primaries = sps->color_primaries; h->avctx->color_trc = sps->color_trc; h->avctx->colorspace = sps->colorspace; } } } if (h->context_initialized && needs_reinit) { h->context_initialized = 0; if (sl != h->slice_ctx) { av_log(h->avctx, AV_LOG_ERROR, "changing width %d -> %d / height %d -> %d on " "slice %d\n", h->width, h->avctx->coded_width, h->height, h->avctx->coded_height, h->current_slice + 1); return AVERROR_INVALIDDATA; } ff_h264_flush_change(h); if ((ret = get_pixel_format(h)) < 0) return ret; h->avctx->pix_fmt = ret; av_log(h->avctx, AV_LOG_INFO, "Reinit context to %dx%d, " "pix_fmt: %d\n", h->width, h->height, h->avctx->pix_fmt); if ((ret = h264_slice_header_init(h)) < 0) { av_log(h->avctx, AV_LOG_ERROR, "h264_slice_header_init() failed\n"); return ret; } } if (!h->context_initialized) { if (sl != h->slice_ctx) { av_log(h->avctx, AV_LOG_ERROR, "Cannot (re-)initialize context during parallel decoding.\n"); return AVERROR_PATCHWELCOME; } if ((ret = get_pixel_format(h)) < 0) return ret; h->avctx->pix_fmt = ret; if ((ret = h264_slice_header_init(h)) < 0) { av_log(h->avctx, AV_LOG_ERROR, "h264_slice_header_init() failed\n"); return ret; } } frame_num = get_bits(&sl->gb, sps->log2_max_frame_num); if (!h->setup_finished) h->frame_num = frame_num; sl->mb_mbaff = 0; last_pic_structure = h->picture_structure; last_pic_droppable = h->droppable; droppable = h->nal_ref_idc == 0; if (sps->frame_mbs_only_flag) { picture_structure = PICT_FRAME; } else { field_pic_flag = get_bits1(&sl->gb); if (field_pic_flag) { bottom_field_flag = get_bits1(&sl->gb); picture_structure = PICT_TOP_FIELD + bottom_field_flag; } else { picture_structure = PICT_FRAME; mb_aff_frame = sps->mb_aff; } } if (!h->setup_finished) { h->droppable = droppable; h->picture_structure = picture_structure; h->mb_aff_frame = mb_aff_frame; } sl->mb_field_decoding_flag = h->picture_structure != PICT_FRAME; if (h->current_slice != 0) { if (last_pic_structure != picture_structure || last_pic_droppable != droppable) { av_log(h->avctx, AV_LOG_ERROR, "Changing field mode (%d -> %d) between slices is not allowed\n", last_pic_structure, h->picture_structure); return AVERROR_INVALIDDATA; } else if (!h->cur_pic_ptr) { av_log(h->avctx, AV_LOG_ERROR, "unset cur_pic_ptr on slice %d\n", h->current_slice + 1); return AVERROR_INVALIDDATA; } } else { /* Shorten frame num gaps so we don't have to allocate reference * frames just to throw them away */ if (h->frame_num != h->prev_frame_num) { int 
unwrap_prev_frame_num = h->prev_frame_num; int max_frame_num = 1 << sps->log2_max_frame_num; if (unwrap_prev_frame_num > h->frame_num) unwrap_prev_frame_num -= max_frame_num; if ((h->frame_num - unwrap_prev_frame_num) > sps->ref_frame_count) { unwrap_prev_frame_num = (h->frame_num - sps->ref_frame_count) - 1; if (unwrap_prev_frame_num < 0) unwrap_prev_frame_num += max_frame_num; h->prev_frame_num = unwrap_prev_frame_num; } } /* See if we have a decoded first field looking for a pair... * Here, we're using that to see if we should mark previously * decode frames as "finished". * We have to do that before the "dummy" in-between frame allocation, * since that can modify s->current_picture_ptr. */ if (h->first_field) { assert(h->cur_pic_ptr); assert(h->cur_pic_ptr->f->buf[0]); assert(h->cur_pic_ptr->reference != DELAYED_PIC_REF); /* figure out if we have a complementary field pair */ if (!FIELD_PICTURE(h) || h->picture_structure == last_pic_structure) { /* Previous field is unmatched. Don't display it, but let it * remain for reference if marked as such. */ if (!last_pic_droppable && last_pic_structure != PICT_FRAME) { ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, last_pic_structure == PICT_TOP_FIELD); } } else { if (h->cur_pic_ptr->frame_num != h->frame_num) { /* This and previous field were reference, but had * different frame_nums. Consider this field first in * pair. Throw away previous field except for reference * purposes. */ if (!last_pic_droppable && last_pic_structure != PICT_FRAME) { ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, last_pic_structure == PICT_TOP_FIELD); } } else { /* Second field in complementary pair */ if (!((last_pic_structure == PICT_TOP_FIELD && h->picture_structure == PICT_BOTTOM_FIELD) || (last_pic_structure == PICT_BOTTOM_FIELD && h->picture_structure == PICT_TOP_FIELD))) { av_log(h->avctx, AV_LOG_ERROR, "Invalid field mode combination %d/%d\n", last_pic_structure, h->picture_structure); h->picture_structure = last_pic_structure; h->droppable = last_pic_droppable; return AVERROR_INVALIDDATA; } else if (last_pic_droppable != h->droppable) { avpriv_request_sample(h->avctx, "Found reference and non-reference fields in the same frame, which"); h->picture_structure = last_pic_structure; h->droppable = last_pic_droppable; return AVERROR_PATCHWELCOME; } } } } while (h->frame_num != h->prev_frame_num && h->frame_num != (h->prev_frame_num + 1) % (1 << sps->log2_max_frame_num)) { H264Picture *prev = h->short_ref_count ? h->short_ref[0] : NULL; av_log(h->avctx, AV_LOG_DEBUG, "Frame num gap %d %d\n", h->frame_num, h->prev_frame_num); ret = initialize_cur_frame(h); if (ret < 0) { h->first_field = 0; return ret; } h->prev_frame_num++; h->prev_frame_num %= 1 << sps->log2_max_frame_num; h->cur_pic_ptr->frame_num = h->prev_frame_num; ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 0); ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 1); ret = ff_generate_sliding_window_mmcos(h, 1); if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE)) return ret; ret = ff_h264_execute_ref_pic_marking(h, h->mmco, h->mmco_index); if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE)) return ret; /* Error concealment: If a ref is missing, copy the previous ref * in its place. * FIXME: Avoiding a memcpy would be nice, but ref handling makes * many assumptions about there being no actual duplicates. * FIXME: This does not copy padding for out-of-frame motion * vectors. 
Given we are concealing a lost frame, this probably * is not noticeable by comparison, but it should be fixed. */ if (h->short_ref_count) { if (prev && h->short_ref[0]->f->width == prev->f->width && h->short_ref[0]->f->height == prev->f->height && h->short_ref[0]->f->format == prev->f->format) { av_image_copy(h->short_ref[0]->f->data, h->short_ref[0]->f->linesize, (const uint8_t **)prev->f->data, prev->f->linesize, prev->f->format, h->mb_width * 16, h->mb_height * 16); h->short_ref[0]->poc = prev->poc + 2; } h->short_ref[0]->frame_num = h->prev_frame_num; } } /* See if we have a decoded first field looking for a pair... * We're using that to see whether to continue decoding in that * frame, or to allocate a new one. */ if (h->first_field) { assert(h->cur_pic_ptr); assert(h->cur_pic_ptr->f->buf[0]); assert(h->cur_pic_ptr->reference != DELAYED_PIC_REF); /* figure out if we have a complementary field pair */ if (!FIELD_PICTURE(h) || h->picture_structure == last_pic_structure) { /* Previous field is unmatched. Don't display it, but let it * remain for reference if marked as such. */ h->cur_pic_ptr = NULL; h->first_field = FIELD_PICTURE(h); } else { if (h->cur_pic_ptr->frame_num != h->frame_num) { /* This and the previous field had different frame_nums. * Consider this field first in pair. Throw away previous * one except for reference purposes. */ h->first_field = 1; h->cur_pic_ptr = NULL; } else { /* Second field in complementary pair */ h->first_field = 0; } } } else { /* Frame or first field in a potentially complementary pair */ h->first_field = FIELD_PICTURE(h); } if (!FIELD_PICTURE(h) || h->first_field) { if (h264_frame_start(h) < 0) { h->first_field = 0; return AVERROR_INVALIDDATA; } } else { release_unused_pictures(h, 0); } } assert(h->mb_num == h->mb_width * h->mb_height); if (first_mb_in_slice << FIELD_OR_MBAFF_PICTURE(h) >= h->mb_num || first_mb_in_slice >= h->mb_num) { av_log(h->avctx, AV_LOG_ERROR, "first_mb_in_slice overflow\n"); return AVERROR_INVALIDDATA; } sl->resync_mb_x = sl->mb_x = first_mb_in_slice % h->mb_width; sl->resync_mb_y = sl->mb_y = (first_mb_in_slice / h->mb_width) << FIELD_OR_MBAFF_PICTURE(h); if (h->picture_structure == PICT_BOTTOM_FIELD) sl->resync_mb_y = sl->mb_y = sl->mb_y + 1; assert(sl->mb_y < h->mb_height); if (h->picture_structure == PICT_FRAME) { h->curr_pic_num = h->frame_num; h->max_pic_num = 1 << sps->log2_max_frame_num; } else { h->curr_pic_num = 2 * h->frame_num + 1; h->max_pic_num = 1 << (sps->log2_max_frame_num + 1); } if (h->nal_unit_type == NAL_IDR_SLICE) get_ue_golomb(&sl->gb); /* idr_pic_id */ if (sps->poc_type == 0) { int poc_lsb = get_bits(&sl->gb, sps->log2_max_poc_lsb); if (!h->setup_finished) h->poc_lsb = poc_lsb; if (pps->pic_order_present == 1 && h->picture_structure == PICT_FRAME) { int delta_poc_bottom = get_se_golomb(&sl->gb); if (!h->setup_finished) h->delta_poc_bottom = delta_poc_bottom; } } if (sps->poc_type == 1 && !sps->delta_pic_order_always_zero_flag) { int delta_poc = get_se_golomb(&sl->gb); if (!h->setup_finished) h->delta_poc[0] = delta_poc; if (pps->pic_order_present == 1 && h->picture_structure == PICT_FRAME) { delta_poc = get_se_golomb(&sl->gb); if (!h->setup_finished) h->delta_poc[1] = delta_poc; } } if (!h->setup_finished) ff_init_poc(h, h->cur_pic_ptr->field_poc, &h->cur_pic_ptr->poc); if (pps->redundant_pic_cnt_present) sl->redundant_pic_count = get_ue_golomb(&sl->gb); if (sl->slice_type_nos == AV_PICTURE_TYPE_B) sl->direct_spatial_mv_pred = get_bits1(&sl->gb); ret = ff_h264_parse_ref_count(&sl->list_count, 
sl->ref_count, &sl->gb, pps, sl->slice_type_nos, h->picture_structure); if (ret < 0) return ret; if (sl->slice_type_nos != AV_PICTURE_TYPE_I) { ret = ff_h264_decode_ref_pic_list_reordering(h, sl); if (ret < 0) { sl->ref_count[1] = sl->ref_count[0] = 0; return ret; } } if ((pps->weighted_pred && sl->slice_type_nos == AV_PICTURE_TYPE_P) || (pps->weighted_bipred_idc == 1 && sl->slice_type_nos == AV_PICTURE_TYPE_B)) ff_h264_pred_weight_table(&sl->gb, sps, sl->ref_count, sl->slice_type_nos, &sl->pwt); else if (pps->weighted_bipred_idc == 2 && sl->slice_type_nos == AV_PICTURE_TYPE_B) { implicit_weight_table(h, sl, -1); } else { sl->pwt.use_weight = 0; for (i = 0; i < 2; i++) { sl->pwt.luma_weight_flag[i] = 0; sl->pwt.chroma_weight_flag[i] = 0; } } // If frame-mt is enabled, only update mmco tables for the first slice // in a field. Subsequent slices can temporarily clobber h->mmco_index // or h->mmco, which will cause ref list mix-ups and decoding errors // further down the line. This may break decoding if the first slice is // corrupt, thus we only do this if frame-mt is enabled. if (h->nal_ref_idc) { ret = ff_h264_decode_ref_pic_marking(h, &sl->gb, !(h->avctx->active_thread_type & FF_THREAD_FRAME) || h->current_slice == 0); if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE)) return AVERROR_INVALIDDATA; } if (FRAME_MBAFF(h)) { ff_h264_fill_mbaff_ref_list(h, sl); if (pps->weighted_bipred_idc == 2 && sl->slice_type_nos == AV_PICTURE_TYPE_B) { implicit_weight_table(h, sl, 0); implicit_weight_table(h, sl, 1); } } if (sl->slice_type_nos == AV_PICTURE_TYPE_B && !sl->direct_spatial_mv_pred) ff_h264_direct_dist_scale_factor(h, sl); ff_h264_direct_ref_list_init(h, sl); if (sl->slice_type_nos != AV_PICTURE_TYPE_I && pps->cabac) { tmp = get_ue_golomb_31(&sl->gb); if (tmp > 2) { av_log(h->avctx, AV_LOG_ERROR, "cabac_init_idc %u overflow\n", tmp); return AVERROR_INVALIDDATA; } sl->cabac_init_idc = tmp; } sl->last_qscale_diff = 0; tmp = pps->init_qp + get_se_golomb(&sl->gb); if (tmp > 51 + 6 * (sps->bit_depth_luma - 8)) { av_log(h->avctx, AV_LOG_ERROR, "QP %u out of range\n", tmp); return AVERROR_INVALIDDATA; } sl->qscale = tmp; sl->chroma_qp[0] = get_chroma_qp(h, 0, sl->qscale); sl->chroma_qp[1] = get_chroma_qp(h, 1, sl->qscale); // FIXME qscale / qp ... 
stuff if (sl->slice_type == AV_PICTURE_TYPE_SP) get_bits1(&sl->gb); /* sp_for_switch_flag */ if (sl->slice_type == AV_PICTURE_TYPE_SP || sl->slice_type == AV_PICTURE_TYPE_SI) get_se_golomb(&sl->gb); /* slice_qs_delta */ sl->deblocking_filter = 1; sl->slice_alpha_c0_offset = 0; sl->slice_beta_offset = 0; if (pps->deblocking_filter_parameters_present) { tmp = get_ue_golomb_31(&sl->gb); if (tmp > 2) { av_log(h->avctx, AV_LOG_ERROR, "deblocking_filter_idc %u out of range\n", tmp); return AVERROR_INVALIDDATA; } sl->deblocking_filter = tmp; if (sl->deblocking_filter < 2) sl->deblocking_filter ^= 1; // 1<->0 if (sl->deblocking_filter) { sl->slice_alpha_c0_offset = get_se_golomb(&sl->gb) * 2; sl->slice_beta_offset = get_se_golomb(&sl->gb) * 2; if (sl->slice_alpha_c0_offset > 12 || sl->slice_alpha_c0_offset < -12 || sl->slice_beta_offset > 12 || sl->slice_beta_offset < -12) { av_log(h->avctx, AV_LOG_ERROR, "deblocking filter parameters %d %d out of range\n", sl->slice_alpha_c0_offset, sl->slice_beta_offset); return AVERROR_INVALIDDATA; } } } if (h->avctx->skip_loop_filter >= AVDISCARD_ALL || (h->avctx->skip_loop_filter >= AVDISCARD_NONKEY && sl->slice_type_nos != AV_PICTURE_TYPE_I) || (h->avctx->skip_loop_filter >= AVDISCARD_BIDIR && sl->slice_type_nos == AV_PICTURE_TYPE_B) || (h->avctx->skip_loop_filter >= AVDISCARD_NONREF && h->nal_ref_idc == 0)) sl->deblocking_filter = 0; if (sl->deblocking_filter == 1 && h->max_contexts > 1) { if (h->avctx->flags2 & AV_CODEC_FLAG2_FAST) { /* Cheat slightly for speed: * Do not bother to deblock across slices. */ sl->deblocking_filter = 2; } else { h->max_contexts = 1; if (!h->single_decode_warning) { av_log(h->avctx, AV_LOG_INFO, "Cannot parallelize deblocking type 1, decoding such frames in sequential order\n"); h->single_decode_warning = 1; } if (sl != h->slice_ctx) { av_log(h->avctx, AV_LOG_ERROR, "Deblocking switched inside frame.\n"); return 1; } } } sl->qp_thresh = 15 - FFMIN(sl->slice_alpha_c0_offset, sl->slice_beta_offset) - FFMAX3(0, pps->chroma_qp_index_offset[0], pps->chroma_qp_index_offset[1]) + 6 * (sps->bit_depth_luma - 8); sl->slice_num = ++h->current_slice; if (sl->slice_num >= MAX_SLICES) { av_log(h->avctx, AV_LOG_ERROR, "Too many slices, increase MAX_SLICES and recompile\n"); } for (j = 0; j < 2; j++) { int id_list[16]; int *ref2frm = sl->ref2frm[sl->slice_num & (MAX_SLICES - 1)][j]; for (i = 0; i < 16; i++) { id_list[i] = 60; if (j < sl->list_count && i < sl->ref_count[j] && sl->ref_list[j][i].parent->f->buf[0]) { int k; AVBuffer *buf = sl->ref_list[j][i].parent->f->buf[0]->buffer; for (k = 0; k < h->short_ref_count; k++) if (h->short_ref[k]->f->buf[0]->buffer == buf) { id_list[i] = k; break; } for (k = 0; k < h->long_ref_count; k++) if (h->long_ref[k] && h->long_ref[k]->f->buf[0]->buffer == buf) { id_list[i] = h->short_ref_count + k; break; } } } ref2frm[0] = ref2frm[1] = -1; for (i = 0; i < 16; i++) ref2frm[i + 2] = 4 * id_list[i] + (sl->ref_list[j][i].reference & 3); ref2frm[18 + 0] = ref2frm[18 + 1] = -1; for (i = 16; i < 48; i++) ref2frm[i + 4] = 4 * id_list[(i - 16) >> 1] + (sl->ref_list[j][i].reference & 3); } if (h->avctx->debug & FF_DEBUG_PICT_INFO) { av_log(h->avctx, AV_LOG_DEBUG, "slice:%d %s mb:%d %c%s%s pps:%u frame:%d poc:%d/%d ref:%d/%d qp:%d loop:%d:%d:%d weight:%d%s %s\n", sl->slice_num, (h->picture_structure == PICT_FRAME ? "F" : h->picture_structure == PICT_TOP_FIELD ? "T" : "B"), first_mb_in_slice, av_get_picture_type_char(sl->slice_type), sl->slice_type_fixed ? " fix" : "", h->nal_unit_type == NAL_IDR_SLICE ? 
" IDR" : "", pps_id, h->frame_num, h->cur_pic_ptr->field_poc[0], h->cur_pic_ptr->field_poc[1], sl->ref_count[0], sl->ref_count[1], sl->qscale, sl->deblocking_filter, sl->slice_alpha_c0_offset, sl->slice_beta_offset, sl->pwt.use_weight, sl->pwt.use_weight == 1 && sl->pwt.use_weight_chroma ? "c" : "", sl->slice_type == AV_PICTURE_TYPE_B ? (sl->direct_spatial_mv_pred ? "SPAT" : "TEMP") : ""); } return 0; } | 21,431 |
0 | void virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb) { uint32_t type; struct iovec *in_iov = req->elem.in_sg; struct iovec *iov = req->elem.out_sg; unsigned in_num = req->elem.in_num; unsigned out_num = req->elem.out_num; if (req->elem.out_num < 1 || req->elem.in_num < 1) { error_report("virtio-blk missing headers"); exit(1); } if (unlikely(iov_to_buf(iov, out_num, 0, &req->out, sizeof(req->out)) != sizeof(req->out))) { error_report("virtio-blk request outhdr too short"); exit(1); } iov_discard_front(&iov, &out_num, sizeof(req->out)); if (in_num < 1 || in_iov[in_num - 1].iov_len < sizeof(struct virtio_blk_inhdr)) { error_report("virtio-blk request inhdr too short"); exit(1); } /* We always touch the last byte, so just see how big in_iov is. */ req->in_len = iov_size(in_iov, in_num); req->in = (void *)in_iov[in_num - 1].iov_base + in_iov[in_num - 1].iov_len - sizeof(struct virtio_blk_inhdr); iov_discard_back(in_iov, &in_num, sizeof(struct virtio_blk_inhdr)); type = virtio_ldl_p(VIRTIO_DEVICE(req->dev), &req->out.type); /* VIRTIO_BLK_T_OUT defines the command direction. VIRTIO_BLK_T_BARRIER * is an optional flag. Although a guest should not send this flag if * not negotiated we ignored it in the past. So keep ignoring it. */ switch (type & ~(VIRTIO_BLK_T_OUT | VIRTIO_BLK_T_BARRIER)) { case VIRTIO_BLK_T_IN: { bool is_write = type & VIRTIO_BLK_T_OUT; req->sector_num = virtio_ldq_p(VIRTIO_DEVICE(req->dev), &req->out.sector); if (is_write) { qemu_iovec_init_external(&req->qiov, iov, out_num); trace_virtio_blk_handle_write(req, req->sector_num, req->qiov.size / BDRV_SECTOR_SIZE); } else { qemu_iovec_init_external(&req->qiov, in_iov, in_num); trace_virtio_blk_handle_read(req, req->sector_num, req->qiov.size / BDRV_SECTOR_SIZE); } if (!virtio_blk_sect_range_ok(req->dev, req->sector_num, req->qiov.size)) { virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR); virtio_blk_free_request(req); return; } block_acct_start(blk_get_stats(req->dev->blk), &req->acct, req->qiov.size, is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ); /* merge would exceed maximum number of requests or IO direction * changes */ if (mrb->num_reqs > 0 && (mrb->num_reqs == VIRTIO_BLK_MAX_MERGE_REQS || is_write != mrb->is_write || !req->dev->conf.request_merging)) { virtio_blk_submit_multireq(req->dev->blk, mrb); } assert(mrb->num_reqs < VIRTIO_BLK_MAX_MERGE_REQS); mrb->reqs[mrb->num_reqs++] = req; mrb->is_write = is_write; break; } case VIRTIO_BLK_T_FLUSH: virtio_blk_handle_flush(req, mrb); break; case VIRTIO_BLK_T_SCSI_CMD: virtio_blk_handle_scsi(req); break; case VIRTIO_BLK_T_GET_ID: { VirtIOBlock *s = req->dev; /* * NB: per existing s/n string convention the string is * terminated by '\0' only when shorter than buffer. */ const char *serial = s->conf.serial ? s->conf.serial : ""; size_t size = MIN(strlen(serial) + 1, MIN(iov_size(in_iov, in_num), VIRTIO_BLK_ID_BYTES)); iov_from_buf(in_iov, in_num, 0, serial, size); virtio_blk_req_complete(req, VIRTIO_BLK_S_OK); virtio_blk_free_request(req); break; } default: virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP); virtio_blk_free_request(req); } } | 21,432 |
0 | static void net_vhost_user_event(void *opaque, int event) { VhostUserState *s = opaque; switch (event) { case CHR_EVENT_OPENED: vhost_user_start(s); net_vhost_link_down(s, false); error_report("chardev \"%s\" went up", s->chr->label); break; case CHR_EVENT_CLOSED: net_vhost_link_down(s, true); vhost_user_stop(s); error_report("chardev \"%s\" went down", s->chr->label); break; } } | 21,433 |
0 | static uint32_t virtio_blk_get_features(VirtIODevice *vdev) { VirtIOBlock *s = to_virtio_blk(vdev); uint32_t features = 0; features |= (1 << VIRTIO_BLK_F_SEG_MAX); features |= (1 << VIRTIO_BLK_F_GEOMETRY); if (bdrv_enable_write_cache(s->bs)) features |= (1 << VIRTIO_BLK_F_WCACHE); #ifdef __linux__ features |= (1 << VIRTIO_BLK_F_SCSI); #endif if (strcmp(s->serial_str, "0")) features |= 1 << VIRTIO_BLK_F_IDENTIFY; if (bdrv_is_read_only(s->bs)) features |= 1 << VIRTIO_BLK_F_RO; return features; } | 21,437 |
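A small companion sketch showing how a caller would test one of the feature bits assembled above; the helper is hypothetical, since real virtio code uses its own feature macros.

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical helper: e.g. virtio_feature_set(features, VIRTIO_BLK_F_RO). */
static inline bool virtio_feature_set(uint32_t features, unsigned bit)
{
    return (features & (1u << bit)) != 0;
}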
0 | static void vmsvga_bios_write(void *opaque, uint32_t address, uint32_t data) { printf("%s: what are we supposed to do with (%08x)?\n", __FUNCTION__, data); } | 21,438 |
0 | static void platform_ioport_map(PCIDevice *pci_dev, int region_num, pcibus_t addr, pcibus_t size, int type) { PCIXenPlatformState *d = DO_UPCAST(PCIXenPlatformState, pci_dev, pci_dev); register_ioport_write(addr, size, 1, xen_platform_ioport_writeb, d); register_ioport_read(addr, size, 1, xen_platform_ioport_readb, d); } | 21,439 |
0 | static void qemu_chr_fire_open_event(void *opaque) { CharDriverState *s = opaque; qemu_chr_be_event(s, CHR_EVENT_OPENED); qemu_free_timer(s->open_timer); s->open_timer = NULL; } | 21,441 |
0 | static int sd_snapshot_goto(BlockDriverState *bs, const char *snapshot_id) { BDRVSheepdogState *s = bs->opaque; BDRVSheepdogState *old_s; char vdi[SD_MAX_VDI_LEN], tag[SD_MAX_VDI_TAG_LEN]; char *buf = NULL; uint32_t vid; uint32_t snapid = 0; int ret = 0, fd; old_s = g_malloc(sizeof(BDRVSheepdogState)); memcpy(old_s, s, sizeof(BDRVSheepdogState)); pstrcpy(vdi, sizeof(vdi), s->name); snapid = strtoul(snapshot_id, NULL, 10); if (snapid) { tag[0] = 0; } else { pstrcpy(tag, sizeof(tag), s->name); } ret = find_vdi_name(s, vdi, snapid, tag, &vid, 1); if (ret) { error_report("Failed to find_vdi_name"); goto out; } fd = connect_to_sdog(s->addr, s->port); if (fd < 0) { error_report("failed to connect"); ret = fd; goto out; } buf = g_malloc(SD_INODE_SIZE); ret = read_object(fd, buf, vid_to_vdi_oid(vid), s->inode.nr_copies, SD_INODE_SIZE, 0, s->cache_enabled); closesocket(fd); if (ret) { goto out; } memcpy(&s->inode, buf, sizeof(s->inode)); if (!s->inode.vm_state_size) { error_report("Invalid snapshot"); ret = -ENOENT; goto out; } s->is_snapshot = true; g_free(buf); g_free(old_s); return 0; out: /* recover bdrv_sd_state */ memcpy(s, old_s, sizeof(BDRVSheepdogState)); g_free(buf); g_free(old_s); error_report("failed to open. recover old bdrv_sd_state."); return ret; } | 21,442 |
0 | static void ehci_advance_state(EHCIState *ehci, int async) { EHCIQueue *q = NULL; int again; int iter = 0; do { if (ehci_get_state(ehci, async) == EST_FETCHQH) { iter++; /* if we are roaming a lot of QH without executing a qTD * something is wrong with the linked list. TO-DO: why is * this hack needed? */ assert(iter < MAX_ITERATIONS); #if 0 if (iter > MAX_ITERATIONS) { DPRINTF("\n*** advance_state: bailing on MAX ITERATIONS***\n"); ehci_set_state(ehci, async, EST_ACTIVE); break; } #endif } switch(ehci_get_state(ehci, async)) { case EST_WAITLISTHEAD: again = ehci_state_waitlisthead(ehci, async); break; case EST_FETCHENTRY: again = ehci_state_fetchentry(ehci, async); break; case EST_FETCHQH: q = ehci_state_fetchqh(ehci, async); again = q ? 1 : 0; break; case EST_FETCHITD: again = ehci_state_fetchitd(ehci, async); break; case EST_FETCHSITD: again = ehci_state_fetchsitd(ehci, async); break; case EST_ADVANCEQUEUE: again = ehci_state_advqueue(q, async); break; case EST_FETCHQTD: again = ehci_state_fetchqtd(q, async); break; case EST_HORIZONTALQH: again = ehci_state_horizqh(q, async); break; case EST_EXECUTE: iter = 0; again = ehci_state_execute(q, async); break; case EST_EXECUTING: assert(q != NULL); again = ehci_state_executing(q, async); break; case EST_WRITEBACK: assert(q != NULL); again = ehci_state_writeback(q, async); break; default: fprintf(stderr, "Bad state!\n"); again = -1; assert(0); break; } if (again < 0) { fprintf(stderr, "processing error - resetting ehci HC\n"); ehci_reset(ehci); again = 0; } } while (again); ehci_commit_interrupt(ehci); } | 21,443 |
0 | static av_always_inline void mpeg_motion_lowres(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int field_based, int bottom_field, int field_select, uint8_t **ref_picture, h264_chroma_mc_func *pix_op, int motion_x, int motion_y, int h, int mb_y) { uint8_t *ptr_y, *ptr_cb, *ptr_cr; int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy, uvsx, uvsy; const int lowres = s->avctx->lowres; const int op_index = FFMIN(lowres-1+s->chroma_x_shift, 2); const int block_s = 8>>lowres; const int s_mask = (2 << lowres) - 1; const int h_edge_pos = s->h_edge_pos >> lowres; const int v_edge_pos = s->v_edge_pos >> lowres; linesize = s->current_picture.f.linesize[0] << field_based; uvlinesize = s->current_picture.f.linesize[1] << field_based; // FIXME obviously not perfect but qpel will not work in lowres anyway if (s->quarter_sample) { motion_x /= 2; motion_y /= 2; } if(field_based){ motion_y += (bottom_field - field_select)*((1 << lowres)-1); } sx = motion_x & s_mask; sy = motion_y & s_mask; src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1); src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1); if (s->out_format == FMT_H263) { uvsx = ((motion_x >> 1) & s_mask) | (sx & 1); uvsy = ((motion_y >> 1) & s_mask) | (sy & 1); uvsrc_x = src_x >> 1; uvsrc_y = src_y >> 1; } else if (s->out_format == FMT_H261) { // even chroma mv's are full pel in H261 mx = motion_x / 4; my = motion_y / 4; uvsx = (2 * mx) & s_mask; uvsy = (2 * my) & s_mask; uvsrc_x = s->mb_x * block_s + (mx >> lowres); uvsrc_y = mb_y * block_s + (my >> lowres); } else { if(s->chroma_y_shift){ mx = motion_x / 2; my = motion_y / 2; uvsx = mx & s_mask; uvsy = my & s_mask; uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1); uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1); } else { if(s->chroma_x_shift){ //Chroma422 mx = motion_x / 2; uvsx = mx & s_mask; uvsy = motion_y & s_mask; uvsrc_y = src_y; uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1)); } else { //Chroma444 uvsx = motion_x & s_mask; uvsy = motion_y & s_mask; uvsrc_x = src_x; uvsrc_y = src_y; } } } ptr_y = ref_picture[0] + src_y * linesize + src_x; ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x; ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x; if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 || (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) { s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y, linesize >> field_based, 17, 17 + field_based, src_x, src_y << field_based, h_edge_pos, v_edge_pos); ptr_y = s->edge_emu_buffer; if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) { uint8_t *uvbuf = s->edge_emu_buffer + 18 * s->linesize; s->vdsp.emulated_edge_mc(uvbuf , ptr_cb, uvlinesize >> field_based, 9, 9 + field_based, uvsrc_x, uvsrc_y << field_based, h_edge_pos >> 1, v_edge_pos >> 1); s->vdsp.emulated_edge_mc(uvbuf + 16, ptr_cr, uvlinesize >> field_based, 9, 9 + field_based, uvsrc_x, uvsrc_y << field_based, h_edge_pos >> 1, v_edge_pos >> 1); ptr_cb = uvbuf; ptr_cr = uvbuf + 16; } } // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f.data if (bottom_field) { dest_y += s->linesize; dest_cb += s->uvlinesize; dest_cr += s->uvlinesize; } if (field_select) { ptr_y += s->linesize; ptr_cb += s->uvlinesize; ptr_cr += s->uvlinesize; } sx = (sx << 2) >> lowres; sy = (sy << 2) >> lowres; pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy); if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) { int hc = s->chroma_y_shift ? 
(h+1-bottom_field)>>1 : h; uvsx = (uvsx << 2) >> lowres; uvsy = (uvsy << 2) >> lowres; if (hc) { pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy); pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy); } } // FIXME h261 lowres loop filter } | 21,444 |
0 | static bool run_poll_handlers_once(AioContext *ctx) { bool progress = false; AioHandler *node; QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) { if (!node->deleted && node->io_poll && aio_node_check(ctx, node->is_external) && node->io_poll(node->opaque)) { progress = true; } /* Caller handles freeing deleted nodes. Don't do it here. */ } return progress; } | 21,445 |
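A hedged sketch of how a deadline-bounded polling phase could drive run_poll_handlers_once(). The wrapper name is invented; qemu_clock_get_ns() is assumed available from QEMU's timer API.

/* Hypothetical driver loop: poll until a handler makes progress or the
 * deadline (absolute, in ns) passes. */
static bool run_poll_handlers_until(AioContext *ctx, int64_t deadline_ns)
{
    bool progress;

    do {
        progress = run_poll_handlers_once(ctx);
    } while (!progress &&
             qemu_clock_get_ns(QEMU_CLOCK_REALTIME) < deadline_ns);

    return progress;
}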
0 | print_insn_ppi (int field_b, struct disassemble_info *info) { static const char *sx_tab[] = { "x0", "x1", "a0", "a1" }; static const char *sy_tab[] = { "y0", "y1", "m0", "m1" }; fprintf_ftype fprintf_fn = info->fprintf_func; void *stream = info->stream; unsigned int nib1, nib2, nib3; unsigned int altnib1, nib4; const char *dc = NULL; const sh_opcode_info *op; if ((field_b & 0xe800) == 0) { fprintf_fn (stream, "psh%c\t#%d,", field_b & 0x1000 ? 'a' : 'l', (field_b >> 4) & 127); print_dsp_reg (field_b & 0xf, fprintf_fn, stream); return; } if ((field_b & 0xc000) == 0x4000 && (field_b & 0x3000) != 0x1000) { static const char *du_tab[] = { "x0", "y0", "a0", "a1" }; static const char *se_tab[] = { "x0", "x1", "y0", "a1" }; static const char *sf_tab[] = { "y0", "y1", "x0", "a1" }; static const char *sg_tab[] = { "m0", "m1", "a0", "a1" }; if (field_b & 0x2000) { fprintf_fn (stream, "p%s %s,%s,%s\t", (field_b & 0x1000) ? "add" : "sub", sx_tab[(field_b >> 6) & 3], sy_tab[(field_b >> 4) & 3], du_tab[(field_b >> 0) & 3]); } else if ((field_b & 0xf0) == 0x10 && info->mach != bfd_mach_sh_dsp && info->mach != bfd_mach_sh3_dsp) { fprintf_fn (stream, "pclr %s \t", du_tab[(field_b >> 0) & 3]); } else if ((field_b & 0xf3) != 0) { fprintf_fn (stream, ".word 0x%x\t", field_b); } fprintf_fn (stream, "pmuls%c%s,%s,%s", field_b & 0x2000 ? ' ' : '\t', se_tab[(field_b >> 10) & 3], sf_tab[(field_b >> 8) & 3], sg_tab[(field_b >> 2) & 3]); return; } nib1 = PPIC; nib2 = field_b >> 12 & 0xf; nib3 = field_b >> 8 & 0xf; nib4 = field_b >> 4 & 0xf; switch (nib3 & 0x3) { case 0: dc = ""; nib1 = PPI3; break; case 1: dc = ""; break; case 2: dc = "dct "; nib3 -= 1; break; case 3: dc = "dcf "; nib3 -= 2; break; } if (nib1 == PPI3) altnib1 = PPI3NC; else altnib1 = nib1; for (op = sh_table; op->name; op++) { if ((op->nibbles[1] == nib1 || op->nibbles[1] == altnib1) && op->nibbles[2] == nib2 && op->nibbles[3] == nib3) { int n; switch (op->nibbles[4]) { case HEX_0: break; case HEX_XX00: if ((nib4 & 3) != 0) continue; break; case HEX_1: if ((nib4 & 3) != 1) continue; break; case HEX_00YY: if ((nib4 & 0xc) != 0) continue; break; case HEX_4: if ((nib4 & 0xc) != 4) continue; break; default: abort (); } fprintf_fn (stream, "%s%s\t", dc, op->name); for (n = 0; n < 3 && op->arg[n] != A_END; n++) { if (n && op->arg[1] != A_END) fprintf_fn (stream, ","); switch (op->arg[n]) { case DSP_REG_N: print_dsp_reg (field_b & 0xf, fprintf_fn, stream); break; case DSP_REG_X: fprintf_fn (stream, sx_tab[(field_b >> 6) & 3]); break; case DSP_REG_Y: fprintf_fn (stream, sy_tab[(field_b >> 4) & 3]); break; case A_MACH: fprintf_fn (stream, "mach"); break; case A_MACL: fprintf_fn (stream, "macl"); break; default: abort (); } } return; } } /* Not found. */ fprintf_fn (stream, ".word 0x%x", field_b); } | 21,446 |
0 | static int monitor_parse(const char *devname) { static int index = 0; char label[32]; if (strcmp(devname, "none") == 0) return 0; if (index == MAX_MONITOR_DEVICES) { fprintf(stderr, "qemu: too many monitor devices\n"); exit(1); } if (index == 0) { snprintf(label, sizeof(label), "monitor"); } else { snprintf(label, sizeof(label), "monitor%d", index); } monitor_hds[index] = qemu_chr_open(label, devname, NULL); if (!monitor_hds[index]) { fprintf(stderr, "qemu: could not open monitor device '%s'\n", devname); return -1; } index++; return 0; } | 21,447 |
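The labeling rule above keeps the first monitor unnumbered and suffixes later ones with their index. A sketch of just that naming logic:

#include <stdio.h>

int main(void)
{
    char label[32];
    for (int index = 0; index < 3; index++) {
        if (index == 0) {
            snprintf(label, sizeof(label), "monitor");
        } else {
            snprintf(label, sizeof(label), "monitor%d", index);
        }
        puts(label);   /* monitor, monitor1, monitor2 */
    }
    return 0;
}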
0 | void ppc_store_sdr1(CPUPPCState *env, target_ulong value) { qemu_log_mask(CPU_LOG_MMU, "%s: " TARGET_FMT_lx "\n", __func__, value); assert(!env->external_htab); env->spr[SPR_SDR1] = value; #if defined(TARGET_PPC64) if (env->mmu_model & POWERPC_MMU_64) { PowerPCCPU *cpu = ppc_env_get_cpu(env); Error *local_err = NULL; ppc_hash64_set_sdr1(cpu, value, &local_err); if (local_err) { error_report_err(local_err); error_free(local_err); } } else #endif /* defined(TARGET_PPC64) */ { /* FIXME: Should check for valid HTABMASK values */ env->htab_mask = ((value & SDR_32_HTABMASK) << 16) | 0xFFFF; env->htab_base = value & SDR_32_HTABORG; } } | 21,448 |
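On 32-bit MMUs, the row above splits SDR1 into a hash-table base and mask. A sketch of the same decode; the mask constants follow the 32-bit PowerPC HTAB layout and should be treated as assumptions here:

#include <stdint.h>
#include <stdio.h>

#define SDR_32_HTABORG   0xFFFF0000u   /* assumed field layout */
#define SDR_32_HTABMASK  0x000001FFu

int main(void)
{
    uint32_t value = 0x00FE0003;
    uint32_t htab_base = value & SDR_32_HTABORG;
    uint32_t htab_mask = ((value & SDR_32_HTABMASK) << 16) | 0xFFFF;
    printf("base=%#x mask=%#x\n", (unsigned)htab_base, (unsigned)htab_mask);
    return 0;
}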
0 | static av_always_inline void idct_internal(uint8_t *dst, DCTELEM *block, int stride, int block_stride, int shift, int add){ int i; uint8_t *cm = ff_cropTbl + MAX_NEG_CROP; block[0] += 1<<(shift-1); for(i=0; i<4; i++){ const int z0= block[0 + block_stride*i] + block[2 + block_stride*i]; const int z1= block[0 + block_stride*i] - block[2 + block_stride*i]; const int z2= (block[1 + block_stride*i]>>1) - block[3 + block_stride*i]; const int z3= block[1 + block_stride*i] + (block[3 + block_stride*i]>>1); block[0 + block_stride*i]= z0 + z3; block[1 + block_stride*i]= z1 + z2; block[2 + block_stride*i]= z1 - z2; block[3 + block_stride*i]= z0 - z3; } for(i=0; i<4; i++){ const int z0= block[i + block_stride*0] + block[i + block_stride*2]; const int z1= block[i + block_stride*0] - block[i + block_stride*2]; const int z2= (block[i + block_stride*1]>>1) - block[i + block_stride*3]; const int z3= block[i + block_stride*1] + (block[i + block_stride*3]>>1); dst[i + 0*stride]= cm[ add*dst[i + 0*stride] + ((z0 + z3) >> shift) ]; dst[i + 1*stride]= cm[ add*dst[i + 1*stride] + ((z1 + z2) >> shift) ]; dst[i + 2*stride]= cm[ add*dst[i + 2*stride] + ((z1 - z2) >> shift) ]; dst[i + 3*stride]= cm[ add*dst[i + 3*stride] + ((z0 - z3) >> shift) ]; } } | 21,449 |
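The transform above is two passes of the H.264-style 4-point butterfly (over rows, then columns). One 1-D pass in isolation, fed a pure DC input:

#include <stdio.h>

static void butterfly4(int b[4])
{
    const int z0 = b[0] + b[2];
    const int z1 = b[0] - b[2];
    const int z2 = (b[1] >> 1) - b[3];
    const int z3 = b[1] + (b[3] >> 1);
    b[0] = z0 + z3;
    b[1] = z1 + z2;
    b[2] = z1 - z2;
    b[3] = z0 - z3;
}

int main(void)
{
    int b[4] = { 64, 0, 0, 0 };   /* pure DC input */
    butterfly4(b);
    printf("%d %d %d %d\n", b[0], b[1], b[2], b[3]);  /* flat: 64 64 64 64 */
    return 0;
}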
0 | static int mp_dacl_removexattr(FsContext *ctx, const char *path, const char *name) { int ret; char buffer[PATH_MAX]; ret = lremovexattr(rpath(ctx, path, buffer), MAP_ACL_DEFAULT); if (ret == -1 && errno == ENODATA) { /* * We don't get ENODATA error when trying to remove a * posix acl that is not present. So don't throw the error * even in case of mapped security model */ errno = 0; ret = 0; } return ret; } | 21,451 |
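The idiom above converts one specific expected failure (removing an attribute that was never set) into success. A self-contained sketch with a stand-in for the failing lremovexattr(); ENODATA is the Linux errno for a missing attribute:

#include <errno.h>
#include <stdio.h>

static int remove_attr(int present)
{
    if (!present) {
        errno = ENODATA;   /* stand-in for a failing lremovexattr() */
        return -1;
    }
    return 0;
}

int main(void)
{
    int ret = remove_attr(0);
    if (ret == -1 && errno == ENODATA) {
        errno = 0;         /* absent attribute: not an error */
        ret = 0;
    }
    printf("ret=%d\n", ret);
    return 0;
}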
0 | void *qemu_thread_join(QemuThread *thread) { QemuThreadData *data; void *ret; HANDLE handle; data = thread->data; if (!data) { return NULL; } /* * Because multiple copies of the QemuThread can exist via * qemu_thread_get_self, we need to store a value that cannot * leak there. The simplest, non racy way is to store the TID, * discard the handle that _beginthreadex gives back, and * get another copy of the handle here. */ EnterCriticalSection(&data->cs); if (!data->exited) { handle = OpenThread(SYNCHRONIZE, FALSE, thread->tid); LeaveCriticalSection(&data->cs); WaitForSingleObject(handle, INFINITE); CloseHandle(handle); } else { LeaveCriticalSection(&data->cs); } ret = data->ret; DeleteCriticalSection(&data->cs); g_free(data); return ret; } | 21,452 |
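The join above deliberately re-derives a waitable handle from the thread id instead of keeping the _beginthreadex handle. A Windows-only sketch of that reopen-and-wait pattern, using a suspended thread so the id is guaranteed to still be valid (error handling omitted):

#include <windows.h>
#include <stdio.h>

static DWORD WINAPI worker(LPVOID arg)
{
    (void)arg;
    return 7;
}

int main(void)
{
    DWORD tid;
    HANDLE h = CreateThread(NULL, 0, worker, NULL, CREATE_SUSPENDED, &tid);
    CloseHandle(h);                 /* discard the original handle */

    /* Reopen a handle from the TID, then resume and wait. */
    HANDLE h2 = OpenThread(SYNCHRONIZE | THREAD_SUSPEND_RESUME, FALSE, tid);
    ResumeThread(h2);
    WaitForSingleObject(h2, INFINITE);
    CloseHandle(h2);

    printf("joined thread %lu\n", (unsigned long)tid);
    return 0;
}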
0 | void OPPROTO op_sti(void) { raise_exception(EXCP0D_GPF); } | 21,454 |
0 | static void test_pci_spec(void) { AHCIQState *ahci; ahci = ahci_boot(); ahci_test_pci_spec(ahci); ahci_shutdown(ahci); } | 21,455 |
0 | static uint32_t nvdimm_rw_label_data_check(NVDIMMDevice *nvdimm, uint32_t offset, uint32_t length) { uint32_t ret = 3 /* Invalid Input Parameters */; if (offset + length < offset) { nvdimm_debug("offset %#x + length %#x is overflow.\n", offset, length); return ret; } if (nvdimm->label_size < offset + length) { nvdimm_debug("position %#x is beyond label data (len = %" PRIx64 ").\n", offset + length, nvdimm->label_size); return ret; } if (length > nvdimm_get_max_xfer_label_size()) { nvdimm_debug("length (%#x) is larger than max_xfer (%#x).\n", length, nvdimm_get_max_xfer_label_size()); return ret; } return 0 /* Success */; } | 21,456 |
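The first test above is the standard unsigned wrap-around check: for uint32_t, offset + length < offset holds exactly when the addition overflows. A tiny demonstration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t offset = 0xFFFFFFF0u, length = 0x20u;
    if (offset + length < offset) {     /* sum wrapped past 2^32 */
        printf("offset %#x + length %#x overflows\n",
               (unsigned)offset, (unsigned)length);
    }
    return 0;
}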
0 | static inline void gen_evmwumia(DisasContext *ctx) { TCGv_i64 tmp; if (unlikely(!ctx->spe_enabled)) { gen_exception(ctx, POWERPC_EXCP_APU); return; } gen_evmwumi(ctx); /* rD := rA * rB */ tmp = tcg_temp_new_i64(); /* acc := rD */ gen_load_gpr64(tmp, rD(ctx->opcode)); tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUState, spe_acc)); tcg_temp_free_i64(tmp); } | 21,457 |
0 | QmpInputVisitor *qmp_input_visitor_new(QObject *obj, bool strict) { QmpInputVisitor *v; v = g_malloc0(sizeof(*v)); v->visitor.type = VISITOR_INPUT; v->visitor.start_struct = qmp_input_start_struct; v->visitor.end_struct = qmp_input_end_struct; v->visitor.start_list = qmp_input_start_list; v->visitor.next_list = qmp_input_next_list; v->visitor.end_list = qmp_input_end_list; v->visitor.start_alternate = qmp_input_start_alternate; v->visitor.type_int64 = qmp_input_type_int64; v->visitor.type_uint64 = qmp_input_type_uint64; v->visitor.type_bool = qmp_input_type_bool; v->visitor.type_str = qmp_input_type_str; v->visitor.type_number = qmp_input_type_number; v->visitor.type_any = qmp_input_type_any; v->visitor.optional = qmp_input_optional; v->strict = strict; qmp_input_push(v, obj, NULL); qobject_incref(obj); return v; } | 21,458 |
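Construction above is classic function-pointer wiring: allocate the struct, then point every callback at the input implementation. A sketch with illustrative types, not the QAPI ones:

#include <stdio.h>
#include <stdlib.h>

typedef struct Visitor {
    void (*type_int64)(struct Visitor *v, long long *obj);
    void (*type_bool)(struct Visitor *v, int *obj);
} Visitor;

static void input_int64(Visitor *v, long long *obj) { (void)v; *obj = 42; }
static void input_bool(Visitor *v, int *obj)        { (void)v; *obj = 1;  }

static Visitor *input_visitor_new(void)
{
    Visitor *v = calloc(1, sizeof(*v));
    v->type_int64 = input_int64;   /* wire each callback at construction */
    v->type_bool  = input_bool;
    return v;
}

int main(void)
{
    Visitor *v = input_visitor_new();
    long long i; int b;
    v->type_int64(v, &i);
    v->type_bool(v, &b);
    printf("%lld %d\n", i, b);
    free(v);
    return 0;
}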
1 | static int mxf_parse_index(MXFContext *mxf, int track_id, AVStream *st) { int64_t accumulated_offset = 0; int j, k, ret, nb_sorted_segments; MXFIndexTableSegment **sorted_segments; int n_delta = track_id - 1; /* TrackID = 1-based stream index */ if (track_id < 1) { av_log(mxf->fc, AV_LOG_ERROR, "TrackID not positive: %i\n", track_id); return AVERROR_INVALIDDATA; } if ((ret = mxf_get_sorted_table_segments(mxf, &nb_sorted_segments, &sorted_segments))) return ret; for (j = 0; j < nb_sorted_segments; j++) { int duration, sample_duration = 1, last_sample_size = 0; int64_t segment_size; MXFIndexTableSegment *tableseg = sorted_segments[j]; /* reset accumulated_offset on BodySID change */ if (j > 0 && tableseg->body_sid != sorted_segments[j-1]->body_sid) accumulated_offset = 0; if (n_delta >= tableseg->nb_delta_entries && st->index != 0) continue; duration = tableseg->index_duration > 0 ? tableseg->index_duration : st->duration - st->nb_index_entries; segment_size = tableseg->edit_unit_byte_count * duration; /* check small EditUnitByteCount for audio */ if (tableseg->edit_unit_byte_count && tableseg->edit_unit_byte_count < 32 && !tableseg->index_duration) { /* duration might be prime relative to the new sample_duration, * which means we need to handle the last frame differently */ sample_duration = 8192; last_sample_size = (duration % sample_duration) * tableseg->edit_unit_byte_count; tableseg->edit_unit_byte_count *= sample_duration; duration /= sample_duration; if (last_sample_size) duration++; } for (k = 0; k < duration; k++) { int64_t pos; int size, flags = 0; if (k < tableseg->nb_index_entries) { pos = tableseg->stream_offset_entries[k]; if (n_delta < tableseg->nb_delta_entries) { if (n_delta < tableseg->nb_delta_entries - 1) { size = tableseg->slice_offset_entries[k][tableseg->slice[n_delta+1]-1] + tableseg->element_delta[n_delta+1] - tableseg->element_delta[n_delta]; if (tableseg->slice[n_delta] > 0) size -= tableseg->slice_offset_entries[k][tableseg->slice[n_delta]-1]; } else if (k < duration - 1) { size = tableseg->stream_offset_entries[k+1] - tableseg->stream_offset_entries[k] - tableseg->slice_offset_entries[k][tableseg->slice[tableseg->nb_delta_entries-1]-1] - tableseg->element_delta[tableseg->nb_delta_entries-1]; } else size = 0; if (tableseg->slice[n_delta] > 0) pos += tableseg->slice_offset_entries[k][tableseg->slice[n_delta]-1]; pos += tableseg->element_delta[n_delta]; } else size = 0; flags = !(tableseg->flag_entries[k] & 0x30) ? 
AVINDEX_KEYFRAME : 0; } else { pos = (int64_t)k * tableseg->edit_unit_byte_count + accumulated_offset; if (n_delta < tableseg->nb_delta_entries - 1) size = tableseg->element_delta[n_delta+1] - tableseg->element_delta[n_delta]; else { /* use smaller size for last sample if we should */ if (last_sample_size && k == duration - 1) size = last_sample_size; else size = tableseg->edit_unit_byte_count; if (tableseg->nb_delta_entries) size -= tableseg->element_delta[tableseg->nb_delta_entries-1]; } if (n_delta < tableseg->nb_delta_entries) pos += tableseg->element_delta[n_delta]; flags = AVINDEX_KEYFRAME; } if (mxf_absolute_bodysid_offset(mxf, tableseg->body_sid, pos, &pos) < 0) { /* probably partial file - no point going further for this stream */ break; } av_dlog(mxf->fc, "Stream %d IndexEntry %d TrackID %d Offset %"PRIx64" Timestamp %"PRId64"\n", st->index, st->nb_index_entries, track_id, pos, sample_duration * st->nb_index_entries); if ((ret = av_add_index_entry(st, pos, sample_duration * st->nb_index_entries, size, 0, flags)) < 0) return ret; } accumulated_offset += segment_size; } av_free(sorted_segments); return 0; } | 21,460 |
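For the CBR branch in the row above (no per-entry index), positions come from plain arithmetic: edit unit k starts at k * EditUnitByteCount plus the bytes accumulated from earlier segments. A sketch with hypothetical numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int64_t accumulated_offset = 4096;   /* size of previous segments */
    int64_t edit_unit_byte_count = 240;
    for (int k = 0; k < 3; k++) {
        int64_t pos = k * edit_unit_byte_count + accumulated_offset;
        printf("unit %d at byte %lld\n", k, (long long)pos);
    }
    return 0;
}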
1 | static void fill_picture_parameters(const AVCodecContext *avctx, AVDXVAContext *ctx, const HEVCContext *h, DXVA_PicParams_HEVC *pp) { const HEVCFrame *current_picture = h->ref; int i, j, k; memset(pp, 0, sizeof(*pp)); pp->PicWidthInMinCbsY = h->sps->min_cb_width; pp->PicHeightInMinCbsY = h->sps->min_cb_height; pp->wFormatAndSequenceInfoFlags = (h->sps->chroma_format_idc << 0) | (h->sps->separate_colour_plane_flag << 2) | ((h->sps->bit_depth - 8) << 3) | ((h->sps->bit_depth - 8) << 6) | ((h->sps->log2_max_poc_lsb - 4) << 9) | (0 << 13) | (0 << 14) | (0 << 15); fill_picture_entry(&pp->CurrPic, ff_dxva2_get_surface_index(avctx, ctx, current_picture->frame), 0); pp->sps_max_dec_pic_buffering_minus1 = h->sps->temporal_layer[h->sps->max_sub_layers - 1].max_dec_pic_buffering - 1; pp->log2_min_luma_coding_block_size_minus3 = h->sps->log2_min_cb_size - 3; pp->log2_diff_max_min_luma_coding_block_size = h->sps->log2_diff_max_min_coding_block_size; pp->log2_min_transform_block_size_minus2 = h->sps->log2_min_tb_size - 2; pp->log2_diff_max_min_transform_block_size = h->sps->log2_max_trafo_size - h->sps->log2_min_tb_size; pp->max_transform_hierarchy_depth_inter = h->sps->max_transform_hierarchy_depth_inter; pp->max_transform_hierarchy_depth_intra = h->sps->max_transform_hierarchy_depth_intra; pp->num_short_term_ref_pic_sets = h->sps->nb_st_rps; pp->num_long_term_ref_pics_sps = h->sps->num_long_term_ref_pics_sps; pp->num_ref_idx_l0_default_active_minus1 = h->pps->num_ref_idx_l0_default_active - 1; pp->num_ref_idx_l1_default_active_minus1 = h->pps->num_ref_idx_l1_default_active - 1; pp->init_qp_minus26 = h->pps->pic_init_qp_minus26; if (h->sh.short_term_ref_pic_set_sps_flag == 0 && h->sh.short_term_rps) { pp->ucNumDeltaPocsOfRefRpsIdx = h->sh.short_term_rps->num_delta_pocs; pp->wNumBitsForShortTermRPSInSlice = h->sh.short_term_ref_pic_set_size; } pp->dwCodingParamToolFlags = (h->sps->scaling_list_enable_flag << 0) | (h->sps->amp_enabled_flag << 1) | (h->sps->sao_enabled << 2) | (h->sps->pcm_enabled_flag << 3) | ((h->sps->pcm_enabled_flag ? (h->sps->pcm.bit_depth - 1) : 0) << 4) | ((h->sps->pcm_enabled_flag ? (h->sps->pcm.bit_depth_chroma - 1) : 0) << 8) | ((h->sps->pcm_enabled_flag ? (h->sps->pcm.log2_min_pcm_cb_size - 3) : 0) << 12) | ((h->sps->pcm_enabled_flag ? (h->sps->pcm.log2_max_pcm_cb_size - h->sps->pcm.log2_min_pcm_cb_size) : 0) << 14) | (h->sps->pcm.loop_filter_disable_flag << 16) | (h->sps->long_term_ref_pics_present_flag << 17) | (h->sps->sps_temporal_mvp_enabled_flag << 18) | (h->sps->sps_strong_intra_smoothing_enable_flag << 19) | (h->pps->dependent_slice_segments_enabled_flag << 20) | (h->pps->output_flag_present_flag << 21) | (h->pps->num_extra_slice_header_bits << 22) | (h->pps->sign_data_hiding_flag << 25) | (h->pps->cabac_init_present_flag << 26) | (0 << 27); pp->dwCodingSettingPicturePropertyFlags = (h->pps->constrained_intra_pred_flag << 0) | (h->pps->transform_skip_enabled_flag << 1) | (h->pps->cu_qp_delta_enabled_flag << 2) | (h->pps->pic_slice_level_chroma_qp_offsets_present_flag << 3) | (h->pps->weighted_pred_flag << 4) | (h->pps->weighted_bipred_flag << 5) | (h->pps->transquant_bypass_enable_flag << 6) | (h->pps->tiles_enabled_flag << 7) | (h->pps->entropy_coding_sync_enabled_flag << 8) | (h->pps->uniform_spacing_flag << 9) | ((h->pps->tiles_enabled_flag ? 
h->pps->loop_filter_across_tiles_enabled_flag : 0) << 10) | (h->pps->seq_loop_filter_across_slices_enabled_flag << 11) | (h->pps->deblocking_filter_override_enabled_flag << 12) | (h->pps->disable_dbf << 13) | (h->pps->lists_modification_present_flag << 14) | (h->pps->slice_header_extension_present_flag << 15) | (IS_IRAP(h) << 16) | (IS_IDR(h) << 17) | /* IntraPicFlag */ (IS_IRAP(h) << 18) | (0 << 19); pp->pps_cb_qp_offset = h->pps->cb_qp_offset; pp->pps_cr_qp_offset = h->pps->cr_qp_offset; if (h->pps->tiles_enabled_flag) { pp->num_tile_columns_minus1 = h->pps->num_tile_columns - 1; pp->num_tile_rows_minus1 = h->pps->num_tile_rows - 1; if (!h->pps->uniform_spacing_flag) { for (i = 0; i < h->pps->num_tile_columns; i++) pp->column_width_minus1[i] = h->pps->column_width[i] - 1; for (i = 0; i < h->pps->num_tile_rows; i++) pp->row_height_minus1[i] = h->pps->row_height[i] - 1; } } pp->diff_cu_qp_delta_depth = h->pps->diff_cu_qp_delta_depth; pp->pps_beta_offset_div2 = h->pps->beta_offset / 2; pp->pps_tc_offset_div2 = h->pps->tc_offset / 2; pp->log2_parallel_merge_level_minus2 = h->pps->log2_parallel_merge_level - 2; pp->CurrPicOrderCntVal = h->poc; // empty the lists memset(&pp->RefPicList, 0xff, sizeof(pp->RefPicList)); memset(&pp->RefPicSetStCurrBefore, 0xff, sizeof(pp->RefPicSetStCurrBefore)); memset(&pp->RefPicSetStCurrAfter, 0xff, sizeof(pp->RefPicSetStCurrAfter)); memset(&pp->RefPicSetLtCurr, 0xff, sizeof(pp->RefPicSetLtCurr)); // fill RefPicList from the DPB for (i = 0, j = 0; i < FF_ARRAY_ELEMS(h->DPB); i++) { const HEVCFrame *frame = &h->DPB[i]; if (frame != current_picture && (frame->flags & (HEVC_FRAME_FLAG_LONG_REF | HEVC_FRAME_FLAG_SHORT_REF))) { fill_picture_entry(&pp->RefPicList[j], ff_dxva2_get_surface_index(avctx, ctx, frame->frame), !!(frame->flags & HEVC_FRAME_FLAG_LONG_REF)); pp->PicOrderCntValList[j] = frame->poc; j++; } } #define DO_REF_LIST(ref_idx, ref_list) { \ const RefPicList *rpl = &h->rps[ref_idx]; \ av_assert0(rpl->nb_refs <= FF_ARRAY_ELEMS(pp->ref_list)); \ for (j = 0, k = 0; j < rpl->nb_refs; j++) { \ if (rpl->ref[j]) { \ pp->ref_list[k] = get_refpic_index(pp, ff_dxva2_get_surface_index(avctx, ctx, rpl->ref[j]->frame)); \ k++; \ } \ } \ } // Fill short term and long term lists DO_REF_LIST(ST_CURR_BEF, RefPicSetStCurrBefore); DO_REF_LIST(ST_CURR_AFT, RefPicSetStCurrAfter); DO_REF_LIST(LT_CURR, RefPicSetLtCurr); pp->StatusReportFeedbackNumber = 1 + DXVA_CONTEXT_REPORT_ID(avctx, ctx)++; } | 21,461 |
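The picture-parameter fill above packs dozens of single-bit SPS/PPS flags into fixed positions of 32-bit words. A sketch of the packing; the bit positions here are illustrative, not the DXVA layout:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    unsigned tiles_enabled = 1, entropy_sync = 0, uniform_spacing = 1;
    uint32_t flags = (tiles_enabled   << 7) |
                     (entropy_sync    << 8) |
                     (uniform_spacing << 9);
    printf("flags=%#x\n", (unsigned)flags);   /* 0x280 */
    return 0;
}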
1 | static void isa_ipmi_bmc_check(Object *obj, const char *name, Object *val, Error **errp) { IPMIBmc *bmc = IPMI_BMC(val); if (bmc->intf) error_setg(errp, "BMC object is already in use"); } | 21,462 |
1 | av_cold int ff_yuv2rgb_c_init_tables(SwsContext *c, const int inv_table[4], int fullRange, int brightness, int contrast, int saturation) { const int isRgb = c->dstFormat == AV_PIX_FMT_RGB32 || c->dstFormat == AV_PIX_FMT_RGB32_1 || c->dstFormat == AV_PIX_FMT_BGR24 || c->dstFormat == AV_PIX_FMT_RGB565BE || c->dstFormat == AV_PIX_FMT_RGB565LE || c->dstFormat == AV_PIX_FMT_RGB555BE || c->dstFormat == AV_PIX_FMT_RGB555LE || c->dstFormat == AV_PIX_FMT_RGB444BE || c->dstFormat == AV_PIX_FMT_RGB444LE || c->dstFormat == AV_PIX_FMT_RGB8 || c->dstFormat == AV_PIX_FMT_RGB4 || c->dstFormat == AV_PIX_FMT_RGB4_BYTE || c->dstFormat == AV_PIX_FMT_MONOBLACK; const int isNotNe = c->dstFormat == AV_PIX_FMT_NE(RGB565LE, RGB565BE) || c->dstFormat == AV_PIX_FMT_NE(RGB555LE, RGB555BE) || c->dstFormat == AV_PIX_FMT_NE(RGB444LE, RGB444BE) || c->dstFormat == AV_PIX_FMT_NE(BGR565LE, BGR565BE) || c->dstFormat == AV_PIX_FMT_NE(BGR555LE, BGR555BE) || c->dstFormat == AV_PIX_FMT_NE(BGR444LE, BGR444BE); const int bpp = c->dstFormatBpp; uint8_t *y_table; uint16_t *y_table16; uint32_t *y_table32; int i, base, rbase, gbase, bbase, av_uninit(abase), needAlpha; const int yoffs = fullRange ? 384 : 326; int64_t crv = inv_table[0]; int64_t cbu = inv_table[1]; int64_t cgu = -inv_table[2]; int64_t cgv = -inv_table[3]; int64_t cy = 1 << 16; int64_t oy = 0; int64_t yb = 0; if (!fullRange) { cy = (cy * 255) / 219; oy = 16 << 16; } else { crv = (crv * 224) / 255; cbu = (cbu * 224) / 255; cgu = (cgu * 224) / 255; cgv = (cgv * 224) / 255; } cy = (cy * contrast) >> 16; crv = (crv * contrast * saturation) >> 32; cbu = (cbu * contrast * saturation) >> 32; cgu = (cgu * contrast * saturation) >> 32; cgv = (cgv * contrast * saturation) >> 32; oy -= 256 * brightness; c->uOffset = 0x0400040004000400LL; c->vOffset = 0x0400040004000400LL; c->yCoeff = roundToInt16(cy * 8192) * 0x0001000100010001ULL; c->vrCoeff = roundToInt16(crv * 8192) * 0x0001000100010001ULL; c->ubCoeff = roundToInt16(cbu * 8192) * 0x0001000100010001ULL; c->vgCoeff = roundToInt16(cgv * 8192) * 0x0001000100010001ULL; c->ugCoeff = roundToInt16(cgu * 8192) * 0x0001000100010001ULL; c->yOffset = roundToInt16(oy * 8) * 0x0001000100010001ULL; c->yuv2rgb_y_coeff = (int16_t)roundToInt16(cy << 13); c->yuv2rgb_y_offset = (int16_t)roundToInt16(oy << 9); c->yuv2rgb_v2r_coeff = (int16_t)roundToInt16(crv << 13); c->yuv2rgb_v2g_coeff = (int16_t)roundToInt16(cgv << 13); c->yuv2rgb_u2g_coeff = (int16_t)roundToInt16(cgu << 13); c->yuv2rgb_u2b_coeff = (int16_t)roundToInt16(cbu << 13); //scale coefficients by cy crv = ((crv << 16) + 0x8000) / cy; cbu = ((cbu << 16) + 0x8000) / cy; cgu = ((cgu << 16) + 0x8000) / cy; cgv = ((cgv << 16) + 0x8000) / cy; av_freep(&c->yuvTable); switch (bpp) { case 1: c->yuvTable = av_malloc(1024); y_table = c->yuvTable; yb = -(384 << 16) - oy; for (i = 0; i < 1024 - 110; i++) { y_table[i + 110] = av_clip_uint8((yb + 0x8000) >> 16) >> 7; yb += cy; } fill_table(c->table_gU, 1, cgu, y_table + yoffs); fill_gv_table(c->table_gV, 1, cgv); break; case 4: case 4 | 128: rbase = isRgb ? 3 : 0; gbase = 1; bbase = isRgb ? 
0 : 3; c->yuvTable = av_malloc(1024 * 3); y_table = c->yuvTable; yb = -(384 << 16) - oy; for (i = 0; i < 1024 - 110; i++) { int yval = av_clip_uint8((yb + 0x8000) >> 16); y_table[i + 110] = (yval >> 7) << rbase; y_table[i + 37 + 1024] = ((yval + 43) / 85) << gbase; y_table[i + 110 + 2048] = (yval >> 7) << bbase; yb += cy; } fill_table(c->table_rV, 1, crv, y_table + yoffs); fill_table(c->table_gU, 1, cgu, y_table + yoffs + 1024); fill_table(c->table_bU, 1, cbu, y_table + yoffs + 2048); fill_gv_table(c->table_gV, 1, cgv); break; case 8: rbase = isRgb ? 5 : 0; gbase = isRgb ? 2 : 3; bbase = isRgb ? 0 : 6; c->yuvTable = av_malloc(1024 * 3); y_table = c->yuvTable; yb = -(384 << 16) - oy; for (i = 0; i < 1024 - 38; i++) { int yval = av_clip_uint8((yb + 0x8000) >> 16); y_table[i + 16] = ((yval + 18) / 36) << rbase; y_table[i + 16 + 1024] = ((yval + 18) / 36) << gbase; y_table[i + 37 + 2048] = ((yval + 43) / 85) << bbase; yb += cy; } fill_table(c->table_rV, 1, crv, y_table + yoffs); fill_table(c->table_gU, 1, cgu, y_table + yoffs + 1024); fill_table(c->table_bU, 1, cbu, y_table + yoffs + 2048); fill_gv_table(c->table_gV, 1, cgv); break; case 12: rbase = isRgb ? 8 : 0; gbase = 4; bbase = isRgb ? 0 : 8; c->yuvTable = av_malloc(1024 * 3 * 2); y_table16 = c->yuvTable; yb = -(384 << 16) - oy; for (i = 0; i < 1024; i++) { uint8_t yval = av_clip_uint8((yb + 0x8000) >> 16); y_table16[i] = (yval >> 4) << rbase; y_table16[i + 1024] = (yval >> 4) << gbase; y_table16[i + 2048] = (yval >> 4) << bbase; yb += cy; } if (isNotNe) for (i = 0; i < 1024 * 3; i++) y_table16[i] = av_bswap16(y_table16[i]); fill_table(c->table_rV, 2, crv, y_table16 + yoffs); fill_table(c->table_gU, 2, cgu, y_table16 + yoffs + 1024); fill_table(c->table_bU, 2, cbu, y_table16 + yoffs + 2048); fill_gv_table(c->table_gV, 2, cgv); break; case 15: case 16: rbase = isRgb ? bpp - 5 : 0; gbase = 5; bbase = isRgb ? 0 : (bpp - 5); c->yuvTable = av_malloc(1024 * 3 * 2); y_table16 = c->yuvTable; yb = -(384 << 16) - oy; for (i = 0; i < 1024; i++) { uint8_t yval = av_clip_uint8((yb + 0x8000) >> 16); y_table16[i] = (yval >> 3) << rbase; y_table16[i + 1024] = (yval >> (18 - bpp)) << gbase; y_table16[i + 2048] = (yval >> 3) << bbase; yb += cy; } if (isNotNe) for (i = 0; i < 1024 * 3; i++) y_table16[i] = av_bswap16(y_table16[i]); fill_table(c->table_rV, 2, crv, y_table16 + yoffs); fill_table(c->table_gU, 2, cgu, y_table16 + yoffs + 1024); fill_table(c->table_bU, 2, cbu, y_table16 + yoffs + 2048); fill_gv_table(c->table_gV, 2, cgv); break; case 24: case 48: c->yuvTable = av_malloc(1024); y_table = c->yuvTable; yb = -(384 << 16) - oy; for (i = 0; i < 1024; i++) { y_table[i] = av_clip_uint8((yb + 0x8000) >> 16); yb += cy; } fill_table(c->table_rV, 1, crv, y_table + yoffs); fill_table(c->table_gU, 1, cgu, y_table + yoffs); fill_table(c->table_bU, 1, cbu, y_table + yoffs); fill_gv_table(c->table_gV, 1, cgv); break; case 32: case 64: base = (c->dstFormat == AV_PIX_FMT_RGB32_1 || c->dstFormat == AV_PIX_FMT_BGR32_1) ? 8 : 0; rbase = base + (isRgb ? 16 : 0); gbase = base + 8; bbase = base + (isRgb ? 0 : 16); needAlpha = CONFIG_SWSCALE_ALPHA && isALPHA(c->srcFormat); if (!needAlpha) abase = (base + 24) & 31; c->yuvTable = av_malloc(1024 * 3 * 4); y_table32 = c->yuvTable; yb = -(384 << 16) - oy; for (i = 0; i < 1024; i++) { unsigned yval = av_clip_uint8((yb + 0x8000) >> 16); y_table32[i] = (yval << rbase) + (needAlpha ? 
0 : (255u << abase)); y_table32[i + 1024] = yval << gbase; y_table32[i + 2048] = yval << bbase; yb += cy; } fill_table(c->table_rV, 4, crv, y_table32 + yoffs); fill_table(c->table_gU, 4, cgu, y_table32 + yoffs + 1024); fill_table(c->table_bU, 4, cbu, y_table32 + yoffs + 2048); fill_gv_table(c->table_gV, 4, cgv); break; default: if(!isPlanar(c->dstFormat) || bpp <= 24) av_log(c, AV_LOG_ERROR, "%ibpp not supported by yuv2rgb\n", bpp); return -1; } return 0; } | 21,463 |
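The table setup above bakes the color-conversion matrix into fixed-point lookup tables. As a worked example of the underlying arithmetic, here is one pixel converted with 16.16 fixed-point coefficients (approximate BT.601 values, assumed for illustration; the shifts rely on arithmetic right shift of negatives, as the original does):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* 16.16 fixed point: 1.402, 1.772, -0.344, -0.714 (approx. BT.601) */
    int64_t crv = 91881, cbu = 116130, cgu = -22554, cgv = -46802;
    int y = 128, u = 144, v = 160;    /* one full-range sample */
    int r = y + (int)((crv * (v - 128)) >> 16);
    int g = y + (int)((cgu * (u - 128) + cgv * (v - 128)) >> 16);
    int b = y + (int)((cbu * (u - 128)) >> 16);
    printf("r=%d g=%d b=%d\n", r, g, b);
    return 0;
}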
1 | abi_long do_syscall(void *cpu_env, int num, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5, abi_long arg6, abi_long arg7, abi_long arg8) { CPUState *cpu = ENV_GET_CPU(cpu_env); abi_long ret; struct stat st; struct statfs stfs; void *p; #if defined(DEBUG_ERESTARTSYS) /* Debug-only code for exercising the syscall-restart code paths * in the per-architecture cpu main loops: restart every syscall * the guest makes once before letting it through. */ { static int flag; flag = !flag; if (flag) { return -TARGET_ERESTARTSYS; #endif #ifdef DEBUG gemu_log("syscall %d", num); #endif trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8); if(do_strace) print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6); switch(num) { case TARGET_NR_exit: /* In old applications this may be used to implement _exit(2). However in threaded applictions it is used for thread termination, and _exit_group is used for application termination. Do thread termination if we have more then one thread. */ if (block_signals()) { ret = -TARGET_ERESTARTSYS; if (CPU_NEXT(first_cpu)) { TaskState *ts; cpu_list_lock(); /* Remove the CPU from the list. */ QTAILQ_REMOVE(&cpus, cpu, node); cpu_list_unlock(); ts = cpu->opaque; if (ts->child_tidptr) { put_user_u32(0, ts->child_tidptr); sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX, NULL, NULL, 0); thread_cpu = NULL; object_unref(OBJECT(cpu)); g_free(ts); rcu_unregister_thread(); pthread_exit(NULL); #ifdef TARGET_GPROF _mcleanup(); #endif gdb_exit(cpu_env, arg1); _exit(arg1); ret = 0; /* avoid warning */ case TARGET_NR_read: if (arg3 == 0) ret = 0; else { if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0))) goto efault; ret = get_errno(safe_read(arg1, p, arg3)); if (ret >= 0 && fd_trans_host_to_target_data(arg1)) { ret = fd_trans_host_to_target_data(arg1)(p, ret); unlock_user(p, arg2, ret); case TARGET_NR_write: if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1))) goto efault; ret = get_errno(safe_write(arg1, p, arg3)); unlock_user(p, arg2, 0); #ifdef TARGET_NR_open case TARGET_NR_open: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(do_openat(cpu_env, AT_FDCWD, p, target_to_host_bitmask(arg2, fcntl_flags_tbl), arg3)); fd_trans_unregister(ret); unlock_user(p, arg1, 0); #endif case TARGET_NR_openat: if (!(p = lock_user_string(arg2))) goto efault; ret = get_errno(do_openat(cpu_env, arg1, p, target_to_host_bitmask(arg3, fcntl_flags_tbl), arg4)); fd_trans_unregister(ret); unlock_user(p, arg2, 0); #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE) case TARGET_NR_name_to_handle_at: ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5); #endif #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE) case TARGET_NR_open_by_handle_at: ret = do_open_by_handle_at(arg1, arg2, arg3); fd_trans_unregister(ret); #endif case TARGET_NR_close: fd_trans_unregister(arg1); ret = get_errno(close(arg1)); case TARGET_NR_brk: ret = do_brk(arg1); #ifdef TARGET_NR_fork case TARGET_NR_fork: ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0)); #endif #ifdef TARGET_NR_waitpid case TARGET_NR_waitpid: { int status; ret = get_errno(safe_wait4(arg1, &status, arg3, 0)); if (!is_error(ret) && arg2 && ret && put_user_s32(host_to_target_waitstatus(status), arg2)) goto efault; #endif #ifdef TARGET_NR_waitid case TARGET_NR_waitid: { siginfo_t info; info.si_pid = 0; ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL)); if (!is_error(ret) && arg3 && info.si_pid != 0) { if (!(p = lock_user(VERIFY_WRITE, 
arg3, sizeof(target_siginfo_t), 0))) goto efault; host_to_target_siginfo(p, &info); unlock_user(p, arg3, sizeof(target_siginfo_t)); #endif #ifdef TARGET_NR_creat /* not on alpha */ case TARGET_NR_creat: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(creat(p, arg2)); fd_trans_unregister(ret); unlock_user(p, arg1, 0); #endif #ifdef TARGET_NR_link case TARGET_NR_link: { void * p2; p = lock_user_string(arg1); p2 = lock_user_string(arg2); if (!p || !p2) ret = -TARGET_EFAULT; else ret = get_errno(link(p, p2)); unlock_user(p2, arg2, 0); unlock_user(p, arg1, 0); #endif #if defined(TARGET_NR_linkat) case TARGET_NR_linkat: { void * p2 = NULL; if (!arg2 || !arg4) goto efault; p = lock_user_string(arg2); p2 = lock_user_string(arg4); if (!p || !p2) ret = -TARGET_EFAULT; else ret = get_errno(linkat(arg1, p, arg3, p2, arg5)); unlock_user(p, arg2, 0); unlock_user(p2, arg4, 0); #endif #ifdef TARGET_NR_unlink case TARGET_NR_unlink: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(unlink(p)); unlock_user(p, arg1, 0); #endif #if defined(TARGET_NR_unlinkat) case TARGET_NR_unlinkat: if (!(p = lock_user_string(arg2))) goto efault; ret = get_errno(unlinkat(arg1, p, arg3)); unlock_user(p, arg2, 0); #endif case TARGET_NR_execve: { char **argp, **envp; int argc, envc; abi_ulong gp; abi_ulong guest_argp; abi_ulong guest_envp; abi_ulong addr; char **q; int total_size = 0; argc = 0; guest_argp = arg2; for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) { if (get_user_ual(addr, gp)) goto efault; if (!addr) argc++; envc = 0; guest_envp = arg3; for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) { if (get_user_ual(addr, gp)) goto efault; if (!addr) envc++; argp = alloca((argc + 1) * sizeof(void *)); envp = alloca((envc + 1) * sizeof(void *)); for (gp = guest_argp, q = argp; gp; gp += sizeof(abi_ulong), q++) { if (get_user_ual(addr, gp)) goto execve_efault; if (!addr) if (!(*q = lock_user_string(addr))) goto execve_efault; total_size += strlen(*q) + 1; *q = NULL; for (gp = guest_envp, q = envp; gp; gp += sizeof(abi_ulong), q++) { if (get_user_ual(addr, gp)) goto execve_efault; if (!addr) if (!(*q = lock_user_string(addr))) goto execve_efault; total_size += strlen(*q) + 1; *q = NULL; if (!(p = lock_user_string(arg1))) goto execve_efault; /* Although execve() is not an interruptible syscall it is * a special case where we must use the safe_syscall wrapper: * if we allow a signal to happen before we make the host * syscall then we will 'lose' it, because at the point of * execve the process leaves QEMU's control. So we use the * safe syscall wrapper to ensure that we either take the * signal as a guest signal, or else it does not happen * before the execve completes and makes it the other * program's problem. 
*/ ret = get_errno(safe_execve(p, argp, envp)); unlock_user(p, arg1, 0); goto execve_end; execve_efault: ret = -TARGET_EFAULT; execve_end: for (gp = guest_argp, q = argp; *q; gp += sizeof(abi_ulong), q++) { if (get_user_ual(addr, gp) || !addr) unlock_user(*q, addr, 0); for (gp = guest_envp, q = envp; *q; gp += sizeof(abi_ulong), q++) { if (get_user_ual(addr, gp) || !addr) unlock_user(*q, addr, 0); case TARGET_NR_chdir: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(chdir(p)); unlock_user(p, arg1, 0); #ifdef TARGET_NR_time case TARGET_NR_time: { time_t host_time; ret = get_errno(time(&host_time)); if (!is_error(ret) && arg1 && put_user_sal(host_time, arg1)) goto efault; #endif #ifdef TARGET_NR_mknod case TARGET_NR_mknod: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(mknod(p, arg2, arg3)); unlock_user(p, arg1, 0); #endif #if defined(TARGET_NR_mknodat) case TARGET_NR_mknodat: if (!(p = lock_user_string(arg2))) goto efault; ret = get_errno(mknodat(arg1, p, arg3, arg4)); unlock_user(p, arg2, 0); #endif #ifdef TARGET_NR_chmod case TARGET_NR_chmod: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(chmod(p, arg2)); unlock_user(p, arg1, 0); #endif #ifdef TARGET_NR_break case TARGET_NR_break: goto unimplemented; #endif #ifdef TARGET_NR_oldstat case TARGET_NR_oldstat: goto unimplemented; #endif case TARGET_NR_lseek: ret = get_errno(lseek(arg1, arg2, arg3)); #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA) /* Alpha specific */ case TARGET_NR_getxpid: ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid(); ret = get_errno(getpid()); #endif #ifdef TARGET_NR_getpid case TARGET_NR_getpid: ret = get_errno(getpid()); #endif case TARGET_NR_mount: { /* need to look at the data field */ void *p2, *p3; if (arg1) { p = lock_user_string(arg1); if (!p) { goto efault; } else { p = NULL; p2 = lock_user_string(arg2); if (!p2) { if (arg1) { unlock_user(p, arg1, 0); goto efault; if (arg3) { p3 = lock_user_string(arg3); if (!p3) { if (arg1) { unlock_user(p, arg1, 0); unlock_user(p2, arg2, 0); goto efault; } else { p3 = NULL; /* FIXME - arg5 should be locked, but it isn't clear how to * do that since it's not guaranteed to be a NULL-terminated * string. 
*/ if (!arg5) { ret = mount(p, p2, p3, (unsigned long)arg4, NULL); } else { ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)); ret = get_errno(ret); if (arg1) { unlock_user(p, arg1, 0); unlock_user(p2, arg2, 0); if (arg3) { unlock_user(p3, arg3, 0); #ifdef TARGET_NR_umount case TARGET_NR_umount: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(umount(p)); unlock_user(p, arg1, 0); #endif #ifdef TARGET_NR_stime /* not on alpha */ case TARGET_NR_stime: { time_t host_time; if (get_user_sal(host_time, arg1)) goto efault; ret = get_errno(stime(&host_time)); #endif case TARGET_NR_ptrace: goto unimplemented; #ifdef TARGET_NR_alarm /* not on alpha */ case TARGET_NR_alarm: ret = alarm(arg1); #endif #ifdef TARGET_NR_oldfstat case TARGET_NR_oldfstat: goto unimplemented; #endif #ifdef TARGET_NR_pause /* not on alpha */ case TARGET_NR_pause: if (!block_signals()) { sigsuspend(&((TaskState *)cpu->opaque)->signal_mask); ret = -TARGET_EINTR; #endif #ifdef TARGET_NR_utime case TARGET_NR_utime: { struct utimbuf tbuf, *host_tbuf; struct target_utimbuf *target_tbuf; if (arg2) { if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1)) goto efault; tbuf.actime = tswapal(target_tbuf->actime); tbuf.modtime = tswapal(target_tbuf->modtime); unlock_user_struct(target_tbuf, arg2, 0); host_tbuf = &tbuf; } else { host_tbuf = NULL; if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(utime(p, host_tbuf)); unlock_user(p, arg1, 0); #endif #ifdef TARGET_NR_utimes case TARGET_NR_utimes: { struct timeval *tvp, tv[2]; if (arg2) { if (copy_from_user_timeval(&tv[0], arg2) || copy_from_user_timeval(&tv[1], arg2 + sizeof(struct target_timeval))) goto efault; tvp = tv; } else { tvp = NULL; if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(utimes(p, tvp)); unlock_user(p, arg1, 0); #endif #if defined(TARGET_NR_futimesat) case TARGET_NR_futimesat: { struct timeval *tvp, tv[2]; if (arg3) { if (copy_from_user_timeval(&tv[0], arg3) || copy_from_user_timeval(&tv[1], arg3 + sizeof(struct target_timeval))) goto efault; tvp = tv; } else { tvp = NULL; if (!(p = lock_user_string(arg2))) goto efault; ret = get_errno(futimesat(arg1, path(p), tvp)); unlock_user(p, arg2, 0); #endif #ifdef TARGET_NR_stty case TARGET_NR_stty: goto unimplemented; #endif #ifdef TARGET_NR_gtty case TARGET_NR_gtty: goto unimplemented; #endif #ifdef TARGET_NR_access case TARGET_NR_access: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(access(path(p), arg2)); unlock_user(p, arg1, 0); #endif #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat) case TARGET_NR_faccessat: if (!(p = lock_user_string(arg2))) goto efault; ret = get_errno(faccessat(arg1, p, arg3, 0)); unlock_user(p, arg2, 0); #endif #ifdef TARGET_NR_nice /* not on alpha */ case TARGET_NR_nice: ret = get_errno(nice(arg1)); #endif #ifdef TARGET_NR_ftime case TARGET_NR_ftime: goto unimplemented; #endif case TARGET_NR_sync: sync(); ret = 0; case TARGET_NR_kill: ret = get_errno(safe_kill(arg1, target_to_host_signal(arg2))); #ifdef TARGET_NR_rename case TARGET_NR_rename: { void *p2; p = lock_user_string(arg1); p2 = lock_user_string(arg2); if (!p || !p2) ret = -TARGET_EFAULT; else ret = get_errno(rename(p, p2)); unlock_user(p2, arg2, 0); unlock_user(p, arg1, 0); #endif #if defined(TARGET_NR_renameat) case TARGET_NR_renameat: { void *p2; p = lock_user_string(arg2); p2 = lock_user_string(arg4); if (!p || !p2) ret = -TARGET_EFAULT; else ret = get_errno(renameat(arg1, p, arg3, p2)); unlock_user(p2, arg4, 0); unlock_user(p, arg2, 0); #endif #ifdef 
TARGET_NR_mkdir case TARGET_NR_mkdir: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(mkdir(p, arg2)); unlock_user(p, arg1, 0); #endif #if defined(TARGET_NR_mkdirat) case TARGET_NR_mkdirat: if (!(p = lock_user_string(arg2))) goto efault; ret = get_errno(mkdirat(arg1, p, arg3)); unlock_user(p, arg2, 0); #endif #ifdef TARGET_NR_rmdir case TARGET_NR_rmdir: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(rmdir(p)); unlock_user(p, arg1, 0); #endif case TARGET_NR_dup: ret = get_errno(dup(arg1)); if (ret >= 0) { fd_trans_dup(arg1, ret); #ifdef TARGET_NR_pipe case TARGET_NR_pipe: ret = do_pipe(cpu_env, arg1, 0, 0); #endif #ifdef TARGET_NR_pipe2 case TARGET_NR_pipe2: ret = do_pipe(cpu_env, arg1, target_to_host_bitmask(arg2, fcntl_flags_tbl), 1); #endif case TARGET_NR_times: { struct target_tms *tmsp; struct tms tms; ret = get_errno(times(&tms)); if (arg1) { tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0); if (!tmsp) goto efault; tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime)); tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime)); tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime)); tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime)); if (!is_error(ret)) ret = host_to_target_clock_t(ret); #ifdef TARGET_NR_prof case TARGET_NR_prof: goto unimplemented; #endif #ifdef TARGET_NR_signal case TARGET_NR_signal: goto unimplemented; #endif case TARGET_NR_acct: if (arg1 == 0) { ret = get_errno(acct(NULL)); } else { if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(acct(path(p))); unlock_user(p, arg1, 0); #ifdef TARGET_NR_umount2 case TARGET_NR_umount2: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(umount2(p, arg2)); unlock_user(p, arg1, 0); #endif #ifdef TARGET_NR_lock case TARGET_NR_lock: goto unimplemented; #endif case TARGET_NR_ioctl: ret = do_ioctl(arg1, arg2, arg3); case TARGET_NR_fcntl: ret = do_fcntl(arg1, arg2, arg3); #ifdef TARGET_NR_mpx case TARGET_NR_mpx: goto unimplemented; #endif case TARGET_NR_setpgid: ret = get_errno(setpgid(arg1, arg2)); #ifdef TARGET_NR_ulimit case TARGET_NR_ulimit: goto unimplemented; #endif #ifdef TARGET_NR_oldolduname case TARGET_NR_oldolduname: goto unimplemented; #endif case TARGET_NR_umask: ret = get_errno(umask(arg1)); case TARGET_NR_chroot: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(chroot(p)); unlock_user(p, arg1, 0); #ifdef TARGET_NR_ustat case TARGET_NR_ustat: goto unimplemented; #endif #ifdef TARGET_NR_dup2 case TARGET_NR_dup2: ret = get_errno(dup2(arg1, arg2)); if (ret >= 0) { fd_trans_dup(arg1, arg2); #endif #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3) case TARGET_NR_dup3: ret = get_errno(dup3(arg1, arg2, arg3)); if (ret >= 0) { fd_trans_dup(arg1, arg2); #endif #ifdef TARGET_NR_getppid /* not on alpha */ case TARGET_NR_getppid: ret = get_errno(getppid()); #endif #ifdef TARGET_NR_getpgrp case TARGET_NR_getpgrp: ret = get_errno(getpgrp()); #endif case TARGET_NR_setsid: ret = get_errno(setsid()); #ifdef TARGET_NR_sigaction case TARGET_NR_sigaction: { #if defined(TARGET_ALPHA) struct target_sigaction act, oact, *pact = 0; struct target_old_sigaction *old_act; if (arg2) { if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) goto efault; act._sa_handler = old_act->_sa_handler; target_siginitset(&act.sa_mask, old_act->sa_mask); act.sa_flags = old_act->sa_flags; act.sa_restorer = 0; unlock_user_struct(old_act, arg2, 0); pact = &act; ret = get_errno(do_sigaction(arg1, pact, &oact)); if (!is_error(ret) && 
arg3) { if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) goto efault; old_act->_sa_handler = oact._sa_handler; old_act->sa_mask = oact.sa_mask.sig[0]; old_act->sa_flags = oact.sa_flags; unlock_user_struct(old_act, arg3, 1); #elif defined(TARGET_MIPS) struct target_sigaction act, oact, *pact, *old_act; if (arg2) { if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) goto efault; act._sa_handler = old_act->_sa_handler; target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]); act.sa_flags = old_act->sa_flags; unlock_user_struct(old_act, arg2, 0); pact = &act; } else { pact = NULL; ret = get_errno(do_sigaction(arg1, pact, &oact)); if (!is_error(ret) && arg3) { if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) goto efault; old_act->_sa_handler = oact._sa_handler; old_act->sa_flags = oact.sa_flags; old_act->sa_mask.sig[0] = oact.sa_mask.sig[0]; old_act->sa_mask.sig[1] = 0; old_act->sa_mask.sig[2] = 0; old_act->sa_mask.sig[3] = 0; unlock_user_struct(old_act, arg3, 1); #else struct target_old_sigaction *old_act; struct target_sigaction act, oact, *pact; if (arg2) { if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1)) goto efault; act._sa_handler = old_act->_sa_handler; target_siginitset(&act.sa_mask, old_act->sa_mask); act.sa_flags = old_act->sa_flags; act.sa_restorer = old_act->sa_restorer; unlock_user_struct(old_act, arg2, 0); pact = &act; } else { pact = NULL; ret = get_errno(do_sigaction(arg1, pact, &oact)); if (!is_error(ret) && arg3) { if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0)) goto efault; old_act->_sa_handler = oact._sa_handler; old_act->sa_mask = oact.sa_mask.sig[0]; old_act->sa_flags = oact.sa_flags; old_act->sa_restorer = oact.sa_restorer; unlock_user_struct(old_act, arg3, 1); #endif #endif case TARGET_NR_rt_sigaction: { #if defined(TARGET_ALPHA) struct target_sigaction act, oact, *pact = 0; struct target_rt_sigaction *rt_act; if (arg4 != sizeof(target_sigset_t)) { if (arg2) { if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1)) goto efault; act._sa_handler = rt_act->_sa_handler; act.sa_mask = rt_act->sa_mask; act.sa_flags = rt_act->sa_flags; act.sa_restorer = arg5; unlock_user_struct(rt_act, arg2, 0); pact = &act; ret = get_errno(do_sigaction(arg1, pact, &oact)); if (!is_error(ret) && arg3) { if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0)) goto efault; rt_act->_sa_handler = oact._sa_handler; rt_act->sa_mask = oact.sa_mask; rt_act->sa_flags = oact.sa_flags; unlock_user_struct(rt_act, arg3, 1); #else struct target_sigaction *act; struct target_sigaction *oact; if (arg4 != sizeof(target_sigset_t)) { if (arg2) { if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) goto efault; } else act = NULL; if (arg3) { if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) { ret = -TARGET_EFAULT; goto rt_sigaction_fail; } else oact = NULL; ret = get_errno(do_sigaction(arg1, act, oact)); rt_sigaction_fail: if (act) unlock_user_struct(act, arg2, 0); if (oact) unlock_user_struct(oact, arg3, 1); #endif #ifdef TARGET_NR_sgetmask /* not on alpha */ case TARGET_NR_sgetmask: { sigset_t cur_set; abi_ulong target_set; ret = do_sigprocmask(0, NULL, &cur_set); if (!ret) { host_to_target_old_sigset(&target_set, &cur_set); ret = target_set; #endif #ifdef TARGET_NR_ssetmask /* not on alpha */ case TARGET_NR_ssetmask: { sigset_t set, oset, cur_set; abi_ulong target_set = arg1; /* We only have one word of the new mask so we must read * the rest of it with do_sigprocmask() and OR in this word. * We are guaranteed that a do_sigprocmask() that only queries * the signal mask will not fail. 
*/ ret = do_sigprocmask(0, NULL, &cur_set); assert(!ret); target_to_host_old_sigset(&set, &target_set); sigorset(&set, &set, &cur_set); ret = do_sigprocmask(SIG_SETMASK, &set, &oset); if (!ret) { host_to_target_old_sigset(&target_set, &oset); ret = target_set; #endif #ifdef TARGET_NR_sigprocmask case TARGET_NR_sigprocmask: { #if defined(TARGET_ALPHA) sigset_t set, oldset; abi_ulong mask; int how; switch (arg1) { case TARGET_SIG_BLOCK: how = SIG_BLOCK; case TARGET_SIG_UNBLOCK: how = SIG_UNBLOCK; case TARGET_SIG_SETMASK: how = SIG_SETMASK; default: goto fail; mask = arg2; target_to_host_old_sigset(&set, &mask); ret = do_sigprocmask(how, &set, &oldset); if (!is_error(ret)) { host_to_target_old_sigset(&mask, &oldset); ret = mask; ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */ #else sigset_t set, oldset, *set_ptr; int how; if (arg2) { switch (arg1) { case TARGET_SIG_BLOCK: how = SIG_BLOCK; case TARGET_SIG_UNBLOCK: how = SIG_UNBLOCK; case TARGET_SIG_SETMASK: how = SIG_SETMASK; default: goto fail; if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1))) goto efault; target_to_host_old_sigset(&set, p); unlock_user(p, arg2, 0); set_ptr = &set; } else { how = 0; set_ptr = NULL; ret = do_sigprocmask(how, set_ptr, &oldset); if (!is_error(ret) && arg3) { if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0))) goto efault; host_to_target_old_sigset(p, &oldset); unlock_user(p, arg3, sizeof(target_sigset_t)); #endif #endif case TARGET_NR_rt_sigprocmask: { int how = arg1; sigset_t set, oldset, *set_ptr; if (arg4 != sizeof(target_sigset_t)) { if (arg2) { switch(how) { case TARGET_SIG_BLOCK: how = SIG_BLOCK; case TARGET_SIG_UNBLOCK: how = SIG_UNBLOCK; case TARGET_SIG_SETMASK: how = SIG_SETMASK; default: goto fail; if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1))) goto efault; target_to_host_sigset(&set, p); unlock_user(p, arg2, 0); set_ptr = &set; } else { how = 0; set_ptr = NULL; ret = do_sigprocmask(how, set_ptr, &oldset); if (!is_error(ret) && arg3) { if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0))) goto efault; host_to_target_sigset(p, &oldset); unlock_user(p, arg3, sizeof(target_sigset_t)); #ifdef TARGET_NR_sigpending case TARGET_NR_sigpending: { sigset_t set; ret = get_errno(sigpending(&set)); if (!is_error(ret)) { if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0))) goto efault; host_to_target_old_sigset(p, &set); unlock_user(p, arg1, sizeof(target_sigset_t)); #endif case TARGET_NR_rt_sigpending: { sigset_t set; /* Yes, this check is >, not != like most. We follow the kernel's * logic and it does it like this because it implements * NR_sigpending through the same code path, and in that case * the old_sigset_t is smaller in size. 
*/ if (arg2 > sizeof(target_sigset_t)) { ret = get_errno(sigpending(&set)); if (!is_error(ret)) { if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0))) goto efault; host_to_target_sigset(p, &set); unlock_user(p, arg1, sizeof(target_sigset_t)); #ifdef TARGET_NR_sigsuspend case TARGET_NR_sigsuspend: { TaskState *ts = cpu->opaque; #if defined(TARGET_ALPHA) abi_ulong mask = arg1; target_to_host_old_sigset(&ts->sigsuspend_mask, &mask); #else if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) goto efault; target_to_host_old_sigset(&ts->sigsuspend_mask, p); unlock_user(p, arg1, 0); #endif ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask, SIGSET_T_SIZE)); if (ret != -TARGET_ERESTARTSYS) { ts->in_sigsuspend = 1; #endif case TARGET_NR_rt_sigsuspend: { TaskState *ts = cpu->opaque; if (arg2 != sizeof(target_sigset_t)) { if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) goto efault; target_to_host_sigset(&ts->sigsuspend_mask, p); unlock_user(p, arg1, 0); ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask, SIGSET_T_SIZE)); if (ret != -TARGET_ERESTARTSYS) { ts->in_sigsuspend = 1; case TARGET_NR_rt_sigtimedwait: { sigset_t set; struct timespec uts, *puts; siginfo_t uinfo; if (arg4 != sizeof(target_sigset_t)) { if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1))) goto efault; target_to_host_sigset(&set, p); unlock_user(p, arg1, 0); if (arg3) { puts = &uts; target_to_host_timespec(puts, arg3); } else { puts = NULL; ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts, SIGSET_T_SIZE)); if (!is_error(ret)) { if (arg2) { p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0); if (!p) { goto efault; host_to_target_siginfo(p, &uinfo); unlock_user(p, arg2, sizeof(target_siginfo_t)); ret = host_to_target_signal(ret); case TARGET_NR_rt_sigqueueinfo: { siginfo_t uinfo; p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1); if (!p) { goto efault; target_to_host_siginfo(&uinfo, p); unlock_user(p, arg1, 0); ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo)); #ifdef TARGET_NR_sigreturn case TARGET_NR_sigreturn: if (block_signals()) { ret = -TARGET_ERESTARTSYS; } else { ret = do_sigreturn(cpu_env); #endif case TARGET_NR_rt_sigreturn: if (block_signals()) { ret = -TARGET_ERESTARTSYS; } else { ret = do_rt_sigreturn(cpu_env); case TARGET_NR_sethostname: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(sethostname(p, arg2)); unlock_user(p, arg1, 0); case TARGET_NR_setrlimit: { int resource = target_to_host_resource(arg1); struct target_rlimit *target_rlim; struct rlimit rlim; if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1)) goto efault; rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur); rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max); unlock_user_struct(target_rlim, arg2, 0); ret = get_errno(setrlimit(resource, &rlim)); case TARGET_NR_getrlimit: { int resource = target_to_host_resource(arg1); struct target_rlimit *target_rlim; struct rlimit rlim; ret = get_errno(getrlimit(resource, &rlim)); if (!is_error(ret)) { if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) goto efault; target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur); target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max); unlock_user_struct(target_rlim, arg2, 1); case TARGET_NR_getrusage: { struct rusage rusage; ret = get_errno(getrusage(arg1, &rusage)); if (!is_error(ret)) { ret = host_to_target_rusage(arg2, &rusage); case TARGET_NR_gettimeofday: { struct timeval tv; ret = 
get_errno(gettimeofday(&tv, NULL)); if (!is_error(ret)) { if (copy_to_user_timeval(arg1, &tv)) goto efault; case TARGET_NR_settimeofday: { struct timeval tv, *ptv = NULL; struct timezone tz, *ptz = NULL; if (arg1) { if (copy_from_user_timeval(&tv, arg1)) { goto efault; ptv = &tv; if (arg2) { if (copy_from_user_timezone(&tz, arg2)) { goto efault; ptz = &tz; ret = get_errno(settimeofday(ptv, ptz)); #if defined(TARGET_NR_select) case TARGET_NR_select: #if defined(TARGET_S390X) || defined(TARGET_ALPHA) ret = do_select(arg1, arg2, arg3, arg4, arg5); #else { struct target_sel_arg_struct *sel; abi_ulong inp, outp, exp, tvp; long nsel; if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) goto efault; nsel = tswapal(sel->n); inp = tswapal(sel->inp); outp = tswapal(sel->outp); exp = tswapal(sel->exp); tvp = tswapal(sel->tvp); unlock_user_struct(sel, arg1, 0); ret = do_select(nsel, inp, outp, exp, tvp); #endif #endif #ifdef TARGET_NR_pselect6 case TARGET_NR_pselect6: { abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr; fd_set rfds, wfds, efds; fd_set *rfds_ptr, *wfds_ptr, *efds_ptr; struct timespec ts, *ts_ptr; /* * The 6th arg is actually two args smashed together, * so we cannot use the C library. */ sigset_t set; struct { sigset_t *set; size_t size; } sig, *sig_ptr; abi_ulong arg_sigset, arg_sigsize, *arg7; target_sigset_t *target_sigset; n = arg1; rfd_addr = arg2; wfd_addr = arg3; efd_addr = arg4; ts_addr = arg5; ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n); if (ret) { goto fail; ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n); if (ret) { goto fail; ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n); if (ret) { goto fail; /* * This takes a timespec, and not a timeval, so we cannot * use the do_select() helper ... */ if (ts_addr) { if (target_to_host_timespec(&ts, ts_addr)) { goto efault; ts_ptr = &ts; } else { ts_ptr = NULL; /* Extract the two packed args for the sigset */ if (arg6) { sig_ptr = &sig; sig.size = SIGSET_T_SIZE; arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1); if (!arg7) { goto efault; arg_sigset = tswapal(arg7[0]); arg_sigsize = tswapal(arg7[1]); unlock_user(arg7, arg6, 0); if (arg_sigset) { sig.set = &set; if (arg_sigsize != sizeof(*target_sigset)) { /* Like the kernel, we enforce correct size sigsets */ goto fail; target_sigset = lock_user(VERIFY_READ, arg_sigset, sizeof(*target_sigset), 1); if (!target_sigset) { goto efault; target_to_host_sigset(&set, target_sigset); unlock_user(target_sigset, arg_sigset, 0); } else { sig.set = NULL; } else { sig_ptr = NULL; ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr, ts_ptr, sig_ptr)); if (!is_error(ret)) { if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) goto efault; if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) goto efault; if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) goto efault; if (ts_addr && host_to_target_timespec(ts_addr, &ts)) goto efault; #endif #ifdef TARGET_NR_symlink case TARGET_NR_symlink: { void *p2; p = lock_user_string(arg1); p2 = lock_user_string(arg2); if (!p || !p2) ret = -TARGET_EFAULT; else ret = get_errno(symlink(p, p2)); unlock_user(p2, arg2, 0); unlock_user(p, arg1, 0); #endif #if defined(TARGET_NR_symlinkat) case TARGET_NR_symlinkat: { void *p2; p = lock_user_string(arg1); p2 = lock_user_string(arg3); if (!p || !p2) ret = -TARGET_EFAULT; else ret = get_errno(symlinkat(p, arg2, p2)); unlock_user(p2, arg3, 0); unlock_user(p, arg1, 0); #endif #ifdef TARGET_NR_oldlstat case TARGET_NR_oldlstat: goto unimplemented; #endif #ifdef 
TARGET_NR_readlink case TARGET_NR_readlink: { void *p2; p = lock_user_string(arg1); p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0); if (!p || !p2) { ret = -TARGET_EFAULT; } else if (!arg3) { /* Short circuit this for the magic exe check. */ } else if (is_proc_myself((const char *)p, "exe")) { char real[PATH_MAX], *temp; temp = realpath(exec_path, real); /* Return value is # of bytes that we wrote to the buffer. */ if (temp == NULL) { ret = get_errno(-1); } else { /* Don't worry about sign mismatch as earlier mapping * logic would have thrown a bad address error. */ ret = MIN(strlen(real), arg3); /* We cannot NUL terminate the string. */ memcpy(p2, real, ret); } else { ret = get_errno(readlink(path(p), p2, arg3)); unlock_user(p2, arg2, ret); unlock_user(p, arg1, 0); #endif #if defined(TARGET_NR_readlinkat) case TARGET_NR_readlinkat: { void *p2; p = lock_user_string(arg2); p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0); if (!p || !p2) { ret = -TARGET_EFAULT; } else if (is_proc_myself((const char *)p, "exe")) { char real[PATH_MAX], *temp; temp = realpath(exec_path, real); ret = temp == NULL ? get_errno(-1) : strlen(real) ; snprintf((char *)p2, arg4, "%s", real); } else { ret = get_errno(readlinkat(arg1, path(p), p2, arg4)); unlock_user(p2, arg3, ret); unlock_user(p, arg2, 0); #endif #ifdef TARGET_NR_uselib case TARGET_NR_uselib: goto unimplemented; #endif #ifdef TARGET_NR_swapon case TARGET_NR_swapon: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(swapon(p, arg2)); unlock_user(p, arg1, 0); #endif case TARGET_NR_reboot: if (arg3 == LINUX_REBOOT_CMD_RESTART2) { /* arg4 must be ignored in all other cases */ p = lock_user_string(arg4); if (!p) { goto efault; ret = get_errno(reboot(arg1, arg2, arg3, p)); unlock_user(p, arg4, 0); } else { ret = get_errno(reboot(arg1, arg2, arg3, NULL)); #ifdef TARGET_NR_readdir case TARGET_NR_readdir: goto unimplemented; #endif #ifdef TARGET_NR_mmap case TARGET_NR_mmap: #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \ (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \ defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \ || defined(TARGET_S390X) { abi_ulong *v; abi_ulong v1, v2, v3, v4, v5, v6; if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1))) goto efault; v1 = tswapal(v[0]); v2 = tswapal(v[1]); v3 = tswapal(v[2]); v4 = tswapal(v[3]); v5 = tswapal(v[4]); v6 = tswapal(v[5]); unlock_user(v, arg1, 0); ret = get_errno(target_mmap(v1, v2, v3, target_to_host_bitmask(v4, mmap_flags_tbl), v5, v6)); #else ret = get_errno(target_mmap(arg1, arg2, arg3, target_to_host_bitmask(arg4, mmap_flags_tbl), arg5, arg6)); #endif #endif #ifdef TARGET_NR_mmap2 case TARGET_NR_mmap2: #ifndef MMAP_SHIFT #define MMAP_SHIFT 12 #endif ret = get_errno(target_mmap(arg1, arg2, arg3, target_to_host_bitmask(arg4, mmap_flags_tbl), arg5, arg6 << MMAP_SHIFT)); #endif case TARGET_NR_munmap: ret = get_errno(target_munmap(arg1, arg2)); case TARGET_NR_mprotect: { TaskState *ts = cpu->opaque; /* Special hack to detect libc making the stack executable. */ if ((arg3 & PROT_GROWSDOWN) && arg1 >= ts->info->stack_limit && arg1 <= ts->info->start_stack) { arg3 &= ~PROT_GROWSDOWN; arg2 = arg2 + arg1 - ts->info->stack_limit; arg1 = ts->info->stack_limit; ret = get_errno(target_mprotect(arg1, arg2, arg3)); #ifdef TARGET_NR_mremap case TARGET_NR_mremap: ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5)); #endif /* ??? msync/mlock/munlock are broken for softmmu. 
*/ #ifdef TARGET_NR_msync case TARGET_NR_msync: ret = get_errno(msync(g2h(arg1), arg2, arg3)); #endif #ifdef TARGET_NR_mlock case TARGET_NR_mlock: ret = get_errno(mlock(g2h(arg1), arg2)); #endif #ifdef TARGET_NR_munlock case TARGET_NR_munlock: ret = get_errno(munlock(g2h(arg1), arg2)); #endif #ifdef TARGET_NR_mlockall case TARGET_NR_mlockall: ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1))); #endif #ifdef TARGET_NR_munlockall case TARGET_NR_munlockall: ret = get_errno(munlockall()); #endif case TARGET_NR_truncate: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(truncate(p, arg2)); unlock_user(p, arg1, 0); case TARGET_NR_ftruncate: ret = get_errno(ftruncate(arg1, arg2)); case TARGET_NR_fchmod: ret = get_errno(fchmod(arg1, arg2)); #if defined(TARGET_NR_fchmodat) case TARGET_NR_fchmodat: if (!(p = lock_user_string(arg2))) goto efault; ret = get_errno(fchmodat(arg1, p, arg3, 0)); unlock_user(p, arg2, 0); #endif case TARGET_NR_getpriority: /* Note that negative values are valid for getpriority, so we must differentiate based on errno settings. */ errno = 0; ret = getpriority(arg1, arg2); if (ret == -1 && errno != 0) { ret = -host_to_target_errno(errno); #ifdef TARGET_ALPHA /* Return value is the unbiased priority. Signal no error. */ ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; #else /* Return value is a biased priority to avoid negative numbers. */ ret = 20 - ret; #endif case TARGET_NR_setpriority: ret = get_errno(setpriority(arg1, arg2, arg3)); #ifdef TARGET_NR_profil case TARGET_NR_profil: goto unimplemented; #endif case TARGET_NR_statfs: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(statfs(path(p), &stfs)); unlock_user(p, arg1, 0); convert_statfs: if (!is_error(ret)) { struct target_statfs *target_stfs; if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0)) goto efault; __put_user(stfs.f_type, &target_stfs->f_type); __put_user(stfs.f_bsize, &target_stfs->f_bsize); __put_user(stfs.f_blocks, &target_stfs->f_blocks); __put_user(stfs.f_bfree, &target_stfs->f_bfree); __put_user(stfs.f_bavail, &target_stfs->f_bavail); __put_user(stfs.f_files, &target_stfs->f_files); __put_user(stfs.f_ffree, &target_stfs->f_ffree); __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]); __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]); __put_user(stfs.f_namelen, &target_stfs->f_namelen); __put_user(stfs.f_frsize, &target_stfs->f_frsize); memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare)); unlock_user_struct(target_stfs, arg2, 1); case TARGET_NR_fstatfs: ret = get_errno(fstatfs(arg1, &stfs)); goto convert_statfs; #ifdef TARGET_NR_statfs64 case TARGET_NR_statfs64: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(statfs(path(p), &stfs)); unlock_user(p, arg1, 0); convert_statfs64: if (!is_error(ret)) { struct target_statfs64 *target_stfs; if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0)) goto efault; __put_user(stfs.f_type, &target_stfs->f_type); __put_user(stfs.f_bsize, &target_stfs->f_bsize); __put_user(stfs.f_blocks, &target_stfs->f_blocks); __put_user(stfs.f_bfree, &target_stfs->f_bfree); __put_user(stfs.f_bavail, &target_stfs->f_bavail); __put_user(stfs.f_files, &target_stfs->f_files); __put_user(stfs.f_ffree, &target_stfs->f_ffree); __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]); __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]); __put_user(stfs.f_namelen, &target_stfs->f_namelen); __put_user(stfs.f_frsize, &target_stfs->f_frsize); memset(target_stfs->f_spare, 0, 
sizeof(target_stfs->f_spare)); unlock_user_struct(target_stfs, arg3, 1); case TARGET_NR_fstatfs64: ret = get_errno(fstatfs(arg1, &stfs)); goto convert_statfs64; #endif #ifdef TARGET_NR_ioperm case TARGET_NR_ioperm: goto unimplemented; #endif #ifdef TARGET_NR_socketcall case TARGET_NR_socketcall: ret = do_socketcall(arg1, arg2); #endif #ifdef TARGET_NR_accept case TARGET_NR_accept: ret = do_accept4(arg1, arg2, arg3, 0); #endif #ifdef TARGET_NR_accept4 case TARGET_NR_accept4: ret = do_accept4(arg1, arg2, arg3, arg4); #endif #ifdef TARGET_NR_bind case TARGET_NR_bind: ret = do_bind(arg1, arg2, arg3); #endif #ifdef TARGET_NR_connect case TARGET_NR_connect: ret = do_connect(arg1, arg2, arg3); #endif #ifdef TARGET_NR_getpeername case TARGET_NR_getpeername: ret = do_getpeername(arg1, arg2, arg3); #endif #ifdef TARGET_NR_getsockname case TARGET_NR_getsockname: ret = do_getsockname(arg1, arg2, arg3); #endif #ifdef TARGET_NR_getsockopt case TARGET_NR_getsockopt: ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5); #endif #ifdef TARGET_NR_listen case TARGET_NR_listen: ret = get_errno(listen(arg1, arg2)); #endif #ifdef TARGET_NR_recv case TARGET_NR_recv: ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0); #endif #ifdef TARGET_NR_recvfrom case TARGET_NR_recvfrom: ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6); #endif #ifdef TARGET_NR_recvmsg case TARGET_NR_recvmsg: ret = do_sendrecvmsg(arg1, arg2, arg3, 0); #endif #ifdef TARGET_NR_send case TARGET_NR_send: ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0); #endif #ifdef TARGET_NR_sendmsg case TARGET_NR_sendmsg: ret = do_sendrecvmsg(arg1, arg2, arg3, 1); #endif #ifdef TARGET_NR_sendmmsg case TARGET_NR_sendmmsg: ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1); case TARGET_NR_recvmmsg: ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0); #endif #ifdef TARGET_NR_sendto case TARGET_NR_sendto: ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6); #endif #ifdef TARGET_NR_shutdown case TARGET_NR_shutdown: ret = get_errno(shutdown(arg1, arg2)); #endif #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom) case TARGET_NR_getrandom: p = lock_user(VERIFY_WRITE, arg1, arg2, 0); if (!p) { goto efault; ret = get_errno(getrandom(p, arg2, arg3)); unlock_user(p, arg1, ret); #endif #ifdef TARGET_NR_socket case TARGET_NR_socket: ret = do_socket(arg1, arg2, arg3); fd_trans_unregister(ret); #endif #ifdef TARGET_NR_socketpair case TARGET_NR_socketpair: ret = do_socketpair(arg1, arg2, arg3, arg4); #endif #ifdef TARGET_NR_setsockopt case TARGET_NR_setsockopt: ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5); #endif case TARGET_NR_syslog: if (!(p = lock_user_string(arg2))) goto efault; ret = get_errno(sys_syslog((int)arg1, p, (int)arg3)); unlock_user(p, arg2, 0); case TARGET_NR_setitimer: { struct itimerval value, ovalue, *pvalue; if (arg2) { pvalue = &value; if (copy_from_user_timeval(&pvalue->it_interval, arg2) || copy_from_user_timeval(&pvalue->it_value, arg2 + sizeof(struct target_timeval))) goto efault; } else { pvalue = NULL; ret = get_errno(setitimer(arg1, pvalue, &ovalue)); if (!is_error(ret) && arg3) { if (copy_to_user_timeval(arg3, &ovalue.it_interval) || copy_to_user_timeval(arg3 + sizeof(struct target_timeval), &ovalue.it_value)) goto efault; case TARGET_NR_getitimer: { struct itimerval value; ret = get_errno(getitimer(arg1, &value)); if (!is_error(ret) && arg2) { if (copy_to_user_timeval(arg2, &value.it_interval) || copy_to_user_timeval(arg2 + sizeof(struct target_timeval), &value.it_value)) goto efault; #ifdef TARGET_NR_stat case TARGET_NR_stat: if 
(!(p = lock_user_string(arg1))) goto efault; ret = get_errno(stat(path(p), &st)); unlock_user(p, arg1, 0); goto do_stat; #endif #ifdef TARGET_NR_lstat case TARGET_NR_lstat: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(lstat(path(p), &st)); unlock_user(p, arg1, 0); goto do_stat; #endif case TARGET_NR_fstat: { ret = get_errno(fstat(arg1, &st)); #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat) do_stat: #endif if (!is_error(ret)) { struct target_stat *target_st; if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0)) goto efault; memset(target_st, 0, sizeof(*target_st)); __put_user(st.st_dev, &target_st->st_dev); __put_user(st.st_ino, &target_st->st_ino); __put_user(st.st_mode, &target_st->st_mode); __put_user(st.st_uid, &target_st->st_uid); __put_user(st.st_gid, &target_st->st_gid); __put_user(st.st_nlink, &target_st->st_nlink); __put_user(st.st_rdev, &target_st->st_rdev); __put_user(st.st_size, &target_st->st_size); __put_user(st.st_blksize, &target_st->st_blksize); __put_user(st.st_blocks, &target_st->st_blocks); __put_user(st.st_atime, &target_st->target_st_atime); __put_user(st.st_mtime, &target_st->target_st_mtime); __put_user(st.st_ctime, &target_st->target_st_ctime); unlock_user_struct(target_st, arg2, 1); #ifdef TARGET_NR_olduname case TARGET_NR_olduname: goto unimplemented; #endif #ifdef TARGET_NR_iopl case TARGET_NR_iopl: goto unimplemented; #endif case TARGET_NR_vhangup: ret = get_errno(vhangup()); #ifdef TARGET_NR_idle case TARGET_NR_idle: goto unimplemented; #endif #ifdef TARGET_NR_syscall case TARGET_NR_syscall: ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5, arg6, arg7, arg8, 0); #endif case TARGET_NR_wait4: { int status; abi_long status_ptr = arg2; struct rusage rusage, *rusage_ptr; abi_ulong target_rusage = arg4; abi_long rusage_err; if (target_rusage) rusage_ptr = &rusage; else rusage_ptr = NULL; ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr)); if (!is_error(ret)) { if (status_ptr && ret) { status = host_to_target_waitstatus(status); if (put_user_s32(status, status_ptr)) goto efault; if (target_rusage) { rusage_err = host_to_target_rusage(target_rusage, &rusage); if (rusage_err) { ret = rusage_err; #ifdef TARGET_NR_swapoff case TARGET_NR_swapoff: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(swapoff(p)); unlock_user(p, arg1, 0); #endif case TARGET_NR_sysinfo: { struct target_sysinfo *target_value; struct sysinfo value; ret = get_errno(sysinfo(&value)); if (!is_error(ret) && arg1) { if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0)) goto efault; __put_user(value.uptime, &target_value->uptime); __put_user(value.loads[0], &target_value->loads[0]); __put_user(value.loads[1], &target_value->loads[1]); __put_user(value.loads[2], &target_value->loads[2]); __put_user(value.totalram, &target_value->totalram); __put_user(value.freeram, &target_value->freeram); __put_user(value.sharedram, &target_value->sharedram); __put_user(value.bufferram, &target_value->bufferram); __put_user(value.totalswap, &target_value->totalswap); __put_user(value.freeswap, &target_value->freeswap); __put_user(value.procs, &target_value->procs); __put_user(value.totalhigh, &target_value->totalhigh); __put_user(value.freehigh, &target_value->freehigh); __put_user(value.mem_unit, &target_value->mem_unit); unlock_user_struct(target_value, arg1, 1); #ifdef TARGET_NR_ipc case TARGET_NR_ipc: ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6); #endif #ifdef TARGET_NR_semget case TARGET_NR_semget: ret = get_errno(semget(arg1, arg2, 
arg3)); #endif #ifdef TARGET_NR_semop case TARGET_NR_semop: ret = do_semop(arg1, arg2, arg3); #endif #ifdef TARGET_NR_semctl case TARGET_NR_semctl: ret = do_semctl(arg1, arg2, arg3, arg4); #endif #ifdef TARGET_NR_msgctl case TARGET_NR_msgctl: ret = do_msgctl(arg1, arg2, arg3); #endif #ifdef TARGET_NR_msgget case TARGET_NR_msgget: ret = get_errno(msgget(arg1, arg2)); #endif #ifdef TARGET_NR_msgrcv case TARGET_NR_msgrcv: ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5); #endif #ifdef TARGET_NR_msgsnd case TARGET_NR_msgsnd: ret = do_msgsnd(arg1, arg2, arg3, arg4); #endif #ifdef TARGET_NR_shmget case TARGET_NR_shmget: ret = get_errno(shmget(arg1, arg2, arg3)); #endif #ifdef TARGET_NR_shmctl case TARGET_NR_shmctl: ret = do_shmctl(arg1, arg2, arg3); #endif #ifdef TARGET_NR_shmat case TARGET_NR_shmat: ret = do_shmat(arg1, arg2, arg3); #endif #ifdef TARGET_NR_shmdt case TARGET_NR_shmdt: ret = do_shmdt(arg1); #endif case TARGET_NR_fsync: ret = get_errno(fsync(arg1)); case TARGET_NR_clone: /* Linux manages to have three different orderings for its * arguments to clone(); the BACKWARDS and BACKWARDS2 defines * match the kernel's CONFIG_CLONE_* settings. * Microblaze is further special in that it uses a sixth * implicit argument to clone for the TLS pointer. */ #if defined(TARGET_MICROBLAZE) ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5)); #elif defined(TARGET_CLONE_BACKWARDS) ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5)); #elif defined(TARGET_CLONE_BACKWARDS2) ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4)); #else ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4)); #endif #ifdef __NR_exit_group /* new thread calls */ case TARGET_NR_exit_group: #ifdef TARGET_GPROF _mcleanup(); #endif gdb_exit(cpu_env, arg1); ret = get_errno(exit_group(arg1)); #endif case TARGET_NR_setdomainname: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(setdomainname(p, arg2)); unlock_user(p, arg1, 0); case TARGET_NR_uname: /* no need to transcode because we use the linux syscall */ { struct new_utsname * buf; if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0)) goto efault; ret = get_errno(sys_uname(buf)); if (!is_error(ret)) { /* Overwrite the native machine name with whatever is being emulated. */ strcpy (buf->machine, cpu_to_uname_machine(cpu_env)); /* Allow the user to override the reported release. 
*/ if (qemu_uname_release && *qemu_uname_release) { g_strlcpy(buf->release, qemu_uname_release, sizeof(buf->release)); unlock_user_struct(buf, arg1, 1); #ifdef TARGET_I386 case TARGET_NR_modify_ldt: ret = do_modify_ldt(cpu_env, arg1, arg2, arg3); #if !defined(TARGET_X86_64) case TARGET_NR_vm86old: goto unimplemented; case TARGET_NR_vm86: ret = do_vm86(cpu_env, arg1, arg2); #endif #endif case TARGET_NR_adjtimex: goto unimplemented; #ifdef TARGET_NR_create_module case TARGET_NR_create_module: #endif case TARGET_NR_init_module: case TARGET_NR_delete_module: #ifdef TARGET_NR_get_kernel_syms case TARGET_NR_get_kernel_syms: #endif goto unimplemented; case TARGET_NR_quotactl: goto unimplemented; case TARGET_NR_getpgid: ret = get_errno(getpgid(arg1)); case TARGET_NR_fchdir: ret = get_errno(fchdir(arg1)); #ifdef TARGET_NR_bdflush /* not on x86_64 */ case TARGET_NR_bdflush: goto unimplemented; #endif #ifdef TARGET_NR_sysfs case TARGET_NR_sysfs: goto unimplemented; #endif case TARGET_NR_personality: ret = get_errno(personality(arg1)); #ifdef TARGET_NR_afs_syscall case TARGET_NR_afs_syscall: goto unimplemented; #endif #ifdef TARGET_NR__llseek /* Not on alpha */ case TARGET_NR__llseek: { int64_t res; #if !defined(__NR_llseek) res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5); if (res == -1) { ret = get_errno(res); } else { ret = 0; #else ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5)); #endif if ((ret == 0) && put_user_s64(res, arg4)) { goto efault; #endif #ifdef TARGET_NR_getdents case TARGET_NR_getdents: #ifdef __NR_getdents #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64 { struct target_dirent *target_dirp; struct linux_dirent *dirp; abi_long count = arg3; dirp = g_try_malloc(count); if (!dirp) { ret = -TARGET_ENOMEM; goto fail; ret = get_errno(sys_getdents(arg1, dirp, count)); if (!is_error(ret)) { struct linux_dirent *de; struct target_dirent *tde; int len = ret; int reclen, treclen; int count1, tnamelen; count1 = 0; de = dirp; if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) goto efault; tde = target_dirp; while (len > 0) { reclen = de->d_reclen; tnamelen = reclen - offsetof(struct linux_dirent, d_name); assert(tnamelen >= 0); treclen = tnamelen + offsetof(struct target_dirent, d_name); assert(count1 + treclen <= count); tde->d_reclen = tswap16(treclen); tde->d_ino = tswapal(de->d_ino); tde->d_off = tswapal(de->d_off); memcpy(tde->d_name, de->d_name, tnamelen); de = (struct linux_dirent *)((char *)de + reclen); len -= reclen; tde = (struct target_dirent *)((char *)tde + treclen); count1 += treclen; ret = count1; unlock_user(target_dirp, arg2, ret); g_free(dirp); #else { struct linux_dirent *dirp; abi_long count = arg3; if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) goto efault; ret = get_errno(sys_getdents(arg1, dirp, count)); if (!is_error(ret)) { struct linux_dirent *de; int len = ret; int reclen; de = dirp; while (len > 0) { reclen = de->d_reclen; if (reclen > len) de->d_reclen = tswap16(reclen); tswapls(&de->d_ino); tswapls(&de->d_off); de = (struct linux_dirent *)((char *)de + reclen); len -= reclen; unlock_user(dirp, arg2, ret); #endif #else /* Implement getdents in terms of getdents64 */ { struct linux_dirent64 *dirp; abi_long count = arg3; dirp = lock_user(VERIFY_WRITE, arg2, count, 0); if (!dirp) { goto efault; ret = get_errno(sys_getdents64(arg1, dirp, count)); if (!is_error(ret)) { /* Convert the dirent64 structs to target dirent. 
We do this * in-place, since we can guarantee that a target_dirent is no * larger than a dirent64; however this means we have to be * careful to read everything before writing in the new format. */ struct linux_dirent64 *de; struct target_dirent *tde; int len = ret; int tlen = 0; de = dirp; tde = (struct target_dirent *)dirp; while (len > 0) { int namelen, treclen; int reclen = de->d_reclen; uint64_t ino = de->d_ino; int64_t off = de->d_off; uint8_t type = de->d_type; namelen = strlen(de->d_name); treclen = offsetof(struct target_dirent, d_name) + namelen + 2; treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long)); memmove(tde->d_name, de->d_name, namelen + 1); tde->d_ino = tswapal(ino); tde->d_off = tswapal(off); tde->d_reclen = tswap16(treclen); /* The target_dirent type is in what was formerly a padding * byte at the end of the structure: */ *(((char *)tde) + treclen - 1) = type; de = (struct linux_dirent64 *)((char *)de + reclen); tde = (struct target_dirent *)((char *)tde + treclen); len -= reclen; tlen += treclen; ret = tlen; unlock_user(dirp, arg2, ret); #endif #endif /* TARGET_NR_getdents */ #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64) case TARGET_NR_getdents64: { struct linux_dirent64 *dirp; abi_long count = arg3; if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) goto efault; ret = get_errno(sys_getdents64(arg1, dirp, count)); if (!is_error(ret)) { struct linux_dirent64 *de; int len = ret; int reclen; de = dirp; while (len > 0) { reclen = de->d_reclen; if (reclen > len) de->d_reclen = tswap16(reclen); tswap64s((uint64_t *)&de->d_ino); tswap64s((uint64_t *)&de->d_off); de = (struct linux_dirent64 *)((char *)de + reclen); len -= reclen; unlock_user(dirp, arg2, ret); #endif /* TARGET_NR_getdents64 */ #if defined(TARGET_NR__newselect) case TARGET_NR__newselect: ret = do_select(arg1, arg2, arg3, arg4, arg5); #endif #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) # ifdef TARGET_NR_poll case TARGET_NR_poll: # endif # ifdef TARGET_NR_ppoll case TARGET_NR_ppoll: # endif { struct target_pollfd *target_pfd; unsigned int nfds = arg2; struct pollfd *pfd; unsigned int i; pfd = NULL; target_pfd = NULL; if (nfds) { target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1); if (!target_pfd) { goto efault; pfd = alloca(sizeof(struct pollfd) * nfds); for (i = 0; i < nfds; i++) { pfd[i].fd = tswap32(target_pfd[i].fd); pfd[i].events = tswap16(target_pfd[i].events); switch (num) { # ifdef TARGET_NR_ppoll case TARGET_NR_ppoll: { struct timespec _timeout_ts, *timeout_ts = &_timeout_ts; target_sigset_t *target_set; sigset_t _set, *set = &_set; if (arg3) { if (target_to_host_timespec(timeout_ts, arg3)) { unlock_user(target_pfd, arg1, 0); goto efault; } else { timeout_ts = NULL; if (arg4) { if (arg5 != sizeof(target_sigset_t)) { unlock_user(target_pfd, arg1, 0); target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1); if (!target_set) { unlock_user(target_pfd, arg1, 0); goto efault; target_to_host_sigset(set, target_set); } else { set = NULL; ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts, set, SIGSET_T_SIZE)); if (!is_error(ret) && arg3) { host_to_target_timespec(arg3, timeout_ts); if (arg4) { unlock_user(target_set, arg4, 0); # endif # ifdef TARGET_NR_poll case TARGET_NR_poll: { struct timespec ts, *pts; if (arg3 >= 0) { /* Convert ms to secs, ns */ ts.tv_sec = arg3 / 1000; ts.tv_nsec = (arg3 % 1000) * 1000000LL; pts = &ts; } else { /* -ve poll() timeout means "infinite" */ pts = NULL; ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0)); 
# endif default: g_assert_not_reached(); if (!is_error(ret)) { for(i = 0; i < nfds; i++) { target_pfd[i].revents = tswap16(pfd[i].revents); unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds); #endif case TARGET_NR_flock: /* NOTE: the flock constant seems to be the same for every Linux platform */ ret = get_errno(safe_flock(arg1, arg2)); case TARGET_NR_readv: { struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0); if (vec != NULL) { ret = get_errno(safe_readv(arg1, vec, arg3)); unlock_iovec(vec, arg2, arg3, 1); } else { ret = -host_to_target_errno(errno); case TARGET_NR_writev: { struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1); if (vec != NULL) { ret = get_errno(safe_writev(arg1, vec, arg3)); unlock_iovec(vec, arg2, arg3, 0); } else { ret = -host_to_target_errno(errno); case TARGET_NR_getsid: ret = get_errno(getsid(arg1)); #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */ case TARGET_NR_fdatasync: ret = get_errno(fdatasync(arg1)); #endif #ifdef TARGET_NR__sysctl case TARGET_NR__sysctl: /* We don't implement this, but ENOTDIR is always a safe return value. */ ret = -TARGET_ENOTDIR; #endif case TARGET_NR_sched_getaffinity: { unsigned int mask_size; unsigned long *mask; /* * sched_getaffinity needs multiples of ulong, so need to take * care of mismatches between target ulong and host ulong sizes. */ if (arg2 & (sizeof(abi_ulong) - 1)) { mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1); mask = alloca(mask_size); ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask)); if (!is_error(ret)) { if (ret > arg2) { /* More data returned than the caller's buffer will fit. * This only happens if sizeof(abi_long) < sizeof(long) * and the caller passed us a buffer holding an odd number * of abi_longs. If the host kernel is actually using the * extra 4 bytes then fail EINVAL; otherwise we can just * ignore them and only copy the interesting part. */ int numcpus = sysconf(_SC_NPROCESSORS_CONF); if (numcpus > arg2 * 8) { ret = arg2; if (copy_to_user(arg3, mask, ret)) { goto efault; case TARGET_NR_sched_setaffinity: { unsigned int mask_size; unsigned long *mask; /* * sched_setaffinity needs multiples of ulong, so need to take * care of mismatches between target ulong and host ulong sizes. 
*/ if (arg2 & (sizeof(abi_ulong) - 1)) { mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1); mask = alloca(mask_size); if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) { goto efault; memcpy(mask, p, arg2); unlock_user_struct(p, arg2, 0); ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask)); case TARGET_NR_sched_setparam: { struct sched_param *target_schp; struct sched_param schp; if (arg2 == 0) { return -TARGET_EINVAL; if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) goto efault; schp.sched_priority = tswap32(target_schp->sched_priority); unlock_user_struct(target_schp, arg2, 0); ret = get_errno(sched_setparam(arg1, &schp)); case TARGET_NR_sched_getparam: { struct sched_param *target_schp; struct sched_param schp; if (arg2 == 0) { return -TARGET_EINVAL; ret = get_errno(sched_getparam(arg1, &schp)); if (!is_error(ret)) { if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) goto efault; target_schp->sched_priority = tswap32(schp.sched_priority); unlock_user_struct(target_schp, arg2, 1); case TARGET_NR_sched_setscheduler: { struct sched_param *target_schp; struct sched_param schp; if (arg3 == 0) { return -TARGET_EINVAL; if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) goto efault; schp.sched_priority = tswap32(target_schp->sched_priority); unlock_user_struct(target_schp, arg3, 0); ret = get_errno(sched_setscheduler(arg1, arg2, &schp)); case TARGET_NR_sched_getscheduler: ret = get_errno(sched_getscheduler(arg1)); case TARGET_NR_sched_yield: ret = get_errno(sched_yield()); case TARGET_NR_sched_get_priority_max: ret = get_errno(sched_get_priority_max(arg1)); case TARGET_NR_sched_get_priority_min: ret = get_errno(sched_get_priority_min(arg1)); case TARGET_NR_sched_rr_get_interval: { struct timespec ts; ret = get_errno(sched_rr_get_interval(arg1, &ts)); if (!is_error(ret)) { ret = host_to_target_timespec(arg2, &ts); case TARGET_NR_nanosleep: { struct timespec req, rem; target_to_host_timespec(&req, arg1); ret = get_errno(safe_nanosleep(&req, &rem)); if (is_error(ret) && arg2) { host_to_target_timespec(arg2, &rem); #ifdef TARGET_NR_query_module case TARGET_NR_query_module: goto unimplemented; #endif #ifdef TARGET_NR_nfsservctl case TARGET_NR_nfsservctl: goto unimplemented; #endif case TARGET_NR_prctl: switch (arg1) { case PR_GET_PDEATHSIG: { int deathsig; ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5)); if (!is_error(ret) && arg2 && put_user_ual(deathsig, arg2)) { goto efault; #ifdef PR_GET_NAME case PR_GET_NAME: { void *name = lock_user(VERIFY_WRITE, arg2, 16, 1); if (!name) { goto efault; ret = get_errno(prctl(arg1, (unsigned long)name, arg3, arg4, arg5)); unlock_user(name, arg2, 16); case PR_SET_NAME: { void *name = lock_user(VERIFY_READ, arg2, 16, 1); if (!name) { goto efault; ret = get_errno(prctl(arg1, (unsigned long)name, arg3, arg4, arg5)); unlock_user(name, arg2, 0); #endif default: /* Most prctl options have no pointer arguments */ ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5)); #ifdef TARGET_NR_arch_prctl case TARGET_NR_arch_prctl: #if defined(TARGET_I386) && !defined(TARGET_ABI32) ret = do_arch_prctl(cpu_env, arg1, arg2); #else goto unimplemented; #endif #endif #ifdef TARGET_NR_pread64 case TARGET_NR_pread64: if (regpairs_aligned(cpu_env)) { arg4 = arg5; arg5 = arg6; if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0))) goto efault; ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5))); unlock_user(p, arg2, ret); case TARGET_NR_pwrite64: if (regpairs_aligned(cpu_env)) { arg4 = arg5; arg5 = arg6; if (!(p = 
lock_user(VERIFY_READ, arg2, arg3, 1))) goto efault; ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5))); unlock_user(p, arg2, 0); #endif case TARGET_NR_getcwd: if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0))) goto efault; ret = get_errno(sys_getcwd1(p, arg2)); unlock_user(p, arg1, ret); case TARGET_NR_capget: case TARGET_NR_capset: { struct target_user_cap_header *target_header; struct target_user_cap_data *target_data = NULL; struct __user_cap_header_struct header; struct __user_cap_data_struct data[2]; struct __user_cap_data_struct *dataptr = NULL; int i, target_datalen; int data_items = 1; if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) { goto efault; header.version = tswap32(target_header->version); header.pid = tswap32(target_header->pid); if (header.version != _LINUX_CAPABILITY_VERSION) { /* Version 2 and up takes pointer to two user_data structs */ data_items = 2; target_datalen = sizeof(*target_data) * data_items; if (arg2) { if (num == TARGET_NR_capget) { target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0); } else { target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1); if (!target_data) { unlock_user_struct(target_header, arg1, 0); goto efault; if (num == TARGET_NR_capset) { for (i = 0; i < data_items; i++) { data[i].effective = tswap32(target_data[i].effective); data[i].permitted = tswap32(target_data[i].permitted); data[i].inheritable = tswap32(target_data[i].inheritable); dataptr = data; if (num == TARGET_NR_capget) { ret = get_errno(capget(&header, dataptr)); } else { ret = get_errno(capset(&header, dataptr)); /* The kernel always updates version for both capget and capset */ target_header->version = tswap32(header.version); unlock_user_struct(target_header, arg1, 1); if (arg2) { if (num == TARGET_NR_capget) { for (i = 0; i < data_items; i++) { target_data[i].effective = tswap32(data[i].effective); target_data[i].permitted = tswap32(data[i].permitted); target_data[i].inheritable = tswap32(data[i].inheritable); unlock_user(target_data, arg2, target_datalen); } else { unlock_user(target_data, arg2, 0); case TARGET_NR_sigaltstack: ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env)); #ifdef CONFIG_SENDFILE case TARGET_NR_sendfile: { off_t *offp = NULL; off_t off; if (arg3) { ret = get_user_sal(off, arg3); if (is_error(ret)) { offp = &off; ret = get_errno(sendfile(arg1, arg2, offp, arg4)); if (!is_error(ret) && arg3) { abi_long ret2 = put_user_sal(off, arg3); if (is_error(ret2)) { ret = ret2; #ifdef TARGET_NR_sendfile64 case TARGET_NR_sendfile64: { off_t *offp = NULL; off_t off; if (arg3) { ret = get_user_s64(off, arg3); if (is_error(ret)) { offp = &off; ret = get_errno(sendfile(arg1, arg2, offp, arg4)); if (!is_error(ret) && arg3) { abi_long ret2 = put_user_s64(off, arg3); if (is_error(ret2)) { ret = ret2; #endif #else case TARGET_NR_sendfile: #ifdef TARGET_NR_sendfile64 case TARGET_NR_sendfile64: #endif goto unimplemented; #endif #ifdef TARGET_NR_getpmsg case TARGET_NR_getpmsg: goto unimplemented; #endif #ifdef TARGET_NR_putpmsg case TARGET_NR_putpmsg: goto unimplemented; #endif #ifdef TARGET_NR_vfork case TARGET_NR_vfork: ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD, 0, 0, 0, 0)); #endif #ifdef TARGET_NR_ugetrlimit case TARGET_NR_ugetrlimit: { struct rlimit rlim; int resource = target_to_host_resource(arg1); ret = get_errno(getrlimit(resource, &rlim)); if (!is_error(ret)) { struct target_rlimit *target_rlim; if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) goto 
efault; target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur); target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max); unlock_user_struct(target_rlim, arg2, 1); #endif #ifdef TARGET_NR_truncate64 case TARGET_NR_truncate64: if (!(p = lock_user_string(arg1))) goto efault; ret = target_truncate64(cpu_env, p, arg2, arg3, arg4); unlock_user(p, arg1, 0); #endif #ifdef TARGET_NR_ftruncate64 case TARGET_NR_ftruncate64: ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4); #endif #ifdef TARGET_NR_stat64 case TARGET_NR_stat64: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(stat(path(p), &st)); unlock_user(p, arg1, 0); if (!is_error(ret)) ret = host_to_target_stat64(cpu_env, arg2, &st); #endif #ifdef TARGET_NR_lstat64 case TARGET_NR_lstat64: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(lstat(path(p), &st)); unlock_user(p, arg1, 0); if (!is_error(ret)) ret = host_to_target_stat64(cpu_env, arg2, &st); #endif #ifdef TARGET_NR_fstat64 case TARGET_NR_fstat64: ret = get_errno(fstat(arg1, &st)); if (!is_error(ret)) ret = host_to_target_stat64(cpu_env, arg2, &st); #endif #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) #ifdef TARGET_NR_fstatat64 case TARGET_NR_fstatat64: #endif #ifdef TARGET_NR_newfstatat case TARGET_NR_newfstatat: #endif if (!(p = lock_user_string(arg2))) goto efault; ret = get_errno(fstatat(arg1, path(p), &st, arg4)); if (!is_error(ret)) ret = host_to_target_stat64(cpu_env, arg3, &st); #endif #ifdef TARGET_NR_lchown case TARGET_NR_lchown: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3))); unlock_user(p, arg1, 0); #endif #ifdef TARGET_NR_getuid case TARGET_NR_getuid: ret = get_errno(high2lowuid(getuid())); #endif #ifdef TARGET_NR_getgid case TARGET_NR_getgid: ret = get_errno(high2lowgid(getgid())); #endif #ifdef TARGET_NR_geteuid case TARGET_NR_geteuid: ret = get_errno(high2lowuid(geteuid())); #endif #ifdef TARGET_NR_getegid case TARGET_NR_getegid: ret = get_errno(high2lowgid(getegid())); #endif case TARGET_NR_setreuid: ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2))); case TARGET_NR_setregid: ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2))); case TARGET_NR_getgroups: { int gidsetsize = arg1; target_id *target_grouplist; gid_t *grouplist; int i; grouplist = alloca(gidsetsize * sizeof(gid_t)); ret = get_errno(getgroups(gidsetsize, grouplist)); if (gidsetsize == 0) if (!is_error(ret)) { target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0); if (!target_grouplist) goto efault; for(i = 0;i < ret; i++) target_grouplist[i] = tswapid(high2lowgid(grouplist[i])); unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id)); case TARGET_NR_setgroups: { int gidsetsize = arg1; target_id *target_grouplist; gid_t *grouplist = NULL; int i; if (gidsetsize) { grouplist = alloca(gidsetsize * sizeof(gid_t)); target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1); if (!target_grouplist) { ret = -TARGET_EFAULT; goto fail; for (i = 0; i < gidsetsize; i++) { grouplist[i] = low2highgid(tswapid(target_grouplist[i])); unlock_user(target_grouplist, arg2, 0); ret = get_errno(setgroups(gidsetsize, grouplist)); case TARGET_NR_fchown: ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3))); #if defined(TARGET_NR_fchownat) case TARGET_NR_fchownat: if (!(p = lock_user_string(arg2))) goto efault; ret = get_errno(fchownat(arg1, p, low2highuid(arg3), low2highgid(arg4), arg5)); unlock_user(p, 
arg2, 0); #endif #ifdef TARGET_NR_setresuid case TARGET_NR_setresuid: ret = get_errno(sys_setresuid(low2highuid(arg1), low2highuid(arg2), low2highuid(arg3))); #endif #ifdef TARGET_NR_getresuid case TARGET_NR_getresuid: { uid_t ruid, euid, suid; ret = get_errno(getresuid(&ruid, &euid, &suid)); if (!is_error(ret)) { if (put_user_id(high2lowuid(ruid), arg1) || put_user_id(high2lowuid(euid), arg2) || put_user_id(high2lowuid(suid), arg3)) goto efault; #endif #ifdef TARGET_NR_getresgid case TARGET_NR_setresgid: ret = get_errno(sys_setresgid(low2highgid(arg1), low2highgid(arg2), low2highgid(arg3))); #endif #ifdef TARGET_NR_getresgid case TARGET_NR_getresgid: { gid_t rgid, egid, sgid; ret = get_errno(getresgid(&rgid, &egid, &sgid)); if (!is_error(ret)) { if (put_user_id(high2lowgid(rgid), arg1) || put_user_id(high2lowgid(egid), arg2) || put_user_id(high2lowgid(sgid), arg3)) goto efault; #endif #ifdef TARGET_NR_chown case TARGET_NR_chown: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3))); unlock_user(p, arg1, 0); #endif case TARGET_NR_setuid: ret = get_errno(sys_setuid(low2highuid(arg1))); case TARGET_NR_setgid: ret = get_errno(sys_setgid(low2highgid(arg1))); case TARGET_NR_setfsuid: ret = get_errno(setfsuid(arg1)); case TARGET_NR_setfsgid: ret = get_errno(setfsgid(arg1)); #ifdef TARGET_NR_lchown32 case TARGET_NR_lchown32: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(lchown(p, arg2, arg3)); unlock_user(p, arg1, 0); #endif #ifdef TARGET_NR_getuid32 case TARGET_NR_getuid32: ret = get_errno(getuid()); #endif #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA) /* Alpha specific */ case TARGET_NR_getxuid: { uid_t euid; euid=geteuid(); ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid; ret = get_errno(getuid()); #endif #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA) /* Alpha specific */ case TARGET_NR_getxgid: { uid_t egid; egid=getegid(); ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid; ret = get_errno(getgid()); #endif #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA) /* Alpha specific */ case TARGET_NR_osf_getsysinfo: ret = -TARGET_EOPNOTSUPP; switch (arg1) { case TARGET_GSI_IEEE_FP_CONTROL: { uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env); /* Copied from linux ieee_fpcr_to_swcr. */ swcr = (fpcr >> 35) & SWCR_STATUS_MASK; swcr |= (fpcr >> 36) & SWCR_MAP_DMZ; swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV | SWCR_TRAP_ENABLE_DZE | SWCR_TRAP_ENABLE_OVF); swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF | SWCR_TRAP_ENABLE_INE); swcr |= (fpcr >> 47) & SWCR_MAP_UMZ; swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO; if (put_user_u64 (swcr, arg2)) goto efault; ret = 0; /* case GSI_IEEE_STATE_AT_SIGNAL: -- Not implemented in linux kernel. case GSI_UACPROC: -- Retrieves current unaligned access state; not much used. case GSI_PROC_TYPE: -- Retrieves implver information; surely not used. case GSI_GET_HWRPB: -- Grabs a copy of the HWRPB; surely not used. */ #endif #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA) /* Alpha specific */ case TARGET_NR_osf_setsysinfo: ret = -TARGET_EOPNOTSUPP; switch (arg1) { case TARGET_SSI_IEEE_FP_CONTROL: { uint64_t swcr, fpcr, orig_fpcr; if (get_user_u64 (swcr, arg2)) { goto efault; orig_fpcr = cpu_alpha_load_fpcr(cpu_env); fpcr = orig_fpcr & FPCR_DYN_MASK; /* Copied from linux ieee_swcr_to_fpcr. 
*/ fpcr |= (swcr & SWCR_STATUS_MASK) << 35; fpcr |= (swcr & SWCR_MAP_DMZ) << 36; fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV | SWCR_TRAP_ENABLE_DZE | SWCR_TRAP_ENABLE_OVF)) << 48; fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF | SWCR_TRAP_ENABLE_INE)) << 57; fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0); fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41; cpu_alpha_store_fpcr(cpu_env, fpcr); ret = 0; case TARGET_SSI_IEEE_RAISE_EXCEPTION: { uint64_t exc, fpcr, orig_fpcr; int si_code; if (get_user_u64(exc, arg2)) { goto efault; orig_fpcr = cpu_alpha_load_fpcr(cpu_env); /* We only add to the exception status here. */ fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35); cpu_alpha_store_fpcr(cpu_env, fpcr); ret = 0; /* Old exceptions are not signaled. */ fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK); /* If any exceptions set by this call, and are unmasked, send a signal. */ si_code = 0; if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) { si_code = TARGET_FPE_FLTRES; if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) { si_code = TARGET_FPE_FLTUND; if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) { si_code = TARGET_FPE_FLTOVF; if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) { si_code = TARGET_FPE_FLTDIV; if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) { si_code = TARGET_FPE_FLTINV; if (si_code != 0) { target_siginfo_t info; info.si_signo = SIGFPE; info.si_errno = 0; info.si_code = si_code; info._sifields._sigfault._addr = ((CPUArchState *)cpu_env)->pc; queue_signal((CPUArchState *)cpu_env, info.si_signo, &info); /* case SSI_NVPAIRS: -- Used with SSIN_UACPROC to enable unaligned accesses. case SSI_IEEE_STATE_AT_SIGNAL: case SSI_IEEE_IGNORE_STATE_AT_SIGNAL: -- Not implemented in linux kernel */ #endif #ifdef TARGET_NR_osf_sigprocmask /* Alpha specific. */ case TARGET_NR_osf_sigprocmask: { abi_ulong mask; int how; sigset_t set, oldset; switch(arg1) { case TARGET_SIG_BLOCK: how = SIG_BLOCK; case TARGET_SIG_UNBLOCK: how = SIG_UNBLOCK; case TARGET_SIG_SETMASK: how = SIG_SETMASK; default: goto fail; mask = arg2; target_to_host_old_sigset(&set, &mask); ret = do_sigprocmask(how, &set, &oldset); if (!ret) { host_to_target_old_sigset(&mask, &oldset); ret = mask; #endif #ifdef TARGET_NR_getgid32 case TARGET_NR_getgid32: ret = get_errno(getgid()); #endif #ifdef TARGET_NR_geteuid32 case TARGET_NR_geteuid32: ret = get_errno(geteuid()); #endif #ifdef TARGET_NR_getegid32 case TARGET_NR_getegid32: ret = get_errno(getegid()); #endif #ifdef TARGET_NR_setreuid32 case TARGET_NR_setreuid32: ret = get_errno(setreuid(arg1, arg2)); #endif #ifdef TARGET_NR_setregid32 case TARGET_NR_setregid32: ret = get_errno(setregid(arg1, arg2)); #endif #ifdef TARGET_NR_getgroups32 case TARGET_NR_getgroups32: { int gidsetsize = arg1; uint32_t *target_grouplist; gid_t *grouplist; int i; grouplist = alloca(gidsetsize * sizeof(gid_t)); ret = get_errno(getgroups(gidsetsize, grouplist)); if (gidsetsize == 0) if (!is_error(ret)) { target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0); if (!target_grouplist) { ret = -TARGET_EFAULT; goto fail; for(i = 0;i < ret; i++) target_grouplist[i] = tswap32(grouplist[i]); unlock_user(target_grouplist, arg2, gidsetsize * 4); #endif #ifdef TARGET_NR_setgroups32 case TARGET_NR_setgroups32: { int gidsetsize = arg1; uint32_t *target_grouplist; gid_t *grouplist; int i; grouplist = alloca(gidsetsize * sizeof(gid_t)); target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1); if (!target_grouplist) { ret = -TARGET_EFAULT; goto fail; for(i = 0;i < gidsetsize; i++) grouplist[i] = 
tswap32(target_grouplist[i]); unlock_user(target_grouplist, arg2, 0); ret = get_errno(setgroups(gidsetsize, grouplist)); #endif #ifdef TARGET_NR_fchown32 case TARGET_NR_fchown32: ret = get_errno(fchown(arg1, arg2, arg3)); #endif #ifdef TARGET_NR_setresuid32 case TARGET_NR_setresuid32: ret = get_errno(sys_setresuid(arg1, arg2, arg3)); #endif #ifdef TARGET_NR_getresuid32 case TARGET_NR_getresuid32: { uid_t ruid, euid, suid; ret = get_errno(getresuid(&ruid, &euid, &suid)); if (!is_error(ret)) { if (put_user_u32(ruid, arg1) || put_user_u32(euid, arg2) || put_user_u32(suid, arg3)) goto efault; #endif #ifdef TARGET_NR_setresgid32 case TARGET_NR_setresgid32: ret = get_errno(sys_setresgid(arg1, arg2, arg3)); #endif #ifdef TARGET_NR_getresgid32 case TARGET_NR_getresgid32: { gid_t rgid, egid, sgid; ret = get_errno(getresgid(&rgid, &egid, &sgid)); if (!is_error(ret)) { if (put_user_u32(rgid, arg1) || put_user_u32(egid, arg2) || put_user_u32(sgid, arg3)) goto efault; #endif #ifdef TARGET_NR_chown32 case TARGET_NR_chown32: if (!(p = lock_user_string(arg1))) goto efault; ret = get_errno(chown(p, arg2, arg3)); unlock_user(p, arg1, 0); #endif #ifdef TARGET_NR_setuid32 case TARGET_NR_setuid32: ret = get_errno(sys_setuid(arg1)); #endif #ifdef TARGET_NR_setgid32 case TARGET_NR_setgid32: ret = get_errno(sys_setgid(arg1)); #endif #ifdef TARGET_NR_setfsuid32 case TARGET_NR_setfsuid32: ret = get_errno(setfsuid(arg1)); #endif #ifdef TARGET_NR_setfsgid32 case TARGET_NR_setfsgid32: ret = get_errno(setfsgid(arg1)); #endif case TARGET_NR_pivot_root: goto unimplemented; #ifdef TARGET_NR_mincore case TARGET_NR_mincore: { void *a; ret = -TARGET_EFAULT; if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0))) goto efault; if (!(p = lock_user_string(arg3))) goto mincore_fail; ret = get_errno(mincore(a, arg2, p)); unlock_user(p, arg3, ret); mincore_fail: unlock_user(a, arg1, 0); #endif #ifdef TARGET_NR_arm_fadvise64_64 case TARGET_NR_arm_fadvise64_64: /* arm_fadvise64_64 looks like fadvise64_64 but * with different argument order: fd, advice, offset, len * rather than the usual fd, offset, len, advice. * Note that offset and len are both 64-bit so appear as * pairs of 32-bit registers. 
*/ ret = posix_fadvise(arg1, target_offset64(arg3, arg4), target_offset64(arg5, arg6), arg2); ret = -host_to_target_errno(ret); #endif #if TARGET_ABI_BITS == 32 #ifdef TARGET_NR_fadvise64_64 case TARGET_NR_fadvise64_64: /* 6 args: fd, offset (high, low), len (high, low), advice */ if (regpairs_aligned(cpu_env)) { /* offset is in (3,4), len in (5,6) and advice in 7 */ arg2 = arg3; arg3 = arg4; arg4 = arg5; arg5 = arg6; arg6 = arg7; ret = -host_to_target_errno(posix_fadvise(arg1, target_offset64(arg2, arg3), target_offset64(arg4, arg5), arg6)); #endif #ifdef TARGET_NR_fadvise64 case TARGET_NR_fadvise64: /* 5 args: fd, offset (high, low), len, advice */ if (regpairs_aligned(cpu_env)) { /* offset is in (3,4), len in 5 and advice in 6 */ arg2 = arg3; arg3 = arg4; arg4 = arg5; arg5 = arg6; ret = -host_to_target_errno(posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5)); #endif #else /* not a 32-bit ABI */ #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64) #ifdef TARGET_NR_fadvise64_64 case TARGET_NR_fadvise64_64: #endif #ifdef TARGET_NR_fadvise64 case TARGET_NR_fadvise64: #endif #ifdef TARGET_S390X switch (arg4) { case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */ case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */ case 6: arg4 = POSIX_FADV_DONTNEED; break; case 7: arg4 = POSIX_FADV_NOREUSE; break; default: break; #endif ret = -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4)); #endif #endif /* end of 64-bit ABI fadvise handling */ #ifdef TARGET_NR_madvise case TARGET_NR_madvise: /* A straight passthrough may not be safe because qemu sometimes turns private file-backed mappings into anonymous mappings. This will break MADV_DONTNEED. This is a hint, so ignoring and returning success is ok. 
*/ ret = get_errno(0); #endif #if TARGET_ABI_BITS == 32 case TARGET_NR_fcntl64: { int cmd; struct flock64 fl; from_flock64_fn *copyfrom = copy_from_user_flock64; to_flock64_fn *copyto = copy_to_user_flock64; #ifdef TARGET_ARM if (((CPUARMState *)cpu_env)->eabi) { copyfrom = copy_from_user_eabi_flock64; copyto = copy_to_user_eabi_flock64; #endif cmd = target_to_host_fcntl_cmd(arg2); if (cmd == -TARGET_EINVAL) { ret = cmd; switch(arg2) { case TARGET_F_GETLK64: ret = copyfrom(&fl, arg3); if (ret) { ret = get_errno(fcntl(arg1, cmd, &fl)); if (ret == 0) { ret = copyto(arg3, &fl); case TARGET_F_SETLK64: case TARGET_F_SETLKW64: ret = copyfrom(&fl, arg3); if (ret) { ret = get_errno(safe_fcntl(arg1, cmd, &fl)); default: ret = do_fcntl(arg1, arg2, arg3); #endif #ifdef TARGET_NR_cacheflush case TARGET_NR_cacheflush: /* self-modifying code is handled automatically, so nothing needed */ ret = 0; #endif #ifdef TARGET_NR_security case TARGET_NR_security: goto unimplemented; #endif #ifdef TARGET_NR_getpagesize case TARGET_NR_getpagesize: ret = TARGET_PAGE_SIZE; #endif case TARGET_NR_gettid: ret = get_errno(gettid()); #ifdef TARGET_NR_readahead case TARGET_NR_readahead: #if TARGET_ABI_BITS == 32 if (regpairs_aligned(cpu_env)) { arg2 = arg3; arg3 = arg4; arg4 = arg5; ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4)); #else ret = get_errno(readahead(arg1, arg2, arg3)); #endif #endif #ifdef CONFIG_ATTR #ifdef TARGET_NR_setxattr case TARGET_NR_listxattr: case TARGET_NR_llistxattr: { void *p, *b = 0; if (arg2) { b = lock_user(VERIFY_WRITE, arg2, arg3, 0); if (!b) { ret = -TARGET_EFAULT; p = lock_user_string(arg1); if (p) { if (num == TARGET_NR_listxattr) { ret = get_errno(listxattr(p, b, arg3)); } else { ret = get_errno(llistxattr(p, b, arg3)); } else { ret = -TARGET_EFAULT; unlock_user(p, arg1, 0); unlock_user(b, arg2, arg3); case TARGET_NR_flistxattr: { void *b = 0; if (arg2) { b = lock_user(VERIFY_WRITE, arg2, arg3, 0); if (!b) { ret = -TARGET_EFAULT; ret = get_errno(flistxattr(arg1, b, arg3)); unlock_user(b, arg2, arg3); case TARGET_NR_setxattr: case TARGET_NR_lsetxattr: { void *p, *n, *v = 0; if (arg3) { v = lock_user(VERIFY_READ, arg3, arg4, 1); if (!v) { ret = -TARGET_EFAULT; p = lock_user_string(arg1); n = lock_user_string(arg2); if (p && n) { if (num == TARGET_NR_setxattr) { ret = get_errno(setxattr(p, n, v, arg4, arg5)); } else { ret = get_errno(lsetxattr(p, n, v, arg4, arg5)); } else { ret = -TARGET_EFAULT; unlock_user(p, arg1, 0); unlock_user(n, arg2, 0); unlock_user(v, arg3, 0); case TARGET_NR_fsetxattr: { void *n, *v = 0; if (arg3) { v = lock_user(VERIFY_READ, arg3, arg4, 1); if (!v) { ret = -TARGET_EFAULT; n = lock_user_string(arg2); if (n) { ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5)); } else { ret = -TARGET_EFAULT; unlock_user(n, arg2, 0); unlock_user(v, arg3, 0); case TARGET_NR_getxattr: case TARGET_NR_lgetxattr: { void *p, *n, *v = 0; if (arg3) { v = lock_user(VERIFY_WRITE, arg3, arg4, 0); if (!v) { ret = -TARGET_EFAULT; p = lock_user_string(arg1); n = lock_user_string(arg2); if (p && n) { if (num == TARGET_NR_getxattr) { ret = get_errno(getxattr(p, n, v, arg4)); } else { ret = get_errno(lgetxattr(p, n, v, arg4)); } else { ret = -TARGET_EFAULT; unlock_user(p, arg1, 0); unlock_user(n, arg2, 0); unlock_user(v, arg3, arg4); case TARGET_NR_fgetxattr: { void *n, *v = 0; if (arg3) { v = lock_user(VERIFY_WRITE, arg3, arg4, 0); if (!v) { ret = -TARGET_EFAULT; n = lock_user_string(arg2); if (n) { ret = get_errno(fgetxattr(arg1, n, v, arg4)); } else { ret = -TARGET_EFAULT; 
unlock_user(n, arg2, 0); unlock_user(v, arg3, arg4); case TARGET_NR_removexattr: case TARGET_NR_lremovexattr: { void *p, *n; p = lock_user_string(arg1); n = lock_user_string(arg2); if (p && n) { if (num == TARGET_NR_removexattr) { ret = get_errno(removexattr(p, n)); } else { ret = get_errno(lremovexattr(p, n)); } else { ret = -TARGET_EFAULT; unlock_user(p, arg1, 0); unlock_user(n, arg2, 0); case TARGET_NR_fremovexattr: { void *n; n = lock_user_string(arg2); if (n) { ret = get_errno(fremovexattr(arg1, n)); } else { ret = -TARGET_EFAULT; unlock_user(n, arg2, 0); #endif #endif /* CONFIG_ATTR */ #ifdef TARGET_NR_set_thread_area case TARGET_NR_set_thread_area: #if defined(TARGET_MIPS) ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1; ret = 0; #elif defined(TARGET_CRIS) if (arg1 & 0xff) else { ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1; ret = 0; #elif defined(TARGET_I386) && defined(TARGET_ABI32) ret = do_set_thread_area(cpu_env, arg1); #elif defined(TARGET_M68K) { TaskState *ts = cpu->opaque; ts->tp_value = arg1; ret = 0; #else goto unimplemented_nowarn; #endif #endif #ifdef TARGET_NR_get_thread_area case TARGET_NR_get_thread_area: #if defined(TARGET_I386) && defined(TARGET_ABI32) ret = do_get_thread_area(cpu_env, arg1); #elif defined(TARGET_M68K) { TaskState *ts = cpu->opaque; ret = ts->tp_value; #else goto unimplemented_nowarn; #endif #endif #ifdef TARGET_NR_getdomainname case TARGET_NR_getdomainname: goto unimplemented_nowarn; #endif #ifdef TARGET_NR_clock_gettime case TARGET_NR_clock_gettime: { struct timespec ts; ret = get_errno(clock_gettime(arg1, &ts)); if (!is_error(ret)) { host_to_target_timespec(arg2, &ts); #endif #ifdef TARGET_NR_clock_getres case TARGET_NR_clock_getres: { struct timespec ts; ret = get_errno(clock_getres(arg1, &ts)); if (!is_error(ret)) { host_to_target_timespec(arg2, &ts); #endif #ifdef TARGET_NR_clock_nanosleep case TARGET_NR_clock_nanosleep: { struct timespec ts; target_to_host_timespec(&ts, arg3); ret = get_errno(safe_clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL)); if (arg4) host_to_target_timespec(arg4, &ts); #if defined(TARGET_PPC) /* clock_nanosleep is odd in that it returns positive errno values. * On PPC, CR0 bit 3 should be set in such a situation. */ if (ret && ret != -TARGET_ERESTARTSYS) { ((CPUPPCState *)cpu_env)->crf[0] |= 1; #endif #endif #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address) case TARGET_NR_set_tid_address: ret = get_errno(set_tid_address((int *)g2h(arg1))); #endif case TARGET_NR_tkill: ret = get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2))); case TARGET_NR_tgkill: ret = get_errno(safe_tgkill((int)arg1, (int)arg2, target_to_host_signal(arg3))); #ifdef TARGET_NR_set_robust_list case TARGET_NR_set_robust_list: case TARGET_NR_get_robust_list: /* The ABI for supporting robust futexes has userspace pass * the kernel a pointer to a linked list which is updated by * userspace after the syscall; the list is walked by the kernel * when the thread exits. Since the linked list in QEMU guest * memory isn't a valid linked list for the host and we have * no way to reliably intercept the thread-death event, we can't * support these. Silently return ENOSYS so that guest userspace * falls back to a non-robust futex implementation (which should * be OK except in the corner case of the guest crashing while * holding a mutex that is shared with another process via * shared memory). 
*/ goto unimplemented_nowarn; #endif #if defined(TARGET_NR_utimensat) case TARGET_NR_utimensat: { struct timespec *tsp, ts[2]; if (!arg3) { tsp = NULL; } else { target_to_host_timespec(ts, arg3); target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec)); tsp = ts; if (!arg2) ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4)); else { if (!(p = lock_user_string(arg2))) { ret = -TARGET_EFAULT; goto fail; ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4)); unlock_user(p, arg2, 0); #endif case TARGET_NR_futex: ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6); #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init) case TARGET_NR_inotify_init: ret = get_errno(sys_inotify_init()); #endif #ifdef CONFIG_INOTIFY1 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1) case TARGET_NR_inotify_init1: ret = get_errno(sys_inotify_init1(arg1)); #endif #endif #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch) case TARGET_NR_inotify_add_watch: p = lock_user_string(arg2); ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3)); unlock_user(p, arg2, 0); #endif #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch) case TARGET_NR_inotify_rm_watch: ret = get_errno(sys_inotify_rm_watch(arg1, arg2)); #endif #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open) case TARGET_NR_mq_open: { struct mq_attr posix_mq_attr, *attrp; p = lock_user_string(arg1 - 1); if (arg4 != 0) { copy_from_user_mq_attr (&posix_mq_attr, arg4); attrp = &posix_mq_attr; } else { attrp = 0; ret = get_errno(mq_open(p, arg2, arg3, attrp)); unlock_user (p, arg1, 0); case TARGET_NR_mq_unlink: p = lock_user_string(arg1 - 1); ret = get_errno(mq_unlink(p)); unlock_user (p, arg1, 0); case TARGET_NR_mq_timedsend: { struct timespec ts; p = lock_user (VERIFY_READ, arg2, arg3, 1); if (arg5 != 0) { target_to_host_timespec(&ts, arg5); ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts)); host_to_target_timespec(arg5, &ts); } else { ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL)); unlock_user (p, arg2, arg3); case TARGET_NR_mq_timedreceive: { struct timespec ts; unsigned int prio; p = lock_user (VERIFY_READ, arg2, arg3, 1); if (arg5 != 0) { target_to_host_timespec(&ts, arg5); ret = get_errno(safe_mq_timedreceive(arg1, p, arg3, &prio, &ts)); host_to_target_timespec(arg5, &ts); } else { ret = get_errno(safe_mq_timedreceive(arg1, p, arg3, &prio, NULL)); unlock_user (p, arg2, arg3); if (arg4 != 0) put_user_u32(prio, arg4); /* Not implemented for now... 
*/ /* case TARGET_NR_mq_notify: */ /* break; */ case TARGET_NR_mq_getsetattr: { struct mq_attr posix_mq_attr_in, posix_mq_attr_out; ret = 0; if (arg3 != 0) { ret = mq_getattr(arg1, &posix_mq_attr_out); copy_to_user_mq_attr(arg3, &posix_mq_attr_out); if (arg2 != 0) { copy_from_user_mq_attr(&posix_mq_attr_in, arg2); ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out); #endif #ifdef CONFIG_SPLICE #ifdef TARGET_NR_tee case TARGET_NR_tee: { ret = get_errno(tee(arg1,arg2,arg3,arg4)); #endif #ifdef TARGET_NR_splice case TARGET_NR_splice: { loff_t loff_in, loff_out; loff_t *ploff_in = NULL, *ploff_out = NULL; if (arg2) { if (get_user_u64(loff_in, arg2)) { goto efault; ploff_in = &loff_in; if (arg4) { if (get_user_u64(loff_out, arg4)) { goto efault; ploff_out = &loff_out; ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6)); if (arg2) { if (put_user_u64(loff_in, arg2)) { goto efault; if (arg4) { if (put_user_u64(loff_out, arg4)) { goto efault; #endif #ifdef TARGET_NR_vmsplice case TARGET_NR_vmsplice: { struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1); if (vec != NULL) { ret = get_errno(vmsplice(arg1, vec, arg3, arg4)); unlock_iovec(vec, arg2, arg3, 0); } else { ret = -host_to_target_errno(errno); #endif #endif /* CONFIG_SPLICE */ #ifdef CONFIG_EVENTFD #if defined(TARGET_NR_eventfd) case TARGET_NR_eventfd: ret = get_errno(eventfd(arg1, 0)); fd_trans_unregister(ret); #endif #if defined(TARGET_NR_eventfd2) case TARGET_NR_eventfd2: { int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)); if (arg2 & TARGET_O_NONBLOCK) { host_flags |= O_NONBLOCK; if (arg2 & TARGET_O_CLOEXEC) { host_flags |= O_CLOEXEC; ret = get_errno(eventfd(arg1, host_flags)); fd_trans_unregister(ret); #endif #endif /* CONFIG_EVENTFD */ #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate) case TARGET_NR_fallocate: #if TARGET_ABI_BITS == 32 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4), target_offset64(arg5, arg6))); #else ret = get_errno(fallocate(arg1, arg2, arg3, arg4)); #endif #endif #if defined(CONFIG_SYNC_FILE_RANGE) #if defined(TARGET_NR_sync_file_range) case TARGET_NR_sync_file_range: #if TARGET_ABI_BITS == 32 #if defined(TARGET_MIPS) ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4), target_offset64(arg5, arg6), arg7)); #else ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3), target_offset64(arg4, arg5), arg6)); #endif /* !TARGET_MIPS */ #else ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4)); #endif #endif #if defined(TARGET_NR_sync_file_range2) case TARGET_NR_sync_file_range2: /* This is like sync_file_range but the arguments are reordered */ #if TARGET_ABI_BITS == 32 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4), target_offset64(arg5, arg6), arg2)); #else ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2)); #endif #endif #endif #if defined(TARGET_NR_signalfd4) case TARGET_NR_signalfd4: ret = do_signalfd4(arg1, arg2, arg4); #endif #if defined(TARGET_NR_signalfd) case TARGET_NR_signalfd: ret = do_signalfd4(arg1, arg2, 0); #endif #if defined(CONFIG_EPOLL) #if defined(TARGET_NR_epoll_create) case TARGET_NR_epoll_create: ret = get_errno(epoll_create(arg1)); #endif #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1) case TARGET_NR_epoll_create1: ret = get_errno(epoll_create1(arg1)); #endif #if defined(TARGET_NR_epoll_ctl) case TARGET_NR_epoll_ctl: { struct epoll_event ep; struct epoll_event *epp = 0; if (arg4) { struct target_epoll_event *target_ep; if 
(!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) { goto efault; } ep.events = tswap32(target_ep->events); /* The epoll_data_t union is just opaque data to the kernel, * so we transfer all 64 bits across and need not worry what * actual data type it is. */ ep.data.u64 = tswap64(target_ep->data.u64); unlock_user_struct(target_ep, arg4, 0); epp = &ep; } ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp)); break; } #endif #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait) #if defined(TARGET_NR_epoll_wait) case TARGET_NR_epoll_wait: #endif #if defined(TARGET_NR_epoll_pwait) case TARGET_NR_epoll_pwait: #endif { struct target_epoll_event *target_ep; struct epoll_event *ep; int epfd = arg1; int maxevents = arg3; int timeout = arg4; if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) { ret = -TARGET_EINVAL; break; } target_ep = lock_user(VERIFY_WRITE, arg2, maxevents * sizeof(struct target_epoll_event), 1); if (!target_ep) { goto efault; } ep = alloca(maxevents * sizeof(struct epoll_event)); switch (num) { #if defined(TARGET_NR_epoll_pwait) case TARGET_NR_epoll_pwait: { target_sigset_t *target_set; sigset_t _set, *set = &_set; if (arg5) { if (arg6 != sizeof(target_sigset_t)) { ret = -TARGET_EINVAL; break; } target_set = lock_user(VERIFY_READ, arg5, sizeof(target_sigset_t), 1); if (!target_set) { unlock_user(target_ep, arg2, 0); goto efault; } target_to_host_sigset(set, target_set); unlock_user(target_set, arg5, 0); } else { set = NULL; } ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout, set, SIGSET_T_SIZE)); break; } #endif #if defined(TARGET_NR_epoll_wait) case TARGET_NR_epoll_wait: ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout, NULL, 0)); break; #endif default: ret = -TARGET_ENOSYS; } if (!is_error(ret)) { int i; for (i = 0; i < ret; i++) { target_ep[i].events = tswap32(ep[i].events); target_ep[i].data.u64 = tswap64(ep[i].data.u64); } unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event)); } break; } #endif #endif #ifdef TARGET_NR_prlimit64 case TARGET_NR_prlimit64: { /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */ struct target_rlimit64 *target_rnew, *target_rold; struct host_rlimit64 rnew, rold, *rnewp = 0; int resource = target_to_host_resource(arg2); if (arg3) { if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) { goto efault; } rnew.rlim_cur = tswap64(target_rnew->rlim_cur); rnew.rlim_max = tswap64(target_rnew->rlim_max); unlock_user_struct(target_rnew, arg3, 0); rnewp = &rnew; } ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0)); if (!is_error(ret) && arg4) { if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) { goto efault; } target_rold->rlim_cur = tswap64(rold.rlim_cur); target_rold->rlim_max = tswap64(rold.rlim_max); unlock_user_struct(target_rold, arg4, 1); } break; } #endif #ifdef TARGET_NR_gethostname case TARGET_NR_gethostname: { char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0); if (name) { ret = get_errno(gethostname(name, arg2)); unlock_user(name, arg1, arg2); } else { ret = -TARGET_EFAULT; } break; } #endif #ifdef TARGET_NR_atomic_cmpxchg_32 case TARGET_NR_atomic_cmpxchg_32: { /* should use start_exclusive from main.c */ abi_ulong mem_value; if (get_user_u32(mem_value, arg6)) { target_siginfo_t info; info.si_signo = SIGSEGV; info.si_errno = 0; info.si_code = TARGET_SEGV_MAPERR; info._sifields._sigfault._addr = arg6; queue_signal((CPUArchState *)cpu_env, info.si_signo, &info); ret = 0xdeadbeef; } if (mem_value == arg2) put_user_u32(arg1, arg6); ret = mem_value; break; } #endif #ifdef TARGET_NR_atomic_barrier case TARGET_NR_atomic_barrier: { /* Like the kernel implementation and the qemu arm barrier, no-op this? */ ret = 0; break; } #endif #ifdef TARGET_NR_timer_create case TARGET_NR_timer_create: { /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */ struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL; int clkid = arg1; int timer_index = next_free_host_timer(); if (timer_index < 0) { ret = -TARGET_EAGAIN; } else { timer_t *phtimer = g_posix_timers + timer_index; if (arg2) { phost_sevp = &host_sevp; ret = target_to_host_sigevent(phost_sevp, arg2); if (ret != 0) { break; } } ret = get_errno(timer_create(clkid, phost_sevp, phtimer)); if (ret) { phtimer = NULL; } else { if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) { goto efault; } } } break; } #endif #ifdef TARGET_NR_timer_settime case TARGET_NR_timer_settime: { /* args: timer_t timerid, int flags, const struct itimerspec *new_value, * struct itimerspec * old_value */ target_timer_t timerid = get_timer_id(arg1); if (timerid < 0) { ret = timerid; } else if (arg3 == 0) { ret = -TARGET_EINVAL; } else { timer_t htimer = g_posix_timers[timerid]; struct itimerspec hspec_new = {{0},}, hspec_old = {{0},}; target_to_host_itimerspec(&hspec_new, arg3); ret = get_errno( timer_settime(htimer, arg2, &hspec_new, &hspec_old)); host_to_target_itimerspec(arg2, &hspec_old); } break; } #endif #ifdef TARGET_NR_timer_gettime case TARGET_NR_timer_gettime: { /* args: timer_t timerid, struct itimerspec *curr_value */ target_timer_t timerid = get_timer_id(arg1); if (timerid < 0) { ret = timerid; } else if (!arg2) { ret = -TARGET_EFAULT; } else { timer_t htimer = g_posix_timers[timerid]; struct itimerspec hspec; ret = get_errno(timer_gettime(htimer, &hspec)); if (host_to_target_itimerspec(arg2, &hspec)) { ret = -TARGET_EFAULT; } } break; } #endif #ifdef TARGET_NR_timer_getoverrun case TARGET_NR_timer_getoverrun: { /* args: timer_t timerid */ target_timer_t timerid = get_timer_id(arg1); if (timerid < 0) { ret = timerid; } else { timer_t htimer = g_posix_timers[timerid]; ret = get_errno(timer_getoverrun(htimer)); } fd_trans_unregister(ret); break; } #endif #ifdef TARGET_NR_timer_delete case TARGET_NR_timer_delete: { /* args: timer_t timerid */ target_timer_t timerid = get_timer_id(arg1); if (timerid < 0) { ret = timerid; } else { timer_t htimer = g_posix_timers[timerid]; ret = get_errno(timer_delete(htimer)); g_posix_timers[timerid] = 0; } break; } #endif #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD) case TARGET_NR_timerfd_create: ret = get_errno(timerfd_create(arg1, target_to_host_bitmask(arg2, fcntl_flags_tbl))); break; #endif #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD) case TARGET_NR_timerfd_gettime: { struct itimerspec its_curr; ret = get_errno(timerfd_gettime(arg1, &its_curr)); if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) { goto efault; } } break; #endif #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD) case TARGET_NR_timerfd_settime: { struct itimerspec its_new, its_old, *p_new; if (arg3) { if (target_to_host_itimerspec(&its_new, arg3)) { goto efault; } p_new = &its_new; } else { p_new = NULL; } ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old)); if (arg4 && host_to_target_itimerspec(arg4, &its_old)) { goto efault; } } break; #endif #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get) case TARGET_NR_ioprio_get: ret = get_errno(ioprio_get(arg1, arg2)); break; #endif #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set) case TARGET_NR_ioprio_set: ret = get_errno(ioprio_set(arg1, arg2, arg3)); break; #endif #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS) case TARGET_NR_setns: ret = get_errno(setns(arg1, arg2)); break; #endif #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS) case TARGET_NR_unshare: ret = get_errno(unshare(arg1)); break; #endif default: unimplemented: gemu_log("qemu: Unsupported syscall: %d\n", num); #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list) unimplemented_nowarn: #endif ret = -TARGET_ENOSYS; break; } fail: #ifdef DEBUG gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret); #endif if(do_strace) print_syscall_ret(num, ret); trace_guest_user_syscall_ret(cpu, num, ret); return ret; efault: ret = -TARGET_EFAULT; goto fail; } | 21,464 |
0 | static void build_chunks(MOVTrack *trk) { int i; MOVIentry *chunk= &trk->cluster[0]; uint64_t chunkSize = chunk->size; chunk->chunkNum= 1; trk->chunkCount= 1; for(i=1; i<trk->entry; i++){ if(chunk->pos + chunkSize == trk->cluster[i].pos){ chunkSize += trk->cluster[i].size; chunk->samplesInChunk += trk->cluster[i].entries; }else{ trk->cluster[i].chunkNum = chunk->chunkNum+1; chunk=&trk->cluster[i]; chunkSize = chunk->size; trk->chunkCount++; } } } | 21,465 |
1 | static int encode_frame(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pic, int *got_packet) { ProresContext *ctx = avctx->priv_data; uint8_t *orig_buf, *buf, *slice_hdr, *slice_sizes, *tmp; uint8_t *picture_size_pos; PutBitContext pb; int x, y, i, mb, q = 0; int sizes[4] = { 0 }; int slice_hdr_size = 2 + 2 * (ctx->num_planes - 1); int frame_size, picture_size, slice_size; int pkt_size, ret; uint8_t frame_flags; *avctx->coded_frame = *pic; avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I; avctx->coded_frame->key_frame = 1; pkt_size = ctx->frame_size_upper_bound; if ((ret = ff_alloc_packet(pkt, pkt_size + FF_MIN_BUFFER_SIZE)) < 0) { av_log(avctx, AV_LOG_ERROR, "Error getting output packet.\n"); return ret; } orig_buf = pkt->data; // frame atom orig_buf += 4; // frame size bytestream_put_be32 (&orig_buf, FRAME_ID); // frame container ID buf = orig_buf; // frame header tmp = buf; buf += 2; // frame header size will be stored here bytestream_put_be16 (&buf, 0); // version 1 bytestream_put_buffer(&buf, ctx->vendor, 4); bytestream_put_be16 (&buf, avctx->width); bytestream_put_be16 (&buf, avctx->height); frame_flags = ctx->chroma_factor << 6; if (avctx->flags & CODEC_FLAG_INTERLACED_DCT) frame_flags |= pic->top_field_first ? 0x04 : 0x08; bytestream_put_byte (&buf, frame_flags); bytestream_put_byte (&buf, 0); // reserved bytestream_put_byte (&buf, avctx->color_primaries); bytestream_put_byte (&buf, avctx->color_trc); bytestream_put_byte (&buf, avctx->colorspace); bytestream_put_byte (&buf, 0x40 | (ctx->alpha_bits >> 3)); bytestream_put_byte (&buf, 0); // reserved if (ctx->quant_sel != QUANT_MAT_DEFAULT) { bytestream_put_byte (&buf, 0x03); // matrix flags - both matrices are present // luma quantisation matrix for (i = 0; i < 64; i++) bytestream_put_byte(&buf, ctx->quant_mat[i]); // chroma quantisation matrix for (i = 0; i < 64; i++) bytestream_put_byte(&buf, ctx->quant_mat[i]); } else { bytestream_put_byte (&buf, 0x00); // matrix flags - default matrices are used } bytestream_put_be16 (&tmp, buf - orig_buf); // write back frame header size for (ctx->cur_picture_idx = 0; ctx->cur_picture_idx < ctx->pictures_per_frame; ctx->cur_picture_idx++) { // picture header picture_size_pos = buf + 1; bytestream_put_byte (&buf, 0x40); // picture header size (in bits) buf += 4; // picture data size will be stored here bytestream_put_be16 (&buf, ctx->slices_per_picture); bytestream_put_byte (&buf, av_log2(ctx->mbs_per_slice) << 4); // slice width and height in MBs // seek table - will be filled during slice encoding slice_sizes = buf; buf += ctx->slices_per_picture * 2; // slices if (!ctx->force_quant) { ret = avctx->execute2(avctx, find_quant_thread, NULL, NULL, ctx->mb_height); if (ret) return ret; } for (y = 0; y < ctx->mb_height; y++) { int mbs_per_slice = ctx->mbs_per_slice; for (x = mb = 0; x < ctx->mb_width; x += mbs_per_slice, mb++) { q = ctx->force_quant ? 
ctx->force_quant : ctx->slice_q[mb + y * ctx->slices_width]; while (ctx->mb_width - x < mbs_per_slice) mbs_per_slice >>= 1; bytestream_put_byte(&buf, slice_hdr_size << 3); slice_hdr = buf; buf += slice_hdr_size - 1; init_put_bits(&pb, buf, (pkt_size - (buf - orig_buf)) * 8); ret = encode_slice(avctx, pic, &pb, sizes, x, y, q, mbs_per_slice); if (ret < 0) return ret; bytestream_put_byte(&slice_hdr, q); slice_size = slice_hdr_size + sizes[ctx->num_planes - 1]; for (i = 0; i < ctx->num_planes - 1; i++) { bytestream_put_be16(&slice_hdr, sizes[i]); slice_size += sizes[i]; } bytestream_put_be16(&slice_sizes, slice_size); buf += slice_size - slice_hdr_size; } } if (ctx->pictures_per_frame == 1) picture_size = buf - picture_size_pos - 6; else picture_size = buf - picture_size_pos + 1; bytestream_put_be32(&picture_size_pos, picture_size); } orig_buf -= 8; frame_size = buf - orig_buf; bytestream_put_be32(&orig_buf, frame_size); pkt->size = frame_size; pkt->flags |= AV_PKT_FLAG_KEY; *got_packet = 1; return 0; } | 21,467 |
1 | void ff_lag_rac_init(lag_rac *l, GetBitContext *gb, int length) { int i, j; /* According to reference decoder "1st byte is garbage", * however, it gets skipped by the call to align_get_bits() */ align_get_bits(gb); l->bytestream_start = l->bytestream = gb->buffer + get_bits_count(gb) / 8; l->bytestream_end = l->bytestream_start + length; l->range = 0x80; l->low = *l->bytestream >> 1; l->hash_shift = FFMAX(l->scale - 8, 0); for (i = j = 0; i < 256; i++) { unsigned r = i << l->hash_shift; while (l->prob[j + 1] <= r) j++; l->range_hash[i] = j; } /* Add conversion factor to hash_shift so we don't have to in lag_get_rac. */ l->hash_shift += 23; } | 21,469 |
1 | void vmstate_unregister(DeviceState *dev, const VMStateDescription *vmsd, void *opaque) { SaveStateEntry *se, *new_se; QTAILQ_FOREACH_SAFE(se, &savevm_handlers, entry, new_se) { if (se->vmsd == vmsd && se->opaque == opaque) { QTAILQ_REMOVE(&savevm_handlers, se, entry); qemu_free(se); } } } | 21,471 |
1 | void iothread_stop_all(void) { Object *container = object_get_objects_root(); BlockDriverState *bs; BdrvNextIterator it; for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) { AioContext *ctx = bdrv_get_aio_context(bs); if (ctx == qemu_get_aio_context()) { continue; } aio_context_acquire(ctx); bdrv_set_aio_context(bs, qemu_get_aio_context()); aio_context_release(ctx); } object_child_foreach(container, iothread_stop, NULL); } | 21,472 |
0 | int apic_get_interrupt(DeviceState *d) { APICState *s = DO_UPCAST(APICState, busdev.qdev, d); int intno; /* if the APIC is installed or enabled, we let the 8259 handle the IRQs */ if (!s) return -1; if (!(s->spurious_vec & APIC_SV_ENABLE)) return -1; /* XXX: spurious IRQ handling */ intno = get_highest_priority_int(s->irr); if (intno < 0) return -1; if (s->tpr && intno <= s->tpr) return s->spurious_vec & 0xff; reset_bit(s->irr, intno); set_bit(s->isr, intno); apic_update_irq(s); return intno; } | 21,474 |
0 | static inline void decode_residual_inter(AVSContext *h) { int block; /* get coded block pattern */ h->cbp = cbp_tab[get_ue_golomb(&h->s.gb)][1]; /* get quantizer */ if(h->cbp && !h->qp_fixed) h->qp += get_se_golomb(&h->s.gb); for(block=0;block<4;block++) if(h->cbp & (1<<block)) decode_residual_block(h,&h->s.gb,inter_2dvlc,0,h->qp, h->cy + h->luma_scan[block], h->l_stride); decode_residual_chroma(h); } | 21,475 |
0 | static void pfpu_write(void *opaque, target_phys_addr_t addr, uint64_t value, unsigned size) { MilkymistPFPUState *s = opaque; trace_milkymist_pfpu_memory_write(addr, value); addr >>= 2; switch (addr) { case R_CTL: if (value & CTL_START_BUSY) { pfpu_start(s); } break; case R_MESHBASE: case R_HMESHLAST: case R_VMESHLAST: case R_CODEPAGE: case R_VERTICES: case R_COLLISIONS: case R_STRAYWRITES: case R_LASTDMA: case R_PC: case R_DREGBASE: case R_CODEBASE: s->regs[addr] = value; break; case GPR_BEGIN ... GPR_END: s->gp_regs[addr - GPR_BEGIN] = value; break; case MICROCODE_BEGIN ... MICROCODE_END: s->microcode[get_microcode_address(s, addr)] = value; break; default: error_report("milkymist_pfpu: write access to unknown register 0x" TARGET_FMT_plx, addr << 2); break; } } | 21,476 |
0 | uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len) { VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev); uint32_t emu_bits = 0, emu_val = 0, phys_val = 0, val; memcpy(&emu_bits, vdev->emulated_config_bits + addr, len); emu_bits = le32_to_cpu(emu_bits); if (emu_bits) { emu_val = pci_default_read_config(pdev, addr, len); } if (~emu_bits & (0xffffffffU >> (32 - len * 8))) { ssize_t ret; ret = pread(vdev->vbasedev.fd, &phys_val, len, vdev->config_offset + addr); if (ret != len) { error_report("%s(%04x:%02x:%02x.%x, 0x%x, 0x%x) failed: %m", __func__, vdev->host.domain, vdev->host.bus, vdev->host.slot, vdev->host.function, addr, len); return -errno; } phys_val = le32_to_cpu(phys_val); } val = (emu_val & emu_bits) | (phys_val & ~emu_bits); trace_vfio_pci_read_config(vdev->vbasedev.name, addr, len, val); return val; } | 21,477 |
0 | static void pci_device_reset(PCIDevice *dev) { int r; dev->irq_state = 0; pci_update_irq_status(dev); /* Clear all writeable bits */ pci_word_test_and_clear_mask(dev->config + PCI_COMMAND, pci_get_word(dev->wmask + PCI_COMMAND)); dev->config[PCI_CACHE_LINE_SIZE] = 0x0; dev->config[PCI_INTERRUPT_LINE] = 0x0; for (r = 0; r < PCI_NUM_REGIONS; ++r) { PCIIORegion *region = &dev->io_regions[r]; if (!region->size) { continue; } if (!(region->type & PCI_BASE_ADDRESS_SPACE_IO) && region->type & PCI_BASE_ADDRESS_MEM_TYPE_64) { pci_set_quad(dev->config + pci_bar(dev, r), region->type); } else { pci_set_long(dev->config + pci_bar(dev, r), region->type); } } pci_update_mappings(dev); } | 21,478 |
0 | static void xen_set_memory(struct MemoryListener *listener, MemoryRegionSection *section, bool add) { XenIOState *state = container_of(listener, XenIOState, memory_listener); hwaddr start_addr = section->offset_within_address_space; ram_addr_t size = int128_get64(section->size); bool log_dirty = memory_region_is_logging(section->mr); hvmmem_type_t mem_type; if (section->mr == &ram_memory) { return; } else { if (add) { xen_map_memory_section(xen_xc, xen_domid, state->ioservid, section); } else { xen_unmap_memory_section(xen_xc, xen_domid, state->ioservid, section); } } if (!memory_region_is_ram(section->mr)) { return; } if (log_dirty != add) { return; } trace_xen_client_set_memory(start_addr, size, log_dirty); start_addr &= TARGET_PAGE_MASK; size = TARGET_PAGE_ALIGN(size); if (add) { if (!memory_region_is_rom(section->mr)) { xen_add_to_physmap(state, start_addr, size, section->mr, section->offset_within_region); } else { mem_type = HVMMEM_ram_ro; if (xc_hvm_set_mem_type(xen_xc, xen_domid, mem_type, start_addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS)) { DPRINTF("xc_hvm_set_mem_type error, addr: "TARGET_FMT_plx"\n", start_addr); } } } else { if (xen_remove_from_physmap(state, start_addr, size) < 0) { DPRINTF("physmapping does not exist at "TARGET_FMT_plx"\n", start_addr); } } } | 21,479 |
0 | static void hmp_mouse_move(Monitor *mon, const QDict *qdict) { int dx, dy, dz, button; const char *dx_str = qdict_get_str(qdict, "dx_str"); const char *dy_str = qdict_get_str(qdict, "dy_str"); const char *dz_str = qdict_get_try_str(qdict, "dz_str"); dx = strtol(dx_str, NULL, 0); dy = strtol(dy_str, NULL, 0); qemu_input_queue_rel(NULL, INPUT_AXIS_X, dx); qemu_input_queue_rel(NULL, INPUT_AXIS_Y, dy); if (dz_str) { dz = strtol(dz_str, NULL, 0); if (dz != 0) { button = (dz > 0) ? INPUT_BUTTON_WHEEL_UP : INPUT_BUTTON_WHEEL_DOWN; qemu_input_queue_btn(NULL, button, true); qemu_input_event_sync(); qemu_input_queue_btn(NULL, button, false); } } qemu_input_event_sync(); } | 21,480 |
0 | static void realview_init(ram_addr_t ram_size, const char *boot_device, const char *kernel_filename, const char *kernel_cmdline, const char *initrd_filename, const char *cpu_model, enum realview_board_type board_type) { CPUState *env = NULL; ram_addr_t ram_offset; DeviceState *dev, *sysctl, *gpio2; SysBusDevice *busdev; qemu_irq *irqp; qemu_irq pic[64]; qemu_irq mmc_irq[2]; PCIBus *pci_bus; NICInfo *nd; i2c_bus *i2c; int n; int done_nic = 0; qemu_irq cpu_irq[4]; int is_mpcore = 0; int is_pb = 0; uint32_t proc_id = 0; uint32_t sys_id; ram_addr_t low_ram_size; switch (board_type) { case BOARD_EB: break; case BOARD_EB_MPCORE: is_mpcore = 1; break; case BOARD_PB_A8: is_pb = 1; break; case BOARD_PBX_A9: is_mpcore = 1; is_pb = 1; break; } for (n = 0; n < smp_cpus; n++) { env = cpu_init(cpu_model); if (!env) { fprintf(stderr, "Unable to find CPU definition\n"); exit(1); } irqp = arm_pic_init_cpu(env); cpu_irq[n] = irqp[ARM_PIC_CPU_IRQ]; } if (arm_feature(env, ARM_FEATURE_V7)) { if (is_mpcore) { proc_id = 0x0c000000; } else { proc_id = 0x0e000000; } } else if (arm_feature(env, ARM_FEATURE_V6K)) { proc_id = 0x06000000; } else if (arm_feature(env, ARM_FEATURE_V6)) { proc_id = 0x04000000; } else { proc_id = 0x02000000; } if (is_pb && ram_size > 0x20000000) { /* Core tile RAM. */ low_ram_size = ram_size - 0x20000000; ram_size = 0x20000000; ram_offset = qemu_ram_alloc(NULL, "realview.lowmem", low_ram_size); cpu_register_physical_memory(0x20000000, low_ram_size, ram_offset | IO_MEM_RAM); } ram_offset = qemu_ram_alloc(NULL, "realview.highmem", ram_size); low_ram_size = ram_size; if (low_ram_size > 0x10000000) low_ram_size = 0x10000000; /* SDRAM at address zero. */ cpu_register_physical_memory(0, low_ram_size, ram_offset | IO_MEM_RAM); if (is_pb) { /* And again at a high address. */ cpu_register_physical_memory(0x70000000, ram_size, ram_offset | IO_MEM_RAM); } else { ram_size = low_ram_size; } sys_id = is_pb ? 0x01780500 : 0xc1400400; sysctl = qdev_create(NULL, "realview_sysctl"); qdev_prop_set_uint32(sysctl, "sys_id", sys_id); qdev_init_nofail(sysctl); qdev_prop_set_uint32(sysctl, "proc_id", proc_id); sysbus_mmio_map(sysbus_from_qdev(sysctl), 0, 0x10000000); if (is_mpcore) { dev = qdev_create(NULL, is_pb ? "a9mpcore_priv": "realview_mpcore"); qdev_prop_set_uint32(dev, "num-cpu", smp_cpus); qdev_init_nofail(dev); busdev = sysbus_from_qdev(dev); if (is_pb) { realview_binfo.smp_priv_base = 0x1f000000; } else { realview_binfo.smp_priv_base = 0x10100000; } sysbus_mmio_map(busdev, 0, realview_binfo.smp_priv_base); for (n = 0; n < smp_cpus; n++) { sysbus_connect_irq(busdev, n, cpu_irq[n]); } } else { uint32_t gic_addr = is_pb ? 0x1e000000 : 0x10040000; /* For now just create the nIRQ GIC, and ignore the others. */ dev = sysbus_create_simple("realview_gic", gic_addr, cpu_irq[0]); } for (n = 0; n < 64; n++) { pic[n] = qdev_get_gpio_in(dev, n); } sysbus_create_simple("pl050_keyboard", 0x10006000, pic[20]); sysbus_create_simple("pl050_mouse", 0x10007000, pic[21]); sysbus_create_simple("pl011", 0x10009000, pic[12]); sysbus_create_simple("pl011", 0x1000a000, pic[13]); sysbus_create_simple("pl011", 0x1000b000, pic[14]); sysbus_create_simple("pl011", 0x1000c000, pic[15]); /* DMA controller is optional, apparently. 
*/ sysbus_create_simple("pl081", 0x10030000, pic[24]); sysbus_create_simple("sp804", 0x10011000, pic[4]); sysbus_create_simple("sp804", 0x10012000, pic[5]); sysbus_create_simple("pl061", 0x10013000, pic[6]); sysbus_create_simple("pl061", 0x10014000, pic[7]); gpio2 = sysbus_create_simple("pl061", 0x10015000, pic[8]); sysbus_create_simple("pl111", 0x10020000, pic[23]); dev = sysbus_create_varargs("pl181", 0x10005000, pic[17], pic[18], NULL); /* Wire up MMC card detect and read-only signals. These have * to go to both the PL061 GPIO and the sysctl register. * Note that the PL181 orders these lines (readonly,inserted) * and the PL061 has them the other way about. Also the card * detect line is inverted. */ mmc_irq[0] = qemu_irq_split( qdev_get_gpio_in(sysctl, ARM_SYSCTL_GPIO_MMC_WPROT), qdev_get_gpio_in(gpio2, 1)); mmc_irq[1] = qemu_irq_split( qdev_get_gpio_in(sysctl, ARM_SYSCTL_GPIO_MMC_CARDIN), qemu_irq_invert(qdev_get_gpio_in(gpio2, 0))); qdev_connect_gpio_out(dev, 0, mmc_irq[0]); qdev_connect_gpio_out(dev, 1, mmc_irq[1]); sysbus_create_simple("pl031", 0x10017000, pic[10]); if (!is_pb) { dev = sysbus_create_varargs("realview_pci", 0x60000000, pic[48], pic[49], pic[50], pic[51], NULL); pci_bus = (PCIBus *)qdev_get_child_bus(dev, "pci"); if (usb_enabled) { usb_ohci_init_pci(pci_bus, -1); } n = drive_get_max_bus(IF_SCSI); while (n >= 0) { pci_create_simple(pci_bus, -1, "lsi53c895a"); n--; } } for(n = 0; n < nb_nics; n++) { nd = &nd_table[n]; if (!done_nic && (!nd->model || strcmp(nd->model, is_pb ? "lan9118" : "smc91c111") == 0)) { if (is_pb) { lan9118_init(nd, 0x4e000000, pic[28]); } else { smc91c111_init(nd, 0x4e000000, pic[28]); } done_nic = 1; } else { pci_nic_init_nofail(nd, "rtl8139", NULL); } } dev = sysbus_create_simple("realview_i2c", 0x10002000, NULL); i2c = (i2c_bus *)qdev_get_child_bus(dev, "i2c"); i2c_create_slave(i2c, "ds1338", 0x68); /* Memory map for RealView Emulation Baseboard: */ /* 0x10000000 System registers. */ /* 0x10001000 System controller. */ /* 0x10002000 Two-Wire Serial Bus. */ /* 0x10003000 Reserved. */ /* 0x10004000 AACI. */ /* 0x10005000 MCI. */ /* 0x10006000 KMI0. */ /* 0x10007000 KMI1. */ /* 0x10008000 Character LCD. (EB) */ /* 0x10009000 UART0. */ /* 0x1000a000 UART1. */ /* 0x1000b000 UART2. */ /* 0x1000c000 UART3. */ /* 0x1000d000 SSPI. */ /* 0x1000e000 SCI. */ /* 0x1000f000 Reserved. */ /* 0x10010000 Watchdog. */ /* 0x10011000 Timer 0+1. */ /* 0x10012000 Timer 2+3. */ /* 0x10013000 GPIO 0. */ /* 0x10014000 GPIO 1. */ /* 0x10015000 GPIO 2. */ /* 0x10002000 Two-Wire Serial Bus - DVI. (PB) */ /* 0x10017000 RTC. */ /* 0x10018000 DMC. */ /* 0x10019000 PCI controller config. */ /* 0x10020000 CLCD. */ /* 0x10030000 DMA Controller. */ /* 0x10040000 GIC1. (EB) */ /* 0x10050000 GIC2. (EB) */ /* 0x10060000 GIC3. (EB) */ /* 0x10070000 GIC4. (EB) */ /* 0x10080000 SMC. */ /* 0x1e000000 GIC1. (PB) */ /* 0x1e001000 GIC2. (PB) */ /* 0x1e002000 GIC3. (PB) */ /* 0x1e003000 GIC4. (PB) */ /* 0x40000000 NOR flash. */ /* 0x44000000 DoC flash. */ /* 0x48000000 SRAM. */ /* 0x4c000000 Configuration flash. */ /* 0x4e000000 Ethernet. */ /* 0x4f000000 USB. */ /* 0x50000000 PISMO. */ /* 0x54000000 PISMO. */ /* 0x58000000 PISMO. */ /* 0x5c000000 PISMO. */ /* 0x60000000 PCI. */ /* 0x61000000 PCI Self Config. */ /* 0x62000000 PCI Config. */ /* 0x63000000 PCI IO. */ /* 0x64000000 PCI mem 0. */ /* 0x68000000 PCI mem 1. */ /* 0x6c000000 PCI mem 2. */ /* ??? Hack to map an additional page of ram for the secondary CPU startup code. 
I guess this works on real hardware because the BootROM happens to be in ROM/flash or in memory that isn't clobbered until after Linux boots the secondary CPUs. */ ram_offset = qemu_ram_alloc(NULL, "realview.hack", 0x1000); cpu_register_physical_memory(SMP_BOOT_ADDR, 0x1000, ram_offset | IO_MEM_RAM); realview_binfo.ram_size = ram_size; realview_binfo.kernel_filename = kernel_filename; realview_binfo.kernel_cmdline = kernel_cmdline; realview_binfo.initrd_filename = initrd_filename; realview_binfo.nb_cpus = smp_cpus; realview_binfo.board_id = realview_board_id[board_type]; realview_binfo.loader_start = (board_type == BOARD_PB_A8 ? 0x70000000 : 0); arm_load_kernel(first_cpu, &realview_binfo); } | 21,481 |
0 | static int intel_hda_post_load(void *opaque, int version) { IntelHDAState* d = opaque; int i; dprint(d, 1, "%s\n", __FUNCTION__); for (i = 0; i < ARRAY_SIZE(d->st); i++) { if (d->st[i].ctl & 0x02) { intel_hda_parse_bdl(d, &d->st[i]); } } intel_hda_update_irq(d); return 0; } | 21,482 |
0 | int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr, int is_write1, int mmu_idx, int is_softmmu) { uint64_t ptep, pte; target_ulong pde_addr, pte_addr; int error_code, is_dirty, prot, page_size, ret, is_write, is_user; target_phys_addr_t paddr; uint32_t page_offset; target_ulong vaddr, virt_addr; is_user = mmu_idx == MMU_USER_IDX; #if defined(DEBUG_MMU) printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n", addr, is_write1, is_user, env->eip); #endif is_write = is_write1 & 1; if (!(env->cr[0] & CR0_PG_MASK)) { pte = addr; virt_addr = addr & TARGET_PAGE_MASK; prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; page_size = 4096; goto do_mapping; } if (env->cr[4] & CR4_PAE_MASK) { uint64_t pde, pdpe; target_ulong pdpe_addr; #ifdef TARGET_X86_64 if (env->hflags & HF_LMA_MASK) { uint64_t pml4e_addr, pml4e; int32_t sext; /* test virtual address sign extension */ sext = (int64_t)addr >> 47; if (sext != 0 && sext != -1) { env->error_code = 0; env->exception_index = EXCP0D_GPF; return 1; } pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) & env->a20_mask; pml4e = ldq_phys(pml4e_addr); if (!(pml4e & PG_PRESENT_MASK)) { error_code = 0; goto do_fault; } if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) { error_code = PG_ERROR_RSVD_MASK; goto do_fault; } if (!(pml4e & PG_ACCESSED_MASK)) { pml4e |= PG_ACCESSED_MASK; stl_phys_notdirty(pml4e_addr, pml4e); } ptep = pml4e ^ PG_NX_MASK; pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) & env->a20_mask; pdpe = ldq_phys(pdpe_addr); if (!(pdpe & PG_PRESENT_MASK)) { error_code = 0; goto do_fault; } if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) { error_code = PG_ERROR_RSVD_MASK; goto do_fault; } ptep &= pdpe ^ PG_NX_MASK; if (!(pdpe & PG_ACCESSED_MASK)) { pdpe |= PG_ACCESSED_MASK; stl_phys_notdirty(pdpe_addr, pdpe); } } else #endif { /* XXX: load them when cr3 is loaded ? 
*/ pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) & env->a20_mask; pdpe = ldq_phys(pdpe_addr); if (!(pdpe & PG_PRESENT_MASK)) { error_code = 0; goto do_fault; } ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK; } pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) & env->a20_mask; pde = ldq_phys(pde_addr); if (!(pde & PG_PRESENT_MASK)) { error_code = 0; goto do_fault; } if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) { error_code = PG_ERROR_RSVD_MASK; goto do_fault; } ptep &= pde ^ PG_NX_MASK; if (pde & PG_PSE_MASK) { /* 2 MB page */ page_size = 2048 * 1024; ptep ^= PG_NX_MASK; if ((ptep & PG_NX_MASK) && is_write1 == 2) goto do_fault_protect; if (is_user) { if (!(ptep & PG_USER_MASK)) goto do_fault_protect; if (is_write && !(ptep & PG_RW_MASK)) goto do_fault_protect; } else { if ((env->cr[0] & CR0_WP_MASK) && is_write && !(ptep & PG_RW_MASK)) goto do_fault_protect; } is_dirty = is_write && !(pde & PG_DIRTY_MASK); if (!(pde & PG_ACCESSED_MASK) || is_dirty) { pde |= PG_ACCESSED_MASK; if (is_dirty) pde |= PG_DIRTY_MASK; stl_phys_notdirty(pde_addr, pde); } /* align to page_size */ pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff); virt_addr = addr & ~(page_size - 1); } else { /* 4 KB page */ if (!(pde & PG_ACCESSED_MASK)) { pde |= PG_ACCESSED_MASK; stl_phys_notdirty(pde_addr, pde); } pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) & env->a20_mask; pte = ldq_phys(pte_addr); if (!(pte & PG_PRESENT_MASK)) { error_code = 0; goto do_fault; } if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) { error_code = PG_ERROR_RSVD_MASK; goto do_fault; } /* combine pde and pte nx, user and rw protections */ ptep &= pte ^ PG_NX_MASK; ptep ^= PG_NX_MASK; if ((ptep & PG_NX_MASK) && is_write1 == 2) goto do_fault_protect; if (is_user) { if (!(ptep & PG_USER_MASK)) goto do_fault_protect; if (is_write && !(ptep & PG_RW_MASK)) goto do_fault_protect; } else { if ((env->cr[0] & CR0_WP_MASK) && is_write && !(ptep & PG_RW_MASK)) goto do_fault_protect; } is_dirty = is_write && !(pte & PG_DIRTY_MASK); if (!(pte & PG_ACCESSED_MASK) || is_dirty) { pte |= PG_ACCESSED_MASK; if (is_dirty) pte |= PG_DIRTY_MASK; stl_phys_notdirty(pte_addr, pte); } page_size = 4096; virt_addr = addr & ~0xfff; pte = pte & (PHYS_ADDR_MASK | 0xfff); } } else { uint32_t pde; /* page directory entry */ pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask; pde = ldl_phys(pde_addr); if (!(pde & PG_PRESENT_MASK)) { error_code = 0; goto do_fault; } /* if PSE bit is set, then we use a 4MB page */ if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) { page_size = 4096 * 1024; if (is_user) { if (!(pde & PG_USER_MASK)) goto do_fault_protect; if (is_write && !(pde & PG_RW_MASK)) goto do_fault_protect; } else { if ((env->cr[0] & CR0_WP_MASK) && is_write && !(pde & PG_RW_MASK)) goto do_fault_protect; } is_dirty = is_write && !(pde & PG_DIRTY_MASK); if (!(pde & PG_ACCESSED_MASK) || is_dirty) { pde |= PG_ACCESSED_MASK; if (is_dirty) pde |= PG_DIRTY_MASK; stl_phys_notdirty(pde_addr, pde); } pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */ ptep = pte; virt_addr = addr & ~(page_size - 1); } else { if (!(pde & PG_ACCESSED_MASK)) { pde |= PG_ACCESSED_MASK; stl_phys_notdirty(pde_addr, pde); } /* page directory entry */ pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask; pte = ldl_phys(pte_addr); if (!(pte & PG_PRESENT_MASK)) { error_code = 0; goto do_fault; } /* combine pde and pte user and rw protections */ ptep = pte & pde; if (is_user) { if 
(!(ptep & PG_USER_MASK)) goto do_fault_protect; if (is_write && !(ptep & PG_RW_MASK)) goto do_fault_protect; } else { if ((env->cr[0] & CR0_WP_MASK) && is_write && !(ptep & PG_RW_MASK)) goto do_fault_protect; } is_dirty = is_write && !(pte & PG_DIRTY_MASK); if (!(pte & PG_ACCESSED_MASK) || is_dirty) { pte |= PG_ACCESSED_MASK; if (is_dirty) pte |= PG_DIRTY_MASK; stl_phys_notdirty(pte_addr, pte); } page_size = 4096; virt_addr = addr & ~0xfff; } } /* the page can be put in the TLB */ prot = PAGE_READ; if (!(ptep & PG_NX_MASK)) prot |= PAGE_EXEC; if (pte & PG_DIRTY_MASK) { /* only set write access if already dirty... otherwise wait for dirty access */ if (is_user) { if (ptep & PG_RW_MASK) prot |= PAGE_WRITE; } else { if (!(env->cr[0] & CR0_WP_MASK) || (ptep & PG_RW_MASK)) prot |= PAGE_WRITE; } } do_mapping: pte = pte & env->a20_mask; /* Even if 4MB pages, we map only one 4KB page in the cache to avoid filling it too fast */ page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1); paddr = (pte & TARGET_PAGE_MASK) + page_offset; vaddr = virt_addr + page_offset; ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu); return ret; do_fault_protect: error_code = PG_ERROR_P_MASK; do_fault: error_code |= (is_write << PG_ERROR_W_BIT); if (is_user) error_code |= PG_ERROR_U_MASK; if (is_write1 == 2 && (env->efer & MSR_EFER_NXE) && (env->cr[4] & CR4_PAE_MASK)) error_code |= PG_ERROR_I_D_MASK; if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) { /* cr2 is not modified in case of exceptions */ stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), addr); } else { env->cr[2] = addr; } env->error_code = error_code; env->exception_index = EXCP0E_PAGE; return 1; } | 21,484 |
0 | static CharDriverState *qemu_chr_open_ringbuf(const char *id, ChardevBackend *backend, ChardevReturn *ret, Error **errp) { ChardevRingbuf *opts = backend->u.ringbuf; CharDriverState *chr; RingBufCharDriver *d; chr = qemu_chr_alloc(); d = g_malloc(sizeof(*d)); d->size = opts->has_size ? opts->size : 65536; /* The size must be power of 2 */ if (d->size & (d->size - 1)) { error_setg(errp, "size of ringbuf chardev must be power of two"); goto fail; } d->prod = 0; d->cons = 0; d->cbuf = g_malloc0(d->size); chr->opaque = d; chr->chr_write = ringbuf_chr_write; chr->chr_close = ringbuf_chr_close; return chr; fail: g_free(d); g_free(chr); return NULL; } | 21,485 |
0 | int avpriv_mpa_decode_header(AVCodecContext *avctx, uint32_t head, int *sample_rate, int *channels, int *frame_size, int *bit_rate) { MPADecodeHeader s1, *s = &s1; if (ff_mpa_check_header(head) != 0) return -1; if (avpriv_mpegaudio_decode_header(s, head) != 0) { return -1; } switch(s->layer) { case 1: avctx->codec_id = AV_CODEC_ID_MP1; *frame_size = 384; break; case 2: avctx->codec_id = AV_CODEC_ID_MP2; *frame_size = 1152; break; default: case 3: if (avctx->codec_id != AV_CODEC_ID_MP3ADU) avctx->codec_id = AV_CODEC_ID_MP3; if (s->lsf) *frame_size = 576; else *frame_size = 1152; break; } *sample_rate = s->sample_rate; *channels = s->nb_channels; *bit_rate = s->bit_rate; return s->frame_size; } | 21,486 |
0 | static inline void gen_speundef (DisasContext *ctx) { RET_INVAL(ctx); } | 21,488 |
0 | int kqemu_init(CPUState *env) { struct kqemu_init kinit; int ret, version; #ifdef _WIN32 DWORD temp; #endif if (!kqemu_allowed) return -1; #ifdef _WIN32 kqemu_fd = CreateFile(KQEMU_DEVICE, GENERIC_WRITE | GENERIC_READ, FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL); if (kqemu_fd == KQEMU_INVALID_FD) { fprintf(stderr, "Could not open '%s' - QEMU acceleration layer not activated: %lu\n", KQEMU_DEVICE, GetLastError()); return -1; } #else kqemu_fd = open(KQEMU_DEVICE, O_RDWR); if (kqemu_fd == KQEMU_INVALID_FD) { fprintf(stderr, "Could not open '%s' - QEMU acceleration layer not activated: %s\n", KQEMU_DEVICE, strerror(errno)); return -1; } #endif version = 0; #ifdef _WIN32 DeviceIoControl(kqemu_fd, KQEMU_GET_VERSION, NULL, 0, &version, sizeof(version), &temp, NULL); #else ioctl(kqemu_fd, KQEMU_GET_VERSION, &version); #endif if (version != KQEMU_VERSION) { fprintf(stderr, "Version mismatch between kqemu module and qemu (%08x %08x) - disabling kqemu use\n", version, KQEMU_VERSION); goto fail; } pages_to_flush = qemu_vmalloc(KQEMU_MAX_PAGES_TO_FLUSH * sizeof(uint64_t)); if (!pages_to_flush) goto fail; ram_pages_to_update = qemu_vmalloc(KQEMU_MAX_RAM_PAGES_TO_UPDATE * sizeof(uint64_t)); if (!ram_pages_to_update) goto fail; modified_ram_pages = qemu_vmalloc(KQEMU_MAX_MODIFIED_RAM_PAGES * sizeof(uint64_t)); if (!modified_ram_pages) goto fail; modified_ram_pages_table = qemu_mallocz(kqemu_phys_ram_size >> TARGET_PAGE_BITS); if (!modified_ram_pages_table) goto fail; memset(&kinit, 0, sizeof(kinit)); /* set the paddings to zero */ kinit.ram_base = kqemu_phys_ram_base; kinit.ram_size = kqemu_phys_ram_size; kinit.ram_dirty = phys_ram_dirty; kinit.pages_to_flush = pages_to_flush; kinit.ram_pages_to_update = ram_pages_to_update; kinit.modified_ram_pages = modified_ram_pages; #ifdef _WIN32 ret = DeviceIoControl(kqemu_fd, KQEMU_INIT, &kinit, sizeof(kinit), NULL, 0, &temp, NULL) == TRUE ? 0 : -1; #else ret = ioctl(kqemu_fd, KQEMU_INIT, &kinit); #endif if (ret < 0) { fprintf(stderr, "Error %d while initializing QEMU acceleration layer - disabling it for now\n", ret); fail: kqemu_closefd(kqemu_fd); kqemu_fd = KQEMU_INVALID_FD; return -1; } kqemu_update_cpuid(env); env->kqemu_enabled = kqemu_allowed; nb_pages_to_flush = 0; nb_ram_pages_to_update = 0; qpi_init(); return 0; } | 21,489 |
0 | POWERPC_FAMILY(POWER9)(ObjectClass *oc, void *data) { DeviceClass *dc = DEVICE_CLASS(oc); PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); dc->fw_name = "PowerPC,POWER9"; dc->desc = "POWER9"; dc->props = powerpc_servercpu_properties; pcc->pvr_match = ppc_pvr_match_power9; pcc->pcr_mask = PCR_COMPAT_2_05 | PCR_COMPAT_2_06 | PCR_COMPAT_2_07; pcc->pcr_supported = PCR_COMPAT_3_00 | PCR_COMPAT_2_07 | PCR_COMPAT_2_06 | PCR_COMPAT_2_05; pcc->init_proc = init_proc_POWER9; pcc->check_pow = check_pow_nocheck; pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | PPC_FLOAT_FRSQRTES | PPC_FLOAT_STFIWX | PPC_FLOAT_EXT | PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_64B | PPC_64BX | PPC_ALTIVEC | PPC_SEGMENT_64B | PPC_SLBI | PPC_POPCNTB | PPC_POPCNTWD | PPC_CILDST; pcc->insns_flags2 = PPC2_VSX | PPC2_VSX207 | PPC2_DFP | PPC2_DBRX | PPC2_PERM_ISA206 | PPC2_DIVE_ISA206 | PPC2_ATOMIC_ISA206 | PPC2_FP_CVT_ISA206 | PPC2_FP_TST_ISA206 | PPC2_BCTAR_ISA207 | PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207 | PPC2_ISA205 | PPC2_ISA207S | PPC2_FP_CVT_S64 | PPC2_TM | PPC2_PM_ISA206 | PPC2_ISA300; pcc->msr_mask = (1ull << MSR_SF) | (1ull << MSR_TM) | (1ull << MSR_VR) | (1ull << MSR_VSX) | (1ull << MSR_EE) | (1ull << MSR_PR) | (1ull << MSR_FP) | (1ull << MSR_ME) | (1ull << MSR_FE0) | (1ull << MSR_SE) | (1ull << MSR_DE) | (1ull << MSR_FE1) | (1ull << MSR_IR) | (1ull << MSR_DR) | (1ull << MSR_PMM) | (1ull << MSR_RI) | (1ull << MSR_LE); pcc->mmu_model = POWERPC_MMU_3_00; #if defined(CONFIG_SOFTMMU) pcc->handle_mmu_fault = ppc_hash64_handle_mmu_fault; /* segment page size remain the same */ pcc->sps = &POWER7_POWER8_sps; #endif pcc->excp_model = POWERPC_EXCP_POWER8; pcc->bus_model = PPC_FLAGS_INPUT_POWER7; pcc->bfd_mach = bfd_mach_ppc64; pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE | POWERPC_FLAG_BE | POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK | POWERPC_FLAG_CFAR | POWERPC_FLAG_VSX | POWERPC_FLAG_TM; pcc->l1_dcache_size = 0x8000; pcc->l1_icache_size = 0x8000; pcc->interrupts_big_endian = ppc_cpu_interrupts_big_endian_lpcr; } | 21,490 |
0 | int cpu_get_dump_info(ArchDumpInfo *info, const GuestPhysBlockList *guest_phys_blocks) { bool lma = false; GuestPhysBlock *block; #ifdef TARGET_X86_64 X86CPU *first_x86_cpu = X86_CPU(first_cpu); lma = !!(first_x86_cpu->env.hflags & HF_LMA_MASK); #endif if (lma) { info->d_machine = EM_X86_64; } else { info->d_machine = EM_386; } info->d_endian = ELFDATA2LSB; if (lma) { info->d_class = ELFCLASS64; } else { info->d_class = ELFCLASS32; QTAILQ_FOREACH(block, &guest_phys_blocks->head, next) { if (block->target_end > UINT_MAX) { /* The memory size is greater than 4G */ info->d_class = ELFCLASS64; break; } } } return 0; } | 21,491 |
0 | static void handler_audit(Monitor *mon, const mon_cmd_t *cmd, int ret) { if (ret && !monitor_has_error(mon)) { /* * If it returns failure, it must have passed on error. * * Action: Report an internal error to the client if in QMP. */ if (monitor_ctrl_mode(mon)) { qerror_report(QERR_UNDEFINED_ERROR); } MON_DEBUG("command '%s' returned failure but did not pass an error\n", cmd->name); } #ifdef CONFIG_DEBUG_MONITOR if (!ret && monitor_has_error(mon)) { /* * If it returns success, it must not have passed an error. * * Action: Report the passed error to the client. */ MON_DEBUG("command '%s' returned success but passed an error\n", cmd->name); } if (mon_print_count_get(mon) > 0 && strcmp(cmd->name, "info") != 0) { /* * Handlers should not call Monitor print functions. * * Action: Ignore them in QMP. * * (XXX: we don't check any 'info' or 'query' command here * because the user print function _is_ called by do_info(), hence * we will trigger this check. This problem will go away when we * make 'query' commands real and kill do_info()) */ MON_DEBUG("command '%s' called print functions %d time(s)\n", cmd->name, mon_print_count_get(mon)); } #endif } | 21,493 |