project (stringclasses, 2 values) | commit_id (stringlengths, 40) | target (int64, 0-1) | func (stringlengths, 26-142k) | idx (int64, 0-27.3k)
---|---|---|---|---|
FFmpeg | a625e13208ad0ebf1554aa73c9bf41452520f176 | 0 | static void av_always_inline filter_mb_edgeh( uint8_t *pix, int stride, int16_t bS[4], unsigned int qp, H264Context *h ) {
const int qp_bd_offset = 6 * (h->sps.bit_depth_luma - 8);
const unsigned int index_a = qp - qp_bd_offset + h->slice_alpha_c0_offset;
const int alpha = alpha_table[index_a];
const int beta = beta_table[qp - qp_bd_offset + h->slice_beta_offset];
if (alpha ==0 || beta == 0) return;
if( bS[0] < 4 ) {
int8_t tc[4];
tc[0] = tc0_table[index_a][bS[0]];
tc[1] = tc0_table[index_a][bS[1]];
tc[2] = tc0_table[index_a][bS[2]];
tc[3] = tc0_table[index_a][bS[3]];
h->h264dsp.h264_v_loop_filter_luma(pix, stride, alpha, beta, tc);
} else {
h->h264dsp.h264_v_loop_filter_luma_intra(pix, stride, alpha, beta);
}
}
| 21,986 |
FFmpeg | 3a8c95f730732b9f1ffacdbfbf79a01b202a67af | 0 | static void show_format(AVFormatContext *fmt_ctx)
{
AVDictionaryEntry *tag = NULL;
char val_str[128];
int64_t size = fmt_ctx->pb ? avio_size(fmt_ctx->pb) : -1;
print_format_entry(NULL, "[FORMAT]");
print_format_entry("filename", fmt_ctx->filename);
snprintf(val_str, sizeof(val_str) - 1, "%d", fmt_ctx->nb_streams);
print_format_entry("nb_streams", val_str);
print_format_entry("format_name", fmt_ctx->iformat->name);
print_format_entry("format_long_name", fmt_ctx->iformat->long_name);
print_format_entry("start_time",
time_value_string(val_str, sizeof(val_str),
fmt_ctx->start_time, &AV_TIME_BASE_Q));
print_format_entry("duration",
time_value_string(val_str, sizeof(val_str),
fmt_ctx->duration, &AV_TIME_BASE_Q));
print_format_entry("size",
size >= 0 ? value_string(val_str, sizeof(val_str),
size, unit_byte_str)
: "unknown");
print_format_entry("bit_rate",
value_string(val_str, sizeof(val_str),
fmt_ctx->bit_rate, unit_bit_per_second_str));
while ((tag = av_dict_get(fmt_ctx->metadata, "", tag,
AV_DICT_IGNORE_SUFFIX))) {
snprintf(val_str, sizeof(val_str) - 1, "TAG:%s", tag->key);
print_format_entry(val_str, tag->value);
}
print_format_entry(NULL, "[/FORMAT]");
}
| 21,987 |
FFmpeg | 1918057c8a3bc37c27e476d16736fe8bc76afd34 | 0 | static void spatial_compose97i(IDWTELEM *buffer, int width, int height, int stride){
dwt_compose_t cs;
spatial_compose97i_init(&cs, buffer, height, stride);
while(cs.y <= height)
spatial_compose97i_dy(&cs, buffer, width, height, stride);
}
| 21,988 |
FFmpeg | 70143a3954e1c4412efb2bf1a3a818adea2d3abf | 0 | static void *get_surface(const AVFrame *frame)
{
return frame->data[3];
}
| 21,989 |
qemu | eeae2e7b52255dae0976a027b6e11274990c708d | 1 | static uint32_t pci_reg_read4(void *opaque, target_phys_addr_t addr)
{
PPCE500PCIState *pci = opaque;
unsigned long win;
uint32_t value = 0;
win = addr & 0xfe0;
switch (win) {
case PPCE500_PCI_OW1:
case PPCE500_PCI_OW2:
case PPCE500_PCI_OW3:
case PPCE500_PCI_OW4:
switch (addr & 0xC) {
case PCI_POTAR:
value = pci->pob[(addr >> 5) & 0x7].potar;
break;
case PCI_POTEAR:
value = pci->pob[(addr >> 5) & 0x7].potear;
break;
case PCI_POWBAR:
value = pci->pob[(addr >> 5) & 0x7].powbar;
break;
case PCI_POWAR:
value = pci->pob[(addr >> 5) & 0x7].powar;
break;
default:
break;
}
break;
case PPCE500_PCI_IW3:
case PPCE500_PCI_IW2:
case PPCE500_PCI_IW1:
switch (addr & 0xC) {
case PCI_PITAR:
value = pci->pib[(addr >> 5) & 0x3].pitar;
break;
case PCI_PIWBAR:
value = pci->pib[(addr >> 5) & 0x3].piwbar;
break;
case PCI_PIWBEAR:
value = pci->pib[(addr >> 5) & 0x3].piwbear;
break;
case PCI_PIWAR:
value = pci->pib[(addr >> 5) & 0x3].piwar;
break;
default:
break;
};
break;
case PPCE500_PCI_GASKET_TIMR:
value = pci->gasket_time;
break;
default:
break;
}
pci_debug("%s: win:%lx(addr:" TARGET_FMT_plx ") -> value:%x\n", __func__,
win, addr, value);
return value;
}
| 21,990 |
qemu | 5bfb723f07fde2caafa90cb40c102a4e36dfea9e | 1 | static void win32_rearm_timer(struct qemu_alarm_timer *t,
int64_t nearest_delta_ns)
{
HANDLE hTimer = t->timer;
int nearest_delta_ms;
BOOLEAN success;
nearest_delta_ms = (nearest_delta_ns + 999999) / 1000000;
if (nearest_delta_ms < 1) {
nearest_delta_ms = 1;
}
success = ChangeTimerQueueTimer(NULL,
hTimer,
nearest_delta_ms,
3600000);
if (!success) {
fprintf(stderr, "Failed to rearm win32 alarm timer: %ld\n",
GetLastError());
exit(-1);
}
}
| 21,991 |
FFmpeg | d50aa006fb3430bedc3872ba10e028a714499625 | 1 | static void add_entry(TiffEncoderContext * s,
enum TiffTags tag, enum TiffTypes type, int count,
const void *ptr_val)
{
uint8_t *entries_ptr = s->entries + 12 * s->num_entries;
av_assert0(s->num_entries < TIFF_MAX_ENTRY);
bytestream_put_le16(&entries_ptr, tag);
bytestream_put_le16(&entries_ptr, type);
bytestream_put_le32(&entries_ptr, count);
if (type_sizes[type] * count <= 4) {
tnput(&entries_ptr, count, ptr_val, type, 0);
} else {
bytestream_put_le32(&entries_ptr, *s->buf - s->buf_start);
check_size(s, count * type_sizes2[type]);
tnput(s->buf, count, ptr_val, type, 0);
}
s->num_entries++;
}
| 21,992 |
qemu | 240ce26a0533a6e5ee472789fbfbd9f7f939197e | 1 | static int decode_extended_mips16_opc (CPUMIPSState *env, DisasContext *ctx,
int *is_branch)
{
int extend = cpu_lduw_code(env, ctx->pc + 2);
int op, rx, ry, funct, sa;
int16_t imm, offset;
ctx->opcode = (ctx->opcode << 16) | extend;
op = (ctx->opcode >> 11) & 0x1f;
sa = (ctx->opcode >> 22) & 0x1f;
funct = (ctx->opcode >> 8) & 0x7;
rx = xlat((ctx->opcode >> 8) & 0x7);
ry = xlat((ctx->opcode >> 5) & 0x7);
offset = imm = (int16_t) (((ctx->opcode >> 16) & 0x1f) << 11
| ((ctx->opcode >> 21) & 0x3f) << 5
| (ctx->opcode & 0x1f));
/* The extended opcodes cleverly reuse the opcodes from their 16-bit
counterparts. */
switch (op) {
case M16_OPC_ADDIUSP:
gen_arith_imm(ctx, OPC_ADDIU, rx, 29, imm);
break;
case M16_OPC_ADDIUPC:
gen_addiupc(ctx, rx, imm, 0, 1);
break;
case M16_OPC_B:
gen_compute_branch(ctx, OPC_BEQ, 4, 0, 0, offset << 1);
/* No delay slot, so just process as a normal instruction */
break;
case M16_OPC_BEQZ:
gen_compute_branch(ctx, OPC_BEQ, 4, rx, 0, offset << 1);
/* No delay slot, so just process as a normal instruction */
break;
case M16_OPC_BNEQZ:
gen_compute_branch(ctx, OPC_BNE, 4, rx, 0, offset << 1);
/* No delay slot, so just process as a normal instruction */
break;
case M16_OPC_SHIFT:
switch (ctx->opcode & 0x3) {
case 0x0:
gen_shift_imm(ctx, OPC_SLL, rx, ry, sa);
break;
case 0x1:
#if defined(TARGET_MIPS64)
check_mips_64(ctx);
gen_shift_imm(ctx, OPC_DSLL, rx, ry, sa);
#else
generate_exception(ctx, EXCP_RI);
#endif
break;
case 0x2:
gen_shift_imm(ctx, OPC_SRL, rx, ry, sa);
break;
case 0x3:
gen_shift_imm(ctx, OPC_SRA, rx, ry, sa);
break;
}
break;
#if defined(TARGET_MIPS64)
case M16_OPC_LD:
check_mips_64(ctx);
gen_ld(ctx, OPC_LD, ry, rx, offset);
break;
#endif
case M16_OPC_RRIA:
imm = ctx->opcode & 0xf;
imm = imm | ((ctx->opcode >> 20) & 0x7f) << 4;
imm = imm | ((ctx->opcode >> 16) & 0xf) << 11;
imm = (int16_t) (imm << 1) >> 1;
if ((ctx->opcode >> 4) & 0x1) {
#if defined(TARGET_MIPS64)
check_mips_64(ctx);
gen_arith_imm(ctx, OPC_DADDIU, ry, rx, imm);
#else
generate_exception(ctx, EXCP_RI);
#endif
} else {
gen_arith_imm(ctx, OPC_ADDIU, ry, rx, imm);
}
break;
case M16_OPC_ADDIU8:
gen_arith_imm(ctx, OPC_ADDIU, rx, rx, imm);
break;
case M16_OPC_SLTI:
gen_slt_imm(ctx, OPC_SLTI, 24, rx, imm);
break;
case M16_OPC_SLTIU:
gen_slt_imm(ctx, OPC_SLTIU, 24, rx, imm);
break;
case M16_OPC_I8:
switch (funct) {
case I8_BTEQZ:
gen_compute_branch(ctx, OPC_BEQ, 4, 24, 0, offset << 1);
break;
case I8_BTNEZ:
gen_compute_branch(ctx, OPC_BNE, 4, 24, 0, offset << 1);
break;
case I8_SWRASP:
gen_st(ctx, OPC_SW, 31, 29, imm);
break;
case I8_ADJSP:
gen_arith_imm(ctx, OPC_ADDIU, 29, 29, imm);
break;
case I8_SVRS:
{
int xsregs = (ctx->opcode >> 24) & 0x7;
int aregs = (ctx->opcode >> 16) & 0xf;
int do_ra = (ctx->opcode >> 6) & 0x1;
int do_s0 = (ctx->opcode >> 5) & 0x1;
int do_s1 = (ctx->opcode >> 4) & 0x1;
int framesize = (((ctx->opcode >> 20) & 0xf) << 4
| (ctx->opcode & 0xf)) << 3;
if (ctx->opcode & (1 << 7)) {
gen_mips16_save(ctx, xsregs, aregs,
do_ra, do_s0, do_s1,
framesize);
} else {
gen_mips16_restore(ctx, xsregs, aregs,
do_ra, do_s0, do_s1,
framesize);
}
}
break;
default:
generate_exception(ctx, EXCP_RI);
break;
}
break;
case M16_OPC_LI:
tcg_gen_movi_tl(cpu_gpr[rx], (uint16_t) imm);
break;
case M16_OPC_CMPI:
tcg_gen_xori_tl(cpu_gpr[24], cpu_gpr[rx], (uint16_t) imm);
break;
#if defined(TARGET_MIPS64)
case M16_OPC_SD:
gen_st(ctx, OPC_SD, ry, rx, offset);
break;
#endif
case M16_OPC_LB:
gen_ld(ctx, OPC_LB, ry, rx, offset);
break;
case M16_OPC_LH:
gen_ld(ctx, OPC_LH, ry, rx, offset);
break;
case M16_OPC_LWSP:
gen_ld(ctx, OPC_LW, rx, 29, offset);
break;
case M16_OPC_LW:
gen_ld(ctx, OPC_LW, ry, rx, offset);
break;
case M16_OPC_LBU:
gen_ld(ctx, OPC_LBU, ry, rx, offset);
break;
case M16_OPC_LHU:
gen_ld(ctx, OPC_LHU, ry, rx, offset);
break;
case M16_OPC_LWPC:
gen_ld(ctx, OPC_LWPC, rx, 0, offset);
break;
#if defined(TARGET_MIPS64)
case M16_OPC_LWU:
gen_ld(ctx, OPC_LWU, ry, rx, offset);
break;
#endif
case M16_OPC_SB:
gen_st(ctx, OPC_SB, ry, rx, offset);
break;
case M16_OPC_SH:
gen_st(ctx, OPC_SH, ry, rx, offset);
break;
case M16_OPC_SWSP:
gen_st(ctx, OPC_SW, rx, 29, offset);
break;
case M16_OPC_SW:
gen_st(ctx, OPC_SW, ry, rx, offset);
break;
#if defined(TARGET_MIPS64)
case M16_OPC_I64:
decode_i64_mips16(ctx, ry, funct, offset, 1);
break;
#endif
default:
generate_exception(ctx, EXCP_RI);
break;
}
return 4;
}
| 21,993 |
qemu | 6baebed7698a37a0ac5168faf26023426b0ac940 | 1 | static void set_pixel_format(VncState *vs,
int bits_per_pixel, int depth,
int big_endian_flag, int true_color_flag,
int red_max, int green_max, int blue_max,
int red_shift, int green_shift, int blue_shift)
{
if (!true_color_flag) {
vnc_client_error(vs);
return;
}
vs->clientds = vs->serverds;
vs->clientds.pf.rmax = red_max;
count_bits(vs->clientds.pf.rbits, red_max);
vs->clientds.pf.rshift = red_shift;
vs->clientds.pf.rmask = red_max << red_shift;
vs->clientds.pf.gmax = green_max;
count_bits(vs->clientds.pf.gbits, green_max);
vs->clientds.pf.gshift = green_shift;
vs->clientds.pf.gmask = green_max << green_shift;
vs->clientds.pf.bmax = blue_max;
count_bits(vs->clientds.pf.bbits, blue_max);
vs->clientds.pf.bshift = blue_shift;
vs->clientds.pf.bmask = blue_max << blue_shift;
vs->clientds.pf.bits_per_pixel = bits_per_pixel;
vs->clientds.pf.bytes_per_pixel = bits_per_pixel / 8;
vs->clientds.pf.depth = bits_per_pixel == 32 ? 24 : bits_per_pixel;
vs->clientds.flags = big_endian_flag ? QEMU_BIG_ENDIAN_FLAG : 0x00;
set_pixel_conversion(vs);
vga_hw_invalidate();
vga_hw_update();
}
| 21,994 |
qemu | 258dc7c96bb4b7ca71d5bee811e73933310e168c | 1 | int virtio_load(VirtIODevice *vdev, QEMUFile *f)
{
int num, i, ret;
uint32_t features;
uint32_t supported_features =
vdev->binding->get_features(vdev->binding_opaque);
if (vdev->binding->load_config) {
ret = vdev->binding->load_config(vdev->binding_opaque, f);
if (ret)
return ret;
}
qemu_get_8s(f, &vdev->status);
qemu_get_8s(f, &vdev->isr);
qemu_get_be16s(f, &vdev->queue_sel);
qemu_get_be32s(f, &features);
if (features & ~supported_features) {
fprintf(stderr, "Features 0x%x unsupported. Allowed features: 0x%x\n",
features, supported_features);
return -1;
}
if (vdev->set_features)
vdev->set_features(vdev, features);
vdev->guest_features = features;
vdev->config_len = qemu_get_be32(f);
qemu_get_buffer(f, vdev->config, vdev->config_len);
num = qemu_get_be32(f);
for (i = 0; i < num; i++) {
vdev->vq[i].vring.num = qemu_get_be32(f);
vdev->vq[i].pa = qemu_get_be64(f);
qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);
if (vdev->vq[i].pa) {
virtqueue_init(&vdev->vq[i]);
}
num_heads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx;
/* Check it isn't doing very strange things with descriptor numbers. */
if (num_heads > vdev->vq[i].vring.num) {
fprintf(stderr, "VQ %d size 0x%x Guest index 0x%x "
"inconsistent with Host index 0x%x: delta 0x%x\n",
i, vdev->vq[i].vring.num,
vring_avail_idx(&vdev->vq[i]),
vdev->vq[i].last_avail_idx, num_heads);
return -1;
}
if (vdev->binding->load_queue) {
ret = vdev->binding->load_queue(vdev->binding_opaque, i, f);
if (ret)
return ret;
}
}
virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
return 0;
}
| 21,996 |
FFmpeg | 77a416e8aab77058b542030870fd7178b62d2a62 | 1 | static inline void RENAME(hyscale)(uint16_t *dst, int dstWidth, uint8_t *src, int srcW, int xInc,
int flags, int canMMX2BeUsed, int16_t *hLumFilter,
int16_t *hLumFilterPos, int hLumFilterSize, void *funnyYCode,
int srcFormat, uint8_t *formatConvBuffer, int16_t *mmx2Filter,
int32_t *mmx2FilterPos)
{
if(srcFormat==IMGFMT_YUY2)
{
RENAME(yuy2ToY)(formatConvBuffer, src, srcW);
src= formatConvBuffer;
}
else if(srcFormat==IMGFMT_UYVY)
{
RENAME(uyvyToY)(formatConvBuffer, src, srcW);
src= formatConvBuffer;
}
else if(srcFormat==IMGFMT_BGR32)
{
RENAME(bgr32ToY)(formatConvBuffer, src, srcW);
src= formatConvBuffer;
}
else if(srcFormat==IMGFMT_BGR24)
{
RENAME(bgr24ToY)(formatConvBuffer, src, srcW);
src= formatConvBuffer;
}
else if(srcFormat==IMGFMT_BGR16)
{
RENAME(bgr16ToY)(formatConvBuffer, src, srcW);
src= formatConvBuffer;
}
else if(srcFormat==IMGFMT_BGR15)
{
RENAME(bgr15ToY)(formatConvBuffer, src, srcW);
src= formatConvBuffer;
}
else if(srcFormat==IMGFMT_RGB32)
{
RENAME(rgb32ToY)(formatConvBuffer, src, srcW);
src= formatConvBuffer;
}
else if(srcFormat==IMGFMT_RGB24)
{
RENAME(rgb24ToY)(formatConvBuffer, src, srcW);
src= formatConvBuffer;
}
#ifdef HAVE_MMX
// use the new MMX scaler if the MMX2 one can't be used (it's faster than the x86 asm one)
if(!(flags&SWS_FAST_BILINEAR) || (!canMMX2BeUsed))
#else
if(!(flags&SWS_FAST_BILINEAR))
#endif
{
RENAME(hScale)(dst, dstWidth, src, srcW, xInc, hLumFilter, hLumFilterPos, hLumFilterSize);
}
else // Fast Bilinear upscale / crap downscale
{
#ifdef ARCH_X86
#ifdef HAVE_MMX2
int i;
if(canMMX2BeUsed)
{
asm volatile(
"pxor %%mm7, %%mm7 \n\t"
"movl %0, %%ecx \n\t"
"movl %1, %%edi \n\t"
"movl %2, %%edx \n\t"
"movl %3, %%ebx \n\t"
"xorl %%eax, %%eax \n\t" // i
PREFETCH" (%%ecx) \n\t"
PREFETCH" 32(%%ecx) \n\t"
PREFETCH" 64(%%ecx) \n\t"
#define FUNNY_Y_CODE \
"movl (%%ebx), %%esi \n\t"\
"call *%4 \n\t"\
"addl (%%ebx, %%eax), %%ecx \n\t"\
"addl %%eax, %%edi \n\t"\
"xorl %%eax, %%eax \n\t"\
FUNNY_Y_CODE
FUNNY_Y_CODE
FUNNY_Y_CODE
FUNNY_Y_CODE
FUNNY_Y_CODE
FUNNY_Y_CODE
FUNNY_Y_CODE
FUNNY_Y_CODE
:: "m" (src), "m" (dst), "m" (mmx2Filter), "m" (mmx2FilterPos),
"m" (funnyYCode)
: "%eax", "%ebx", "%ecx", "%edx", "%esi", "%edi"
);
for(i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--) dst[i] = src[srcW-1]*128;
}
else
{
#endif
//NO MMX just normal asm ...
asm volatile(
"xorl %%eax, %%eax \n\t" // i
"xorl %%ebx, %%ebx \n\t" // xx
"xorl %%ecx, %%ecx \n\t" // 2*xalpha
".balign 16 \n\t"
"1: \n\t"
"movzbl (%0, %%ebx), %%edi \n\t" //src[xx]
"movzbl 1(%0, %%ebx), %%esi \n\t" //src[xx+1]
"subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
"imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
"shll $16, %%edi \n\t"
"addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
"movl %1, %%edi \n\t"
"shrl $9, %%esi \n\t"
"movw %%si, (%%edi, %%eax, 2) \n\t"
"addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
"adcl %3, %%ebx \n\t" //xx+= xInc>>8 + carry
"movzbl (%0, %%ebx), %%edi \n\t" //src[xx]
"movzbl 1(%0, %%ebx), %%esi \n\t" //src[xx+1]
"subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
"imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
"shll $16, %%edi \n\t"
"addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
"movl %1, %%edi \n\t"
"shrl $9, %%esi \n\t"
"movw %%si, 2(%%edi, %%eax, 2) \n\t"
"addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
"adcl %3, %%ebx \n\t" //xx+= xInc>>8 + carry
"addl $2, %%eax \n\t"
"cmpl %2, %%eax \n\t"
" jb 1b \n\t"
:: "r" (src), "m" (dst), "m" (dstWidth), "m" (xInc>>16), "m" (xInc&0xFFFF)
: "%eax", "%ebx", "%ecx", "%edi", "%esi"
);
#ifdef HAVE_MMX2
} //if MMX2 cant be used
#endif
#else
int i;
unsigned int xpos=0;
for(i=0;i<dstWidth;i++)
{
register unsigned int xx=xpos>>16;
register unsigned int xalpha=(xpos&0xFFFF)>>9;
dst[i]= (src[xx]<<7) + (src[xx+1] - src[xx])*xalpha;
xpos+=xInc;
}
#endif
}
}
| 21,997 |
qemu | b57482d7a0fe669aeb6f0c3c3503d143b9db89dd | 1 | qcrypto_tls_session_check_credentials(QCryptoTLSSession *session,
Error **errp)
{
if (object_dynamic_cast(OBJECT(session->creds),
TYPE_QCRYPTO_TLS_CREDS_ANON)) {
return 0;
} else if (object_dynamic_cast(OBJECT(session->creds),
TYPE_QCRYPTO_TLS_CREDS_X509)) {
if (session->creds->verifyPeer) {
return qcrypto_tls_session_check_certificate(session,
errp);
} else {
return 0;
}
} else {
error_setg(errp, "Unexpected credential type %s",
object_get_typename(OBJECT(session->creds)));
return -1;
}
}
| 21,998 |
qemu | 6c2f9a15dfc8c18ba94defb0f819109902a817cb | 1 | static QObject *qmp_output_first(QmpOutputVisitor *qov)
{
QStackEntry *e = QTAILQ_LAST(&qov->stack, QStack);
/* FIXME - find a better way to deal with NULL values */
if (!e) {
return NULL;
}
return e->value;
}
| 21,999 |
FFmpeg | 4860625236475da20d0da954017e8c7fe412dea2 | 1 | yuv2422_2_c_template(SwsContext *c, const int16_t *buf[2],
const int16_t *ubuf[2], const int16_t *vbuf[2],
const int16_t *abuf[2], uint8_t *dest, int dstW,
int yalpha, int uvalpha, int y,
enum PixelFormat target)
{
const int16_t *buf0 = buf[0], *buf1 = buf[1],
*ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
*vbuf0 = vbuf[0], *vbuf1 = vbuf[1];
int yalpha1 = 4095 - yalpha;
int uvalpha1 = 4095 - uvalpha;
int i;
for (i = 0; i < ((dstW + 1) >> 1); i++) {
int Y1 = (buf0[i * 2] * yalpha1 + buf1[i * 2] * yalpha) >> 19;
int Y2 = (buf0[i * 2 + 1] * yalpha1 + buf1[i * 2 + 1] * yalpha) >> 19;
int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha) >> 19;
int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha) >> 19;
Y1 = av_clip_uint8(Y1);
Y2 = av_clip_uint8(Y2);
U = av_clip_uint8(U);
V = av_clip_uint8(V);
output_pixels(i * 4, Y1, U, Y2, V);
}
}
| 22,000 |
qemu | e23a1b33b53d25510320b26d9f154e19c6c99725 | 1 | PCIDevice *pci_create_simple(PCIBus *bus, int devfn, const char *name)
{
PCIDevice *dev = pci_create(bus, devfn, name);
qdev_init(&dev->qdev);
return dev;
}
| 22,001 |
qemu | d9bce9d99f4656ae0b0127f7472db9067b8f84ab | 1 | PPC_OP(test_ctr)
{
T0 = regs->ctr;
RETURN();
}
| 22,004 |
qemu | 07caea315a85ebfe90851f9c2e4ef3fdd24117b5 | 1 | static void ppc_heathrow_init (ram_addr_t ram_size,
const char *boot_device,
const char *kernel_filename,
const char *kernel_cmdline,
const char *initrd_filename,
const char *cpu_model)
{
CPUState *env = NULL, *envs[MAX_CPUS];
char *filename;
qemu_irq *pic, **heathrow_irqs;
int linux_boot, i;
ram_addr_t ram_offset, bios_offset, vga_bios_offset;
uint32_t kernel_base, initrd_base;
int32_t kernel_size, initrd_size;
PCIBus *pci_bus;
MacIONVRAMState *nvr;
int vga_bios_size, bios_size;
int pic_mem_index, nvram_mem_index, dbdma_mem_index, cuda_mem_index;
int escc_mem_index, ide_mem_index[2];
uint16_t ppc_boot_device;
DriveInfo *hd[MAX_IDE_BUS * MAX_IDE_DEVS];
void *fw_cfg;
void *dbdma;
uint8_t *vga_bios_ptr;
linux_boot = (kernel_filename != NULL);
/* init CPUs */
if (cpu_model == NULL)
cpu_model = "G3";
for (i = 0; i < smp_cpus; i++) {
env = cpu_init(cpu_model);
if (!env) {
fprintf(stderr, "Unable to find PowerPC CPU definition\n");
exit(1);
}
/* Set time-base frequency to 16.6 Mhz */
cpu_ppc_tb_init(env, 16600000UL);
env->osi_call = vga_osi_call;
qemu_register_reset(&cpu_ppc_reset, env);
envs[i] = env;
}
/* allocate RAM */
if (ram_size > (2047 << 20)) {
fprintf(stderr,
"qemu: Too much memory for this machine: %d MB, maximum 2047 MB\n",
((unsigned int)ram_size / (1 << 20)));
exit(1);
}
ram_offset = qemu_ram_alloc(ram_size);
cpu_register_physical_memory(0, ram_size, ram_offset);
/* allocate and load BIOS */
bios_offset = qemu_ram_alloc(BIOS_SIZE);
if (bios_name == NULL)
bios_name = PROM_FILENAME;
filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name);
cpu_register_physical_memory(PROM_ADDR, BIOS_SIZE, bios_offset | IO_MEM_ROM);
/* Load OpenBIOS (ELF) */
if (filename) {
bios_size = load_elf(filename, 0, NULL, NULL, NULL,
1, ELF_MACHINE, 0);
qemu_free(filename);
} else {
bios_size = -1;
}
if (bios_size < 0 || bios_size > BIOS_SIZE) {
hw_error("qemu: could not load PowerPC bios '%s'\n", bios_name);
exit(1);
}
/* allocate and load VGA BIOS */
vga_bios_offset = qemu_ram_alloc(VGA_BIOS_SIZE);
vga_bios_ptr = qemu_get_ram_ptr(vga_bios_offset);
filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, VGABIOS_FILENAME);
if (filename) {
vga_bios_size = load_image(filename, vga_bios_ptr + 8);
qemu_free(filename);
} else {
vga_bios_size = -1;
}
if (vga_bios_size < 0) {
/* if no bios is present, we can still work */
fprintf(stderr, "qemu: warning: could not load VGA bios '%s'\n",
VGABIOS_FILENAME);
vga_bios_size = 0;
} else {
/* set a specific header (XXX: find real Apple format for NDRV
drivers) */
vga_bios_ptr[0] = 'N';
vga_bios_ptr[1] = 'D';
vga_bios_ptr[2] = 'R';
vga_bios_ptr[3] = 'V';
cpu_to_be32w((uint32_t *)(vga_bios_ptr + 4), vga_bios_size);
vga_bios_size += 8;
/* Round to page boundary */
vga_bios_size = (vga_bios_size + TARGET_PAGE_SIZE - 1) &
TARGET_PAGE_MASK;
}
if (linux_boot) {
uint64_t lowaddr = 0;
int bswap_needed;
#ifdef BSWAP_NEEDED
bswap_needed = 1;
#else
bswap_needed = 0;
#endif
kernel_base = KERNEL_LOAD_ADDR;
/* Now we can load the kernel. The first step tries to load the kernel
supposing PhysAddr = 0x00000000. If that was wrong the kernel is
loaded again, the new PhysAddr being computed from lowaddr. */
kernel_size = load_elf(kernel_filename, kernel_base, NULL, &lowaddr, NULL,
1, ELF_MACHINE, 0);
if (kernel_size > 0 && lowaddr != KERNEL_LOAD_ADDR) {
kernel_size = load_elf(kernel_filename, (2 * kernel_base) - lowaddr,
NULL, NULL, NULL, 1, ELF_MACHINE, 0);
}
if (kernel_size < 0)
kernel_size = load_aout(kernel_filename, kernel_base,
ram_size - kernel_base, bswap_needed,
TARGET_PAGE_SIZE);
if (kernel_size < 0)
kernel_size = load_image_targphys(kernel_filename,
kernel_base,
ram_size - kernel_base);
if (kernel_size < 0) {
hw_error("qemu: could not load kernel '%s'\n",
kernel_filename);
exit(1);
}
/* load initrd */
if (initrd_filename) {
initrd_base = INITRD_LOAD_ADDR;
initrd_size = load_image_targphys(initrd_filename, initrd_base,
ram_size - initrd_base);
if (initrd_size < 0) {
hw_error("qemu: could not load initial ram disk '%s'\n",
initrd_filename);
exit(1);
}
} else {
initrd_base = 0;
initrd_size = 0;
}
ppc_boot_device = 'm';
} else {
kernel_base = 0;
kernel_size = 0;
initrd_base = 0;
initrd_size = 0;
ppc_boot_device = '\0';
for (i = 0; boot_device[i] != '\0'; i++) {
/* TOFIX: for now, the second IDE channel is not properly
* used by OHW. The Mac floppy disk are not emulated.
* For now, OHW cannot boot from the network.
*/
#if 0
if (boot_device[i] >= 'a' && boot_device[i] <= 'f') {
ppc_boot_device = boot_device[i];
break;
}
#else
if (boot_device[i] >= 'c' && boot_device[i] <= 'd') {
ppc_boot_device = boot_device[i];
break;
}
#endif
}
if (ppc_boot_device == '\0') {
fprintf(stderr, "No valid boot device for G3 Beige machine\n");
exit(1);
}
}
isa_mem_base = 0x80000000;
/* Register 2 MB of ISA IO space */
isa_mmio_init(0xfe000000, 0x00200000);
/* XXX: we register only 1 output pin for heathrow PIC */
heathrow_irqs = qemu_mallocz(smp_cpus * sizeof(qemu_irq *));
heathrow_irqs[0] =
qemu_mallocz(smp_cpus * sizeof(qemu_irq) * 1);
/* Connect the heathrow PIC outputs to the 6xx bus */
for (i = 0; i < smp_cpus; i++) {
switch (PPC_INPUT(env)) {
case PPC_FLAGS_INPUT_6xx:
heathrow_irqs[i] = heathrow_irqs[0] + (i * 1);
heathrow_irqs[i][0] =
((qemu_irq *)env->irq_inputs)[PPC6xx_INPUT_INT];
break;
default:
hw_error("Bus model not supported on OldWorld Mac machine\n");
}
}
/* init basic PC hardware */
if (PPC_INPUT(env) != PPC_FLAGS_INPUT_6xx) {
hw_error("Only 6xx bus is supported on heathrow machine\n");
}
pic = heathrow_pic_init(&pic_mem_index, 1, heathrow_irqs);
pci_bus = pci_grackle_init(0xfec00000, pic);
pci_vga_init(pci_bus, vga_bios_offset, vga_bios_size);
escc_mem_index = escc_init(0x80013000, pic[0x0f], pic[0x10], serial_hds[0],
serial_hds[1], ESCC_CLOCK, 4);
for(i = 0; i < nb_nics; i++)
pci_nic_init(&nd_table[i], "ne2k_pci", NULL);
if (drive_get_max_bus(IF_IDE) >= MAX_IDE_BUS) {
fprintf(stderr, "qemu: too many IDE bus\n");
exit(1);
}
/* First IDE channel is a MAC IDE on the MacIO bus */
hd[0] = drive_get(IF_IDE, 0, 0);
hd[1] = drive_get(IF_IDE, 0, 1);
dbdma = DBDMA_init(&dbdma_mem_index);
ide_mem_index[0] = -1;
ide_mem_index[1] = pmac_ide_init(hd, pic[0x0D], dbdma, 0x16, pic[0x02]);
/* Second IDE channel is a CMD646 on the PCI bus */
hd[0] = drive_get(IF_IDE, 1, 0);
hd[1] = drive_get(IF_IDE, 1, 1);
hd[3] = hd[2] = NULL;
pci_cmd646_ide_init(pci_bus, hd, 0);
/* cuda also initialize ADB */
cuda_init(&cuda_mem_index, pic[0x12]);
adb_kbd_init(&adb_bus);
adb_mouse_init(&adb_bus);
nvr = macio_nvram_init(&nvram_mem_index, 0x2000, 4);
pmac_format_nvram_partition(nvr, 0x2000);
macio_init(pci_bus, PCI_DEVICE_ID_APPLE_343S1201, 1, pic_mem_index,
dbdma_mem_index, cuda_mem_index, nvr, 2, ide_mem_index,
escc_mem_index);
if (usb_enabled) {
usb_ohci_init_pci(pci_bus, -1);
}
if (graphic_depth != 15 && graphic_depth != 32 && graphic_depth != 8)
graphic_depth = 15;
/* No PCI init: the BIOS will do it */
fw_cfg = fw_cfg_init(0, 0, CFG_ADDR, CFG_ADDR + 2);
fw_cfg_add_i32(fw_cfg, FW_CFG_ID, 1);
fw_cfg_add_i64(fw_cfg, FW_CFG_RAM_SIZE, (uint64_t)ram_size);
fw_cfg_add_i16(fw_cfg, FW_CFG_MACHINE_ID, ARCH_HEATHROW);
fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_ADDR, kernel_base);
fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_SIZE, kernel_size);
if (kernel_cmdline) {
fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_CMDLINE, CMDLINE_ADDR);
pstrcpy_targphys(CMDLINE_ADDR, TARGET_PAGE_SIZE, kernel_cmdline);
} else {
fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_CMDLINE, 0);
}
fw_cfg_add_i32(fw_cfg, FW_CFG_INITRD_ADDR, initrd_base);
fw_cfg_add_i32(fw_cfg, FW_CFG_INITRD_SIZE, initrd_size);
fw_cfg_add_i16(fw_cfg, FW_CFG_BOOT_DEVICE, ppc_boot_device);
fw_cfg_add_i16(fw_cfg, FW_CFG_PPC_WIDTH, graphic_width);
fw_cfg_add_i16(fw_cfg, FW_CFG_PPC_HEIGHT, graphic_height);
fw_cfg_add_i16(fw_cfg, FW_CFG_PPC_DEPTH, graphic_depth);
qemu_register_boot_set(fw_cfg_boot_set, fw_cfg);
}
| 22,005 |
FFmpeg | 206167a295a5c28cec3c38f7308835b0b7e0618f | 1 | SYNTH_FILTER_FUNC(sse2)
SYNTH_FILTER_FUNC(avx)
av_cold void ff_synth_filter_init_x86(SynthFilterContext *s)
{
#if HAVE_YASM
int cpu_flags = av_get_cpu_flags();
#if ARCH_X86_32
if (EXTERNAL_SSE(cpu_flags)) {
s->synth_filter_float = synth_filter_sse;
}
if (EXTERNAL_SSE2(cpu_flags)) {
s->synth_filter_float = synth_filter_sse2;
}
if (EXTERNAL_AVX(cpu_flags)) {
s->synth_filter_float = synth_filter_avx;
}
}
| 22,007 |
FFmpeg | 7da9f4523159670d577a2808d4481e64008a8894 | 1 | static int rd_strip(CinepakEncContext *s, int y, int h, int keyframe, AVPicture *last_pict, AVPicture *pict, AVPicture *scratch_pict, unsigned char *buf, int64_t *best_score)
{
int64_t score = 0;
int best_size = 0, v1_size, v4_size, v4, mb_count = s->w * h / MB_AREA;
strip_info info;
CinepakMode best_mode;
int v4_codebooks[CODEBOOK_NUM][CODEBOOK_MAX*VECTOR_MAX];
if(!keyframe)
calculate_skip_errors(s, h, last_pict, pict, &info);
//precompute V4 codebooks
for(v4_size = 1, v4 = 0; v4_size <= 256; v4_size <<= 2, v4++) {
info.v4_codebook = v4_codebooks[v4];
quantize(s, h, pict, 0, v4_size, v4, &info);
}
//try all powers of 4 for the size of the codebooks
//constrain the v4 codebook to be no bigger than the v1 codebook
for(v1_size = 1; v1_size <= 256; v1_size <<= 2) {
//compute V1 codebook
quantize(s, h, pict, 1, v1_size, -1, &info);
for(v4_size = 0, v4 = -1; v4_size <= v1_size; v4_size = v4_size ? v4_size << 2 : v1_size >= 4 ? v1_size >> 2 : 1, v4++) {
//try all modes
for(CinepakMode mode = 0; mode < MODE_COUNT; mode++) {
//don't allow MODE_MC in inter frames
if(keyframe && mode == MODE_MC)
continue;
//only allow V1-only mode if v4 codebook is empty
if(!v4_size && mode != MODE_V1_ONLY)
continue;
info.v4_codebook = v4 >= 0 ? v4_codebooks[v4] : NULL;
score = calculate_mode_score(s, mode, h, v1_size, v4_size, v4, &info);
//av_log(s->avctx, AV_LOG_INFO, "%3i %3i score = %li\n", v1_size, v4_size, score);
if(best_size == 0 || score < *best_score) {
*best_score = score;
best_size = encode_mode(s, mode, h, v1_size, v4_size, v4, scratch_pict, &info, s->strip_buf + STRIP_HEADER_SIZE);
best_mode = mode;
av_log(s->avctx, AV_LOG_INFO, "mode %i, %3i, %3i: %18li %i B\n", mode, v1_size, v4_size, score, best_size);
#ifdef CINEPAKENC_DEBUG
//save MB encoding choices
memcpy(s->best_mb, s->mb, mb_count*sizeof(mb_info));
#endif
//memcpy(strip_temp + STRIP_HEADER_SIZE, strip_temp, best_size);
write_strip_header(s, y, h, keyframe, s->strip_buf, best_size);
}
}
}
}
#ifdef CINEPAKENC_DEBUG
//gather stats. this will only work properly of MAX_STRIPS == 1
if(best_mode == MODE_V1_ONLY) {
s->num_v1_mode++;
s->num_v1_encs += s->w*h/MB_AREA;
} else {
if(best_mode == MODE_V1_V4)
s->num_v4_mode++;
else
s->num_mc_mode++;
int x;
for(x = 0; x < s->w*h/MB_AREA; x++)
if(s->best_mb[x].best_encoding == ENC_V1)
s->num_v1_encs++;
else if(s->best_mb[x].best_encoding == ENC_V4)
s->num_v4_encs++;
else
s->num_skips++;
}
#endif
best_size += STRIP_HEADER_SIZE;
memcpy(buf, s->strip_buf, best_size);
return best_size;
}
| 22,008 |
qemu | 58ac321135af890b503ebe56d0d00e184779918f | 1 | void ide_sector_read(IDEState *s)
{
int64_t sector_num;
int n;
s->status = READY_STAT | SEEK_STAT;
s->error = 0; /* not needed by IDE spec, but needed by Windows */
sector_num = ide_get_sector(s);
n = s->nsector;
if (n == 0) {
ide_transfer_stop(s);
s->status |= BUSY_STAT;
if (n > s->req_nb_sectors) {
n = s->req_nb_sectors;
#if defined(DEBUG_IDE)
printf("sector=%" PRId64 "\n", sector_num);
#endif
s->iov.iov_base = s->io_buffer;
s->iov.iov_len = n * BDRV_SECTOR_SIZE;
qemu_iovec_init_external(&s->qiov, &s->iov, 1);
bdrv_acct_start(s->bs, &s->acct, n * BDRV_SECTOR_SIZE, BDRV_ACCT_READ);
s->pio_aiocb = bdrv_aio_readv(s->bs, sector_num, &s->qiov, n,
ide_sector_read_cb, s);
| 22,010 |
qemu | 7839ff593be03a7de3c6760e0b50c565ea751d36 | 1 | static void pc_compat_1_5(QEMUMachineInitArgs *args)
{
pc_compat_1_6(args);
has_pvpanic = true;
}
| 22,011 |
FFmpeg | a0c624e299730c8c5800375c2f5f3c6c200053ff | 1 | int ff_v4l2_m2m_codec_init(AVCodecContext *avctx)
{
int ret = AVERROR(EINVAL);
struct dirent *entry;
char node[PATH_MAX];
DIR *dirp;
V4L2m2mContext *s = avctx->priv_data;
s->avctx = avctx;
dirp = opendir("/dev");
if (!dirp)
return AVERROR(errno);
for (entry = readdir(dirp); entry; entry = readdir(dirp)) {
if (strncmp(entry->d_name, "video", 5))
continue;
snprintf(node, sizeof(node), "/dev/%s", entry->d_name);
av_log(s->avctx, AV_LOG_DEBUG, "probing device %s\n", node);
strncpy(s->devname, node, strlen(node) + 1);
ret = v4l2_probe_driver(s);
if (!ret)
break;
}
closedir(dirp);
if (ret) {
av_log(s->avctx, AV_LOG_ERROR, "Could not find a valid device\n");
memset(s->devname, 0, sizeof(s->devname));
return ret;
}
av_log(s->avctx, AV_LOG_INFO, "Using device %s\n", node);
return v4l2_configure_contexts(s);
}
| 22,012 |
FFmpeg | 473147bed01c0c6c82d85fd79d3e1c1d65542663 | 0 | static int decode_frame_ilbm(AVCodecContext *avctx,
void *data, int *data_size,
AVPacket *avpkt)
{
IffContext *s = avctx->priv_data;
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
const uint8_t *buf_end = buf+buf_size;
int y, plane;
if (avctx->reget_buffer(avctx, &s->frame) < 0){
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1;
}
for(y = 0; y < avctx->height; y++ ) {
uint8_t *row = &s->frame.data[0][ y*s->frame.linesize[0] ];
memset(row, 0, avctx->pix_fmt == PIX_FMT_PAL8 ? avctx->width : (avctx->width * 4));
for (plane = 0; plane < avctx->bits_per_coded_sample && buf < buf_end; plane++) {
if (avctx->pix_fmt == PIX_FMT_PAL8) {
decodeplane8(row, buf, FFMIN(s->planesize, buf_end - buf), avctx->bits_per_coded_sample, plane);
} else { // PIX_FMT_BGR32
decodeplane32(row, buf, FFMIN(s->planesize, buf_end - buf), avctx->bits_per_coded_sample, plane);
}
buf += s->planesize;
}
}
*data_size = sizeof(AVFrame);
*(AVFrame*)data = s->frame;
return buf_size;
}
| 22,014 |
FFmpeg | 5e6ce28dabe002a6130f17b59c454bdee33088f7 | 0 | static int mp3_seek(AVFormatContext *s, int stream_index, int64_t timestamp,
int flags)
{
MP3DecContext *mp3 = s->priv_data;
AVIndexEntry *ie, ie1;
AVStream *st = s->streams[0];
int64_t ret = av_index_search_timestamp(st, timestamp, flags);
int64_t best_pos;
int fast_seek = (s->flags & AVFMT_FLAG_FAST_SEEK) ? 1 : 0;
int64_t filesize = mp3->header_filesize;
if (mp3->usetoc == 2)
return -1; // generic index code
if (filesize <= 0) {
int64_t size = avio_size(s->pb);
if (size > 0 && size > s->internal->data_offset)
filesize = size - s->internal->data_offset;
}
if ( (mp3->is_cbr || fast_seek)
&& (mp3->usetoc == 0 || !mp3->xing_toc)
&& st->duration > 0
&& filesize > 0) {
ie = &ie1;
timestamp = av_clip64(timestamp, 0, st->duration);
ie->timestamp = timestamp;
ie->pos = av_rescale(timestamp, filesize, st->duration) + s->internal->data_offset;
} else if (mp3->xing_toc) {
if (ret < 0)
return ret;
ie = &st->index_entries[ret];
} else {
return -1;
}
best_pos = mp3_sync(s, ie->pos, flags);
if (best_pos < 0)
return best_pos;
if (mp3->is_cbr && ie == &ie1 && mp3->frames) {
int frame_duration = av_rescale(st->duration, 1, mp3->frames);
ie1.timestamp = frame_duration * av_rescale(best_pos - s->internal->data_offset, mp3->frames, mp3->header_filesize);
}
ff_update_cur_dts(s, st, ie->timestamp);
return 0;
}
| 22,015 |
qemu | 2061c14c9bea67f8f1fc6bc7acb33c903a0586c1 | 1 | static int handle_secondary_tcp_pkt(NetFilterState *nf,
Connection *conn,
Packet *pkt)
{
struct tcphdr *tcp_pkt;
tcp_pkt = (struct tcphdr *)pkt->transport_header;
if (trace_event_get_state(TRACE_COLO_FILTER_REWRITER_DEBUG)) {
char *sdebug, *ddebug;
sdebug = strdup(inet_ntoa(pkt->ip->ip_src));
ddebug = strdup(inet_ntoa(pkt->ip->ip_dst));
trace_colo_filter_rewriter_pkt_info(__func__, sdebug, ddebug,
ntohl(tcp_pkt->th_seq), ntohl(tcp_pkt->th_ack),
tcp_pkt->th_flags);
trace_colo_filter_rewriter_conn_offset(conn->offset);
g_free(sdebug);
g_free(ddebug);
}
if (((tcp_pkt->th_flags & (TH_ACK | TH_SYN)) == (TH_ACK | TH_SYN))) {
/*
* save offset = secondary_seq and then
* in handle_primary_tcp_pkt make offset
* = secondary_seq - primary_seq
*/
conn->offset = ntohl(tcp_pkt->th_seq);
}
if ((tcp_pkt->th_flags & (TH_ACK | TH_SYN)) == TH_ACK) {
/* handle packets to the primary from the secondary*/
tcp_pkt->th_seq = htonl(ntohl(tcp_pkt->th_seq) - conn->offset);
net_checksum_calculate((uint8_t *)pkt->data, pkt->size);
}
return 0;
}
| 22,017 |
qemu | 372579427a5040a26dfee78464b50e2bdf27ef26 | 1 | static inline bool cpu_handle_halt(CPUState *cpu)
{
if (cpu->halted) {
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
if ((cpu->interrupt_request & CPU_INTERRUPT_POLL)
&& replay_interrupt()) {
X86CPU *x86_cpu = X86_CPU(cpu);
qemu_mutex_lock_iothread();
apic_poll_irq(x86_cpu->apic_state);
cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
qemu_mutex_unlock_iothread();
}
#endif
if (!cpu_has_work(cpu)) {
current_cpu = NULL;
return true;
}
cpu->halted = 0;
}
return false;
}
| 22,018 |
FFmpeg | b5f45208fbe5373c7f9112a8169933b73a8478e1 | 1 | static inline CopyRet copy_frame(AVCodecContext *avctx,
BC_DTS_PROC_OUT *output,
void *data, int *got_frame)
{
BC_STATUS ret;
BC_DTS_STATUS decoder_status = { 0, };
uint8_t trust_interlaced;
uint8_t interlaced;
CHDContext *priv = avctx->priv_data;
int64_t pkt_pts = AV_NOPTS_VALUE;
uint8_t pic_type = 0;
uint8_t bottom_field = (output->PicInfo.flags & VDEC_FLAG_BOTTOMFIELD) ==
VDEC_FLAG_BOTTOMFIELD;
uint8_t bottom_first = !!(output->PicInfo.flags & VDEC_FLAG_BOTTOM_FIRST);
int width = output->PicInfo.width;
int height = output->PicInfo.height;
int bwidth;
uint8_t *src = output->Ybuff;
int sStride;
uint8_t *dst;
int dStride;
if (output->PicInfo.timeStamp != 0) {
OpaqueList *node = opaque_list_pop(priv, output->PicInfo.timeStamp);
if (node) {
pkt_pts = node->reordered_opaque;
pic_type = node->pic_type;
av_free(node);
} else {
/*
* We will encounter a situation where a timestamp cannot be
* popped if a second field is being returned. In this case,
* each field has the same timestamp and the first one will
* cause it to be popped. To keep subsequent calculations
* simple, pic_type should be set a FIELD value - doesn't
* matter which, but I chose BOTTOM.
*/
pic_type = PICT_BOTTOM_FIELD;
}
av_log(avctx, AV_LOG_VERBOSE, "output \"pts\": %"PRIu64"\n",
output->PicInfo.timeStamp);
av_log(avctx, AV_LOG_VERBOSE, "output picture type %d\n",
pic_type);
}
ret = DtsGetDriverStatus(priv->dev, &decoder_status);
if (ret != BC_STS_SUCCESS) {
av_log(avctx, AV_LOG_ERROR,
"CrystalHD: GetDriverStatus failed: %u\n", ret);
return RET_ERROR;
}
/*
* For most content, we can trust the interlaced flag returned
* by the hardware, but sometimes we can't. These are the
* conditions under which we can trust the flag:
*
* 1) It's not h.264 content
* 2) The UNKNOWN_SRC flag is not set
* 3) We know we're expecting a second field
* 4) The hardware reports this picture and the next picture
* have the same picture number.
*
* Note that there can still be interlaced content that will
* fail this check, if the hardware hasn't decoded the next
* picture or if there is a corruption in the stream. (In either
* case a 0 will be returned for the next picture number)
*/
trust_interlaced = avctx->codec->id != AV_CODEC_ID_H264 ||
!(output->PicInfo.flags & VDEC_FLAG_UNKNOWN_SRC) ||
priv->need_second_field ||
(decoder_status.picNumFlags & ~0x40000000) ==
output->PicInfo.picture_number;
/*
* If we got a false negative for trust_interlaced on the first field,
* we will realise our mistake here when we see that the picture number is that
* of the previous picture. We cannot recover the frame and should discard the
* second field to keep the correct number of output frames.
*/
if (output->PicInfo.picture_number == priv->last_picture && !priv->need_second_field) {
av_log(avctx, AV_LOG_WARNING,
"Incorrectly guessed progressive frame. Discarding second field\n");
/* Returning without providing a picture. */
return RET_OK;
}
interlaced = (output->PicInfo.flags & VDEC_FLAG_INTERLACED_SRC) &&
trust_interlaced;
if (!trust_interlaced && (decoder_status.picNumFlags & ~0x40000000) == 0) {
av_log(avctx, AV_LOG_VERBOSE,
"Next picture number unknown. Assuming progressive frame.\n");
}
av_log(avctx, AV_LOG_VERBOSE, "Interlaced state: %d | trust_interlaced %d\n",
interlaced, trust_interlaced);
if (priv->pic->data[0] && !priv->need_second_field)
av_frame_unref(priv->pic);
priv->need_second_field = interlaced && !priv->need_second_field;
if (!priv->pic->data[0]) {
if (ff_get_buffer(avctx, priv->pic, AV_GET_BUFFER_FLAG_REF) < 0)
return RET_ERROR;
}
bwidth = av_image_get_linesize(avctx->pix_fmt, width, 0);
if (priv->is_70012) {
int pStride;
if (width <= 720)
pStride = 720;
else if (width <= 1280)
pStride = 1280;
else pStride = 1920;
sStride = av_image_get_linesize(avctx->pix_fmt, pStride, 0);
} else {
sStride = bwidth;
}
dStride = priv->pic->linesize[0];
dst = priv->pic->data[0];
av_log(priv->avctx, AV_LOG_VERBOSE, "CrystalHD: Copying out frame\n");
if (interlaced) {
int dY = 0;
int sY = 0;
height /= 2;
if (bottom_field) {
av_log(priv->avctx, AV_LOG_VERBOSE, "Interlaced: bottom field\n");
dY = 1;
} else {
av_log(priv->avctx, AV_LOG_VERBOSE, "Interlaced: top field\n");
dY = 0;
}
for (sY = 0; sY < height; dY++, sY++) {
memcpy(&(dst[dY * dStride]), &(src[sY * sStride]), bwidth);
dY++;
}
} else {
av_image_copy_plane(dst, dStride, src, sStride, bwidth, height);
}
priv->pic->interlaced_frame = interlaced;
if (interlaced)
priv->pic->top_field_first = !bottom_first;
priv->pic->pts = pkt_pts;
#if FF_API_PKT_PTS
FF_DISABLE_DEPRECATION_WARNINGS
priv->pic->pkt_pts = pkt_pts;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
if (!priv->need_second_field) {
*got_frame = 1;
if ((ret = av_frame_ref(data, priv->pic)) < 0) {
return ret;
}
}
/*
* Two types of PAFF content have been observed. One form causes the
* hardware to return a field pair and the other individual fields,
* even though the input is always individual fields. We must skip
* copying on the next decode() call to maintain pipeline length in
* the first case.
*/
if (!interlaced && (output->PicInfo.flags & VDEC_FLAG_UNKNOWN_SRC) &&
(pic_type == PICT_TOP_FIELD || pic_type == PICT_BOTTOM_FIELD)) {
av_log(priv->avctx, AV_LOG_VERBOSE, "Fieldpair from two packets.\n");
return RET_SKIP_NEXT_COPY;
}
/*
* The logic here is purely based on empirical testing with samples.
* If we need a second field, it could come from a second input packet,
* or it could come from the same field-pair input packet at the current
* field. In the first case, we should return and wait for the next time
* round to get the second field, while in the second case, we should
* ask the decoder for it immediately.
*
* Testing has shown that we are dealing with the fieldpair -> two fields
* case if the VDEC_FLAG_UNKNOWN_SRC is not set or if the input picture
* type was PICT_FRAME (in this second case, the flag might still be set)
*/
return priv->need_second_field &&
(!(output->PicInfo.flags & VDEC_FLAG_UNKNOWN_SRC) ||
pic_type == PICT_FRAME) ?
RET_COPY_NEXT_FIELD : RET_OK;
}
| 22,019 |
FFmpeg | d9f4dc52a0fe3edb93f153cf13e750f7c46243d1 | 1 | static av_cold int prores_encode_init(AVCodecContext *avctx)
{
int i;
ProresContext* ctx = avctx->priv_data;
if (avctx->pix_fmt != PIX_FMT_YUV422P10LE) {
av_log(avctx, AV_LOG_ERROR, "need YUV422P10\n");
return -1;
}
if (avctx->width & 0x1) {
av_log(avctx, AV_LOG_ERROR,
"frame width needs to be multiple of 2\n");
return -1;
}
if ((avctx->height & 0xf) || (avctx->width & 0xf)) {
ctx->fill_y = av_malloc(DEFAULT_SLICE_MB_WIDTH << 9);
ctx->fill_u = av_malloc(DEFAULT_SLICE_MB_WIDTH << 8);
ctx->fill_v = av_malloc(DEFAULT_SLICE_MB_WIDTH << 8);
}
if (avctx->profile == FF_PROFILE_UNKNOWN) {
avctx->profile = FF_PROFILE_PRORES_STANDARD;
av_log(avctx, AV_LOG_INFO,
"encoding with ProRes standard (apcn) profile\n");
} else if (avctx->profile < FF_PROFILE_PRORES_PROXY
|| avctx->profile > FF_PROFILE_PRORES_HQ) {
av_log(
avctx,
AV_LOG_ERROR,
"unknown profile %d, use [0 - apco, 1 - apcs, 2 - apcn (default), 3 - apch]\n",
avctx->profile);
return -1;
}
avctx->codec_tag = AV_RL32((const uint8_t*)profiles[avctx->profile].name);
for (i = 1; i <= 16; i++) {
scale_mat(QMAT_LUMA[avctx->profile] , ctx->qmat_luma[i - 1] , i);
scale_mat(QMAT_CHROMA[avctx->profile], ctx->qmat_chroma[i - 1], i);
}
avctx->coded_frame = avcodec_alloc_frame();
avctx->coded_frame->key_frame = 1;
avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
return 0;
}
| 22,020 |
qemu | 544a3731591f5d53e15f22de00ce5ac758d490b3 | 1 | static void test_visitor_out_union_flat(TestOutputVisitorData *data,
const void *unused)
{
QObject *arg;
QDict *qdict;
UserDefFlatUnion *tmp = g_malloc0(sizeof(UserDefFlatUnion));
tmp->enum1 = ENUM_ONE_VALUE1;
tmp->string = g_strdup("str");
tmp->u.value1 = g_malloc0(sizeof(UserDefA));
tmp->integer = 41;
tmp->u.value1->boolean = true;
visit_type_UserDefFlatUnion(data->ov, NULL, &tmp, &error_abort);
arg = qmp_output_get_qobject(data->qov);
g_assert(qobject_type(arg) == QTYPE_QDICT);
qdict = qobject_to_qdict(arg);
g_assert_cmpstr(qdict_get_str(qdict, "enum1"), ==, "value1");
g_assert_cmpstr(qdict_get_str(qdict, "string"), ==, "str");
g_assert_cmpint(qdict_get_int(qdict, "integer"), ==, 41);
g_assert_cmpint(qdict_get_bool(qdict, "boolean"), ==, true);
qapi_free_UserDefFlatUnion(tmp);
QDECREF(qdict);
}
| 22,021 |
FFmpeg | 11ce88346b1ae4da21b581baf1b4eb784d842547 | 1 | int estimate_motion(MpegEncContext * s,
int mb_x, int mb_y,
int *mx_ptr, int *my_ptr)
{
UINT8 *pix, *ppix;
int sum, varc, vard, mx, my, range, dmin, xx, yy;
int xmin, ymin, xmax, ymax;
int rel_xmin, rel_ymin, rel_xmax, rel_ymax;
int pred_x=0, pred_y=0;
int P[5][2];
const int shift= 1+s->quarter_sample;
range = 8 * (1 << (s->f_code - 1));
/* XXX: temporary kludge to avoid overflow for msmpeg4 */
if (s->out_format == FMT_H263 && !s->h263_msmpeg4)
range = range * 2;
if (s->unrestricted_mv) {
xmin = -16;
ymin = -16;
if (s->h263_plus)
range *= 2;
if(s->avctx==NULL || s->avctx->codec->id!=CODEC_ID_MPEG4){
xmax = s->mb_width*16;
ymax = s->mb_height*16;
}else {
/* XXX: don't know if this is correct but the ffmpeg4 decoder won't like it otherwise
(because the drawn edge isn't large enough) */
xmax = s->width;
ymax = s->height;
}
} else {
xmin = 0;
ymin = 0;
xmax = s->mb_width*16 - 16;
ymax = s->mb_height*16 - 16;
}
switch(s->full_search) {
case ME_ZERO:
default:
no_motion_search(s, &mx, &my);
dmin = 0;
break;
case ME_FULL:
dmin = full_motion_search(s, &mx, &my, range, xmin, ymin, xmax, ymax);
break;
case ME_LOG:
dmin = log_motion_search(s, &mx, &my, range / 2, xmin, ymin, xmax, ymax);
break;
case ME_PHODS:
dmin = phods_motion_search(s, &mx, &my, range / 2, xmin, ymin, xmax, ymax);
break;
case ME_X1: // just reserving some space for experiments ...
case ME_EPZS:
rel_xmin= xmin - s->mb_x*16;
rel_xmax= xmax - s->mb_x*16;
rel_ymin= ymin - s->mb_y*16;
rel_ymax= ymax - s->mb_y*16;
if(s->out_format == FMT_H263){
static const int off[4]= {2, 1, 1, -1};
const int mot_stride = s->block_wrap[0];
const int mot_xy = s->block_index[0];
P[0][0] = s->motion_val[mot_xy ][0];
P[0][1] = s->motion_val[mot_xy ][1];
P[1][0] = s->motion_val[mot_xy - 1][0];
P[1][1] = s->motion_val[mot_xy - 1][1];
if(P[1][0] > (rel_xmax<<shift)) P[1][0]= (rel_xmax<<shift);
/* special case for first line */
if ((s->mb_y == 0 || s->first_slice_line || s->first_gob_line)) {
pred_x = P[1][0];
pred_y = P[1][1];
} else {
P[2][0] = s->motion_val[mot_xy - mot_stride ][0];
P[2][1] = s->motion_val[mot_xy - mot_stride ][1];
P[3][0] = s->motion_val[mot_xy - mot_stride + off[0] ][0];
P[3][1] = s->motion_val[mot_xy - mot_stride + off[0] ][1];
if(P[2][1] > (rel_ymax<<shift)) P[2][1]= (rel_ymax<<shift);
if(P[3][0] < (rel_xmin<<shift)) P[3][0]= (rel_xmin<<shift);
if(P[3][1] > (rel_ymax<<shift)) P[3][1]= (rel_ymax<<shift);
P[4][0]= pred_x = mid_pred(P[1][0], P[2][0], P[3][0]);
P[4][1]= pred_y = mid_pred(P[1][1], P[2][1], P[3][1]);
}
}else {
const int xy= s->mb_y*s->mb_width + s->mb_x;
pred_x= s->last_mv[0][0][0];
pred_y= s->last_mv[0][0][1];
P[0][0]= s->mv_table[0][xy ];
P[0][1]= s->mv_table[1][xy ];
if(s->mb_x == 0){
P[1][0]= 0;
P[1][1]= 0;
}else{
P[1][0]= s->mv_table[0][xy-1];
P[1][1]= s->mv_table[1][xy-1];
if(P[1][0] > (rel_xmax<<shift)) P[1][0]= (rel_xmax<<shift);
}
if (!(s->mb_y == 0 || s->first_slice_line || s->first_gob_line)) {
P[2][0] = s->mv_table[0][xy - s->mb_width];
P[2][1] = s->mv_table[1][xy - s->mb_width];
P[3][0] = s->mv_table[0][xy - s->mb_width+1];
P[3][1] = s->mv_table[1][xy - s->mb_width+1];
if(P[2][1] > (rel_ymax<<shift)) P[2][1]= (rel_ymax<<shift);
if(P[3][0] > (rel_xmax<<shift)) P[3][0]= (rel_xmax<<shift);
if(P[3][0] < (rel_xmin<<shift)) P[3][0]= (rel_xmin<<shift);
if(P[3][1] > (rel_ymax<<shift)) P[3][1]= (rel_ymax<<shift);
P[4][0]= mid_pred(P[1][0], P[2][0], P[3][0]);
P[4][1]= mid_pred(P[1][1], P[2][1], P[3][1]);
}
}
dmin = epzs_motion_search(s, &mx, &my, P, pred_x, pred_y, rel_xmin, rel_ymin, rel_xmax, rel_ymax);
mx+= s->mb_x*16;
my+= s->mb_y*16;
break;
}
/* intra / predictive decision */
xx = mb_x * 16;
yy = mb_y * 16;
pix = s->new_picture[0] + (yy * s->linesize) + xx;
/* At this point (mx,my) are full-pell and the absolute displacement */
ppix = s->last_picture[0] + (my * s->linesize) + mx;
sum = pix_sum(pix, s->linesize);
varc = pix_norm1(pix, s->linesize);
vard = pix_norm(pix, ppix, s->linesize);
vard = vard >> 8;
sum = sum >> 8;
varc = (varc >> 8) - (sum * sum);
s->mb_var[s->mb_width * mb_y + mb_x] = varc;
s->avg_mb_var += varc;
s->mc_mb_var += vard;
#if 0
printf("varc=%4d avg_var=%4d (sum=%4d) vard=%4d mx=%2d my=%2d\n",
varc, s->avg_mb_var, sum, vard, mx - xx, my - yy);
#endif
if (vard <= 64 || vard < varc) {
if (s->full_search != ME_ZERO) {
halfpel_motion_search(s, &mx, &my, dmin, xmin, ymin, xmax, ymax, pred_x, pred_y);
} else {
mx -= 16 * s->mb_x;
my -= 16 * s->mb_y;
}
*mx_ptr = mx;
*my_ptr = my;
return 0;
} else {
*mx_ptr = 0;
*my_ptr = 0;
return 1;
}
}
| 22,022 |
FFmpeg | 78987a88a88b28d93d03ed6c228bcb33f178444f | 1 | static int get_std_framerate(int i)
{
if (i < 60 * 12)
return i * 1001;
else
return ((const int[]) { 24, 30, 60, 12, 15 })[i - 60 * 12] * 1000 * 12;
}
| 22,023 |
qemu | 58ae2d1f037fae1d90eed4522053a85d79edfbec | 1 | static int bad_mode_switch(CPUARMState *env, int mode)
{
/* Return true if it is not valid for us to switch to
* this CPU mode (ie all the UNPREDICTABLE cases in
* the ARM ARM CPSRWriteByInstr pseudocode).
*/
switch (mode) {
case ARM_CPU_MODE_USR:
case ARM_CPU_MODE_SYS:
case ARM_CPU_MODE_SVC:
case ARM_CPU_MODE_ABT:
case ARM_CPU_MODE_UND:
case ARM_CPU_MODE_IRQ:
case ARM_CPU_MODE_FIQ:
/* Note that we don't implement the IMPDEF NSACR.RFR which in v7
* allows FIQ mode to be Secure-only. (In v8 this doesn't exist.)
*/
return 0;
case ARM_CPU_MODE_HYP:
return !arm_feature(env, ARM_FEATURE_EL2)
|| arm_current_el(env) < 2 || arm_is_secure(env);
case ARM_CPU_MODE_MON:
return !arm_is_secure(env);
default:
return 1;
}
}
| 22,024 |
qemu | 7e8c49c56154ab5c45d4f07edf0c22728735da35 | 1 | static void scsi_write_complete(void * opaque, int ret)
{
SCSIDiskReq *r = (SCSIDiskReq *)opaque;
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
uint32_t n;
if (r->req.aiocb != NULL) {
r->req.aiocb = NULL;
bdrv_acct_done(s->qdev.conf.bs, &r->acct);
}
if (ret < 0) {
if (scsi_handle_rw_error(r, -ret)) {
goto done;
}
}
n = r->qiov.size / 512;
r->sector += n;
r->sector_count -= n;
if (r->sector_count == 0) {
scsi_req_complete(&r->req, GOOD);
} else {
scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
DPRINTF("Write complete tag=0x%x more=%d\n", r->req.tag, r->qiov.size);
scsi_req_data(&r->req, r->qiov.size);
}
done:
if (!r->req.io_canceled) {
scsi_req_unref(&r->req);
}
}
| 22,025 |
FFmpeg | 8e87d146d798ca25d8f3a4520a6deb7946b39d73 | 1 | static void subband_scale(int *dst, int *src, int scale, int offset, int len)
{
int ssign = scale < 0 ? -1 : 1;
int s = FFABS(scale);
unsigned int round;
int i, out, c = exp2tab[s & 3];
s = offset - (s >> 2);
if (s > 31) {
for (i=0; i<len; i++) {
dst[i] = 0;
}
} else if (s > 0) {
round = 1 << (s-1);
for (i=0; i<len; i++) {
out = (int)(((int64_t)src[i] * c) >> 32);
dst[i] = ((int)(out+round) >> s) * ssign;
}
}
else {
s = s + 32;
round = 1U << (s-1);
for (i=0; i<len; i++) {
out = (int)((int64_t)((int64_t)src[i] * c + round) >> s);
dst[i] = out * ssign;
}
}
}
| 22,026 |
qemu | 7e09797c299712cafa7bc05dd57c1b13afcc6039 | 1 | static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
env->cp15.c5_insn = extended_mpu_ap_bits(value);
}
| 22,027 |
qemu | a890643958f03aaa344290700093b280cb606c28 | 1 | qht_entry_move(struct qht_bucket *to, int i, struct qht_bucket *from, int j)
{
qht_debug_assert(!(to == from && i == j));
qht_debug_assert(to->pointers[i]);
qht_debug_assert(from->pointers[j]);
to->hashes[i] = from->hashes[j];
atomic_set(&to->pointers[i], from->pointers[j]);
from->hashes[j] = 0;
atomic_set(&from->pointers[j], NULL);
}
| 22,030 |
FFmpeg | 7e4881a2d074a7dfba7ee1990b3e17c9276f985d | 0 | static int atrac3_decode_frame(AVCodecContext *avctx,
void *data, int *data_size,
AVPacket *avpkt) {
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
ATRAC3Context *q = avctx->priv_data;
int result = 0;
const uint8_t* databuf;
float *samples = data;
if (buf_size < avctx->block_align) {
av_log(avctx, AV_LOG_ERROR,
"Frame too small (%d bytes). Truncated file?\n", buf_size);
*data_size = 0;
return buf_size;
}
/* Check if we need to descramble and what buffer to pass on. */
if (q->scrambled_stream) {
decode_bytes(buf, q->decoded_bytes_buffer, avctx->block_align);
databuf = q->decoded_bytes_buffer;
} else {
databuf = buf;
}
result = decodeFrame(q, databuf, q->channels == 2 ? q->outSamples : &samples);
if (result != 0) {
av_log(NULL,AV_LOG_ERROR,"Frame decoding error!\n");
return -1;
}
/* interleave */
if (q->channels == 2) {
q->fmt_conv.float_interleave(samples, (const float **)q->outSamples,
1024, 2);
}
*data_size = 1024 * q->channels * av_get_bytes_per_sample(avctx->sample_fmt);
return avctx->block_align;
}
| 22,032 |
FFmpeg | 87e8788680e16c51f6048af26f3f7830c35207a5 | 0 | static int flic_probe(AVProbeData *p)
{
int magic_number;
if (p->buf_size < 6)
return 0;
magic_number = AV_RL16(&p->buf[4]);
if ((magic_number != FLIC_FILE_MAGIC_1) &&
(magic_number != FLIC_FILE_MAGIC_2) &&
(magic_number != FLIC_FILE_MAGIC_3))
return 0;
return AVPROBE_SCORE_MAX;
}
| 22,033 |
FFmpeg | ddfa3751c092feaf1e080f66587024689dfe603c | 1 | static int jp2_find_codestream(J2kDecoderContext *s)
{
uint32_t atom_size;
int found_codestream = 0, search_range = 10;
// skip jpeg2k signature atom
s->buf += 12;
while(!found_codestream && search_range && s->buf_end - s->buf >= 8) {
atom_size = AV_RB32(s->buf);
if(AV_RB32(s->buf + 4) == JP2_CODESTREAM) {
found_codestream = 1;
s->buf += 8;
} else {
if (s->buf_end - s->buf < atom_size)
return 0;
s->buf += atom_size;
search_range--;
}
}
if(found_codestream)
return 1;
return 0;
}
| 22,035 |
qemu | a70dadc7f1a3e96a7179c6c3a6ccd1a0ea65760a | 1 | static void tswap_siginfo(target_siginfo_t *tinfo,
const target_siginfo_t *info)
{
int sig = info->si_signo;
tinfo->si_signo = tswap32(sig);
tinfo->si_errno = tswap32(info->si_errno);
tinfo->si_code = tswap32(info->si_code);
if (sig == TARGET_SIGILL || sig == TARGET_SIGFPE || sig == TARGET_SIGSEGV
|| sig == TARGET_SIGBUS || sig == TARGET_SIGTRAP) {
tinfo->_sifields._sigfault._addr
= tswapal(info->_sifields._sigfault._addr);
} else if (sig == TARGET_SIGIO) {
tinfo->_sifields._sigpoll._band
= tswap32(info->_sifields._sigpoll._band);
tinfo->_sifields._sigpoll._fd = tswap32(info->_sifields._sigpoll._fd);
} else if (sig == TARGET_SIGCHLD) {
tinfo->_sifields._sigchld._pid
= tswap32(info->_sifields._sigchld._pid);
tinfo->_sifields._sigchld._uid
= tswap32(info->_sifields._sigchld._uid);
tinfo->_sifields._sigchld._status
= tswap32(info->_sifields._sigchld._status);
tinfo->_sifields._sigchld._utime
= tswapal(info->_sifields._sigchld._utime);
tinfo->_sifields._sigchld._stime
= tswapal(info->_sifields._sigchld._stime);
} else if (sig >= TARGET_SIGRTMIN) {
tinfo->_sifields._rt._pid = tswap32(info->_sifields._rt._pid);
tinfo->_sifields._rt._uid = tswap32(info->_sifields._rt._uid);
tinfo->_sifields._rt._sigval.sival_ptr
= tswapal(info->_sifields._rt._sigval.sival_ptr);
}
}
| 22,036 |
qemu | e23a1b33b53d25510320b26d9f154e19c6c99725 | 1 | void axisdev88_init (ram_addr_t ram_size,
const char *boot_device,
const char *kernel_filename, const char *kernel_cmdline,
const char *initrd_filename, const char *cpu_model)
{
CPUState *env;
DeviceState *dev;
SysBusDevice *s;
qemu_irq irq[30], nmi[2], *cpu_irq;
void *etraxfs_dmac;
struct etraxfs_dma_client *eth[2] = {NULL, NULL};
int kernel_size;
int i;
int nand_regs;
int gpio_regs;
ram_addr_t phys_ram;
ram_addr_t phys_intmem;
/* init CPUs */
if (cpu_model == NULL) {
cpu_model = "crisv32";
}
env = cpu_init(cpu_model);
qemu_register_reset(main_cpu_reset, env);
/* allocate RAM */
phys_ram = qemu_ram_alloc(ram_size);
cpu_register_physical_memory(0x40000000, ram_size, phys_ram | IO_MEM_RAM);
/* The ETRAX-FS has 128Kb on chip ram, the docs refer to it as the
internal memory. */
phys_intmem = qemu_ram_alloc(INTMEM_SIZE);
cpu_register_physical_memory(0x38000000, INTMEM_SIZE,
phys_intmem | IO_MEM_RAM);
/* Attach a NAND flash to CS1. */
nand_state.nand = nand_init(NAND_MFR_STMICRO, 0x39);
nand_regs = cpu_register_io_memory(nand_read, nand_write, &nand_state);
cpu_register_physical_memory(0x10000000, 0x05000000, nand_regs);
gpio_state.nand = &nand_state;
gpio_regs = cpu_register_io_memory(gpio_read, gpio_write, &gpio_state);
cpu_register_physical_memory(0x3001a000, 0x5c, gpio_regs);
cpu_irq = cris_pic_init_cpu(env);
dev = qdev_create(NULL, "etraxfs,pic");
/* FIXME: Is there a proper way to signal vectors to the CPU core? */
qdev_prop_set_ptr(dev, "interrupt_vector", &env->interrupt_vector);
qdev_init(dev);
s = sysbus_from_qdev(dev);
sysbus_mmio_map(s, 0, 0x3001c000);
sysbus_connect_irq(s, 0, cpu_irq[0]);
sysbus_connect_irq(s, 1, cpu_irq[1]);
for (i = 0; i < 30; i++) {
irq[i] = qdev_get_gpio_in(dev, i);
}
nmi[0] = qdev_get_gpio_in(dev, 30);
nmi[1] = qdev_get_gpio_in(dev, 31);
etraxfs_dmac = etraxfs_dmac_init(0x30000000, 10);
for (i = 0; i < 10; i++) {
/* On ETRAX, odd numbered channels are inputs. */
etraxfs_dmac_connect(etraxfs_dmac, i, irq + 7 + i, i & 1);
}
/* Add the two ethernet blocks. */
eth[0] = etraxfs_eth_init(&nd_table[0], 0x30034000, 1);
if (nb_nics > 1)
eth[1] = etraxfs_eth_init(&nd_table[1], 0x30036000, 2);
/* The DMA Connector block is missing, hardwire things for now. */
etraxfs_dmac_connect_client(etraxfs_dmac, 0, eth[0]);
etraxfs_dmac_connect_client(etraxfs_dmac, 1, eth[0] + 1);
if (eth[1]) {
etraxfs_dmac_connect_client(etraxfs_dmac, 6, eth[1]);
etraxfs_dmac_connect_client(etraxfs_dmac, 7, eth[1] + 1);
}
/* 2 timers. */
sysbus_create_varargs("etraxfs,timer", 0x3001e000, irq[0x1b], nmi[1], NULL);
sysbus_create_varargs("etraxfs,timer", 0x3005e000, irq[0x1b], nmi[1], NULL);
for (i = 0; i < 4; i++) {
sysbus_create_simple("etraxfs,serial", 0x30026000 + i * 0x2000,
irq[0x14 + i]);
}
if (kernel_filename) {
uint64_t entry, high;
int kcmdline_len;
/* Boots a kernel elf binary, os/linux-2.6/vmlinux from the axis
devboard SDK. */
kernel_size = load_elf(kernel_filename, -0x80000000LL,
&entry, NULL, &high, 0, ELF_MACHINE, 0);
bootstrap_pc = entry;
if (kernel_size < 0) {
/* Takes a kimage from the axis devboard SDK. */
kernel_size = load_image_targphys(kernel_filename, 0x40004000,
ram_size);
bootstrap_pc = 0x40004000;
env->regs[9] = 0x40004000 + kernel_size;
}
env->regs[8] = 0x56902387; /* RAM init magic. */
if (kernel_cmdline && (kcmdline_len = strlen(kernel_cmdline))) {
if (kcmdline_len > 256) {
fprintf(stderr, "Too long CRIS kernel cmdline (max 256)\n");
exit(1);
}
/* Let the kernel know we are modifying the cmdline. */
env->regs[10] = 0x87109563;
env->regs[11] = 0x40000000;
pstrcpy_targphys(env->regs[11], 256, kernel_cmdline);
}
}
env->pc = bootstrap_pc;
printf ("pc =%x\n", env->pc);
printf ("ram size =%ld\n", ram_size);
}
| 22,038 |
FFmpeg | 8a701ef7ddbb2d80ef77b14287d286fc9760f131 | 1 | static int decode_pic_timing(HEVCContext *s)
{
GetBitContext *gb = &s->HEVClc->gb;
HEVCSPS *sps = (HEVCSPS*)s->sps_list[s->active_seq_parameter_set_id]->data;
if (!sps)
return(AVERROR(ENOMEM));
if (sps->vui.frame_field_info_present_flag) {
int pic_struct = get_bits(gb, 4);
s->picture_struct = AV_PICTURE_STRUCTURE_UNKNOWN;
if (pic_struct == 2) {
av_log(s->avctx, AV_LOG_DEBUG, "BOTTOM Field\n");
s->picture_struct = AV_PICTURE_STRUCTURE_BOTTOM_FIELD;
} else if (pic_struct == 1) {
av_log(s->avctx, AV_LOG_DEBUG, "TOP Field\n");
s->picture_struct = AV_PICTURE_STRUCTURE_TOP_FIELD;
}
get_bits(gb, 2); // source_scan_type
get_bits(gb, 1); // duplicate_flag
}
return 1;
}
| 22,039 |
FFmpeg | 3069e70f62fa506c6b86bd7dac4fcb139c886f37 | 1 | static void *circular_buffer_task( void *_URLContext)
{
URLContext *h = _URLContext;
UDPContext *s = h->priv_data;
fd_set rfds;
struct timeval tv;
while(!s->exit_thread) {
int left;
int ret;
int len;
if (ff_check_interrupt(&h->interrupt_callback)) {
s->circular_buffer_error = AVERROR(EINTR);
goto end;
}
FD_ZERO(&rfds);
FD_SET(s->udp_fd, &rfds);
tv.tv_sec = 1;
tv.tv_usec = 0;
ret = select(s->udp_fd + 1, &rfds, NULL, NULL, &tv);
if (ret < 0) {
if (ff_neterrno() == AVERROR(EINTR))
continue;
s->circular_buffer_error = AVERROR(EIO);
goto end;
}
if (!(ret > 0 && FD_ISSET(s->udp_fd, &rfds)))
continue;
/* How much do we have left to the end of the buffer */
        /* What's the minimum we can read so that we don't completely fill the buffer */
left = av_fifo_space(s->fifo);
/* No Space left, error, what do we do now */
if(left < UDP_MAX_PKT_SIZE + 4) {
av_log(h, AV_LOG_ERROR, "circular_buffer: OVERRUN\n");
s->circular_buffer_error = AVERROR(EIO);
goto end;
}
len = recv(s->udp_fd, s->tmp+4, sizeof(s->tmp)-4, 0);
if (len < 0) {
if (ff_neterrno() != AVERROR(EAGAIN) && ff_neterrno() != AVERROR(EINTR)) {
s->circular_buffer_error = AVERROR(EIO);
goto end;
}
continue;
}
AV_WL32(s->tmp, len);
pthread_mutex_lock(&s->mutex);
av_fifo_generic_write(s->fifo, s->tmp, len+4, NULL);
pthread_cond_signal(&s->cond);
pthread_mutex_unlock(&s->mutex);
}
end:
pthread_mutex_lock(&s->mutex);
pthread_cond_signal(&s->cond);
pthread_mutex_unlock(&s->mutex);
return NULL;
}
| 22,040 |
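Aside: the reader loop above frames every datagram with a 4-byte little-endian length (AV_WL32) before pushing it into the FIFO. The following self-contained sketch shows that same length-prefix framing against a toy byte buffer; toy_fifo and fifo_push_packet are invented names for illustration only, not FFmpeg APIs.
#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Toy byte FIFO, large enough for the demo below. */
typedef struct { uint8_t buf[4096]; size_t len; } toy_fifo;

/* Store a datagram preceded by a 4-byte little-endian length, mirroring the
 * AV_WL32(s->tmp, len) framing used by the reader thread above. */
static int fifo_push_packet(toy_fifo *f, const uint8_t *pkt, uint32_t len)
{
    if ((uint64_t)len + 4 > sizeof(f->buf) - f->len)
        return -1;                               /* would overrun: drop the packet */
    f->buf[f->len + 0] = (uint8_t)len;
    f->buf[f->len + 1] = (uint8_t)(len >> 8);
    f->buf[f->len + 2] = (uint8_t)(len >> 16);
    f->buf[f->len + 3] = (uint8_t)(len >> 24);
    memcpy(f->buf + f->len + 4, pkt, len);
    f->len += (size_t)len + 4;
    return 0;
}

int main(void)
{
    toy_fifo f = { .len = 0 };
    const uint8_t dgram[] = "hello";
    if (fifo_push_packet(&f, dgram, sizeof(dgram)) == 0)
        printf("stored %zu bytes (4-byte header + payload)\n", f.len);
    return 0;
}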
FFmpeg | bcaa9099b3648b47060e1724a97dc98b63c83702 | 1 | static int decode_rle(uint8_t *bitmap, int linesize, int w, int h,
const uint8_t *buf, int start, int buf_size, int is_8bit)
{
GetBitContext gb;
int bit_len;
int x, y, len, color;
uint8_t *d;
    if (start >= buf_size)
        return -1;
    bit_len = (buf_size - start) * 8;
init_get_bits(&gb, buf + start, bit_len);
x = 0;
y = 0;
d = bitmap;
for(;;) {
        if (get_bits_count(&gb) > bit_len)
            return -1;
        if (is_8bit)
len = decode_run_8bit(&gb, &color);
else
len = decode_run_2bit(&gb, &color);
len = FFMIN(len, w - x);
memset(d + x, color, len);
x += len;
if (x >= w) {
y++;
if (y >= h)
break;
d += linesize;
x = 0;
/* byte align */
align_get_bits(&gb);
}
}
return 0;
} | 22,041 |
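For illustration only: the decoder above clamps each run with FFMIN(len, w - x) before writing, so a run can never spill past the current row. A minimal standalone version of that clamped fill, using the hypothetical name expand_run, might look like this.
#include <stdint.h>
#include <string.h>

/* Fill 'len' pixels of 'color' starting at column x, never writing past the
 * end of the row; returns the new column, or -1 if x is already out of range. */
static int expand_run(uint8_t *row, int row_width, int x, int len, uint8_t color)
{
    if (x < 0 || x > row_width || len < 0)
        return -1;
    if (len > row_width - x)          /* same clamp as FFMIN(len, w - x) above */
        len = row_width - x;
    memset(row + x, color, (size_t)len);
    return x + len;
}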
qemu | aea2a33c73f28ecd8f10b242ecadddcc79c1c28b | 1 | void bdrv_eject(BlockDriverState *bs, int eject_flag)
{
BlockDriver *drv = bs->drv;
int ret;
if (!drv || !drv->bdrv_eject) {
ret = -ENOTSUP;
} else {
ret = drv->bdrv_eject(bs, eject_flag);
}
if (ret == -ENOTSUP) {
if (eject_flag)
bdrv_close(bs);
}
}
| 22,042 |
qemu | f2d089425d43735b5369f70f3a36b712440578e5 | 1 | static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
hwaddr addr,
uint64_t *value,
unsigned size,
unsigned shift,
uint64_t mask,
MemTxAttrs attrs)
{
uint64_t tmp;
tmp = (*value >> shift) & mask;
if (mr->subpage) {
trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
} else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
}
return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs);
} | 22,044 |
qemu | 07b026fd82d6cf11baf7d7c603c4f5f6070b35bf | 1 | static void usbredir_realize(USBDevice *udev, Error **errp)
{
USBRedirDevice *dev = USB_REDIRECT(udev);
int i;
if (!qemu_chr_fe_get_driver(&dev->cs)) {
error_setg(errp, QERR_MISSING_PARAMETER, "chardev");
return;
}
if (dev->filter_str) {
i = usbredirfilter_string_to_rules(dev->filter_str, ":", "|",
&dev->filter_rules,
&dev->filter_rules_count);
if (i) {
error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "filter",
"a usb device filter string");
return;
}
}
dev->chardev_close_bh = qemu_bh_new(usbredir_chardev_close_bh, dev);
dev->device_reject_bh = qemu_bh_new(usbredir_device_reject_bh, dev);
dev->attach_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, usbredir_do_attach, dev);
packet_id_queue_init(&dev->cancelled, dev, "cancelled");
packet_id_queue_init(&dev->already_in_flight, dev, "already-in-flight");
usbredir_init_endpoints(dev);
/* We'll do the attach once we receive the speed from the usb-host */
udev->auto_attach = 0;
/* Will be cleared during setup when we find conflicts */
dev->compatible_speedmask = USB_SPEED_MASK_FULL | USB_SPEED_MASK_HIGH;
/* Let the backend know we are ready */
qemu_chr_fe_set_handlers(&dev->cs, usbredir_chardev_can_read,
usbredir_chardev_read, usbredir_chardev_event,
dev, NULL, true);
qemu_add_vm_change_state_handler(usbredir_vm_state_change, dev);
}
| 22,046 |
qemu | d63fb193e71644a073b77ff5ac6f1216f2f6cf6e | 1 | static void coroutine_fn v9fs_lcreate(void *opaque)
{
int32_t dfid, flags, mode;
gid_t gid;
ssize_t err = 0;
ssize_t offset = 7;
V9fsString name;
V9fsFidState *fidp;
struct stat stbuf;
V9fsQID qid;
int32_t iounit;
V9fsPDU *pdu = opaque;
v9fs_string_init(&name);
err = pdu_unmarshal(pdu, offset, "dsddd", &dfid,
&name, &flags, &mode, &gid);
    if (err < 0) {
        goto out_nofid;
    }
    trace_v9fs_lcreate(pdu->tag, pdu->id, dfid, flags, mode, gid);
    if (name_is_illegal(name.data)) {
        err = -ENOENT;
        goto out_nofid;
    }
    if (!strcmp(".", name.data) || !strcmp("..", name.data)) {
        err = -EEXIST;
        goto out_nofid;
    }
    fidp = get_fid(pdu, dfid);
    if (fidp == NULL) {
        err = -ENOENT;
        goto out_nofid;
    }
    flags = get_dotl_openflags(pdu->s, flags);
    err = v9fs_co_open2(pdu, fidp, &name, gid,
                        flags | O_CREAT, mode, &stbuf);
    if (err < 0) {
        goto out;
    }
    fidp->fid_type = P9_FID_FILE;
    fidp->open_flags = flags;
    if (flags & O_EXCL) {
        /*
         * We let the host file system do O_EXCL check
         * We should not reclaim such fd
         */
        fidp->flags |= FID_NON_RECLAIMABLE;
    }
    iounit = get_iounit(pdu, &fidp->path);
    stat_to_qid(&stbuf, &qid);
    err = pdu_marshal(pdu, offset, "Qd", &qid, iounit);
    if (err < 0) {
        goto out;
    }
    err += offset;
    trace_v9fs_lcreate_return(pdu->tag, pdu->id,
                              qid.type, qid.version, qid.path, iounit);
out:
    put_fid(pdu, fidp);
out_nofid:
    pdu_complete(pdu, err);
    v9fs_string_free(&name);
}
| 22,048 |
qemu | f80256b7eebfbe20683b3a2b2720ad9991313761 | 1 | static inline int array_ensure_allocated(array_t* array, int index)
{
if((index + 1) * array->item_size > array->size) {
int new_size = (index + 32) * array->item_size;
array->pointer = g_realloc(array->pointer, new_size);
if (!array->pointer)
return -1;
array->size = new_size;
array->next = index + 1;
}
return 0;
} | 22,050 |
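A hedged aside on the growth arithmetic above: `(index + 1) * array->item_size` and `(index + 32) * array->item_size` are computed without any overflow check before the reallocation. The sketch below shows one way to guard that multiplication; vec_t and vec_ensure are invented names rather than QEMU APIs, and plain realloc stands in for g_realloc.
#include <stdint.h>
#include <stdlib.h>

typedef struct { void *data; size_t item_size; size_t size; } vec_t;

/* Grow the array so that slot 'index' exists, rejecting arithmetic overflow
 * before the multiplication instead of after. Returns 0 on success. */
static int vec_ensure(vec_t *v, size_t index)
{
    size_t max_count = v->item_size ? SIZE_MAX / v->item_size : 0;

    if (max_count < 32 || index > max_count - 32)
        return -1;                    /* (index + 32) * item_size would wrap */
    if ((index + 1) * v->item_size <= v->size)
        return 0;                     /* already large enough */
    void *p = realloc(v->data, (index + 32) * v->item_size);
    if (!p)
        return -1;
    v->data = p;
    v->size = (index + 32) * v->item_size;
    return 0;
}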
qemu | b5fc09ae52e3d19e01126715c998eb6587795b56 | 1 | static inline TranslationBlock *tb_find_fast(void)
{
TranslationBlock *tb;
target_ulong cs_base, pc;
uint64_t flags;
/* we record a subset of the CPU state. It will
always be the same before a given translated block
is executed. */
#if defined(TARGET_I386)
flags = env->hflags;
flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
flags |= env->intercept;
cs_base = env->segs[R_CS].base;
pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
flags = env->thumb | (env->vfp.vec_len << 1)
| (env->vfp.vec_stride << 4);
if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
flags |= (1 << 6);
if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
flags |= (1 << 7);
flags |= (env->condexec_bits << 8);
cs_base = 0;
pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
// Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
| (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
// FPU enable . Supervisor
flags = (env->psref << 4) | env->psrs;
#endif
cs_base = env->npc;
pc = env->pc;
#elif defined(TARGET_PPC)
flags = env->hflags;
cs_base = 0;
pc = env->nip;
#elif defined(TARGET_MIPS)
flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
cs_base = 0;
pc = env->PC[env->current_tc];
#elif defined(TARGET_M68K)
flags = (env->fpcr & M68K_FPCR_PREC) /* Bit 6 */
| (env->sr & SR_S) /* Bit 13 */
| ((env->macsr >> 4) & 0xf); /* Bits 0-3 */
cs_base = 0;
pc = env->pc;
#elif defined(TARGET_SH4)
flags = env->flags;
cs_base = 0;
pc = env->pc;
#elif defined(TARGET_ALPHA)
flags = env->ps;
cs_base = 0;
pc = env->pc;
#elif defined(TARGET_CRIS)
flags = env->pregs[PR_CCS] & (U_FLAG | X_FLAG);
cs_base = 0;
pc = env->pc;
#else
#error unsupported CPU
#endif
tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
tb->flags != flags, 0)) {
tb = tb_find_slow(pc, cs_base, flags);
/* Note: we do it here to avoid a gcc bug on Mac OS X when
doing it in tb_find_slow */
if (tb_invalidated_flag) {
/* as some TB could have been invalidated because
of memory exceptions while generating the code, we
must recompute the hash index here */
T0 = 0;
}
}
return tb;
}
| 22,051 |
qemu | 062ba099e01ff1474be98c0a4f3da351efab5d9d | 1 | static void arm_cpu_reset(CPUState *s)
{
ARMCPU *cpu = ARM_CPU(s);
ARMCPUClass *acc = ARM_CPU_GET_CLASS(cpu);
CPUARMState *env = &cpu->env;
acc->parent_reset(s);
memset(env, 0, offsetof(CPUARMState, end_reset_fields));
g_hash_table_foreach(cpu->cp_regs, cp_reg_reset, cpu);
g_hash_table_foreach(cpu->cp_regs, cp_reg_check_reset, cpu);
env->vfp.xregs[ARM_VFP_FPSID] = cpu->reset_fpsid;
env->vfp.xregs[ARM_VFP_MVFR0] = cpu->mvfr0;
env->vfp.xregs[ARM_VFP_MVFR1] = cpu->mvfr1;
env->vfp.xregs[ARM_VFP_MVFR2] = cpu->mvfr2;
cpu->powered_off = cpu->start_powered_off;
s->halted = cpu->start_powered_off;
if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
env->iwmmxt.cregs[ARM_IWMMXT_wCID] = 0x69051000 | 'Q';
}
if (arm_feature(env, ARM_FEATURE_AARCH64)) {
/* 64 bit CPUs always start in 64 bit mode */
env->aarch64 = 1;
#if defined(CONFIG_USER_ONLY)
env->pstate = PSTATE_MODE_EL0t;
/* Userspace expects access to DC ZVA, CTL_EL0 and the cache ops */
env->cp15.sctlr_el[1] |= SCTLR_UCT | SCTLR_UCI | SCTLR_DZE;
/* and to the FP/Neon instructions */
env->cp15.cpacr_el1 = deposit64(env->cp15.cpacr_el1, 20, 2, 3);
#else
/* Reset into the highest available EL */
if (arm_feature(env, ARM_FEATURE_EL3)) {
env->pstate = PSTATE_MODE_EL3h;
} else if (arm_feature(env, ARM_FEATURE_EL2)) {
env->pstate = PSTATE_MODE_EL2h;
} else {
env->pstate = PSTATE_MODE_EL1h;
}
env->pc = cpu->rvbar;
#endif
} else {
#if defined(CONFIG_USER_ONLY)
/* Userspace expects access to cp10 and cp11 for FP/Neon */
env->cp15.cpacr_el1 = deposit64(env->cp15.cpacr_el1, 20, 4, 0xf);
#endif
}
#if defined(CONFIG_USER_ONLY)
env->uncached_cpsr = ARM_CPU_MODE_USR;
/* For user mode we must enable access to coprocessors */
env->vfp.xregs[ARM_VFP_FPEXC] = 1 << 30;
if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
env->cp15.c15_cpar = 3;
} else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
env->cp15.c15_cpar = 1;
}
#else
/* SVC mode with interrupts disabled. */
env->uncached_cpsr = ARM_CPU_MODE_SVC;
env->daif = PSTATE_D | PSTATE_A | PSTATE_I | PSTATE_F;
if (arm_feature(env, ARM_FEATURE_M)) {
uint32_t initial_msp; /* Loaded from 0x0 */
uint32_t initial_pc; /* Loaded from 0x4 */
uint8_t *rom;
/* For M profile we store FAULTMASK and PRIMASK in the
* PSTATE F and I bits; these are both clear at reset.
*/
env->daif &= ~(PSTATE_I | PSTATE_F);
/* The reset value of this bit is IMPDEF, but ARM recommends
* that it resets to 1, so QEMU always does that rather than making
* it dependent on CPU model.
*/
env->v7m.ccr = R_V7M_CCR_STKALIGN_MASK;
/* Unlike A/R profile, M profile defines the reset LR value */
env->regs[14] = 0xffffffff;
/* Load the initial SP and PC from the vector table at address 0 */
rom = rom_ptr(0);
if (rom) {
/* Address zero is covered by ROM which hasn't yet been
* copied into physical memory.
*/
initial_msp = ldl_p(rom);
initial_pc = ldl_p(rom + 4);
} else {
/* Address zero not covered by a ROM blob, or the ROM blob
* is in non-modifiable memory and this is a second reset after
* it got copied into memory. In the latter case, rom_ptr
* will return a NULL pointer and we should use ldl_phys instead.
*/
initial_msp = ldl_phys(s->as, 0);
initial_pc = ldl_phys(s->as, 4);
}
env->regs[13] = initial_msp & 0xFFFFFFFC;
env->regs[15] = initial_pc & ~1;
env->thumb = initial_pc & 1;
}
/* AArch32 has a hard highvec setting of 0xFFFF0000. If we are currently
* executing as AArch32 then check if highvecs are enabled and
* adjust the PC accordingly.
*/
if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
env->regs[15] = 0xFFFF0000;
}
env->vfp.xregs[ARM_VFP_FPEXC] = 0;
#endif
set_flush_to_zero(1, &env->vfp.standard_fp_status);
set_flush_inputs_to_zero(1, &env->vfp.standard_fp_status);
set_default_nan_mode(1, &env->vfp.standard_fp_status);
set_float_detect_tininess(float_tininess_before_rounding,
&env->vfp.fp_status);
set_float_detect_tininess(float_tininess_before_rounding,
&env->vfp.standard_fp_status);
#ifndef CONFIG_USER_ONLY
if (kvm_enabled()) {
kvm_arm_reset_vcpu(cpu);
}
#endif
hw_breakpoint_update_all(cpu);
hw_watchpoint_update_all(cpu);
}
| 22,052 |
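As a small illustration of the M-profile branch above: reset loads the initial stack pointer and program counter from the first two words of the vector table, forces the SP onto a 4-byte boundary, and takes the Thumb bit from bit 0 of the PC. A self-contained sketch of just that step (toy_m_profile_reset is an invented name):
#include <stdint.h>

static void toy_m_profile_reset(const uint32_t vector_table[2],
                                uint32_t *sp, uint32_t *pc, int *thumb)
{
    *sp    = vector_table[0] & 0xFFFFFFFCu;  /* initial MSP, low bits cleared */
    *pc    = vector_table[1] & ~1u;          /* reset handler address */
    *thumb = (int)(vector_table[1] & 1u);    /* bit 0 selects Thumb state */
}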
qemu | c3b08d0e05f381b0a02647038d454eecf51ae014 | 1 | void tcg_add_target_add_op_defs(const TCGTargetOpDef *tdefs)
{
TCGOpcode op;
TCGOpDef *def;
const char *ct_str;
int i, nb_args;
for(;;) {
if (tdefs->op == (TCGOpcode)-1)
break;
op = tdefs->op;
assert(op >= 0 && op < NB_OPS);
def = &tcg_op_defs[op];
#if defined(CONFIG_DEBUG_TCG)
/* Duplicate entry in op definitions? */
assert(!def->used);
def->used = 1;
#endif
nb_args = def->nb_iargs + def->nb_oargs;
for(i = 0; i < nb_args; i++) {
ct_str = tdefs->args_ct_str[i];
/* Incomplete TCGTargetOpDef entry? */
assert(ct_str != NULL);
tcg_regset_clear(def->args_ct[i].u.regs);
def->args_ct[i].ct = 0;
if (ct_str[0] >= '0' && ct_str[0] <= '9') {
int oarg;
oarg = ct_str[0] - '0';
assert(oarg < def->nb_oargs);
assert(def->args_ct[oarg].ct & TCG_CT_REG);
/* TCG_CT_ALIAS is for the output arguments. The input
argument is tagged with TCG_CT_IALIAS. */
def->args_ct[i] = def->args_ct[oarg];
def->args_ct[oarg].ct = TCG_CT_ALIAS;
def->args_ct[oarg].alias_index = i;
def->args_ct[i].ct |= TCG_CT_IALIAS;
def->args_ct[i].alias_index = oarg;
} else {
for(;;) {
if (*ct_str == '\0')
break;
switch(*ct_str) {
case 'i':
def->args_ct[i].ct |= TCG_CT_CONST;
ct_str++;
break;
default:
if (target_parse_constraint(&def->args_ct[i], &ct_str) < 0) {
fprintf(stderr, "Invalid constraint '%s' for arg %d of operation '%s'\n",
ct_str, i, def->name);
exit(1);
}
}
}
}
}
/* TCGTargetOpDef entry with too much information? */
assert(i == TCG_MAX_OP_ARGS || tdefs->args_ct_str[i] == NULL);
        /* sort the constraints (XXX: this is just a heuristic) */
sort_constraints(def, 0, def->nb_oargs);
sort_constraints(def, def->nb_oargs, def->nb_iargs);
#if 0
{
int i;
printf("%s: sorted=", def->name);
for(i = 0; i < def->nb_oargs + def->nb_iargs; i++)
printf(" %d", def->sorted_args[i]);
printf("\n");
}
#endif
tdefs++;
}
#if defined(CONFIG_DEBUG_TCG)
i = 0;
for (op = 0; op < ARRAY_SIZE(tcg_op_defs); op++) {
if (op < INDEX_op_call || op == INDEX_op_debug_insn_start) {
/* Wrong entry in op definitions? */
if (tcg_op_defs[op].used) {
fprintf(stderr, "Invalid op definition for %s\n",
tcg_op_defs[op].name);
i = 1;
}
} else {
/* Missing entry in op definitions? */
if (!tcg_op_defs[op].used) {
fprintf(stderr, "Missing op definition for %s\n",
tcg_op_defs[op].name);
i = 1;
}
}
}
if (i == 1) {
tcg_abort();
}
#endif
}
| 22,053 |
FFmpeg | 3359246d9a47c3f4418d994853efe17324a0159b | 1 | static int mxf_read_seek(AVFormatContext *s, int stream_index, int64_t sample_time, int flags)
{
AVStream *st = s->streams[stream_index];
int64_t seconds;
if (!s->bit_rate)
return AVERROR_INVALIDDATA;
if (sample_time < 0)
sample_time = 0;
seconds = av_rescale(sample_time, st->time_base.num, st->time_base.den);
avio_seek(s->pb, (s->bit_rate * seconds) >> 3, SEEK_SET);
ff_update_cur_dts(s, st, sample_time);
return 0;
}
| 22,054 |
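A worked version of the constant-bitrate seek arithmetic above, with av_rescale replaced by plain 64-bit math for illustration (cbr_byte_offset is an invented helper; the real code uses av_rescale precisely because a naive multiply can overflow for large timestamps):
#include <stdint.h>

/* timestamp (in stream time base num/den) -> seconds -> byte offset,
 * assuming bit_rate is in bits per second. */
static int64_t cbr_byte_offset(int64_t ts, int tb_num, int tb_den,
                               int64_t bit_rate)
{
    int64_t seconds = ts * tb_num / tb_den;   /* may overflow for extreme ts */
    return (bit_rate * seconds) >> 3;         /* bits -> bytes */
}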
qemu | 12de9a396acbc95e25c5d60ed097cc55777eaaed | 1 | static inline int _find_pte (mmu_ctx_t *ctx, int is_64b, int h, int rw)
{
target_ulong base, pte0, pte1;
int i, good = -1;
int ret, r;
ret = -1; /* No entry found */
base = ctx->pg_addr[h];
for (i = 0; i < 8; i++) {
#if defined(TARGET_PPC64)
if (is_64b) {
pte0 = ldq_phys(base + (i * 16));
pte1 = ldq_phys(base + (i * 16) + 8);
r = pte64_check(ctx, pte0, pte1, h, rw);
} else
#endif
{
pte0 = ldl_phys(base + (i * 8));
pte1 = ldl_phys(base + (i * 8) + 4);
r = pte32_check(ctx, pte0, pte1, h, rw);
}
#if defined (DEBUG_MMU)
if (loglevel != 0) {
fprintf(logfile, "Load pte from 0x" ADDRX " => 0x" ADDRX
" 0x" ADDRX " %d %d %d 0x" ADDRX "\n",
base + (i * 8), pte0, pte1,
(int)(pte0 >> 31), h, (int)((pte0 >> 6) & 1), ctx->ptem);
}
#endif
switch (r) {
case -3:
/* PTE inconsistency */
return -1;
case -2:
/* Access violation */
ret = -2;
good = i;
break;
case -1:
default:
/* No PTE match */
break;
case 0:
/* access granted */
            /* XXX: we should go on looping to check all PTEs for consistency,
             *      but we can speed up the whole thing since the
             *      result would be undefined if PTEs are not consistent.
*/
ret = 0;
good = i;
goto done;
}
}
if (good != -1) {
done:
#if defined (DEBUG_MMU)
if (loglevel != 0) {
fprintf(logfile, "found PTE at addr 0x" PADDRX " prot=0x%01x "
"ret=%d\n",
ctx->raddr, ctx->prot, ret);
}
#endif
/* Update page flags */
pte1 = ctx->raddr;
if (pte_update_flags(ctx, &pte1, ret, rw) == 1) {
#if defined(TARGET_PPC64)
if (is_64b) {
stq_phys_notdirty(base + (good * 16) + 8, pte1);
} else
#endif
{
stl_phys_notdirty(base + (good * 8) + 4, pte1);
}
}
}
return ret;
}
| 22,055 |
qemu | 94e7340b5db8bce7866e44e700ffa8fd26585c7e | 1 | static int nbd_send_reply(int csock, struct nbd_reply *reply)
{
uint8_t buf[4 + 4 + 8];
/* Reply
[ 0 .. 3] magic (NBD_REPLY_MAGIC)
[ 4 .. 7] error (0 == no error)
       [ 8 .. 15]    handle
*/
cpu_to_be32w((uint32_t*)buf, NBD_REPLY_MAGIC);
cpu_to_be32w((uint32_t*)(buf + 4), reply->error);
cpu_to_be64w((uint64_t*)(buf + 8), reply->handle);
TRACE("Sending response to client");
if (write_sync(csock, buf, sizeof(buf)) != sizeof(buf)) {
LOG("writing to socket failed");
errno = EINVAL;
return -1;
}
return 0;
}
| 22,056 |
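A standalone sketch of the 16-byte reply header layout documented in the comment above (big-endian magic, error and handle at offsets 0, 4 and 8). put_be32, put_be64 and build_reply are invented helpers, and the magic value is assumed for illustration rather than taken from the QEMU headers.
#include <stdint.h>

#define TOY_NBD_REPLY_MAGIC 0x67446698u      /* assumed value, for illustration */

static void put_be32(uint8_t *p, uint32_t v)
{
    p[0] = (uint8_t)(v >> 24); p[1] = (uint8_t)(v >> 16);
    p[2] = (uint8_t)(v >> 8);  p[3] = (uint8_t)v;
}

static void put_be64(uint8_t *p, uint64_t v)
{
    put_be32(p, (uint32_t)(v >> 32));
    put_be32(p + 4, (uint32_t)v);
}

/* [0..3] magic, [4..7] error, [8..15] handle */
static void build_reply(uint8_t buf[16], uint32_t error, uint64_t handle)
{
    put_be32(buf + 0, TOY_NBD_REPLY_MAGIC);
    put_be32(buf + 4, error);
    put_be64(buf + 8, handle);
}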
FFmpeg | e96ecaf053d8d606e38ae2e56ba6cf58875021b0 | 1 | static int encode_apng(AVCodecContext *avctx, AVPacket *pkt,
const AVFrame *pict, int *got_packet)
{
PNGEncContext *s = avctx->priv_data;
int ret;
int enc_row_size;
size_t max_packet_size;
APNGFctlChunk fctl_chunk;
if (pict && avctx->codec_id == AV_CODEC_ID_APNG && s->color_type == PNG_COLOR_TYPE_PALETTE) {
uint32_t checksum = ~av_crc(av_crc_get_table(AV_CRC_32_IEEE_LE), ~0U, pict->data[1], 256 * sizeof(uint32_t));
if (avctx->frame_number == 0) {
s->palette_checksum = checksum;
} else if (checksum != s->palette_checksum) {
av_log(avctx, AV_LOG_ERROR,
"Input contains more than one unique palette. APNG does not support multiple palettes.\n");
return -1;
}
}
enc_row_size = deflateBound(&s->zstream, (avctx->width * s->bits_per_pixel + 7) >> 3);
max_packet_size =
AV_INPUT_BUFFER_MIN_SIZE + // headers
avctx->height * (
enc_row_size +
(4 + 12) * (((int64_t)enc_row_size + IOBUF_SIZE - 1) / IOBUF_SIZE) // fdAT * ceil(enc_row_size / IOBUF_SIZE)
);
if (max_packet_size > INT_MAX)
return AVERROR(ENOMEM);
if (avctx->frame_number == 0) {
s->bytestream = avctx->extradata = av_malloc(FF_MIN_BUFFER_SIZE);
if (!avctx->extradata)
return AVERROR(ENOMEM);
ret = encode_headers(avctx, pict);
if (ret < 0)
return ret;
avctx->extradata_size = s->bytestream - avctx->extradata;
s->last_frame_packet = av_malloc(max_packet_size);
if (!s->last_frame_packet)
return AVERROR(ENOMEM);
} else if (s->last_frame) {
ret = ff_alloc_packet2(avctx, pkt, max_packet_size, 0);
if (ret < 0)
return ret;
memcpy(pkt->data, s->last_frame_packet, s->last_frame_packet_size);
pkt->size = s->last_frame_packet_size;
pkt->pts = pkt->dts = s->last_frame->pts;
}
if (pict) {
s->bytestream_start =
s->bytestream = s->last_frame_packet;
s->bytestream_end = s->bytestream + max_packet_size;
// We're encoding the frame first, so we have to do a bit of shuffling around
// to have the image data write to the correct place in the buffer
fctl_chunk.sequence_number = s->sequence_number;
++s->sequence_number;
s->bytestream += 26 + 12;
ret = apng_encode_frame(avctx, pict, &fctl_chunk, &s->last_frame_fctl);
if (ret < 0)
return ret;
fctl_chunk.delay_num = 0; // delay filled in during muxing
fctl_chunk.delay_den = 0;
} else {
s->last_frame_fctl.dispose_op = APNG_DISPOSE_OP_NONE;
}
if (s->last_frame) {
uint8_t* last_fctl_chunk_start = pkt->data;
uint8_t buf[26];
AV_WB32(buf + 0, s->last_frame_fctl.sequence_number);
AV_WB32(buf + 4, s->last_frame_fctl.width);
AV_WB32(buf + 8, s->last_frame_fctl.height);
AV_WB32(buf + 12, s->last_frame_fctl.x_offset);
AV_WB32(buf + 16, s->last_frame_fctl.y_offset);
AV_WB16(buf + 20, s->last_frame_fctl.delay_num);
AV_WB16(buf + 22, s->last_frame_fctl.delay_den);
buf[24] = s->last_frame_fctl.dispose_op;
buf[25] = s->last_frame_fctl.blend_op;
png_write_chunk(&last_fctl_chunk_start, MKTAG('f', 'c', 'T', 'L'), buf, 26);
*got_packet = 1;
}
if (pict) {
if (!s->last_frame) {
s->last_frame = av_frame_alloc();
if (!s->last_frame)
return AVERROR(ENOMEM);
} else if (s->last_frame_fctl.dispose_op != APNG_DISPOSE_OP_PREVIOUS) {
if (!s->prev_frame) {
s->prev_frame = av_frame_alloc();
if (!s->prev_frame)
return AVERROR(ENOMEM);
s->prev_frame->format = pict->format;
s->prev_frame->width = pict->width;
s->prev_frame->height = pict->height;
if ((ret = av_frame_get_buffer(s->prev_frame, 32)) < 0)
return ret;
}
// Do disposal, but not blending
memcpy(s->prev_frame->data[0], s->last_frame->data[0],
s->last_frame->linesize[0] * s->last_frame->height);
if (s->last_frame_fctl.dispose_op == APNG_DISPOSE_OP_BACKGROUND) {
uint32_t y;
uint8_t bpp = (s->bits_per_pixel + 7) >> 3;
for (y = s->last_frame_fctl.y_offset; y < s->last_frame_fctl.y_offset + s->last_frame_fctl.height; ++y) {
size_t row_start = s->last_frame->linesize[0] * y + bpp * s->last_frame_fctl.x_offset;
memset(s->prev_frame->data[0] + row_start, 0, bpp * s->last_frame_fctl.width);
}
}
}
av_frame_unref(s->last_frame);
ret = av_frame_ref(s->last_frame, (AVFrame*)pict);
if (ret < 0)
return ret;
s->last_frame_fctl = fctl_chunk;
s->last_frame_packet_size = s->bytestream - s->bytestream_start;
} else {
av_frame_free(&s->last_frame);
}
return 0;
}
| 22,057 |
qemu | 69c07db04625cb243db6e8a0ac0a8e3973dd961a | 1 | static int tpm_passthrough_open_sysfs_cancel(TPMPassthruState *tpm_pt)
{
int fd = -1;
char *dev;
char path[PATH_MAX];
if (tpm_pt->options->cancel_path) {
fd = qemu_open(tpm_pt->options->cancel_path, O_WRONLY);
if (fd < 0) {
error_report("Could not open TPM cancel path : %s",
strerror(errno));
}
return fd;
}
dev = strrchr(tpm_pt->tpm_dev, '/');
if (dev) {
dev++;
if (snprintf(path, sizeof(path), "/sys/class/misc/%s/device/cancel",
dev) < sizeof(path)) {
fd = qemu_open(path, O_WRONLY);
if (fd >= 0) {
tpm_pt->options->cancel_path = g_strdup(path);
} else {
error_report("tpm_passthrough: Could not open TPM cancel "
"path %s : %s", path, strerror(errno));
}
}
} else {
error_report("tpm_passthrough: Bad TPM device path %s",
tpm_pt->tpm_dev);
}
return fd;
}
| 22,059 |
FFmpeg | 6a697b42d0c8469c05e2a1a0920d8539ba7b068d | 1 | int ff_combine_frame(ParseContext *pc, int next, const uint8_t **buf, int *buf_size)
{
if(pc->overread){
av_dlog(NULL, "overread %d, state:%X next:%d index:%d o_index:%d\n",
pc->overread, pc->state, next, pc->index, pc->overread_index);
av_dlog(NULL, "%X %X %X %X\n", (*buf)[0], (*buf)[1], (*buf)[2], (*buf)[3]);
}
/* Copy overread bytes from last frame into buffer. */
for(; pc->overread>0; pc->overread--){
pc->buffer[pc->index++]= pc->buffer[pc->overread_index++];
}
/* flush remaining if EOF */
if(!*buf_size && next == END_NOT_FOUND){
next= 0;
}
pc->last_index= pc->index;
    /* copy into buffer and return */
if(next == END_NOT_FOUND){
void* new_buffer = av_fast_realloc(pc->buffer, &pc->buffer_size, (*buf_size) + pc->index + FF_INPUT_BUFFER_PADDING_SIZE);
if(!new_buffer)
return AVERROR(ENOMEM);
pc->buffer = new_buffer;
memcpy(&pc->buffer[pc->index], *buf, *buf_size);
pc->index += *buf_size;
return -1;
}
*buf_size=
pc->overread_index= pc->index + next;
/* append to buffer */
if(pc->index){
void* new_buffer = av_fast_realloc(pc->buffer, &pc->buffer_size, next + pc->index + FF_INPUT_BUFFER_PADDING_SIZE);
if(!new_buffer)
return AVERROR(ENOMEM);
pc->buffer = new_buffer;
memcpy(&pc->buffer[pc->index], *buf, next + FF_INPUT_BUFFER_PADDING_SIZE );
pc->index = 0;
*buf= pc->buffer;
}
/* store overread bytes */
for(;next < 0; next++){
pc->state = (pc->state<<8) | pc->buffer[pc->last_index + next];
pc->state64 = (pc->state64<<8) | pc->buffer[pc->last_index + next];
pc->overread++;
}
if(pc->overread){
av_dlog(NULL, "overread %d, state:%X next:%d index:%d o_index:%d\n",
pc->overread, pc->state, next, pc->index, pc->overread_index);
av_dlog(NULL, "%X %X %X %X\n", (*buf)[0], (*buf)[1],(*buf)[2],(*buf)[3]);
}
return 0;
} | 22,061 |
FFmpeg | de64d8cf171c6ecdca22d57f0bdd7efec95d0c0e | 1 | static void qtrle_decode_1bpp(QtrleContext *s, int stream_ptr, int row_ptr, int lines_to_change)
{
int rle_code;
int pixel_ptr = 0;
int row_inc = s->frame.linesize[0];
unsigned char pi0, pi1; /* 2 8-pixel values */
unsigned char *rgb = s->frame.data[0];
int pixel_limit = s->frame.linesize[0] * s->avctx->height;
int skip;
while (lines_to_change) {
CHECK_STREAM_PTR(2);
skip = s->buf[stream_ptr++];
rle_code = (signed char)s->buf[stream_ptr++];
if (rle_code == 0)
break;
if(skip & 0x80) {
lines_to_change--;
row_ptr += row_inc;
pixel_ptr = row_ptr + 2 * (skip & 0x7f);
} else
pixel_ptr += 2 * skip;
CHECK_PIXEL_PTR(0); /* make sure pixel_ptr is positive */
if (rle_code < 0) {
/* decode the run length code */
rle_code = -rle_code;
/* get the next 2 bytes from the stream, treat them as groups
* of 8 pixels, and output them rle_code times */
CHECK_STREAM_PTR(2);
pi0 = s->buf[stream_ptr++];
pi1 = s->buf[stream_ptr++];
CHECK_PIXEL_PTR(rle_code * 2);
while (rle_code--) {
rgb[pixel_ptr++] = pi0;
rgb[pixel_ptr++] = pi1;
}
} else {
/* copy the same pixel directly to output 2 times */
rle_code *= 2;
CHECK_STREAM_PTR(rle_code);
CHECK_PIXEL_PTR(rle_code);
while (rle_code--)
rgb[pixel_ptr++] = s->buf[stream_ptr++];
}
}
}
| 22,062 |
qemu | ce5b1bbf624b977a55ff7f85bb3871682d03baff | 1 | static void lm32_cpu_initfn(Object *obj)
{
CPUState *cs = CPU(obj);
LM32CPU *cpu = LM32_CPU(obj);
CPULM32State *env = &cpu->env;
static bool tcg_initialized;
cs->env_ptr = env;
cpu_exec_init(cs, &error_abort);
env->flags = 0;
if (tcg_enabled() && !tcg_initialized) {
tcg_initialized = true;
lm32_translate_init();
}
}
| 22,063 |
qemu | 0a73336d96397c80881219d080518fac6f1ecacb | 1 | static int find_and_check_chardev(CharDriverState **chr,
char *chr_name,
Error **errp)
{
CompareChardevProps props;
*chr = qemu_chr_find(chr_name);
if (*chr == NULL) {
error_setg(errp, "Device '%s' not found",
chr_name);
return 1;
}
memset(&props, 0, sizeof(props));
if (qemu_opt_foreach((*chr)->opts, compare_chardev_opts, &props, errp)) {
return 1;
}
if (!props.is_socket) {
error_setg(errp, "chardev \"%s\" is not a tcp socket",
chr_name);
return 1;
}
return 0;
}
| 22,064 |
qemu | 949fc82314cc84162e64a5323764527a542421ce | 0 | static void set_bit(Object *obj, Visitor *v, void *opaque,
const char *name, Error **errp)
{
DeviceState *dev = DEVICE(obj);
Property *prop = opaque;
Error *local_err = NULL;
bool value;
if (dev->realized) {
qdev_prop_set_after_realize(dev, name, errp);
return;
}
visit_type_bool(v, &value, name, &local_err);
if (local_err) {
error_propagate(errp, local_err);
return;
}
bit_prop_set(dev, prop, value);
}
| 22,066 |
qemu | fe62089563ffc6a42f16ff28a6b6be34d2697766 | 0 | static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong eip)
{
target_ulong pc = s->cs_base + eip;
if (use_goto_tb(s, pc)) {
/* jump to same page: we can use a direct jump */
tcg_gen_goto_tb(tb_num);
gen_jmp_im(eip);
tcg_gen_exit_tb((uintptr_t)s->tb + tb_num);
} else {
/* jump to another page: currently not optimized */
gen_jmp_im(eip);
gen_eob(s);
}
}
| 22,067 |
qemu | e42349cbd6afd1f6838e719184e3d07190c02de7 | 0 | static const char *target_parse_constraint(TCGArgConstraint *ct,
const char *ct_str, TCGType type)
{
switch (*ct_str++) {
case 'r': /* all registers */
ct->ct |= TCG_CT_REG;
tcg_regset_set32(ct->u.regs, 0, 0xffff);
break;
case 'L': /* qemu_ld/st constraint */
ct->ct |= TCG_CT_REG;
tcg_regset_set32(ct->u.regs, 0, 0xffff);
tcg_regset_reset_reg (ct->u.regs, TCG_REG_R2);
tcg_regset_reset_reg (ct->u.regs, TCG_REG_R3);
tcg_regset_reset_reg (ct->u.regs, TCG_REG_R4);
break;
case 'a': /* force R2 for division */
ct->ct |= TCG_CT_REG;
tcg_regset_clear(ct->u.regs);
tcg_regset_set_reg(ct->u.regs, TCG_REG_R2);
break;
case 'b': /* force R3 for division */
ct->ct |= TCG_CT_REG;
tcg_regset_clear(ct->u.regs);
tcg_regset_set_reg(ct->u.regs, TCG_REG_R3);
break;
case 'A':
ct->ct |= TCG_CT_CONST_S33;
break;
case 'I':
ct->ct |= TCG_CT_CONST_S16;
break;
case 'J':
ct->ct |= TCG_CT_CONST_S32;
break;
case 'O':
ct->ct |= TCG_CT_CONST_ORI;
break;
case 'X':
ct->ct |= TCG_CT_CONST_XORI;
break;
case 'C':
/* ??? We have no insight here into whether the comparison is
signed or unsigned. The COMPARE IMMEDIATE insn uses a 32-bit
signed immediate, and the COMPARE LOGICAL IMMEDIATE insn uses
a 32-bit unsigned immediate. If we were to use the (semi)
obvious "val == (int32_t)val" we would be enabling unsigned
comparisons vs very large numbers. The only solution is to
take the intersection of the ranges. */
/* ??? Another possible solution is to simply lie and allow all
constants here and force the out-of-range values into a temp
register in tgen_cmp when we have knowledge of the actual
comparison code in use. */
ct->ct |= TCG_CT_CONST_U31;
break;
case 'Z':
ct->ct |= TCG_CT_CONST_ZERO;
break;
default:
return NULL;
}
return ct_str;
}
| 22,068 |
qemu | a8170e5e97ad17ca169c64ba87ae2f53850dab4c | 0 | static uint64_t megasas_port_read(void *opaque, target_phys_addr_t addr,
unsigned size)
{
return megasas_mmio_read(opaque, addr & 0xff, size);
}
| 22,069 |
qemu | 3826121d9298cde1d29ead05910e1f40125ee9b0 | 0 | void arm_debug_excp_handler(CPUState *cs)
{
/* Called by core code when a watchpoint or breakpoint fires;
* need to check which one and raise the appropriate exception.
*/
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
CPUWatchpoint *wp_hit = cs->watchpoint_hit;
if (wp_hit) {
if (wp_hit->flags & BP_CPU) {
cs->watchpoint_hit = NULL;
if (check_watchpoints(cpu)) {
bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != 0;
bool same_el = arm_debug_target_el(env) == arm_current_el(env);
if (extended_addresses_enabled(env)) {
env->exception.fsr = (1 << 9) | 0x22;
} else {
env->exception.fsr = 0x2;
}
env->exception.vaddress = wp_hit->hitaddr;
raise_exception(env, EXCP_DATA_ABORT,
syn_watchpoint(same_el, 0, wnr),
arm_debug_target_el(env));
} else {
cpu_resume_from_signal(cs, NULL);
}
}
} else {
uint64_t pc = is_a64(env) ? env->pc : env->regs[15];
bool same_el = (arm_debug_target_el(env) == arm_current_el(env));
/* (1) GDB breakpoints should be handled first.
* (2) Do not raise a CPU exception if no CPU breakpoint has fired,
* since singlestep is also done by generating a debug internal
* exception.
*/
if (cpu_breakpoint_test(cs, pc, BP_GDB)
|| !cpu_breakpoint_test(cs, pc, BP_CPU)) {
return;
}
if (extended_addresses_enabled(env)) {
env->exception.fsr = (1 << 9) | 0x22;
} else {
env->exception.fsr = 0x2;
}
/* FAR is UNKNOWN, so doesn't need setting */
raise_exception(env, EXCP_PREFETCH_ABORT,
syn_breakpoint(same_el),
arm_debug_target_el(env));
}
}
| 22,070 |
qemu | 34f1b23f8a61841bac06010e898221c6192a9035 | 0 | int load_elf_as(const char *filename,
uint64_t (*translate_fn)(void *, uint64_t),
void *translate_opaque, uint64_t *pentry, uint64_t *lowaddr,
uint64_t *highaddr, int big_endian, int elf_machine,
int clear_lsb, int data_swab, AddressSpace *as)
{
int fd, data_order, target_data_order, must_swab, ret = ELF_LOAD_FAILED;
uint8_t e_ident[EI_NIDENT];
fd = open(filename, O_RDONLY | O_BINARY);
if (fd < 0) {
perror(filename);
return -1;
}
if (read(fd, e_ident, sizeof(e_ident)) != sizeof(e_ident))
goto fail;
if (e_ident[0] != ELFMAG0 ||
e_ident[1] != ELFMAG1 ||
e_ident[2] != ELFMAG2 ||
e_ident[3] != ELFMAG3) {
ret = ELF_LOAD_NOT_ELF;
goto fail;
}
#ifdef HOST_WORDS_BIGENDIAN
data_order = ELFDATA2MSB;
#else
data_order = ELFDATA2LSB;
#endif
must_swab = data_order != e_ident[EI_DATA];
if (big_endian) {
target_data_order = ELFDATA2MSB;
} else {
target_data_order = ELFDATA2LSB;
}
if (target_data_order != e_ident[EI_DATA]) {
ret = ELF_LOAD_WRONG_ENDIAN;
goto fail;
}
lseek(fd, 0, SEEK_SET);
if (e_ident[EI_CLASS] == ELFCLASS64) {
ret = load_elf64(filename, fd, translate_fn, translate_opaque, must_swab,
pentry, lowaddr, highaddr, elf_machine, clear_lsb,
data_swab, as);
} else {
ret = load_elf32(filename, fd, translate_fn, translate_opaque, must_swab,
pentry, lowaddr, highaddr, elf_machine, clear_lsb,
data_swab, as);
}
fail:
close(fd);
return ret;
}
| 22,071 |
qemu | 2d2507ef23d2a28eaeea5507ff4ec68657f1792f | 0 | static int vhost_net_start_one(struct vhost_net *net,
VirtIODevice *dev,
int vq_index)
{
struct vhost_vring_file file = { };
int r;
if (net->dev.started) {
return 0;
}
net->dev.nvqs = 2;
net->dev.vqs = net->vqs;
net->dev.vq_index = vq_index;
r = vhost_dev_enable_notifiers(&net->dev, dev);
if (r < 0) {
goto fail_notifiers;
}
r = vhost_dev_start(&net->dev, dev);
if (r < 0) {
goto fail_start;
}
if (net->nc->info->poll) {
net->nc->info->poll(net->nc, false);
}
if (net->nc->info->type == NET_CLIENT_OPTIONS_KIND_TAP) {
qemu_set_fd_handler(net->backend, NULL, NULL, NULL);
file.fd = net->backend;
for (file.index = 0; file.index < net->dev.nvqs; ++file.index) {
const VhostOps *vhost_ops = net->dev.vhost_ops;
r = vhost_ops->vhost_call(&net->dev, VHOST_NET_SET_BACKEND,
&file);
if (r < 0) {
r = -errno;
goto fail;
}
}
}
return 0;
fail:
file.fd = -1;
if (net->nc->info->type == NET_CLIENT_OPTIONS_KIND_TAP) {
while (file.index-- > 0) {
const VhostOps *vhost_ops = net->dev.vhost_ops;
int r = vhost_ops->vhost_call(&net->dev, VHOST_NET_SET_BACKEND,
&file);
assert(r >= 0);
}
}
if (net->nc->info->poll) {
net->nc->info->poll(net->nc, true);
}
vhost_dev_stop(&net->dev, dev);
fail_start:
vhost_dev_disable_notifiers(&net->dev, dev);
fail_notifiers:
return r;
}
| 22,072 |
qemu | a8170e5e97ad17ca169c64ba87ae2f53850dab4c | 0 | mst_fpga_readb(void *opaque, target_phys_addr_t addr, unsigned size)
{
mst_irq_state *s = (mst_irq_state *) opaque;
switch (addr) {
case MST_LEDDAT1:
return s->leddat1;
case MST_LEDDAT2:
return s->leddat2;
case MST_LEDCTRL:
return s->ledctrl;
case MST_GPSWR:
return s->gpswr;
case MST_MSCWR1:
return s->mscwr1;
case MST_MSCWR2:
return s->mscwr2;
case MST_MSCWR3:
return s->mscwr3;
case MST_MSCRD:
return s->mscrd;
case MST_INTMSKENA:
return s->intmskena;
case MST_INTSETCLR:
return s->intsetclr;
case MST_PCMCIA0:
return s->pcmcia0;
case MST_PCMCIA1:
return s->pcmcia1;
default:
printf("Mainstone - mst_fpga_readb: Bad register offset "
"0x" TARGET_FMT_plx "\n", addr);
}
return 0;
}
| 22,073 |
qemu | ffba87862b37f1d7762370c8d31b09f6e359ff09 | 0 | int ppcmas_tlb_check(CPUState *env, ppcmas_tlb_t *tlb,
target_phys_addr_t *raddrp,
target_ulong address, uint32_t pid)
{
target_ulong mask;
uint32_t tlb_pid;
/* Check valid flag */
if (!(tlb->mas1 & MAS1_VALID)) {
return -1;
}
mask = ~(booke206_tlb_to_page_size(env, tlb) - 1);
LOG_SWTLB("%s: TLB ADDR=0x" TARGET_FMT_lx " PID=0x%x MAS1=0x%x MAS2=0x%"
PRIx64 " mask=0x" TARGET_FMT_lx " MAS7_3=0x%" PRIx64 " MAS8=%x\n",
__func__, address, pid, tlb->mas1, tlb->mas2, mask, tlb->mas7_3,
tlb->mas8);
/* Check PID */
tlb_pid = (tlb->mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT;
if (tlb_pid != 0 && tlb_pid != pid) {
return -1;
}
/* Check effective address */
if ((address & mask) != (tlb->mas2 & MAS2_EPN_MASK)) {
return -1;
}
*raddrp = (tlb->mas7_3 & mask) | (address & ~mask);
return 0;
}
| 22,074 |
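The address check above reduces to power-of-two page matching: derive a mask from the page size, compare the bits above the page offset, and splice the offset bits of the address into the translated result. A minimal sketch with an invented name (tlb_match):
#include <stdint.h>
#include <stdbool.h>

/* 'page_size' must be a power of two. 'epn' is the effective page address,
 * 'rpn' the real (physical) page address. */
static bool tlb_match(uint64_t epn, uint64_t rpn, uint64_t page_size,
                      uint64_t addr, uint64_t *raddr)
{
    uint64_t mask = ~(page_size - 1);
    if ((addr & mask) != (epn & mask))
        return false;
    *raddr = (rpn & mask) | (addr & ~mask);
    return true;
}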
FFmpeg | 52d2bcc78632f868cc4045c8f1cd03533418f0b6 | 0 | static int libopenjpeg_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
const AVFrame *frame, int *got_packet)
{
LibOpenJPEGContext *ctx = avctx->priv_data;
opj_cinfo_t *compress = ctx->compress;
opj_image_t *image = ctx->image;
opj_cio_t *stream = ctx->stream;
int cpyresult = 0;
int ret, len;
AVFrame *gbrframe;
switch (avctx->pix_fmt) {
case AV_PIX_FMT_RGB24:
case AV_PIX_FMT_RGBA:
case AV_PIX_FMT_GRAY8A:
cpyresult = libopenjpeg_copy_packed8(avctx, frame, image);
break;
case AV_PIX_FMT_XYZ12:
cpyresult = libopenjpeg_copy_packed12(avctx, frame, image);
break;
case AV_PIX_FMT_RGB48:
case AV_PIX_FMT_RGBA64:
cpyresult = libopenjpeg_copy_packed16(avctx, frame, image);
break;
case AV_PIX_FMT_GBR24P:
case AV_PIX_FMT_GBRP9:
case AV_PIX_FMT_GBRP10:
case AV_PIX_FMT_GBRP12:
case AV_PIX_FMT_GBRP14:
case AV_PIX_FMT_GBRP16:
gbrframe = av_frame_alloc();
if (!gbrframe)
return AVERROR(ENOMEM);
av_frame_ref(gbrframe, frame);
gbrframe->data[0] = frame->data[2]; // swap to be rgb
gbrframe->data[1] = frame->data[0];
gbrframe->data[2] = frame->data[1];
gbrframe->linesize[0] = frame->linesize[2];
gbrframe->linesize[1] = frame->linesize[0];
gbrframe->linesize[2] = frame->linesize[1];
if (avctx->pix_fmt == AV_PIX_FMT_GBR24P) {
cpyresult = libopenjpeg_copy_unpacked8(avctx, gbrframe, image);
} else {
cpyresult = libopenjpeg_copy_unpacked16(avctx, gbrframe, image);
}
av_frame_free(&gbrframe);
break;
case AV_PIX_FMT_GRAY8:
case AV_PIX_FMT_YUV410P:
case AV_PIX_FMT_YUV411P:
case AV_PIX_FMT_YUV420P:
case AV_PIX_FMT_YUV422P:
case AV_PIX_FMT_YUV440P:
case AV_PIX_FMT_YUV444P:
case AV_PIX_FMT_YUVA420P:
case AV_PIX_FMT_YUVA422P:
case AV_PIX_FMT_YUVA444P:
cpyresult = libopenjpeg_copy_unpacked8(avctx, frame, image);
break;
case AV_PIX_FMT_GRAY16:
case AV_PIX_FMT_YUV420P9:
case AV_PIX_FMT_YUV422P9:
case AV_PIX_FMT_YUV444P9:
case AV_PIX_FMT_YUVA420P9:
case AV_PIX_FMT_YUVA422P9:
case AV_PIX_FMT_YUVA444P9:
case AV_PIX_FMT_YUV444P10:
case AV_PIX_FMT_YUV422P10:
case AV_PIX_FMT_YUV420P10:
case AV_PIX_FMT_YUVA444P10:
case AV_PIX_FMT_YUVA422P10:
case AV_PIX_FMT_YUVA420P10:
case AV_PIX_FMT_YUV420P12:
case AV_PIX_FMT_YUV422P12:
case AV_PIX_FMT_YUV444P12:
case AV_PIX_FMT_YUV420P14:
case AV_PIX_FMT_YUV422P14:
case AV_PIX_FMT_YUV444P14:
case AV_PIX_FMT_YUV444P16:
case AV_PIX_FMT_YUV422P16:
case AV_PIX_FMT_YUV420P16:
case AV_PIX_FMT_YUVA444P16:
case AV_PIX_FMT_YUVA422P16:
case AV_PIX_FMT_YUVA420P16:
cpyresult = libopenjpeg_copy_unpacked16(avctx, frame, image);
break;
default:
av_log(avctx, AV_LOG_ERROR,
"The frame's pixel format '%s' is not supported\n",
av_get_pix_fmt_name(avctx->pix_fmt));
return AVERROR(EINVAL);
break;
}
if (!cpyresult) {
av_log(avctx, AV_LOG_ERROR,
"Could not copy the frame data to the internal image buffer\n");
return -1;
}
cio_seek(stream, 0);
if (!opj_encode(compress, stream, image, NULL)) {
av_log(avctx, AV_LOG_ERROR, "Error during the opj encode\n");
return -1;
}
len = cio_tell(stream);
if ((ret = ff_alloc_packet2(avctx, pkt, len)) < 0) {
return ret;
}
memcpy(pkt->data, stream->buffer, len);
pkt->flags |= AV_PKT_FLAG_KEY;
*got_packet = 1;
return 0;
}
| 22,075 |
qemu | b47d8efa9f430c332bf96ce6eede169eb48422ad | 0 | static VFIOGroup *vfio_get_group(int groupid, AddressSpace *as)
{
VFIOGroup *group;
char path[32];
struct vfio_group_status status = { .argsz = sizeof(status) };
QLIST_FOREACH(group, &group_list, next) {
if (group->groupid == groupid) {
/* Found it. Now is it already in the right context? */
if (group->container->space->as == as) {
return group;
} else {
error_report("vfio: group %d used in multiple address spaces",
group->groupid);
return NULL;
}
}
}
group = g_malloc0(sizeof(*group));
snprintf(path, sizeof(path), "/dev/vfio/%d", groupid);
group->fd = qemu_open(path, O_RDWR);
if (group->fd < 0) {
error_report("vfio: error opening %s: %m", path);
goto free_group_exit;
}
if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) {
error_report("vfio: error getting group status: %m");
goto close_fd_exit;
}
if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
error_report("vfio: error, group %d is not viable, please ensure "
"all devices within the iommu_group are bound to their "
"vfio bus driver.", groupid);
goto close_fd_exit;
}
group->groupid = groupid;
QLIST_INIT(&group->device_list);
if (vfio_connect_container(group, as)) {
error_report("vfio: failed to setup container for group %d", groupid);
goto close_fd_exit;
}
if (QLIST_EMPTY(&group_list)) {
qemu_register_reset(vfio_pci_reset_handler, NULL);
}
QLIST_INSERT_HEAD(&group_list, group, next);
vfio_kvm_device_add_group(group);
return group;
close_fd_exit:
close(group->fd);
free_group_exit:
g_free(group);
return NULL;
}
| 22,076 |
qemu | 172061a0a0d98c974ea8d5ed715195237bc44225 | 0 | int main_loop_init(void)
{
int ret;
qemu_mutex_lock_iothread();
ret = qemu_signal_init();
if (ret) {
return ret;
}
/* Note eventfd must be drained before signalfd handlers run */
ret = qemu_event_init();
if (ret) {
return ret;
}
return 0;
}
| 22,077 |
qemu | a153bf52b37e148f052b0869600877130671a03d | 0 | bool aio_poll(AioContext *ctx, bool blocking)
{
AioHandler *node;
int i;
int ret = 0;
bool progress;
int64_t timeout;
int64_t start = 0;
/* aio_notify can avoid the expensive event_notifier_set if
* everything (file descriptors, bottom halves, timers) will
* be re-evaluated before the next blocking poll(). This is
* already true when aio_poll is called with blocking == false;
* if blocking == true, it is only true after poll() returns,
* so disable the optimization now.
*/
if (blocking) {
atomic_add(&ctx->notify_me, 2);
}
qemu_lockcnt_inc(&ctx->list_lock);
if (ctx->poll_max_ns) {
start = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
}
progress = try_poll_mode(ctx, blocking);
if (!progress) {
assert(npfd == 0);
/* fill pollfds */
if (!aio_epoll_enabled(ctx)) {
QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
if (!node->deleted && node->pfd.events
&& aio_node_check(ctx, node->is_external)) {
add_pollfd(node);
}
}
}
timeout = blocking ? aio_compute_timeout(ctx) : 0;
/* wait until next event */
if (aio_epoll_check_poll(ctx, pollfds, npfd, timeout)) {
AioHandler epoll_handler;
epoll_handler.pfd.fd = ctx->epollfd;
epoll_handler.pfd.events = G_IO_IN | G_IO_OUT | G_IO_HUP | G_IO_ERR;
npfd = 0;
add_pollfd(&epoll_handler);
ret = aio_epoll(ctx, pollfds, npfd, timeout);
} else {
ret = qemu_poll_ns(pollfds, npfd, timeout);
}
}
if (blocking) {
atomic_sub(&ctx->notify_me, 2);
}
/* Adjust polling time */
if (ctx->poll_max_ns) {
int64_t block_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - start;
if (block_ns <= ctx->poll_ns) {
/* This is the sweet spot, no adjustment needed */
} else if (block_ns > ctx->poll_max_ns) {
/* We'd have to poll for too long, poll less */
int64_t old = ctx->poll_ns;
if (ctx->poll_shrink) {
ctx->poll_ns /= ctx->poll_shrink;
} else {
ctx->poll_ns = 0;
}
trace_poll_shrink(ctx, old, ctx->poll_ns);
} else if (ctx->poll_ns < ctx->poll_max_ns &&
block_ns < ctx->poll_max_ns) {
/* There is room to grow, poll longer */
int64_t old = ctx->poll_ns;
int64_t grow = ctx->poll_grow;
if (grow == 0) {
grow = 2;
}
if (ctx->poll_ns) {
ctx->poll_ns *= grow;
} else {
ctx->poll_ns = 4000; /* start polling at 4 microseconds */
}
if (ctx->poll_ns > ctx->poll_max_ns) {
ctx->poll_ns = ctx->poll_max_ns;
}
trace_poll_grow(ctx, old, ctx->poll_ns);
}
}
aio_notify_accept(ctx);
/* if we have any readable fds, dispatch event */
if (ret > 0) {
for (i = 0; i < npfd; i++) {
nodes[i]->pfd.revents = pollfds[i].revents;
}
}
npfd = 0;
qemu_lockcnt_dec(&ctx->list_lock);
/* Run dispatch even if there were no readable fds to run timers */
if (aio_dispatch(ctx, ret > 0)) {
progress = true;
}
return progress;
}
| 22,078 |
qemu | dfd100f242370886bb6732f70f1f7cbd8eb9fedc | 0 | void qio_channel_socket_listen_async(QIOChannelSocket *ioc,
SocketAddress *addr,
QIOTaskFunc callback,
gpointer opaque,
GDestroyNotify destroy)
{
QIOTask *task = qio_task_new(
OBJECT(ioc), callback, opaque, destroy);
SocketAddress *addrCopy;
addrCopy = QAPI_CLONE(SocketAddress, addr);
/* socket_listen() blocks in DNS lookups, so we must use a thread */
trace_qio_channel_socket_listen_async(ioc, addr);
qio_task_run_in_thread(task,
qio_channel_socket_listen_worker,
addrCopy,
(GDestroyNotify)qapi_free_SocketAddress);
}
| 22,079 |
qemu | 3ae43202754711808ea5186e327bfd0533dd88fc | 0 | long do_syscall(void *cpu_env, int num, long arg1, long arg2, long arg3,
long arg4, long arg5, long arg6)
{
long ret;
struct stat st;
struct statfs stfs;
void *p;
#ifdef DEBUG
gemu_log("syscall %d", num);
#endif
switch(num) {
case TARGET_NR_exit:
#ifdef HAVE_GPROF
_mcleanup();
#endif
gdb_exit(cpu_env, arg1);
/* XXX: should free thread stack and CPU env */
_exit(arg1);
ret = 0; /* avoid warning */
break;
case TARGET_NR_read:
page_unprotect_range(arg2, arg3);
p = lock_user(arg2, arg3, 0);
ret = get_errno(read(arg1, p, arg3));
unlock_user(p, arg2, ret);
break;
case TARGET_NR_write:
p = lock_user(arg2, arg3, 1);
ret = get_errno(write(arg1, p, arg3));
unlock_user(p, arg2, 0);
break;
case TARGET_NR_open:
p = lock_user_string(arg1);
ret = get_errno(open(path(p),
target_to_host_bitmask(arg2, fcntl_flags_tbl),
arg3));
unlock_user(p, arg1, 0);
break;
case TARGET_NR_close:
ret = get_errno(close(arg1));
break;
case TARGET_NR_brk:
ret = do_brk(arg1);
break;
case TARGET_NR_fork:
ret = get_errno(do_fork(cpu_env, SIGCHLD, 0));
break;
#ifdef TARGET_NR_waitpid
case TARGET_NR_waitpid:
{
int status;
ret = get_errno(waitpid(arg1, &status, arg3));
if (!is_error(ret) && arg2)
tput32(arg2, status);
}
break;
#endif
#ifdef TARGET_NR_creat /* not on alpha */
case TARGET_NR_creat:
p = lock_user_string(arg1);
ret = get_errno(creat(p, arg2));
unlock_user(p, arg1, 0);
break;
#endif
case TARGET_NR_link:
{
void * p2;
p = lock_user_string(arg1);
p2 = lock_user_string(arg2);
ret = get_errno(link(p, p2));
unlock_user(p2, arg2, 0);
unlock_user(p, arg1, 0);
}
break;
case TARGET_NR_unlink:
p = lock_user_string(arg1);
ret = get_errno(unlink(p));
unlock_user(p, arg1, 0);
break;
case TARGET_NR_execve:
{
char **argp, **envp;
int argc, envc;
target_ulong gp;
target_ulong guest_argp;
target_ulong guest_envp;
target_ulong addr;
char **q;
argc = 0;
guest_argp = arg2;
for (gp = guest_argp; tgetl(gp); gp++)
argc++;
envc = 0;
guest_envp = arg3;
for (gp = guest_envp; tgetl(gp); gp++)
envc++;
argp = alloca((argc + 1) * sizeof(void *));
envp = alloca((envc + 1) * sizeof(void *));
for (gp = guest_argp, q = argp; ;
gp += sizeof(target_ulong), q++) {
addr = tgetl(gp);
if (!addr)
break;
*q = lock_user_string(addr);
}
*q = NULL;
for (gp = guest_envp, q = envp; ;
gp += sizeof(target_ulong), q++) {
addr = tgetl(gp);
if (!addr)
break;
*q = lock_user_string(addr);
}
*q = NULL;
p = lock_user_string(arg1);
ret = get_errno(execve(p, argp, envp));
unlock_user(p, arg1, 0);
for (gp = guest_argp, q = argp; *q;
gp += sizeof(target_ulong), q++) {
addr = tgetl(gp);
unlock_user(*q, addr, 0);
}
for (gp = guest_envp, q = envp; *q;
gp += sizeof(target_ulong), q++) {
addr = tgetl(gp);
unlock_user(*q, addr, 0);
}
}
break;
case TARGET_NR_chdir:
p = lock_user_string(arg1);
ret = get_errno(chdir(p));
unlock_user(p, arg1, 0);
break;
#ifdef TARGET_NR_time
case TARGET_NR_time:
{
time_t host_time;
ret = get_errno(time(&host_time));
if (!is_error(ret) && arg1)
tputl(arg1, host_time);
}
break;
#endif
case TARGET_NR_mknod:
p = lock_user_string(arg1);
ret = get_errno(mknod(p, arg2, arg3));
unlock_user(p, arg1, 0);
break;
case TARGET_NR_chmod:
p = lock_user_string(arg1);
ret = get_errno(chmod(p, arg2));
unlock_user(p, arg1, 0);
break;
#ifdef TARGET_NR_break
case TARGET_NR_break:
goto unimplemented;
#endif
#ifdef TARGET_NR_oldstat
case TARGET_NR_oldstat:
goto unimplemented;
#endif
case TARGET_NR_lseek:
ret = get_errno(lseek(arg1, arg2, arg3));
break;
#ifdef TARGET_NR_getxpid
case TARGET_NR_getxpid:
#else
case TARGET_NR_getpid:
#endif
ret = get_errno(getpid());
break;
case TARGET_NR_mount:
{
/* need to look at the data field */
void *p2, *p3;
p = lock_user_string(arg1);
p2 = lock_user_string(arg2);
p3 = lock_user_string(arg3);
ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, (const void *)arg5));
unlock_user(p, arg1, 0);
unlock_user(p2, arg2, 0);
unlock_user(p3, arg3, 0);
break;
}
#ifdef TARGET_NR_umount
case TARGET_NR_umount:
p = lock_user_string(arg1);
ret = get_errno(umount(p));
unlock_user(p, arg1, 0);
break;
#endif
#ifdef TARGET_NR_stime /* not on alpha */
case TARGET_NR_stime:
{
time_t host_time;
host_time = tgetl(arg1);
ret = get_errno(stime(&host_time));
}
break;
#endif
case TARGET_NR_ptrace:
goto unimplemented;
#ifdef TARGET_NR_alarm /* not on alpha */
case TARGET_NR_alarm:
ret = alarm(arg1);
break;
#endif
#ifdef TARGET_NR_oldfstat
case TARGET_NR_oldfstat:
goto unimplemented;
#endif
#ifdef TARGET_NR_pause /* not on alpha */
case TARGET_NR_pause:
ret = get_errno(pause());
break;
#endif
#ifdef TARGET_NR_utime
case TARGET_NR_utime:
{
struct utimbuf tbuf, *host_tbuf;
struct target_utimbuf *target_tbuf;
if (arg2) {
lock_user_struct(target_tbuf, arg2, 1);
tbuf.actime = tswapl(target_tbuf->actime);
tbuf.modtime = tswapl(target_tbuf->modtime);
unlock_user_struct(target_tbuf, arg2, 0);
host_tbuf = &tbuf;
} else {
host_tbuf = NULL;
}
p = lock_user_string(arg1);
ret = get_errno(utime(p, host_tbuf));
unlock_user(p, arg1, 0);
}
break;
#endif
case TARGET_NR_utimes:
{
struct timeval *tvp, tv[2];
if (arg2) {
target_to_host_timeval(&tv[0], arg2);
target_to_host_timeval(&tv[1],
arg2 + sizeof (struct target_timeval));
tvp = tv;
} else {
tvp = NULL;
}
p = lock_user_string(arg1);
ret = get_errno(utimes(p, tvp));
unlock_user(p, arg1, 0);
}
break;
#ifdef TARGET_NR_stty
case TARGET_NR_stty:
goto unimplemented;
#endif
#ifdef TARGET_NR_gtty
case TARGET_NR_gtty:
goto unimplemented;
#endif
case TARGET_NR_access:
p = lock_user_string(arg1);
ret = get_errno(access(p, arg2));
unlock_user(p, arg1, 0);
break;
#ifdef TARGET_NR_nice /* not on alpha */
case TARGET_NR_nice:
ret = get_errno(nice(arg1));
break;
#endif
#ifdef TARGET_NR_ftime
case TARGET_NR_ftime:
goto unimplemented;
#endif
case TARGET_NR_sync:
sync();
ret = 0;
break;
case TARGET_NR_kill:
ret = get_errno(kill(arg1, arg2));
break;
case TARGET_NR_rename:
{
void *p2;
p = lock_user_string(arg1);
p2 = lock_user_string(arg2);
ret = get_errno(rename(p, p2));
unlock_user(p2, arg2, 0);
unlock_user(p, arg1, 0);
}
break;
case TARGET_NR_mkdir:
p = lock_user_string(arg1);
ret = get_errno(mkdir(p, arg2));
unlock_user(p, arg1, 0);
break;
case TARGET_NR_rmdir:
p = lock_user_string(arg1);
ret = get_errno(rmdir(p));
unlock_user(p, arg1, 0);
break;
case TARGET_NR_dup:
ret = get_errno(dup(arg1));
break;
case TARGET_NR_pipe:
{
int host_pipe[2];
ret = get_errno(pipe(host_pipe));
if (!is_error(ret)) {
#if defined(TARGET_MIPS)
CPUMIPSState *env = (CPUMIPSState*)cpu_env;
env->gpr[3][env->current_tc] = host_pipe[1];
ret = host_pipe[0];
#else
tput32(arg1, host_pipe[0]);
tput32(arg1 + 4, host_pipe[1]);
#endif
}
}
break;
case TARGET_NR_times:
{
struct target_tms *tmsp;
struct tms tms;
ret = get_errno(times(&tms));
if (arg1) {
tmsp = lock_user(arg1, sizeof(struct target_tms), 0);
tmsp->tms_utime = tswapl(host_to_target_clock_t(tms.tms_utime));
tmsp->tms_stime = tswapl(host_to_target_clock_t(tms.tms_stime));
tmsp->tms_cutime = tswapl(host_to_target_clock_t(tms.tms_cutime));
tmsp->tms_cstime = tswapl(host_to_target_clock_t(tms.tms_cstime));
}
if (!is_error(ret))
ret = host_to_target_clock_t(ret);
}
break;
#ifdef TARGET_NR_prof
case TARGET_NR_prof:
goto unimplemented;
#endif
#ifdef TARGET_NR_signal
case TARGET_NR_signal:
goto unimplemented;
#endif
case TARGET_NR_acct:
p = lock_user_string(arg1);
ret = get_errno(acct(path(p)));
unlock_user(p, arg1, 0);
break;
#ifdef TARGET_NR_umount2 /* not on alpha */
case TARGET_NR_umount2:
p = lock_user_string(arg1);
ret = get_errno(umount2(p, arg2));
unlock_user(p, arg1, 0);
break;
#endif
#ifdef TARGET_NR_lock
case TARGET_NR_lock:
goto unimplemented;
#endif
case TARGET_NR_ioctl:
ret = do_ioctl(arg1, arg2, arg3);
break;
case TARGET_NR_fcntl:
ret = get_errno(do_fcntl(arg1, arg2, arg3));
break;
#ifdef TARGET_NR_mpx
case TARGET_NR_mpx:
goto unimplemented;
#endif
case TARGET_NR_setpgid:
ret = get_errno(setpgid(arg1, arg2));
break;
#ifdef TARGET_NR_ulimit
case TARGET_NR_ulimit:
goto unimplemented;
#endif
#ifdef TARGET_NR_oldolduname
case TARGET_NR_oldolduname:
goto unimplemented;
#endif
case TARGET_NR_umask:
ret = get_errno(umask(arg1));
break;
case TARGET_NR_chroot:
p = lock_user_string(arg1);
ret = get_errno(chroot(p));
unlock_user(p, arg1, 0);
break;
case TARGET_NR_ustat:
goto unimplemented;
case TARGET_NR_dup2:
ret = get_errno(dup2(arg1, arg2));
break;
#ifdef TARGET_NR_getppid /* not on alpha */
case TARGET_NR_getppid:
ret = get_errno(getppid());
break;
#endif
case TARGET_NR_getpgrp:
ret = get_errno(getpgrp());
break;
case TARGET_NR_setsid:
ret = get_errno(setsid());
break;
#ifdef TARGET_NR_sigaction
case TARGET_NR_sigaction:
{
#if !defined(TARGET_MIPS)
struct target_old_sigaction *old_act;
struct target_sigaction act, oact, *pact;
if (arg2) {
lock_user_struct(old_act, arg2, 1);
act._sa_handler = old_act->_sa_handler;
target_siginitset(&act.sa_mask, old_act->sa_mask);
act.sa_flags = old_act->sa_flags;
act.sa_restorer = old_act->sa_restorer;
unlock_user_struct(old_act, arg2, 0);
pact = &act;
} else {
pact = NULL;
}
ret = get_errno(do_sigaction(arg1, pact, &oact));
if (!is_error(ret) && arg3) {
lock_user_struct(old_act, arg3, 0);
old_act->_sa_handler = oact._sa_handler;
old_act->sa_mask = oact.sa_mask.sig[0];
old_act->sa_flags = oact.sa_flags;
old_act->sa_restorer = oact.sa_restorer;
unlock_user_struct(old_act, arg3, 1);
}
#else
struct target_sigaction act, oact, *pact, *old_act;
if (arg2) {
lock_user_struct(old_act, arg2, 1);
act._sa_handler = old_act->_sa_handler;
target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
act.sa_flags = old_act->sa_flags;
unlock_user_struct(old_act, arg2, 0);
pact = &act;
} else {
pact = NULL;
}
ret = get_errno(do_sigaction(arg1, pact, &oact));
if (!is_error(ret) && arg3) {
lock_user_struct(old_act, arg3, 0);
old_act->_sa_handler = oact._sa_handler;
old_act->sa_flags = oact.sa_flags;
old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
old_act->sa_mask.sig[1] = 0;
old_act->sa_mask.sig[2] = 0;
old_act->sa_mask.sig[3] = 0;
unlock_user_struct(old_act, arg3, 1);
}
#endif
}
break;
#endif
case TARGET_NR_rt_sigaction:
{
struct target_sigaction *act;
struct target_sigaction *oact;
if (arg2)
lock_user_struct(act, arg2, 1);
else
act = NULL;
if (arg3)
lock_user_struct(oact, arg3, 0);
else
oact = NULL;
ret = get_errno(do_sigaction(arg1, act, oact));
if (arg2)
unlock_user_struct(act, arg2, 0);
if (arg3)
unlock_user_struct(oact, arg3, 1);
}
break;
#ifdef TARGET_NR_sgetmask /* not on alpha */
case TARGET_NR_sgetmask:
{
sigset_t cur_set;
target_ulong target_set;
sigprocmask(0, NULL, &cur_set);
host_to_target_old_sigset(&target_set, &cur_set);
ret = target_set;
}
break;
#endif
#ifdef TARGET_NR_ssetmask /* not on alpha */
case TARGET_NR_ssetmask:
{
sigset_t set, oset, cur_set;
target_ulong target_set = arg1;
sigprocmask(0, NULL, &cur_set);
target_to_host_old_sigset(&set, &target_set);
sigorset(&set, &set, &cur_set);
sigprocmask(SIG_SETMASK, &set, &oset);
host_to_target_old_sigset(&target_set, &oset);
ret = target_set;
}
break;
#endif
#ifdef TARGET_NR_sigprocmask
case TARGET_NR_sigprocmask:
{
int how = arg1;
sigset_t set, oldset, *set_ptr;
if (arg2) {
switch(how) {
case TARGET_SIG_BLOCK:
how = SIG_BLOCK;
break;
case TARGET_SIG_UNBLOCK:
how = SIG_UNBLOCK;
break;
case TARGET_SIG_SETMASK:
how = SIG_SETMASK;
break;
default:
ret = -EINVAL;
goto fail;
}
p = lock_user(arg2, sizeof(target_sigset_t), 1);
target_to_host_old_sigset(&set, p);
unlock_user(p, arg2, 0);
set_ptr = &set;
} else {
how = 0;
set_ptr = NULL;
}
            ret = get_errno(sigprocmask(how, set_ptr, &oldset));
if (!is_error(ret) && arg3) {
p = lock_user(arg3, sizeof(target_sigset_t), 0);
host_to_target_old_sigset(p, &oldset);
unlock_user(p, arg3, sizeof(target_sigset_t));
}
}
break;
#endif
case TARGET_NR_rt_sigprocmask:
{
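            /* Translate the TARGET_SIG_* 'how' value and the guest sigset to
               host form, call sigprocmask(), then convert the previous mask
               back for the guest. */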
int how = arg1;
sigset_t set, oldset, *set_ptr;
if (arg2) {
switch(how) {
case TARGET_SIG_BLOCK:
how = SIG_BLOCK;
break;
case TARGET_SIG_UNBLOCK:
how = SIG_UNBLOCK;
break;
case TARGET_SIG_SETMASK:
how = SIG_SETMASK;
break;
default:
ret = -EINVAL;
goto fail;
}
p = lock_user(arg2, sizeof(target_sigset_t), 1);
target_to_host_sigset(&set, p);
unlock_user(p, arg2, 0);
set_ptr = &set;
} else {
how = 0;
set_ptr = NULL;
}
ret = get_errno(sigprocmask(how, set_ptr, &oldset));
if (!is_error(ret) && arg3) {
p = lock_user(arg3, sizeof(target_sigset_t), 0);
host_to_target_sigset(p, &oldset);
unlock_user(p, arg3, sizeof(target_sigset_t));
}
}
break;
#ifdef TARGET_NR_sigpending
case TARGET_NR_sigpending:
{
sigset_t set;
ret = get_errno(sigpending(&set));
if (!is_error(ret)) {
p = lock_user(arg1, sizeof(target_sigset_t), 0);
host_to_target_old_sigset(p, &set);
unlock_user(p, arg1, sizeof(target_sigset_t));
}
}
break;
#endif
case TARGET_NR_rt_sigpending:
{
sigset_t set;
ret = get_errno(sigpending(&set));
if (!is_error(ret)) {
p = lock_user(arg1, sizeof(target_sigset_t), 0);
host_to_target_sigset(p, &set);
unlock_user(p, arg1, sizeof(target_sigset_t));
}
}
break;
#ifdef TARGET_NR_sigsuspend
case TARGET_NR_sigsuspend:
{
sigset_t set;
p = lock_user(arg1, sizeof(target_sigset_t), 1);
target_to_host_old_sigset(&set, p);
unlock_user(p, arg1, 0);
ret = get_errno(sigsuspend(&set));
}
break;
#endif
case TARGET_NR_rt_sigsuspend:
{
sigset_t set;
p = lock_user(arg1, sizeof(target_sigset_t), 1);
target_to_host_sigset(&set, p);
unlock_user(p, arg1, 0);
ret = get_errno(sigsuspend(&set));
}
break;
case TARGET_NR_rt_sigtimedwait:
{
sigset_t set;
struct timespec uts, *puts;
siginfo_t uinfo;
p = lock_user(arg1, sizeof(target_sigset_t), 1);
target_to_host_sigset(&set, p);
unlock_user(p, arg1, 0);
if (arg3) {
puts = &uts;
target_to_host_timespec(puts, arg3);
} else {
puts = NULL;
}
ret = get_errno(sigtimedwait(&set, &uinfo, puts));
if (!is_error(ret) && arg2) {
p = lock_user(arg2, sizeof(target_sigset_t), 0);
host_to_target_siginfo(p, &uinfo);
unlock_user(p, arg2, sizeof(target_sigset_t));
}
}
break;
case TARGET_NR_rt_sigqueueinfo:
{
siginfo_t uinfo;
p = lock_user(arg3, sizeof(target_sigset_t), 1);
target_to_host_siginfo(&uinfo, p);
            unlock_user(p, arg3, 0);
ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
}
break;
#ifdef TARGET_NR_sigreturn
case TARGET_NR_sigreturn:
        /* NOTE: ret is eax, so no transcoding needs to be done */
ret = do_sigreturn(cpu_env);
break;
#endif
case TARGET_NR_rt_sigreturn:
        /* NOTE: ret is eax, so no transcoding needs to be done */
ret = do_rt_sigreturn(cpu_env);
break;
case TARGET_NR_sethostname:
p = lock_user_string(arg1);
ret = get_errno(sethostname(p, arg2));
unlock_user(p, arg1, 0);
break;
case TARGET_NR_setrlimit:
{
/* XXX: convert resource ? */
int resource = arg1;
struct target_rlimit *target_rlim;
struct rlimit rlim;
lock_user_struct(target_rlim, arg2, 1);
rlim.rlim_cur = tswapl(target_rlim->rlim_cur);
rlim.rlim_max = tswapl(target_rlim->rlim_max);
unlock_user_struct(target_rlim, arg2, 0);
ret = get_errno(setrlimit(resource, &rlim));
}
break;
case TARGET_NR_getrlimit:
{
/* XXX: convert resource ? */
int resource = arg1;
struct target_rlimit *target_rlim;
struct rlimit rlim;
ret = get_errno(getrlimit(resource, &rlim));
if (!is_error(ret)) {
lock_user_struct(target_rlim, arg2, 0);
                target_rlim->rlim_cur = tswapl(rlim.rlim_cur);
                target_rlim->rlim_max = tswapl(rlim.rlim_max);
unlock_user_struct(target_rlim, arg2, 1);
}
}
break;
case TARGET_NR_getrusage:
{
struct rusage rusage;
ret = get_errno(getrusage(arg1, &rusage));
if (!is_error(ret)) {
host_to_target_rusage(arg2, &rusage);
}
}
break;
case TARGET_NR_gettimeofday:
{
struct timeval tv;
ret = get_errno(gettimeofday(&tv, NULL));
if (!is_error(ret)) {
host_to_target_timeval(arg1, &tv);
}
}
break;
case TARGET_NR_settimeofday:
{
struct timeval tv;
target_to_host_timeval(&tv, arg1);
ret = get_errno(settimeofday(&tv, NULL));
}
break;
#ifdef TARGET_NR_select
case TARGET_NR_select:
{
struct target_sel_arg_struct *sel;
target_ulong inp, outp, exp, tvp;
long nsel;
lock_user_struct(sel, arg1, 1);
nsel = tswapl(sel->n);
inp = tswapl(sel->inp);
outp = tswapl(sel->outp);
exp = tswapl(sel->exp);
tvp = tswapl(sel->tvp);
unlock_user_struct(sel, arg1, 0);
ret = do_select(nsel, inp, outp, exp, tvp);
}
break;
#endif
case TARGET_NR_symlink:
{
void *p2;
p = lock_user_string(arg1);
p2 = lock_user_string(arg2);
ret = get_errno(symlink(p, p2));
unlock_user(p2, arg2, 0);
unlock_user(p, arg1, 0);
}
break;
#ifdef TARGET_NR_oldlstat
case TARGET_NR_oldlstat:
goto unimplemented;
#endif
case TARGET_NR_readlink:
{
void *p2;
p = lock_user_string(arg1);
p2 = lock_user(arg2, arg3, 0);
ret = get_errno(readlink(path(p), p2, arg3));
unlock_user(p2, arg2, ret);
unlock_user(p, arg1, 0);
}
break;
#ifdef TARGET_NR_uselib
case TARGET_NR_uselib:
goto unimplemented;
#endif
#ifdef TARGET_NR_swapon
case TARGET_NR_swapon:
p = lock_user_string(arg1);
ret = get_errno(swapon(p, arg2));
unlock_user(p, arg1, 0);
break;
#endif
case TARGET_NR_reboot:
goto unimplemented;
#ifdef TARGET_NR_readdir
case TARGET_NR_readdir:
goto unimplemented;
#endif
#ifdef TARGET_NR_mmap
case TARGET_NR_mmap:
#if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_M68K)
{
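            /* On these targets the old mmap syscall passes a single pointer
               to a block of six arguments in guest memory; unpack them before
               calling target_mmap(). */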
target_ulong *v;
target_ulong v1, v2, v3, v4, v5, v6;
v = lock_user(arg1, 6 * sizeof(target_ulong), 1);
v1 = tswapl(v[0]);
v2 = tswapl(v[1]);
v3 = tswapl(v[2]);
v4 = tswapl(v[3]);
v5 = tswapl(v[4]);
v6 = tswapl(v[5]);
unlock_user(v, arg1, 0);
ret = get_errno(target_mmap(v1, v2, v3,
target_to_host_bitmask(v4, mmap_flags_tbl),
v5, v6));
}
#else
ret = get_errno(target_mmap(arg1, arg2, arg3,
target_to_host_bitmask(arg4, mmap_flags_tbl),
arg5,
arg6));
#endif
break;
#endif
#ifdef TARGET_NR_mmap2
case TARGET_NR_mmap2:
#if defined(TARGET_SPARC) || defined(TARGET_MIPS)
#define MMAP_SHIFT 12
#else
#define MMAP_SHIFT TARGET_PAGE_BITS
#endif
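        /* mmap2 passes the file offset in page-sized units, so shift it back
           into bytes before calling target_mmap(). */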
ret = get_errno(target_mmap(arg1, arg2, arg3,
target_to_host_bitmask(arg4, mmap_flags_tbl),
arg5,
arg6 << MMAP_SHIFT));
break;
#endif
case TARGET_NR_munmap:
ret = get_errno(target_munmap(arg1, arg2));
break;
case TARGET_NR_mprotect:
ret = get_errno(target_mprotect(arg1, arg2, arg3));
break;
#ifdef TARGET_NR_mremap
case TARGET_NR_mremap:
ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
break;
#endif
/* ??? msync/mlock/munlock are broken for softmmu. */
#ifdef TARGET_NR_msync
case TARGET_NR_msync:
ret = get_errno(msync(g2h(arg1), arg2, arg3));
break;
#endif
#ifdef TARGET_NR_mlock
case TARGET_NR_mlock:
ret = get_errno(mlock(g2h(arg1), arg2));
break;
#endif
#ifdef TARGET_NR_munlock
case TARGET_NR_munlock:
ret = get_errno(munlock(g2h(arg1), arg2));
break;
#endif
#ifdef TARGET_NR_mlockall
case TARGET_NR_mlockall:
ret = get_errno(mlockall(arg1));
break;
#endif
#ifdef TARGET_NR_munlockall
case TARGET_NR_munlockall:
ret = get_errno(munlockall());
break;
#endif
case TARGET_NR_truncate:
p = lock_user_string(arg1);
ret = get_errno(truncate(p, arg2));
unlock_user(p, arg1, 0);
break;
case TARGET_NR_ftruncate:
ret = get_errno(ftruncate(arg1, arg2));
break;
case TARGET_NR_fchmod:
ret = get_errno(fchmod(arg1, arg2));
break;
case TARGET_NR_getpriority:
ret = get_errno(getpriority(arg1, arg2));
break;
case TARGET_NR_setpriority:
ret = get_errno(setpriority(arg1, arg2, arg3));
break;
#ifdef TARGET_NR_profil
case TARGET_NR_profil:
goto unimplemented;
#endif
case TARGET_NR_statfs:
p = lock_user_string(arg1);
ret = get_errno(statfs(path(p), &stfs));
unlock_user(p, arg1, 0);
convert_statfs:
if (!is_error(ret)) {
struct target_statfs *target_stfs;
lock_user_struct(target_stfs, arg2, 0);
/* ??? put_user is probably wrong. */
put_user(stfs.f_type, &target_stfs->f_type);
put_user(stfs.f_bsize, &target_stfs->f_bsize);
put_user(stfs.f_blocks, &target_stfs->f_blocks);
put_user(stfs.f_bfree, &target_stfs->f_bfree);
put_user(stfs.f_bavail, &target_stfs->f_bavail);
put_user(stfs.f_files, &target_stfs->f_files);
put_user(stfs.f_ffree, &target_stfs->f_ffree);
put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
put_user(stfs.f_namelen, &target_stfs->f_namelen);
unlock_user_struct(target_stfs, arg2, 1);
}
break;
case TARGET_NR_fstatfs:
ret = get_errno(fstatfs(arg1, &stfs));
goto convert_statfs;
#ifdef TARGET_NR_statfs64
case TARGET_NR_statfs64:
p = lock_user_string(arg1);
ret = get_errno(statfs(path(p), &stfs));
unlock_user(p, arg1, 0);
convert_statfs64:
if (!is_error(ret)) {
struct target_statfs64 *target_stfs;
lock_user_struct(target_stfs, arg3, 0);
/* ??? put_user is probably wrong. */
put_user(stfs.f_type, &target_stfs->f_type);
put_user(stfs.f_bsize, &target_stfs->f_bsize);
put_user(stfs.f_blocks, &target_stfs->f_blocks);
put_user(stfs.f_bfree, &target_stfs->f_bfree);
put_user(stfs.f_bavail, &target_stfs->f_bavail);
put_user(stfs.f_files, &target_stfs->f_files);
put_user(stfs.f_ffree, &target_stfs->f_ffree);
put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
put_user(stfs.f_namelen, &target_stfs->f_namelen);
unlock_user_struct(target_stfs, arg3, 0);
}
break;
case TARGET_NR_fstatfs64:
ret = get_errno(fstatfs(arg1, &stfs));
goto convert_statfs64;
#endif
#ifdef TARGET_NR_ioperm
case TARGET_NR_ioperm:
goto unimplemented;
#endif
#ifdef TARGET_NR_socketcall
case TARGET_NR_socketcall:
ret = do_socketcall(arg1, arg2);
break;
#endif
#ifdef TARGET_NR_accept
case TARGET_NR_accept:
ret = do_accept(arg1, arg2, arg3);
break;
#endif
#ifdef TARGET_NR_bind
case TARGET_NR_bind:
ret = do_bind(arg1, arg2, arg3);
break;
#endif
#ifdef TARGET_NR_connect
case TARGET_NR_connect:
ret = do_connect(arg1, arg2, arg3);
break;
#endif
#ifdef TARGET_NR_getpeername
case TARGET_NR_getpeername:
ret = do_getpeername(arg1, arg2, arg3);
break;
#endif
#ifdef TARGET_NR_getsockname
case TARGET_NR_getsockname:
ret = do_getsockname(arg1, arg2, arg3);
break;
#endif
#ifdef TARGET_NR_getsockopt
case TARGET_NR_getsockopt:
ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
break;
#endif
#ifdef TARGET_NR_listen
case TARGET_NR_listen:
ret = get_errno(listen(arg1, arg2));
break;
#endif
#ifdef TARGET_NR_recv
case TARGET_NR_recv:
ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
break;
#endif
#ifdef TARGET_NR_recvfrom
case TARGET_NR_recvfrom:
ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
break;
#endif
#ifdef TARGET_NR_recvmsg
case TARGET_NR_recvmsg:
ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
break;
#endif
#ifdef TARGET_NR_send
case TARGET_NR_send:
ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
break;
#endif
#ifdef TARGET_NR_sendmsg
case TARGET_NR_sendmsg:
ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
break;
#endif
#ifdef TARGET_NR_sendto
case TARGET_NR_sendto:
ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
break;
#endif
#ifdef TARGET_NR_shutdown
case TARGET_NR_shutdown:
ret = get_errno(shutdown(arg1, arg2));
break;
#endif
#ifdef TARGET_NR_socket
case TARGET_NR_socket:
ret = do_socket(arg1, arg2, arg3);
break;
#endif
#ifdef TARGET_NR_socketpair
case TARGET_NR_socketpair:
ret = do_socketpair(arg1, arg2, arg3, arg4);
break;
#endif
#ifdef TARGET_NR_setsockopt
case TARGET_NR_setsockopt:
ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
break;
#endif
case TARGET_NR_syslog:
p = lock_user_string(arg2);
ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
unlock_user(p, arg2, 0);
break;
case TARGET_NR_setitimer:
{
struct itimerval value, ovalue, *pvalue;
if (arg2) {
pvalue = &value;
target_to_host_timeval(&pvalue->it_interval,
arg2);
target_to_host_timeval(&pvalue->it_value,
arg2 + sizeof(struct target_timeval));
} else {
pvalue = NULL;
}
ret = get_errno(setitimer(arg1, pvalue, &ovalue));
if (!is_error(ret) && arg3) {
host_to_target_timeval(arg3,
&ovalue.it_interval);
host_to_target_timeval(arg3 + sizeof(struct target_timeval),
&ovalue.it_value);
}
}
break;
case TARGET_NR_getitimer:
{
struct itimerval value;
ret = get_errno(getitimer(arg1, &value));
if (!is_error(ret) && arg2) {
host_to_target_timeval(arg2,
&value.it_interval);
host_to_target_timeval(arg2 + sizeof(struct target_timeval),
&value.it_value);
}
}
break;
case TARGET_NR_stat:
p = lock_user_string(arg1);
ret = get_errno(stat(path(p), &st));
unlock_user(p, arg1, 0);
goto do_stat;
case TARGET_NR_lstat:
p = lock_user_string(arg1);
ret = get_errno(lstat(path(p), &st));
unlock_user(p, arg1, 0);
goto do_stat;
case TARGET_NR_fstat:
{
ret = get_errno(fstat(arg1, &st));
do_stat:
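            /* Convert the host struct stat into the guest layout; field
               widths differ between targets, hence the ifdefs below. */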
if (!is_error(ret)) {
struct target_stat *target_st;
lock_user_struct(target_st, arg2, 0);
#if defined(TARGET_MIPS) || defined(TARGET_SPARC64)
target_st->st_dev = tswap32(st.st_dev);
#else
target_st->st_dev = tswap16(st.st_dev);
#endif
target_st->st_ino = tswapl(st.st_ino);
#if defined(TARGET_PPC) || defined(TARGET_MIPS)
target_st->st_mode = tswapl(st.st_mode); /* XXX: check this */
target_st->st_uid = tswap32(st.st_uid);
target_st->st_gid = tswap32(st.st_gid);
#elif defined(TARGET_SPARC64)
target_st->st_mode = tswap32(st.st_mode);
target_st->st_uid = tswap32(st.st_uid);
target_st->st_gid = tswap32(st.st_gid);
#else
target_st->st_mode = tswap16(st.st_mode);
target_st->st_uid = tswap16(st.st_uid);
target_st->st_gid = tswap16(st.st_gid);
#endif
#if defined(TARGET_MIPS)
/* If this is the same on PPC, then just merge w/ the above ifdef */
target_st->st_nlink = tswapl(st.st_nlink);
target_st->st_rdev = tswapl(st.st_rdev);
#elif defined(TARGET_SPARC64)
target_st->st_nlink = tswap32(st.st_nlink);
target_st->st_rdev = tswap32(st.st_rdev);
#else
target_st->st_nlink = tswap16(st.st_nlink);
target_st->st_rdev = tswap16(st.st_rdev);
#endif
target_st->st_size = tswapl(st.st_size);
target_st->st_blksize = tswapl(st.st_blksize);
target_st->st_blocks = tswapl(st.st_blocks);
target_st->target_st_atime = tswapl(st.st_atime);
target_st->target_st_mtime = tswapl(st.st_mtime);
target_st->target_st_ctime = tswapl(st.st_ctime);
unlock_user_struct(target_st, arg2, 1);
}
}
break;
#ifdef TARGET_NR_olduname
case TARGET_NR_olduname:
goto unimplemented;
#endif
#ifdef TARGET_NR_iopl
case TARGET_NR_iopl:
goto unimplemented;
#endif
case TARGET_NR_vhangup:
ret = get_errno(vhangup());
break;
#ifdef TARGET_NR_idle
case TARGET_NR_idle:
goto unimplemented;
#endif
#ifdef TARGET_NR_syscall
case TARGET_NR_syscall:
ret = do_syscall(cpu_env,arg1 & 0xffff,arg2,arg3,arg4,arg5,arg6,0);
break;
#endif
case TARGET_NR_wait4:
{
int status;
target_long status_ptr = arg2;
struct rusage rusage, *rusage_ptr;
target_ulong target_rusage = arg4;
if (target_rusage)
rusage_ptr = &rusage;
else
rusage_ptr = NULL;
ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
if (!is_error(ret)) {
if (status_ptr)
tputl(status_ptr, status);
if (target_rusage) {
host_to_target_rusage(target_rusage, &rusage);
}
}
}
break;
#ifdef TARGET_NR_swapoff
case TARGET_NR_swapoff:
p = lock_user_string(arg1);
ret = get_errno(swapoff(p));
unlock_user(p, arg1, 0);
break;
#endif
case TARGET_NR_sysinfo:
{
struct target_sysinfo *target_value;
struct sysinfo value;
ret = get_errno(sysinfo(&value));
if (!is_error(ret) && arg1)
{
/* ??? __put_user is probably wrong. */
lock_user_struct(target_value, arg1, 0);
__put_user(value.uptime, &target_value->uptime);
__put_user(value.loads[0], &target_value->loads[0]);
__put_user(value.loads[1], &target_value->loads[1]);
__put_user(value.loads[2], &target_value->loads[2]);
__put_user(value.totalram, &target_value->totalram);
__put_user(value.freeram, &target_value->freeram);
__put_user(value.sharedram, &target_value->sharedram);
__put_user(value.bufferram, &target_value->bufferram);
__put_user(value.totalswap, &target_value->totalswap);
__put_user(value.freeswap, &target_value->freeswap);
__put_user(value.procs, &target_value->procs);
__put_user(value.totalhigh, &target_value->totalhigh);
__put_user(value.freehigh, &target_value->freehigh);
__put_user(value.mem_unit, &target_value->mem_unit);
unlock_user_struct(target_value, arg1, 1);
}
}
break;
#ifdef TARGET_NR_ipc
case TARGET_NR_ipc:
ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
break;
#endif
case TARGET_NR_fsync:
ret = get_errno(fsync(arg1));
break;
case TARGET_NR_clone:
ret = get_errno(do_fork(cpu_env, arg1, arg2));
break;
#ifdef __NR_exit_group
/* new thread calls */
case TARGET_NR_exit_group:
gdb_exit(cpu_env, arg1);
ret = get_errno(exit_group(arg1));
break;
#endif
case TARGET_NR_setdomainname:
p = lock_user_string(arg1);
ret = get_errno(setdomainname(p, arg2));
unlock_user(p, arg1, 0);
break;
case TARGET_NR_uname:
/* no need to transcode because we use the linux syscall */
{
struct new_utsname * buf;
lock_user_struct(buf, arg1, 0);
ret = get_errno(sys_uname(buf));
if (!is_error(ret)) {
                /* Overwrite the native machine name with whatever is being
emulated. */
strcpy (buf->machine, UNAME_MACHINE);
/* Allow the user to override the reported release. */
if (qemu_uname_release && *qemu_uname_release)
strcpy (buf->release, qemu_uname_release);
}
unlock_user_struct(buf, arg1, 1);
}
break;
#ifdef TARGET_I386
case TARGET_NR_modify_ldt:
ret = get_errno(do_modify_ldt(cpu_env, arg1, arg2, arg3));
break;
#if !defined(TARGET_X86_64)
case TARGET_NR_vm86old:
goto unimplemented;
case TARGET_NR_vm86:
ret = do_vm86(cpu_env, arg1, arg2);
break;
#endif
#endif
case TARGET_NR_adjtimex:
goto unimplemented;
#ifdef TARGET_NR_create_module
case TARGET_NR_create_module:
#endif
case TARGET_NR_init_module:
case TARGET_NR_delete_module:
#ifdef TARGET_NR_get_kernel_syms
case TARGET_NR_get_kernel_syms:
#endif
goto unimplemented;
case TARGET_NR_quotactl:
goto unimplemented;
case TARGET_NR_getpgid:
ret = get_errno(getpgid(arg1));
break;
case TARGET_NR_fchdir:
ret = get_errno(fchdir(arg1));
break;
#ifdef TARGET_NR_bdflush /* not on x86_64 */
case TARGET_NR_bdflush:
goto unimplemented;
#endif
#ifdef TARGET_NR_sysfs
case TARGET_NR_sysfs:
goto unimplemented;
#endif
case TARGET_NR_personality:
ret = get_errno(personality(arg1));
break;
#ifdef TARGET_NR_afs_syscall
case TARGET_NR_afs_syscall:
goto unimplemented;
#endif
#ifdef TARGET_NR__llseek /* Not on alpha */
case TARGET_NR__llseek:
{
#if defined (__x86_64__)
ret = get_errno(lseek(arg1, ((uint64_t )arg2 << 32) | arg3, arg5));
tput64(arg4, ret);
#else
int64_t res;
ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
tput64(arg4, res);
#endif
}
break;
#endif
case TARGET_NR_getdents:
#if TARGET_LONG_SIZE != 4
goto unimplemented;
#warning not supported
#elif TARGET_LONG_SIZE == 4 && HOST_LONG_SIZE == 8
{
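            /* The host dirent uses wider ino/off fields than the 32-bit
               target expects, so repack each record into the guest buffer
               with the smaller target_dirent layout. */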
struct target_dirent *target_dirp;
struct dirent *dirp;
long count = arg3;
dirp = malloc(count);
if (!dirp)
return -ENOMEM;
ret = get_errno(sys_getdents(arg1, dirp, count));
if (!is_error(ret)) {
struct dirent *de;
struct target_dirent *tde;
int len = ret;
int reclen, treclen;
int count1, tnamelen;
count1 = 0;
de = dirp;
target_dirp = lock_user(arg2, count, 0);
tde = target_dirp;
while (len > 0) {
reclen = de->d_reclen;
treclen = reclen - (2 * (sizeof(long) - sizeof(target_long)));
tde->d_reclen = tswap16(treclen);
tde->d_ino = tswapl(de->d_ino);
tde->d_off = tswapl(de->d_off);
tnamelen = treclen - (2 * sizeof(target_long) + 2);
if (tnamelen > 256)
tnamelen = 256;
/* XXX: may not be correct */
strncpy(tde->d_name, de->d_name, tnamelen);
de = (struct dirent *)((char *)de + reclen);
len -= reclen;
tde = (struct target_dirent *)((char *)tde + treclen);
count1 += treclen;
}
ret = count1;
}
unlock_user(target_dirp, arg2, ret);
free(dirp);
}
#else
{
struct dirent *dirp;
long count = arg3;
dirp = lock_user(arg2, count, 0);
ret = get_errno(sys_getdents(arg1, dirp, count));
if (!is_error(ret)) {
struct dirent *de;
int len = ret;
int reclen;
de = dirp;
while (len > 0) {
reclen = de->d_reclen;
if (reclen > len)
break;
de->d_reclen = tswap16(reclen);
tswapls(&de->d_ino);
tswapls(&de->d_off);
de = (struct dirent *)((char *)de + reclen);
len -= reclen;
}
}
unlock_user(dirp, arg2, ret);
}
#endif
break;
#ifdef TARGET_NR_getdents64
case TARGET_NR_getdents64:
{
struct dirent64 *dirp;
long count = arg3;
dirp = lock_user(arg2, count, 0);
ret = get_errno(sys_getdents64(arg1, dirp, count));
if (!is_error(ret)) {
struct dirent64 *de;
int len = ret;
int reclen;
de = dirp;
while (len > 0) {
reclen = de->d_reclen;
if (reclen > len)
break;
de->d_reclen = tswap16(reclen);
tswap64s(&de->d_ino);
tswap64s(&de->d_off);
de = (struct dirent64 *)((char *)de + reclen);
len -= reclen;
}
}
unlock_user(dirp, arg2, ret);
}
break;
#endif /* TARGET_NR_getdents64 */
#ifdef TARGET_NR__newselect
case TARGET_NR__newselect:
ret = do_select(arg1, arg2, arg3, arg4, arg5);
break;
#endif
#ifdef TARGET_NR_poll
case TARGET_NR_poll:
{
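            /* Copy the guest pollfd array into host struct pollfd, poll(),
               then write the revents fields back to the guest. */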
struct target_pollfd *target_pfd;
unsigned int nfds = arg2;
int timeout = arg3;
struct pollfd *pfd;
unsigned int i;
target_pfd = lock_user(arg1, sizeof(struct target_pollfd) * nfds, 1);
pfd = alloca(sizeof(struct pollfd) * nfds);
for(i = 0; i < nfds; i++) {
pfd[i].fd = tswap32(target_pfd[i].fd);
pfd[i].events = tswap16(target_pfd[i].events);
}
ret = get_errno(poll(pfd, nfds, timeout));
if (!is_error(ret)) {
for(i = 0; i < nfds; i++) {
target_pfd[i].revents = tswap16(pfd[i].revents);
}
ret += nfds * (sizeof(struct target_pollfd)
- sizeof(struct pollfd));
}
unlock_user(target_pfd, arg1, ret);
}
break;
#endif
case TARGET_NR_flock:
/* NOTE: the flock constant seems to be the same for every
Linux platform */
ret = get_errno(flock(arg1, arg2));
break;
case TARGET_NR_readv:
{
int count = arg3;
struct iovec *vec;
vec = alloca(count * sizeof(struct iovec));
lock_iovec(vec, arg2, count, 0);
ret = get_errno(readv(arg1, vec, count));
unlock_iovec(vec, arg2, count, 1);
}
break;
case TARGET_NR_writev:
{
int count = arg3;
struct iovec *vec;
vec = alloca(count * sizeof(struct iovec));
lock_iovec(vec, arg2, count, 1);
ret = get_errno(writev(arg1, vec, count));
unlock_iovec(vec, arg2, count, 0);
}
break;
case TARGET_NR_getsid:
ret = get_errno(getsid(arg1));
break;
#if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
case TARGET_NR_fdatasync:
ret = get_errno(fdatasync(arg1));
break;
#endif
case TARGET_NR__sysctl:
        /* We don't implement this, but ENOTDIR is always a safe
           return value. */
return -ENOTDIR;
case TARGET_NR_sched_setparam:
{
struct sched_param *target_schp;
struct sched_param schp;
lock_user_struct(target_schp, arg2, 1);
schp.sched_priority = tswap32(target_schp->sched_priority);
unlock_user_struct(target_schp, arg2, 0);
ret = get_errno(sched_setparam(arg1, &schp));
}
break;
case TARGET_NR_sched_getparam:
{
struct sched_param *target_schp;
struct sched_param schp;
ret = get_errno(sched_getparam(arg1, &schp));
if (!is_error(ret)) {
lock_user_struct(target_schp, arg2, 0);
target_schp->sched_priority = tswap32(schp.sched_priority);
unlock_user_struct(target_schp, arg2, 1);
}
}
break;
case TARGET_NR_sched_setscheduler:
{
struct sched_param *target_schp;
struct sched_param schp;
lock_user_struct(target_schp, arg3, 1);
schp.sched_priority = tswap32(target_schp->sched_priority);
unlock_user_struct(target_schp, arg3, 0);
ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
}
break;
case TARGET_NR_sched_getscheduler:
ret = get_errno(sched_getscheduler(arg1));
break;
case TARGET_NR_sched_yield:
ret = get_errno(sched_yield());
break;
case TARGET_NR_sched_get_priority_max:
ret = get_errno(sched_get_priority_max(arg1));
break;
case TARGET_NR_sched_get_priority_min:
ret = get_errno(sched_get_priority_min(arg1));
break;
case TARGET_NR_sched_rr_get_interval:
{
struct timespec ts;
ret = get_errno(sched_rr_get_interval(arg1, &ts));
if (!is_error(ret)) {
host_to_target_timespec(arg2, &ts);
}
}
break;
case TARGET_NR_nanosleep:
{
struct timespec req, rem;
target_to_host_timespec(&req, arg1);
ret = get_errno(nanosleep(&req, &rem));
if (is_error(ret) && arg2) {
host_to_target_timespec(arg2, &rem);
}
}
break;
#ifdef TARGET_NR_query_module
case TARGET_NR_query_module:
goto unimplemented;
#endif
#ifdef TARGET_NR_nfsservctl
case TARGET_NR_nfsservctl:
goto unimplemented;
#endif
case TARGET_NR_prctl:
switch (arg1)
{
case PR_GET_PDEATHSIG:
{
int deathsig;
ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
if (!is_error(ret) && arg2)
tput32(arg2, deathsig);
}
break;
default:
ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
break;
}
break;
#ifdef TARGET_NR_pread
case TARGET_NR_pread:
page_unprotect_range(arg2, arg3);
p = lock_user(arg2, arg3, 0);
ret = get_errno(pread(arg1, p, arg3, arg4));
unlock_user(p, arg2, ret);
break;
case TARGET_NR_pwrite:
p = lock_user(arg2, arg3, 1);
ret = get_errno(pwrite(arg1, p, arg3, arg4));
unlock_user(p, arg2, 0);
break;
#endif
case TARGET_NR_getcwd:
p = lock_user(arg1, arg2, 0);
ret = get_errno(sys_getcwd1(p, arg2));
unlock_user(p, arg1, ret);
break;
case TARGET_NR_capget:
goto unimplemented;
case TARGET_NR_capset:
goto unimplemented;
case TARGET_NR_sigaltstack:
goto unimplemented;
case TARGET_NR_sendfile:
goto unimplemented;
#ifdef TARGET_NR_getpmsg
case TARGET_NR_getpmsg:
goto unimplemented;
#endif
#ifdef TARGET_NR_putpmsg
case TARGET_NR_putpmsg:
goto unimplemented;
#endif
#ifdef TARGET_NR_vfork
case TARGET_NR_vfork:
ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD, 0));
break;
#endif
#ifdef TARGET_NR_ugetrlimit
case TARGET_NR_ugetrlimit:
{
struct rlimit rlim;
ret = get_errno(getrlimit(arg1, &rlim));
if (!is_error(ret)) {
struct target_rlimit *target_rlim;
lock_user_struct(target_rlim, arg2, 0);
target_rlim->rlim_cur = tswapl(rlim.rlim_cur);
target_rlim->rlim_max = tswapl(rlim.rlim_max);
unlock_user_struct(target_rlim, arg2, 1);
}
break;
}
#endif
#ifdef TARGET_NR_truncate64
case TARGET_NR_truncate64:
p = lock_user_string(arg1);
ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
unlock_user(p, arg1, 0);
break;
#endif
#ifdef TARGET_NR_ftruncate64
case TARGET_NR_ftruncate64:
ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
break;
#endif
#ifdef TARGET_NR_stat64
case TARGET_NR_stat64:
p = lock_user_string(arg1);
ret = get_errno(stat(path(p), &st));
unlock_user(p, arg1, 0);
goto do_stat64;
#endif
#ifdef TARGET_NR_lstat64
case TARGET_NR_lstat64:
p = lock_user_string(arg1);
ret = get_errno(lstat(path(p), &st));
unlock_user(p, arg1, 0);
goto do_stat64;
#endif
#ifdef TARGET_NR_fstat64
case TARGET_NR_fstat64:
{
ret = get_errno(fstat(arg1, &st));
do_stat64:
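            /* Write the result out in the guest's stat64 layout; ARM EABI
               uses its own padded structure. */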
if (!is_error(ret)) {
#ifdef TARGET_ARM
if (((CPUARMState *)cpu_env)->eabi) {
struct target_eabi_stat64 *target_st;
lock_user_struct(target_st, arg2, 1);
memset(target_st, 0, sizeof(struct target_eabi_stat64));
/* put_user is probably wrong. */
put_user(st.st_dev, &target_st->st_dev);
put_user(st.st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
put_user(st.st_ino, &target_st->__st_ino);
#endif
put_user(st.st_mode, &target_st->st_mode);
put_user(st.st_nlink, &target_st->st_nlink);
put_user(st.st_uid, &target_st->st_uid);
put_user(st.st_gid, &target_st->st_gid);
put_user(st.st_rdev, &target_st->st_rdev);
/* XXX: better use of kernel struct */
put_user(st.st_size, &target_st->st_size);
put_user(st.st_blksize, &target_st->st_blksize);
put_user(st.st_blocks, &target_st->st_blocks);
put_user(st.st_atime, &target_st->target_st_atime);
put_user(st.st_mtime, &target_st->target_st_mtime);
put_user(st.st_ctime, &target_st->target_st_ctime);
unlock_user_struct(target_st, arg2, 0);
} else
#endif
{
struct target_stat64 *target_st;
lock_user_struct(target_st, arg2, 1);
memset(target_st, 0, sizeof(struct target_stat64));
/* ??? put_user is probably wrong. */
put_user(st.st_dev, &target_st->st_dev);
put_user(st.st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
put_user(st.st_ino, &target_st->__st_ino);
#endif
put_user(st.st_mode, &target_st->st_mode);
put_user(st.st_nlink, &target_st->st_nlink);
put_user(st.st_uid, &target_st->st_uid);
put_user(st.st_gid, &target_st->st_gid);
put_user(st.st_rdev, &target_st->st_rdev);
/* XXX: better use of kernel struct */
put_user(st.st_size, &target_st->st_size);
put_user(st.st_blksize, &target_st->st_blksize);
put_user(st.st_blocks, &target_st->st_blocks);
put_user(st.st_atime, &target_st->target_st_atime);
put_user(st.st_mtime, &target_st->target_st_mtime);
put_user(st.st_ctime, &target_st->target_st_ctime);
unlock_user_struct(target_st, arg2, 0);
}
}
}
break;
#endif
#ifdef USE_UID16
case TARGET_NR_lchown:
p = lock_user_string(arg1);
ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
unlock_user(p, arg1, 0);
break;
case TARGET_NR_getuid:
ret = get_errno(high2lowuid(getuid()));
break;
case TARGET_NR_getgid:
ret = get_errno(high2lowgid(getgid()));
break;
case TARGET_NR_geteuid:
ret = get_errno(high2lowuid(geteuid()));
break;
case TARGET_NR_getegid:
ret = get_errno(high2lowgid(getegid()));
break;
case TARGET_NR_setreuid:
ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
break;
case TARGET_NR_setregid:
ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
break;
case TARGET_NR_getgroups:
{
int gidsetsize = arg1;
uint16_t *target_grouplist;
gid_t *grouplist;
int i;
grouplist = alloca(gidsetsize * sizeof(gid_t));
ret = get_errno(getgroups(gidsetsize, grouplist));
if (!is_error(ret)) {
target_grouplist = lock_user(arg2, gidsetsize * 2, 0);
for(i = 0;i < gidsetsize; i++)
target_grouplist[i] = tswap16(grouplist[i]);
unlock_user(target_grouplist, arg2, gidsetsize * 2);
}
}
break;
case TARGET_NR_setgroups:
{
int gidsetsize = arg1;
uint16_t *target_grouplist;
gid_t *grouplist;
int i;
grouplist = alloca(gidsetsize * sizeof(gid_t));
target_grouplist = lock_user(arg2, gidsetsize * 2, 1);
for(i = 0;i < gidsetsize; i++)
grouplist[i] = tswap16(target_grouplist[i]);
unlock_user(target_grouplist, arg2, 0);
ret = get_errno(setgroups(gidsetsize, grouplist));
}
break;
case TARGET_NR_fchown:
ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
break;
#ifdef TARGET_NR_setresuid
case TARGET_NR_setresuid:
ret = get_errno(setresuid(low2highuid(arg1),
low2highuid(arg2),
low2highuid(arg3)));
break;
#endif
#ifdef TARGET_NR_getresuid
case TARGET_NR_getresuid:
{
uid_t ruid, euid, suid;
ret = get_errno(getresuid(&ruid, &euid, &suid));
if (!is_error(ret)) {
tput16(arg1, tswap16(high2lowuid(ruid)));
tput16(arg2, tswap16(high2lowuid(euid)));
tput16(arg3, tswap16(high2lowuid(suid)));
}
}
break;
#endif
#ifdef TARGET_NR_getresgid
case TARGET_NR_setresgid:
ret = get_errno(setresgid(low2highgid(arg1),
low2highgid(arg2),
low2highgid(arg3)));
break;
#endif
#ifdef TARGET_NR_getresgid
case TARGET_NR_getresgid:
{
gid_t rgid, egid, sgid;
ret = get_errno(getresgid(&rgid, &egid, &sgid));
if (!is_error(ret)) {
tput16(arg1, tswap16(high2lowgid(rgid)));
tput16(arg2, tswap16(high2lowgid(egid)));
tput16(arg3, tswap16(high2lowgid(sgid)));
}
}
break;
#endif
case TARGET_NR_chown:
p = lock_user_string(arg1);
ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
unlock_user(p, arg1, 0);
break;
case TARGET_NR_setuid:
ret = get_errno(setuid(low2highuid(arg1)));
break;
case TARGET_NR_setgid:
ret = get_errno(setgid(low2highgid(arg1)));
break;
case TARGET_NR_setfsuid:
ret = get_errno(setfsuid(arg1));
break;
case TARGET_NR_setfsgid:
ret = get_errno(setfsgid(arg1));
break;
#endif /* USE_UID16 */
#ifdef TARGET_NR_lchown32
case TARGET_NR_lchown32:
p = lock_user_string(arg1);
ret = get_errno(lchown(p, arg2, arg3));
unlock_user(p, arg1, 0);
break;
#endif
#ifdef TARGET_NR_getuid32
case TARGET_NR_getuid32:
ret = get_errno(getuid());
break;
#endif
#ifdef TARGET_NR_getgid32
case TARGET_NR_getgid32:
ret = get_errno(getgid());
break;
#endif
#ifdef TARGET_NR_geteuid32
case TARGET_NR_geteuid32:
ret = get_errno(geteuid());
break;
#endif
#ifdef TARGET_NR_getegid32
case TARGET_NR_getegid32:
ret = get_errno(getegid());
break;
#endif
#ifdef TARGET_NR_setreuid32
case TARGET_NR_setreuid32:
ret = get_errno(setreuid(arg1, arg2));
break;
#endif
#ifdef TARGET_NR_setregid32
case TARGET_NR_setregid32:
ret = get_errno(setregid(arg1, arg2));
break;
#endif
#ifdef TARGET_NR_getgroups32
case TARGET_NR_getgroups32:
{
int gidsetsize = arg1;
uint32_t *target_grouplist;
gid_t *grouplist;
int i;
grouplist = alloca(gidsetsize * sizeof(gid_t));
ret = get_errno(getgroups(gidsetsize, grouplist));
if (!is_error(ret)) {
target_grouplist = lock_user(arg2, gidsetsize * 4, 0);
for(i = 0;i < gidsetsize; i++)
target_grouplist[i] = tswap32(grouplist[i]);
unlock_user(target_grouplist, arg2, gidsetsize * 4);
}
}
break;
#endif
#ifdef TARGET_NR_setgroups32
case TARGET_NR_setgroups32:
{
int gidsetsize = arg1;
uint32_t *target_grouplist;
gid_t *grouplist;
int i;
grouplist = alloca(gidsetsize * sizeof(gid_t));
target_grouplist = lock_user(arg2, gidsetsize * 4, 1);
for(i = 0;i < gidsetsize; i++)
grouplist[i] = tswap32(target_grouplist[i]);
unlock_user(target_grouplist, arg2, 0);
ret = get_errno(setgroups(gidsetsize, grouplist));
}
break;
#endif
#ifdef TARGET_NR_fchown32
case TARGET_NR_fchown32:
ret = get_errno(fchown(arg1, arg2, arg3));
break;
#endif
#ifdef TARGET_NR_setresuid32
case TARGET_NR_setresuid32:
ret = get_errno(setresuid(arg1, arg2, arg3));
break;
#endif
#ifdef TARGET_NR_getresuid32
case TARGET_NR_getresuid32:
{
uid_t ruid, euid, suid;
ret = get_errno(getresuid(&ruid, &euid, &suid));
if (!is_error(ret)) {
tput32(arg1, tswap32(ruid));
tput32(arg2, tswap32(euid));
tput32(arg3, tswap32(suid));
}
}
break;
#endif
#ifdef TARGET_NR_setresgid32
case TARGET_NR_setresgid32:
ret = get_errno(setresgid(arg1, arg2, arg3));
break;
#endif
#ifdef TARGET_NR_getresgid32
case TARGET_NR_getresgid32:
{
gid_t rgid, egid, sgid;
ret = get_errno(getresgid(&rgid, &egid, &sgid));
if (!is_error(ret)) {
tput32(arg1, tswap32(rgid));
tput32(arg2, tswap32(egid));
tput32(arg3, tswap32(sgid));
}
}
break;
#endif
#ifdef TARGET_NR_chown32
case TARGET_NR_chown32:
p = lock_user_string(arg1);
ret = get_errno(chown(p, arg2, arg3));
unlock_user(p, arg1, 0);
break;
#endif
#ifdef TARGET_NR_setuid32
case TARGET_NR_setuid32:
ret = get_errno(setuid(arg1));
break;
#endif
#ifdef TARGET_NR_setgid32
case TARGET_NR_setgid32:
ret = get_errno(setgid(arg1));
break;
#endif
#ifdef TARGET_NR_setfsuid32
case TARGET_NR_setfsuid32:
ret = get_errno(setfsuid(arg1));
break;
#endif
#ifdef TARGET_NR_setfsgid32
case TARGET_NR_setfsgid32:
ret = get_errno(setfsgid(arg1));
break;
#endif
case TARGET_NR_pivot_root:
goto unimplemented;
#ifdef TARGET_NR_mincore
case TARGET_NR_mincore:
goto unimplemented;
#endif
#ifdef TARGET_NR_madvise
case TARGET_NR_madvise:
/* A straight passthrough may not be safe because qemu sometimes
           turns private file-backed mappings into anonymous mappings.
This will break MADV_DONTNEED.
This is a hint, so ignoring and returning success is ok. */
ret = get_errno(0);
break;
#endif
#if TARGET_LONG_BITS == 32
case TARGET_NR_fcntl64:
{
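        /* 64-bit file locking: map the TARGET_F_*64 commands and convert
           struct flock64 between guest and host layouts (ARM EABI has its
           own flock64 layout). */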
int cmd;
struct flock64 fl;
struct target_flock64 *target_fl;
#ifdef TARGET_ARM
struct target_eabi_flock64 *target_efl;
#endif
switch(arg2){
case TARGET_F_GETLK64:
cmd = F_GETLK64;
break;
case TARGET_F_SETLK64:
cmd = F_SETLK64;
break;
case TARGET_F_SETLKW64:
            cmd = F_SETLKW64;
break;
default:
cmd = arg2;
break;
}
switch(arg2) {
case TARGET_F_GETLK64:
#ifdef TARGET_ARM
if (((CPUARMState *)cpu_env)->eabi) {
lock_user_struct(target_efl, arg3, 1);
fl.l_type = tswap16(target_efl->l_type);
fl.l_whence = tswap16(target_efl->l_whence);
fl.l_start = tswap64(target_efl->l_start);
fl.l_len = tswap64(target_efl->l_len);
fl.l_pid = tswapl(target_efl->l_pid);
unlock_user_struct(target_efl, arg3, 0);
} else
#endif
{
lock_user_struct(target_fl, arg3, 1);
fl.l_type = tswap16(target_fl->l_type);
fl.l_whence = tswap16(target_fl->l_whence);
fl.l_start = tswap64(target_fl->l_start);
fl.l_len = tswap64(target_fl->l_len);
fl.l_pid = tswapl(target_fl->l_pid);
unlock_user_struct(target_fl, arg3, 0);
}
ret = get_errno(fcntl(arg1, cmd, &fl));
if (ret == 0) {
#ifdef TARGET_ARM
if (((CPUARMState *)cpu_env)->eabi) {
lock_user_struct(target_efl, arg3, 0);
target_efl->l_type = tswap16(fl.l_type);
target_efl->l_whence = tswap16(fl.l_whence);
target_efl->l_start = tswap64(fl.l_start);
target_efl->l_len = tswap64(fl.l_len);
target_efl->l_pid = tswapl(fl.l_pid);
unlock_user_struct(target_efl, arg3, 1);
} else
#endif
{
lock_user_struct(target_fl, arg3, 0);
target_fl->l_type = tswap16(fl.l_type);
target_fl->l_whence = tswap16(fl.l_whence);
target_fl->l_start = tswap64(fl.l_start);
target_fl->l_len = tswap64(fl.l_len);
target_fl->l_pid = tswapl(fl.l_pid);
unlock_user_struct(target_fl, arg3, 1);
}
}
break;
case TARGET_F_SETLK64:
case TARGET_F_SETLKW64:
#ifdef TARGET_ARM
if (((CPUARMState *)cpu_env)->eabi) {
lock_user_struct(target_efl, arg3, 1);
fl.l_type = tswap16(target_efl->l_type);
fl.l_whence = tswap16(target_efl->l_whence);
fl.l_start = tswap64(target_efl->l_start);
fl.l_len = tswap64(target_efl->l_len);
fl.l_pid = tswapl(target_efl->l_pid);
unlock_user_struct(target_efl, arg3, 0);
} else
#endif
{
lock_user_struct(target_fl, arg3, 1);
fl.l_type = tswap16(target_fl->l_type);
fl.l_whence = tswap16(target_fl->l_whence);
fl.l_start = tswap64(target_fl->l_start);
fl.l_len = tswap64(target_fl->l_len);
fl.l_pid = tswapl(target_fl->l_pid);
unlock_user_struct(target_fl, arg3, 0);
}
ret = get_errno(fcntl(arg1, cmd, &fl));
break;
default:
ret = get_errno(do_fcntl(arg1, cmd, arg3));
break;
}
break;
}
#endif
#ifdef TARGET_NR_cacheflush
case TARGET_NR_cacheflush:
/* self-modifying code is handled automatically, so nothing needed */
ret = 0;
break;
#endif
#ifdef TARGET_NR_security
case TARGET_NR_security:
goto unimplemented;
#endif
#ifdef TARGET_NR_getpagesize
case TARGET_NR_getpagesize:
ret = TARGET_PAGE_SIZE;
break;
#endif
case TARGET_NR_gettid:
ret = get_errno(gettid());
break;
#ifdef TARGET_NR_readahead
case TARGET_NR_readahead:
goto unimplemented;
#endif
#ifdef TARGET_NR_setxattr
case TARGET_NR_setxattr:
case TARGET_NR_lsetxattr:
case TARGET_NR_fsetxattr:
case TARGET_NR_getxattr:
case TARGET_NR_lgetxattr:
case TARGET_NR_fgetxattr:
case TARGET_NR_listxattr:
case TARGET_NR_llistxattr:
case TARGET_NR_flistxattr:
case TARGET_NR_removexattr:
case TARGET_NR_lremovexattr:
case TARGET_NR_fremovexattr:
goto unimplemented_nowarn;
#endif
#ifdef TARGET_NR_set_thread_area
case TARGET_NR_set_thread_area:
#ifdef TARGET_MIPS
((CPUMIPSState *) cpu_env)->tls_value = arg1;
ret = 0;
break;
#else
goto unimplemented_nowarn;
#endif
#endif
#ifdef TARGET_NR_get_thread_area
case TARGET_NR_get_thread_area:
goto unimplemented_nowarn;
#endif
#ifdef TARGET_NR_getdomainname
case TARGET_NR_getdomainname:
goto unimplemented_nowarn;
#endif
#ifdef TARGET_NR_clock_gettime
case TARGET_NR_clock_gettime:
{
struct timespec ts;
ret = get_errno(clock_gettime(arg1, &ts));
if (!is_error(ret)) {
host_to_target_timespec(arg2, &ts);
}
break;
}
#endif
#ifdef TARGET_NR_clock_getres
case TARGET_NR_clock_getres:
{
struct timespec ts;
ret = get_errno(clock_getres(arg1, &ts));
if (!is_error(ret)) {
host_to_target_timespec(arg2, &ts);
}
break;
}
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
case TARGET_NR_set_tid_address:
ret = get_errno(set_tid_address((int *) arg1));
break;
#endif
#ifdef TARGET_NR_tkill
case TARGET_NR_tkill:
ret = get_errno(sys_tkill((int)arg1, (int)arg2));
break;
#endif
#ifdef TARGET_NR_tgkill
case TARGET_NR_tgkill:
ret = get_errno(sys_tgkill((int)arg1, (int)arg2, (int)arg3));
break;
#endif
#ifdef TARGET_NR_set_robust_list
case TARGET_NR_set_robust_list:
goto unimplemented_nowarn;
#endif
default:
unimplemented:
gemu_log("qemu: Unsupported syscall: %d\n", num);
#if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
unimplemented_nowarn:
#endif
ret = -ENOSYS;
break;
}
fail:
#ifdef DEBUG
gemu_log(" = %ld\n", ret);
#endif
return ret;
}
| 22,084 |
qemu | bd269ebc82fbaa5fe7ce5bc7c1770ac8acecd884 | 0 | void unix_start_outgoing_migration(MigrationState *s,
const char *path,
Error **errp)
{
SocketAddressLegacy *saddr = unix_build_address(path);
socket_start_outgoing_migration(s, saddr, errp);
}
| 22,085 |
FFmpeg | 80a5d05108cb218e8cd2e25c6621a3bfef0a832e | 0 | static av_cold int vaapi_encode_mjpeg_init(AVCodecContext *avctx)
{
return ff_vaapi_encode_init(avctx, &vaapi_encode_type_mjpeg);
}
| 22,087 |
FFmpeg | d7e14c0d103a2c9cca6c50568e09b40d6f48ea19 | 0 | static inline int is_yuv_planar(const PixFmtInfo *ps)
{
return (ps->color_type == FF_COLOR_YUV ||
ps->color_type == FF_COLOR_YUV_JPEG) &&
ps->pixel_type == FF_PIXEL_PLANAR;
}
| 22,088 |
FFmpeg | 57d77b3963ce1023eaf5ada8cba58b9379405cc8 | 0 | int av_opencl_create_kernel(AVOpenCLKernelEnv *env, const char *kernel_name)
{
cl_int status;
int i, ret = 0;
LOCK_OPENCL;
if (strlen(kernel_name) + 1 > AV_OPENCL_MAX_KERNEL_NAME_SIZE) {
av_log(&openclutils, AV_LOG_ERROR, "Created kernel name %s is too long\n", kernel_name);
ret = AVERROR(EINVAL);
goto end;
}
if (!env->kernel) {
if (gpu_env.kernel_count >= MAX_KERNEL_NUM) {
av_log(&openclutils, AV_LOG_ERROR,
"Could not create kernel with name '%s', maximum number of kernels %d already reached\n",
kernel_name, MAX_KERNEL_NUM);
ret = AVERROR(EINVAL);
goto end;
}
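        /* Try each compiled program in turn until one of them contains a
           kernel with the requested name. */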
for (i = 0; i < gpu_env.program_count; i++) {
env->kernel = clCreateKernel(gpu_env.programs[i], kernel_name, &status);
if (status == CL_SUCCESS)
break;
}
if (status != CL_SUCCESS) {
av_log(&openclutils, AV_LOG_ERROR, "Could not create OpenCL kernel: %s\n", opencl_errstr(status));
ret = AVERROR_EXTERNAL;
goto end;
}
gpu_env.kernel_count++;
env->command_queue = gpu_env.command_queue;
av_strlcpy(env->kernel_name, kernel_name, sizeof(env->kernel_name));
}
end:
UNLOCK_OPENCL;
return ret;
}
| 22,090 |
FFmpeg | f5695926235c9b2a60af07b21c2d6f1db990cc2a | 0 | static int pulse_write_packet(AVFormatContext *h, AVPacket *pkt)
{
PulseData *s = h->priv_data;
int size = pkt->size;
uint8_t *buf = pkt->data;
int error;
if (s->stream_index != pkt->stream_index)
return 0;
if ((error = pa_simple_write(s->pa, buf, size, &error))) {
av_log(s, AV_LOG_ERROR, "pa_simple_write failed: %s\n", pa_strerror(error));
return AVERROR(EIO);
}
return 0;
}
| 22,092 |
FFmpeg | 255d4e717faa98ab783401acd68a278af32f6360 | 0 | static inline int decode_picture_parameter_set(H264Context *h, int bit_length){
MpegEncContext * const s = &h->s;
unsigned int tmp, pps_id= get_ue_golomb(&s->gb);
PPS *pps;
pps = alloc_parameter_set(h, (void **)h->pps_buffers, pps_id, MAX_PPS_COUNT, sizeof(PPS), "pps");
if(pps == NULL)
return -1;
tmp= get_ue_golomb(&s->gb);
if(tmp>=MAX_SPS_COUNT || h->sps_buffers[tmp] == NULL){
av_log(h->s.avctx, AV_LOG_ERROR, "sps_id out of range\n");
return -1;
}
pps->sps_id= tmp;
pps->cabac= get_bits1(&s->gb);
pps->pic_order_present= get_bits1(&s->gb);
pps->slice_group_count= get_ue_golomb(&s->gb) + 1;
if(pps->slice_group_count > 1 ){
pps->mb_slice_group_map_type= get_ue_golomb(&s->gb);
av_log(h->s.avctx, AV_LOG_ERROR, "FMO not supported\n");
switch(pps->mb_slice_group_map_type){
case 0:
#if 0
| for( i = 0; i <= num_slice_groups_minus1; i++ ) | | |
| run_length[ i ] |1 |ue(v) |
#endif
break;
case 2:
#if 0
| for( i = 0; i < num_slice_groups_minus1; i++ ) | | |
|{ | | |
| top_left_mb[ i ] |1 |ue(v) |
| bottom_right_mb[ i ] |1 |ue(v) |
| } | | |
#endif
break;
case 3:
case 4:
case 5:
#if 0
| slice_group_change_direction_flag |1 |u(1) |
| slice_group_change_rate_minus1 |1 |ue(v) |
#endif
break;
case 6:
#if 0
| slice_group_id_cnt_minus1 |1 |ue(v) |
| for( i = 0; i <= slice_group_id_cnt_minus1; i++ | | |
|) | | |
| slice_group_id[ i ] |1 |u(v) |
#endif
break;
}
}
pps->ref_count[0]= get_ue_golomb(&s->gb) + 1;
pps->ref_count[1]= get_ue_golomb(&s->gb) + 1;
if(pps->ref_count[0]-1 > 32-1 || pps->ref_count[1]-1 > 32-1){
av_log(h->s.avctx, AV_LOG_ERROR, "reference overflow (pps)\n");
pps->ref_count[0]= pps->ref_count[1]= 1;
return -1;
}
pps->weighted_pred= get_bits1(&s->gb);
pps->weighted_bipred_idc= get_bits(&s->gb, 2);
pps->init_qp= get_se_golomb(&s->gb) + 26;
pps->init_qs= get_se_golomb(&s->gb) + 26;
pps->chroma_qp_index_offset[0]= get_se_golomb(&s->gb);
pps->deblocking_filter_parameters_present= get_bits1(&s->gb);
pps->constrained_intra_pred= get_bits1(&s->gb);
pps->redundant_pic_cnt_present = get_bits1(&s->gb);
pps->transform_8x8_mode= 0;
h->dequant_coeff_pps= -1; //contents of sps/pps can change even if id doesn't, so reinit
memcpy(pps->scaling_matrix4, h->sps_buffers[pps->sps_id]->scaling_matrix4, sizeof(pps->scaling_matrix4));
memcpy(pps->scaling_matrix8, h->sps_buffers[pps->sps_id]->scaling_matrix8, sizeof(pps->scaling_matrix8));
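    /* Any remaining bits carry the FRExt/High profile additions: the 8x8
       transform flag, scaling matrices and a second chroma QP offset;
       otherwise the second offset mirrors the first. */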
if(get_bits_count(&s->gb) < bit_length){
pps->transform_8x8_mode= get_bits1(&s->gb);
decode_scaling_matrices(h, h->sps_buffers[pps->sps_id], pps, 0, pps->scaling_matrix4, pps->scaling_matrix8);
pps->chroma_qp_index_offset[1]= get_se_golomb(&s->gb); //second_chroma_qp_index_offset
} else {
pps->chroma_qp_index_offset[1]= pps->chroma_qp_index_offset[0];
}
build_qp_table(pps, 0, pps->chroma_qp_index_offset[0]);
build_qp_table(pps, 1, pps->chroma_qp_index_offset[1]);
if(pps->chroma_qp_index_offset[0] != pps->chroma_qp_index_offset[1])
h->pps.chroma_qp_diff= 1;
if(s->avctx->debug&FF_DEBUG_PICT_INFO){
av_log(h->s.avctx, AV_LOG_DEBUG, "pps:%u sps:%u %s slice_groups:%d ref:%d/%d %s qp:%d/%d/%d/%d %s %s %s %s\n",
pps_id, pps->sps_id,
pps->cabac ? "CABAC" : "CAVLC",
pps->slice_group_count,
pps->ref_count[0], pps->ref_count[1],
pps->weighted_pred ? "weighted" : "",
pps->init_qp, pps->init_qs, pps->chroma_qp_index_offset[0], pps->chroma_qp_index_offset[1],
pps->deblocking_filter_parameters_present ? "LPAR" : "",
pps->constrained_intra_pred ? "CONSTR" : "",
pps->redundant_pic_cnt_present ? "REDU" : "",
pps->transform_8x8_mode ? "8x8DCT" : ""
);
}
return 0;
}
| 22,095 |
qemu | 3eff1f46f08a360a4ae9f834ce9fef4c45bf6f0f | 0 | static VirtIOSCSIReq *virtio_scsi_init_req(VirtIOSCSI *s, VirtQueue *vq)
{
VirtIOSCSIReq *req;
req = g_malloc(sizeof(*req));
req->vq = vq;
req->dev = s;
req->sreq = NULL;
qemu_sglist_init(&req->qsgl, DEVICE(s), 8, &address_space_memory);
return req;
}
| 22,096 |
qemu | 1eeb5c7deacbfb4d4cad17590a16a99f3d85eabb | 0 | static void bcm2835_peripherals_realize(DeviceState *dev, Error **errp)
{
BCM2835PeripheralState *s = BCM2835_PERIPHERALS(dev);
Object *obj;
MemoryRegion *ram;
Error *err = NULL;
uint32_t ram_size, vcram_size;
int n;
obj = object_property_get_link(OBJECT(dev), "ram", &err);
if (obj == NULL) {
error_setg(errp, "%s: required ram link not found: %s",
__func__, error_get_pretty(err));
return;
}
ram = MEMORY_REGION(obj);
ram_size = memory_region_size(ram);
/* Map peripherals and RAM into the GPU address space. */
memory_region_init_alias(&s->peri_mr_alias, OBJECT(s),
"bcm2835-peripherals", &s->peri_mr, 0,
memory_region_size(&s->peri_mr));
memory_region_add_subregion_overlap(&s->gpu_bus_mr, BCM2835_VC_PERI_BASE,
&s->peri_mr_alias, 1);
/* RAM is aliased four times (different cache configurations) on the GPU */
for (n = 0; n < 4; n++) {
memory_region_init_alias(&s->ram_alias[n], OBJECT(s),
"bcm2835-gpu-ram-alias[*]", ram, 0, ram_size);
memory_region_add_subregion_overlap(&s->gpu_bus_mr, (hwaddr)n << 30,
&s->ram_alias[n], 0);
}
/* Interrupt Controller */
object_property_set_bool(OBJECT(&s->ic), true, "realized", &err);
if (err) {
error_propagate(errp, err);
return;
}
memory_region_add_subregion(&s->peri_mr, ARMCTRL_IC_OFFSET,
sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->ic), 0));
sysbus_pass_irq(SYS_BUS_DEVICE(s), SYS_BUS_DEVICE(&s->ic));
/* UART0 */
qdev_prop_set_chr(DEVICE(s->uart0), "chardev", serial_hds[0]);
object_property_set_bool(OBJECT(s->uart0), true, "realized", &err);
if (err) {
error_propagate(errp, err);
return;
}
memory_region_add_subregion(&s->peri_mr, UART0_OFFSET,
sysbus_mmio_get_region(s->uart0, 0));
sysbus_connect_irq(s->uart0, 0,
qdev_get_gpio_in_named(DEVICE(&s->ic), BCM2835_IC_GPU_IRQ,
INTERRUPT_UART));
/* AUX / UART1 */
qdev_prop_set_chr(DEVICE(&s->aux), "chardev", serial_hds[1]);
object_property_set_bool(OBJECT(&s->aux), true, "realized", &err);
if (err) {
error_propagate(errp, err);
return;
}
memory_region_add_subregion(&s->peri_mr, UART1_OFFSET,
sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->aux), 0));
sysbus_connect_irq(SYS_BUS_DEVICE(&s->aux), 0,
qdev_get_gpio_in_named(DEVICE(&s->ic), BCM2835_IC_GPU_IRQ,
INTERRUPT_AUX));
/* Mailboxes */
object_property_set_bool(OBJECT(&s->mboxes), true, "realized", &err);
if (err) {
error_propagate(errp, err);
return;
}
memory_region_add_subregion(&s->peri_mr, ARMCTRL_0_SBM_OFFSET,
sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->mboxes), 0));
sysbus_connect_irq(SYS_BUS_DEVICE(&s->mboxes), 0,
qdev_get_gpio_in_named(DEVICE(&s->ic), BCM2835_IC_ARM_IRQ,
INTERRUPT_ARM_MAILBOX));
/* Framebuffer */
vcram_size = (uint32_t)object_property_get_int(OBJECT(s), "vcram-size",
&err);
if (err) {
error_propagate(errp, err);
return;
}
object_property_set_int(OBJECT(&s->fb), ram_size - vcram_size,
"vcram-base", &err);
if (err) {
error_propagate(errp, err);
return;
}
object_property_set_bool(OBJECT(&s->fb), true, "realized", &err);
if (err) {
error_propagate(errp, err);
return;
}
memory_region_add_subregion(&s->mbox_mr, MBOX_CHAN_FB << MBOX_AS_CHAN_SHIFT,
sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->fb), 0));
sysbus_connect_irq(SYS_BUS_DEVICE(&s->fb), 0,
qdev_get_gpio_in(DEVICE(&s->mboxes), MBOX_CHAN_FB));
/* Property channel */
object_property_set_bool(OBJECT(&s->property), true, "realized", &err);
if (err) {
error_propagate(errp, err);
return;
}
memory_region_add_subregion(&s->mbox_mr,
MBOX_CHAN_PROPERTY << MBOX_AS_CHAN_SHIFT,
sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->property), 0));
sysbus_connect_irq(SYS_BUS_DEVICE(&s->property), 0,
qdev_get_gpio_in(DEVICE(&s->mboxes), MBOX_CHAN_PROPERTY));
/* Random Number Generator */
object_property_set_bool(OBJECT(&s->rng), true, "realized", &err);
if (err) {
error_propagate(errp, err);
return;
}
memory_region_add_subregion(&s->peri_mr, RNG_OFFSET,
sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->rng), 0));
/* Extended Mass Media Controller */
object_property_set_int(OBJECT(&s->sdhci), BCM2835_SDHC_CAPAREG, "capareg",
&err);
if (err) {
error_propagate(errp, err);
return;
}
object_property_set_bool(OBJECT(&s->sdhci), true, "pending-insert-quirk",
&err);
if (err) {
error_propagate(errp, err);
return;
}
object_property_set_bool(OBJECT(&s->sdhci), true, "realized", &err);
if (err) {
error_propagate(errp, err);
return;
}
memory_region_add_subregion(&s->peri_mr, EMMC_OFFSET,
sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->sdhci), 0));
sysbus_connect_irq(SYS_BUS_DEVICE(&s->sdhci), 0,
qdev_get_gpio_in_named(DEVICE(&s->ic), BCM2835_IC_GPU_IRQ,
INTERRUPT_ARASANSDIO));
object_property_add_alias(OBJECT(s), "sd-bus", OBJECT(&s->sdhci), "sd-bus",
&err);
if (err) {
error_propagate(errp, err);
return;
}
/* DMA Channels */
object_property_set_bool(OBJECT(&s->dma), true, "realized", &err);
if (err) {
error_propagate(errp, err);
return;
}
memory_region_add_subregion(&s->peri_mr, DMA_OFFSET,
sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->dma), 0));
memory_region_add_subregion(&s->peri_mr, DMA15_OFFSET,
sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->dma), 1));
for (n = 0; n <= 12; n++) {
sysbus_connect_irq(SYS_BUS_DEVICE(&s->dma), n,
qdev_get_gpio_in_named(DEVICE(&s->ic),
BCM2835_IC_GPU_IRQ,
INTERRUPT_DMA0 + n));
}
}
| 22,098 |
qemu | 0ce470cd4ca88e84e547a3b95159d23ce6be419e | 0 | static void ppc_cpu_realizefn(DeviceState *dev, Error **errp)
{
CPUState *cs = CPU(dev);
PowerPCCPU *cpu = POWERPC_CPU(dev);
PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
Error *local_err = NULL;
#if !defined(CONFIG_USER_ONLY)
int max_smt = kvm_enabled() ? kvmppc_smt_threads() : 1;
#endif
#if !defined(CONFIG_USER_ONLY)
if (smp_threads > max_smt) {
error_setg(errp, "Cannot support more than %d threads on PPC with %s",
max_smt, kvm_enabled() ? "KVM" : "TCG");
return;
}
#endif
if (kvm_enabled()) {
if (kvmppc_fixup_cpu(cpu) != 0) {
error_setg(errp, "Unable to virtualize selected CPU with KVM");
return;
}
} else if (tcg_enabled()) {
if (ppc_fixup_cpu(cpu) != 0) {
error_setg(errp, "Unable to emulate selected CPU with TCG");
return;
}
}
#if defined(TARGET_PPCEMB)
if (!ppc_cpu_is_valid(pcc)) {
error_setg(errp, "CPU does not possess a BookE or 4xx MMU. "
"Please use qemu-system-ppc or qemu-system-ppc64 instead "
"or choose another CPU model.");
return;
}
#endif
create_ppc_opcodes(cpu, &local_err);
if (local_err != NULL) {
error_propagate(errp, local_err);
return;
}
init_ppc_proc(cpu);
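    /* Expose the optional FPU, Altivec and SPE register sets to the gdb stub
       when the CPU model implements them. */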
if (pcc->insns_flags & PPC_FLOAT) {
gdb_register_coprocessor(cs, gdb_get_float_reg, gdb_set_float_reg,
33, "power-fpu.xml", 0);
}
if (pcc->insns_flags & PPC_ALTIVEC) {
gdb_register_coprocessor(cs, gdb_get_avr_reg, gdb_set_avr_reg,
34, "power-altivec.xml", 0);
}
if (pcc->insns_flags & PPC_SPE) {
gdb_register_coprocessor(cs, gdb_get_spe_reg, gdb_set_spe_reg,
34, "power-spe.xml", 0);
}
qemu_init_vcpu(cs);
pcc->parent_realize(dev, errp);
#if defined(PPC_DUMP_CPU)
{
CPUPPCState *env = &cpu->env;
const char *mmu_model, *excp_model, *bus_model;
switch (env->mmu_model) {
case POWERPC_MMU_32B:
mmu_model = "PowerPC 32";
break;
case POWERPC_MMU_SOFT_6xx:
mmu_model = "PowerPC 6xx/7xx with software driven TLBs";
break;
case POWERPC_MMU_SOFT_74xx:
mmu_model = "PowerPC 74xx with software driven TLBs";
break;
case POWERPC_MMU_SOFT_4xx:
mmu_model = "PowerPC 4xx with software driven TLBs";
break;
case POWERPC_MMU_SOFT_4xx_Z:
mmu_model = "PowerPC 4xx with software driven TLBs "
"and zones protections";
break;
case POWERPC_MMU_REAL:
mmu_model = "PowerPC real mode only";
break;
case POWERPC_MMU_MPC8xx:
mmu_model = "PowerPC MPC8xx";
break;
case POWERPC_MMU_BOOKE:
mmu_model = "PowerPC BookE";
break;
case POWERPC_MMU_BOOKE206:
mmu_model = "PowerPC BookE 2.06";
break;
case POWERPC_MMU_601:
mmu_model = "PowerPC 601";
break;
#if defined (TARGET_PPC64)
case POWERPC_MMU_64B:
mmu_model = "PowerPC 64";
break;
#endif
default:
mmu_model = "Unknown or invalid";
break;
}
switch (env->excp_model) {
case POWERPC_EXCP_STD:
excp_model = "PowerPC";
break;
case POWERPC_EXCP_40x:
excp_model = "PowerPC 40x";
break;
case POWERPC_EXCP_601:
excp_model = "PowerPC 601";
break;
case POWERPC_EXCP_602:
excp_model = "PowerPC 602";
break;
case POWERPC_EXCP_603:
excp_model = "PowerPC 603";
break;
case POWERPC_EXCP_603E:
excp_model = "PowerPC 603e";
break;
case POWERPC_EXCP_604:
excp_model = "PowerPC 604";
break;
case POWERPC_EXCP_7x0:
excp_model = "PowerPC 740/750";
break;
case POWERPC_EXCP_7x5:
excp_model = "PowerPC 745/755";
break;
case POWERPC_EXCP_74xx:
excp_model = "PowerPC 74xx";
break;
case POWERPC_EXCP_BOOKE:
excp_model = "PowerPC BookE";
break;
#if defined (TARGET_PPC64)
case POWERPC_EXCP_970:
excp_model = "PowerPC 970";
break;
#endif
default:
excp_model = "Unknown or invalid";
break;
}
switch (env->bus_model) {
case PPC_FLAGS_INPUT_6xx:
bus_model = "PowerPC 6xx";
break;
case PPC_FLAGS_INPUT_BookE:
bus_model = "PowerPC BookE";
break;
case PPC_FLAGS_INPUT_405:
bus_model = "PowerPC 405";
break;
case PPC_FLAGS_INPUT_401:
bus_model = "PowerPC 401/403";
break;
case PPC_FLAGS_INPUT_RCPU:
bus_model = "RCPU / MPC8xx";
break;
#if defined (TARGET_PPC64)
case PPC_FLAGS_INPUT_970:
bus_model = "PowerPC 970";
break;
#endif
default:
bus_model = "Unknown or invalid";
break;
}
printf("PowerPC %-12s : PVR %08x MSR %016" PRIx64 "\n"
" MMU model : %s\n",
object_class_get_name(OBJECT_CLASS(pcc)),
pcc->pvr, pcc->msr_mask, mmu_model);
#if !defined(CONFIG_USER_ONLY)
if (env->tlb.tlb6) {
printf(" %d %s TLB in %d ways\n",
                   env->nb_tlb, env->id_tlbs ? "split" : "merged",
env->nb_ways);
}
#endif
printf(" Exceptions model : %s\n"
" Bus model : %s\n",
excp_model, bus_model);
printf(" MSR features :\n");
if (env->flags & POWERPC_FLAG_SPE)
printf(" signal processing engine enable"
"\n");
else if (env->flags & POWERPC_FLAG_VRE)
printf(" vector processor enable\n");
if (env->flags & POWERPC_FLAG_TGPR)
printf(" temporary GPRs\n");
else if (env->flags & POWERPC_FLAG_CE)
printf(" critical input enable\n");
if (env->flags & POWERPC_FLAG_SE)
printf(" single-step trace mode\n");
else if (env->flags & POWERPC_FLAG_DWE)
printf(" debug wait enable\n");
else if (env->flags & POWERPC_FLAG_UBLE)
printf(" user BTB lock enable\n");
if (env->flags & POWERPC_FLAG_BE)
printf(" branch-step trace mode\n");
else if (env->flags & POWERPC_FLAG_DE)
printf(" debug interrupt enable\n");
if (env->flags & POWERPC_FLAG_PX)
printf(" inclusive protection\n");
else if (env->flags & POWERPC_FLAG_PMM)
printf(" performance monitor mark\n");
if (env->flags == POWERPC_FLAG_NONE)
printf(" none\n");
printf(" Time-base/decrementer clock source: %s\n",
env->flags & POWERPC_FLAG_RTC_CLK ? "RTC clock" : "bus clock");
dump_ppc_insns(env);
dump_ppc_sprs(env);
fflush(stdout);
}
#endif
}
| 22,099 |
qemu | 99a3c89d5d538dc6c360e35dffb797cfe06e9cda | 0 | static int qemu_rbd_open(BlockDriverState *bs, QDict *options, int flags,
Error **errp)
{
BDRVRBDState *s = bs->opaque;
char pool[RBD_MAX_POOL_NAME_SIZE];
char snap_buf[RBD_MAX_SNAP_NAME_SIZE];
char conf[RBD_MAX_CONF_SIZE];
char clientname_buf[RBD_MAX_CONF_SIZE];
char *clientname;
QemuOpts *opts;
Error *local_err = NULL;
const char *filename;
int r;
opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort);
qemu_opts_absorb_qdict(opts, options, &local_err);
if (local_err) {
error_propagate(errp, local_err);
qemu_opts_del(opts);
return -EINVAL;
}
filename = qemu_opt_get(opts, "filename");
if (qemu_rbd_parsename(filename, pool, sizeof(pool),
snap_buf, sizeof(snap_buf),
s->name, sizeof(s->name),
conf, sizeof(conf), errp) < 0) {
r = -EINVAL;
goto failed_opts;
}
clientname = qemu_rbd_parse_clientname(conf, clientname_buf);
r = rados_create(&s->cluster, clientname);
if (r < 0) {
error_setg(errp, "error initializing");
goto failed_opts;
}
s->snap = NULL;
if (snap_buf[0] != '\0') {
s->snap = g_strdup(snap_buf);
}
/*
* Fallback to more conservative semantics if setting cache
* options fails. Ignore errors from setting rbd_cache because the
* only possible error is that the option does not exist, and
* librbd defaults to no caching. If write through caching cannot
* be set up, fall back to no caching.
*/
if (flags & BDRV_O_NOCACHE) {
rados_conf_set(s->cluster, "rbd_cache", "false");
} else {
rados_conf_set(s->cluster, "rbd_cache", "true");
}
if (strstr(conf, "conf=") == NULL) {
/* try default location, but ignore failure */
rados_conf_read_file(s->cluster, NULL);
}
if (conf[0] != '\0') {
r = qemu_rbd_set_conf(s->cluster, conf, errp);
if (r < 0) {
goto failed_shutdown;
}
}
r = rados_connect(s->cluster);
if (r < 0) {
error_setg(errp, "error connecting");
goto failed_shutdown;
}
r = rados_ioctx_create(s->cluster, pool, &s->io_ctx);
if (r < 0) {
error_setg(errp, "error opening pool %s", pool);
goto failed_shutdown;
}
r = rbd_open(s->io_ctx, s->name, &s->image, s->snap);
if (r < 0) {
error_setg(errp, "error reading header from %s", s->name);
goto failed_open;
}
bs->read_only = (s->snap != NULL);
qemu_opts_del(opts);
return 0;
failed_open:
rados_ioctx_destroy(s->io_ctx);
failed_shutdown:
rados_shutdown(s->cluster);
g_free(s->snap);
failed_opts:
qemu_opts_del(opts);
return r;
}
| 22,100 |
qemu | 2eb74e1a1ef145034aa41255c4a6f469d560c96d | 0 | static void render_memory_region(FlatView *view,
MemoryRegion *mr,
Int128 base,
AddrRange clip,
bool readonly)
{
MemoryRegion *subregion;
unsigned i;
hwaddr offset_in_region;
Int128 remain;
Int128 now;
FlatRange fr;
AddrRange tmp;
if (!mr->enabled) {
return;
}
int128_addto(&base, int128_make64(mr->addr));
readonly |= mr->readonly;
tmp = addrrange_make(base, mr->size);
if (!addrrange_intersects(tmp, clip)) {
return;
}
clip = addrrange_intersection(tmp, clip);
if (mr->alias) {
int128_subfrom(&base, int128_make64(mr->alias->addr));
int128_subfrom(&base, int128_make64(mr->alias_offset));
render_memory_region(view, mr->alias, base, clip, readonly);
return;
}
/* Render subregions in priority order. */
QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
render_memory_region(view, subregion, base, clip, readonly);
}
if (!mr->terminates) {
return;
}
offset_in_region = int128_get64(int128_sub(clip.start, base));
base = clip.start;
remain = clip.size;
/* Render the region itself into any gaps left by the current view. */
for (i = 0; i < view->nr && int128_nz(remain); ++i) {
if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
continue;
}
if (int128_lt(base, view->ranges[i].addr.start)) {
now = int128_min(remain,
int128_sub(view->ranges[i].addr.start, base));
fr.mr = mr;
fr.offset_in_region = offset_in_region;
fr.addr = addrrange_make(base, now);
fr.dirty_log_mask = mr->dirty_log_mask;
fr.romd_mode = mr->romd_mode;
fr.readonly = readonly;
flatview_insert(view, i, &fr);
++i;
int128_addto(&base, now);
offset_in_region += int128_get64(now);
int128_subfrom(&remain, now);
}
now = int128_sub(int128_min(int128_add(base, remain),
addrrange_end(view->ranges[i].addr)),
base);
int128_addto(&base, now);
offset_in_region += int128_get64(now);
int128_subfrom(&remain, now);
}
if (int128_nz(remain)) {
fr.mr = mr;
fr.offset_in_region = offset_in_region;
fr.addr = addrrange_make(base, remain);
fr.dirty_log_mask = mr->dirty_log_mask;
fr.romd_mode = mr->romd_mode;
fr.readonly = readonly;
flatview_insert(view, i, &fr);
}
}
| 22,101 |
qemu | 4534ff5426afeeae5238ba10a696cafa9a0168ee | 0 | static int vdi_check(BlockDriverState *bs, BdrvCheckResult *res)
{
/* TODO: additional checks possible. */
BDRVVdiState *s = (BDRVVdiState *)bs->opaque;
uint32_t blocks_allocated = 0;
uint32_t block;
uint32_t *bmap;
logout("\n");
bmap = g_malloc(s->header.blocks_in_image * sizeof(uint32_t));
memset(bmap, 0xff, s->header.blocks_in_image * sizeof(uint32_t));
/* Check block map and value of blocks_allocated. */
for (block = 0; block < s->header.blocks_in_image; block++) {
uint32_t bmap_entry = le32_to_cpu(s->bmap[block]);
if (VDI_IS_ALLOCATED(bmap_entry)) {
if (bmap_entry < s->header.blocks_in_image) {
blocks_allocated++;
if (!VDI_IS_ALLOCATED(bmap[bmap_entry])) {
bmap[bmap_entry] = bmap_entry;
} else {
fprintf(stderr, "ERROR: block index %" PRIu32
" also used by %" PRIu32 "\n", bmap[bmap_entry], bmap_entry);
res->corruptions++;
}
} else {
fprintf(stderr, "ERROR: block index %" PRIu32
" too large, is %" PRIu32 "\n", block, bmap_entry);
res->corruptions++;
}
}
}
if (blocks_allocated != s->header.blocks_allocated) {
fprintf(stderr, "ERROR: allocated blocks mismatch, is %" PRIu32
", should be %" PRIu32 "\n",
blocks_allocated, s->header.blocks_allocated);
res->corruptions++;
}
g_free(bmap);
return 0;
}
| 22,102 |
qemu | a8170e5e97ad17ca169c64ba87ae2f53850dab4c | 0 | static const char *exynos4210_uart_regname(target_phys_addr_t offset)
{
int regs_number = sizeof(exynos4210_uart_regs) / sizeof(Exynos4210UartReg);
int i;
for (i = 0; i < regs_number; i++) {
if (offset == exynos4210_uart_regs[i].offset) {
return exynos4210_uart_regs[i].name;
}
}
return NULL;
}
| 22,103 |
FFmpeg | aa48446c9a42fc29ae46ea98717f29edc7fec27d | 0 | static int xface_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
const AVFrame *frame, int *got_packet)
{
XFaceContext *xface = avctx->priv_data;
ProbRangesQueue pq = {{ 0 }, 0};
uint8_t bitmap_copy[XFACE_PIXELS];
BigInt b = {0};
int i, j, k, ret = 0;
const uint8_t *buf;
uint8_t *p;
char intbuf[XFACE_MAX_DIGITS];
if (avctx->width || avctx->height) {
if (avctx->width != XFACE_WIDTH || avctx->height != XFACE_HEIGHT) {
av_log(avctx, AV_LOG_ERROR,
"Size value %dx%d not supported, only accepts a size of %dx%d\n",
avctx->width, avctx->height, XFACE_WIDTH, XFACE_HEIGHT);
return AVERROR(EINVAL);
}
}
avctx->width = XFACE_WIDTH;
avctx->height = XFACE_HEIGHT;
/* convert image from MONOWHITE to 1=black 0=white bitmap */
buf = frame->data[0];
for (i = 0, j = 0; i < XFACE_PIXELS; ) {
for (k = 0; k < 8; k++)
xface->bitmap[i++] = (buf[j]>>(7-k))&1;
if (++j == XFACE_WIDTH/8) {
buf += frame->linesize[0];
j = 0;
}
}
/* create a copy of bitmap */
memcpy(bitmap_copy, xface->bitmap, XFACE_PIXELS);
ff_xface_generate_face(xface->bitmap, bitmap_copy);
encode_block(xface->bitmap, 16, 16, 0, &pq);
encode_block(xface->bitmap + 16, 16, 16, 0, &pq);
encode_block(xface->bitmap + 32, 16, 16, 0, &pq);
encode_block(xface->bitmap + XFACE_WIDTH * 16, 16, 16, 0, &pq);
encode_block(xface->bitmap + XFACE_WIDTH * 16 + 16, 16, 16, 0, &pq);
encode_block(xface->bitmap + XFACE_WIDTH * 16 + 32, 16, 16, 0, &pq);
encode_block(xface->bitmap + XFACE_WIDTH * 32, 16, 16, 0, &pq);
encode_block(xface->bitmap + XFACE_WIDTH * 32 + 16, 16, 16, 0, &pq);
encode_block(xface->bitmap + XFACE_WIDTH * 32 + 32, 16, 16, 0, &pq);
while (pq.prob_ranges_idx > 0)
push_integer(&b, pq.prob_ranges[--pq.prob_ranges_idx]);
/* write the inverted big integer in b to intbuf */
i = 0;
while (b.nb_words) {
uint8_t r;
ff_big_div(&b, XFACE_PRINTS, &r);
intbuf[i++] = r + XFACE_FIRST_PRINT;
}
if ((ret = ff_alloc_packet2(avctx, pkt, i+2)) < 0)
return ret;
/* revert the number, and close the buffer */
p = pkt->data;
while (--i >= 0)
*(p++) = intbuf[i];
*(p++) = '\n';
*(p++) = 0;
pkt->flags |= AV_PKT_FLAG_KEY;
*got_packet = 1;
return 0;
}
| 22,106 |
qemu | 9dbbc748d671c70599101836cd1c2719d92f3017 | 0 | static inline bool fp_access_check(DisasContext *s)
{
assert(!s->fp_access_checked);
s->fp_access_checked = true;
if (s->cpacr_fpen) {
return true;
}
gen_exception_insn(s, 4, EXCP_UDEF, syn_fp_access_trap(1, 0xe, false),
default_exception_el(s));
return false;
}
| 22,107 |
qemu | 751ebd76e654bd1e65da08ecf694325282b4cfcc | 0 | void block_job_complete(BlockJob *job, Error **errp)
{
if (job->paused || job->cancelled || !job->driver->complete) {
error_set(errp, QERR_BLOCK_JOB_NOT_READY,
bdrv_get_device_name(job->bs));
return;
}
job->driver->complete(job, errp);
}
| 22,108 |
qemu | 0b3652bc70891940f2c7142d39576d17c4d07196 | 0 | static int oss_open (int in, struct oss_params *req,
struct oss_params *obt, int *pfd)
{
int fd;
int oflags;
int mmmmssss;
audio_buf_info abinfo;
int fmt, freq, nchannels;
const char *dspname = in ? conf.devpath_in : conf.devpath_out;
const char *typ = in ? "ADC" : "DAC";
/* Kludge needed to have working mmap on Linux */
oflags = conf.try_mmap ? O_RDWR : (in ? O_RDONLY : O_WRONLY);
fd = open (dspname, oflags | O_NONBLOCK);
if (-1 == fd) {
oss_logerr2 (errno, typ, "Failed to open `%s'\n", dspname);
return -1;
}
freq = req->freq;
nchannels = req->nchannels;
fmt = req->fmt;
if (ioctl (fd, SNDCTL_DSP_SAMPLESIZE, &fmt)) {
oss_logerr2 (errno, typ, "Failed to set sample size %d\n", req->fmt);
goto err;
}
if (ioctl (fd, SNDCTL_DSP_CHANNELS, &nchannels)) {
oss_logerr2 (errno, typ, "Failed to set number of channels %d\n",
req->nchannels);
goto err;
}
if (ioctl (fd, SNDCTL_DSP_SPEED, &freq)) {
oss_logerr2 (errno, typ, "Failed to set frequency %d\n", req->freq);
goto err;
}
if (ioctl (fd, SNDCTL_DSP_NONBLOCK, NULL)) {
oss_logerr2 (errno, typ, "Failed to set non-blocking mode\n");
goto err;
}
mmmmssss = (req->nfrags << 16) | ctz32 (req->fragsize);
if (ioctl (fd, SNDCTL_DSP_SETFRAGMENT, &mmmmssss)) {
oss_logerr2 (errno, typ, "Failed to set buffer length (%d, %d)\n",
req->nfrags, req->fragsize);
goto err;
}
if (ioctl (fd, in ? SNDCTL_DSP_GETISPACE : SNDCTL_DSP_GETOSPACE, &abinfo)) {
oss_logerr2 (errno, typ, "Failed to get buffer length\n");
goto err;
}
if (!abinfo.fragstotal || !abinfo.fragsize) {
AUD_log (AUDIO_CAP, "Returned bogus buffer information(%d, %d) for %s\n",
abinfo.fragstotal, abinfo.fragsize, typ);
goto err;
}
obt->fmt = fmt;
obt->nchannels = nchannels;
obt->freq = freq;
obt->nfrags = abinfo.fragstotal;
obt->fragsize = abinfo.fragsize;
*pfd = fd;
#ifdef DEBUG_MISMATCHES
if ((req->fmt != obt->fmt) ||
(req->nchannels != obt->nchannels) ||
(req->freq != obt->freq) ||
(req->fragsize != obt->fragsize) ||
(req->nfrags != obt->nfrags)) {
dolog ("Audio parameters mismatch\n");
oss_dump_info (req, obt);
}
#endif
#ifdef DEBUG
oss_dump_info (req, obt);
#endif
return 0;
err:
oss_anal_close (&fd);
return -1;
}
| 22,110 |
qemu | e0a039e50d481dce6b4ee45a29002538a258cd89 | 0 | static void netfilter_set_status(Object *obj, const char *str, Error **errp)
{
NetFilterState *nf = NETFILTER(obj);
NetFilterClass *nfc = NETFILTER_GET_CLASS(obj);
if (strcmp(str, "on") && strcmp(str, "off")) {
error_setg(errp, "Invalid value for netfilter status, "
"should be 'on' or 'off'");
return;
}
if (nf->on == !strcmp(str, "on")) {
return;
}
nf->on = !nf->on;
if (nfc->status_changed) {
nfc->status_changed(nf, errp);
}
}
| 22,111 |
qemu | 02a2cbc872df99205eeafd399f01c210e0b797c4 | 0 | static void vtd_interrupt_remap_table_setup(IntelIOMMUState *s)
{
uint64_t value = 0;
value = vtd_get_quad_raw(s, DMAR_IRTA_REG);
s->intr_size = 1UL << ((value & VTD_IRTA_SIZE_MASK) + 1);
s->intr_root = value & VTD_IRTA_ADDR_MASK;
/* TODO: invalidate interrupt entry cache */
VTD_DPRINTF(CSR, "int remap table addr 0x%"PRIx64 " size %"PRIu32,
s->intr_root, s->intr_size);
}
| 22,112 |
qemu | 7d553f27fce284805d7f94603932045ee3bbb979 | 0 | static void usb_msd_password_cb(void *opaque, int err)
{
MSDState *s = opaque;
if (!err)
err = usb_device_attach(&s->dev);
if (err)
qdev_unplug(&s->dev.qdev, NULL);
}
| 22,113 |
qemu | e6f9e6b496fbba419f0f447fbee56a8464a4cc41 | 0 | static int cpu_x86_fill_model_id(char *str)
{
uint32_t eax, ebx, ecx, edx;
int i;
for (i = 0; i < 3; i++) {
host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
memcpy(str + i * 16 + 0, &eax, 4);
memcpy(str + i * 16 + 4, &ebx, 4);
memcpy(str + i * 16 + 8, &ecx, 4);
memcpy(str + i * 16 + 12, &edx, 4);
}
return 0;
}
| 22,114 |
qemu | 9bf3eb2ca542dd9306cb2e72fc68e02ba3e56e2e | 0 | static void gen_movci (DisasContext *ctx, int rd, int rs, int cc, int tf)
{
int l1 = gen_new_label();
uint32_t ccbit;
TCGCond cond;
TCGv t0 = tcg_temp_local_new(TCG_TYPE_TL);
TCGv t1 = tcg_temp_local_new(TCG_TYPE_TL);
TCGv r_tmp = tcg_temp_local_new(TCG_TYPE_I32);
if (cc)
ccbit = 1 << (24 + cc);
else
ccbit = 1 << 23;
if (tf)
cond = TCG_COND_EQ;
else
cond = TCG_COND_NE;
gen_load_gpr(t0, rd);
gen_load_gpr(t1, rs);
tcg_gen_andi_i32(r_tmp, fpu_fcr31, ccbit);
tcg_gen_brcondi_i32(cond, r_tmp, 0, l1);
tcg_temp_free(r_tmp);
tcg_gen_mov_tl(t0, t1);
tcg_temp_free(t1);
gen_set_label(l1);
gen_store_gpr(t0, rd);
tcg_temp_free(t0);
}
| 22,115 |
qemu | 42a268c241183877192c376d03bd9b6d527407c7 | 0 | DISAS_INSN(branch)
{
int32_t offset;
uint32_t base;
int op;
int l1;
base = s->pc;
op = (insn >> 8) & 0xf;
offset = (int8_t)insn;
if (offset == 0) {
offset = cpu_ldsw_code(env, s->pc);
s->pc += 2;
} else if (offset == -1) {
offset = read_im32(env, s);
}
if (op == 1) {
/* bsr */
gen_push(s, tcg_const_i32(s->pc));
}
gen_flush_cc_op(s);
if (op > 1) {
/* Bcc */
l1 = gen_new_label();
gen_jmpcc(s, ((insn >> 8) & 0xf) ^ 1, l1);
gen_jmp_tb(s, 1, base + offset);
gen_set_label(l1);
gen_jmp_tb(s, 0, s->pc);
} else {
/* Unconditional branch. */
gen_jmp_tb(s, 0, base + offset);
}
}
| 22,116 |
FFmpeg | 13a099799e89a76eb921ca452e1b04a7a28a9855 | 0 | yuv2422_1_c_template(SwsContext *c, const uint16_t *buf0,
const uint16_t *ubuf0, const uint16_t *ubuf1,
const uint16_t *vbuf0, const uint16_t *vbuf1,
const uint16_t *abuf0, uint8_t *dest, int dstW,
int uvalpha, enum PixelFormat dstFormat,
int flags, int y, enum PixelFormat target)
{
int i;
if (uvalpha < 2048) {
for (i = 0; i < (dstW >> 1); i++) {
int Y1 = buf0[i * 2] >> 7;
int Y2 = buf0[i * 2 + 1] >> 7;
int U = ubuf1[i] >> 7;
int V = vbuf1[i] >> 7;
output_pixels(i * 4, Y1, U, Y2, V);
}
} else {
for (i = 0; i < (dstW >> 1); i++) {
int Y1 = buf0[i * 2] >> 7;
int Y2 = buf0[i * 2 + 1] >> 7;
int U = (ubuf0[i] + ubuf1[i]) >> 8;
int V = (vbuf0[i] + vbuf1[i]) >> 8;
output_pixels(i * 4, Y1, U, Y2, V);
}
}
}
| 22,117 |