project (stringclasses, 2 values) | commit_id (stringlengths, 40) | target (int64, 0-1) | func (stringlengths, 26-142k) | idx (int64, 0-27.3k) |
---|---|---|---|---|
qemu | bc7c08a2c375acb7ae4d433054415588b176d34c | 0 | static void test_qemu_strtoull_correct(void)
{
const char *str = "12345 foo";
char f = 'X';
const char *endptr = &f;
uint64_t res = 999;
int err;
err = qemu_strtoull(str, &endptr, 0, &res);
g_assert_cmpint(err, ==, 0);
g_assert_cmpint(res, ==, 12345);
g_assert(endptr == str + 5);
}
| 16,317 |
qemu | 3f66f764ee25f10d3e1144ebc057a949421b7728 | 0 | static void test_visitor_in_enum(TestInputVisitorData *data,
const void *unused)
{
Error *err = NULL;
Visitor *v;
EnumOne i;
for (i = 0; EnumOne_lookup[i]; i++) {
EnumOne res = -1;
v = visitor_input_test_init(data, "%s", EnumOne_lookup[i]);
visit_type_EnumOne(v, &res, NULL, &err);
g_assert(!err);
g_assert_cmpint(i, ==, res);
}
}
| 16,320 |
qemu | ddca7f86ac022289840e0200fd4050b2b58e9176 | 0 | static void v9fs_lock(void *opaque)
{
int8_t status;
V9fsFlock *flock;
size_t offset = 7;
struct stat stbuf;
V9fsFidState *fidp;
int32_t fid, err = 0;
V9fsPDU *pdu = opaque;
V9fsState *s = pdu->s;
flock = g_malloc(sizeof(*flock));
pdu_unmarshal(pdu, offset, "dbdqqds", &fid, &flock->type,
&flock->flags, &flock->start, &flock->length,
&flock->proc_id, &flock->client_id);
trace_v9fs_lock(pdu->tag, pdu->id, fid,
flock->type, flock->start, flock->length);
status = P9_LOCK_ERROR;
/* We support only block flag now (that too ignored currently) */
if (flock->flags & ~P9_LOCK_FLAGS_BLOCK) {
err = -EINVAL;
goto out_nofid;
}
fidp = get_fid(pdu, fid);
if (fidp == NULL) {
err = -ENOENT;
goto out_nofid;
}
err = v9fs_co_fstat(pdu, fidp, &stbuf);
if (err < 0) {
goto out;
}
status = P9_LOCK_SUCCESS;
out:
put_fid(pdu, fidp);
out_nofid:
err = offset;
err += pdu_marshal(pdu, offset, "b", status);
trace_v9fs_lock_return(pdu->tag, pdu->id, status);
complete_pdu(s, pdu, err);
v9fs_string_free(&flock->client_id);
g_free(flock);
}
| 16,321 |
FFmpeg | 577393321c389ad2973bec6168a8045c94a9e099 | 0 | int ff_intrax8_decode_picture(IntraX8Context *const w, int dquant,
int quant_offset)
{
MpegEncContext *const s = w->s;
int mb_xy;
assert(s);
w->use_quant_matrix = get_bits1(&s->gb);
w->dquant = dquant;
w->quant = dquant >> 1;
w->qsum = quant_offset;
w->divide_quant_dc_luma = ((1 << 16) + (w->quant >> 1)) / w->quant;
if (w->quant < 5) {
w->quant_dc_chroma = w->quant;
w->divide_quant_dc_chroma = w->divide_quant_dc_luma;
} else {
w->quant_dc_chroma = w->quant + ((w->quant + 3) >> 3);
w->divide_quant_dc_chroma = ((1 << 16) + (w->quant_dc_chroma >> 1)) / w->quant_dc_chroma;
}
x8_reset_vlc_tables(w);
for (s->mb_y = 0; s->mb_y < s->mb_height * 2; s->mb_y++) {
x8_init_block_index(w, s->current_picture.f, s->mb_y);
mb_xy = (s->mb_y >> 1) * s->mb_stride;
for (s->mb_x = 0; s->mb_x < s->mb_width * 2; s->mb_x++) {
x8_get_prediction(w);
if (x8_setup_spatial_predictor(w, 0))
goto error;
if (x8_decode_intra_mb(w, 0))
goto error;
if (s->mb_x & s->mb_y & 1) {
x8_get_prediction_chroma(w);
/* when setting up chroma, no vlc is read,
* so no error condition can be reached */
x8_setup_spatial_predictor(w, 1);
if (x8_decode_intra_mb(w, 1))
goto error;
x8_setup_spatial_predictor(w, 2);
if (x8_decode_intra_mb(w, 2))
goto error;
w->dest[1] += 8;
w->dest[2] += 8;
/* emulate MB info in the relevant tables */
s->mbskip_table[mb_xy] = 0;
s->mbintra_table[mb_xy] = 1;
s->current_picture.qscale_table[mb_xy] = w->quant;
mb_xy++;
}
w->dest[0] += 8;
}
if (s->mb_y & 1)
ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 8, 16);
}
error:
return 0;
}
| 16,322 |
qemu | 18da7f94cdce130f2a71387de4980ffa817181a1 | 0 | void commit_start(BlockDriverState *bs, BlockDriverState *base,
BlockDriverState *top, int64_t speed,
BlockdevOnError on_error, BlockDriverCompletionFunc *cb,
void *opaque, Error **errp)
{
CommitBlockJob *s;
BlockReopenQueue *reopen_queue = NULL;
int orig_overlay_flags;
int orig_base_flags;
BlockDriverState *overlay_bs;
Error *local_err = NULL;
if ((on_error == BLOCKDEV_ON_ERROR_STOP ||
on_error == BLOCKDEV_ON_ERROR_ENOSPC) &&
!bdrv_iostatus_is_enabled(bs)) {
error_set(errp, QERR_INVALID_PARAMETER_COMBINATION);
return;
}
/* Once we support top == active layer, remove this check */
if (top == bs) {
error_setg(errp,
"Top image as the active layer is currently unsupported");
return;
}
if (top == base) {
error_setg(errp, "Invalid files for merge: top and base are the same");
return;
}
overlay_bs = bdrv_find_overlay(bs, top);
if (overlay_bs == NULL) {
error_setg(errp, "Could not find overlay image for %s:", top->filename);
return;
}
orig_base_flags = bdrv_get_flags(base);
orig_overlay_flags = bdrv_get_flags(overlay_bs);
/* convert base & overlay_bs to r/w, if necessary */
if (!(orig_base_flags & BDRV_O_RDWR)) {
reopen_queue = bdrv_reopen_queue(reopen_queue, base,
orig_base_flags | BDRV_O_RDWR);
}
if (!(orig_overlay_flags & BDRV_O_RDWR)) {
reopen_queue = bdrv_reopen_queue(reopen_queue, overlay_bs,
orig_overlay_flags | BDRV_O_RDWR);
}
if (reopen_queue) {
bdrv_reopen_multiple(reopen_queue, &local_err);
if (local_err != NULL) {
error_propagate(errp, local_err);
return;
}
}
s = block_job_create(&commit_job_driver, bs, speed, cb, opaque, errp);
if (!s) {
return;
}
s->base = base;
s->top = top;
s->active = bs;
s->base_flags = orig_base_flags;
s->orig_overlay_flags = orig_overlay_flags;
s->on_error = on_error;
s->common.co = qemu_coroutine_create(commit_run);
trace_commit_start(bs, base, top, s, s->common.co, opaque);
qemu_coroutine_enter(s->common.co, s);
}
| 16,323 |
qemu | 2c62f08ddbf3fa80dc7202eb9a2ea60ae44e2cc5 | 0 | static void blizzard_screen_dump(void *opaque, const char *filename,
bool cswitch, Error **errp)
{
BlizzardState *s = (BlizzardState *) opaque;
DisplaySurface *surface = qemu_console_surface(s->con);
blizzard_update_display(opaque);
if (s && surface_data(surface)) {
ppm_save(filename, surface, errp);
}
}
| 16,324 |
qemu | a8170e5e97ad17ca169c64ba87ae2f53850dab4c | 0 | static void lan9118_writew(void *opaque, target_phys_addr_t offset,
uint32_t val)
{
lan9118_state *s = (lan9118_state *)opaque;
offset &= 0xff;
if (s->write_word_prev_offset != (offset & ~0x3)) {
/* New offset, reset word counter */
s->write_word_n = 0;
s->write_word_prev_offset = offset & ~0x3;
}
if (offset & 0x2) {
s->write_word_h = val;
} else {
s->write_word_l = val;
}
//DPRINTF("Writew reg 0x%02x = 0x%08x\n", (int)offset, val);
s->write_word_n++;
if (s->write_word_n == 2) {
s->write_word_n = 0;
lan9118_writel(s, offset & ~3, s->write_word_l +
(s->write_word_h << 16), 4);
}
}
| 16,325 |
qemu | f090c9d4ad5812fb92843d6470a1111c15190c4c | 0 | int float32_eq( float32 a, float32 b STATUS_PARAM )
{
if ( ( ( extractFloat32Exp( a ) == 0xFF ) && extractFloat32Frac( a ) )
|| ( ( extractFloat32Exp( b ) == 0xFF ) && extractFloat32Frac( b ) )
) {
if ( float32_is_signaling_nan( a ) || float32_is_signaling_nan( b ) ) {
float_raise( float_flag_invalid STATUS_VAR);
}
return 0;
}
return ( a == b ) || ( (bits32) ( ( a | b )<<1 ) == 0 );
}
| 16,326 |
qemu | e7d336959b7c01699702dcda4b54a822972d74a8 | 0 | int chsc_sei_nt2_get_event(void *res)
{
ChscSeiNt2Res *nt2_res = (ChscSeiNt2Res *)res;
PciCcdfAvail *accdf;
PciCcdfErr *eccdf;
int rc = 1;
SeiContainer *sei_cont;
S390pciState *s = S390_PCI_HOST_BRIDGE(
object_resolve_path(TYPE_S390_PCI_HOST_BRIDGE, NULL));
if (!s) {
return rc;
}
sei_cont = QTAILQ_FIRST(&s->pending_sei);
if (sei_cont) {
QTAILQ_REMOVE(&s->pending_sei, sei_cont, link);
nt2_res->nt = 2;
nt2_res->cc = sei_cont->cc;
nt2_res->length = cpu_to_be16(sizeof(ChscSeiNt2Res));
switch (sei_cont->cc) {
case 1: /* error event */
eccdf = (PciCcdfErr *)nt2_res->ccdf;
eccdf->fid = cpu_to_be32(sei_cont->fid);
eccdf->fh = cpu_to_be32(sei_cont->fh);
eccdf->e = cpu_to_be32(sei_cont->e);
eccdf->faddr = cpu_to_be64(sei_cont->faddr);
eccdf->pec = cpu_to_be16(sei_cont->pec);
break;
case 2: /* availability event */
accdf = (PciCcdfAvail *)nt2_res->ccdf;
accdf->fid = cpu_to_be32(sei_cont->fid);
accdf->fh = cpu_to_be32(sei_cont->fh);
accdf->pec = cpu_to_be16(sei_cont->pec);
break;
default:
abort();
}
g_free(sei_cont);
rc = 0;
}
return rc;
}
| 16,327 |
qemu | b39e3f34c9de7ead6a11a74aa2de78baf41d81a7 | 0 | static void icount_adjust_rt(void *opaque)
{
timer_mod(icount_rt_timer,
qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
icount_adjust();
}
| 16,328 |
qemu | f02ca5cbeaf86038834c1953247a1579d7921927 | 0 | static inline int tcg_target_const_match(tcg_target_long val,
const TCGArgConstraint *arg_ct)
{
int ct;
ct = arg_ct->ct;
if (ct & TCG_CT_CONST)
return 1;
else if ((ct & TCG_CT_CONST_S11) && ABS(val) == (ABS(val) & 0x3ff))
return 1;
else if ((ct & TCG_CT_CONST_S13) && ABS(val) == (ABS(val) & 0xfff))
return 1;
else
return 0;
}
| 16,329 |
qemu | e1123a3b40a1a9a625a29c8ed4debb7e206ea690 | 0 | static void iscsi_close(BlockDriverState *bs)
{
IscsiLun *iscsilun = bs->opaque;
struct iscsi_context *iscsi = iscsilun->iscsi;
iscsi_detach_aio_context(bs);
if (iscsi_is_logged_in(iscsi)) {
iscsi_logout_sync(iscsi);
}
iscsi_destroy_context(iscsi);
g_free(iscsilun->zeroblock);
g_free(iscsilun->allocationmap);
memset(iscsilun, 0, sizeof(IscsiLun));
}
| 16,330 |
qemu | 10c4c98ab7dc18169b37b76f6ea5e60ebe65222b | 0 | void i2c_register_slave(I2CSlaveInfo *info)
{
assert(info->qdev.size >= sizeof(i2c_slave));
info->qdev.init = i2c_slave_qdev_init;
info->qdev.bus_type = BUS_TYPE_I2C;
qdev_register(&info->qdev);
}
| 16,331 |
qemu | 67a0fd2a9bca204d2b39f910a97c7137636a0715 | 0 | static int img_compare(int argc, char **argv)
{
const char *fmt1 = NULL, *fmt2 = NULL, *cache, *filename1, *filename2;
BlockBackend *blk1, *blk2;
BlockDriverState *bs1, *bs2;
int64_t total_sectors1, total_sectors2;
uint8_t *buf1 = NULL, *buf2 = NULL;
int pnum1, pnum2;
int allocated1, allocated2;
int ret = 0; /* return value - 0 Ident, 1 Different, >1 Error */
bool progress = false, quiet = false, strict = false;
int flags;
int64_t total_sectors;
int64_t sector_num = 0;
int64_t nb_sectors;
int c, pnum;
uint64_t progress_base;
cache = BDRV_DEFAULT_CACHE;
for (;;) {
c = getopt(argc, argv, "hf:F:T:pqs");
if (c == -1) {
break;
}
switch (c) {
case '?':
case 'h':
help();
break;
case 'f':
fmt1 = optarg;
break;
case 'F':
fmt2 = optarg;
break;
case 'T':
cache = optarg;
break;
case 'p':
progress = true;
break;
case 'q':
quiet = true;
break;
case 's':
strict = true;
break;
}
}
/* Progress is not shown in Quiet mode */
if (quiet) {
progress = false;
}
if (optind != argc - 2) {
error_exit("Expecting two image file names");
}
filename1 = argv[optind++];
filename2 = argv[optind++];
/* Initialize before goto out */
qemu_progress_init(progress, 2.0);
flags = BDRV_O_FLAGS;
ret = bdrv_parse_cache_flags(cache, &flags);
if (ret < 0) {
error_report("Invalid source cache option: %s", cache);
ret = 2;
goto out3;
}
blk1 = img_open("image_1", filename1, fmt1, flags, true, quiet);
if (!blk1) {
ret = 2;
goto out3;
}
bs1 = blk_bs(blk1);
blk2 = img_open("image_2", filename2, fmt2, flags, true, quiet);
if (!blk2) {
ret = 2;
goto out2;
}
bs2 = blk_bs(blk2);
buf1 = blk_blockalign(blk1, IO_BUF_SIZE);
buf2 = blk_blockalign(blk2, IO_BUF_SIZE);
total_sectors1 = blk_nb_sectors(blk1);
if (total_sectors1 < 0) {
error_report("Can't get size of %s: %s",
filename1, strerror(-total_sectors1));
ret = 4;
goto out;
}
total_sectors2 = blk_nb_sectors(blk2);
if (total_sectors2 < 0) {
error_report("Can't get size of %s: %s",
filename2, strerror(-total_sectors2));
ret = 4;
goto out;
}
total_sectors = MIN(total_sectors1, total_sectors2);
progress_base = MAX(total_sectors1, total_sectors2);
qemu_progress_print(0, 100);
if (strict && total_sectors1 != total_sectors2) {
ret = 1;
qprintf(quiet, "Strict mode: Image size mismatch!\n");
goto out;
}
for (;;) {
int64_t status1, status2;
nb_sectors = sectors_to_process(total_sectors, sector_num);
if (nb_sectors <= 0) {
break;
}
status1 = bdrv_get_block_status_above(bs1, NULL, sector_num,
total_sectors1 - sector_num,
&pnum1);
if (status1 < 0) {
ret = 3;
error_report("Sector allocation test failed for %s", filename1);
goto out;
}
allocated1 = status1 & BDRV_BLOCK_ALLOCATED;
status2 = bdrv_get_block_status_above(bs2, NULL, sector_num,
total_sectors2 - sector_num,
&pnum2);
if (status2 < 0) {
ret = 3;
error_report("Sector allocation test failed for %s", filename2);
goto out;
}
allocated2 = status2 & BDRV_BLOCK_ALLOCATED;
if (pnum1) {
nb_sectors = MIN(nb_sectors, pnum1);
}
if (pnum2) {
nb_sectors = MIN(nb_sectors, pnum2);
}
if (strict) {
if ((status1 & ~BDRV_BLOCK_OFFSET_MASK) !=
(status2 & ~BDRV_BLOCK_OFFSET_MASK)) {
ret = 1;
qprintf(quiet, "Strict mode: Offset %" PRId64
" block status mismatch!\n",
sectors_to_bytes(sector_num));
goto out;
}
}
if ((status1 & BDRV_BLOCK_ZERO) && (status2 & BDRV_BLOCK_ZERO)) {
nb_sectors = MIN(pnum1, pnum2);
} else if (allocated1 == allocated2) {
if (allocated1) {
ret = blk_read(blk1, sector_num, buf1, nb_sectors);
if (ret < 0) {
error_report("Error while reading offset %" PRId64 " of %s:"
" %s", sectors_to_bytes(sector_num), filename1,
strerror(-ret));
ret = 4;
goto out;
}
ret = blk_read(blk2, sector_num, buf2, nb_sectors);
if (ret < 0) {
error_report("Error while reading offset %" PRId64
" of %s: %s", sectors_to_bytes(sector_num),
filename2, strerror(-ret));
ret = 4;
goto out;
}
ret = compare_sectors(buf1, buf2, nb_sectors, &pnum);
if (ret || pnum != nb_sectors) {
qprintf(quiet, "Content mismatch at offset %" PRId64 "!\n",
sectors_to_bytes(
ret ? sector_num : sector_num + pnum));
ret = 1;
goto out;
}
}
} else {
if (allocated1) {
ret = check_empty_sectors(blk1, sector_num, nb_sectors,
filename1, buf1, quiet);
} else {
ret = check_empty_sectors(blk2, sector_num, nb_sectors,
filename2, buf1, quiet);
}
if (ret) {
if (ret < 0) {
error_report("Error while reading offset %" PRId64 ": %s",
sectors_to_bytes(sector_num), strerror(-ret));
ret = 4;
}
goto out;
}
}
sector_num += nb_sectors;
qemu_progress_print(((float) nb_sectors / progress_base)*100, 100);
}
if (total_sectors1 != total_sectors2) {
BlockBackend *blk_over;
int64_t total_sectors_over;
const char *filename_over;
qprintf(quiet, "Warning: Image size mismatch!\n");
if (total_sectors1 > total_sectors2) {
total_sectors_over = total_sectors1;
blk_over = blk1;
filename_over = filename1;
} else {
total_sectors_over = total_sectors2;
blk_over = blk2;
filename_over = filename2;
}
for (;;) {
nb_sectors = sectors_to_process(total_sectors_over, sector_num);
if (nb_sectors <= 0) {
break;
}
ret = bdrv_is_allocated_above(blk_bs(blk_over), NULL, sector_num,
nb_sectors, &pnum);
if (ret < 0) {
ret = 3;
error_report("Sector allocation test failed for %s",
filename_over);
goto out;
}
nb_sectors = pnum;
if (ret) {
ret = check_empty_sectors(blk_over, sector_num, nb_sectors,
filename_over, buf1, quiet);
if (ret) {
if (ret < 0) {
error_report("Error while reading offset %" PRId64
" of %s: %s", sectors_to_bytes(sector_num),
filename_over, strerror(-ret));
ret = 4;
}
goto out;
}
}
sector_num += nb_sectors;
qemu_progress_print(((float) nb_sectors / progress_base)*100, 100);
}
}
qprintf(quiet, "Images are identical.\n");
ret = 0;
out:
qemu_vfree(buf1);
qemu_vfree(buf2);
blk_unref(blk2);
out2:
blk_unref(blk1);
out3:
qemu_progress_end();
return ret;
}
| 16,332 |
FFmpeg | c04c3282b4334ff64cfd69d40fea010602e830fd | 0 | static int v4l2_read_header(AVFormatContext *s1, AVFormatParameters *ap)
{
struct video_data *s = s1->priv_data;
AVStream *st;
int width, height;
int res, frame_rate, frame_rate_base;
uint32_t desired_format, capabilities;
const char *video_device;
if (!ap || ap->width <= 0 || ap->height <= 0 || ap->time_base.den <= 0) {
av_log(s1, AV_LOG_ERROR, "Missing/Wrong parameters\n");
return -1;
}
width = ap->width;
height = ap->height;
frame_rate = ap->time_base.den;
frame_rate_base = ap->time_base.num;
if((unsigned)width > 32767 || (unsigned)height > 32767) {
av_log(s1, AV_LOG_ERROR, "Wrong size %dx%d\n", width, height);
return -1;
}
st = av_new_stream(s1, 0);
if (!st) {
return -ENOMEM;
}
av_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
s->width = width;
s->height = height;
s->frame_rate = frame_rate;
s->frame_rate_base = frame_rate_base;
video_device = ap->device;
if (!video_device) {
video_device = "/dev/video";
}
capabilities = 0;
s->fd = device_open(video_device, &capabilities);
if (s->fd < 0) {
av_free(st);
return AVERROR_IO;
}
av_log(s1, AV_LOG_ERROR, "[%d]Capabilities: %x\n", s->fd, capabilities);
desired_format = fmt_ff2v4l(ap->pix_fmt);
if (desired_format == 0 || (device_init(s->fd, &width, &height, desired_format) < 0)) {
int i, done;
done = 0; i = 0;
while (!done) {
desired_format = fmt_conversion_table[i].v4l2_fmt;
if (device_init(s->fd, &width, &height, desired_format) < 0) {
desired_format = 0;
i++;
} else {
done = 1;
}
if (i == sizeof(fmt_conversion_table) / sizeof(struct fmt_map)) {
done = 1;
}
}
}
if (desired_format == 0) {
av_log(s1, AV_LOG_ERROR, "Cannot find a proper format.\n");
close(s->fd);
av_free(st);
return AVERROR_IO;
}
s->frame_format = desired_format;
st->codec->pix_fmt = fmt_v4l2ff(desired_format);
s->frame_size = avpicture_get_size(st->codec->pix_fmt, width, height);
if (capabilities & V4L2_CAP_STREAMING) {
s->io_method = io_mmap;
res = mmap_init(s);
res = mmap_start(s);
} else {
s->io_method = io_read;
res = read_init(s);
}
if (res < 0) {
close(s->fd);
av_free(st);
return AVERROR_IO;
}
s->top_field_first = first_field(s->fd);
st->codec->codec_type = CODEC_TYPE_VIDEO;
st->codec->codec_id = CODEC_ID_RAWVIDEO;
st->codec->width = width;
st->codec->height = height;
st->codec->time_base.den = frame_rate;
st->codec->time_base.num = frame_rate_base;
st->codec->bit_rate = s->frame_size * 1/av_q2d(st->codec->time_base) * 8;
return 0;
}
| 16,333 |
qemu | 85f94f868fcd868f0f605e9d3c1ad6351c557190 | 0 | static void handle_keyup(DisplayState *ds, SDL_Event *ev)
{
int mod_state;
if (!alt_grab) {
mod_state = (ev->key.keysym.mod & gui_grab_code);
} else {
mod_state = (ev->key.keysym.mod & (gui_grab_code | KMOD_LSHIFT));
}
if (!mod_state && gui_key_modifier_pressed) {
gui_key_modifier_pressed = 0;
if (gui_keysym == 0) {
/* exit/enter grab if pressing Ctrl-Alt */
if (!gui_grab) {
/* If the application is not active, do not try to enter grab
* state. It prevents 'SDL_WM_GrabInput(SDL_GRAB_ON)' from
* blocking all the application (SDL bug). */
if (is_graphic_console() &&
SDL_GetAppState() & SDL_APPACTIVE) {
sdl_grab_start();
}
} else if (!gui_fullscreen) {
sdl_grab_end();
}
/* SDL does not send back all the modifiers key, so we must
* correct it. */
reset_keys();
return;
}
gui_keysym = 0;
}
if (is_graphic_console() && !gui_keysym) {
sdl_process_key(&ev->key);
}
}
| 16,334 |
FFmpeg | 1303d62d8416fa315a0cc7bbbe35cfdab787ea92 | 0 | static int mpegts_resync(ByteIOContext *pb)
{
int c, i;
for(i = 0;i < MAX_RESYNC_SIZE; i++) {
c = url_fgetc(pb);
if (c < 0)
return -1;
if (c == 0x47) {
url_fseek(pb, -1, SEEK_CUR);
return 0;
}
}
/* no sync found */
return -1;
}
| 16,336 |
FFmpeg | 50833c9f7b4e1922197a8955669f8ab3589c8cef | 1 | static int write_header(FlashSV2Context * s, uint8_t * buf, int buf_size)
{
PutBitContext pb;
int buf_pos, len;
if (buf_size < 5)
return -1;
init_put_bits(&pb, buf, buf_size * 8);
put_bits(&pb, 4, (s->block_width >> 4) - 1);
put_bits(&pb, 12, s->image_width);
put_bits(&pb, 4, (s->block_height >> 4) - 1);
put_bits(&pb, 12, s->image_height);
flush_put_bits(&pb);
buf_pos = 4;
buf[buf_pos++] = s->flags;
if (s->flags & HAS_PALLET_INFO) {
len = write_palette(s, buf + buf_pos, buf_size - buf_pos);
if (len < 0)
return -1;
buf_pos += len;
}
return buf_pos;
}
| 16,337 |
qemu | 187337f8b0ec0813dd3876d1efe37d415fb81c2e | 1 | void usb_ohci_init_pxa(target_phys_addr_t base, int num_ports, int devfn,
qemu_irq irq)
{
OHCIState *ohci = (OHCIState *)qemu_mallocz(sizeof(OHCIState));
usb_ohci_init(ohci, num_ports, devfn, irq,
OHCI_TYPE_PXA, "OHCI USB");
ohci->mem_base = base;
cpu_register_physical_memory(ohci->mem_base, 0xfff, ohci->mem);
}
| 16,338 |
FFmpeg | 1bab6f852c7ca433285d19f65c701885fa69cc57 | 1 | static void RENAME(yuv2rgb32_1)(SwsContext *c, const int16_t *buf0,
const int16_t *ubuf[2], const int16_t *bguf[2],
const int16_t *abuf0, uint8_t *dest,
int dstW, int uvalpha, int y)
{
const int16_t *ubuf0 = ubuf[0], *ubuf1 = ubuf[1];
const int16_t *buf1= buf0; //FIXME needed for RGB1/BGR1
if (uvalpha < 2048) { // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster
if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {
__asm__ volatile(
"mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
"mov %4, %%"REG_b" \n\t"
"push %%"REG_BP" \n\t"
YSCALEYUV2RGB1(%%REGBP, %5)
YSCALEYUV2RGB1_ALPHA(%%REGBP)
WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
"pop %%"REG_BP" \n\t"
"mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
:: "c" (buf0), "d" (abuf0), "S" (ubuf0), "D" (ubuf1), "m" (dest),
"a" (&c->redDither)
);
} else {
__asm__ volatile(
"mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
"mov %4, %%"REG_b" \n\t"
"push %%"REG_BP" \n\t"
YSCALEYUV2RGB1(%%REGBP, %5)
"pcmpeqd %%mm7, %%mm7 \n\t"
WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
"pop %%"REG_BP" \n\t"
"mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
:: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
"a" (&c->redDither)
);
}
} else {
if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {
__asm__ volatile(
"mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
"mov %4, %%"REG_b" \n\t"
"push %%"REG_BP" \n\t"
YSCALEYUV2RGB1b(%%REGBP, %5)
YSCALEYUV2RGB1_ALPHA(%%REGBP)
WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
"pop %%"REG_BP" \n\t"
"mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
:: "c" (buf0), "d" (abuf0), "S" (ubuf0), "D" (ubuf1), "m" (dest),
"a" (&c->redDither)
);
} else {
__asm__ volatile(
"mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
"mov %4, %%"REG_b" \n\t"
"push %%"REG_BP" \n\t"
YSCALEYUV2RGB1b(%%REGBP, %5)
"pcmpeqd %%mm7, %%mm7 \n\t"
WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
"pop %%"REG_BP" \n\t"
"mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
:: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
"a" (&c->redDither)
);
}
}
}
| 16,340 |
qemu | 12848bfc5d719bad536c5448205a3226be1fda47 | 1 | VirtIODevice *virtio_9p_init(DeviceState *dev, V9fsConf *conf)
{
V9fsState *s;
int i, len;
struct stat stat;
FsTypeEntry *fse;
s = (V9fsState *)virtio_common_init("virtio-9p",
VIRTIO_ID_9P,
sizeof(struct virtio_9p_config)+
MAX_TAG_LEN,
sizeof(V9fsState));
/* initialize pdu allocator */
QLIST_INIT(&s->free_list);
for (i = 0; i < (MAX_REQ - 1); i++) {
QLIST_INSERT_HEAD(&s->free_list, &s->pdus[i], next);
}
s->vq = virtio_add_queue(&s->vdev, MAX_REQ, handle_9p_output);
fse = get_fsdev_fsentry(conf->fsdev_id);
if (!fse) {
/* We don't have a fsdev identified by fsdev_id */
fprintf(stderr, "Virtio-9p device couldn't find fsdev "
"with the id %s\n", conf->fsdev_id);
exit(1);
}
if (!fse->path || !conf->tag) {
/* we haven't specified a mount_tag or the path */
fprintf(stderr, "fsdev with id %s needs path "
"and Virtio-9p device needs mount_tag arguments\n",
conf->fsdev_id);
exit(1);
}
if (!strcmp(fse->security_model, "passthrough")) {
/* Files on the Fileserver set to client user credentials */
s->ctx.fs_sm = SM_PASSTHROUGH;
} else if (!strcmp(fse->security_model, "mapped")) {
/* Files on the fileserver are set to QEMU credentials.
* Client user credentials are saved in extended attributes.
*/
s->ctx.fs_sm = SM_MAPPED;
} else {
/* user haven't specified a correct security option */
fprintf(stderr, "one of the following must be specified as the"
"security option:\n\t security_model=passthrough \n\t "
"security_model=mapped\n");
return NULL;
}
if (lstat(fse->path, &stat)) {
fprintf(stderr, "share path %s does not exist\n", fse->path);
exit(1);
} else if (!S_ISDIR(stat.st_mode)) {
fprintf(stderr, "share path %s is not a directory \n", fse->path);
exit(1);
}
s->ctx.fs_root = qemu_strdup(fse->path);
len = strlen(conf->tag);
if (len > MAX_TAG_LEN) {
len = MAX_TAG_LEN;
}
/* s->tag is non-NULL terminated string */
s->tag = qemu_malloc(len);
memcpy(s->tag, conf->tag, len);
s->tag_len = len;
s->ctx.uid = -1;
s->ops = fse->ops;
s->vdev.get_features = virtio_9p_get_features;
s->config_size = sizeof(struct virtio_9p_config) +
s->tag_len;
s->vdev.get_config = virtio_9p_get_config;
return &s->vdev;
}
| 16,341 |
qemu | 88ee13c7b656e5504613b527f3a51591e9afae69 | 1 | int pcilg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
{
CPUS390XState *env = &cpu->env;
S390PCIBusDevice *pbdev;
uint64_t offset;
uint64_t data;
MemoryRegion *mr;
uint8_t len;
uint32_t fh;
uint8_t pcias;
cpu_synchronize_state(CPU(cpu));
if (env->psw.mask & PSW_MASK_PSTATE) {
program_interrupt(env, PGM_PRIVILEGED, 4);
return 0;
}
if (r2 & 0x1) {
program_interrupt(env, PGM_SPECIFICATION, 4);
return 0;
}
fh = env->regs[r2] >> 32;
pcias = (env->regs[r2] >> 16) & 0xf;
len = env->regs[r2] & 0xf;
offset = env->regs[r2 + 1];
pbdev = s390_pci_find_dev_by_fh(fh);
if (!pbdev) {
DPRINTF("pcilg no pci dev\n");
setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
return 0;
}
switch (pbdev->state) {
case ZPCI_FS_RESERVED:
case ZPCI_FS_STANDBY:
case ZPCI_FS_DISABLED:
case ZPCI_FS_PERMANENT_ERROR:
setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
return 0;
case ZPCI_FS_ERROR:
setcc(cpu, ZPCI_PCI_LS_ERR);
s390_set_status_code(env, r2, ZPCI_PCI_ST_BLOCKED);
return 0;
default:
break;
}
if (pcias < 6) {
if ((8 - (offset & 0x7)) < len) {
program_interrupt(env, PGM_OPERAND, 4);
return 0;
}
mr = pbdev->pdev->io_regions[pcias].memory;
memory_region_dispatch_read(mr, offset, &data, len,
MEMTXATTRS_UNSPECIFIED);
} else if (pcias == 15) {
if ((4 - (offset & 0x3)) < len) {
program_interrupt(env, PGM_OPERAND, 4);
return 0;
}
data = pci_host_config_read_common(
pbdev->pdev, offset, pci_config_size(pbdev->pdev), len);
switch (len) {
case 1:
break;
case 2:
data = bswap16(data);
break;
case 4:
data = bswap32(data);
break;
case 8:
data = bswap64(data);
break;
default:
program_interrupt(env, PGM_OPERAND, 4);
return 0;
}
} else {
DPRINTF("invalid space\n");
setcc(cpu, ZPCI_PCI_LS_ERR);
s390_set_status_code(env, r2, ZPCI_PCI_ST_INVAL_AS);
return 0;
}
env->regs[r1] = data;
setcc(cpu, ZPCI_PCI_LS_OK);
return 0;
}
| 16,342 |
qemu | 3e305e4a4752f70c0b5c3cf5b43ec957881714f7 | 1 | static void vnc_client_cache_auth(VncState *client)
{
if (!client->info) {
return;
}
#ifdef CONFIG_VNC_TLS
if (client->tls.session &&
client->tls.dname) {
client->info->has_x509_dname = true;
client->info->x509_dname = g_strdup(client->tls.dname);
}
#endif
#ifdef CONFIG_VNC_SASL
if (client->sasl.conn &&
client->sasl.username) {
client->info->has_sasl_username = true;
client->info->sasl_username = g_strdup(client->sasl.username);
}
#endif
}
| 16,343 |
FFmpeg | 9e27e0d4f825e97104333e3006f450f566935af0 | 1 | static void add_wav(int16_t *dest, int n, int skip_first, int *m,
const int16_t *s1, const int8_t *s2, const int8_t *s3)
{
int i;
int v[3];
v[0] = 0;
for (i=!skip_first; i<3; i++)
v[i] = (gain_val_tab[n][i] * m[i]) >> gain_exp_tab[n];
dest[i] = (s1[i]*v[0] + s2[i]*v[1] + s3[i]*v[2]) >> 12;
| 16,344 |
qemu | 039380a8e18f618cdacf72486449c04dc1b70eef | 1 | void qtest_qmp(QTestState *s, const char *fmt, ...)
{
va_list ap;
bool has_reply = false;
int nesting = 0;
/* Send QMP request */
va_start(ap, fmt);
socket_sendf(s->qmp_fd, fmt, ap);
va_end(ap);
/* Receive reply */
while (!has_reply || nesting > 0) {
ssize_t len;
char c;
len = read(s->qmp_fd, &c, 1);
if (len == -1 && errno == EINTR) {
continue;
switch (c) {
case '{':
nesting++;
has_reply = true;
break;
case '}':
nesting--;
            break;
 | 16,345 |
qemu | aa4a3dce1c88ed51b616806b8214b7c8428b7470 | 1 | static void vmxnet3_reset(VMXNET3State *s)
{
VMW_CBPRN("Resetting vmxnet3...");
vmxnet3_deactivate_device(s);
vmxnet3_reset_interrupt_states(s);
vmxnet_tx_pkt_reset(s->tx_pkt);
s->drv_shmem = 0;
s->tx_sop = true;
s->skip_current_tx_pkt = false;
}
| 16,346 |
FFmpeg | eae7338e1592f4a398b7c3cb9d1ac854b7a44ff8 | 1 | static av_cold int X264_close(AVCodecContext *avctx)
{
X264Context *x4 = avctx->priv_data;
av_freep(&avctx->extradata);
av_free(x4->sei);
if (x4->enc)
x264_encoder_close(x4->enc);
av_frame_free(&avctx->coded_frame);
return 0;
}
| 16,347 |
qemu | c3e10c7b4377c1cbc0a4fbc12312c2cf41c0cda7 | 1 | static always_inline void gen_op_subfo_64 (void)
{
gen_op_move_T2_T0();
gen_op_subf();
gen_op_check_subfo_64();
}
| 16,349 |
qemu | d861b05ea30e6ac177de9b679da96194ebe21afc | 1 | int slirp_can_output(void)
{
return 1;
}
| 16,350 |
qemu | ec45bbe5f1921c6553fbf9c0c76b358b0403c22d | 1 | int main(int argc, char **argv, char **envp)
{
    struct target_pt_regs regs1, *regs = &regs1;
struct image_info info1, *info = &info1;
struct linux_binprm bprm;
TaskState *ts;
CPUArchState *env;
CPUState *cpu;
int optind;
char **target_environ, **wrk;
char **target_argv;
int target_argc;
int i;
int ret;
int execfd;
module_call_init(MODULE_INIT_TRACE);
qemu_init_cpu_list();
module_call_init(MODULE_INIT_QOM);
if ((envlist = envlist_create()) == NULL) {
(void) fprintf(stderr, "Unable to allocate envlist\n");
exit(EXIT_FAILURE);
}
/* add current environment into the list */
for (wrk = environ; *wrk != NULL; wrk++) {
(void) envlist_setenv(envlist, *wrk);
}
/* Read the stack limit from the kernel. If it's "unlimited",
then we can do little else besides use the default. */
{
struct rlimit lim;
if (getrlimit(RLIMIT_STACK, &lim) == 0
&& lim.rlim_cur != RLIM_INFINITY
&& lim.rlim_cur == (target_long)lim.rlim_cur) {
guest_stack_size = lim.rlim_cur;
}
}
cpu_model = NULL;
srand(time(NULL));
qemu_add_opts(&qemu_trace_opts);
optind = parse_args(argc, argv);
if (!trace_init_backends()) {
exit(1);
}
trace_init_file(trace_file);
/* Zero out regs */
memset(regs, 0, sizeof(struct target_pt_regs));
/* Zero out image_info */
memset(info, 0, sizeof(struct image_info));
memset(&bprm, 0, sizeof (bprm));
/* Scan interp_prefix dir for replacement files. */
init_paths(interp_prefix);
init_qemu_uname_release();
if (cpu_model == NULL) {
#if defined(TARGET_I386)
#ifdef TARGET_X86_64
cpu_model = "qemu64";
#else
cpu_model = "qemu32";
#endif
#elif defined(TARGET_ARM)
cpu_model = "any";
#elif defined(TARGET_UNICORE32)
cpu_model = "any";
#elif defined(TARGET_M68K)
cpu_model = "any";
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
cpu_model = "TI UltraSparc II";
#else
cpu_model = "Fujitsu MB86904";
#endif
#elif defined(TARGET_MIPS)
#if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64)
cpu_model = "5KEf";
#else
cpu_model = "24Kf";
#endif
#elif defined TARGET_OPENRISC
cpu_model = "or1200";
#elif defined(TARGET_PPC)
# ifdef TARGET_PPC64
cpu_model = "POWER8";
# else
cpu_model = "750";
# endif
#elif defined TARGET_SH4
cpu_model = TYPE_SH7785_CPU;
#elif defined TARGET_S390X
cpu_model = "qemu";
#else
cpu_model = "any";
#endif
}
tcg_exec_init(0);
/* NOTE: we need to init the CPU at this stage to get
qemu_host_page_size */
cpu = cpu_init(cpu_model);
if (!cpu) {
fprintf(stderr, "Unable to find CPU definition\n");
exit(EXIT_FAILURE);
}
env = cpu->env_ptr;
cpu_reset(cpu);
thread_cpu = cpu;
if (getenv("QEMU_STRACE")) {
do_strace = 1;
}
if (getenv("QEMU_RAND_SEED")) {
handle_arg_randseed(getenv("QEMU_RAND_SEED"));
}
target_environ = envlist_to_environ(envlist, NULL);
envlist_free(envlist);
/*
* Now that page sizes are configured in cpu_init() we can do
* proper page alignment for guest_base.
*/
guest_base = HOST_PAGE_ALIGN(guest_base);
if (reserved_va || have_guest_base) {
guest_base = init_guest_space(guest_base, reserved_va, 0,
have_guest_base);
if (guest_base == (unsigned long)-1) {
fprintf(stderr, "Unable to reserve 0x%lx bytes of virtual address "
"space for use as guest address space (check your virtual "
"memory ulimit setting or reserve less using -R option)\n",
reserved_va);
exit(EXIT_FAILURE);
}
if (reserved_va) {
mmap_next_start = reserved_va;
}
}
/*
* Read in mmap_min_addr kernel parameter. This value is used
* When loading the ELF image to determine whether guest_base
* is needed. It is also used in mmap_find_vma.
*/
{
FILE *fp;
if ((fp = fopen("/proc/sys/vm/mmap_min_addr", "r")) != NULL) {
unsigned long tmp;
if (fscanf(fp, "%lu", &tmp) == 1) {
mmap_min_addr = tmp;
qemu_log_mask(CPU_LOG_PAGE, "host mmap_min_addr=0x%lx\n", mmap_min_addr);
}
fclose(fp);
}
}
/*
* Prepare copy of argv vector for target.
*/
target_argc = argc - optind;
target_argv = calloc(target_argc + 1, sizeof (char *));
if (target_argv == NULL) {
(void) fprintf(stderr, "Unable to allocate memory for target_argv\n");
exit(EXIT_FAILURE);
}
/*
* If argv0 is specified (using '-0' switch) we replace
* argv[0] pointer with the given one.
*/
i = 0;
if (argv0 != NULL) {
target_argv[i++] = strdup(argv0);
}
for (; i < target_argc; i++) {
target_argv[i] = strdup(argv[optind + i]);
}
target_argv[target_argc] = NULL;
ts = g_new0(TaskState, 1);
init_task_state(ts);
/* build Task State */
ts->info = info;
ts->bprm = &bprm;
cpu->opaque = ts;
task_settid(ts);
execfd = qemu_getauxval(AT_EXECFD);
if (execfd == 0) {
execfd = open(filename, O_RDONLY);
if (execfd < 0) {
printf("Error while loading %s: %s\n", filename, strerror(errno));
_exit(EXIT_FAILURE);
}
}
ret = loader_exec(execfd, filename, target_argv, target_environ, regs,
info, &bprm);
if (ret != 0) {
printf("Error while loading %s: %s\n", filename, strerror(-ret));
_exit(EXIT_FAILURE);
}
for (wrk = target_environ; *wrk; wrk++) {
free(*wrk);
}
free(target_environ);
if (qemu_loglevel_mask(CPU_LOG_PAGE)) {
qemu_log("guest_base 0x%lx\n", guest_base);
log_page_dump();
qemu_log("start_brk 0x" TARGET_ABI_FMT_lx "\n", info->start_brk);
qemu_log("end_code 0x" TARGET_ABI_FMT_lx "\n", info->end_code);
qemu_log("start_code 0x" TARGET_ABI_FMT_lx "\n", info->start_code);
qemu_log("start_data 0x" TARGET_ABI_FMT_lx "\n", info->start_data);
qemu_log("end_data 0x" TARGET_ABI_FMT_lx "\n", info->end_data);
qemu_log("start_stack 0x" TARGET_ABI_FMT_lx "\n", info->start_stack);
qemu_log("brk 0x" TARGET_ABI_FMT_lx "\n", info->brk);
qemu_log("entry 0x" TARGET_ABI_FMT_lx "\n", info->entry);
qemu_log("argv_start 0x" TARGET_ABI_FMT_lx "\n", info->arg_start);
qemu_log("env_start 0x" TARGET_ABI_FMT_lx "\n",
info->arg_end + (abi_ulong)sizeof(abi_ulong));
qemu_log("auxv_start 0x" TARGET_ABI_FMT_lx "\n", info->saved_auxv);
}
target_set_brk(info->brk);
syscall_init();
signal_init();
/* Now that we've loaded the binary, GUEST_BASE is fixed. Delay
generating the prologue until now so that the prologue can take
the real value of GUEST_BASE into account. */
tcg_prologue_init(&tcg_ctx);
#if defined(TARGET_I386)
env->cr[0] = CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK;
env->hflags |= HF_PE_MASK | HF_CPL_MASK;
if (env->features[FEAT_1_EDX] & CPUID_SSE) {
env->cr[4] |= CR4_OSFXSR_MASK;
env->hflags |= HF_OSFXSR_MASK;
}
#ifndef TARGET_ABI32
/* enable 64 bit mode if possible */
if (!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM)) {
fprintf(stderr, "The selected x86 CPU does not support 64 bit mode\n");
exit(EXIT_FAILURE);
}
env->cr[4] |= CR4_PAE_MASK;
env->efer |= MSR_EFER_LMA | MSR_EFER_LME;
env->hflags |= HF_LMA_MASK;
#endif
/* flags setup : we activate the IRQs by default as in user mode */
env->eflags |= IF_MASK;
/* linux register setup */
#ifndef TARGET_ABI32
env->regs[R_EAX] = regs->rax;
env->regs[R_EBX] = regs->rbx;
env->regs[R_ECX] = regs->rcx;
env->regs[R_EDX] = regs->rdx;
env->regs[R_ESI] = regs->rsi;
env->regs[R_EDI] = regs->rdi;
env->regs[R_EBP] = regs->rbp;
env->regs[R_ESP] = regs->rsp;
env->eip = regs->rip;
#else
env->regs[R_EAX] = regs->eax;
env->regs[R_EBX] = regs->ebx;
env->regs[R_ECX] = regs->ecx;
env->regs[R_EDX] = regs->edx;
env->regs[R_ESI] = regs->esi;
env->regs[R_EDI] = regs->edi;
env->regs[R_EBP] = regs->ebp;
env->regs[R_ESP] = regs->esp;
env->eip = regs->eip;
#endif
/* linux interrupt setup */
#ifndef TARGET_ABI32
env->idt.limit = 511;
#else
env->idt.limit = 255;
#endif
env->idt.base = target_mmap(0, sizeof(uint64_t) * (env->idt.limit + 1),
PROT_READ|PROT_WRITE,
MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
idt_table = g2h(env->idt.base);
set_idt(0, 0);
set_idt(1, 0);
set_idt(2, 0);
set_idt(3, 3);
set_idt(4, 3);
set_idt(5, 0);
set_idt(6, 0);
set_idt(7, 0);
set_idt(8, 0);
set_idt(9, 0);
set_idt(10, 0);
set_idt(11, 0);
set_idt(12, 0);
set_idt(13, 0);
set_idt(14, 0);
set_idt(15, 0);
set_idt(16, 0);
set_idt(17, 0);
set_idt(18, 0);
set_idt(19, 0);
set_idt(0x80, 3);
/* linux segment setup */
{
uint64_t *gdt_table;
env->gdt.base = target_mmap(0, sizeof(uint64_t) * TARGET_GDT_ENTRIES,
PROT_READ|PROT_WRITE,
MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
env->gdt.limit = sizeof(uint64_t) * TARGET_GDT_ENTRIES - 1;
gdt_table = g2h(env->gdt.base);
#ifdef TARGET_ABI32
write_dt(&gdt_table[__USER_CS >> 3], 0, 0xfffff,
DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
(3 << DESC_DPL_SHIFT) | (0xa << DESC_TYPE_SHIFT));
#else
/* 64 bit code segment */
write_dt(&gdt_table[__USER_CS >> 3], 0, 0xfffff,
DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
DESC_L_MASK |
(3 << DESC_DPL_SHIFT) | (0xa << DESC_TYPE_SHIFT));
#endif
write_dt(&gdt_table[__USER_DS >> 3], 0, 0xfffff,
DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
(3 << DESC_DPL_SHIFT) | (0x2 << DESC_TYPE_SHIFT));
}
cpu_x86_load_seg(env, R_CS, __USER_CS);
cpu_x86_load_seg(env, R_SS, __USER_DS);
#ifdef TARGET_ABI32
cpu_x86_load_seg(env, R_DS, __USER_DS);
cpu_x86_load_seg(env, R_ES, __USER_DS);
cpu_x86_load_seg(env, R_FS, __USER_DS);
cpu_x86_load_seg(env, R_GS, __USER_DS);
/* This hack makes Wine work... */
env->segs[R_FS].selector = 0;
#else
cpu_x86_load_seg(env, R_DS, 0);
cpu_x86_load_seg(env, R_ES, 0);
cpu_x86_load_seg(env, R_FS, 0);
cpu_x86_load_seg(env, R_GS, 0);
#endif
#elif defined(TARGET_AARCH64)
{
int i;
if (!(arm_feature(env, ARM_FEATURE_AARCH64))) {
fprintf(stderr,
"The selected ARM CPU does not support 64 bit mode\n");
exit(EXIT_FAILURE);
}
for (i = 0; i < 31; i++) {
env->xregs[i] = regs->regs[i];
}
env->pc = regs->pc;
env->xregs[31] = regs->sp;
}
#elif defined(TARGET_ARM)
{
int i;
cpsr_write(env, regs->uregs[16], CPSR_USER | CPSR_EXEC,
CPSRWriteByInstr);
for(i = 0; i < 16; i++) {
env->regs[i] = regs->uregs[i];
}
#ifdef TARGET_WORDS_BIGENDIAN
/* Enable BE8. */
if (EF_ARM_EABI_VERSION(info->elf_flags) >= EF_ARM_EABI_VER4
&& (info->elf_flags & EF_ARM_BE8)) {
env->uncached_cpsr |= CPSR_E;
env->cp15.sctlr_el[1] |= SCTLR_E0E;
} else {
env->cp15.sctlr_el[1] |= SCTLR_B;
}
#endif
}
#elif defined(TARGET_UNICORE32)
{
int i;
cpu_asr_write(env, regs->uregs[32], 0xffffffff);
for (i = 0; i < 32; i++) {
env->regs[i] = regs->uregs[i];
}
}
#elif defined(TARGET_SPARC)
{
int i;
env->pc = regs->pc;
env->npc = regs->npc;
env->y = regs->y;
for(i = 0; i < 8; i++)
env->gregs[i] = regs->u_regs[i];
for(i = 0; i < 8; i++)
env->regwptr[i] = regs->u_regs[i + 8];
}
#elif defined(TARGET_PPC)
{
int i;
#if defined(TARGET_PPC64)
int flag = (env->insns_flags2 & PPC2_BOOKE206) ? MSR_CM : MSR_SF;
#if defined(TARGET_ABI32)
env->msr &= ~((target_ulong)1 << flag);
#else
env->msr |= (target_ulong)1 << flag;
#endif
#endif
env->nip = regs->nip;
for(i = 0; i < 32; i++) {
env->gpr[i] = regs->gpr[i];
}
}
#elif defined(TARGET_M68K)
{
env->pc = regs->pc;
env->dregs[0] = regs->d0;
env->dregs[1] = regs->d1;
env->dregs[2] = regs->d2;
env->dregs[3] = regs->d3;
env->dregs[4] = regs->d4;
env->dregs[5] = regs->d5;
env->dregs[6] = regs->d6;
env->dregs[7] = regs->d7;
env->aregs[0] = regs->a0;
env->aregs[1] = regs->a1;
env->aregs[2] = regs->a2;
env->aregs[3] = regs->a3;
env->aregs[4] = regs->a4;
env->aregs[5] = regs->a5;
env->aregs[6] = regs->a6;
env->aregs[7] = regs->usp;
env->sr = regs->sr;
ts->sim_syscalls = 1;
}
#elif defined(TARGET_MICROBLAZE)
{
env->regs[0] = regs->r0;
env->regs[1] = regs->r1;
env->regs[2] = regs->r2;
env->regs[3] = regs->r3;
env->regs[4] = regs->r4;
env->regs[5] = regs->r5;
env->regs[6] = regs->r6;
env->regs[7] = regs->r7;
env->regs[8] = regs->r8;
env->regs[9] = regs->r9;
env->regs[10] = regs->r10;
env->regs[11] = regs->r11;
env->regs[12] = regs->r12;
env->regs[13] = regs->r13;
env->regs[14] = regs->r14;
env->regs[15] = regs->r15;
env->regs[16] = regs->r16;
env->regs[17] = regs->r17;
env->regs[18] = regs->r18;
env->regs[19] = regs->r19;
env->regs[20] = regs->r20;
env->regs[21] = regs->r21;
env->regs[22] = regs->r22;
env->regs[23] = regs->r23;
env->regs[24] = regs->r24;
env->regs[25] = regs->r25;
env->regs[26] = regs->r26;
env->regs[27] = regs->r27;
env->regs[28] = regs->r28;
env->regs[29] = regs->r29;
env->regs[30] = regs->r30;
env->regs[31] = regs->r31;
env->sregs[SR_PC] = regs->pc;
}
#elif defined(TARGET_MIPS)
{
int i;
for(i = 0; i < 32; i++) {
env->active_tc.gpr[i] = regs->regs[i];
}
env->active_tc.PC = regs->cp0_epc & ~(target_ulong)1;
if (regs->cp0_epc & 1) {
env->hflags |= MIPS_HFLAG_M16;
}
if (((info->elf_flags & EF_MIPS_NAN2008) != 0) !=
((env->active_fpu.fcr31 & (1 << FCR31_NAN2008)) != 0)) {
if ((env->active_fpu.fcr31_rw_bitmask &
(1 << FCR31_NAN2008)) == 0) {
fprintf(stderr, "ELF binary's NaN mode not supported by CPU\n");
exit(1);
}
if ((info->elf_flags & EF_MIPS_NAN2008) != 0) {
env->active_fpu.fcr31 |= (1 << FCR31_NAN2008);
} else {
env->active_fpu.fcr31 &= ~(1 << FCR31_NAN2008);
}
restore_snan_bit_mode(env);
}
}
#elif defined(TARGET_NIOS2)
{
env->regs[0] = 0;
env->regs[1] = regs->r1;
env->regs[2] = regs->r2;
env->regs[3] = regs->r3;
env->regs[4] = regs->r4;
env->regs[5] = regs->r5;
env->regs[6] = regs->r6;
env->regs[7] = regs->r7;
env->regs[8] = regs->r8;
env->regs[9] = regs->r9;
env->regs[10] = regs->r10;
env->regs[11] = regs->r11;
env->regs[12] = regs->r12;
env->regs[13] = regs->r13;
env->regs[14] = regs->r14;
env->regs[15] = regs->r15;
/* TODO: unsigned long orig_r2; */
env->regs[R_RA] = regs->ra;
env->regs[R_FP] = regs->fp;
env->regs[R_SP] = regs->sp;
env->regs[R_GP] = regs->gp;
env->regs[CR_ESTATUS] = regs->estatus;
env->regs[R_EA] = regs->ea;
/* TODO: unsigned long orig_r7; */
/* Emulate eret when starting thread. */
env->regs[R_PC] = regs->ea;
}
#elif defined(TARGET_OPENRISC)
{
int i;
for (i = 0; i < 32; i++) {
env->gpr[i] = regs->gpr[i];
}
env->pc = regs->pc;
cpu_set_sr(env, regs->sr);
}
#elif defined(TARGET_SH4)
{
int i;
for(i = 0; i < 16; i++) {
env->gregs[i] = regs->regs[i];
}
env->pc = regs->pc;
}
#elif defined(TARGET_ALPHA)
{
int i;
for(i = 0; i < 28; i++) {
env->ir[i] = ((abi_ulong *)regs)[i];
}
env->ir[IR_SP] = regs->usp;
env->pc = regs->pc;
}
#elif defined(TARGET_CRIS)
{
env->regs[0] = regs->r0;
env->regs[1] = regs->r1;
env->regs[2] = regs->r2;
env->regs[3] = regs->r3;
env->regs[4] = regs->r4;
env->regs[5] = regs->r5;
env->regs[6] = regs->r6;
env->regs[7] = regs->r7;
env->regs[8] = regs->r8;
env->regs[9] = regs->r9;
env->regs[10] = regs->r10;
env->regs[11] = regs->r11;
env->regs[12] = regs->r12;
env->regs[13] = regs->r13;
env->regs[14] = info->start_stack;
env->regs[15] = regs->acr;
env->pc = regs->erp;
}
#elif defined(TARGET_S390X)
{
int i;
for (i = 0; i < 16; i++) {
env->regs[i] = regs->gprs[i];
}
env->psw.mask = regs->psw.mask;
env->psw.addr = regs->psw.addr;
}
#elif defined(TARGET_TILEGX)
{
int i;
for (i = 0; i < TILEGX_R_COUNT; i++) {
env->regs[i] = regs->regs[i];
}
for (i = 0; i < TILEGX_SPR_COUNT; i++) {
env->spregs[i] = 0;
}
env->pc = regs->pc;
}
#elif defined(TARGET_HPPA)
{
int i;
for (i = 1; i < 32; i++) {
env->gr[i] = regs->gr[i];
}
env->iaoq_f = regs->iaoq[0];
env->iaoq_b = regs->iaoq[1];
}
#else
#error unsupported target CPU
#endif
#if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
ts->stack_base = info->start_stack;
ts->heap_base = info->brk;
/* This will be filled in on the first SYS_HEAPINFO call. */
ts->heap_limit = 0;
#endif
if (gdbstub_port) {
if (gdbserver_start(gdbstub_port) < 0) {
fprintf(stderr, "qemu: could not open gdbserver on port %d\n",
gdbstub_port);
exit(EXIT_FAILURE);
}
gdb_handlesig(cpu, 0);
}
cpu_loop(env);
/* never exits */
return 0;
}
| 16,351 |
FFmpeg | 9c7fd997f794d3180ef4cbde019e4827ff309988 | 1 | static int aiff_read_header(AVFormatContext *s,
AVFormatParameters *ap)
{
int size, filesize;
offset_t offset = 0;
uint32_t tag;
unsigned version = AIFF_C_VERSION1;
ByteIOContext *pb = s->pb;
AVStream * st = s->streams[0];
/* check FORM header */
filesize = get_tag(pb, &tag);
if (filesize < 0 || tag != MKTAG('F', 'O', 'R', 'M'))
return AVERROR_INVALIDDATA;
/* AIFF data type */
tag = get_le32(pb);
if (tag == MKTAG('A', 'I', 'F', 'F')) /* Got an AIFF file */
version = AIFF;
else if (tag != MKTAG('A', 'I', 'F', 'C')) /* An AIFF-C file then */
return AVERROR_INVALIDDATA;
filesize -= 4;
st = av_new_stream(s, 0);
if (!st)
return AVERROR(ENOMEM);
while (filesize > 0) {
/* parse different chunks */
size = get_tag(pb, &tag);
if (size < 0)
return size;
filesize -= size + 8;
switch (tag) {
case MKTAG('C', 'O', 'M', 'M'): /* Common chunk */
/* Then for the complete header info */
st->nb_frames = get_aiff_header (pb, st->codec, size, version);
if (st->nb_frames < 0)
return st->nb_frames;
if (offset > 0) // COMM is after SSND
goto got_sound;
break;
case MKTAG('F', 'V', 'E', 'R'): /* Version chunk */
version = get_be32(pb);
break;
case MKTAG('N', 'A', 'M', 'E'): /* Sample name chunk */
get_meta (pb, s->title, sizeof(s->title), size);
break;
case MKTAG('A', 'U', 'T', 'H'): /* Author chunk */
get_meta (pb, s->author, sizeof(s->author), size);
break;
case MKTAG('(', 'c', ')', ' '): /* Copyright chunk */
get_meta (pb, s->copyright, sizeof(s->copyright), size);
break;
case MKTAG('A', 'N', 'N', 'O'): /* Annotation chunk */
get_meta (pb, s->comment, sizeof(s->comment), size);
break;
case MKTAG('S', 'S', 'N', 'D'): /* Sampled sound chunk */
offset = get_be32(pb); /* Offset of sound data */
get_be32(pb); /* BlockSize... don't care */
offset += url_ftell(pb); /* Compute absolute data offset */
if (st->codec->codec_id) /* Assume COMM already parsed */
goto got_sound;
if (url_is_streamed(pb)) {
av_log(s, AV_LOG_ERROR, "file is not seekable\n");
}
url_fskip(pb, size - 8);
break;
case MKTAG('w', 'a', 'v', 'e'):
st->codec->extradata = av_mallocz(size + FF_INPUT_BUFFER_PADDING_SIZE);
if (!st->codec->extradata)
return AVERROR(ENOMEM);
st->codec->extradata_size = size;
get_buffer(pb, st->codec->extradata, size);
break;
default: /* Jump */
if (size & 1) /* Always even aligned */
size++;
url_fskip (pb, size);
}
}
/* End of loop and didn't get sound */
return AVERROR_INVALIDDATA;
got_sound:
/* Now positioned, get the sound data start and end */
if (st->nb_frames)
s->file_size = st->nb_frames * st->codec->block_align;
av_set_pts_info(st, 64, 1, st->codec->sample_rate);
st->start_time = 0;
st->duration = st->codec->frame_size ?
st->nb_frames * st->codec->frame_size : st->nb_frames;
/* Position the stream at the first block */
url_fseek(pb, offset, SEEK_SET);
return 0;
}
 | 16,353 |
FFmpeg | f431315a866da9600e3eaa99fc54da1f554f170c | 1 | static int start_frame_overlay(AVFilterLink *inlink, AVFilterBufferRef *inpicref)
{
AVFilterContext *ctx = inlink->dst;
OverlayContext *over = ctx->priv;
inlink->cur_buf = NULL;
over->overpicref = inpicref;
over->overpicref->pts = av_rescale_q(inpicref->pts, ctx->inputs[OVERLAY]->time_base,
ctx->outputs[0]->time_base);
return 0;
}
 | 16,354 |
qemu | 8f94a6e40e46cbc8e8014da825d25824b1803b34 | 1 | int bdrv_file_open(BlockDriverState **pbs, const char *filename,
QDict *options, int flags, Error **errp)
{
BlockDriverState *bs;
BlockDriver *drv;
const char *drvname;
bool allow_protocol_prefix = false;
Error *local_err = NULL;
int ret;
/* NULL means an empty set of options */
if (options == NULL) {
options = qdict_new();
}
bs = bdrv_new("");
bs->options = options;
options = qdict_clone_shallow(options);
/* Fetch the file name from the options QDict if necessary */
if (!filename) {
filename = qdict_get_try_str(options, "filename");
} else if (filename && !qdict_haskey(options, "filename")) {
qdict_put(options, "filename", qstring_from_str(filename));
allow_protocol_prefix = true;
} else {
error_setg(errp, "Can't specify 'file' and 'filename' options at the "
"same time");
ret = -EINVAL;
goto fail;
}
/* Find the right block driver */
drvname = qdict_get_try_str(options, "driver");
if (drvname) {
drv = bdrv_find_whitelisted_format(drvname, !(flags & BDRV_O_RDWR));
if (!drv) {
error_setg(errp, "Unknown driver '%s'", drvname);
}
qdict_del(options, "driver");
} else if (filename) {
drv = bdrv_find_protocol(filename, allow_protocol_prefix);
if (!drv) {
error_setg(errp, "Unknown protocol");
}
} else {
error_setg(errp, "Must specify either driver or file");
drv = NULL;
}
if (!drv) {
/* errp has been set already */
ret = -ENOENT;
goto fail;
}
/* Parse the filename and open it */
if (drv->bdrv_parse_filename && filename) {
drv->bdrv_parse_filename(filename, options, &local_err);
if (error_is_set(&local_err)) {
error_propagate(errp, local_err);
ret = -EINVAL;
goto fail;
}
qdict_del(options, "filename");
} else if (drv->bdrv_needs_filename && !filename) {
error_setg(errp, "The '%s' block driver requires a file name",
drv->format_name);
ret = -EINVAL;
goto fail;
}
ret = bdrv_open_common(bs, NULL, options, flags, drv, &local_err);
if (ret < 0) {
error_propagate(errp, local_err);
goto fail;
}
/* Check if any unknown options were used */
if (qdict_size(options) != 0) {
const QDictEntry *entry = qdict_first(options);
error_setg(errp, "Block protocol '%s' doesn't support the option '%s'",
drv->format_name, entry->key);
ret = -EINVAL;
goto fail;
}
QDECREF(options);
bs->growable = 1;
*pbs = bs;
return 0;
fail:
QDECREF(options);
if (!bs->drv) {
QDECREF(bs->options);
}
bdrv_unref(bs);
return ret;
}
| 16,355 |
qemu | 6ab3fc32ea640026726bc5f9f4db622d0954fb8a | 1 | ser_write(void *opaque, hwaddr addr,
uint64_t val64, unsigned int size)
{
ETRAXSerial *s = opaque;
uint32_t value = val64;
unsigned char ch = val64;
D(qemu_log("%s " TARGET_FMT_plx "=%x\n", __func__, addr, value));
addr >>= 2;
switch (addr)
{
case RW_DOUT:
qemu_chr_fe_write(s->chr, &ch, 1);
s->regs[R_INTR] |= 3;
s->pending_tx = 1;
s->regs[addr] = value;
break;
case RW_ACK_INTR:
if (s->pending_tx) {
value &= ~1;
s->pending_tx = 0;
D(qemu_log("fixedup value=%x r_intr=%x\n",
value, s->regs[R_INTR]));
}
s->regs[addr] = value;
s->regs[R_INTR] &= ~value;
D(printf("r_intr=%x\n", s->regs[R_INTR]));
break;
default:
s->regs[addr] = value;
break;
}
ser_update_irq(s);
}
| 16,356 |
FFmpeg | 066dc0437368acd21546ee327a0a94c60c3808b2 | 1 | static av_cold int X264_init(AVCodecContext *avctx)
{
X264Context *x4 = avctx->priv_data;
int sw,sh;
if (avctx->global_quality > 0)
av_log(avctx, AV_LOG_WARNING, "-qscale is ignored, -crf is recommended.\n");
x264_param_default(&x4->params);
x4->params.b_deblocking_filter = avctx->flags & CODEC_FLAG_LOOP_FILTER;
if (x4->preset || x4->tune)
if (x264_param_default_preset(&x4->params, x4->preset, x4->tune) < 0) {
int i;
av_log(avctx, AV_LOG_ERROR, "Error setting preset/tune %s/%s.\n", x4->preset, x4->tune);
av_log(avctx, AV_LOG_INFO, "Possible presets:");
for (i = 0; x264_preset_names[i]; i++)
av_log(avctx, AV_LOG_INFO, " %s", x264_preset_names[i]);
av_log(avctx, AV_LOG_INFO, "\n");
av_log(avctx, AV_LOG_INFO, "Possible tunes:");
for (i = 0; x264_tune_names[i]; i++)
av_log(avctx, AV_LOG_INFO, " %s", x264_tune_names[i]);
av_log(avctx, AV_LOG_INFO, "\n");
return AVERROR(EINVAL);
}
if (avctx->level > 0)
x4->params.i_level_idc = avctx->level;
x4->params.pf_log = X264_log;
x4->params.p_log_private = avctx;
x4->params.i_log_level = X264_LOG_DEBUG;
x4->params.i_csp = convert_pix_fmt(avctx->pix_fmt);
OPT_STR("weightp", x4->wpredp);
if (avctx->bit_rate) {
x4->params.rc.i_bitrate = avctx->bit_rate / 1000;
x4->params.rc.i_rc_method = X264_RC_ABR;
}
x4->params.rc.i_vbv_buffer_size = avctx->rc_buffer_size / 1000;
x4->params.rc.i_vbv_max_bitrate = avctx->rc_max_rate / 1000;
x4->params.rc.b_stat_write = avctx->flags & CODEC_FLAG_PASS1;
if (avctx->flags & CODEC_FLAG_PASS2) {
x4->params.rc.b_stat_read = 1;
} else {
if (x4->crf >= 0) {
x4->params.rc.i_rc_method = X264_RC_CRF;
x4->params.rc.f_rf_constant = x4->crf;
} else if (x4->cqp >= 0) {
x4->params.rc.i_rc_method = X264_RC_CQP;
x4->params.rc.i_qp_constant = x4->cqp;
}
if (x4->crf_max >= 0)
x4->params.rc.f_rf_constant_max = x4->crf_max;
}
if (avctx->rc_buffer_size && avctx->rc_initial_buffer_occupancy > 0 &&
(avctx->rc_initial_buffer_occupancy <= avctx->rc_buffer_size)) {
x4->params.rc.f_vbv_buffer_init =
(float)avctx->rc_initial_buffer_occupancy / avctx->rc_buffer_size;
}
OPT_STR("level", x4->level);
if (avctx->i_quant_factor > 0)
x4->params.rc.f_ip_factor = 1 / fabs(avctx->i_quant_factor);
if (avctx->b_quant_factor > 0)
x4->params.rc.f_pb_factor = avctx->b_quant_factor;
if (avctx->chromaoffset)
x4->params.analyse.i_chroma_qp_offset = avctx->chromaoffset;
if (avctx->me_method == ME_EPZS)
x4->params.analyse.i_me_method = X264_ME_DIA;
else if (avctx->me_method == ME_HEX)
x4->params.analyse.i_me_method = X264_ME_HEX;
else if (avctx->me_method == ME_UMH)
x4->params.analyse.i_me_method = X264_ME_UMH;
else if (avctx->me_method == ME_FULL)
x4->params.analyse.i_me_method = X264_ME_ESA;
else if (avctx->me_method == ME_TESA)
x4->params.analyse.i_me_method = X264_ME_TESA;
if (avctx->gop_size >= 0)
x4->params.i_keyint_max = avctx->gop_size;
if (avctx->max_b_frames >= 0)
x4->params.i_bframe = avctx->max_b_frames;
if (avctx->scenechange_threshold >= 0)
x4->params.i_scenecut_threshold = avctx->scenechange_threshold;
if (avctx->qmin >= 0)
x4->params.rc.i_qp_min = avctx->qmin;
if (avctx->qmax >= 0)
x4->params.rc.i_qp_max = avctx->qmax;
if (avctx->max_qdiff >= 0)
x4->params.rc.i_qp_step = avctx->max_qdiff;
if (avctx->qblur >= 0)
x4->params.rc.f_qblur = avctx->qblur; /* temporally blur quants */
if (avctx->qcompress >= 0)
x4->params.rc.f_qcompress = avctx->qcompress; /* 0.0 => cbr, 1.0 => constant qp */
if (avctx->refs >= 0)
x4->params.i_frame_reference = avctx->refs;
else if (x4->level) {
int i;
int mbn = FF_CEIL_RSHIFT(avctx->width, 4) * FF_CEIL_RSHIFT(avctx->height, 4);
int level_id = -1;
char *tail;
int scale = X264_BUILD < 129 ? 384 : 1;
if (!strcmp(x4->level, "1b")) {
level_id = 9;
} else if (strlen(x4->level) <= 3){
level_id = av_strtod(x4->level, &tail) * 10 + 0.5;
if (*tail)
level_id = -1;
}
if (level_id <= 0)
av_log(avctx, AV_LOG_WARNING, "Failed to parse level\n");
for (i = 0; i<x264_levels[i].level_idc; i++)
if (x264_levels[i].level_idc == level_id)
x4->params.i_frame_reference = av_clip(x264_levels[i].dpb / mbn / scale, 1, x4->params.i_frame_reference);
}
if (avctx->trellis >= 0)
x4->params.analyse.i_trellis = avctx->trellis;
if (avctx->me_range >= 0)
x4->params.analyse.i_me_range = avctx->me_range;
if (avctx->noise_reduction >= 0)
x4->params.analyse.i_noise_reduction = avctx->noise_reduction;
if (avctx->me_subpel_quality >= 0)
x4->params.analyse.i_subpel_refine = avctx->me_subpel_quality;
if (avctx->b_frame_strategy >= 0)
x4->params.i_bframe_adaptive = avctx->b_frame_strategy;
if (avctx->keyint_min >= 0)
x4->params.i_keyint_min = avctx->keyint_min;
if (avctx->coder_type >= 0)
x4->params.b_cabac = avctx->coder_type == FF_CODER_TYPE_AC;
if (avctx->me_cmp >= 0)
x4->params.analyse.b_chroma_me = avctx->me_cmp & FF_CMP_CHROMA;
if (x4->aq_mode >= 0)
x4->params.rc.i_aq_mode = x4->aq_mode;
if (x4->aq_strength >= 0)
x4->params.rc.f_aq_strength = x4->aq_strength;
PARSE_X264_OPT("psy-rd", psy_rd);
PARSE_X264_OPT("deblock", deblock);
PARSE_X264_OPT("partitions", partitions);
PARSE_X264_OPT("stats", stats);
if (x4->psy >= 0)
x4->params.analyse.b_psy = x4->psy;
if (x4->rc_lookahead >= 0)
x4->params.rc.i_lookahead = x4->rc_lookahead;
if (x4->weightp >= 0)
x4->params.analyse.i_weighted_pred = x4->weightp;
if (x4->weightb >= 0)
x4->params.analyse.b_weighted_bipred = x4->weightb;
if (x4->cplxblur >= 0)
x4->params.rc.f_complexity_blur = x4->cplxblur;
if (x4->ssim >= 0)
x4->params.analyse.b_ssim = x4->ssim;
if (x4->intra_refresh >= 0)
x4->params.b_intra_refresh = x4->intra_refresh;
if (x4->bluray_compat >= 0) {
x4->params.b_bluray_compat = x4->bluray_compat;
x4->params.b_vfr_input = 0;
}
if (x4->avcintra_class >= 0)
#if X264_BUILD >= 142
x4->params.i_avcintra_class = x4->avcintra_class;
#else
av_log(avctx, AV_LOG_ERROR,
"x264 too old for AVC Intra, at least version 142 needed\n");
#endif
if (x4->b_bias != INT_MIN)
x4->params.i_bframe_bias = x4->b_bias;
if (x4->b_pyramid >= 0)
x4->params.i_bframe_pyramid = x4->b_pyramid;
if (x4->mixed_refs >= 0)
x4->params.analyse.b_mixed_references = x4->mixed_refs;
if (x4->dct8x8 >= 0)
x4->params.analyse.b_transform_8x8 = x4->dct8x8;
if (x4->fast_pskip >= 0)
x4->params.analyse.b_fast_pskip = x4->fast_pskip;
if (x4->aud >= 0)
x4->params.b_aud = x4->aud;
if (x4->mbtree >= 0)
x4->params.rc.b_mb_tree = x4->mbtree;
if (x4->direct_pred >= 0)
x4->params.analyse.i_direct_mv_pred = x4->direct_pred;
if (x4->slice_max_size >= 0)
x4->params.i_slice_max_size = x4->slice_max_size;
else {
/*
* Allow x264 to be instructed through AVCodecContext about the maximum
* size of the RTP payload. For example, this enables the production of
* payload suitable for the H.264 RTP packetization-mode 0 i.e. single
* NAL unit per RTP packet.
*/
if (avctx->rtp_payload_size)
x4->params.i_slice_max_size = avctx->rtp_payload_size;
}
if (x4->fastfirstpass)
x264_param_apply_fastfirstpass(&x4->params);
/* Allow specifying the x264 profile through AVCodecContext. */
if (!x4->profile)
switch (avctx->profile) {
case FF_PROFILE_H264_BASELINE:
x4->profile = av_strdup("baseline");
break;
case FF_PROFILE_H264_HIGH:
x4->profile = av_strdup("high");
break;
case FF_PROFILE_H264_HIGH_10:
x4->profile = av_strdup("high10");
break;
case FF_PROFILE_H264_HIGH_422:
x4->profile = av_strdup("high422");
break;
case FF_PROFILE_H264_HIGH_444:
x4->profile = av_strdup("high444");
break;
case FF_PROFILE_H264_MAIN:
x4->profile = av_strdup("main");
break;
default:
break;
}
if (x4->nal_hrd >= 0)
x4->params.i_nal_hrd = x4->nal_hrd;
if (x4->profile)
if (x264_param_apply_profile(&x4->params, x4->profile) < 0) {
int i;
av_log(avctx, AV_LOG_ERROR, "Error setting profile %s.\n", x4->profile);
av_log(avctx, AV_LOG_INFO, "Possible profiles:");
for (i = 0; x264_profile_names[i]; i++)
av_log(avctx, AV_LOG_INFO, " %s", x264_profile_names[i]);
av_log(avctx, AV_LOG_INFO, "\n");
return AVERROR(EINVAL);
}
x4->params.i_width = avctx->width;
x4->params.i_height = avctx->height;
av_reduce(&sw, &sh, avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den, 4096);
x4->params.vui.i_sar_width = sw;
x4->params.vui.i_sar_height = sh;
x4->params.i_timebase_den = avctx->time_base.den;
x4->params.i_timebase_num = avctx->time_base.num;
x4->params.i_fps_num = avctx->time_base.den;
x4->params.i_fps_den = avctx->time_base.num * avctx->ticks_per_frame;
x4->params.analyse.b_psnr = avctx->flags & CODEC_FLAG_PSNR;
x4->params.i_threads = avctx->thread_count;
if (avctx->thread_type)
x4->params.b_sliced_threads = avctx->thread_type == FF_THREAD_SLICE;
x4->params.b_interlaced = avctx->flags & CODEC_FLAG_INTERLACED_DCT;
x4->params.b_open_gop = !(avctx->flags & CODEC_FLAG_CLOSED_GOP);
x4->params.i_slice_count = avctx->slices;
x4->params.vui.b_fullrange = avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
avctx->color_range == AVCOL_RANGE_JPEG;
if (avctx->colorspace != AVCOL_SPC_UNSPECIFIED)
x4->params.vui.i_colmatrix = avctx->colorspace;
if (avctx->color_primaries != AVCOL_PRI_UNSPECIFIED)
x4->params.vui.i_colorprim = avctx->color_primaries;
if (avctx->color_trc != AVCOL_TRC_UNSPECIFIED)
x4->params.vui.i_transfer = avctx->color_trc;
if (avctx->flags & CODEC_FLAG_GLOBAL_HEADER)
x4->params.b_repeat_headers = 0;
if(x4->x264opts){
const char *p= x4->x264opts;
while(p){
char param[256]={0}, val[256]={0};
if(sscanf(p, "%255[^:=]=%255[^:]", param, val) == 1){
OPT_STR(param, "1");
}else
OPT_STR(param, val);
p= strchr(p, ':');
p+=!!p;
}
}
if (x4->x264_params) {
AVDictionary *dict = NULL;
AVDictionaryEntry *en = NULL;
if (!av_dict_parse_string(&dict, x4->x264_params, "=", ":", 0)) {
while ((en = av_dict_get(dict, "", en, AV_DICT_IGNORE_SUFFIX))) {
if (x264_param_parse(&x4->params, en->key, en->value) < 0)
av_log(avctx, AV_LOG_WARNING,
"Error parsing option '%s = %s'.\n",
en->key, en->value);
}
av_dict_free(&dict);
}
}
// update AVCodecContext with x264 parameters
avctx->has_b_frames = x4->params.i_bframe ?
x4->params.i_bframe_pyramid ? 2 : 1 : 0;
if (avctx->max_b_frames < 0)
avctx->max_b_frames = 0;
avctx->bit_rate = x4->params.rc.i_bitrate*1000;
x4->enc = x264_encoder_open(&x4->params);
if (!x4->enc)
return -1;
avctx->coded_frame = av_frame_alloc();
if (!avctx->coded_frame)
return AVERROR(ENOMEM);
if (avctx->flags & CODEC_FLAG_GLOBAL_HEADER) {
x264_nal_t *nal;
uint8_t *p;
int nnal, s, i;
s = x264_encoder_headers(x4->enc, &nal, &nnal);
avctx->extradata = p = av_malloc(s);
for (i = 0; i < nnal; i++) {
/* Don't put the SEI in extradata. */
if (nal[i].i_type == NAL_SEI) {
av_log(avctx, AV_LOG_INFO, "%s\n", nal[i].p_payload+25);
x4->sei_size = nal[i].i_payload;
            x4->sei = av_malloc(x4->sei_size);
            if (!x4->sei)
                goto nomem;
            memcpy(x4->sei, nal[i].p_payload, nal[i].i_payload);
continue;
}
memcpy(p, nal[i].p_payload, nal[i].i_payload);
p += nal[i].i_payload;
}
avctx->extradata_size = p - avctx->extradata;
}
return 0;
nomem:
X264_close(avctx);
return AVERROR(ENOMEM);
} | 16,357 |
qemu | aebf81680bf49c5953d7ae9ebb7b98c0f4dad8f3 | 1 | int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
VhostBackendType backend_type)
{
    uint64_t features;
    int i, r;

    if (vhost_set_backend_type(hdev, backend_type) < 0) {
        return -errno;
    }

    if (hdev->vhost_ops->vhost_backend_init(hdev, opaque) < 0) {
        return -errno;
    }

    QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);

    r = hdev->vhost_ops->vhost_call(hdev, VHOST_SET_OWNER, NULL);
    if (r < 0) {
        goto fail;
    }

    r = hdev->vhost_ops->vhost_call(hdev, VHOST_GET_FEATURES, &features);
    if (r < 0) {
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i);
        if (r < 0) {
            goto fail_vq;
        }
    }
    hdev->features = features;

    hdev->memory_listener = (MemoryListener) {
        .begin = vhost_begin,
        .commit = vhost_commit,
        .region_add = vhost_region_add,
        .region_del = vhost_region_del,
        .region_nop = vhost_region_nop,
        .log_start = vhost_log_start,
        .log_stop = vhost_log_stop,
        .log_sync = vhost_log_sync,
        .log_global_start = vhost_log_global_start,
        .log_global_stop = vhost_log_global_stop,
        .eventfd_add = vhost_eventfd_add,
        .eventfd_del = vhost_eventfd_del,
        .priority = 10
    };
    hdev->migration_blocker = NULL;
    if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) {
        error_setg(&hdev->migration_blocker,
                   "Migration disabled: vhost lacks VHOST_F_LOG_ALL feature.");
        migrate_add_blocker(hdev->migration_blocker);
    }
    hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
    hdev->n_mem_sections = 0;
    hdev->mem_sections = NULL;
    hdev->log = NULL;
    hdev->log_size = 0;
    hdev->log_enabled = false;
    hdev->started = false;
    hdev->memory_changed = false;
    memory_listener_register(&hdev->memory_listener, &address_space_memory);
    return 0;

fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_cleanup(hdev->vqs + i);
    }
fail:
    r = -errno;
    hdev->vhost_ops->vhost_backend_cleanup(hdev);
    QLIST_REMOVE(hdev, entry);
    return r;
}
| 16,359 |
qemu | 21ef45d71221b4577330fe3aacfb06afad91ad46 | 1 | static int vnc_display_disable_login(DisplayState *ds)
{
VncDisplay *vs = ds ? (VncDisplay *)ds->opaque : vnc_display;
if (!vs) {
return -1;
}
if (vs->password) {
g_free(vs->password);
}
vs->password = NULL;
if (vs->auth == VNC_AUTH_NONE) {
vs->auth = VNC_AUTH_VNC;
}
return 0;
}
| 16,362 |
qemu | 3df3e0a5872cbc8fcc55a0413416352eec68132e | 1 | static int vfio_connect_container(VFIOGroup *group)
{
VFIOContainer *container;
int ret, fd;
if (group->container) {
return 0;
}
QLIST_FOREACH(container, &container_list, next) {
if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) {
group->container = container;
QLIST_INSERT_HEAD(&container->group_list, group, container_next);
return 0;
}
}
fd = qemu_open("/dev/vfio/vfio", O_RDWR);
if (fd < 0) {
error_report("vfio: failed to open /dev/vfio/vfio: %m");
return -errno;
}
ret = ioctl(fd, VFIO_GET_API_VERSION);
if (ret != VFIO_API_VERSION) {
error_report("vfio: supported vfio version: %d, "
"reported version: %d", VFIO_API_VERSION, ret);
ret = -EINVAL;
goto close_fd_exit;
}
container = g_malloc0(sizeof(*container));
container->fd = fd;
if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU)) {
ret = ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &fd);
if (ret) {
error_report("vfio: failed to set group container: %m");
ret = -errno;
goto free_container_exit;
}
ret = ioctl(fd, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);
if (ret) {
error_report("vfio: failed to set iommu for container: %m");
ret = -errno;
goto free_container_exit;
}
container->iommu_data.type1.listener = vfio_memory_listener;
container->iommu_data.release = vfio_listener_release;
memory_listener_register(&container->iommu_data.type1.listener,
&address_space_memory);
if (container->iommu_data.type1.error) {
ret = container->iommu_data.type1.error;
error_report("vfio: memory listener initialization failed for container");
goto listener_release_exit;
}
container->iommu_data.type1.initialized = true;
} else {
error_report("vfio: No available IOMMU models");
ret = -EINVAL;
goto free_container_exit;
}
QLIST_INIT(&container->group_list);
QLIST_INSERT_HEAD(&container_list, container, next);
group->container = container;
QLIST_INSERT_HEAD(&container->group_list, group, container_next);
return 0;
listener_release_exit:
vfio_listener_release(container);
free_container_exit:
g_free(container);
close_fd_exit:
close(fd);
return ret;
}
| 16,363 |
qemu | c572f23a3e7180dbeab5e86583e43ea2afed6271 | 1 | static void v9fs_fsync(void *opaque)
{
int err;
int32_t fid;
int datasync;
size_t offset = 7;
V9fsFidState *fidp;
V9fsPDU *pdu = opaque;
V9fsState *s = pdu->s;
pdu_unmarshal(pdu, offset, "dd", &fid, &datasync);
fidp = get_fid(pdu, fid);
if (fidp == NULL) {
err = -ENOENT;
goto out_nofid;
}
err = v9fs_co_fsync(pdu, fidp, datasync);
if (!err) {
err = offset;
}
put_fid(pdu, fidp);
out_nofid:
complete_pdu(s, pdu, err);
} | 16,364 |
FFmpeg | 4d986b71721ddf26ab8563b933ec8dd3ec88e59c | 1 | static float quantize_and_encode_band_cost(struct AACEncContext *s,
PutBitContext *pb, const float *in,
const float *scaled, int size, int scale_idx,
int cb, const float lambda, const float uplim,
int *bits)
{
const float IQ = ff_aac_pow2sf_tab[200 + scale_idx - SCALE_ONE_POS + SCALE_DIV_512];
const float Q = ff_aac_pow2sf_tab[200 - scale_idx + SCALE_ONE_POS - SCALE_DIV_512];
const float CLIPPED_ESCAPE = 165140.0f*IQ;
int i, j, k;
float cost = 0;
const int dim = cb < FIRST_PAIR_BT ? 4 : 2;
int resbits = 0;
#ifndef USE_REALLY_FULL_SEARCH
const float Q34 = sqrtf(Q * sqrtf(Q));
const int range = aac_cb_range[cb];
const int maxval = aac_cb_maxval[cb];
int offs[4];
#endif /* USE_REALLY_FULL_SEARCH */
if (!cb) {
for (i = 0; i < size; i++)
cost += in[i]*in[i];
if (bits)
*bits = 0;
return cost * lambda;
}
#ifndef USE_REALLY_FULL_SEARCH
offs[0] = 1;
for (i = 1; i < dim; i++)
offs[i] = offs[i-1]*range;
if (!scaled) {
abs_pow34_v(s->scoefs, in, size);
scaled = s->scoefs;
}
quantize_bands(s->qcoefs, in, scaled, size, Q34, !IS_CODEBOOK_UNSIGNED(cb), maxval);
#endif /* USE_REALLY_FULL_SEARCH */
for (i = 0; i < size; i += dim) {
float mincost;
int minidx = 0;
int minbits = 0;
const float *vec;
#ifndef USE_REALLY_FULL_SEARCH
int (*quants)[2] = &s->qcoefs[i];
mincost = 0.0f;
for (j = 0; j < dim; j++)
mincost += in[i+j]*in[i+j];
minidx = IS_CODEBOOK_UNSIGNED(cb) ? 0 : 40;
minbits = ff_aac_spectral_bits[cb-1][minidx];
mincost = mincost * lambda + minbits;
for (j = 0; j < (1<<dim); j++) {
float rd = 0.0f;
int curbits;
int curidx = IS_CODEBOOK_UNSIGNED(cb) ? 0 : 40;
int same = 0;
for (k = 0; k < dim; k++) {
if ((j & (1 << k)) && quants[k][0] == quants[k][1]) {
same = 1;
break;
}
}
if (same)
continue;
for (k = 0; k < dim; k++)
curidx += quants[k][!!(j & (1 << k))] * offs[dim - 1 - k];
curbits = ff_aac_spectral_bits[cb-1][curidx];
vec = &ff_aac_codebook_vectors[cb-1][curidx*dim];
#else
mincost = INFINITY;
vec = ff_aac_codebook_vectors[cb-1];
for (j = 0; j < ff_aac_spectral_sizes[cb-1]; j++, vec += dim) {
float rd = 0.0f;
int curbits = ff_aac_spectral_bits[cb-1][j];
int curidx = j;
#endif /* USE_REALLY_FULL_SEARCH */
if (IS_CODEBOOK_UNSIGNED(cb)) {
for (k = 0; k < dim; k++) {
float t = fabsf(in[i+k]);
float di;
if (vec[k] == 64.0f) { //FIXME: slow
//do not code with escape sequence small values
if (t < 39.0f*IQ) {
rd = INFINITY;
break;
}
if (t >= CLIPPED_ESCAPE) {
di = t - CLIPPED_ESCAPE;
curbits += 21;
} else {
int c = av_clip(quant(t, Q), 0, 8191);
di = t - c*cbrtf(c)*IQ;
curbits += av_log2(c)*2 - 4 + 1;
}
} else {
di = t - vec[k]*IQ;
}
if (vec[k] != 0.0f)
curbits++;
rd += di*di;
}
} else {
for (k = 0; k < dim; k++) {
float di = in[i+k] - vec[k]*IQ;
rd += di*di;
}
}
rd = rd * lambda + curbits;
if (rd < mincost) {
mincost = rd;
minidx = curidx;
minbits = curbits;
}
}
cost += mincost;
resbits += minbits;
if (cost >= uplim)
return uplim;
if (pb) {
put_bits(pb, ff_aac_spectral_bits[cb-1][minidx], ff_aac_spectral_codes[cb-1][minidx]);
if (IS_CODEBOOK_UNSIGNED(cb))
for (j = 0; j < dim; j++)
if (ff_aac_codebook_vectors[cb-1][minidx*dim+j] != 0.0f)
put_bits(pb, 1, in[i+j] < 0.0f);
if (cb == ESC_BT) {
for (j = 0; j < 2; j++) {
if (ff_aac_codebook_vectors[cb-1][minidx*2+j] == 64.0f) {
int coef = av_clip(quant(fabsf(in[i+j]), Q), 0, 8191);
int len = av_log2(coef);
put_bits(pb, len - 4 + 1, (1 << (len - 4 + 1)) - 2);
put_bits(pb, len, coef & ((1 << len) - 1));
}
}
}
}
}
if (bits)
*bits = resbits;
return cost;
}
| 16,366 |
qemu | 7ccc44fd7d1dfa62c4d6f3a680df809d6e7068ce | 1 | static SocketAddress *nbd_config(BDRVNBDState *s, QDict *options, char **export,
Error **errp)
{
SocketAddress *saddr;
if (qdict_haskey(options, "path") == qdict_haskey(options, "host")) {
if (qdict_haskey(options, "path")) {
error_setg(errp, "path and host may not be used at the same time.");
} else {
error_setg(errp, "one of path and host must be specified.");
}
return NULL;
}
saddr = g_new0(SocketAddress, 1);
if (qdict_haskey(options, "path")) {
UnixSocketAddress *q_unix;
saddr->type = SOCKET_ADDRESS_KIND_UNIX;
q_unix = saddr->u.q_unix.data = g_new0(UnixSocketAddress, 1);
q_unix->path = g_strdup(qdict_get_str(options, "path"));
qdict_del(options, "path");
} else {
InetSocketAddress *inet;
saddr->type = SOCKET_ADDRESS_KIND_INET;
inet = saddr->u.inet.data = g_new0(InetSocketAddress, 1);
inet->host = g_strdup(qdict_get_str(options, "host"));
if (!qdict_get_try_str(options, "port")) {
inet->port = g_strdup_printf("%d", NBD_DEFAULT_PORT);
} else {
inet->port = g_strdup(qdict_get_str(options, "port"));
}
qdict_del(options, "host");
qdict_del(options, "port");
}
s->client.is_unix = saddr->type == SOCKET_ADDRESS_KIND_UNIX;
*export = g_strdup(qdict_get_try_str(options, "export"));
if (*export) {
qdict_del(options, "export");
}
return saddr;
}
| 16,367 |
qemu | faadf50e2962dd54175647a80bd6fc4319c91973 | 1 | static void init_excp_602 (CPUPPCState *env)
{
#if !defined(CONFIG_USER_ONLY)
env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100;
env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200;
env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300;
env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400;
env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500;
env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600;
env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700;
env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800;
env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900;
env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00;
env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00;
env->excp_vectors[POWERPC_EXCP_FPA] = 0x00000E00;
env->excp_vectors[POWERPC_EXCP_IFTLB] = 0x00001000;
env->excp_vectors[POWERPC_EXCP_DLTLB] = 0x00001100;
env->excp_vectors[POWERPC_EXCP_DSTLB] = 0x00001200;
env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001300;
env->excp_vectors[POWERPC_EXCP_SMI] = 0x00001400;
env->excp_vectors[POWERPC_EXCP_WDT] = 0x00001500;
env->excp_vectors[POWERPC_EXCP_EMUL] = 0x00001600;
env->excp_prefix = 0xFFF00000;
/* Hardware reset vector */
env->hreset_vector = 0xFFFFFFFCUL;
#endif
}
| 16,368 |
qemu | de82815db1c89da058b7fb941dab137d6d9ab738 | 1 | int qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
BdrvCheckMode fix)
{
BDRVQcowState *s = bs->opaque;
int64_t size, i, highest_cluster, nb_clusters;
int refcount1, refcount2;
QCowSnapshot *sn;
uint16_t *refcount_table;
int ret;
size = bdrv_getlength(bs->file);
if (size < 0) {
res->check_errors++;
return size;
}
nb_clusters = size_to_clusters(s, size);
if (nb_clusters > INT_MAX) {
res->check_errors++;
return -EFBIG;
}
refcount_table = g_malloc0(nb_clusters * sizeof(uint16_t));
res->bfi.total_clusters =
size_to_clusters(s, bs->total_sectors * BDRV_SECTOR_SIZE);
/* header */
inc_refcounts(bs, res, refcount_table, nb_clusters,
0, s->cluster_size);
/* current L1 table */
ret = check_refcounts_l1(bs, res, refcount_table, nb_clusters,
s->l1_table_offset, s->l1_size, CHECK_FRAG_INFO);
if (ret < 0) {
goto fail;
}
/* snapshots */
for(i = 0; i < s->nb_snapshots; i++) {
sn = s->snapshots + i;
ret = check_refcounts_l1(bs, res, refcount_table, nb_clusters,
sn->l1_table_offset, sn->l1_size, 0);
if (ret < 0) {
goto fail;
}
}
inc_refcounts(bs, res, refcount_table, nb_clusters,
s->snapshots_offset, s->snapshots_size);
/* refcount data */
inc_refcounts(bs, res, refcount_table, nb_clusters,
s->refcount_table_offset,
s->refcount_table_size * sizeof(uint64_t));
for(i = 0; i < s->refcount_table_size; i++) {
uint64_t offset, cluster;
offset = s->refcount_table[i];
cluster = offset >> s->cluster_bits;
/* Refcount blocks are cluster aligned */
if (offset_into_cluster(s, offset)) {
fprintf(stderr, "ERROR refcount block %" PRId64 " is not "
"cluster aligned; refcount table entry corrupted\n", i);
res->corruptions++;
continue;
}
if (cluster >= nb_clusters) {
fprintf(stderr, "ERROR refcount block %" PRId64
" is outside image\n", i);
res->corruptions++;
continue;
}
if (offset != 0) {
inc_refcounts(bs, res, refcount_table, nb_clusters,
offset, s->cluster_size);
if (refcount_table[cluster] != 1) {
fprintf(stderr, "%s refcount block %" PRId64
" refcount=%d\n",
fix & BDRV_FIX_ERRORS ? "Repairing" :
"ERROR",
i, refcount_table[cluster]);
if (fix & BDRV_FIX_ERRORS) {
int64_t new_offset;
new_offset = realloc_refcount_block(bs, i, offset);
if (new_offset < 0) {
res->corruptions++;
continue;
}
/* update refcounts */
if ((new_offset >> s->cluster_bits) >= nb_clusters) {
/* increase refcount_table size if necessary */
int old_nb_clusters = nb_clusters;
nb_clusters = (new_offset >> s->cluster_bits) + 1;
refcount_table = g_realloc(refcount_table,
nb_clusters * sizeof(uint16_t));
memset(&refcount_table[old_nb_clusters], 0, (nb_clusters
- old_nb_clusters) * sizeof(uint16_t));
}
refcount_table[cluster]--;
inc_refcounts(bs, res, refcount_table, nb_clusters,
new_offset, s->cluster_size);
res->corruptions_fixed++;
} else {
res->corruptions++;
}
}
}
}
/* compare ref counts */
for (i = 0, highest_cluster = 0; i < nb_clusters; i++) {
refcount1 = get_refcount(bs, i);
if (refcount1 < 0) {
fprintf(stderr, "Can't get refcount for cluster %" PRId64 ": %s\n",
i, strerror(-refcount1));
res->check_errors++;
continue;
}
refcount2 = refcount_table[i];
if (refcount1 > 0 || refcount2 > 0) {
highest_cluster = i;
}
if (refcount1 != refcount2) {
/* Check if we're allowed to fix the mismatch */
int *num_fixed = NULL;
if (refcount1 > refcount2 && (fix & BDRV_FIX_LEAKS)) {
num_fixed = &res->leaks_fixed;
} else if (refcount1 < refcount2 && (fix & BDRV_FIX_ERRORS)) {
num_fixed = &res->corruptions_fixed;
}
fprintf(stderr, "%s cluster %" PRId64 " refcount=%d reference=%d\n",
num_fixed != NULL ? "Repairing" :
refcount1 < refcount2 ? "ERROR" :
"Leaked",
i, refcount1, refcount2);
if (num_fixed) {
ret = update_refcount(bs, i << s->cluster_bits, 1,
refcount2 - refcount1,
QCOW2_DISCARD_ALWAYS);
if (ret >= 0) {
(*num_fixed)++;
continue;
}
}
/* And if we couldn't, print an error */
if (refcount1 < refcount2) {
res->corruptions++;
} else {
res->leaks++;
}
}
}
/* check OFLAG_COPIED */
ret = check_oflag_copied(bs, res, fix);
if (ret < 0) {
goto fail;
}
res->image_end_offset = (highest_cluster + 1) * s->cluster_size;
ret = 0;
fail:
g_free(refcount_table);
return ret;
}
| 16,369 |
qemu | 418661e0324c1c419552cf24bd4447292e884bdd | 1 | BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs,
const char *backing_file)
{
char *filename_full = NULL;
char *backing_file_full = NULL;
char *filename_tmp = NULL;
int is_protocol = 0;
BlockDriverState *curr_bs = NULL;
    BlockDriverState *retval = NULL;
    Error *local_error = NULL;
if (!bs || !bs->drv || !backing_file) {
return NULL;
}
filename_full = g_malloc(PATH_MAX);
backing_file_full = g_malloc(PATH_MAX);
filename_tmp = g_malloc(PATH_MAX);
is_protocol = path_has_protocol(backing_file);
for (curr_bs = bs; curr_bs->backing; curr_bs = curr_bs->backing->bs) {
/* If either of the filename paths is actually a protocol, then
* compare unmodified paths; otherwise make paths relative */
if (is_protocol || path_has_protocol(curr_bs->backing_file)) {
if (strcmp(backing_file, curr_bs->backing_file) == 0) {
retval = curr_bs->backing->bs;
break;
}
/* Also check against the full backing filename for the image */
bdrv_get_full_backing_filename(curr_bs, backing_file_full, PATH_MAX,
&local_error);
if (local_error == NULL) {
if (strcmp(backing_file, backing_file_full) == 0) {
retval = curr_bs->backing->bs;
break;
}
} else {
error_free(local_error);
local_error = NULL;
}
} else {
/* If not an absolute filename path, make it relative to the current
* image's filename path */
path_combine(filename_tmp, PATH_MAX, curr_bs->filename,
backing_file);
/* We are going to compare absolute pathnames */
if (!realpath(filename_tmp, filename_full)) {
continue;
}
/* We need to make sure the backing filename we are comparing against
* is relative to the current image filename (or absolute) */
path_combine(filename_tmp, PATH_MAX, curr_bs->filename,
curr_bs->backing_file);
if (!realpath(filename_tmp, backing_file_full)) {
continue;
}
if (strcmp(backing_file_full, filename_full) == 0) {
retval = curr_bs->backing->bs;
break;
}
}
}
g_free(filename_full);
g_free(backing_file_full);
g_free(filename_tmp);
return retval;
} | 16,371 |
qemu | 1760048a5d21bacf0e4838da2f61b2d8db7d2866 | 1 | static void test_ivshmem_pair(void)
{
IVState state1, state2, *s1, *s2;
char *data;
int i;
setup_vm(&state1);
s1 = &state1;
setup_vm(&state2);
s2 = &state2;
data = g_malloc0(TMPSHMSIZE);
/* host write, guest 1 & 2 read */
memset(tmpshmem, 0x42, TMPSHMSIZE);
qtest_memread(s1->qtest, (uintptr_t)s1->mem_base, data, TMPSHMSIZE);
for (i = 0; i < TMPSHMSIZE; i++) {
g_assert_cmpuint(data[i], ==, 0x42);
}
qtest_memread(s2->qtest, (uintptr_t)s2->mem_base, data, TMPSHMSIZE);
for (i = 0; i < TMPSHMSIZE; i++) {
g_assert_cmpuint(data[i], ==, 0x42);
}
/* guest 1 write, guest 2 read */
memset(data, 0x43, TMPSHMSIZE);
qtest_memwrite(s1->qtest, (uintptr_t)s1->mem_base, data, TMPSHMSIZE);
memset(data, 0, TMPSHMSIZE);
qtest_memread(s2->qtest, (uintptr_t)s2->mem_base, data, TMPSHMSIZE);
for (i = 0; i < TMPSHMSIZE; i++) {
g_assert_cmpuint(data[i], ==, 0x43);
}
/* guest 2 write, guest 1 read */
memset(data, 0x44, TMPSHMSIZE);
qtest_memwrite(s2->qtest, (uintptr_t)s2->mem_base, data, TMPSHMSIZE);
memset(data, 0, TMPSHMSIZE);
    qtest_memread(s1->qtest, (uintptr_t)s1->mem_base, data, TMPSHMSIZE);
for (i = 0; i < TMPSHMSIZE; i++) {
g_assert_cmpuint(data[i], ==, 0x44);
}
qtest_quit(s1->qtest);
qtest_quit(s2->qtest);
g_free(data);
}
| 16,372 |
FFmpeg | 43b434210e597d484aef57c4139c3126d22b7e2b | 1 | int ff_h264_execute_decode_slices(H264Context *h, unsigned context_count)
{
AVCodecContext *const avctx = h->avctx;
H264SliceContext *sl;
int i;
av_assert0(context_count && h->slice_ctx[context_count - 1].mb_y < h->mb_height);
if (h->avctx->hwaccel ||
h->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU)
return 0;
if (context_count == 1) {
int ret = decode_slice(avctx, &h->slice_ctx[0]);
h->mb_y = h->slice_ctx[0].mb_y;
return ret;
} else {
av_assert0(context_count > 0);
for (i = 1; i < context_count; i++) {
sl = &h->slice_ctx[i];
if (CONFIG_ERROR_RESILIENCE) {
sl->er.error_count = 0;
}
}
avctx->execute(avctx, decode_slice, h->slice_ctx,
NULL, context_count, sizeof(h->slice_ctx[0]));
/* pull back stuff from slices to master context */
sl = &h->slice_ctx[context_count - 1];
h->mb_y = sl->mb_y;
if (CONFIG_ERROR_RESILIENCE) {
for (i = 1; i < context_count; i++)
h->slice_ctx[0].er.error_count += h->slice_ctx[i].er.error_count;
}
}
return 0;
}
| 16,373 |
qemu | 5eb6a3c50185e101f87382f41fb66eed5784e7ac | 1 | static void qdev_prop_set_globals_for_type(DeviceState *dev,
const char *typename)
{
GList *l;
for (l = global_props; l; l = l->next) {
GlobalProperty *prop = l->data;
Error *err = NULL;
if (strcmp(typename, prop->driver) != 0) {
continue;
}
prop->used = true;
object_property_parse(OBJECT(dev), prop->value, prop->property, &err);
if (err != NULL) {
error_prepend(&err, "can't apply global %s.%s=%s: ",
prop->driver, prop->property, prop->value);
if (!dev->hotplugged && prop->errp) {
error_propagate(prop->errp, err);
} else {
assert(prop->user_provided);
warn_report_err(err);
}
}
}
}
| 16,374 |
FFmpeg | 98d0d19208959766a58f13dd6a678d1f765a26ac | 1 | static int lag_decode_frame(AVCodecContext *avctx,
void *data, int *data_size, AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
LagarithContext *l = avctx->priv_data;
AVFrame *const p = &l->picture;
uint8_t frametype = 0;
uint32_t offset_gu = 0, offset_bv = 0, offset_ry = 9;
uint32_t offs[4];
uint8_t *srcs[4], *dst;
int i, j, planes = 3;
AVFrame *picture = data;
if (p->data[0])
avctx->release_buffer(avctx, p);
p->reference = 0;
p->key_frame = 1;
frametype = buf[0];
offset_gu = AV_RL32(buf + 1);
offset_bv = AV_RL32(buf + 5);
switch (frametype) {
case FRAME_SOLID_RGBA:
avctx->pix_fmt = PIX_FMT_RGB32;
if (avctx->get_buffer(avctx, p) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1;
}
dst = p->data[0];
for (j = 0; j < avctx->height; j++) {
for (i = 0; i < avctx->width; i++)
AV_WN32(dst + i * 4, offset_gu);
dst += p->linesize[0];
}
break;
case FRAME_ARITH_RGBA:
avctx->pix_fmt = PIX_FMT_RGB32;
planes = 4;
offset_ry += 4;
offs[3] = AV_RL32(buf + 9);
case FRAME_ARITH_RGB24:
case FRAME_U_RGB24:
if (frametype == FRAME_ARITH_RGB24 || frametype == FRAME_U_RGB24)
avctx->pix_fmt = PIX_FMT_RGB24;
if (avctx->get_buffer(avctx, p) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1;
}
offs[0] = offset_bv;
offs[1] = offset_gu;
offs[2] = offset_ry;
if (!l->rgb_planes) {
l->rgb_stride = FFALIGN(avctx->width, 16);
l->rgb_planes = av_malloc(l->rgb_stride * avctx->height * planes);
if (!l->rgb_planes) {
av_log(avctx, AV_LOG_ERROR, "cannot allocate temporary buffer\n");
return AVERROR(ENOMEM);
}
}
for (i = 0; i < planes; i++)
srcs[i] = l->rgb_planes + (i + 1) * l->rgb_stride * avctx->height - l->rgb_stride;
if (offset_ry >= buf_size ||
offset_gu >= buf_size ||
offset_bv >= buf_size ||
(planes == 4 && offs[3] >= buf_size)) {
av_log(avctx, AV_LOG_ERROR,
"Invalid frame offsets\n");
return AVERROR_INVALIDDATA;
}
for (i = 0; i < planes; i++)
lag_decode_arith_plane(l, srcs[i],
avctx->width, avctx->height,
-l->rgb_stride, buf + offs[i],
buf_size - offs[i]);
dst = p->data[0];
for (i = 0; i < planes; i++)
srcs[i] = l->rgb_planes + i * l->rgb_stride * avctx->height;
for (j = 0; j < avctx->height; j++) {
for (i = 0; i < avctx->width; i++) {
uint8_t r, g, b, a;
r = srcs[0][i];
g = srcs[1][i];
b = srcs[2][i];
r += g;
b += g;
if (frametype == FRAME_ARITH_RGBA) {
a = srcs[3][i];
AV_WN32(dst + i * 4, MKBETAG(a, r, g, b));
} else {
dst[i * 3 + 0] = r;
dst[i * 3 + 1] = g;
dst[i * 3 + 2] = b;
}
}
dst += p->linesize[0];
for (i = 0; i < planes; i++)
srcs[i] += l->rgb_stride;
}
break;
case FRAME_ARITH_YUY2:
avctx->pix_fmt = PIX_FMT_YUV422P;
if (avctx->get_buffer(avctx, p) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1;
}
if (offset_ry >= buf_size ||
offset_gu >= buf_size ||
offset_bv >= buf_size) {
av_log(avctx, AV_LOG_ERROR,
"Invalid frame offsets\n");
return AVERROR_INVALIDDATA;
}
lag_decode_arith_plane(l, p->data[0], avctx->width, avctx->height,
p->linesize[0], buf + offset_ry,
buf_size - offset_ry);
lag_decode_arith_plane(l, p->data[2], avctx->width / 2,
avctx->height, p->linesize[2],
buf + offset_gu, buf_size - offset_gu);
lag_decode_arith_plane(l, p->data[1], avctx->width / 2,
avctx->height, p->linesize[1],
buf + offset_bv, buf_size - offset_bv);
break;
case FRAME_ARITH_YV12:
avctx->pix_fmt = PIX_FMT_YUV420P;
if (avctx->get_buffer(avctx, p) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1;
}
if (offset_ry >= buf_size ||
offset_gu >= buf_size ||
offset_bv >= buf_size) {
av_log(avctx, AV_LOG_ERROR,
"Invalid frame offsets\n");
return AVERROR_INVALIDDATA;
}
lag_decode_arith_plane(l, p->data[0], avctx->width, avctx->height,
p->linesize[0], buf + offset_ry,
buf_size - offset_ry);
lag_decode_arith_plane(l, p->data[2], avctx->width / 2,
avctx->height / 2, p->linesize[2],
buf + offset_gu, buf_size - offset_gu);
lag_decode_arith_plane(l, p->data[1], avctx->width / 2,
avctx->height / 2, p->linesize[1],
buf + offset_bv, buf_size - offset_bv);
break;
default:
av_log(avctx, AV_LOG_ERROR,
"Unsupported Lagarith frame type: %#x\n", frametype);
return -1;
}
*picture = *p;
*data_size = sizeof(AVFrame);
return buf_size;
}
| 16,375 |
qemu | 36bcac16fdd6ecb75314db06171f54dcd400ab8c | 1 | static int sd_parse_uri(BDRVSheepdogState *s, const char *filename,
char *vdi, uint32_t *snapid, char *tag)
{
URI *uri;
QueryParams *qp = NULL;
int ret = 0;
uri = uri_parse(filename);
if (!uri) {
return -EINVAL;
}
/* transport */
if (!strcmp(uri->scheme, "sheepdog")) {
s->is_unix = false;
} else if (!strcmp(uri->scheme, "sheepdog+tcp")) {
s->is_unix = false;
} else if (!strcmp(uri->scheme, "sheepdog+unix")) {
s->is_unix = true;
} else {
ret = -EINVAL;
goto out;
}
if (uri->path == NULL || !strcmp(uri->path, "/")) {
ret = -EINVAL;
goto out;
}
if (g_strlcpy(vdi, uri->path + 1, SD_MAX_VDI_LEN) >= SD_MAX_VDI_LEN) {
ret = -EINVAL;
goto out;
}
qp = query_params_parse(uri->query);
if (qp->n > 1 || (s->is_unix && !qp->n) || (!s->is_unix && qp->n)) {
ret = -EINVAL;
goto out;
}
if (s->is_unix) {
/* sheepdog+unix:///vdiname?socket=path */
if (uri->server || uri->port || strcmp(qp->p[0].name, "socket")) {
ret = -EINVAL;
goto out;
}
s->host_spec = g_strdup(qp->p[0].value);
} else {
/* sheepdog[+tcp]://[host:port]/vdiname */
s->host_spec = g_strdup_printf("%s:%d", uri->server ?: SD_DEFAULT_ADDR,
uri->port ?: SD_DEFAULT_PORT);
}
/* snapshot tag */
if (uri->fragment) {
if (!sd_parse_snapid_or_tag(uri->fragment, snapid, tag)) {
ret = -EINVAL;
goto out;
}
} else {
*snapid = CURRENT_VDI_ID; /* search current vdi */
}
out:
if (qp) {
query_params_free(qp);
}
uri_free(uri);
return ret;
}
| 16,376 |
qemu | 9b4e38fe6a35890bb1d995316d7be08de0b30ee5 | 1 | static void test_visitor_in_errors(TestInputVisitorData *data,
const void *unused)
{
TestStruct *p = NULL;
Error *err = NULL;
Visitor *v;
strList *q = NULL;
UserDefTwo *r = NULL;
WrapAlternate *s = NULL;
v = visitor_input_test_init(data, "{ 'integer': false, 'boolean': 'foo', "
"'string': -42 }");
    visit_type_TestStruct(v, NULL, &p, &err);
    error_free_or_abort(&err);
    g_assert(!p);

    v = visitor_input_test_init(data, "[ '1', '2', false, '3' ]");
    visit_type_strList(v, NULL, &q, &err);
    error_free_or_abort(&err);
    g_assert(!q);
} | 16,377 |
qemu | e92f0e1910f0655a0edd8d87c5a7262d36517a89 | 1 | BlockAIOCB *bdrv_aio_flush(BlockDriverState *bs,
BlockCompletionFunc *cb, void *opaque)
{
    Coroutine *co;
    BlockAIOCBCoroutine *acb;

    trace_bdrv_aio_flush(bs, opaque);
/* Matched by bdrv_co_complete's bdrv_dec_in_flight. */
bdrv_inc_in_flight(bs);
acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
acb->need_bh = true;
acb->req.error = -EINPROGRESS;
co = qemu_coroutine_create(bdrv_aio_flush_co_entry, acb);
qemu_coroutine_enter(co);
bdrv_co_maybe_schedule_bh(acb);
return &acb->common;
}
| 16,378 |
FFmpeg | c4c3a3d580fa2f9d65af3fb6027ffbf9a28e9d55 | 1 | static int filter_packet(void *log_ctx, AVPacket *pkt,
AVFormatContext *fmt_ctx, AVBitStreamFilterContext *bsf_ctx)
{
AVCodecContext *enc_ctx = fmt_ctx->streams[pkt->stream_index]->codec;
int ret;
while (bsf_ctx) {
AVPacket new_pkt = *pkt;
ret = av_bitstream_filter_filter(bsf_ctx, enc_ctx, NULL,
&new_pkt.data, &new_pkt.size,
pkt->data, pkt->size,
pkt->flags & AV_PKT_FLAG_KEY);
if (ret == 0 && new_pkt.data != pkt->data && new_pkt.destruct) {
if ((ret = av_copy_packet(&new_pkt, pkt)) < 0)
break;
ret = 1;
}
if (ret > 0) {
av_free_packet(pkt);
new_pkt.buf = av_buffer_create(new_pkt.data, new_pkt.size,
av_buffer_default_free, NULL, 0);
if (!new_pkt.buf)
break;
}
*pkt = new_pkt;
bsf_ctx = bsf_ctx->next;
}
if (ret < 0) {
av_log(log_ctx, AV_LOG_ERROR,
"Failed to filter bitstream with filter %s for stream %d in file '%s' with codec %s\n",
bsf_ctx->filter->name, pkt->stream_index, fmt_ctx->filename,
avcodec_get_name(enc_ctx->codec_id));
}
return ret;
}
| 16,379 |
qemu | a697a334b3c4d3250e6420f5d38550ea10eb5319 | 1 | VirtIODevice *virtio_net_init(DeviceState *dev, NICConf *conf,
virtio_net_conf *net)
{
VirtIONet *n;
n = (VirtIONet *)virtio_common_init("virtio-net", VIRTIO_ID_NET,
sizeof(struct virtio_net_config),
sizeof(VirtIONet));
n->vdev.get_config = virtio_net_get_config;
n->vdev.set_config = virtio_net_set_config;
n->vdev.get_features = virtio_net_get_features;
n->vdev.set_features = virtio_net_set_features;
n->vdev.bad_features = virtio_net_bad_features;
n->vdev.reset = virtio_net_reset;
n->vdev.set_status = virtio_net_set_status;
n->rx_vq = virtio_add_queue(&n->vdev, 256, virtio_net_handle_rx);
n->tx_vq = virtio_add_queue(&n->vdev, 256, virtio_net_handle_tx);
n->ctrl_vq = virtio_add_queue(&n->vdev, 64, virtio_net_handle_ctrl);
qemu_macaddr_default_if_unset(&conf->macaddr);
memcpy(&n->mac[0], &conf->macaddr, sizeof(n->mac));
n->status = VIRTIO_NET_S_LINK_UP;
n->nic = qemu_new_nic(&net_virtio_info, conf, dev->info->name, dev->id, n);
qemu_format_nic_info_str(&n->nic->nc, conf->macaddr.a);
n->tx_timer = qemu_new_timer(vm_clock, virtio_net_tx_timer, n);
n->tx_waiting = 0;
n->tx_timeout = net->txtimer;
n->tx_burst = net->txburst;
n->mergeable_rx_bufs = 0;
n->promisc = 1; /* for compatibility */
n->mac_table.macs = qemu_mallocz(MAC_TABLE_ENTRIES * ETH_ALEN);
n->vlans = qemu_mallocz(MAX_VLAN >> 3);
n->qdev = dev;
register_savevm(dev, "virtio-net", -1, VIRTIO_NET_VM_VERSION,
virtio_net_save, virtio_net_load, n);
n->vmstate = qemu_add_vm_change_state_handler(virtio_net_vmstate_change, n);
return &n->vdev;
}
| 16,380 |
qemu | 2c728dfef56d468a6a80b4dacdfb7109220d2546 | 1 | int cpu_x86_register(X86CPU *cpu, const char *cpu_model)
{
CPUX86State *env = &cpu->env;
x86_def_t def1, *def = &def1;
Error *error = NULL;
char *name, *features;
gchar **model_pieces;
memset(def, 0, sizeof(*def));
model_pieces = g_strsplit(cpu_model, ",", 2);
if (!model_pieces[0]) {
error_setg(&error, "Invalid/empty CPU model name");
goto out;
}
name = model_pieces[0];
features = model_pieces[1];
if (cpu_x86_find_by_name(def, name) < 0) {
error_setg(&error, "Unable to find CPU definition: %s", name);
goto out;
}
if (kvm_enabled()) {
def->kvm_features |= kvm_default_features;
}
def->ext_features |= CPUID_EXT_HYPERVISOR;
object_property_set_str(OBJECT(cpu), def->vendor, "vendor", &error);
object_property_set_int(OBJECT(cpu), def->level, "level", &error);
object_property_set_int(OBJECT(cpu), def->family, "family", &error);
object_property_set_int(OBJECT(cpu), def->model, "model", &error);
object_property_set_int(OBJECT(cpu), def->stepping, "stepping", &error);
env->cpuid_features = def->features;
env->cpuid_ext_features = def->ext_features;
env->cpuid_ext2_features = def->ext2_features;
env->cpuid_ext3_features = def->ext3_features;
object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", &error);
env->cpuid_kvm_features = def->kvm_features;
env->cpuid_svm_features = def->svm_features;
env->cpuid_ext4_features = def->ext4_features;
env->cpuid_7_0_ebx_features = def->cpuid_7_0_ebx_features;
env->cpuid_xlevel2 = def->xlevel2;
object_property_set_int(OBJECT(cpu), (int64_t)def->tsc_khz * 1000,
"tsc-frequency", &error);
object_property_set_str(OBJECT(cpu), def->model_id, "model-id", &error);
if (error) {
goto out;
}
cpu_x86_parse_featurestr(cpu, features, &error);
out:
g_strfreev(model_pieces);
if (error) {
fprintf(stderr, "%s\n", error_get_pretty(error));
error_free(error);
return -1;
}
return 0;
}
| 16,381 |
FFmpeg | eb36ee1ee11c269e24ad4595fadf50e78463d5e2 | 1 | static int srt_encode_frame(AVCodecContext *avctx,
unsigned char *buf, int bufsize, const AVSubtitle *sub)
{
SRTContext *s = avctx->priv_data;
ASSDialog *dialog;
int i, len, num;
s->ptr = s->buffer;
s->end = s->ptr + sizeof(s->buffer);
for (i=0; i<sub->num_rects; i++) {
if (sub->rects[i]->type != SUBTITLE_ASS) {
av_log(avctx, AV_LOG_ERROR, "Only SUBTITLE_ASS type supported.\n");
return AVERROR(ENOSYS);
}
dialog = ff_ass_split_dialog(s->ass_ctx, sub->rects[i]->ass, 0, &num);
for (; dialog && num--; dialog++) {
if (avctx->codec->id == CODEC_ID_SRT) {
int sh, sm, ss, sc = 10 * dialog->start;
int eh, em, es, ec = 10 * dialog->end;
sh = sc/3600000; sc -= 3600000*sh;
sm = sc/ 60000; sc -= 60000*sm;
ss = sc/ 1000; sc -= 1000*ss;
eh = ec/3600000; ec -= 3600000*eh;
em = ec/ 60000; ec -= 60000*em;
es = ec/ 1000; ec -= 1000*es;
srt_print(s,"%d\r\n%02d:%02d:%02d,%03d --> %02d:%02d:%02d,%03d\r\n",
++s->count, sh, sm, ss, sc, eh, em, es, ec);
}
s->alignment_applied = 0;
s->dialog_start = s->ptr - 2;
srt_style_apply(s, dialog->style);
ff_ass_split_override_codes(&srt_callbacks, s, dialog->text);
}
}
if (s->ptr == s->buffer)
return 0;
len = av_strlcpy(buf, s->buffer, bufsize);
if (len > bufsize-1) {
av_log(avctx, AV_LOG_ERROR, "Buffer too small for ASS event.\n");
return -1;
}
return len;
}
| 16,382 |
qemu | c919297379e9980c2bcc4d2053addbc1fd6d762b | 1 | static int img_info(int argc, char **argv)
{
int c;
OutputFormat output_format = OFORMAT_HUMAN;
bool chain = false;
const char *filename, *fmt, *output;
ImageInfoList *list;
bool image_opts = false;
fmt = NULL;
output = NULL;
for(;;) {
int option_index = 0;
static const struct option long_options[] = {
{"help", no_argument, 0, 'h'},
{"format", required_argument, 0, 'f'},
{"output", required_argument, 0, OPTION_OUTPUT},
{"backing-chain", no_argument, 0, OPTION_BACKING_CHAIN},
{"object", required_argument, 0, OPTION_OBJECT},
{"image-opts", no_argument, 0, OPTION_IMAGE_OPTS},
{0, 0, 0, 0}
};
c = getopt_long(argc, argv, "f:h",
long_options, &option_index);
if (c == -1) {
break;
}
switch(c) {
case '?':
case 'h':
help();
break;
case 'f':
fmt = optarg;
break;
case OPTION_OUTPUT:
output = optarg;
break;
case OPTION_BACKING_CHAIN:
chain = true;
break;
case OPTION_OBJECT: {
QemuOpts *opts;
opts = qemu_opts_parse_noisily(&qemu_object_opts,
optarg, true);
if (!opts) {
return 1;
}
} break;
case OPTION_IMAGE_OPTS:
image_opts = true;
break;
}
}
if (optind != argc - 1) {
error_exit("Expecting one image file name");
}
filename = argv[optind++];
if (output && !strcmp(output, "json")) {
output_format = OFORMAT_JSON;
} else if (output && !strcmp(output, "human")) {
output_format = OFORMAT_HUMAN;
} else if (output) {
error_report("--output must be used with human or json as argument.");
return 1;
}
if (qemu_opts_foreach(&qemu_object_opts,
user_creatable_add_opts_foreach,
NULL, NULL)) {
return 1;
}
list = collect_image_info_list(image_opts, filename, fmt, chain);
if (!list) {
return 1;
}
switch (output_format) {
case OFORMAT_HUMAN:
dump_human_image_info_list(list);
break;
case OFORMAT_JSON:
if (chain) {
dump_json_image_info_list(list);
} else {
dump_json_image_info(list->value);
}
break;
}
qapi_free_ImageInfoList(list);
return 0;
}
| 16,383 |
FFmpeg | 590743101dc934043f34013f1c9bb9fb261355b0 | 1 | void pp_postprocess(const uint8_t * src[3], const int srcStride[3],
uint8_t * dst[3], const int dstStride[3],
int width, int height,
const QP_STORE_T *QP_store, int QPStride,
pp_mode *vm, void *vc, int pict_type)
{
int mbWidth = (width+15)>>4;
int mbHeight= (height+15)>>4;
PPMode *mode = vm;
PPContext *c = vc;
int minStride= FFMAX(FFABS(srcStride[0]), FFABS(dstStride[0]));
int absQPStride = FFABS(QPStride);
// c->stride and c->QPStride are always positive
if(c->stride < minStride || c->qpStride < absQPStride)
reallocBuffers(c, width, height,
FFMAX(minStride, c->stride),
FFMAX(c->qpStride, absQPStride));
if(!QP_store || (mode->lumMode & FORCE_QUANT)){
int i;
QP_store= c->forcedQPTable;
absQPStride = QPStride = 0;
if(mode->lumMode & FORCE_QUANT)
for(i=0; i<mbWidth; i++) c->forcedQPTable[i]= mode->forcedQuant;
else
for(i=0; i<mbWidth; i++) c->forcedQPTable[i]= 1;
}
if(pict_type & PP_PICT_TYPE_QP2){
int i;
const int count= FFMAX(mbHeight * absQPStride, mbWidth);
for(i=0; i<(count>>2); i++){
((uint32_t*)c->stdQPTable)[i] = (((const uint32_t*)QP_store)[i]>>1) & 0x7F7F7F7F;
}
for(i<<=2; i<count; i++){
c->stdQPTable[i] = QP_store[i]>>1;
}
QP_store= c->stdQPTable;
QPStride= absQPStride;
}
if(0){
int x,y;
for(y=0; y<mbHeight; y++){
for(x=0; x<mbWidth; x++){
av_log(c, AV_LOG_INFO, "%2d ", QP_store[x + y*QPStride]);
}
av_log(c, AV_LOG_INFO, "\n");
}
av_log(c, AV_LOG_INFO, "\n");
}
if((pict_type&7)!=3){
if (QPStride >= 0){
int i;
const int count= FFMAX(mbHeight * QPStride, mbWidth);
for(i=0; i<(count>>2); i++){
((uint32_t*)c->nonBQPTable)[i] = ((const uint32_t*)QP_store)[i] & 0x3F3F3F3F;
}
for(i<<=2; i<count; i++){
c->nonBQPTable[i] = QP_store[i] & 0x3F;
}
} else {
int i,j;
for(i=0; i<mbHeight; i++) {
for(j=0; j<absQPStride; j++) {
c->nonBQPTable[i*absQPStride+j] = QP_store[i*QPStride+j] & 0x3F;
}
}
}
}
av_log(c, AV_LOG_DEBUG, "using npp filters 0x%X/0x%X\n",
mode->lumMode, mode->chromMode);
postProcess(src[0], srcStride[0], dst[0], dstStride[0],
width, height, QP_store, QPStride, 0, mode, c);
if (!(src[1] && src[2] && dst[1] && dst[2]))
return;
width = (width )>>c->hChromaSubSample;
height = (height)>>c->vChromaSubSample;
if(mode->chromMode){
postProcess(src[1], srcStride[1], dst[1], dstStride[1],
width, height, QP_store, QPStride, 1, mode, c);
postProcess(src[2], srcStride[2], dst[2], dstStride[2],
width, height, QP_store, QPStride, 2, mode, c);
}
else if(srcStride[1] == dstStride[1] && srcStride[2] == dstStride[2]){
linecpy(dst[1], src[1], height, srcStride[1]);
linecpy(dst[2], src[2], height, srcStride[2]);
}else{
int y;
for(y=0; y<height; y++){
memcpy(&(dst[1][y*dstStride[1]]), &(src[1][y*srcStride[1]]), width);
memcpy(&(dst[2][y*dstStride[2]]), &(src[2][y*srcStride[2]]), width);
}
}
}
| 16,384 |
qemu | 74b4c74d5efb0a489bdf0acc5b5d0197167e7649 | 0 | static int handle_sigp(S390CPU *cpu, uint8_t ipa1, uint32_t ipb)
{
CPUS390XState *env = &cpu->env;
const uint8_t r1 = ipa1 >> 4;
const uint8_t r3 = ipa1 & 0x0f;
int ret;
uint8_t order;
uint64_t *status_reg;
uint64_t param;
S390CPU *dst_cpu = NULL;
cpu_synchronize_state(CPU(cpu));
/* get order code */
order = decode_basedisp_rs(env, ipb, NULL)
& SIGP_ORDER_MASK;
status_reg = &env->regs[r1];
param = (r1 % 2) ? env->regs[r1] : env->regs[r1 + 1];
if (qemu_mutex_trylock(&qemu_sigp_mutex)) {
ret = SIGP_CC_BUSY;
goto out;
}
switch (order) {
case SIGP_SET_ARCH:
ret = sigp_set_architecture(cpu, param, status_reg);
break;
default:
/* all other sigp orders target a single vcpu */
dst_cpu = s390_cpu_addr2state(env->regs[r3]);
ret = handle_sigp_single_dst(dst_cpu, order, param, status_reg);
}
qemu_mutex_unlock(&qemu_sigp_mutex);
out:
trace_kvm_sigp_finished(order, CPU(cpu)->cpu_index,
dst_cpu ? CPU(dst_cpu)->cpu_index : -1, ret);
if (ret >= 0) {
setcc(cpu, ret);
return 0;
}
return ret;
}
| 16,386 |
qemu | ef1e1e0782e99c9dcf2b35e5310cdd8ca9211374 | 0 | static void bt_hci_done(struct HCIInfo *info)
{
struct bt_hci_s *hci = hci_from_info(info);
int handle;
bt_device_done(&hci->device);
if (hci->device.lmp_name)
g_free((void *) hci->device.lmp_name);
/* Be gentle and send DISCONNECT to all connected peers and those
* currently waiting for us to accept or reject a connection request.
* This frees the links. */
if (hci->conn_req_host) {
bt_hci_connection_reject(hci,
hci->conn_req_host, HCI_OE_POWER_OFF);
return;
}
for (handle = HCI_HANDLE_OFFSET;
handle < (HCI_HANDLE_OFFSET | HCI_HANDLES_MAX); handle ++)
if (!bt_hci_handle_bad(hci, handle))
bt_hci_disconnect(hci, handle, HCI_OE_POWER_OFF);
/* TODO: this is not enough actually, there may be slaves from whom
* we have requested a connection who will soon (or not) respond with
* an accept or a reject, so we should also check if hci->lm.connecting
* is non-zero and if so, avoid freeing the hci but otherwise disappear
* from all qemu social life (e.g. stop scanning and request to be
* removed from s->device.net) and arrange for
* s->device.lmp_connection_complete to free the remaining bits once
* hci->lm.awaiting_bdaddr[] is empty. */
timer_free(hci->lm.inquiry_done);
timer_free(hci->lm.inquiry_next);
timer_free(hci->conn_accept_timer);
g_free(hci);
}
| 16,387 |
FFmpeg | 474d858fd9551b45a17e1f3a4322de1a88e749b9 | 0 | static int get_port(const struct sockaddr_storage *ss)
{
sockaddr_union ssu = (sockaddr_union){.storage = *ss};
if (ss->ss_family == AF_INET)
return ntohs(ssu.in.sin_port);
#if HAVE_STRUCT_SOCKADDR_IN6
if (ss->ss_family == AF_INET6)
return ntohs(ssu.in6.sin6_port);
#endif
return 0;
}
| 16,388 |
FFmpeg | 631c56a8e46dea41585f3e7b3ef9c52b49faa385 | 0 | int avformat_network_deinit(void)
{
#if CONFIG_NETWORK
ff_network_close();
ff_tls_deinit();
ff_network_inited_globally = 0;
#endif
return 0;
}
| 16,389 |
FFmpeg | 72dbc610be3272ba36603f78a39cc2d2d8fe0cc3 | 0 | void ff_avg_h264_qpel8_mc02_msa(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride)
{
avc_luma_vt_and_aver_dst_8x8_msa(src - (stride * 2), stride, dst, stride);
}
| 16,390 |
qemu | 6dd836f5d32b989e18c6dda655a26f4d73a52f6a | 0 | static void gen_spr_vtb(CPUPPCState *env)
{
spr_register(env, SPR_VTB, "VTB",
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_tbl, SPR_NOACCESS,
0x00000000);
}
| 16,391 |
qemu | 991f8f0c91d65cebf51fa931450e02b0d5209012 | 0 | static void bswap_note(struct elf_note *en)
{
bswap32s(&en->n_namesz);
bswap32s(&en->n_descsz);
bswap32s(&en->n_type);
}
| 16,392 |
qemu | f8b6cc0070aab8b75bd082582c829be1353f395f | 0 | void ide_init_drive(IDEState *s, DriveInfo *dinfo,
const char *version, const char *serial)
{
int cylinders, heads, secs;
uint64_t nb_sectors;
s->bs = dinfo->bdrv;
bdrv_get_geometry(s->bs, &nb_sectors);
bdrv_guess_geometry(s->bs, &cylinders, &heads, &secs);
s->cylinders = cylinders;
s->heads = heads;
s->sectors = secs;
s->nb_sectors = nb_sectors;
/* The SMART values should be preserved across power cycles
but they aren't. */
s->smart_enabled = 1;
s->smart_autosave = 1;
s->smart_errors = 0;
s->smart_selftest_count = 0;
if (bdrv_get_type_hint(s->bs) == BDRV_TYPE_CDROM) {
s->is_cdrom = 1;
bdrv_set_change_cb(s->bs, cdrom_change_cb, s);
}
if (serial && *serial) {
strncpy(s->drive_serial_str, serial, sizeof(s->drive_serial_str));
} else {
snprintf(s->drive_serial_str, sizeof(s->drive_serial_str),
"QM%05d", s->drive_serial);
}
if (version) {
pstrcpy(s->version, sizeof(s->version), version);
} else {
pstrcpy(s->version, sizeof(s->version), QEMU_VERSION);
}
ide_reset(s);
}
| 16,393 |
qemu | 177b75104da3e3a9af84975c32a44782d903c41f | 0 | static inline int dmg_read_chunk(BlockDriverState *bs, uint64_t sector_num)
{
BDRVDMGState *s = bs->opaque;
if (!is_sector_in_chunk(s, s->current_chunk, sector_num)) {
int ret;
uint32_t chunk = search_chunk(s, sector_num);
#ifdef CONFIG_BZIP2
uint64_t total_out;
#endif
if (chunk >= s->n_chunks) {
return -1;
}
s->current_chunk = s->n_chunks;
switch (s->types[chunk]) { /* block entry type */
case 0x80000005: { /* zlib compressed */
/* we need to buffer, because only the chunk as whole can be
* inflated. */
ret = bdrv_pread(bs->file, s->offsets[chunk],
s->compressed_chunk, s->lengths[chunk]);
if (ret != s->lengths[chunk]) {
return -1;
}
s->zstream.next_in = s->compressed_chunk;
s->zstream.avail_in = s->lengths[chunk];
s->zstream.next_out = s->uncompressed_chunk;
s->zstream.avail_out = 512 * s->sectorcounts[chunk];
ret = inflateReset(&s->zstream);
if (ret != Z_OK) {
return -1;
}
ret = inflate(&s->zstream, Z_FINISH);
if (ret != Z_STREAM_END ||
s->zstream.total_out != 512 * s->sectorcounts[chunk]) {
return -1;
}
break; }
#ifdef CONFIG_BZIP2
case 0x80000006: /* bzip2 compressed */
/* we need to buffer, because only the chunk as whole can be
* inflated. */
ret = bdrv_pread(bs->file, s->offsets[chunk],
s->compressed_chunk, s->lengths[chunk]);
if (ret != s->lengths[chunk]) {
return -1;
}
ret = BZ2_bzDecompressInit(&s->bzstream, 0, 0);
if (ret != BZ_OK) {
return -1;
}
s->bzstream.next_in = (char *)s->compressed_chunk;
s->bzstream.avail_in = (unsigned int) s->lengths[chunk];
s->bzstream.next_out = (char *)s->uncompressed_chunk;
s->bzstream.avail_out = (unsigned int) 512 * s->sectorcounts[chunk];
ret = BZ2_bzDecompress(&s->bzstream);
total_out = ((uint64_t)s->bzstream.total_out_hi32 << 32) +
s->bzstream.total_out_lo32;
BZ2_bzDecompressEnd(&s->bzstream);
if (ret != BZ_STREAM_END ||
total_out != 512 * s->sectorcounts[chunk]) {
return -1;
}
break;
#endif /* CONFIG_BZIP2 */
case 1: /* copy */
ret = bdrv_pread(bs->file, s->offsets[chunk],
s->uncompressed_chunk, s->lengths[chunk]);
if (ret != s->lengths[chunk]) {
return -1;
}
break;
case 2: /* zero */
memset(s->uncompressed_chunk, 0, 512 * s->sectorcounts[chunk]);
break;
}
s->current_chunk = chunk;
}
return 0;
}
| 16,394 |
qemu | 56a6f02b8ce1fe41a2a9077593e46eca7d98267d | 0 | static void qmp_output_add_obj(QmpOutputVisitor *qov, const char *name,
QObject *value)
{
QStackEntry *e = QTAILQ_FIRST(&qov->stack);
QObject *cur = e ? e->value : NULL;
if (!cur) {
/* FIXME we should require the user to reset the visitor, rather
* than throwing away the previous root */
qobject_decref(qov->root);
qov->root = value;
} else {
switch (qobject_type(cur)) {
case QTYPE_QDICT:
assert(name);
qdict_put_obj(qobject_to_qdict(cur), name, value);
break;
case QTYPE_QLIST:
qlist_append_obj(qobject_to_qlist(cur), value);
break;
default:
g_assert_not_reached();
}
}
}
| 16,395 |
qemu | 5666dd19dd95d966be00d0c3e7efdfaecc096ff3 | 0 | static GenericList *qapi_dealloc_next_list(Visitor *v, GenericList **list,
Error **errp)
{
GenericList *retval = *list;
g_free(retval->value);
*list = retval->next;
return retval;
}
| 16,397 |
qemu | fc19f8a02e45c4d8ad24dd7eb374330b03dfc28e | 0 | static int nbd_co_discard(BlockDriverState *bs, int64_t sector_num,
int nb_sectors)
{
BDRVNBDState *s = bs->opaque;
struct nbd_request request;
struct nbd_reply reply;
if (!(s->nbdflags & NBD_FLAG_SEND_TRIM)) {
return 0;
}
request.type = NBD_CMD_TRIM;
    request.from = sector_num * 512;
request.len = nb_sectors * 512;
nbd_coroutine_start(s, &request);
if (nbd_co_send_request(s, &request, NULL, 0) == -1) {
reply.error = errno;
} else {
nbd_co_receive_reply(s, &request, &reply, NULL, 0);
}
nbd_coroutine_end(s, &request);
return -reply.error;
}
| 16,398 |
qemu | 897fa7cff21a98b260a5b3e73eae39273fa60272 | 0 | static bool memory_region_access_valid(MemoryRegion *mr,
target_phys_addr_t addr,
unsigned size)
{
if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
return false;
}
/* Treat zero as compatibility all valid */
if (!mr->ops->valid.max_access_size) {
return true;
}
if (size > mr->ops->valid.max_access_size
|| size < mr->ops->valid.min_access_size) {
return false;
}
return true;
}
| 16,399 |
qemu | d7dccd11683d17e4658606349544312a0769ecea | 0 | static int pci_pcnet_init(PCIDevice *pci_dev)
{
PCIPCNetState *d = DO_UPCAST(PCIPCNetState, pci_dev, pci_dev);
PCNetState *s = &d->state;
uint8_t *pci_conf;
#if 0
printf("sizeof(RMD)=%d, sizeof(TMD)=%d\n",
sizeof(struct pcnet_RMD), sizeof(struct pcnet_TMD));
#endif
pci_conf = pci_dev->config;
pci_config_set_vendor_id(pci_conf, PCI_VENDOR_ID_AMD);
pci_config_set_device_id(pci_conf, PCI_DEVICE_ID_AMD_LANCE);
pci_set_word(pci_conf + PCI_STATUS,
PCI_STATUS_FAST_BACK | PCI_STATUS_DEVSEL_MEDIUM);
pci_conf[PCI_REVISION_ID] = 0x10;
pci_config_set_class(pci_conf, PCI_CLASS_NETWORK_ETHERNET);
pci_set_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID, 0x0);
pci_set_word(pci_conf + PCI_SUBSYSTEM_ID, 0x0);
pci_conf[PCI_INTERRUPT_PIN] = 1; // interrupt pin 0
pci_conf[PCI_MIN_GNT] = 0x06;
pci_conf[PCI_MAX_LAT] = 0xff;
/* Handler for memory-mapped I/O */
s->mmio_index =
cpu_register_io_memory(pcnet_mmio_read, pcnet_mmio_write, &d->state,
DEVICE_NATIVE_ENDIAN);
pci_register_bar(pci_dev, 0, PCNET_IOPORT_SIZE,
PCI_BASE_ADDRESS_SPACE_IO, pcnet_ioport_map);
pci_register_bar_simple(pci_dev, 1, PCNET_PNPMMIO_SIZE, 0, s->mmio_index);
s->irq = pci_dev->irq[0];
s->phys_mem_read = pci_physical_memory_read;
s->phys_mem_write = pci_physical_memory_write;
if (!pci_dev->qdev.hotplugged) {
static int loaded = 0;
if (!loaded) {
rom_add_option("pxe-pcnet.rom", -1);
loaded = 1;
}
}
return pcnet_common_init(&pci_dev->qdev, s, &net_pci_pcnet_info);
}
| 16,400 |
FFmpeg | d6604b29ef544793479d7fb4e05ef6622bb3e534 | 0 | static int dvvideo_encode_close(AVCodecContext *avctx)
{
av_frame_free(&avctx->coded_frame);
return 0;
}
| 16,401 |
qemu | 920557971b60e53c2f3f22e5d6c620ab1ed411fd | 0 | void ich9_pm_init(PCIDevice *lpc_pci, ICH9LPCPMRegs *pm, bool smm_enabled,
qemu_irq sci_irq)
{
memory_region_init(&pm->io, OBJECT(lpc_pci), "ich9-pm", ICH9_PMIO_SIZE);
memory_region_set_enabled(&pm->io, false);
memory_region_add_subregion(pci_address_space_io(lpc_pci),
0, &pm->io);
acpi_pm_tmr_init(&pm->acpi_regs, ich9_pm_update_sci_fn, &pm->io);
acpi_pm1_evt_init(&pm->acpi_regs, ich9_pm_update_sci_fn, &pm->io);
acpi_pm1_cnt_init(&pm->acpi_regs, &pm->io, pm->disable_s3, pm->disable_s4,
pm->s4_val);
acpi_gpe_init(&pm->acpi_regs, ICH9_PMIO_GPE0_LEN);
memory_region_init_io(&pm->io_gpe, OBJECT(lpc_pci), &ich9_gpe_ops, pm,
"acpi-gpe0", ICH9_PMIO_GPE0_LEN);
memory_region_add_subregion(&pm->io, ICH9_PMIO_GPE0_STS, &pm->io_gpe);
memory_region_init_io(&pm->io_smi, OBJECT(lpc_pci), &ich9_smi_ops, pm,
"acpi-smi", 8);
memory_region_add_subregion(&pm->io, ICH9_PMIO_SMI_EN, &pm->io_smi);
pm->smm_enabled = smm_enabled;
pm->irq = sci_irq;
qemu_register_reset(pm_reset, pm);
pm->powerdown_notifier.notify = pm_powerdown_req;
qemu_register_powerdown_notifier(&pm->powerdown_notifier);
acpi_cpu_hotplug_init(pci_address_space_io(lpc_pci), OBJECT(lpc_pci),
&pm->gpe_cpu, ICH9_CPU_HOTPLUG_IO_BASE);
if (pm->acpi_memory_hotplug.is_enabled) {
acpi_memory_hotplug_init(pci_address_space_io(lpc_pci), OBJECT(lpc_pci),
&pm->acpi_memory_hotplug);
}
}
| 16,402 |
qemu | 415fa2ea77b726ea5a5768d659881e919df1fcf2 | 0 | static inline int gen_intermediate_code_internal(CPUState *env,
TranslationBlock *tb,
int search_pc)
{
DisasContext dc1, *dc = &dc1;
uint8_t *pc_ptr;
uint16_t *gen_opc_end;
int flags, j, lj;
uint8_t *pc_start;
uint8_t *cs_base;
/* generate intermediate code */
pc_start = (uint8_t *)tb->pc;
cs_base = (uint8_t *)tb->cs_base;
flags = tb->flags;
dc->pe = env->cr[0] & CR0_PE_MASK;
dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
dc->ss32 = (flags >> HF_SS32_SHIFT) & 1;
dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1;
dc->f_st = 0;
dc->vm86 = (flags >> VM_SHIFT) & 1;
dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
dc->iopl = (flags >> IOPL_SHIFT) & 3;
dc->tf = (flags >> TF_SHIFT) & 1;
dc->singlestep_enabled = env->singlestep_enabled;
dc->cc_op = CC_OP_DYNAMIC;
dc->cs_base = cs_base;
dc->tb = tb;
dc->popl_esp_hack = 0;
/* select memory access functions */
dc->mem_index = 0;
if (flags & HF_SOFTMMU_MASK) {
if (dc->cpl == 3)
dc->mem_index = 6;
else
dc->mem_index = 3;
}
dc->jmp_opt = !(dc->tf || env->singlestep_enabled
#ifndef CONFIG_SOFT_MMU
|| (flags & HF_SOFTMMU_MASK)
#endif
);
gen_opc_ptr = gen_opc_buf;
gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
gen_opparam_ptr = gen_opparam_buf;
dc->is_jmp = DISAS_NEXT;
pc_ptr = pc_start;
lj = -1;
/* if irq were inhibited for the next instruction, we can disable
them here as it is simpler (otherwise jumps would have to
handled as special case) */
if (flags & HF_INHIBIT_IRQ_MASK) {
gen_op_reset_inhibit_irq();
}
for(;;) {
if (env->nb_breakpoints > 0) {
for(j = 0; j < env->nb_breakpoints; j++) {
if (env->breakpoints[j] == (unsigned long)pc_ptr) {
gen_debug(dc, pc_ptr - dc->cs_base);
break;
}
}
}
if (search_pc) {
j = gen_opc_ptr - gen_opc_buf;
if (lj < j) {
lj++;
while (lj < j)
gen_opc_instr_start[lj++] = 0;
}
gen_opc_pc[lj] = (uint32_t)pc_ptr;
gen_opc_cc_op[lj] = dc->cc_op;
gen_opc_instr_start[lj] = 1;
}
pc_ptr = disas_insn(dc, pc_ptr);
/* stop translation if indicated */
if (dc->is_jmp)
break;
/* if single step mode, we generate only one instruction and
generate an exception */
if (dc->tf || dc->singlestep_enabled) {
gen_op_jmp_im(pc_ptr - dc->cs_base);
gen_eob(dc);
break;
}
/* if too long translation, stop generation too */
if (gen_opc_ptr >= gen_opc_end ||
(pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32)) {
gen_op_jmp_im(pc_ptr - dc->cs_base);
gen_eob(dc);
break;
}
}
*gen_opc_ptr = INDEX_op_end;
/* we don't forget to fill the last values */
if (search_pc) {
j = gen_opc_ptr - gen_opc_buf;
lj++;
while (lj <= j)
gen_opc_instr_start[lj++] = 0;
}
#ifdef DEBUG_DISAS
if (loglevel) {
fprintf(logfile, "----------------\n");
fprintf(logfile, "IN: %s\n", lookup_symbol(pc_start));
disas(logfile, pc_start, pc_ptr - pc_start, 0, !dc->code32);
fprintf(logfile, "\n");
fprintf(logfile, "OP:\n");
dump_ops(gen_opc_buf, gen_opparam_buf);
fprintf(logfile, "\n");
}
#endif
/* optimize flag computations */
optimize_flags(gen_opc_buf, gen_opc_ptr - gen_opc_buf);
#ifdef DEBUG_DISAS
if (loglevel) {
fprintf(logfile, "AFTER FLAGS OPT:\n");
dump_ops(gen_opc_buf, gen_opparam_buf);
fprintf(logfile, "\n");
}
#endif
if (!search_pc)
tb->size = pc_ptr - pc_start;
return 0;
}
| 16,403 |
qemu | f53a829bb9ef14be800556cbc02d8b20fc1050a7 | 0 | static void nbd_detach_aio_context(BlockDriverState *bs)
{
BDRVNBDState *s = bs->opaque;
nbd_client_session_detach_aio_context(&s->client);
}
| 16,404 |
qemu | 7ad4c7200111d20eb97eed4f46b6026e3f0b0eef | 0 | void *g_malloc0_n(size_t nmemb, size_t size)
{
size_t sz;
void *ptr;
__coverity_negative_sink__(nmemb);
__coverity_negative_sink__(size);
sz = nmemb * size;
    ptr = __coverity_alloc__(sz);
__coverity_writeall0__(ptr);
__coverity_mark_as_afm_allocated__(ptr, AFM_free);
return ptr;
}
| 16,405 |
qemu | a8170e5e97ad17ca169c64ba87ae2f53850dab4c | 0 | static uint32_t nvram_readb (void *opaque, target_phys_addr_t addr)
{
M48t59State *NVRAM = opaque;
uint32_t retval;
retval = m48t59_read(NVRAM, addr);
return retval;
}
| 16,407 |
qemu | 9e85cd5ce0d3fc99d910428c9fd9d267764d341b | 0 | static void coroutine_fn commit_run(void *opaque)
{
CommitBlockJob *s = opaque;
BlockDriverState *active = s->active;
BlockDriverState *top = s->top;
BlockDriverState *base = s->base;
BlockDriverState *overlay_bs;
int64_t sector_num, end;
int ret = 0;
int n = 0;
void *buf;
int bytes_written = 0;
int64_t base_len;
ret = s->common.len = bdrv_getlength(top);
if (s->common.len < 0) {
goto exit_restore_reopen;
}
ret = base_len = bdrv_getlength(base);
if (base_len < 0) {
goto exit_restore_reopen;
}
if (base_len < s->common.len) {
ret = bdrv_truncate(base, s->common.len);
if (ret) {
goto exit_restore_reopen;
}
}
end = s->common.len >> BDRV_SECTOR_BITS;
buf = qemu_blockalign(top, COMMIT_BUFFER_SIZE);
for (sector_num = 0; sector_num < end; sector_num += n) {
uint64_t delay_ns = 0;
bool copy;
wait:
/* Note that even when no rate limit is applied we need to yield
* with no pending I/O here so that bdrv_drain_all() returns.
*/
block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
if (block_job_is_cancelled(&s->common)) {
break;
}
/* Copy if allocated above the base */
ret = bdrv_is_allocated_above(top, base, sector_num,
COMMIT_BUFFER_SIZE / BDRV_SECTOR_SIZE,
&n);
copy = (ret == 1);
trace_commit_one_iteration(s, sector_num, n, ret);
if (copy) {
if (s->common.speed) {
delay_ns = ratelimit_calculate_delay(&s->limit, n);
if (delay_ns > 0) {
goto wait;
}
}
ret = commit_populate(top, base, sector_num, n, buf);
bytes_written += n * BDRV_SECTOR_SIZE;
}
if (ret < 0) {
if (s->on_error == BLOCKDEV_ON_ERROR_STOP ||
s->on_error == BLOCKDEV_ON_ERROR_REPORT||
(s->on_error == BLOCKDEV_ON_ERROR_ENOSPC && ret == -ENOSPC)) {
goto exit_free_buf;
} else {
n = 0;
continue;
}
}
/* Publish progress */
s->common.offset += n * BDRV_SECTOR_SIZE;
}
ret = 0;
if (!block_job_is_cancelled(&s->common) && sector_num == end) {
/* success */
ret = bdrv_drop_intermediate(active, top, base, s->backing_file_str);
}
exit_free_buf:
qemu_vfree(buf);
exit_restore_reopen:
/* restore base open flags here if appropriate (e.g., change the base back
* to r/o). These reopens do not need to be atomic, since we won't abort
* even on failure here */
if (s->base_flags != bdrv_get_flags(base)) {
bdrv_reopen(base, s->base_flags, NULL);
}
overlay_bs = bdrv_find_overlay(active, top);
if (overlay_bs && s->orig_overlay_flags != bdrv_get_flags(overlay_bs)) {
bdrv_reopen(overlay_bs, s->orig_overlay_flags, NULL);
}
g_free(s->backing_file_str);
block_job_completed(&s->common, ret);
}
| 16,413 |
qemu | 720fdd6fa92df9041316e94816ab7e56abaed4e9 | 0 | static int nvme_start_ctrl(NvmeCtrl *n)
{
uint32_t page_bits = NVME_CC_MPS(n->bar.cc) + 12;
uint32_t page_size = 1 << page_bits;
if (n->cq[0] || n->sq[0] || !n->bar.asq || !n->bar.acq ||
n->bar.asq & (page_size - 1) || n->bar.acq & (page_size - 1) ||
NVME_CC_MPS(n->bar.cc) < NVME_CAP_MPSMIN(n->bar.cap) ||
NVME_CC_MPS(n->bar.cc) > NVME_CAP_MPSMAX(n->bar.cap) ||
NVME_CC_IOCQES(n->bar.cc) < NVME_CTRL_CQES_MIN(n->id_ctrl.cqes) ||
NVME_CC_IOCQES(n->bar.cc) > NVME_CTRL_CQES_MAX(n->id_ctrl.cqes) ||
NVME_CC_IOSQES(n->bar.cc) < NVME_CTRL_SQES_MIN(n->id_ctrl.sqes) ||
NVME_CC_IOSQES(n->bar.cc) > NVME_CTRL_SQES_MAX(n->id_ctrl.sqes) ||
!NVME_AQA_ASQS(n->bar.aqa) || NVME_AQA_ASQS(n->bar.aqa) > 4095 ||
!NVME_AQA_ACQS(n->bar.aqa) || NVME_AQA_ACQS(n->bar.aqa) > 4095) {
return -1;
}
n->page_bits = page_bits;
n->page_size = page_size;
n->max_prp_ents = n->page_size / sizeof(uint64_t);
n->cqe_size = 1 << NVME_CC_IOCQES(n->bar.cc);
n->sqe_size = 1 << NVME_CC_IOSQES(n->bar.cc);
nvme_init_cq(&n->admin_cq, n, n->bar.acq, 0, 0,
NVME_AQA_ACQS(n->bar.aqa) + 1, 1);
nvme_init_sq(&n->admin_sq, n, n->bar.asq, 0, 0,
NVME_AQA_ASQS(n->bar.aqa) + 1);
return 0;
}
| 16,416 |
qemu | e8dd1d9c396104f0fac4b39a701143df49df2a74 | 0 | static void netmap_send(void *opaque)
{
NetmapState *s = opaque;
struct netmap_ring *ring = s->me.rx;
    /* Keep sending while there are available packets in the netmap
       RX ring and the forwarding path towards the peer is open. */
while (!nm_ring_empty(ring) && qemu_can_send_packet(&s->nc)) {
uint32_t i;
uint32_t idx;
bool morefrag;
int iovcnt = 0;
int iovsize;
do {
i = ring->cur;
idx = ring->slot[i].buf_idx;
morefrag = (ring->slot[i].flags & NS_MOREFRAG);
s->iov[iovcnt].iov_base = (u_char *)NETMAP_BUF(ring, idx);
s->iov[iovcnt].iov_len = ring->slot[i].len;
iovcnt++;
ring->cur = ring->head = nm_ring_next(ring, i);
} while (!nm_ring_empty(ring) && morefrag);
if (unlikely(nm_ring_empty(ring) && morefrag)) {
RD(5, "[netmap_send] ran out of slots, with a pending"
"incomplete packet\n");
}
iovsize = qemu_sendv_packet_async(&s->nc, s->iov, iovcnt,
netmap_send_completed);
if (iovsize == 0) {
/* The peer does not receive anymore. Packet is queued, stop
* reading from the backend until netmap_send_completed()
*/
netmap_read_poll(s, false);
break;
}
}
}
| 16,417 |
qemu | a8170e5e97ad17ca169c64ba87ae2f53850dab4c | 0 | static uint64_t gpio_read(void *opaque, target_phys_addr_t addr, unsigned size)
{
struct gpio_state_t *s = opaque;
uint32_t r = 0;
addr >>= 2;
switch (addr)
{
case R_PA_DIN:
r = s->regs[RW_PA_DOUT] & s->regs[RW_PA_OE];
/* Encode pins from the nand. */
r |= s->nand->rdy << 7;
break;
case R_PD_DIN:
r = s->regs[RW_PD_DOUT] & s->regs[RW_PD_OE];
/* Encode temp sensor pins. */
r |= (!!(s->tempsensor.shiftreg & 0x10000)) << 4;
break;
default:
r = s->regs[addr];
break;
}
return r;
D(printf("%s %x=%x\n", __func__, addr, r));
}
| 16,418 |
qemu | 93587e3af3a259deac89c12863d93653d69d22b8 | 0 | static void vfio_probe_igd_bar4_quirk(VFIOPCIDevice *vdev, int nr)
{
struct vfio_region_info *rom = NULL, *opregion = NULL,
*host = NULL, *lpc = NULL;
VFIOQuirk *quirk;
VFIOIGDQuirk *igd;
PCIDevice *lpc_bridge;
int i, ret, ggms_mb, gms_mb = 0, gen;
uint64_t *bdsm_size;
uint32_t gmch;
uint16_t cmd_orig, cmd;
Error *err = NULL;
/* This must be an Intel VGA device. */
if (!vfio_pci_is(vdev, PCI_VENDOR_ID_INTEL, PCI_ANY_ID) ||
!vfio_is_vga(vdev) || nr != 4) {
return;
}
/*
* IGD is not a standard, they like to change their specs often. We
     * only attempt to support back to SandyBridge and we hope that newer
* devices maintain compatibility with generation 8.
*/
gen = igd_gen(vdev);
if (gen != 6 && gen != 8) {
error_report("IGD device %s is unsupported by IGD quirks, "
"try SandyBridge or newer", vdev->vbasedev.name);
return;
}
/*
* Regardless of running in UPT or legacy mode, the guest graphics
* driver may attempt to use stolen memory, however only legacy mode
* has BIOS support for reserving stolen memory in the guest VM.
* Emulate the GMCH register in all cases and zero out the stolen
* memory size here. Legacy mode may request allocation and re-write
* this below.
*/
gmch = vfio_pci_read_config(&vdev->pdev, IGD_GMCH, 4);
gmch &= ~((gen < 8 ? 0x1f : 0xff) << (gen < 8 ? 3 : 8));
/* GMCH is read-only, emulated */
pci_set_long(vdev->pdev.config + IGD_GMCH, gmch);
pci_set_long(vdev->pdev.wmask + IGD_GMCH, 0);
pci_set_long(vdev->emulated_config_bits + IGD_GMCH, ~0);
/*
     * This must be at address 00:02.0 for us to even consider enabling
* legacy mode. The vBIOS has dependencies on the PCI bus address.
*/
if (&vdev->pdev != pci_find_device(pci_device_root_bus(&vdev->pdev),
0, PCI_DEVFN(0x2, 0))) {
return;
}
/*
* We need to create an LPC/ISA bridge at PCI bus address 00:1f.0 that we
* can stuff host values into, so if there's already one there and it's not
* one we can hack on, legacy mode is no-go. Sorry Q35.
*/
lpc_bridge = pci_find_device(pci_device_root_bus(&vdev->pdev),
0, PCI_DEVFN(0x1f, 0));
if (lpc_bridge && !object_dynamic_cast(OBJECT(lpc_bridge),
"vfio-pci-igd-lpc-bridge")) {
error_report("IGD device %s cannot support legacy mode due to existing "
"devices at address 1f.0", vdev->vbasedev.name);
return;
}
/*
* Most of what we're doing here is to enable the ROM to run, so if
* there's no ROM, there's no point in setting up this quirk.
* NB. We only seem to get BIOS ROMs, so a UEFI VM would need CSM support.
*/
ret = vfio_get_region_info(&vdev->vbasedev,
VFIO_PCI_ROM_REGION_INDEX, &rom);
if ((ret || !rom->size) && !vdev->pdev.romfile) {
error_report("IGD device %s has no ROM, legacy mode disabled",
vdev->vbasedev.name);
goto out;
}
/*
* Ignore the hotplug corner case, mark the ROM failed, we can't
* create the devices we need for legacy mode in the hotplug scenario.
*/
if (vdev->pdev.qdev.hotplugged) {
error_report("IGD device %s hotplugged, ROM disabled, "
"legacy mode disabled", vdev->vbasedev.name);
vdev->rom_read_failed = true;
goto out;
}
/*
* Check whether we have all the vfio device specific regions to
* support legacy mode (added in Linux v4.6). If not, bail.
*/
ret = vfio_get_dev_region_info(&vdev->vbasedev,
VFIO_REGION_TYPE_PCI_VENDOR_TYPE | PCI_VENDOR_ID_INTEL,
VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION, &opregion);
if (ret) {
error_report("IGD device %s does not support OpRegion access,"
"legacy mode disabled", vdev->vbasedev.name);
goto out;
}
ret = vfio_get_dev_region_info(&vdev->vbasedev,
VFIO_REGION_TYPE_PCI_VENDOR_TYPE | PCI_VENDOR_ID_INTEL,
VFIO_REGION_SUBTYPE_INTEL_IGD_HOST_CFG, &host);
if (ret) {
error_report("IGD device %s does not support host bridge access,"
"legacy mode disabled", vdev->vbasedev.name);
goto out;
}
ret = vfio_get_dev_region_info(&vdev->vbasedev,
VFIO_REGION_TYPE_PCI_VENDOR_TYPE | PCI_VENDOR_ID_INTEL,
VFIO_REGION_SUBTYPE_INTEL_IGD_LPC_CFG, &lpc);
if (ret) {
error_report("IGD device %s does not support LPC bridge access,"
"legacy mode disabled", vdev->vbasedev.name);
goto out;
}
/*
* If IGD VGA Disable is clear (expected) and VGA is not already enabled,
* try to enable it. Probably shouldn't be using legacy mode without VGA,
* but also no point in us enabling VGA if disabled in hardware.
*/
if (!(gmch & 0x2) && !vdev->vga && vfio_populate_vga(vdev, &err)) {
error_reportf_err(err, ERR_PREFIX, vdev->vbasedev.name);
error_report("IGD device %s failed to enable VGA access, "
"legacy mode disabled", vdev->vbasedev.name);
goto out;
}
/* Create our LPC/ISA bridge */
ret = vfio_pci_igd_lpc_init(vdev, lpc);
if (ret) {
error_report("IGD device %s failed to create LPC bridge, "
"legacy mode disabled", vdev->vbasedev.name);
goto out;
}
/* Stuff some host values into the VM PCI host bridge */
ret = vfio_pci_igd_host_init(vdev, host);
if (ret) {
error_report("IGD device %s failed to modify host bridge, "
"legacy mode disabled", vdev->vbasedev.name);
goto out;
}
/* Setup OpRegion access */
ret = vfio_pci_igd_opregion_init(vdev, opregion, &err);
if (ret) {
error_append_hint(&err, "IGD legacy mode disabled\n");
error_reportf_err(err, ERR_PREFIX, vdev->vbasedev.name);
goto out;
}
/* Setup our quirk to munge GTT addresses to the VM allocated buffer */
quirk = g_malloc0(sizeof(*quirk));
quirk->mem = g_new0(MemoryRegion, 2);
quirk->nr_mem = 2;
igd = quirk->data = g_malloc0(sizeof(*igd));
igd->vdev = vdev;
igd->index = ~0;
igd->bdsm = vfio_pci_read_config(&vdev->pdev, IGD_BDSM, 4);
igd->bdsm &= ~((1 << 20) - 1); /* 1MB aligned */
memory_region_init_io(&quirk->mem[0], OBJECT(vdev), &vfio_igd_index_quirk,
igd, "vfio-igd-index-quirk", 4);
memory_region_add_subregion_overlap(vdev->bars[nr].region.mem,
0, &quirk->mem[0], 1);
memory_region_init_io(&quirk->mem[1], OBJECT(vdev), &vfio_igd_data_quirk,
igd, "vfio-igd-data-quirk", 4);
memory_region_add_subregion_overlap(vdev->bars[nr].region.mem,
4, &quirk->mem[1], 1);
QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);
/* Determine the size of stolen memory needed for GTT */
ggms_mb = (gmch >> (gen < 8 ? 8 : 6)) & 0x3;
if (gen > 6) {
ggms_mb = 1 << ggms_mb;
}
/*
     * Assume we have no GMS memory, but allow it to be overridden by device
     * option (experimental). The spec doesn't actually allow zero GMS when
     * IVD (IGD VGA Disable) is clear, but the claim is that it's unused,
* so let's not waste VM memory for it.
*/
if (vdev->igd_gms) {
if (vdev->igd_gms <= 0x10) {
gms_mb = vdev->igd_gms * 32;
gmch |= vdev->igd_gms << (gen < 8 ? 3 : 8);
pci_set_long(vdev->pdev.config + IGD_GMCH, gmch);
} else {
error_report("Unsupported IGD GMS value 0x%x", vdev->igd_gms);
vdev->igd_gms = 0;
}
}
/*
* Request reserved memory for stolen memory via fw_cfg. VM firmware
* must allocate a 1MB aligned reserved memory region below 4GB with
* the requested size (in bytes) for use by the Intel PCI class VGA
* device at VM address 00:02.0. The base address of this reserved
     * memory region must be written to the device BDSM register at PCI
* config offset 0x5C.
*/
bdsm_size = g_malloc(sizeof(*bdsm_size));
*bdsm_size = cpu_to_le64((ggms_mb + gms_mb) * 1024 * 1024);
fw_cfg_add_file(fw_cfg_find(), "etc/igd-bdsm-size",
bdsm_size, sizeof(*bdsm_size));
/* BDSM is read-write, emulated. The BIOS needs to be able to write it */
pci_set_long(vdev->pdev.config + IGD_BDSM, 0);
pci_set_long(vdev->pdev.wmask + IGD_BDSM, ~0);
pci_set_long(vdev->emulated_config_bits + IGD_BDSM, ~0);
/*
* This IOBAR gives us access to GTTADR, which allows us to write to
* the GTT itself. So let's go ahead and write zero to all the GTT
* entries to avoid spurious DMA faults. Be sure I/O access is enabled
* before talking to the device.
*/
if (pread(vdev->vbasedev.fd, &cmd_orig, sizeof(cmd_orig),
vdev->config_offset + PCI_COMMAND) != sizeof(cmd_orig)) {
error_report("IGD device %s - failed to read PCI command register",
vdev->vbasedev.name);
}
cmd = cmd_orig | PCI_COMMAND_IO;
if (pwrite(vdev->vbasedev.fd, &cmd, sizeof(cmd),
vdev->config_offset + PCI_COMMAND) != sizeof(cmd)) {
error_report("IGD device %s - failed to write PCI command register",
vdev->vbasedev.name);
}
for (i = 1; i < vfio_igd_gtt_max(vdev); i += 4) {
vfio_region_write(&vdev->bars[4].region, 0, i, 4);
vfio_region_write(&vdev->bars[4].region, 4, 0, 4);
}
if (pwrite(vdev->vbasedev.fd, &cmd_orig, sizeof(cmd_orig),
vdev->config_offset + PCI_COMMAND) != sizeof(cmd_orig)) {
error_report("IGD device %s - failed to restore PCI command register",
vdev->vbasedev.name);
}
trace_vfio_pci_igd_bdsm_enabled(vdev->vbasedev.name, ggms_mb + gms_mb);
out:
g_free(rom);
g_free(opregion);
g_free(host);
g_free(lpc);
}
| 16,420 |
qemu | a9321a4d49d65d29c2926a51aedc5b91a01f3591 | 0 | int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
int is_write1, int mmu_idx)
{
uint64_t ptep, pte;
target_ulong pde_addr, pte_addr;
int error_code, is_dirty, prot, page_size, is_write, is_user;
target_phys_addr_t paddr;
uint32_t page_offset;
target_ulong vaddr, virt_addr;
is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
addr, is_write1, is_user, env->eip);
#endif
is_write = is_write1 & 1;
if (!(env->cr[0] & CR0_PG_MASK)) {
pte = addr;
virt_addr = addr & TARGET_PAGE_MASK;
prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
page_size = 4096;
goto do_mapping;
}
if (env->cr[4] & CR4_PAE_MASK) {
uint64_t pde, pdpe;
target_ulong pdpe_addr;
#ifdef TARGET_X86_64
if (env->hflags & HF_LMA_MASK) {
uint64_t pml4e_addr, pml4e;
int32_t sext;
/* test virtual address sign extension */
sext = (int64_t)addr >> 47;
if (sext != 0 && sext != -1) {
env->error_code = 0;
env->exception_index = EXCP0D_GPF;
return 1;
}
pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
env->a20_mask;
pml4e = ldq_phys(pml4e_addr);
if (!(pml4e & PG_PRESENT_MASK)) {
error_code = 0;
goto do_fault;
}
if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
error_code = PG_ERROR_RSVD_MASK;
goto do_fault;
}
if (!(pml4e & PG_ACCESSED_MASK)) {
pml4e |= PG_ACCESSED_MASK;
stl_phys_notdirty(pml4e_addr, pml4e);
}
ptep = pml4e ^ PG_NX_MASK;
pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
env->a20_mask;
pdpe = ldq_phys(pdpe_addr);
if (!(pdpe & PG_PRESENT_MASK)) {
error_code = 0;
goto do_fault;
}
if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
error_code = PG_ERROR_RSVD_MASK;
goto do_fault;
}
ptep &= pdpe ^ PG_NX_MASK;
if (!(pdpe & PG_ACCESSED_MASK)) {
pdpe |= PG_ACCESSED_MASK;
stl_phys_notdirty(pdpe_addr, pdpe);
}
} else
#endif
{
/* XXX: load them when cr3 is loaded ? */
pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
env->a20_mask;
pdpe = ldq_phys(pdpe_addr);
if (!(pdpe & PG_PRESENT_MASK)) {
error_code = 0;
goto do_fault;
}
ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
}
pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
env->a20_mask;
pde = ldq_phys(pde_addr);
if (!(pde & PG_PRESENT_MASK)) {
error_code = 0;
goto do_fault;
}
if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
error_code = PG_ERROR_RSVD_MASK;
goto do_fault;
}
ptep &= pde ^ PG_NX_MASK;
if (pde & PG_PSE_MASK) {
/* 2 MB page */
page_size = 2048 * 1024;
ptep ^= PG_NX_MASK;
if ((ptep & PG_NX_MASK) && is_write1 == 2)
goto do_fault_protect;
if (is_user) {
if (!(ptep & PG_USER_MASK))
goto do_fault_protect;
if (is_write && !(ptep & PG_RW_MASK))
goto do_fault_protect;
} else {
if ((env->cr[0] & CR0_WP_MASK) &&
is_write && !(ptep & PG_RW_MASK))
goto do_fault_protect;
}
is_dirty = is_write && !(pde & PG_DIRTY_MASK);
if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
pde |= PG_ACCESSED_MASK;
if (is_dirty)
pde |= PG_DIRTY_MASK;
stl_phys_notdirty(pde_addr, pde);
}
/* align to page_size */
pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
virt_addr = addr & ~(page_size - 1);
} else {
/* 4 KB page */
if (!(pde & PG_ACCESSED_MASK)) {
pde |= PG_ACCESSED_MASK;
stl_phys_notdirty(pde_addr, pde);
}
pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
env->a20_mask;
pte = ldq_phys(pte_addr);
if (!(pte & PG_PRESENT_MASK)) {
error_code = 0;
goto do_fault;
}
if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
error_code = PG_ERROR_RSVD_MASK;
goto do_fault;
}
/* combine pde and pte nx, user and rw protections */
ptep &= pte ^ PG_NX_MASK;
ptep ^= PG_NX_MASK;
if ((ptep & PG_NX_MASK) && is_write1 == 2)
goto do_fault_protect;
if (is_user) {
if (!(ptep & PG_USER_MASK))
goto do_fault_protect;
if (is_write && !(ptep & PG_RW_MASK))
goto do_fault_protect;
} else {
if ((env->cr[0] & CR0_WP_MASK) &&
is_write && !(ptep & PG_RW_MASK))
goto do_fault_protect;
}
is_dirty = is_write && !(pte & PG_DIRTY_MASK);
if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
pte |= PG_ACCESSED_MASK;
if (is_dirty)
pte |= PG_DIRTY_MASK;
stl_phys_notdirty(pte_addr, pte);
}
page_size = 4096;
virt_addr = addr & ~0xfff;
pte = pte & (PHYS_ADDR_MASK | 0xfff);
}
} else {
uint32_t pde;
/* page directory entry */
pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
env->a20_mask;
pde = ldl_phys(pde_addr);
if (!(pde & PG_PRESENT_MASK)) {
error_code = 0;
goto do_fault;
}
/* if PSE bit is set, then we use a 4MB page */
if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
page_size = 4096 * 1024;
if (is_user) {
if (!(pde & PG_USER_MASK))
goto do_fault_protect;
if (is_write && !(pde & PG_RW_MASK))
goto do_fault_protect;
} else {
if ((env->cr[0] & CR0_WP_MASK) &&
is_write && !(pde & PG_RW_MASK))
goto do_fault_protect;
}
is_dirty = is_write && !(pde & PG_DIRTY_MASK);
if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
pde |= PG_ACCESSED_MASK;
if (is_dirty)
pde |= PG_DIRTY_MASK;
stl_phys_notdirty(pde_addr, pde);
}
pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
ptep = pte;
virt_addr = addr & ~(page_size - 1);
} else {
if (!(pde & PG_ACCESSED_MASK)) {
pde |= PG_ACCESSED_MASK;
stl_phys_notdirty(pde_addr, pde);
}
/* page directory entry */
pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
env->a20_mask;
pte = ldl_phys(pte_addr);
if (!(pte & PG_PRESENT_MASK)) {
error_code = 0;
goto do_fault;
}
/* combine pde and pte user and rw protections */
ptep = pte & pde;
if (is_user) {
if (!(ptep & PG_USER_MASK))
goto do_fault_protect;
if (is_write && !(ptep & PG_RW_MASK))
goto do_fault_protect;
} else {
if ((env->cr[0] & CR0_WP_MASK) &&
is_write && !(ptep & PG_RW_MASK))
goto do_fault_protect;
}
is_dirty = is_write && !(pte & PG_DIRTY_MASK);
if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
pte |= PG_ACCESSED_MASK;
if (is_dirty)
pte |= PG_DIRTY_MASK;
stl_phys_notdirty(pte_addr, pte);
}
page_size = 4096;
virt_addr = addr & ~0xfff;
}
}
/* the page can be put in the TLB */
prot = PAGE_READ;
if (!(ptep & PG_NX_MASK))
prot |= PAGE_EXEC;
if (pte & PG_DIRTY_MASK) {
/* only set write access if already dirty... otherwise wait
for dirty access */
if (is_user) {
if (ptep & PG_RW_MASK)
prot |= PAGE_WRITE;
} else {
if (!(env->cr[0] & CR0_WP_MASK) ||
(ptep & PG_RW_MASK))
prot |= PAGE_WRITE;
}
}
do_mapping:
pte = pte & env->a20_mask;
/* Even if 4MB pages, we map only one 4KB page in the cache to
avoid filling it too fast */
page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
paddr = (pte & TARGET_PAGE_MASK) + page_offset;
vaddr = virt_addr + page_offset;
tlb_set_page(env, vaddr, paddr, prot, mmu_idx, page_size);
return 0;
do_fault_protect:
error_code = PG_ERROR_P_MASK;
do_fault:
error_code |= (is_write << PG_ERROR_W_BIT);
if (is_user)
error_code |= PG_ERROR_U_MASK;
if (is_write1 == 2 &&
(env->efer & MSR_EFER_NXE) &&
(env->cr[4] & CR4_PAE_MASK))
error_code |= PG_ERROR_I_D_MASK;
if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
/* cr2 is not modified in case of exceptions */
stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
addr);
} else {
env->cr[2] = addr;
}
env->error_code = error_code;
env->exception_index = EXCP0E_PAGE;
return 1;
}
| 16,421 |
qemu | 910f738b851a263396fc85b2052e47f884ffead3 | 1 | static void test_qga_invalid_args(gconstpointer fix)
{
const TestFixture *fixture = fix;
QDict *ret, *error;
const gchar *class, *desc;
ret = qmp_fd(fixture->fd, "{'execute': 'guest-ping', "
"'arguments': {'foo': 42 }}");
g_assert_nonnull(ret);
error = qdict_get_qdict(ret, "error");
class = qdict_get_try_str(error, "class");
desc = qdict_get_try_str(error, "desc");
g_assert_cmpstr(class, ==, "GenericError");
g_assert_cmpstr(desc, ==, "QMP input object member 'foo' is unexpected");
QDECREF(ret);
}
| 16,423 |
FFmpeg | 1abcd972c4c0e16f1e83be2fd32a251f51b2946d | 1 | int ff_mlz_decompression(MLZ* mlz, GetBitContext* gb, int size, unsigned char *buff) {
MLZDict *dict = mlz->dict;
unsigned long output_chars;
int string_code, last_string_code, char_code;
string_code = 0;
char_code = -1;
last_string_code = -1;
output_chars = 0;
while (output_chars < size) {
string_code = input_code(gb, mlz->dic_code_bit);
switch (string_code) {
case FLUSH_CODE:
case MAX_CODE:
ff_mlz_flush_dict(mlz);
char_code = -1;
last_string_code = -1;
break;
case FREEZE_CODE:
mlz->freeze_flag = 1;
break;
default:
if (string_code > mlz->current_dic_index_max) {
av_log(mlz->context, AV_LOG_ERROR, "String code %d exceeds maximum value of %d.\n", string_code, mlz->current_dic_index_max);
if (string_code == (int) mlz->bump_code) {
++mlz->dic_code_bit;
mlz->current_dic_index_max *= 2;
mlz->bump_code = mlz->current_dic_index_max - 1;
} else {
if (string_code >= mlz->next_code) {
int ret = decode_string(mlz, &buff[output_chars], last_string_code, &char_code, size - output_chars);
if (ret < 0 || ret > size - output_chars) {
av_log(mlz->context, AV_LOG_ERROR, "output chars overflow\n");
output_chars += ret;
ret = decode_string(mlz, &buff[output_chars], char_code, &char_code, size - output_chars);
if (ret < 0 || ret > size - output_chars) {
av_log(mlz->context, AV_LOG_ERROR, "output chars overflow\n");
output_chars += ret;
set_new_entry_dict(dict, mlz->next_code, last_string_code, char_code);
mlz->next_code++;
} else {
int ret = decode_string(mlz, &buff[output_chars], string_code, &char_code, size - output_chars);
if (ret < 0 || ret > size - output_chars) {
av_log(mlz->context, AV_LOG_ERROR, "output chars overflow\n");
output_chars += ret;
if (output_chars <= size && !mlz->freeze_flag) {
if (last_string_code != -1) {
set_new_entry_dict(dict, mlz->next_code, last_string_code, char_code);
mlz->next_code++;
} else {
break;
last_string_code = string_code;
break;
| 16,424 |
qemu | a01672d3968cf91208666d371784110bfde9d4f8 | 1 | static void kvm_client_set_memory(struct CPUPhysMemoryClient *client,
target_phys_addr_t start_addr,
ram_addr_t size, ram_addr_t phys_offset,
bool log_dirty)
{
kvm_set_phys_mem(start_addr, size, phys_offset, log_dirty);
}
| 16,426 |
qemu | 22644cd2c60151a964d9505f4c5f7baf845f20d8 | 1 | static void balloon_stats_set_poll_interval(Object *obj, struct Visitor *v,
void *opaque, const char *name,
Error **errp)
{
VirtIOBalloon *s = opaque;
Error *local_err = NULL;
int64_t value;
visit_type_int(v, &value, name, &local_err);
if (local_err) {
error_propagate(errp, local_err);
return;
}
if (value < 0) {
error_setg(errp, "timer value must be greater than zero");
return;
}
if (value > UINT_MAX) {
error_setg(errp, "timer value is too big");
return;
}
if (value == s->stats_poll_interval) {
return;
}
if (value == 0) {
/* timer=0 disables the timer */
balloon_stats_destroy_timer(s);
return;
}
if (balloon_stats_enabled(s)) {
/* timer interval change */
s->stats_poll_interval = value;
balloon_stats_change_timer(s, value);
return;
}
/* create a new timer */
g_assert(s->stats_timer == NULL);
s->stats_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, balloon_stats_poll_cb, s);
s->stats_poll_interval = value;
balloon_stats_change_timer(s, 0);
}
| 16,428 |
FFmpeg | 6770a9d6898a0c7561586dabd7a4e5b5187bed62 | 1 | static OutputStream *choose_output(void)
{
int i;
int64_t opts_min = INT64_MAX;
OutputStream *ost_min = NULL;
for (i = 0; i < nb_output_streams; i++) {
OutputStream *ost = output_streams[i];
int64_t opts = av_rescale_q(ost->st->cur_dts, ost->st->time_base,
AV_TIME_BASE_Q);
if (!ost->finished && opts < opts_min) {
opts_min = opts;
ost_min = ost->unavailable ? NULL : ost;
}
}
return ost_min;
}
| 16,429 |
qemu | 1108b2f8a939fb5778d384149e2f1b99062a72da | 1 | static int vfio_msi_setup(VFIOPCIDevice *vdev, int pos)
{
uint16_t ctrl;
bool msi_64bit, msi_maskbit;
int ret, entries;
if (pread(vdev->vbasedev.fd, &ctrl, sizeof(ctrl),
vdev->config_offset + pos + PCI_CAP_FLAGS) != sizeof(ctrl)) {
return -errno;
}
ctrl = le16_to_cpu(ctrl);
msi_64bit = !!(ctrl & PCI_MSI_FLAGS_64BIT);
msi_maskbit = !!(ctrl & PCI_MSI_FLAGS_MASKBIT);
entries = 1 << ((ctrl & PCI_MSI_FLAGS_QMASK) >> 1);
trace_vfio_msi_setup(vdev->vbasedev.name, pos);
ret = msi_init(&vdev->pdev, pos, entries, msi_64bit, msi_maskbit);
if (ret < 0) {
if (ret == -ENOTSUP) {
return 0;
}
error_report("vfio: msi_init failed");
return ret;
}
vdev->msi_cap_size = 0xa + (msi_maskbit ? 0xa : 0) + (msi_64bit ? 0x4 : 0);
return 0;
}
| 16,430 |
qemu | 2db59a76c421cdd1039d10e32a9798952d3ff5ba | 1 | static void gen_advance_ccount(DisasContext *dc)
{
gen_advance_ccount_cond(dc);
dc->ccount_delta = 0;
}
| 16,432 |
qemu | bc5008a832f95aae86efce844382e64d54da2146 | 1 | void init_paths(const char *prefix)
{
char pref_buf[PATH_MAX];
if (prefix[0] == '\0' ||
!strcmp(prefix, "/"))
return;
if (prefix[0] != '/') {
char *cwd = getcwd(NULL, 0);
size_t pref_buf_len = sizeof(pref_buf);
if (!cwd)
abort();
pstrcpy(pref_buf, sizeof(pref_buf), cwd);
pstrcat(pref_buf, pref_buf_len, "/");
pstrcat(pref_buf, pref_buf_len, prefix);
free(cwd);
} else
pstrcpy(pref_buf, sizeof(pref_buf), prefix + 1);
base = new_entry("", NULL, pref_buf);
base = add_dir_maybe(base);
if (base->num_entries == 0) {
g_free(base->pathname);
free(base->name);
free(base);
base = NULL;
} else {
set_parents(base, base);
}
}
| 16,433 |
qemu | 5e003f17ec518cd96f5d2ac23ce9e14144426235 | 1 | int bdrv_all_create_snapshot(QEMUSnapshotInfo *sn,
BlockDriverState *vm_state_bs,
uint64_t vm_state_size,
BlockDriverState **first_bad_bs)
{
int err = 0;
BlockDriverState *bs;
BdrvNextIterator it;
for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
AioContext *ctx = bdrv_get_aio_context(bs);
aio_context_acquire(ctx);
if (bs == vm_state_bs) {
sn->vm_state_size = vm_state_size;
err = bdrv_snapshot_create(bs, sn);
} else if (bdrv_can_snapshot(bs)) {
sn->vm_state_size = 0;
err = bdrv_snapshot_create(bs, sn);
}
aio_context_release(ctx);
if (err < 0) {
goto fail;
}
}
fail:
*first_bad_bs = bs;
return err;
} | 16,434 |
qemu | ba026602a673677735428e64e621cdf95b5cd6d9 | 1 | void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_addr,
uintptr_t addr)
{
if (TCG_TARGET_REG_BITS == 64) {
tcg_insn_unit i1, i2;
intptr_t tb_diff = addr - tc_ptr;
intptr_t br_diff = addr - (jmp_addr + 4);
uint64_t pair;
/* This does not exercise the range of the branch, but we do
still need to be able to load the new value of TCG_REG_TB.
But this does still happen quite often. */
if (tb_diff == (int16_t)tb_diff) {
i1 = ADDI | TAI(TCG_REG_TB, TCG_REG_TB, tb_diff);
i2 = B | (br_diff & 0x3fffffc);
} else {
intptr_t lo = (int16_t)tb_diff;
intptr_t hi = (int32_t)(tb_diff - lo);
assert(tb_diff == hi + lo);
i1 = ADDIS | TAI(TCG_REG_TB, TCG_REG_TB, hi >> 16);
i2 = ADDI | TAI(TCG_REG_TB, TCG_REG_TB, lo);
}
#ifdef HOST_WORDS_BIGENDIAN
pair = (uint64_t)i1 << 32 | i2;
#else
pair = (uint64_t)i2 << 32 | i1;
#endif
atomic_set((uint64_t *)jmp_addr, pair);
flush_icache_range(jmp_addr, jmp_addr + 8);
} else {
intptr_t diff = addr - jmp_addr;
tcg_debug_assert(in_range_b(diff));
atomic_set((uint32_t *)jmp_addr, B | (diff & 0x3fffffc));
flush_icache_range(jmp_addr, jmp_addr + 4);
}
}
| 16,436 |
FFmpeg | 8c50704ebf1777bee76772c4835d9760b3721057 | 1 | static av_always_inline int setup_classifs(vorbis_context *vc,
vorbis_residue *vr,
uint8_t *do_not_decode,
unsigned ch_used,
int partition_count)
{
int p, j, i;
unsigned c_p_c = vc->codebooks[vr->classbook].dimensions;
unsigned inverse_class = ff_inverse[vr->classifications];
unsigned temp, temp2;
for (p = 0, j = 0; j < ch_used; ++j) {
if (!do_not_decode[j]) {
temp = get_vlc2(&vc->gb, vc->codebooks[vr->classbook].vlc.table,
vc->codebooks[vr->classbook].nb_bits, 3);
av_dlog(NULL, "Classword: %u\n", temp);
if ((int)temp < 0)
return temp;
av_assert0(vr->classifications > 1); //needed for inverse[]
if (temp <= 65536) {
for (i = partition_count + c_p_c - 1; i >= partition_count; i--) {
temp2 = (((uint64_t)temp) * inverse_class) >> 32;
if (i < vr->ptns_to_read)
vr->classifs[p + i] = temp - temp2 * vr->classifications;
temp = temp2;
}
} else {
for (i = partition_count + c_p_c - 1; i >= partition_count; i--) {
temp2 = temp / vr->classifications;
if (i < vr->ptns_to_read)
vr->classifs[p + i] = temp - temp2 * vr->classifications;
temp = temp2;
}
}
}
p += vr->ptns_to_read;
}
return 0;
}
| 16,438 |
FFmpeg | 55188278169c3a1838334d7aa47a1f7a40741690 | 1 | static int xan_unpack(uint8_t *dest, const int dest_len,
const uint8_t *src, const int src_len)
{
uint8_t opcode;
int size;
uint8_t *orig_dest = dest;
const uint8_t *src_end = src + src_len;
const uint8_t *dest_end = dest + dest_len;
while (dest < dest_end) {
opcode = *src++;
if (opcode < 0xe0) {
int size2, back;
if ((opcode & 0x80) == 0) {
size = opcode & 3;
back = ((opcode & 0x60) << 3) + *src++ + 1;
size2 = ((opcode & 0x1c) >> 2) + 3;
} else if ((opcode & 0x40) == 0) {
size = *src >> 6;
back = (bytestream_get_be16(&src) & 0x3fff) + 1;
size2 = (opcode & 0x3f) + 4;
} else {
size = opcode & 3;
back = ((opcode & 0x10) << 12) + bytestream_get_be16(&src) + 1;
size2 = ((opcode & 0x0c) << 6) + *src++ + 5;
if (size + size2 > dest_end - dest)
break;
}
if (src + size > src_end || dest + size + size2 > dest_end ||
dest - orig_dest + size < back)
return -1;
bytestream_get_buffer(&src, dest, size);
dest += size;
av_memcpy_backptr(dest, back, size2);
dest += size2;
} else {
int finish = opcode >= 0xfc;
size = finish ? opcode & 3 : ((opcode & 0x1f) << 2) + 4;
if (src + size > src_end || dest + size > dest_end)
return -1;
bytestream_get_buffer(&src, dest, size);
dest += size;
if (finish)
break;
}
}
return dest - orig_dest;
}
| 16,439 |
qemu | 7266ae91a111001abda65c79299c9b7e365456b6 | 1 | static void qht_do_test(unsigned int mode, size_t init_entries)
{
qht_init(&ht, 0, mode);
insert(0, N);
check(0, N, true);
check_n(N);
check(-N, -1, false);
iter_check(N);
rm(101, 102);
check_n(N - 1);
insert(N, N * 2);
check_n(N + N - 1);
rm(N, N * 2);
check_n(N - 1);
insert(101, 102);
check_n(N);
rm(10, 200);
check_n(N - 190);
insert(150, 200);
check_n(N - 190 + 50);
insert(10, 150);
check_n(N);
rm(1, 2);
check_n(N - 1);
qht_reset_size(&ht, 0);
check(0, N, false);
qht_destroy(&ht);
} | 16,440 |
FFmpeg | 69dde1ad36b7d95b8b9268f414aa6c076212ed41 | 0 | static int mov_write_tkhd_tag(ByteIOContext *pb, MOVTrack* track)
{
int64_t maxTrackLenTemp;
put_be32(pb, 0x5c); /* size (always 0x5c) */
put_tag(pb, "tkhd");
put_be32(pb, 0xf); /* version & flags (track enabled) */
put_be32(pb, track->time); /* creation time */
put_be32(pb, track->time); /* modification time */
put_be32(pb, track->trackID); /* track-id */
put_be32(pb, 0); /* reserved */
maxTrackLenTemp = ((int64_t)globalTimescale*(int64_t)track->trackDuration)/(int64_t)track->timescale;
put_be32(pb, (long)maxTrackLenTemp); /* duration */
put_be32(pb, 0); /* reserved */
put_be32(pb, 0); /* reserved */
put_be32(pb, 0x0); /* reserved (Layer & Alternate group) */
/* Volume, only for audio */
if(track->enc->codec_type == CODEC_TYPE_AUDIO)
put_be16(pb, 0x0100);
else
put_be16(pb, 0);
put_be16(pb, 0); /* reserved */
/* Matrix structure */
put_be32(pb, 0x00010000); /* reserved */
put_be32(pb, 0x0); /* reserved */
put_be32(pb, 0x0); /* reserved */
put_be32(pb, 0x0); /* reserved */
put_be32(pb, 0x00010000); /* reserved */
put_be32(pb, 0x0); /* reserved */
put_be32(pb, 0x0); /* reserved */
put_be32(pb, 0x0); /* reserved */
put_be32(pb, 0x40000000); /* reserved */
/* Track width and height, for visual only */
if(track->enc->codec_type == CODEC_TYPE_VIDEO) {
put_be32(pb, track->enc->width*0x10000);
put_be32(pb, track->enc->height*0x10000);
}
else {
put_be32(pb, 0);
put_be32(pb, 0);
}
return 0x5c;
}
| 16,441 |
FFmpeg | 01ecb7172b684f1c4b3e748f95c5a9a494ca36ec | 1 | static void quantize_and_encode_band_cost_NONE_mips(struct AACEncContext *s,
PutBitContext *pb, const float *in, float *out,
const float *scaled, int size, int scale_idx,
int cb, const float lambda, const float uplim,
int *bits, const float ROUNDING) {
av_assert0(0);
}
| 16,442 |
qemu | 3a661f1eabf7e8db66e28489884d9b54aacb94ea | 1 | int qcrypto_cipher_setiv(QCryptoCipher *cipher,
const uint8_t *iv, size_t niv,
Error **errp)
{
QCryptoCipherNettle *ctx = cipher->opaque;
if (niv != ctx->niv) {
error_setg(errp, "Expected IV size %zu not %zu",
ctx->niv, niv);
return -1;
}
memcpy(ctx->iv, iv, niv);
return 0;
}
| 16,443 |
qemu | e2f89926f19d2940eda070542501f39f51a8c81f | 1 | static int uhci_handle_td(UHCIState *s, uint32_t addr, UHCI_TD *td,
uint32_t *int_mask, bool queuing)
{
UHCIAsync *async;
int len = 0, max_len;
uint8_t pid;
USBDevice *dev;
USBEndpoint *ep;
/* Is active ? */
if (!(td->ctrl & TD_CTRL_ACTIVE))
return TD_RESULT_NEXT_QH;
async = uhci_async_find_td(s, addr, td);
if (async) {
/* Already submitted */
async->queue->valid = 32;
if (!async->done)
return TD_RESULT_ASYNC_CONT;
if (queuing) {
            /* we are busy filling the queue, we are not prepared
               to consume completed packets then, just leave them
               in async state */
return TD_RESULT_ASYNC_CONT;
}
uhci_async_unlink(async);
goto done;
}
/* Allocate new packet */
async = uhci_async_alloc(uhci_queue_get(s, td), addr);
/* valid needs to be large enough to handle 10 frame delay
* for initial isochronous requests
*/
async->queue->valid = 32;
async->isoc = td->ctrl & TD_CTRL_IOS;
max_len = ((td->token >> 21) + 1) & 0x7ff;
pid = td->token & 0xff;
dev = uhci_find_device(s, (td->token >> 8) & 0x7f);
ep = usb_ep_get(dev, pid, (td->token >> 15) & 0xf);
usb_packet_setup(&async->packet, pid, ep);
qemu_sglist_add(&async->sgl, td->buffer, max_len);
usb_packet_map(&async->packet, &async->sgl);
switch(pid) {
case USB_TOKEN_OUT:
case USB_TOKEN_SETUP:
len = usb_handle_packet(dev, &async->packet);
if (len >= 0)
len = max_len;
break;
case USB_TOKEN_IN:
len = usb_handle_packet(dev, &async->packet);
break;
default:
/* invalid pid : frame interrupted */
uhci_async_free(async);
s->status |= UHCI_STS_HCPERR;
uhci_update_irq(s);
return TD_RESULT_STOP_FRAME;
}
if (len == USB_RET_ASYNC) {
uhci_async_link(async);
return TD_RESULT_ASYNC_START;
}
async->packet.result = len;
done:
len = uhci_complete_td(s, td, async, int_mask);
usb_packet_unmap(&async->packet);
uhci_async_free(async);
return len;
}
| 16,444 |