unique_id (int64, 13-189k) | target (int64, 0-1) | code (string, lengths 20-241k) | __index_level_0__ (int64, 0-18.9k) |
---|---|---|---|
161,676 | 0 | void VaapiVideoDecodeAccelerator::Flush() {
VLOGF(2) << "Got flush request";
DCHECK(task_runner_->BelongsToCurrentThread());
QueueInputBuffer(media::BitstreamBuffer());
}
| 8,000 |
33,278 | 0 | int kvm_arch_prepare_memory_region(struct kvm *kvm,
struct kvm_memory_slot *memslot,
struct kvm_memory_slot old,
struct kvm_userspace_memory_region *mem,
bool user_alloc)
{
int npages = memslot->npages;
/*
* Only private memory slots need to be mapped here since
* KVM_SET_MEMORY_REGION ioctl is no longer supported.
*/
if ((memslot->id >= KVM_USER_MEM_SLOTS) && npages && !old.npages) {
unsigned long userspace_addr;
/*
* MAP_SHARED to prevent internal slot pages from being moved
* by fork()/COW.
*/
userspace_addr = vm_mmap(NULL, 0, npages * PAGE_SIZE,
PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_ANONYMOUS, 0);
if (IS_ERR((void *)userspace_addr))
return PTR_ERR((void *)userspace_addr);
memslot->userspace_addr = userspace_addr;
}
return 0;
}
| 8,001 |
123,554 | 0 | void SavePackage::SaveCanceled(SaveItem* save_item) {
file_manager_->RemoveSaveFile(save_item->save_id(),
save_item->url(),
this);
if (save_item->save_id() != -1)
BrowserThread::PostTask(
BrowserThread::FILE, FROM_HERE,
base::Bind(&SaveFileManager::CancelSave,
file_manager_,
save_item->save_id()));
}
| 8,002 |
67,548 | 0 | static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
{
struct address_space *mapping = mpd->inode->i_mapping;
struct pagevec pvec;
unsigned int nr_pages;
long left = mpd->wbc->nr_to_write;
pgoff_t index = mpd->first_page;
pgoff_t end = mpd->last_page;
int tag;
int i, err = 0;
int blkbits = mpd->inode->i_blkbits;
ext4_lblk_t lblk;
struct buffer_head *head;
if (mpd->wbc->sync_mode == WB_SYNC_ALL || mpd->wbc->tagged_writepages)
tag = PAGECACHE_TAG_TOWRITE;
else
tag = PAGECACHE_TAG_DIRTY;
pagevec_init(&pvec, 0);
mpd->map.m_len = 0;
mpd->next_page = index;
while (index <= end) {
nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
if (nr_pages == 0)
goto out;
for (i = 0; i < nr_pages; i++) {
struct page *page = pvec.pages[i];
/*
* At this point, the page may be truncated or
* invalidated (changing page->mapping to NULL), or
* even swizzled back from swapper_space to tmpfs file
* mapping. However, page->index will not change
* because we have a reference on the page.
*/
if (page->index > end)
goto out;
/*
* Accumulated enough dirty pages? This doesn't apply
* to WB_SYNC_ALL mode. For integrity sync we have to
* keep going because someone may be concurrently
* dirtying pages, and we might have synced a lot of
* newly appeared dirty pages, but have not synced all
* of the old dirty pages.
*/
if (mpd->wbc->sync_mode == WB_SYNC_NONE && left <= 0)
goto out;
/* If we can't merge this page, we are done. */
if (mpd->map.m_len > 0 && mpd->next_page != page->index)
goto out;
lock_page(page);
/*
* If the page is no longer dirty, or its mapping no
* longer corresponds to inode we are writing (which
* means it has been truncated or invalidated), or the
* page is already under writeback and we are not doing
* a data integrity writeback, skip the page
*/
if (!PageDirty(page) ||
(PageWriteback(page) &&
(mpd->wbc->sync_mode == WB_SYNC_NONE)) ||
unlikely(page->mapping != mapping)) {
unlock_page(page);
continue;
}
wait_on_page_writeback(page);
BUG_ON(PageWriteback(page));
if (mpd->map.m_len == 0)
mpd->first_page = page->index;
mpd->next_page = page->index + 1;
/* Add all dirty buffers to mpd */
lblk = ((ext4_lblk_t)page->index) <<
(PAGE_SHIFT - blkbits);
head = page_buffers(page);
err = mpage_process_page_bufs(mpd, head, head, lblk);
if (err <= 0)
goto out;
err = 0;
left--;
}
pagevec_release(&pvec);
cond_resched();
}
return 0;
out:
pagevec_release(&pvec);
return err;
}
| 8,003 |
135,767 | 0 | WebTextInputType InputMethodController::TextInputType() const {
if (!GetFrame().Selection().IsAvailable()) {
return kWebTextInputTypeNone;
}
if (!RootEditableElementOfSelection(GetFrame().Selection()))
return kWebTextInputTypeNone;
if (!IsAvailable())
return kWebTextInputTypeNone;
Element* element = GetDocument().FocusedElement();
if (!element)
return kWebTextInputTypeNone;
if (isHTMLInputElement(*element)) {
HTMLInputElement& input = toHTMLInputElement(*element);
const AtomicString& type = input.type();
if (input.IsDisabledOrReadOnly())
return kWebTextInputTypeNone;
if (type == InputTypeNames::password)
return kWebTextInputTypePassword;
if (type == InputTypeNames::search)
return kWebTextInputTypeSearch;
if (type == InputTypeNames::email)
return kWebTextInputTypeEmail;
if (type == InputTypeNames::number)
return kWebTextInputTypeNumber;
if (type == InputTypeNames::tel)
return kWebTextInputTypeTelephone;
if (type == InputTypeNames::url)
return kWebTextInputTypeURL;
if (type == InputTypeNames::text)
return kWebTextInputTypeText;
return kWebTextInputTypeNone;
}
if (isHTMLTextAreaElement(*element)) {
if (toHTMLTextAreaElement(*element).IsDisabledOrReadOnly())
return kWebTextInputTypeNone;
return kWebTextInputTypeTextArea;
}
if (element->IsHTMLElement()) {
if (ToHTMLElement(element)->IsDateTimeFieldElement())
return kWebTextInputTypeDateTimeField;
}
GetDocument().UpdateStyleAndLayoutTree();
if (HasEditableStyle(*element))
return kWebTextInputTypeContentEditable;
return kWebTextInputTypeNone;
}
| 8,004 |
8,224 | 0 | static void v9fs_mknod(void *opaque)
{
int mode;
gid_t gid;
int32_t fid;
V9fsQID qid;
int err = 0;
int major, minor;
size_t offset = 7;
V9fsString name;
struct stat stbuf;
V9fsFidState *fidp;
V9fsPDU *pdu = opaque;
v9fs_string_init(&name);
err = pdu_unmarshal(pdu, offset, "dsdddd", &fid, &name, &mode,
&major, &minor, &gid);
if (err < 0) {
goto out_nofid;
}
trace_v9fs_mknod(pdu->tag, pdu->id, fid, mode, major, minor);
if (name_is_illegal(name.data)) {
err = -ENOENT;
goto out_nofid;
}
if (!strcmp(".", name.data) || !strcmp("..", name.data)) {
err = -EEXIST;
goto out_nofid;
}
fidp = get_fid(pdu, fid);
if (fidp == NULL) {
err = -ENOENT;
goto out_nofid;
}
err = v9fs_co_mknod(pdu, fidp, &name, fidp->uid, gid,
makedev(major, minor), mode, &stbuf);
if (err < 0) {
goto out;
}
stat_to_qid(&stbuf, &qid);
err = pdu_marshal(pdu, offset, "Q", &qid);
if (err < 0) {
goto out;
}
err += offset;
trace_v9fs_mknod_return(pdu->tag, pdu->id,
qid.type, qid.version, qid.path);
out:
put_fid(pdu, fidp);
out_nofid:
pdu_complete(pdu, err);
v9fs_string_free(&name);
}
| 8,005 |
142,959 | 0 | void HTMLMediaElement::pause() {
BLINK_MEDIA_LOG << "pause(" << (void*)this << ")";
autoplay_policy_->StopAutoplayMutedWhenVisible();
PauseInternal();
}
| 8,006 |
84,920 | 0 | SMB2_sess_auth_rawntlmssp_authenticate(struct SMB2_sess_data *sess_data)
{
int rc;
struct cifs_ses *ses = sess_data->ses;
struct smb2_sess_setup_req *req;
struct smb2_sess_setup_rsp *rsp = NULL;
unsigned char *ntlmssp_blob = NULL;
bool use_spnego = false; /* else use raw ntlmssp */
u16 blob_length = 0;
rc = SMB2_sess_alloc_buffer(sess_data);
if (rc)
goto out;
req = (struct smb2_sess_setup_req *) sess_data->iov[0].iov_base;
req->hdr.sync_hdr.SessionId = ses->Suid;
rc = build_ntlmssp_auth_blob(&ntlmssp_blob, &blob_length, ses,
sess_data->nls_cp);
if (rc) {
cifs_dbg(FYI, "build_ntlmssp_auth_blob failed %d\n", rc);
goto out;
}
if (use_spnego) {
/* BB eventually need to add this */
cifs_dbg(VFS, "spnego not supported for SMB2 yet\n");
rc = -EOPNOTSUPP;
goto out;
}
sess_data->iov[1].iov_base = ntlmssp_blob;
sess_data->iov[1].iov_len = blob_length;
rc = SMB2_sess_sendreceive(sess_data);
if (rc)
goto out;
rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;
ses->Suid = rsp->hdr.sync_hdr.SessionId;
ses->session_flags = le16_to_cpu(rsp->SessionFlags);
if (ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA)
cifs_dbg(VFS, "SMB3 encryption not supported yet\n");
rc = SMB2_sess_establish_session(sess_data);
out:
kfree(ntlmssp_blob);
SMB2_sess_free_buffer(sess_data);
kfree(ses->ntlmssp);
ses->ntlmssp = NULL;
sess_data->result = rc;
sess_data->func = NULL;
}
| 8,007 |
6,757 | 0 | static void ide_sector_write_cb(void *opaque, int ret)
{
IDEState *s = opaque;
int n;
if (ret == -ECANCELED) {
return;
}
block_acct_done(blk_get_stats(s->blk), &s->acct);
s->pio_aiocb = NULL;
s->status &= ~BUSY_STAT;
if (ret != 0) {
if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO)) {
return;
}
}
n = s->nsector;
if (n > s->req_nb_sectors) {
n = s->req_nb_sectors;
}
s->nsector -= n;
s->io_buffer_offset += 512 * n;
if (s->nsector == 0) {
/* no more sectors to write */
ide_transfer_stop(s);
} else {
int n1 = s->nsector;
if (n1 > s->req_nb_sectors) {
n1 = s->req_nb_sectors;
}
ide_transfer_start(s, s->io_buffer, n1 * BDRV_SECTOR_SIZE,
ide_sector_write);
}
ide_set_sector(s, ide_get_sector(s) + n);
if (win2k_install_hack && ((++s->irq_count % 16) == 0)) {
/* It seems there is a bug in the Windows 2000 installer HDD
IDE driver which fills the disk with empty logs when the
IDE write IRQ comes too early. This hack tries to correct
that at the expense of slower write performances. Use this
option _only_ to install Windows 2000. You must disable it
for normal use. */
timer_mod(s->sector_write_timer,
qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + (get_ticks_per_sec() / 1000));
} else {
ide_set_irq(s->bus);
}
}
| 8,008 |
48,229 | 0 | combineSeparateTileSamples24bits (uint8 *in[], uint8 *out, uint32 cols,
uint32 rows, uint32 imagewidth,
uint32 tw, uint16 spp, uint16 bps,
FILE *dumpfile, int format, int level)
{
int ready_bits = 0;
uint32 src_rowsize, dst_rowsize;
uint32 bit_offset, src_offset;
uint32 row, col, src_byte = 0, src_bit = 0;
uint32 maskbits = 0, matchbits = 0;
uint32 buff1 = 0, buff2 = 0;
uint8 bytebuff1 = 0, bytebuff2 = 0;
tsample_t s;
unsigned char *src = in[0];
unsigned char *dst = out;
char action[8];
if ((src == NULL) || (dst == NULL))
{
TIFFError("combineSeparateTileSamples24bits","Invalid input or output buffer");
return (1);
}
src_rowsize = ((bps * tw) + 7) / 8;
dst_rowsize = ((imagewidth * bps * spp) + 7) / 8;
maskbits = (uint32)-1 >> ( 32 - bps);
for (row = 0; row < rows; row++)
{
ready_bits = 0;
buff1 = buff2 = 0;
dst = out + (row * dst_rowsize);
src_offset = row * src_rowsize;
for (col = 0; col < cols; col++)
{
/* Compute src byte(s) and bits within byte(s) */
bit_offset = col * bps;
src_byte = bit_offset / 8;
src_bit = bit_offset % 8;
matchbits = maskbits << (32 - src_bit - bps);
for (s = 0; (s < spp) && (s < MAX_SAMPLES); s++)
{
src = in[s] + src_offset + src_byte;
if (little_endian)
buff1 = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
else
buff1 = (src[3] << 24) | (src[2] << 16) | (src[1] << 8) | src[0];
buff1 = (buff1 & matchbits) << (src_bit);
/* If we have a full buffer's worth, write it out */
if (ready_bits >= 16)
{
bytebuff1 = (buff2 >> 24);
*dst++ = bytebuff1;
bytebuff2 = (buff2 >> 16);
*dst++ = bytebuff2;
ready_bits -= 16;
/* shift in new bits */
buff2 = ((buff2 << 16) | (buff1 >> ready_bits));
strcpy (action, "Flush");
}
else
{ /* add another bps bits to the buffer */
bytebuff1 = bytebuff2 = 0;
buff2 = (buff2 | (buff1 >> ready_bits));
strcpy (action, "Update");
}
ready_bits += bps;
if ((dumpfile != NULL) && (level == 3))
{
dump_info (dumpfile, format, "",
"Row %3d, Col %3d, Samples %d, Src byte offset %3d bit offset %2d Dst offset %3d",
row + 1, col + 1, s, src_byte, src_bit, dst - out);
dump_long (dumpfile, format, "Match bits ", matchbits);
dump_data (dumpfile, format, "Src bits ", src, 4);
dump_long (dumpfile, format, "Buff1 bits ", buff1);
dump_long (dumpfile, format, "Buff2 bits ", buff2);
dump_byte (dumpfile, format, "Write bits1", bytebuff1);
dump_byte (dumpfile, format, "Write bits2", bytebuff2);
dump_info (dumpfile, format, "","Ready bits: %d, %s", ready_bits, action);
}
}
}
/* catch any trailing bits at the end of the line */
while (ready_bits > 0)
{
bytebuff1 = (buff2 >> 24);
*dst++ = bytebuff1;
buff2 = (buff2 << 8);
bytebuff2 = bytebuff1;
ready_bits -= 8;
}
if ((dumpfile != NULL) && (level == 3))
{
dump_info (dumpfile, format, "",
"Row %3d, Col %3d, Src byte offset %3d bit offset %2d Dst offset %3d",
row + 1, col + 1, src_byte, src_bit, dst - out);
dump_long (dumpfile, format, "Match bits ", matchbits);
dump_data (dumpfile, format, "Src bits ", src, 4);
dump_long (dumpfile, format, "Buff1 bits ", buff1);
dump_long (dumpfile, format, "Buff2 bits ", buff2);
dump_byte (dumpfile, format, "Write bits1", bytebuff1);
dump_byte (dumpfile, format, "Write bits2", bytebuff2);
dump_info (dumpfile, format, "", "Ready bits: %2d", ready_bits);
}
if ((dumpfile != NULL) && (level == 2))
{
dump_info (dumpfile, format, "combineSeparateTileSamples24bits","Output data");
dump_buffer(dumpfile, format, 1, dst_rowsize, row, out + (row * dst_rowsize));
}
}
return (0);
} /* end combineSeparateTileSamples24bits */
| 8,009 |
87,810 | 0 | R_API int r_core_cmd0(RCore *core, const char *cmd) {
return r_core_cmd (core, cmd, 0);
}
| 8,010 |
91,912 | 0 | static void sycc_to_rgb(int offset, int upb, int y, int cb, int cr,
int *out_r, int *out_g, int *out_b)
{
int r, g, b;
cb -= offset;
cr -= offset;
r = y + (int)(1.402 * (float)cr);
if (r < 0) {
r = 0;
} else if (r > upb) {
r = upb;
}
*out_r = r;
g = y - (int)(0.344 * (float)cb + 0.714 * (float)cr);
if (g < 0) {
g = 0;
} else if (g > upb) {
g = upb;
}
*out_g = g;
b = y + (int)(1.772 * (float)cb);
if (b < 0) {
b = 0;
} else if (b > upb) {
b = upb;
}
*out_b = b;
}
| 8,011 |
39,835 | 0 | static inline unsigned char read_buf(struct n_tty_data *ldata, size_t i)
{
return ldata->read_buf[i & (N_TTY_BUF_SIZE - 1)];
}
| 8,012 |
10,728 | 0 | Write_CVT_Stretched( TT_ExecContext exc,
FT_ULong idx,
FT_F26Dot6 value )
{
exc->cvt[idx] = FT_DivFix( value, Current_Ratio( exc ) );
}
| 8,013 |
6,258 | 0 | static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
struct virtio_gpu_ctrl_command *cmd)
{
pixman_format_code_t pformat;
struct virtio_gpu_simple_resource *res;
struct virtio_gpu_resource_create_2d c2d;
VIRTIO_GPU_FILL_CMD(c2d);
trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
c2d.width, c2d.height);
if (c2d.resource_id == 0) {
qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
__func__);
cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
return;
}
res = virtio_gpu_find_resource(g, c2d.resource_id);
if (res) {
qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
__func__, c2d.resource_id);
cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
return;
}
res = g_new0(struct virtio_gpu_simple_resource, 1);
res->width = c2d.width;
res->height = c2d.height;
res->format = c2d.format;
res->resource_id = c2d.resource_id;
pformat = get_pixman_format(c2d.format);
if (!pformat) {
qemu_log_mask(LOG_GUEST_ERROR,
"%s: host couldn't handle guest format %d\n",
__func__, c2d.format);
g_free(res);
cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
return;
}
res->hostmem = PIXMAN_FORMAT_BPP(pformat) * c2d.width * c2d.height;
if (res->hostmem + g->hostmem < g->conf.max_hostmem) {
res->image = pixman_image_create_bits(pformat,
c2d.width,
c2d.height,
NULL, 0);
}
if (!res->image) {
qemu_log_mask(LOG_GUEST_ERROR,
"%s: resource creation failed %d %d %d\n",
__func__, c2d.resource_id, c2d.width, c2d.height);
g_free(res);
cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
return;
}
QTAILQ_INSERT_HEAD(&g->reslist, res, next);
g->hostmem += res->hostmem;
}
| 8,014 |
94,252 | 0 | static enum test_return test_binary_delete_impl(const char *key, uint8_t cmd) {
union {
protocol_binary_request_no_extras request;
protocol_binary_response_no_extras response;
char bytes[1024];
} send, receive;
size_t len = raw_command(send.bytes, sizeof(send.bytes), cmd,
key, strlen(key), NULL, 0);
safe_send(send.bytes, len, false);
safe_recv_packet(receive.bytes, sizeof(receive.bytes));
validate_response_header(&receive.response, cmd,
PROTOCOL_BINARY_RESPONSE_KEY_ENOENT);
len = storage_command(send.bytes, sizeof(send.bytes),
PROTOCOL_BINARY_CMD_ADD,
key, strlen(key), NULL, 0, 0, 0);
safe_send(send.bytes, len, false);
safe_recv_packet(receive.bytes, sizeof(receive.bytes));
validate_response_header(&receive.response, PROTOCOL_BINARY_CMD_ADD,
PROTOCOL_BINARY_RESPONSE_SUCCESS);
len = raw_command(send.bytes, sizeof(send.bytes),
cmd, key, strlen(key), NULL, 0);
safe_send(send.bytes, len, false);
if (cmd == PROTOCOL_BINARY_CMD_DELETE) {
safe_recv_packet(receive.bytes, sizeof(receive.bytes));
validate_response_header(&receive.response, PROTOCOL_BINARY_CMD_DELETE,
PROTOCOL_BINARY_RESPONSE_SUCCESS);
}
safe_send(send.bytes, len, false);
safe_recv_packet(receive.bytes, sizeof(receive.bytes));
validate_response_header(&receive.response, cmd,
PROTOCOL_BINARY_RESPONSE_KEY_ENOENT);
return TEST_PASS;
}
| 8,015 |
167,947 | 0 | bool LocalFrame::IsProvisional() const {
CHECK_NE(FrameLifecycle::kDetached, lifecycle_.GetState());
if (IsMainFrame()) {
return GetPage()->MainFrame() != this;
}
DCHECK(Owner());
return Owner()->ContentFrame() != this;
}
| 8,016 |
92,605 | 0 | static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
{
s64 delta = (s64)(vruntime - max_vruntime);
if (delta > 0)
max_vruntime = vruntime;
return max_vruntime;
}
| 8,017 |
84,824 | 0 | static int tm_dscr_active(struct task_struct *target,
const struct user_regset *regset)
{
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
if (MSR_TM_ACTIVE(target->thread.regs->msr))
return regset->n;
return 0;
}
| 8,018 |
141,710 | 0 | void V8Console::traceCallback(const v8::FunctionCallbackInfo<v8::Value>& info)
{
ConsoleHelper(info).reportCallWithDefaultArgument(ConsoleAPIType::kTrace, String16("console.trace"));
}
| 8,019 |
16,181 | 0 | escapeGahpString(const char * input)
{
static std::string output;
if (!input) return NULL;
output = "";
unsigned int i = 0;
size_t input_len = strlen(input);
for (i=0; i < input_len; i++) {
if ( input[i] == ' ' || input[i] == '\\' || input[i] == '\r' ||
input[i] == '\n' ) {
output += '\\';
}
output += input[i];
}
return output.c_str();
}
| 8,020 |
51,302 | 0 | static int php_zip_parse_options(zval *options, long *remove_all_path,
char **remove_path, int *remove_path_len, char **add_path, int *add_path_len TSRMLS_DC) /* {{{ */
{
zval **option;
if (zend_hash_find(HASH_OF(options), "remove_all_path", sizeof("remove_all_path"), (void **)&option) == SUCCESS) {
long opt;
if (Z_TYPE_PP(option) != IS_LONG) {
zval tmp = **option;
zval_copy_ctor(&tmp);
convert_to_long(&tmp);
opt = Z_LVAL(tmp);
} else {
opt = Z_LVAL_PP(option);
}
*remove_all_path = opt;
}
/* If I add more options, it would make sense to create a nice static struct and loop over it. */
if (zend_hash_find(HASH_OF(options), "remove_path", sizeof("remove_path"), (void **)&option) == SUCCESS) {
if (Z_TYPE_PP(option) != IS_STRING) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "remove_path option expected to be a string");
return -1;
}
if (Z_STRLEN_PP(option) < 1) {
php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Empty string given as remove_path option");
return -1;
}
if (Z_STRLEN_PP(option) >= MAXPATHLEN) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "remove_path string is too long (max: %i, %i given)",
MAXPATHLEN - 1, Z_STRLEN_PP(option));
return -1;
}
*remove_path_len = Z_STRLEN_PP(option);
*remove_path = Z_STRVAL_PP(option);
}
if (zend_hash_find(HASH_OF(options), "add_path", sizeof("add_path"), (void **)&option) == SUCCESS) {
if (Z_TYPE_PP(option) != IS_STRING) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "add_path option expected to be a string");
return -1;
}
if (Z_STRLEN_PP(option) < 1) {
php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Empty string given as the add_path option");
return -1;
}
if (Z_STRLEN_PP(option) >= MAXPATHLEN) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "add_path string too long (max: %i, %i given)",
MAXPATHLEN - 1, Z_STRLEN_PP(option));
return -1;
}
*add_path_len = Z_STRLEN_PP(option);
*add_path = Z_STRVAL_PP(option);
}
return 1;
}
/* }}} */
| 8,021 |
86,214 | 0 | static int i8042_enable_aux_port(void)
{
i8042_ctr &= ~I8042_CTR_AUXDIS;
i8042_ctr |= I8042_CTR_AUXINT;
if (i8042_command(&i8042_ctr, I8042_CMD_CTL_WCTR)) {
i8042_ctr &= ~I8042_CTR_AUXINT;
i8042_ctr |= I8042_CTR_AUXDIS;
pr_err("Failed to enable AUX port\n");
return -EIO;
}
return 0;
}
| 8,022 |
121,315 | 0 | bool GetInfoFromDataURL(const GURL& url,
ResourceResponseInfo* info,
std::string* data,
int* error_code) {
std::string mime_type;
std::string charset;
if (net::DataURL::Parse(url, &mime_type, &charset, data)) {
*error_code = net::OK;
Time now = Time::Now();
info->load_timing.request_start = TimeTicks::Now();
info->load_timing.request_start_time = now;
info->request_time = now;
info->response_time = now;
info->headers = NULL;
info->mime_type.swap(mime_type);
info->charset.swap(charset);
info->security_info.clear();
info->content_length = data->length();
info->encoded_data_length = 0;
return true;
}
*error_code = net::ERR_INVALID_URL;
return false;
}
| 8,023 |
174,429 | 0 | static int aacDecoder_drcReadCompression (
HANDLE_FDK_BITSTREAM bs,
CDrcPayload *pDrcBs,
UINT payloadPosition )
{
int bitCnt = 0;
int dmxLevelsPresent, extensionPresent, compressionPresent;
int coarseGrainTcPresent, fineGrainTcPresent;
/* Move to the beginning of the DRC payload field */
FDKpushBiDirectional(bs, FDKgetValidBits(bs)-payloadPosition);
/* Sanity checks */
if ( FDKgetValidBits(bs) < 24 ) {
return 0;
}
/* Check sync word */
if (FDKreadBits(bs, 8) != DVB_ANC_DATA_SYNC_BYTE) {
return 0;
}
/* Evaluate bs_info field */
if (FDKreadBits(bs, 2) != 3) { /* mpeg_audio_type */
/* No MPEG-4 audio data */
return 0;
}
FDKreadBits(bs, 2); /* dolby_surround_mode */
pDrcBs->presMode = FDKreadBits(bs, 2); /* presentation_mode */
FDKreadBits(bs, 1); /* stereo_downmix_mode */
if (FDKreadBits(bs, 1) != 0) { /* reserved, set to 0 */
return 0;
}
/* Evaluate ancillary_data_status */
if (FDKreadBits(bs, 3) != 0) { /* reserved, set to 0 */
return 0;
}
dmxLevelsPresent = FDKreadBits(bs, 1); /* downmixing_levels_MPEG4_status */
extensionPresent = FDKreadBits(bs, 1); /* ancillary_data_extension_status; */
compressionPresent = FDKreadBits(bs, 1); /* audio_coding_mode_and_compression status */
coarseGrainTcPresent = FDKreadBits(bs, 1); /* coarse_grain_timecode_status */
fineGrainTcPresent = FDKreadBits(bs, 1); /* fine_grain_timecode_status */
bitCnt += 24;
if (dmxLevelsPresent) {
FDKreadBits(bs, 8); /* downmixing_levels_MPEG4 */
bitCnt += 8;
}
/* audio_coding_mode_and_compression_status */
if (compressionPresent)
{
UCHAR compressionOn, compressionValue;
/* audio_coding_mode */
if ( FDKreadBits(bs, 7) != 0 ) { /* The reserved bits shall be set to "0". */
return 0;
}
compressionOn = (UCHAR)FDKreadBits(bs, 1); /* compression_on */
compressionValue = (UCHAR)FDKreadBits(bs, 8); /* Compression_value */
bitCnt += 16;
if ( compressionOn ) {
/* A compression value is available so store the data just like MPEG DRC data */
pDrcBs->channelData.numBands = 1; /* One band ... */
pDrcBs->channelData.drcValue[0] = compressionValue; /* ... with one value ... */
pDrcBs->channelData.bandTop[0] = (1024 >> 2) - 1; /* ... comprising the whole spectrum. */
pDrcBs->pceInstanceTag = -1; /* Not present */
pDrcBs->progRefLevel = -1; /* Not present */
pDrcBs->channelData.drcDataType = DVB_DRC_ANC_DATA; /* Set DRC payload type to DVB. */
} else {
/* No compression value available */
/* CAUTION: It is not clearly defined by standard how to react in this situation. */
/* Turn down the compression value to aprox. 0dB */
pDrcBs->channelData.numBands = 1; /* One band ... */
pDrcBs->channelData.drcValue[0] = 0x80; /* ... with aprox. 0dB ... */
pDrcBs->channelData.bandTop[0] = (1024 >> 2) - 1; /* ... comprising the whole spectrum. */
pDrcBs->channelData.drcDataType = DVB_DRC_ANC_DATA; /* Set DRC payload type to DVB. */
/* If compression_on field is set to "0" the compression_value field shall be "0000 0000". */
if (compressionValue != 0) {
return 0;
}
}
}
/* Read timecodes if available just to get the right amount of bits. */
if (coarseGrainTcPresent) {
FDKreadBits(bs, 16); /* coarse_grain_timecode */
bitCnt += 16;
}
if (fineGrainTcPresent) {
FDKreadBits(bs, 16); /* fine_grain_timecode */
bitCnt += 16;
}
/* Read extension just to get the right amount of bits. */
if (extensionPresent) {
int extBits = 8;
FDKreadBits(bs, 1); /* reserved, set to 0 */
if (FDKreadBits(bs, 1)) extBits += 8; /* ext_downmixing_levels_status */
if (FDKreadBits(bs, 1)) extBits += 16; /* ext_downmixing_global_gains_status */
if (FDKreadBits(bs, 1)) extBits += 8; /* ext_downmixing_lfe_level_status */
FDKpushFor(bs, extBits - 4); /* skip the extension payload remainder. */
bitCnt += extBits;
}
return (bitCnt);
}
| 8,024 |
34,874 | 0 | static void nlmclnt_rpc_release(void *data)
{
nlmclnt_release_call(data);
}
| 8,025 |
9,730 | 0 | static boolean parse_label( struct translate_ctx *ctx, uint *val )
{
const char *cur = ctx->cur;
if (parse_uint( &cur, val )) {
eat_opt_white( &cur );
if (*cur == ':') {
cur++;
ctx->cur = cur;
return TRUE;
}
}
return FALSE;
}
| 8,026 |
15,967 | 0 | ImportArrayTIFF_Short ( const TIFF_Manager::TagInfo & tagInfo, const bool nativeEndian,
SXMPMeta * xmp, const char * xmpNS, const char * xmpProp )
{
try { // Don't let errors with one stop the others.
XMP_Uns16 * binPtr = (XMP_Uns16*)tagInfo.dataPtr;
xmp->DeleteProperty ( xmpNS, xmpProp ); // ! Don't keep appending, create a new array.
for ( size_t i = 0; i < tagInfo.count; ++i, ++binPtr ) {
XMP_Uns16 binValue = *binPtr;
if ( ! nativeEndian ) binValue = Flip2 ( binValue );
char strValue[20];
snprintf ( strValue, sizeof(strValue), "%hu", binValue ); // AUDIT: Using sizeof(strValue) is safe.
xmp->AppendArrayItem ( xmpNS, xmpProp, kXMP_PropArrayIsOrdered, strValue );
}
} catch ( ... ) {
}
} // ImportArrayTIFF_Short
| 8,027 |
97,369 | 0 | void FrameLoader::clear(bool clearWindowProperties, bool clearScriptObjects, bool clearFrameView)
{
m_frame->editor()->clear();
if (!m_needsClear)
return;
m_needsClear = false;
if (!m_frame->document()->inPageCache()) {
m_frame->document()->cancelParsing();
m_frame->document()->stopActiveDOMObjects();
if (m_frame->document()->attached()) {
m_frame->document()->willRemove();
m_frame->document()->detach();
m_frame->document()->removeFocusedNodeOfSubtree(m_frame->document());
}
}
if (clearWindowProperties) {
m_frame->clearDOMWindow();
m_frame->script()->clearWindowShell();
}
m_frame->selection()->clear();
m_frame->eventHandler()->clear();
if (clearFrameView && m_frame->view())
m_frame->view()->clear();
m_frame->setSelectionGranularity(CharacterGranularity);
m_frame->setDocument(0);
m_decoder = 0;
m_containsPlugIns = false;
if (clearScriptObjects)
m_frame->script()->clearScriptObjects();
m_frame->redirectScheduler()->clear();
m_checkTimer.stop();
m_shouldCallCheckCompleted = false;
m_shouldCallCheckLoadComplete = false;
m_receivedData = false;
m_isDisplayingInitialEmptyDocument = false;
if (!m_encodingWasChosenByUser)
m_encoding = String();
}
| 8,028 |
186,055 | 1 | scoped_refptr<NGLayoutResult> NGFlexLayoutAlgorithm::Layout() {
DCHECK(!NeedMinMaxSize(ConstraintSpace(), Style()))
<< "Don't support that yet";
borders_ = ComputeBorders(ConstraintSpace(), Style());
padding_ = ComputePadding(ConstraintSpace(), Style());
// TODO(dgrogan): Pass padding+borders as optimization.
border_box_size_ = CalculateBorderBoxSize(ConstraintSpace(), Node());
border_scrollbar_padding_ =
CalculateBorderScrollbarPadding(ConstraintSpace(), Node());
content_box_size_ =
ShrinkAvailableSize(border_box_size_, border_scrollbar_padding_);
const LayoutUnit line_break_length = MainAxisContentExtent(LayoutUnit::Max());
FlexLayoutAlgorithm algorithm(&Style(), line_break_length);
bool is_column = Style().IsColumnFlexDirection();
bool is_horizontal_flow = algorithm.IsHorizontalFlow();
for (NGLayoutInputNode generic_child = Node().FirstChild(); generic_child;
generic_child = generic_child.NextSibling()) {
NGBlockNode child = ToNGBlockNode(generic_child);
if (child.IsOutOfFlowPositioned())
continue;
const ComputedStyle& child_style = child.Style();
NGConstraintSpaceBuilder space_builder(ConstraintSpace(),
child_style.GetWritingMode(),
/* is_new_fc */ true);
SetOrthogonalFallbackInlineSizeIfNeeded(Style(), child, &space_builder);
// TODO(dgrogan): Set IsShrinkToFit here when cross axis size is auto, at
// least for correctness. For perf, don't set it if the item will later be
// stretched or we won't hit the cache later.
NGConstraintSpace child_space =
space_builder.SetAvailableSize(content_box_size_)
.SetPercentageResolutionSize(content_box_size_)
.ToConstraintSpace();
NGBoxStrut border_padding_in_child_writing_mode =
ComputeBorders(child_space, child_style) +
ComputePadding(child_space, child_style);
NGPhysicalBoxStrut physical_border_padding(
border_padding_in_child_writing_mode.ConvertToPhysical(
child_style.GetWritingMode(), child_style.Direction()));
LayoutUnit main_axis_border_and_padding =
is_horizontal_flow ? physical_border_padding.HorizontalSum()
: physical_border_padding.VerticalSum();
// ComputeMinMaxSize will layout the child if it has an orthogonal writing
// mode. MinMaxSize will be in the container's inline direction.
MinMaxSizeInput zero_input;
MinMaxSize min_max_sizes_border_box = child.ComputeMinMaxSize(
ConstraintSpace().GetWritingMode(), zero_input, &child_space);
// TODO(dgrogan): Don't layout every time, just when you need to.
scoped_refptr<NGLayoutResult> layout_result =
child.Layout(child_space, nullptr /*break token*/);
NGFragment fragment_in_child_writing_mode(
child_style.GetWritingMode(), *layout_result->PhysicalFragment());
LayoutUnit flex_base_border_box;
Length length_in_main_axis =
is_horizontal_flow ? child_style.Width() : child_style.Height();
if (child_style.FlexBasis().IsAuto() && length_in_main_axis.IsAuto()) {
if (MainAxisIsInlineAxis(child))
flex_base_border_box = min_max_sizes_border_box.max_size;
else
flex_base_border_box = fragment_in_child_writing_mode.BlockSize();
} else {
Length length_to_resolve = child_style.FlexBasis();
if (length_to_resolve.IsAuto())
length_to_resolve = length_in_main_axis;
DCHECK(!length_to_resolve.IsAuto());
if (MainAxisIsInlineAxis(child)) {
flex_base_border_box = ResolveInlineLength(
child_space, child_style, min_max_sizes_border_box,
length_to_resolve, LengthResolveType::kContentSize,
LengthResolvePhase::kLayout);
} else {
// Flex container's main axis is in child's block direction. Child's
// flex basis is in child's block direction.
flex_base_border_box = ResolveBlockLength(
child_space, child_style, length_to_resolve,
fragment_in_child_writing_mode.BlockSize(),
LengthResolveType::kContentSize, LengthResolvePhase::kLayout);
}
}
// Spec calls this "flex base size"
// https://www.w3.org/TR/css-flexbox-1/#algo-main-item
// Blink's FlexibleBoxAlgorithm expects it to be content + scrollbar widths,
// but no padding or border.
LayoutUnit flex_base_content_size =
flex_base_border_box - main_axis_border_and_padding;
NGPhysicalBoxStrut physical_child_margins =
ComputePhysicalMargins(child_space, child_style);
LayoutUnit main_axis_margin = is_horizontal_flow
? physical_child_margins.HorizontalSum()
: physical_child_margins.VerticalSum();
// TODO(dgrogan): When child has a min/max-{width,height} set, call
// Resolve{Inline,Block}Length here with child's style and constraint space.
// Pass kMinSize, kMaxSize as appropriate.
// Further, min-width:auto has special meaning for flex items. We'll need to
// calculate that here by either extracting the logic from legacy or
// reimplementing. When resolved, pass it here.
// https://www.w3.org/TR/css-flexbox-1/#min-size-auto
MinMaxSize min_max_sizes_in_main_axis_direction{LayoutUnit(),
LayoutUnit::Max()};
algorithm
.emplace_back(child.GetLayoutBox(), flex_base_content_size,
min_max_sizes_in_main_axis_direction,
main_axis_border_and_padding, main_axis_margin)
.ng_input_node = child;
}
LayoutUnit main_axis_offset = border_scrollbar_padding_.inline_start;
LayoutUnit cross_axis_offset = border_scrollbar_padding_.block_start;
if (is_column) {
main_axis_offset = border_scrollbar_padding_.block_start;
cross_axis_offset = border_scrollbar_padding_.inline_start;
}
FlexLine* line;
LayoutUnit max_main_axis_extent;
while ((line = algorithm.ComputeNextFlexLine(border_box_size_.inline_size))) {
line->SetContainerMainInnerSize(
MainAxisContentExtent(line->sum_hypothetical_main_size));
line->FreezeInflexibleItems();
while (!line->ResolveFlexibleLengths()) {
continue;
}
for (wtf_size_t i = 0; i < line->line_items.size(); ++i) {
FlexItem& flex_item = line->line_items[i];
WritingMode child_writing_mode =
flex_item.box->StyleRef().GetWritingMode();
NGConstraintSpaceBuilder space_builder(ConstraintSpace(),
child_writing_mode,
/* is_new_fc */ true);
SetOrthogonalFallbackInlineSizeIfNeeded(Style(), flex_item.ng_input_node,
&space_builder);
NGLogicalSize available_size;
if (is_column) {
available_size.inline_size = content_box_size_.inline_size;
available_size.block_size = flex_item.flexed_content_size +
flex_item.main_axis_border_and_padding;
space_builder.SetIsFixedSizeBlock(true);
} else {
available_size.inline_size = flex_item.flexed_content_size +
flex_item.main_axis_border_and_padding;
available_size.block_size = content_box_size_.block_size;
space_builder.SetIsFixedSizeInline(true);
}
space_builder.SetAvailableSize(available_size);
space_builder.SetPercentageResolutionSize(content_box_size_);
NGConstraintSpace child_space = space_builder.ToConstraintSpace();
flex_item.layout_result =
ToNGBlockNode(flex_item.ng_input_node)
.Layout(child_space, nullptr /*break token*/);
flex_item.cross_axis_size =
is_horizontal_flow
? flex_item.layout_result->PhysicalFragment()->Size().height
: flex_item.layout_result->PhysicalFragment()->Size().width;
// TODO(dgrogan): Port logic from
// LayoutFlexibleBox::CrossAxisIntrinsicExtentForChild?
flex_item.cross_axis_intrinsic_size = flex_item.cross_axis_size;
}
// cross_axis_offset is updated in each iteration of the loop, for passing
// in to the next iteration.
line->ComputeLineItemsPosition(main_axis_offset, cross_axis_offset);
max_main_axis_extent =
std::max(max_main_axis_extent, line->main_axis_extent);
}
LayoutUnit intrinsic_block_content_size = cross_axis_offset;
if (is_column)
intrinsic_block_content_size = max_main_axis_extent;
LayoutUnit intrinsic_block_size =
intrinsic_block_content_size + border_scrollbar_padding_.BlockSum();
LayoutUnit block_size = ComputeBlockSizeForFragment(
ConstraintSpace(), Style(), intrinsic_block_size);
// Apply stretch alignment.
// TODO(dgrogan): Move this to its own method, which means making some of the
// container-specific local variables into data members.
LayoutUnit final_content_cross_size =
block_size - border_scrollbar_padding_.BlockSum();
if (is_column) {
final_content_cross_size =
border_box_size_.inline_size - border_scrollbar_padding_.InlineSum();
}
if (!algorithm.IsMultiline() && !algorithm.FlexLines().IsEmpty())
algorithm.FlexLines()[0].cross_axis_extent = final_content_cross_size;
for (FlexLine& line_context : algorithm.FlexLines()) {
for (wtf_size_t child_number = 0;
child_number < line_context.line_items.size(); ++child_number) {
FlexItem& flex_item = line_context.line_items[child_number];
if (flex_item.Alignment() == ItemPosition::kStretch) {
flex_item.ComputeStretchedSize();
WritingMode child_writing_mode =
flex_item.box->StyleRef().GetWritingMode();
NGConstraintSpaceBuilder space_builder(ConstraintSpace(),
child_writing_mode,
/* is_new_fc */ true);
SetOrthogonalFallbackInlineSizeIfNeeded(
Style(), flex_item.ng_input_node, &space_builder);
NGLogicalSize available_size(flex_item.flexed_content_size +
flex_item.main_axis_border_and_padding,
flex_item.cross_axis_size);
if (is_column)
available_size.Flip();
space_builder.SetAvailableSize(available_size);
space_builder.SetPercentageResolutionSize(content_box_size_);
space_builder.SetIsFixedSizeInline(true);
space_builder.SetIsFixedSizeBlock(true);
NGConstraintSpace child_space = space_builder.ToConstraintSpace();
flex_item.layout_result =
ToNGBlockNode(flex_item.ng_input_node)
.Layout(child_space, /* break_token */ nullptr);
}
container_builder_.AddChild(
*flex_item.layout_result,
{flex_item.desired_location.X(), flex_item.desired_location.Y()});
}
}
container_builder_.SetBlockSize(block_size);
container_builder_.SetInlineSize(border_box_size_.inline_size);
container_builder_.SetBorders(ComputeBorders(ConstraintSpace(), Style()));
container_builder_.SetPadding(ComputePadding(ConstraintSpace(), Style()));
return container_builder_.ToBoxFragment();
}
| 8,029 |
69,649 | 0 | rend_services_add_filenames_to_lists(smartlist_t *open_lst,
smartlist_t *stat_lst)
{
if (!rend_service_list)
return;
SMARTLIST_FOREACH_BEGIN(rend_service_list, rend_service_t *, s) {
if (!rend_service_is_ephemeral(s)) {
rend_service_add_filenames_to_list(open_lst, s);
smartlist_add_strdup(stat_lst, s->directory);
}
} SMARTLIST_FOREACH_END(s);
}
| 8,030 |
158,026 | 0 | LocalFrame* LocalFrameClientImpl::CreateFrame(
const AtomicString& name,
HTMLFrameOwnerElement* owner_element) {
return web_frame_->CreateChildFrame(name, owner_element);
}
| 8,031 |
125,138 | 0 | void NotifyPluginsOfActivation() {
DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO));
for (PluginProcessHostIterator iter; !iter.Done(); ++iter)
iter->OnAppActivation();
}
| 8,032 |
158,673 | 0 | void GLES2DecoderImpl::DoBlitFramebufferCHROMIUM(
GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1,
GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1,
GLbitfield mask, GLenum filter) {
const char* func_name = "glBlitFramebufferCHROMIUM";
DCHECK(!ShouldDeferReads() && !ShouldDeferDraws());
if (!CheckBoundFramebufferValid(func_name)) {
return;
}
if (GetBoundFramebufferSamples(GL_DRAW_FRAMEBUFFER) > 0) {
LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, func_name,
"destination framebuffer is multisampled");
return;
}
GLsizei read_buffer_samples = GetBoundFramebufferSamples(GL_READ_FRAMEBUFFER);
if (read_buffer_samples > 0 &&
(srcX0 != dstX0 || srcY0 != dstY0 || srcX1 != dstX1 || srcY1 != dstY1)) {
LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, func_name,
"src framebuffer is multisampled, but src/dst regions are different");
return;
}
GLbitfield mask_blit = mask;
bool read_framebuffer_miss_image = false;
enum FeedbackLoopState {
FeedbackLoopTrue,
FeedbackLoopFalse,
FeedbackLoopUnknown
};
FeedbackLoopState is_feedback_loop = FeedbackLoopUnknown;
Framebuffer* read_framebuffer =
framebuffer_state_.bound_read_framebuffer.get();
Framebuffer* draw_framebuffer =
framebuffer_state_.bound_draw_framebuffer.get();
if (!read_framebuffer && !draw_framebuffer) {
is_feedback_loop = FeedbackLoopTrue;
} else if (!read_framebuffer || !draw_framebuffer) {
is_feedback_loop = FeedbackLoopFalse;
if (read_framebuffer) {
if (((mask & GL_COLOR_BUFFER_BIT) != 0 &&
!GetBoundReadFramebufferInternalFormat()) ||
((mask & GL_DEPTH_BUFFER_BIT) != 0 &&
!read_framebuffer->GetAttachment(GL_DEPTH_ATTACHMENT) &&
BoundFramebufferHasDepthAttachment()) ||
((mask & GL_STENCIL_BUFFER_BIT) != 0 &&
!read_framebuffer->GetAttachment(GL_STENCIL_ATTACHMENT) &&
BoundFramebufferHasStencilAttachment())) {
read_framebuffer_miss_image = true;
}
}
} else {
DCHECK(read_framebuffer && draw_framebuffer);
if ((mask & GL_DEPTH_BUFFER_BIT) != 0) {
const Framebuffer::Attachment* depth_buffer_read =
read_framebuffer->GetAttachment(GL_DEPTH_ATTACHMENT);
const Framebuffer::Attachment* depth_buffer_draw =
draw_framebuffer->GetAttachment(GL_DEPTH_ATTACHMENT);
if (!depth_buffer_draw || !depth_buffer_read) {
mask_blit &= ~GL_DEPTH_BUFFER_BIT;
if (depth_buffer_draw) {
read_framebuffer_miss_image = true;
}
} else if (depth_buffer_draw->IsSameAttachment(depth_buffer_read)) {
is_feedback_loop = FeedbackLoopTrue;
}
}
if ((mask & GL_STENCIL_BUFFER_BIT) != 0) {
const Framebuffer::Attachment* stencil_buffer_read =
read_framebuffer->GetAttachment(GL_STENCIL_ATTACHMENT);
const Framebuffer::Attachment* stencil_buffer_draw =
draw_framebuffer->GetAttachment(GL_STENCIL_ATTACHMENT);
if (!stencil_buffer_draw || !stencil_buffer_read) {
mask_blit &= ~GL_STENCIL_BUFFER_BIT;
if (stencil_buffer_draw) {
read_framebuffer_miss_image = true;
}
} else if (stencil_buffer_draw->IsSameAttachment(stencil_buffer_read)) {
is_feedback_loop = FeedbackLoopTrue;
}
}
}
GLenum src_internal_format = GetBoundReadFramebufferInternalFormat();
GLenum src_type = GetBoundReadFramebufferTextureType();
bool read_buffer_has_srgb = GLES2Util::GetColorEncodingFromInternalFormat(
src_internal_format) == GL_SRGB;
bool draw_buffers_has_srgb = false;
if ((mask & GL_COLOR_BUFFER_BIT) != 0) {
bool is_src_signed_int =
GLES2Util::IsSignedIntegerFormat(src_internal_format);
bool is_src_unsigned_int =
GLES2Util::IsUnsignedIntegerFormat(src_internal_format);
DCHECK(!is_src_signed_int || !is_src_unsigned_int);
if ((is_src_signed_int || is_src_unsigned_int) && filter == GL_LINEAR) {
LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, func_name,
"invalid filter for integer format");
return;
}
GLenum src_sized_format =
GLES2Util::ConvertToSizedFormat(src_internal_format, src_type);
DCHECK(read_framebuffer || (is_feedback_loop != FeedbackLoopUnknown));
const Framebuffer::Attachment* read_buffer =
is_feedback_loop == FeedbackLoopUnknown ?
read_framebuffer->GetReadBufferAttachment() : nullptr;
bool draw_buffer_has_image = false;
for (uint32_t ii = 0; ii < group_->max_draw_buffers(); ++ii) {
GLenum dst_format = GetBoundColorDrawBufferInternalFormat(
static_cast<GLint>(ii));
GLenum dst_type = GetBoundColorDrawBufferType(static_cast<GLint>(ii));
if (dst_format == 0)
continue;
draw_buffer_has_image = true;
if (!src_internal_format) {
read_framebuffer_miss_image = true;
}
if (GLES2Util::GetColorEncodingFromInternalFormat(dst_format) == GL_SRGB)
draw_buffers_has_srgb = true;
if (read_buffer_samples > 0 &&
(src_sized_format !=
GLES2Util::ConvertToSizedFormat(dst_format, dst_type))) {
LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, func_name,
"src and dst formats differ for color");
return;
}
bool is_dst_signed_int = GLES2Util::IsSignedIntegerFormat(dst_format);
bool is_dst_unsigned_int = GLES2Util::IsUnsignedIntegerFormat(dst_format);
DCHECK(!is_dst_signed_int || !is_dst_unsigned_int);
if (is_src_signed_int != is_dst_signed_int ||
is_src_unsigned_int != is_dst_unsigned_int) {
LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, func_name,
"incompatible src/dst color formats");
return;
}
if (is_feedback_loop == FeedbackLoopUnknown) {
GLenum attachment = static_cast<GLenum>(GL_COLOR_ATTACHMENT0 + ii);
DCHECK(draw_framebuffer);
const Framebuffer::Attachment* draw_buffer =
draw_framebuffer->GetAttachment(attachment);
if (!draw_buffer || !read_buffer) {
continue;
}
if (draw_buffer->IsSameAttachment(read_buffer)) {
is_feedback_loop = FeedbackLoopTrue;
break;
}
}
}
if (draw_framebuffer && !draw_buffer_has_image)
mask_blit &= ~GL_COLOR_BUFFER_BIT;
}
if (is_feedback_loop == FeedbackLoopTrue) {
LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, func_name,
"source buffer and destination buffers are identical");
return;
}
if (read_framebuffer_miss_image == true) {
LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, func_name,
"The designated attachment point(s) in read framebuffer miss image");
return;
}
if ((mask & (GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT)) != 0) {
if (filter != GL_NEAREST) {
LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, func_name,
"invalid filter for depth/stencil");
return;
}
}
mask = mask_blit;
if (!mask)
return;
if (((mask & GL_DEPTH_BUFFER_BIT) != 0 &&
(GetBoundFramebufferDepthFormat(GL_READ_FRAMEBUFFER) !=
GetBoundFramebufferDepthFormat(GL_DRAW_FRAMEBUFFER))) ||
((mask & GL_STENCIL_BUFFER_BIT) != 0 &&
((GetBoundFramebufferStencilFormat(GL_READ_FRAMEBUFFER) !=
GetBoundFramebufferStencilFormat(GL_DRAW_FRAMEBUFFER))))) {
LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, func_name,
"src and dst formats differ for depth/stencil");
return;
}
base::CheckedNumeric<GLint> src_width_temp = srcX1;
src_width_temp -= srcX0;
base::CheckedNumeric<GLint> src_height_temp = srcY1;
src_height_temp -= srcY0;
base::CheckedNumeric<GLint> dst_width_temp = dstX1;
dst_width_temp -= dstX0;
base::CheckedNumeric<GLint> dst_height_temp = dstY1;
dst_height_temp -= dstY0;
if (!src_width_temp.Abs().IsValid() || !src_height_temp.Abs().IsValid() ||
!dst_width_temp.Abs().IsValid() || !dst_height_temp.Abs().IsValid()) {
LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, func_name,
"the width or height of src or dst region overflowed");
return;
}
if (workarounds().adjust_src_dst_region_for_blitframebuffer) {
gfx::Size read_size = GetBoundReadFramebufferSize();
gfx::Rect src_bounds(0, 0, read_size.width(), read_size.height());
GLint src_x = srcX1 > srcX0 ? srcX0 : srcX1;
GLint src_y = srcY1 > srcY0 ? srcY0 : srcY1;
GLuint src_width = 0, src_height = 0;
if (!src_width_temp.Abs().AssignIfValid(&src_width))
src_width = 0;
if (!src_height_temp.Abs().AssignIfValid(&src_height))
src_height = 0;
gfx::Rect src_region(src_x, src_y, src_width, src_height);
if (!src_bounds.Contains(src_region) &&
(src_width != 0) && (src_height != 0)) {
src_bounds.Intersect(src_region);
GLuint src_real_width = src_bounds.width();
GLuint src_real_height = src_bounds.height();
GLuint xoffset = src_bounds.x() - src_x;
GLuint yoffset = src_bounds.y() - src_y;
if (((srcX1 > srcX0) && (dstX1 < dstX0)) ||
((srcX1 < srcX0) && (dstX1 > dstX0))) {
xoffset = src_x + src_width - src_bounds.x() - src_bounds.width();
}
if (((srcY1 > srcY0) && (dstY1 < dstY0)) ||
((srcY1 < srcY0) && (dstY1 > dstY0))) {
yoffset = src_y + src_height - src_bounds.y() - src_bounds.height();
}
GLint dst_x = dstX1 > dstX0 ? dstX0 : dstX1;
GLint dst_y = dstY1 > dstY0 ? dstY0 : dstY1;
base::CheckedNumeric<GLint> dst_width_temp = dstX1;
dst_width_temp -= dstX0;
base::CheckedNumeric<GLint> dst_height_temp = dstY1;
dst_height_temp -= dstY0;
GLuint dst_width = 0, dst_height = 0;
if (!dst_width_temp.IsValid() || !dst_height_temp.IsValid()) {
LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, func_name,
"the width or height of dst region overflow");
return;
}
if (!dst_width_temp.Abs().AssignIfValid(&dst_width))
dst_width = 0;
if (!dst_height_temp.Abs().AssignIfValid(&dst_height))
dst_height = 0;
GLfloat dst_mapping_width =
static_cast<GLfloat>(src_real_width) * dst_width / src_width;
GLfloat dst_mapping_height =
static_cast<GLfloat>(src_real_height) * dst_height / src_height;
GLfloat dst_mapping_xoffset =
static_cast<GLfloat>(xoffset) * dst_width / src_width;
GLfloat dst_mapping_yoffset =
static_cast<GLfloat>(yoffset) * dst_height / src_height;
GLuint dst_mapping_x0 =
std::round(dst_x + dst_mapping_xoffset);
GLuint dst_mapping_y0 =
std::round(dst_y + dst_mapping_yoffset);
GLuint dst_mapping_x1 =
std::round(dst_x + dst_mapping_xoffset + dst_mapping_width);
GLuint dst_mapping_y1 =
std::round(dst_y + dst_mapping_yoffset + dst_mapping_height);
srcX0 = srcX0 < srcX1 ?
src_bounds.x() : src_bounds.x() + src_bounds.width();
srcY0 = srcY0 < srcY1 ?
src_bounds.y() : src_bounds.y() + src_bounds.height();
srcX1 = srcX0 < srcX1 ?
src_bounds.x() + src_bounds.width() : src_bounds.x();
srcY1 = srcY0 < srcY1 ?
src_bounds.y() + src_bounds.height() : src_bounds.y();
dstX0 = dstX0 < dstX1 ? dst_mapping_x0 : dst_mapping_x1;
dstY0 = dstY0 < dstY1 ? dst_mapping_y0 : dst_mapping_y1;
dstX1 = dstX0 < dstX1 ? dst_mapping_x1 : dst_mapping_x0;
dstY1 = dstY0 < dstY1 ? dst_mapping_y1 : dst_mapping_y0;
}
}
bool enable_srgb =
(read_buffer_has_srgb || draw_buffers_has_srgb) &&
((mask & GL_COLOR_BUFFER_BIT) != 0);
bool encode_srgb_only =
(draw_buffers_has_srgb && !read_buffer_has_srgb) &&
((mask & GL_COLOR_BUFFER_BIT) != 0);
if (!enable_srgb ||
read_buffer_samples > 0 ||
!feature_info_->feature_flags().desktop_srgb_support ||
gl_version_info().IsAtLeastGL(4, 4) ||
(gl_version_info().IsAtLeastGL(4, 2) && encode_srgb_only)) {
if (enable_srgb && gl_version_info().IsAtLeastGL(4, 2)) {
state_.EnableDisableFramebufferSRGB(enable_srgb);
}
api()->glBlitFramebufferFn(srcX0, srcY0, srcX1, srcY1, dstX0, dstY0, dstX1,
dstY1, mask, filter);
return;
}
state_.EnableDisableFramebufferSRGB(true);
if (!InitializeSRGBConverter(func_name)) {
return;
}
GLenum src_format =
TextureManager::ExtractFormatFromStorageFormat(src_internal_format);
srgb_converter_->Blit(this, srcX0, srcY0, srcX1, srcY1,
dstX0, dstY0, dstX1, dstY1,
mask, filter,
GetBoundReadFramebufferSize(),
GetBoundReadFramebufferServiceId(),
src_internal_format, src_format, src_type,
GetBoundDrawFramebufferServiceId(),
read_buffer_has_srgb, draw_buffers_has_srgb,
state_.enable_flags.scissor_test);
}
| 8,033 |
150,610 | 0 | void DataReductionProxyIOData::SetPingbackReportingFraction(
float pingback_reporting_fraction) {
DCHECK(io_task_runner_->BelongsToCurrentThread());
ui_task_runner_->PostTask(
FROM_HERE,
base::BindOnce(&DataReductionProxyService::SetPingbackReportingFraction,
service_, pingback_reporting_fraction));
}
| 8,034 |
10,591 | 0 | Ins_EIF( void )
{
/* nothing to do */
}
| 8,035 |
172,096 | 0 | static uint64_t btsnoop_timestamp(void) {
struct timeval tv;
gettimeofday(&tv, NULL);
uint64_t timestamp = tv.tv_sec * 1000 * 1000LL;
timestamp += tv.tv_usec;
timestamp += BTSNOOP_EPOCH_DELTA;
return timestamp;
}
| 8,036 |
128,837 | 0 | SVGDocumentExtensions::SVGDocumentExtensions(Document* document)
: m_document(document)
, m_resourcesCache(adoptPtr(new SVGResourcesCache))
#if !ASSERT_DISABLED
, m_inRelativeLengthSVGRootsInvalidation(false)
#endif
{
}
| 8,037 |
74,417 | 0 | BOOLEAN AnalyzeL2Hdr(
PNET_PACKET_INFO packetInfo)
{
PETH_HEADER dataBuffer = (PETH_HEADER) packetInfo->headersBuffer;
if (packetInfo->dataLength < ETH_HEADER_SIZE)
return FALSE;
packetInfo->ethDestAddr = dataBuffer->DstAddr;
if (ETH_IS_BROADCAST(dataBuffer))
{
packetInfo->isBroadcast = TRUE;
}
else if (ETH_IS_MULTICAST(dataBuffer))
{
packetInfo->isMulticast = TRUE;
}
else
{
packetInfo->isUnicast = TRUE;
}
if(ETH_HAS_PRIO_HEADER(dataBuffer))
{
PVLAN_HEADER vlanHdr = ETH_GET_VLAN_HDR(dataBuffer);
if(packetInfo->dataLength < ETH_HEADER_SIZE + ETH_PRIORITY_HEADER_SIZE)
return FALSE;
packetInfo->hasVlanHeader = TRUE;
packetInfo->Vlan.UserPriority = VLAN_GET_USER_PRIORITY(vlanHdr);
packetInfo->Vlan.VlanId = VLAN_GET_VLAN_ID(vlanHdr);
packetInfo->L2HdrLen = ETH_HEADER_SIZE + ETH_PRIORITY_HEADER_SIZE;
AnalyzeL3Proto(vlanHdr->EthType, packetInfo);
}
else
{
packetInfo->L2HdrLen = ETH_HEADER_SIZE;
AnalyzeL3Proto(dataBuffer->EthType, packetInfo);
}
packetInfo->L2PayloadLen = packetInfo->dataLength - packetInfo->L2HdrLen;
return TRUE;
}
| 8,038 |
74,859 | 0 | static int pva_read_packet(AVFormatContext *s, AVPacket *pkt) {
AVIOContext *pb = s->pb;
int64_t pva_pts;
int ret, length, streamid;
if (read_part_of_packet(s, &pva_pts, &length, &streamid, 1) < 0 ||
(ret = av_get_packet(pb, pkt, length)) <= 0)
return AVERROR(EIO);
pkt->stream_index = streamid - 1;
pkt->pts = pva_pts;
return ret;
}
| 8,039 |
114,857 | 0 | void TestingAutomationProvider::BuildSimpleWebKeyEvent(
WebKit::WebInputEvent::Type type,
int windows_key_code,
NativeWebKeyboardEvent* event) {
event->nativeKeyCode = 0;
event->windowsKeyCode = windows_key_code;
event->setKeyIdentifierFromWindowsKeyCode();
event->type = type;
event->modifiers = 0;
event->isSystemKey = false;
event->timeStampSeconds = base::Time::Now().ToDoubleT();
event->skip_in_browser = true;
}
| 8,040 |
125,122 | 0 | void PluginServiceImpl::GetAllowedPluginForOpenChannelToPlugin(
int render_process_id,
int render_view_id,
const GURL& url,
const GURL& page_url,
const std::string& mime_type,
PluginProcessHost::Client* client,
ResourceContext* resource_context) {
webkit::WebPluginInfo info;
bool allow_wildcard = true;
bool found = GetPluginInfo(
render_process_id, render_view_id, resource_context,
url, page_url, mime_type, allow_wildcard,
NULL, &info, NULL);
FilePath plugin_path;
if (found)
plugin_path = info.path;
BrowserThread::PostTask(
BrowserThread::IO, FROM_HERE,
base::Bind(&PluginServiceImpl::FinishOpenChannelToPlugin,
base::Unretained(this),
render_process_id,
plugin_path,
client));
}
| 8,041 |
80,616 | 0 | GF_Err trun_Size(GF_Box *s)
{
u32 i, count;
GF_TrackFragmentRunBox *ptr = (GF_TrackFragmentRunBox *)s;
ptr->size += 4;
if (ptr->flags & GF_ISOM_TRUN_DATA_OFFSET) ptr->size += 4;
if (ptr->flags & GF_ISOM_TRUN_FIRST_FLAG) ptr->size += 4;
count = gf_list_count(ptr->entries);
for (i=0; i<count; i++) {
if (ptr->flags & GF_ISOM_TRUN_DURATION) ptr->size += 4;
if (ptr->flags & GF_ISOM_TRUN_SIZE) ptr->size += 4;
if (ptr->flags & GF_ISOM_TRUN_FLAGS) ptr->size += 4;
if (ptr->flags & GF_ISOM_TRUN_CTS_OFFSET) ptr->size += 4;
}
return GF_OK;
}
| 8,042 |
150,524 | 0 | std::string ReadSubresourceFromRenderer(Browser* browser,
const GURL& url,
bool asynchronous_xhr = true) {
static const char asynchronous_script[] = R"((url => {
var xhr = new XMLHttpRequest();
xhr.open('GET', url, true);
xhr.onload = () => domAutomationController.send(xhr.responseText);
xhr.send();
}))";
static const char synchronous_script[] = R"((url => {
var xhr = new XMLHttpRequest();
xhr.open('GET', url, false);
xhr.send();
domAutomationController.send(xhr.responseText);
}))";
std::string result;
EXPECT_TRUE(ExecuteScriptAndExtractString(
browser->tab_strip_model()->GetActiveWebContents(),
base::StrCat({asynchronous_xhr ? asynchronous_script : synchronous_script,
"('", url.spec(), "')"}),
&result));
return result;
}
| 8,043 |
96,129 | 0 | static int timer_load(Unit *u) {
Timer *t = TIMER(u);
int r;
assert(u);
assert(u->load_state == UNIT_STUB);
r = unit_load_fragment_and_dropin(u);
if (r < 0)
return r;
if (u->load_state == UNIT_LOADED) {
if (set_isempty(u->dependencies[UNIT_TRIGGERS])) {
Unit *x;
r = unit_load_related_unit(u, ".service", &x);
if (r < 0)
return r;
r = unit_add_two_dependencies(u, UNIT_BEFORE, UNIT_TRIGGERS, x, true);
if (r < 0)
return r;
}
r = timer_setup_persistent(t);
if (r < 0)
return r;
r = timer_add_default_dependencies(t);
if (r < 0)
return r;
}
return timer_verify(t);
}
| 8,044 |
71,312 | 0 | static void SetHeaderFromIPL(Image *image, IPLInfo *ipl){
image->columns = ipl->width;
image->rows = ipl->height;
image->depth = ipl->depth;
image->x_resolution = 1;
image->y_resolution = 1;
}
| 8,045 |
36,900 | 0 | xfs_attrlist_by_handle(
struct file *parfilp,
void __user *arg)
{
int error = -ENOMEM;
attrlist_cursor_kern_t *cursor;
xfs_fsop_attrlist_handlereq_t al_hreq;
struct dentry *dentry;
char *kbuf;
if (!capable(CAP_SYS_ADMIN))
return -XFS_ERROR(EPERM);
if (copy_from_user(&al_hreq, arg, sizeof(xfs_fsop_attrlist_handlereq_t)))
return -XFS_ERROR(EFAULT);
if (al_hreq.buflen < sizeof(struct attrlist) ||
al_hreq.buflen > XATTR_LIST_MAX)
return -XFS_ERROR(EINVAL);
/*
* Reject flags, only allow namespaces.
*/
if (al_hreq.flags & ~(ATTR_ROOT | ATTR_SECURE))
return -XFS_ERROR(EINVAL);
dentry = xfs_handlereq_to_dentry(parfilp, &al_hreq.hreq);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
kbuf = kmem_zalloc_large(al_hreq.buflen, KM_SLEEP);
if (!kbuf)
goto out_dput;
cursor = (attrlist_cursor_kern_t *)&al_hreq.pos;
error = -xfs_attr_list(XFS_I(dentry->d_inode), kbuf, al_hreq.buflen,
al_hreq.flags, cursor);
if (error)
goto out_kfree;
if (copy_to_user(al_hreq.buffer, kbuf, al_hreq.buflen))
error = -EFAULT;
out_kfree:
kmem_free(kbuf);
out_dput:
dput(dentry);
return error;
}
| 8,046 |
20,111 | 0 | int __sk_mem_schedule(struct sock *sk, int size, int kind)
{
struct proto *prot = sk->sk_prot;
int amt = sk_mem_pages(size);
long allocated;
int parent_status = UNDER_LIMIT;
sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
allocated = sk_memory_allocated_add(sk, amt, &parent_status);
/* Under limit. */
if (parent_status == UNDER_LIMIT &&
allocated <= sk_prot_mem_limits(sk, 0)) {
sk_leave_memory_pressure(sk);
return 1;
}
/* Under pressure. (we or our parents) */
if ((parent_status > SOFT_LIMIT) ||
allocated > sk_prot_mem_limits(sk, 1))
sk_enter_memory_pressure(sk);
/* Over hard limit (we or our parents) */
if ((parent_status == OVER_LIMIT) ||
(allocated > sk_prot_mem_limits(sk, 2)))
goto suppress_allocation;
/* guarantee minimum buffer size under pressure */
if (kind == SK_MEM_RECV) {
if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
return 1;
} else { /* SK_MEM_SEND */
if (sk->sk_type == SOCK_STREAM) {
if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
return 1;
} else if (atomic_read(&sk->sk_wmem_alloc) <
prot->sysctl_wmem[0])
return 1;
}
if (sk_has_memory_pressure(sk)) {
int alloc;
if (!sk_under_memory_pressure(sk))
return 1;
alloc = sk_sockets_allocated_read_positive(sk);
if (sk_prot_mem_limits(sk, 2) > alloc *
sk_mem_pages(sk->sk_wmem_queued +
atomic_read(&sk->sk_rmem_alloc) +
sk->sk_forward_alloc))
return 1;
}
suppress_allocation:
if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
sk_stream_moderate_sndbuf(sk);
/* Fail only if socket is _under_ its sndbuf.
* In this case we cannot block, so that we have to fail.
*/
if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
return 1;
}
trace_sock_exceed_buf_limit(sk, prot, allocated);
/* Alas. Undo changes. */
sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
sk_memory_allocated_sub(sk, amt);
return 0;
}
| 8,047 |
131,212 | 0 | static void anyAttributeAttributeGetter(const v8::PropertyCallbackInfo<v8::Value>& info)
{
TestObjectPython* imp = V8TestObjectPython::toNative(info.Holder());
v8SetReturnValue(info, imp->anyAttribute().v8Value());
}
| 8,048 |
126,040 | 0 | IPC::Message* AutomationProviderBookmarkModelObserver::ReleaseReply() {
return reply_message_.release();
}
| 8,049 |
91,948 | 0 | static void __blk_rq_prep_clone(struct request *dst, struct request *src)
{
dst->cpu = src->cpu;
dst->__sector = blk_rq_pos(src);
dst->__data_len = blk_rq_bytes(src);
if (src->rq_flags & RQF_SPECIAL_PAYLOAD) {
dst->rq_flags |= RQF_SPECIAL_PAYLOAD;
dst->special_vec = src->special_vec;
}
dst->nr_phys_segments = src->nr_phys_segments;
dst->ioprio = src->ioprio;
dst->extra_len = src->extra_len;
}
| 8,050 |
155,921 | 0 | void PeopleHandler::DisplaySpinner() {
configuring_sync_ = true;
const int kTimeoutSec = 30;
DCHECK(!engine_start_timer_);
engine_start_timer_.reset(new base::OneShotTimer());
engine_start_timer_->Start(FROM_HERE,
base::TimeDelta::FromSeconds(kTimeoutSec), this,
&PeopleHandler::DisplayTimeout);
FireWebUIListener("page-status-changed", base::Value(kSpinnerPageStatus));
}
| 8,051 |
74,159 | 0 | create_peer_node(
int hmode,
address_node * addr,
attr_val_fifo * options
)
{
peer_node *my_node;
attr_val *option;
int freenode;
int errflag = 0;
my_node = emalloc_zero(sizeof(*my_node));
/* Initialize node values to default */
my_node->peerversion = NTP_VERSION;
/* Now set the node to the read values */
my_node->host_mode = hmode;
my_node->addr = addr;
/*
* the options FIFO mixes items that will be saved in the
* peer_node as explicit members, such as minpoll, and
* those that are moved intact to the peer_node's peerflags
* FIFO. The options FIFO is consumed and reclaimed here.
*/
while (options != NULL) {
UNLINK_FIFO(option, *options, link);
if (NULL == option) {
free(options);
break;
}
freenode = 1;
/* Check the kind of option being set */
switch (option->attr) {
case T_Flag:
APPEND_G_FIFO(my_node->peerflags, option);
freenode = 0;
break;
case T_Minpoll:
if (option->value.i < NTP_MINPOLL ||
option->value.i > UCHAR_MAX) {
msyslog(LOG_INFO,
"minpoll: provided value (%d) is out of range [%d-%d])",
option->value.i, NTP_MINPOLL,
UCHAR_MAX);
my_node->minpoll = NTP_MINPOLL;
} else {
my_node->minpoll =
(u_char)option->value.u;
}
break;
case T_Maxpoll:
if (option->value.i < 0 ||
option->value.i > NTP_MAXPOLL) {
msyslog(LOG_INFO,
"maxpoll: provided value (%d) is out of range [0-%d])",
option->value.i, NTP_MAXPOLL);
my_node->maxpoll = NTP_MAXPOLL;
} else {
my_node->maxpoll =
(u_char)option->value.u;
}
break;
case T_Ttl:
if (option->value.u >= MAX_TTL) {
msyslog(LOG_ERR, "ttl: invalid argument");
errflag = 1;
} else {
my_node->ttl = (u_char)option->value.u;
}
break;
case T_Mode:
if (option->value.u >= UCHAR_MAX) {
msyslog(LOG_ERR, "mode: invalid argument");
errflag = 1;
} else {
my_node->ttl = (u_char)option->value.u;
}
break;
case T_Key:
if (option->value.u >= KEYID_T_MAX) {
msyslog(LOG_ERR, "key: invalid argument");
errflag = 1;
} else {
my_node->peerkey =
(keyid_t)option->value.u;
}
break;
case T_Version:
if (option->value.u >= UCHAR_MAX) {
msyslog(LOG_ERR, "version: invalid argument");
errflag = 1;
} else {
my_node->peerversion =
(u_char)option->value.u;
}
break;
case T_Ident:
my_node->group = option->value.s;
break;
default:
msyslog(LOG_ERR,
"Unknown peer/server option token %s",
token_name(option->attr));
errflag = 1;
}
if (freenode)
free(option);
}
/* Check if errors were reported. If yes, ignore the node */
if (errflag) {
free(my_node);
my_node = NULL;
}
return my_node;
}
| 8,052 |
14,607 | 0 | PHP_FUNCTION(trim)
{
php_do_trim(INTERNAL_FUNCTION_PARAM_PASSTHRU, 3);
}
| 8,053 |
111,485 | 0 | void InputHandler::notifyClientOfKeyboardVisibilityChange(bool visible, bool triggeredByFocusChange)
{
if (!isInputModeEnabled() && visible)
return;
if (!triggeredByFocusChange && processingChange() && visible)
return;
if (!m_delayKeyboardVisibilityChange) {
m_webPage->showVirtualKeyboard(visible);
return;
}
m_pendingKeyboardVisibilityChange = visible ? Visible : NotVisible;
}
| 8,054 |
133,789 | 0 | X509Certificate* SSLClientSocketOpenSSL::UpdateServerCert() {
if (server_cert_.get())
return server_cert_.get();
crypto::ScopedOpenSSL<X509, X509_free> cert(SSL_get_peer_certificate(ssl_));
if (!cert.get()) {
LOG(WARNING) << "SSL_get_peer_certificate returned NULL";
return NULL;
}
STACK_OF(X509)* chain = SSL_get_peer_cert_chain(ssl_);
X509Certificate::OSCertHandles intermediates;
if (chain) {
for (int i = 0; i < sk_X509_num(chain); ++i)
intermediates.push_back(sk_X509_value(chain, i));
}
server_cert_ = X509Certificate::CreateFromHandle(cert.get(), intermediates);
DCHECK(server_cert_.get());
return server_cert_.get();
}
| 8,055 |
159,886 | 0 | bool TopSitesImpl::IsKnownURL(const GURL& url) {
return loaded_ && cache_->IsKnownURL(url);
}
| 8,056 |
30,913 | 0 | void install_exec_creds(struct linux_binprm *bprm)
{
security_bprm_committing_creds(bprm);
commit_creds(bprm->cred);
bprm->cred = NULL;
/*
* Disable monitoring for regular users
* when executing setuid binaries. Must
* wait until new credentials are committed
* by commit_creds() above
*/
if (get_dumpable(current->mm) != SUID_DUMP_USER)
perf_event_exit_task(current);
/*
* cred_guard_mutex must be held at least to this point to prevent
* ptrace_attach() from altering our determination of the task's
* credentials; any time after this it may be unlocked.
*/
security_bprm_committed_creds(bprm);
mutex_unlock(&current->signal->cred_guard_mutex);
}
| 8,057 |
80,284 | 0 | GF_Err name_Size(GF_Box *s)
{
GF_NameBox *ptr = (GF_NameBox *)s;
if (ptr->string) ptr->size += strlen(ptr->string) + 1;
return GF_OK;
}
| 8,058 |
93,919 | 0 | virDomainSetMemoryStatsPeriod(virDomainPtr domain, int period,
unsigned int flags)
{
virConnectPtr conn;
VIR_DOMAIN_DEBUG(domain, "period=%d, flags=%x", period, flags);
virResetLastError();
virCheckDomainReturn(domain, -1);
conn = domain->conn;
virCheckReadOnlyGoto(conn->flags, error);
/* This must be positive to set the balloon collection period */
virCheckNonNegativeArgGoto(period, error);
if (conn->driver->domainSetMemoryStatsPeriod) {
int ret;
ret = conn->driver->domainSetMemoryStatsPeriod(domain, period, flags);
if (ret < 0)
goto error;
return ret;
}
virReportUnsupportedError();
error:
virDispatchError(domain->conn);
return -1;
}
| 8,059 |
134,883 | 0 | String WebPageSerializerImpl::preActionBeforeSerializeEndTag(
const Element* element, SerializeDomParam* param, bool* needSkip)
{
String result;
*needSkip = false;
if (!param->isHTMLDocument)
return result;
if (param->skipMetaElement == element) {
*needSkip = true;
} else if (isHTMLScriptElement(*element) || isHTMLScriptElement(*element)) {
ASSERT(param->isInScriptOrStyleTag);
param->isInScriptOrStyleTag = false;
}
return result;
}
| 8,060 |
131,464 | 0 | static void longAttributeAttributeGetterCallback(v8::Local<v8::String>, const v8::PropertyCallbackInfo<v8::Value>& info)
{
TRACE_EVENT_SET_SAMPLING_STATE("Blink", "DOMGetter");
TestObjectPythonV8Internal::longAttributeAttributeGetter(info);
TRACE_EVENT_SET_SAMPLING_STATE("V8", "V8Execution");
}
| 8,061 |
161,321 | 0 | std::unique_ptr<Network::Request> NetworkHandler::CreateRequestFromURLRequest(
const net::URLRequest* request) {
std::unique_ptr<DictionaryValue> headers_dict(DictionaryValue::create());
for (net::HttpRequestHeaders::Iterator it(request->extra_request_headers());
it.GetNext();) {
headers_dict->setString(it.name(), it.value());
}
if (!request->referrer().empty()) {
headers_dict->setString(net::HttpRequestHeaders::kReferer,
request->referrer());
}
std::unique_ptr<protocol::Network::Request> request_object =
Network::Request::Create()
.SetUrl(ClearUrlRef(request->url()).spec())
.SetMethod(request->method())
.SetHeaders(Object::fromValue(headers_dict.get(), nullptr))
.SetInitialPriority(resourcePriority(request->priority()))
.SetReferrerPolicy(referrerPolicy(request->referrer_policy()))
.Build();
std::string post_data;
if (GetPostData(request, &post_data))
request_object->SetPostData(std::move(post_data));
return request_object;
}
| 8,062 |
38,595 | 0 | int sta_info_insert(struct sta_info *sta)
{
int err = sta_info_insert_rcu(sta);
rcu_read_unlock();
return err;
}
| 8,063 |
92,680 | 0 | set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
/* 'current' is not kept within the tree. */
if (se->on_rq) {
/*
* Any task has to be enqueued before it get to execute on
* a CPU. So account for the time it spent waiting on the
* runqueue.
*/
update_stats_wait_end(cfs_rq, se);
__dequeue_entity(cfs_rq, se);
update_load_avg(cfs_rq, se, UPDATE_TG);
}
update_stats_curr_start(cfs_rq, se);
cfs_rq->curr = se;
/*
* Track our maximum slice length, if the CPU's load is at
* least twice that of our own weight (i.e. dont track it
* when there are only lesser-weight tasks around):
*/
if (schedstat_enabled() && rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
schedstat_set(se->statistics.slice_max,
max((u64)schedstat_val(se->statistics.slice_max),
se->sum_exec_runtime - se->prev_sum_exec_runtime));
}
se->prev_sum_exec_runtime = se->sum_exec_runtime;
}
| 8,064 |
159,030 | 0 | void PDFiumEngine::StartFind(const std::string& text, bool case_sensitive) {
DCHECK(!text.empty());
if (pages_.empty())
return;
bool first_search = (current_find_text_ != text);
int character_to_start_searching_from = 0;
if (first_search) {
std::vector<PDFiumRange> old_selection = selection_;
StopFind();
current_find_text_ = text;
if (old_selection.empty()) {
next_page_to_search_ = 0;
last_page_to_search_ = pages_.size() - 1;
last_character_index_to_search_ = -1;
} else {
next_page_to_search_ = old_selection[0].page_index();
last_character_index_to_search_ = old_selection[0].char_index();
character_to_start_searching_from = old_selection[0].char_index();
last_page_to_search_ = next_page_to_search_;
}
}
int current_page = next_page_to_search_;
if (pages_[current_page]->available()) {
base::string16 str = base::UTF8ToUTF16(text);
if (0) {
SearchUsingPDFium(str, case_sensitive, first_search,
character_to_start_searching_from, current_page);
} else {
SearchUsingICU(str, case_sensitive, first_search,
character_to_start_searching_from, current_page);
}
if (!IsPageVisible(current_page))
pages_[current_page]->Unload();
}
if (next_page_to_search_ != last_page_to_search_ ||
(first_search && last_character_index_to_search_ != -1)) {
++next_page_to_search_;
}
if (next_page_to_search_ == static_cast<int>(pages_.size()))
next_page_to_search_ = 0;
bool end_of_search =
next_page_to_search_ == last_page_to_search_ &&
((pages_.size() == 1 && last_character_index_to_search_ == -1) ||
(pages_.size() == 1 && !first_search) ||
(pages_.size() > 1 && current_page == next_page_to_search_));
if (end_of_search) {
client_->NotifyNumberOfFindResultsChanged(find_results_.size(), true);
} else {
pp::CompletionCallback callback =
find_factory_.NewCallback(&PDFiumEngine::ContinueFind);
pp::Module::Get()->core()->CallOnMainThread(0, callback,
case_sensitive ? 1 : 0);
}
}
| 8,065 |
26,817 | 0 | static int mem_open(struct inode* inode, struct file* file)
{
file->private_data = (void*)((long)current->self_exec_id);
/* OK to pass negative loff_t, we can catch out-of-range */
file->f_mode |= FMODE_UNSIGNED_OFFSET;
return 0;
}
| 8,066 |
138,683 | 0 | ui::AXTreeIDRegistry::AXTreeID RenderFrameHostImpl::GetAXTreeID() {
return ui::AXTreeIDRegistry::GetInstance()->GetOrCreateAXTreeID(
GetProcess()->GetID(), routing_id_);
}
| 8,067 |
37,255 | 0 | static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu)
{
ktime_t remaining =
hrtimer_get_remaining(&to_vmx(vcpu)->nested.preemption_timer);
u64 value;
if (ktime_to_ns(remaining) <= 0)
return 0;
value = ktime_to_ns(remaining) * vcpu->arch.virtual_tsc_khz;
do_div(value, 1000000);
return value >> VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
}
| 8,068 |
2,316 | 0 | _PUBLIC_ size_t strlen_m_ext_term(const char *s, const charset_t src_charset,
const charset_t dst_charset)
{
if (!s) {
return 0;
}
return strlen_m_ext(s, src_charset, dst_charset) + 1;
}
| 8,069 |
141,718 | 0 | void V8Debugger::asyncTaskCanceled(void* task)
{
if (!m_maxAsyncCallStackDepth)
return;
m_asyncTaskStacks.erase(task);
m_recurringTasks.erase(task);
}
| 8,070 |
137,482 | 0 | bool RunLoop::IsRunningOnCurrentThread() {
Delegate* delegate = tls_delegate.Get().Get();
return delegate && !delegate->active_run_loops_.empty();
}
| 8,071 |
117,219 | 0 | void GLES2DecoderImpl::DoCompressedTexSubImage2D(
GLenum target,
GLint level,
GLint xoffset,
GLint yoffset,
GLsizei width,
GLsizei height,
GLenum format,
GLsizei image_size,
const void * data) {
TextureManager::TextureInfo* info = GetTextureInfoForTarget(target);
if (!info) {
SetGLError(GL_INVALID_OPERATION,
"glCompressedTexSubImage2D: unknown texture for target");
return;
}
GLenum type = 0;
GLenum internal_format = 0;
if (!info->GetLevelType(target, level, &type, &internal_format)) {
SetGLError(
GL_INVALID_OPERATION,
"glCompressedTexSubImage2D: level does not exist.");
return;
}
if (internal_format != format) {
SetGLError(
GL_INVALID_OPERATION,
"glCompressedTexSubImage2D: format does not match internal format.");
return;
}
if (!info->ValidForTexture(
target, level, xoffset, yoffset, width, height, format, type)) {
SetGLError(GL_INVALID_VALUE,
"glCompressedTexSubImage2D: bad dimensions.");
return;
}
glCompressedTexSubImage2D(
target, level, xoffset, yoffset, width, height, format, image_size, data);
}
| 8,072 |
153,453 | 0 | void TabStrip::PaintChildren(const views::PaintInfo& paint_info) {
bool is_dragging = false;
Tab* active_tab = nullptr;
Tabs tabs_dragging;
Tabs selected_and_hovered_tabs;
const auto paint_or_add_to_tabs = [&paint_info,
&selected_and_hovered_tabs](Tab* tab) {
if (tab->tab_style()->GetZValue() > 0.0) {
selected_and_hovered_tabs.push_back(tab);
} else {
tab->Paint(paint_info);
}
};
const auto paint_closing_tabs = [=](int index) {
if (tabs_closing_map_.find(index) == tabs_closing_map_.end())
return;
for (Tab* tab : base::Reversed(tabs_closing_map_[index]))
paint_or_add_to_tabs(tab);
};
paint_closing_tabs(tab_count());
int active_tab_index = -1;
for (int i = tab_count() - 1; i >= 0; --i) {
Tab* tab = tab_at(i);
if (tab->dragging() && !stacked_layout_) {
is_dragging = true;
if (tab->IsActive()) {
active_tab = tab;
active_tab_index = i;
} else {
tabs_dragging.push_back(tab);
}
} else if (tab->IsActive()) {
active_tab = tab;
active_tab_index = i;
} else if (!stacked_layout_) {
paint_or_add_to_tabs(tab);
}
paint_closing_tabs(i);
}
if (stacked_layout_ && active_tab_index >= 0) {
for (int i = 0; i < active_tab_index; ++i) {
Tab* tab = tab_at(i);
tab->Paint(paint_info);
}
for (int i = tab_count() - 1; i > active_tab_index; --i) {
Tab* tab = tab_at(i);
tab->Paint(paint_info);
}
}
std::stable_sort(selected_and_hovered_tabs.begin(),
selected_and_hovered_tabs.end(), [](Tab* tab1, Tab* tab2) {
return tab1->tab_style()->GetZValue() <
tab2->tab_style()->GetZValue();
});
for (Tab* tab : selected_and_hovered_tabs)
tab->Paint(paint_info);
for (const auto& header_pair : group_headers_)
header_pair.second->Paint(paint_info);
if (active_tab && !is_dragging)
active_tab->Paint(paint_info);
if (!new_tab_button_->layer())
new_tab_button_->Paint(paint_info);
for (size_t i = 0; i < tabs_dragging.size(); ++i)
tabs_dragging[i]->Paint(paint_info);
if (active_tab && is_dragging)
active_tab->Paint(paint_info);
}
| 8,073 |
121,335 | 0 | void set_client(WebURLLoaderClient* client) { client_ = client; }
| 8,074 |
161,494 | 0 | TargetHandler::Throttle::~Throttle() {
CleanupPointers();
}
| 8,075 |
155,324 | 0 | void ChromeContentBrowserClient::GetAdditionalAllowedSchemesForFileSystem(
std::vector<std::string>* additional_allowed_schemes) {
ContentBrowserClient::GetAdditionalAllowedSchemesForFileSystem(
additional_allowed_schemes);
additional_allowed_schemes->push_back(content::kChromeDevToolsScheme);
additional_allowed_schemes->push_back(content::kChromeUIScheme);
for (size_t i = 0; i < extra_parts_.size(); ++i) {
extra_parts_[i]->GetAdditionalAllowedSchemesForFileSystem(
additional_allowed_schemes);
}
}
| 8,076 |
80,510 | 0 | void subs_del(GF_Box *s)
{
GF_SubSampleInformationBox *ptr = (GF_SubSampleInformationBox *)s;
if (ptr == NULL) return;
while (gf_list_count(ptr->Samples)) {
GF_SubSampleInfoEntry *pSamp;
pSamp = (GF_SubSampleInfoEntry*)gf_list_get(ptr->Samples, 0);
while (gf_list_count(pSamp->SubSamples)) {
GF_SubSampleEntry *pSubSamp;
pSubSamp = (GF_SubSampleEntry*) gf_list_get(pSamp->SubSamples, 0);
gf_free(pSubSamp);
gf_list_rem(pSamp->SubSamples, 0);
}
gf_list_del(pSamp->SubSamples);
gf_free(pSamp);
gf_list_rem(ptr->Samples, 0);
}
gf_list_del(ptr->Samples);
gf_free(ptr);
}
| 8,077 |
187,036 | 1 | void Document::InitContentSecurityPolicy(
ContentSecurityPolicy* csp,
const ContentSecurityPolicy* policy_to_inherit,
const ContentSecurityPolicy* previous_document_csp) {
SetContentSecurityPolicy(csp ? csp : ContentSecurityPolicy::Create());
GetContentSecurityPolicy()->BindToExecutionContext(this);
// We inherit the parent/opener's CSP for documents with "local" schemes:
// 'about', 'blob', 'data', and 'filesystem'. We also inherit CSP for
// documents with empty/invalid URLs because we treat those URLs as
// 'about:blank' in Blink.
// https://w3c.github.io/webappsec-csp/#initialize-document-csp
// TODO(dcheng): This is similar enough to work we're doing in
// 'DocumentLoader::ensureWriter' that it might make sense to combine them.
if (policy_to_inherit) {
GetContentSecurityPolicy()->CopyStateFrom(policy_to_inherit);
} else {
if (frame_) {
Frame* inherit_from = frame_->Tree().Parent()
? frame_->Tree().Parent()
: frame_->Client()->Opener();
if (inherit_from && frame_ != inherit_from) {
DCHECK(inherit_from->GetSecurityContext() &&
inherit_from->GetSecurityContext()->GetContentSecurityPolicy());
policy_to_inherit =
inherit_from->GetSecurityContext()->GetContentSecurityPolicy();
}
}
// If we don't have an opener or parent, inherit from the previous
// document CSP.
if (!policy_to_inherit)
policy_to_inherit = previous_document_csp;
// We should inherit the relevant CSP if the document is loaded using
// a local-scheme url.
if (policy_to_inherit &&
(url_.IsEmpty() || url_.ProtocolIsAbout() || url_.ProtocolIsData() ||
url_.ProtocolIs("blob") || url_.ProtocolIs("filesystem")))
GetContentSecurityPolicy()->CopyStateFrom(policy_to_inherit);
}
// Plugin documents inherit their parent/opener's 'plugin-types' directive
// regardless of URL.
if (policy_to_inherit && IsPluginDocument())
GetContentSecurityPolicy()->CopyPluginTypesFrom(policy_to_inherit);
}
| 8,078 |
4,194 | 0 | tt_cmap6_char_next( TT_CMap cmap,
FT_UInt32 *pchar_code )
{
FT_Byte* table = cmap->data;
FT_UInt32 result = 0;
FT_UInt32 char_code = *pchar_code + 1;
FT_UInt gindex = 0;
FT_Byte* p = table + 6;
FT_UInt start = TT_NEXT_USHORT( p );
FT_UInt count = TT_NEXT_USHORT( p );
FT_UInt idx;
if ( char_code >= 0x10000UL )
goto Exit;
if ( char_code < start )
char_code = start;
idx = (FT_UInt)( char_code - start );
p += 2 * idx;
for ( ; idx < count; idx++ )
{
gindex = TT_NEXT_USHORT( p );
if ( gindex != 0 )
{
result = char_code;
break;
}
char_code++;
}
Exit:
*pchar_code = result;
return gindex;
}
| 8,079 |
34,748 | 0 | static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
struct ext4_ext_path *path,
struct ext4_extent *newext)
{
struct ext4_ext_path *curp = path;
struct ext4_extent_header *neh;
struct buffer_head *bh;
ext4_fsblk_t newblock;
int err = 0;
newblock = ext4_ext_new_meta_block(handle, inode, path, newext, &err);
if (newblock == 0)
return err;
bh = sb_getblk(inode->i_sb, newblock);
if (!bh) {
err = -EIO;
ext4_std_error(inode->i_sb, err);
return err;
}
lock_buffer(bh);
err = ext4_journal_get_create_access(handle, bh);
if (err) {
unlock_buffer(bh);
goto out;
}
/* move top-level index/leaf into new block */
memmove(bh->b_data, curp->p_hdr, sizeof(EXT4_I(inode)->i_data));
/* set size of new block */
neh = ext_block_hdr(bh);
/* old root could have indexes or leaves
* so calculate e_max right way */
if (ext_depth(inode))
neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
else
neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
neh->eh_magic = EXT4_EXT_MAGIC;
set_buffer_uptodate(bh);
unlock_buffer(bh);
err = ext4_handle_dirty_metadata(handle, inode, bh);
if (err)
goto out;
/* create index in new top-level index: num,max,pointer */
err = ext4_ext_get_access(handle, inode, curp);
if (err)
goto out;
curp->p_hdr->eh_magic = EXT4_EXT_MAGIC;
curp->p_hdr->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0));
curp->p_hdr->eh_entries = cpu_to_le16(1);
curp->p_idx = EXT_FIRST_INDEX(curp->p_hdr);
if (path[0].p_hdr->eh_depth)
curp->p_idx->ei_block =
EXT_FIRST_INDEX(path[0].p_hdr)->ei_block;
else
curp->p_idx->ei_block =
EXT_FIRST_EXTENT(path[0].p_hdr)->ee_block;
ext4_idx_store_pblock(curp->p_idx, newblock);
neh = ext_inode_hdr(inode);
ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n",
le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
ext4_idx_pblock(EXT_FIRST_INDEX(neh)));
neh->eh_depth = cpu_to_le16(path->p_depth + 1);
err = ext4_ext_dirty(handle, inode, curp);
out:
brelse(bh);
return err;
}
| 8,080 |
54,646 | 0 | void snd_seq_info_queues_read(struct snd_info_entry *entry,
struct snd_info_buffer *buffer)
{
int i, bpm;
struct snd_seq_queue *q;
struct snd_seq_timer *tmr;
for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
if ((q = queueptr(i)) == NULL)
continue;
tmr = q->timer;
if (tmr->tempo)
bpm = 60000000 / tmr->tempo;
else
bpm = 0;
snd_iprintf(buffer, "queue %d: [%s]\n", q->queue, q->name);
snd_iprintf(buffer, "owned by client : %d\n", q->owner);
snd_iprintf(buffer, "lock status : %s\n", q->locked ? "Locked" : "Free");
snd_iprintf(buffer, "queued time events : %d\n", snd_seq_prioq_avail(q->timeq));
snd_iprintf(buffer, "queued tick events : %d\n", snd_seq_prioq_avail(q->tickq));
snd_iprintf(buffer, "timer state : %s\n", tmr->running ? "Running" : "Stopped");
snd_iprintf(buffer, "timer PPQ : %d\n", tmr->ppq);
snd_iprintf(buffer, "current tempo : %d\n", tmr->tempo);
snd_iprintf(buffer, "current BPM : %d\n", bpm);
snd_iprintf(buffer, "current time : %d.%09d s\n", tmr->cur_time.tv_sec, tmr->cur_time.tv_nsec);
snd_iprintf(buffer, "current tick : %d\n", tmr->tick.cur_tick);
snd_iprintf(buffer, "\n");
queuefree(q);
}
}
| 8,081 |
32,634 | 0 | static int tg3_nvram_lock(struct tg3 *tp)
{
if (tg3_flag(tp, NVRAM)) {
int i;
if (tp->nvram_lock_cnt == 0) {
tw32(NVRAM_SWARB, SWARB_REQ_SET1);
for (i = 0; i < 8000; i++) {
if (tr32(NVRAM_SWARB) & SWARB_GNT1)
break;
udelay(20);
}
if (i == 8000) {
tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
return -ENODEV;
}
}
tp->nvram_lock_cnt++;
}
return 0;
}
| 8,082 |
31,077 | 0 | static int dcb_app_add(const struct dcb_app *app, int ifindex)
{
struct dcb_app_type *entry;
entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
if (!entry)
return -ENOMEM;
memcpy(&entry->app, app, sizeof(*app));
entry->ifindex = ifindex;
list_add(&entry->list, &dcb_app_list);
return 0;
}
| 8,083 |
27,826 | 0 | static void br_multicast_port_group_query_expired(unsigned long data)
{
struct net_bridge_port_group *pg = (void *)data;
struct net_bridge_port *port = pg->port;
struct net_bridge *br = port->br;
spin_lock(&br->multicast_lock);
if (!netif_running(br->dev) || hlist_unhashed(&pg->mglist) ||
pg->queries_sent >= br->multicast_last_member_count)
goto out;
br_multicast_send_port_group_query(pg);
out:
spin_unlock(&br->multicast_lock);
}
| 8,084 |
120,782 | 0 | void BluetoothAdapterChromeOS::OnStopDiscovery(const base::Closure& callback) {
callback.Run();
}
| 8,085 |
86,428 | 0 | static void set_page_huge_active(struct page *page)
{
VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
SetPagePrivate(&page[1]);
}
| 8,086 |
76,325 | 0 | xfs_attr_remove(
struct xfs_inode *dp,
const unsigned char *name,
int flags)
{
struct xfs_mount *mp = dp->i_mount;
struct xfs_da_args args;
struct xfs_defer_ops dfops;
xfs_fsblock_t firstblock;
int error;
XFS_STATS_INC(mp, xs_attr_remove);
if (XFS_FORCED_SHUTDOWN(dp->i_mount))
return -EIO;
error = xfs_attr_args_init(&args, dp, name, flags);
if (error)
return error;
args.firstblock = &firstblock;
args.dfops = &dfops;
/*
* we have no control over the attribute names that userspace passes us
* to remove, so we have to allow the name lookup prior to attribute
* removal to fail.
*/
args.op_flags = XFS_DA_OP_OKNOENT;
error = xfs_qm_dqattach(dp, 0);
if (error)
return error;
/*
* Root fork attributes can use reserved data blocks for this
* operation if necessary
*/
error = xfs_trans_alloc(mp, &M_RES(mp)->tr_attrrm,
XFS_ATTRRM_SPACE_RES(mp), 0,
(flags & ATTR_ROOT) ? XFS_TRANS_RESERVE : 0,
&args.trans);
if (error)
return error;
xfs_ilock(dp, XFS_ILOCK_EXCL);
/*
* No need to make quota reservations here. We expect to release some
* blocks not allocate in the common case.
*/
xfs_trans_ijoin(args.trans, dp, 0);
if (!xfs_inode_hasattr(dp)) {
error = -ENOATTR;
} else if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
ASSERT(dp->i_afp->if_flags & XFS_IFINLINE);
error = xfs_attr_shortform_remove(&args);
} else if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) {
error = xfs_attr_leaf_removename(&args);
} else {
error = xfs_attr_node_removename(&args);
}
if (error)
goto out;
/*
* If this is a synchronous mount, make sure that the
* transaction goes to disk before returning to the user.
*/
if (mp->m_flags & XFS_MOUNT_WSYNC)
xfs_trans_set_sync(args.trans);
if ((flags & ATTR_KERNOTIME) == 0)
xfs_trans_ichgtime(args.trans, dp, XFS_ICHGTIME_CHG);
/*
* Commit the last in the sequence of transactions.
*/
xfs_trans_log_inode(args.trans, dp, XFS_ILOG_CORE);
error = xfs_trans_commit(args.trans);
xfs_iunlock(dp, XFS_ILOCK_EXCL);
return error;
out:
if (args.trans)
xfs_trans_cancel(args.trans);
xfs_iunlock(dp, XFS_ILOCK_EXCL);
return error;
}
| 8,087 |
93,893 | 0 | virDomainPMWakeup(virDomainPtr dom,
unsigned int flags)
{
virConnectPtr conn;
VIR_DOMAIN_DEBUG(dom, "flags=%x", flags);
virResetLastError();
virCheckDomainReturn(dom, -1);
conn = dom->conn;
virCheckReadOnlyGoto(conn->flags, error);
if (conn->driver->domainPMWakeup) {
int ret;
ret = conn->driver->domainPMWakeup(dom, flags);
if (ret < 0)
goto error;
return ret;
}
virReportUnsupportedError();
error:
virDispatchError(conn);
return -1;
}
| 8,088 |
39,624 | 0 | long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
u32 __user *uaddr2, u32 val2, u32 val3)
{
int clockrt, ret = -ENOSYS;
int cmd = op & FUTEX_CMD_MASK;
int fshared = 0;
if (!(op & FUTEX_PRIVATE_FLAG))
fshared = 1;
clockrt = op & FUTEX_CLOCK_REALTIME;
if (clockrt && cmd != FUTEX_WAIT_BITSET && cmd != FUTEX_WAIT_REQUEUE_PI)
return -ENOSYS;
switch (cmd) {
case FUTEX_WAIT:
val3 = FUTEX_BITSET_MATCH_ANY;
case FUTEX_WAIT_BITSET:
ret = futex_wait(uaddr, fshared, val, timeout, val3, clockrt);
break;
case FUTEX_WAKE:
val3 = FUTEX_BITSET_MATCH_ANY;
case FUTEX_WAKE_BITSET:
ret = futex_wake(uaddr, fshared, val, val3);
break;
case FUTEX_REQUEUE:
ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, NULL, 0);
break;
case FUTEX_CMP_REQUEUE:
ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, &val3,
0);
break;
case FUTEX_WAKE_OP:
ret = futex_wake_op(uaddr, fshared, uaddr2, val, val2, val3);
break;
case FUTEX_LOCK_PI:
if (futex_cmpxchg_enabled)
ret = futex_lock_pi(uaddr, fshared, val, timeout, 0);
break;
case FUTEX_UNLOCK_PI:
if (futex_cmpxchg_enabled)
ret = futex_unlock_pi(uaddr, fshared);
break;
case FUTEX_TRYLOCK_PI:
if (futex_cmpxchg_enabled)
ret = futex_lock_pi(uaddr, fshared, 0, timeout, 1);
break;
case FUTEX_WAIT_REQUEUE_PI:
val3 = FUTEX_BITSET_MATCH_ANY;
ret = futex_wait_requeue_pi(uaddr, fshared, val, timeout, val3,
clockrt, uaddr2);
break;
case FUTEX_CMP_REQUEUE_PI:
ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, &val3,
1);
break;
default:
ret = -ENOSYS;
}
return ret;
}
| 8,089 |
134,543 | 0 | void WebContentsViewAura::SetInitialFocus() {
if (web_contents_->FocusLocationBarByDefault())
web_contents_->SetFocusToLocationBar(false);
else
Focus();
}
| 8,090 |
100,509 | 0 | void Cache::remove(CachedResource* resource)
{
if (resource->inCache()) {
m_resources.remove(resource->url());
resource->setInCache(false);
removeFromLRUList(resource);
removeFromLiveDecodedResourcesList(resource);
HashSet<DocLoader*>::iterator end = m_docLoaders.end();
for (HashSet<DocLoader*>::iterator itr = m_docLoaders.begin(); itr != end; ++itr)
(*itr)->removeCachedResource(resource);
int delta = -static_cast<int>(resource->size());
if (delta)
adjustSize(resource->referenced(), delta);
} else
ASSERT(m_resources.get(resource->url()) != resource);
if (resource->canDelete())
delete resource;
}
| 8,091 |
156,329 | 0 | bool MediaRecorder::isTypeSupported(ExecutionContext* context,
const String& type) {
std::unique_ptr<WebMediaRecorderHandler> handler =
Platform::Current()->CreateMediaRecorderHandler(
context->GetTaskRunner(TaskType::kInternalMediaRealTime));
if (!handler)
return false;
ContentType content_type(type);
return handler->CanSupportMimeType(content_type.GetType(),
content_type.Parameter("codecs"));
}
| 8,092 |
178,964 | 1 | void ptrace_triggered(struct perf_event *bp, int nmi,
struct perf_sample_data *data, struct pt_regs *regs)
{
struct perf_event_attr attr;
/*
* Disable the breakpoint request here since ptrace has defined a
* one-shot behaviour for breakpoint exceptions in PPC64.
* The SIGTRAP signal is generated automatically for us in do_dabr().
* We don't have to do anything about that here
*/
attr = bp->attr;
attr.disabled = true;
modify_user_hw_breakpoint(bp, &attr);
}
| 8,093 |
177,396 | 0 | const BlockEntry* Cluster::GetEntry(const CuePoint& cp,
const CuePoint::TrackPosition& tp) const {
assert(m_pSegment);
const long long tc = cp.GetTimeCode();
if (tp.m_block > 0) {
const long block = static_cast<long>(tp.m_block);
const long index = block - 1;
while (index >= m_entries_count) {
long long pos;
long len;
const long status = Parse(pos, len);
if (status < 0) // TODO: can this happen?
return NULL;
if (status > 0) // nothing remains to be parsed
return NULL;
}
const BlockEntry* const pEntry = m_entries[index];
assert(pEntry);
assert(!pEntry->EOS());
const Block* const pBlock = pEntry->GetBlock();
assert(pBlock);
if ((pBlock->GetTrackNumber() == tp.m_track) &&
(pBlock->GetTimeCode(this) == tc)) {
return pEntry;
}
}
long index = 0;
for (;;) {
if (index >= m_entries_count) {
long long pos;
long len;
const long status = Parse(pos, len);
if (status < 0) // TODO: can this happen?
return NULL;
if (status > 0) // nothing remains to be parsed
return NULL;
assert(m_entries);
assert(index < m_entries_count);
}
const BlockEntry* const pEntry = m_entries[index];
assert(pEntry);
assert(!pEntry->EOS());
const Block* const pBlock = pEntry->GetBlock();
assert(pBlock);
if (pBlock->GetTrackNumber() != tp.m_track) {
++index;
continue;
}
const long long tc_ = pBlock->GetTimeCode(this);
if (tc_ < tc) {
++index;
continue;
}
if (tc_ > tc)
return NULL;
const Tracks* const pTracks = m_pSegment->GetTracks();
assert(pTracks);
const long tn = static_cast<long>(tp.m_track);
const Track* const pTrack = pTracks->GetTrackByNumber(tn);
if (pTrack == NULL)
return NULL;
const long long type = pTrack->GetType();
if (type == 2) // audio
return pEntry;
if (type != 1) // not video
return NULL;
if (!pBlock->IsKey())
return NULL;
return pEntry;
}
}
| 8,094 |
101,519 | 0 | void PrintWebViewHelper::PrintPreviewContext::OnPrintPreview() {
DCHECK_EQ(INITIALIZED, state_);
ClearContext();
}
| 8,095 |
21,180 | 0 | struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg)
{
if (!memcg || mem_cgroup_is_root(memcg))
return NULL;
return &memcg->tcp_mem.cg_proto;
}
| 8,096 |
77,723 | 0 | parse_instruction_ids(struct ofpbuf *payload, bool loose, uint32_t *insts)
{
*insts = 0;
while (payload->size > 0) {
enum ovs_instruction_type inst;
enum ofperr error;
uint64_t ofpit;
/* OF1.3 and OF1.4 aren't clear about padding in the instruction IDs.
* It seems clear that they aren't padded to 8 bytes, though, because
* both standards say that "non-experimenter instructions are 4 bytes"
* and do not mention any padding before the first instruction ID.
* (There wouldn't be any point in padding to 8 bytes if the IDs were
* aligned on an odd 4-byte boundary.)
*
* Anyway, we just assume they're all glommed together on byte
* boundaries. */
error = ofpprop_pull__(payload, NULL, 1, 0x10000, &ofpit);
if (error) {
return error;
}
error = ovs_instruction_type_from_inst_type(&inst, ofpit);
if (!error) {
*insts |= 1u << inst;
} else if (!loose) {
return error;
}
}
return 0;
}
| 8,097 |
68,831 | 0 | static __always_inline int alloc_block(struct kmem_cache *cachep,
struct array_cache *ac, struct page *page, int batchcount)
{
/*
* There must be at least one object available for
* allocation.
*/
BUG_ON(page->active >= cachep->num);
while (page->active < cachep->num && batchcount--) {
STATS_INC_ALLOCED(cachep);
STATS_INC_ACTIVE(cachep);
STATS_SET_HIGH(cachep);
ac->entry[ac->avail++] = slab_get_obj(cachep, page);
}
return batchcount;
}
| 8,098 |
127,115 | 0 | int ComputeConsumedBytes(int initial_bytes_enqueued,
int initial_bytes_buffered) {
int byte_delta = bytes_enqueued_ - initial_bytes_enqueued;
int buffered_delta = algorithm_.bytes_buffered() - initial_bytes_buffered;
int consumed = byte_delta - buffered_delta;
CHECK_GE(consumed, 0);
return consumed;
}
| 8,099 |