Columns (name, dtype, min/max):
unique_id: int64, 13 to 189k
target: int64, 0 to 1
code: string, lengths 20 to 241k
__index_level_0__: int64, 0 to 18.9k
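The columns above describe a flat table in which each row pairs an integer unique_id with a binary target label (0 or 1) and the raw source text of a single C/C++ function in code. Below is a minimal sketch of reading a slice of such an export with pandas; the Parquet file name is hypothetical and only the column names come from the schema above.

```python
# Minimal sketch: load the table and walk a few rows.
# "code_dataset.parquet" is a hypothetical file name; point it at the
# actual export of this dataset. Column names follow the schema above.
import pandas as pd

df = pd.read_parquet("code_dataset.parquet")

# Take a small slice of rows and only the columns shown in this listing.
subset = df.loc[7900:7925, ["unique_id", "target", "code"]]

for row in subset.itertuples(index=True):
    # target is the 0/1 int64 label; code holds the function source text.
    print(row.Index, row.unique_id, row.target, len(row.code))
```

Rows carrying the positive label can then be pulled out with df[df["target"] == 1], which in the slice shown here would match the entries with unique_id 183,766 and 180,456.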
unique_id 41,681 · target 0
int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, int delay_iput, int nr) { struct btrfs_root *root; struct list_head splice; int ret; if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) return -EROFS; INIT_LIST_HEAD(&splice); mutex_lock(&fs_info->delalloc_root_mutex); spin_lock(&fs_info->delalloc_root_lock); list_splice_init(&fs_info->delalloc_roots, &splice); while (!list_empty(&splice) && nr) { root = list_first_entry(&splice, struct btrfs_root, delalloc_root); root = btrfs_grab_fs_root(root); BUG_ON(!root); list_move_tail(&root->delalloc_root, &fs_info->delalloc_roots); spin_unlock(&fs_info->delalloc_root_lock); ret = __start_delalloc_inodes(root, delay_iput, nr); btrfs_put_fs_root(root); if (ret < 0) goto out; if (nr != -1) { nr -= ret; WARN_ON(nr < 0); } spin_lock(&fs_info->delalloc_root_lock); } spin_unlock(&fs_info->delalloc_root_lock); ret = 0; atomic_inc(&fs_info->async_submit_draining); while (atomic_read(&fs_info->nr_async_submits) || atomic_read(&fs_info->async_delalloc_pages)) { wait_event(fs_info->async_submit_wait, (atomic_read(&fs_info->nr_async_submits) == 0 && atomic_read(&fs_info->async_delalloc_pages) == 0)); } atomic_dec(&fs_info->async_submit_draining); out: if (!list_empty_careful(&splice)) { spin_lock(&fs_info->delalloc_root_lock); list_splice_tail(&splice, &fs_info->delalloc_roots); spin_unlock(&fs_info->delalloc_root_lock); } mutex_unlock(&fs_info->delalloc_root_mutex); return ret; }
7,900 · unique_id 132,892 · target 0
static void VerifyAllTilesExistAndHavePile( const PictureLayerTiling* tiling, PicturePileImpl* pile) { for (PictureLayerTiling::CoverageIterator iter( tiling, tiling->contents_scale(), gfx::Rect(tiling->tiling_size())); iter; ++iter) { EXPECT_TRUE(*iter); EXPECT_EQ(pile, iter->raster_source()); } }
7,901 · unique_id 65,999 · target 0
svc_rdma_parse_connect_private(struct svcxprt_rdma *newxprt, struct rdma_conn_param *param) { const struct rpcrdma_connect_private *pmsg = param->private_data; if (pmsg && pmsg->cp_magic == rpcrdma_cmp_magic && pmsg->cp_version == RPCRDMA_CMP_VERSION) { newxprt->sc_snd_w_inv = pmsg->cp_flags & RPCRDMA_CMP_F_SND_W_INV_OK; dprintk("svcrdma: client send_size %u, recv_size %u " "remote inv %ssupported\n", rpcrdma_decode_buffer_size(pmsg->cp_send_size), rpcrdma_decode_buffer_size(pmsg->cp_recv_size), newxprt->sc_snd_w_inv ? "" : "un"); } }
7,902 · unique_id 157,823 · target 0
bool WebContentsImpl::OnMessageReceived(RenderFrameHostImpl* render_frame_host, const IPC::Message& message) { { WebUIImpl* web_ui = render_frame_host->web_ui(); if (web_ui && web_ui->OnMessageReceived(message, render_frame_host)) return true; } for (auto& observer : observers_) { if (observer.OnMessageReceived(message, render_frame_host)) return true; } bool handled = true; IPC_BEGIN_MESSAGE_MAP_WITH_PARAM(WebContentsImpl, message, render_frame_host) IPC_MESSAGE_HANDLER(FrameHostMsg_DomOperationResponse, OnDomOperationResponse) IPC_MESSAGE_HANDLER(FrameHostMsg_DidChangeThemeColor, OnThemeColorChanged) IPC_MESSAGE_HANDLER(FrameHostMsg_DidFinishDocumentLoad, OnDocumentLoadedInFrame) IPC_MESSAGE_HANDLER(FrameHostMsg_DidFinishLoad, OnDidFinishLoad) IPC_MESSAGE_HANDLER(FrameHostMsg_DidLoadResourceFromMemoryCache, OnDidLoadResourceFromMemoryCache) IPC_MESSAGE_HANDLER(FrameHostMsg_DidDisplayInsecureContent, OnDidDisplayInsecureContent) IPC_MESSAGE_HANDLER(FrameHostMsg_DidContainInsecureFormAction, OnDidContainInsecureFormAction) IPC_MESSAGE_HANDLER(FrameHostMsg_DidRunInsecureContent, OnDidRunInsecureContent) IPC_MESSAGE_HANDLER(FrameHostMsg_DidDisplayContentWithCertificateErrors, OnDidDisplayContentWithCertificateErrors) IPC_MESSAGE_HANDLER(FrameHostMsg_DidRunContentWithCertificateErrors, OnDidRunContentWithCertificateErrors) IPC_MESSAGE_HANDLER(FrameHostMsg_RegisterProtocolHandler, OnRegisterProtocolHandler) IPC_MESSAGE_HANDLER(FrameHostMsg_UnregisterProtocolHandler, OnUnregisterProtocolHandler) IPC_MESSAGE_HANDLER(FrameHostMsg_UpdatePageImportanceSignals, OnUpdatePageImportanceSignals) IPC_MESSAGE_HANDLER(FrameHostMsg_UpdateFaviconURL, OnUpdateFaviconURL) #if BUILDFLAG(ENABLE_PLUGINS) IPC_MESSAGE_HANDLER(FrameHostMsg_PepperInstanceCreated, OnPepperInstanceCreated) IPC_MESSAGE_HANDLER(FrameHostMsg_PepperInstanceDeleted, OnPepperInstanceDeleted) IPC_MESSAGE_HANDLER(FrameHostMsg_PepperPluginHung, OnPepperPluginHung) IPC_MESSAGE_HANDLER(FrameHostMsg_PepperStartsPlayback, OnPepperStartsPlayback) IPC_MESSAGE_HANDLER(FrameHostMsg_PepperStopsPlayback, OnPepperStopsPlayback) IPC_MESSAGE_HANDLER(FrameHostMsg_PluginCrashed, OnPluginCrashed) IPC_MESSAGE_HANDLER_GENERIC(BrowserPluginHostMsg_Attach, OnBrowserPluginMessage(render_frame_host, message)) #endif IPC_MESSAGE_UNHANDLED(handled = false) IPC_END_MESSAGE_MAP() return handled; }
7,903 · unique_id 52,739 · target 0
static int snd_timer_user_status(struct file *file, struct snd_timer_status __user *_status) { struct snd_timer_user *tu; struct snd_timer_status status; tu = file->private_data; if (!tu->timeri) return -EBADFD; memset(&status, 0, sizeof(status)); status.tstamp = tu->tstamp; status.resolution = snd_timer_resolution(tu->timeri); status.lost = tu->timeri->lost; status.overrun = tu->overrun; spin_lock_irq(&tu->qlock); status.queue = tu->qused; spin_unlock_irq(&tu->qlock); if (copy_to_user(_status, &status, sizeof(status))) return -EFAULT; return 0; }
7,904 · unique_id 65,892 · target 0
nfsd_init_raparms(struct file *file) { struct inode *inode = file_inode(file); dev_t dev = inode->i_sb->s_dev; ino_t ino = inode->i_ino; struct raparms *ra, **rap, **frap = NULL; int depth = 0; unsigned int hash; struct raparm_hbucket *rab; hash = jhash_2words(dev, ino, 0xfeedbeef) & RAPARM_HASH_MASK; rab = &raparm_hash[hash]; spin_lock(&rab->pb_lock); for (rap = &rab->pb_head; (ra = *rap); rap = &ra->p_next) { if (ra->p_ino == ino && ra->p_dev == dev) goto found; depth++; if (ra->p_count == 0) frap = rap; } depth = nfsdstats.ra_size; if (!frap) { spin_unlock(&rab->pb_lock); return NULL; } rap = frap; ra = *frap; ra->p_dev = dev; ra->p_ino = ino; ra->p_set = 0; ra->p_hindex = hash; found: if (rap != &rab->pb_head) { *rap = ra->p_next; ra->p_next = rab->pb_head; rab->pb_head = ra; } ra->p_count++; nfsdstats.ra_depth[depth*10/nfsdstats.ra_size]++; spin_unlock(&rab->pb_lock); if (ra->p_set) file->f_ra = ra->p_ra; return ra; }
7,905 · unique_id 57,687 · target 0
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, unsigned long npages) { int i; for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) { unsigned long ugfn; int lpages; int level = i + 1; lpages = gfn_to_index(slot->base_gfn + npages - 1, slot->base_gfn, level) + 1; slot->arch.rmap[i] = kvm_kvzalloc(lpages * sizeof(*slot->arch.rmap[i])); if (!slot->arch.rmap[i]) goto out_free; if (i == 0) continue; slot->arch.lpage_info[i - 1] = kvm_kvzalloc(lpages * sizeof(*slot->arch.lpage_info[i - 1])); if (!slot->arch.lpage_info[i - 1]) goto out_free; if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1)) slot->arch.lpage_info[i - 1][0].write_count = 1; if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1)) slot->arch.lpage_info[i - 1][lpages - 1].write_count = 1; ugfn = slot->userspace_addr >> PAGE_SHIFT; /* * If the gfn and userspace address are not aligned wrt each * other, or if explicitly asked to, disable large page * support for this slot */ if ((slot->base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) || !kvm_largepages_enabled()) { unsigned long j; for (j = 0; j < lpages; ++j) slot->arch.lpage_info[i - 1][j].write_count = 1; } } return 0; out_free: for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) { kvfree(slot->arch.rmap[i]); slot->arch.rmap[i] = NULL; if (i == 0) continue; kvfree(slot->arch.lpage_info[i - 1]); slot->arch.lpage_info[i - 1] = NULL; } return -ENOMEM; }
7,906 · unique_id 19,255 · target 0
static int netlink_seq_open(struct inode *inode, struct file *file) { return seq_open_net(inode, file, &netlink_seq_ops, sizeof(struct nl_seq_iter)); }
7,907 · unique_id 118,702 · target 0
inline void DialogHandler::dialogCreated(DOMWindow* dialogFrame) { m_dialogContext = dialogFrame->frame() ? dialogFrame->frame()->script()->currentWorldContext() : v8::Local<v8::Context>(); if (m_dialogContext.IsEmpty()) return; if (m_dialogArguments.IsEmpty()) return; v8::Context::Scope scope(m_dialogContext); m_dialogContext->Global()->Set(v8::String::NewSymbol("dialogArguments"), m_dialogArguments); }
7,908 · unique_id 162,763 · target 0
void BaseRenderingContext2D::setLineDash(const Vector<double>& dash) { if (!LineDashSequenceIsValid(dash)) return; ModifiableState().SetLineDash(dash); }
7,909 · unique_id 43,816 · target 0
init_ctx_call_init(OM_uint32 *minor_status, spnego_gss_ctx_id_t sc, spnego_gss_cred_id_t spcred, gss_name_t target_name, OM_uint32 req_flags, OM_uint32 time_req, gss_buffer_t mechtok_in, gss_OID *actual_mech, gss_buffer_t mechtok_out, OM_uint32 *ret_flags, OM_uint32 *time_rec, OM_uint32 *negState, send_token_flag *send_token) { OM_uint32 ret, tmpret, tmpmin, mech_req_flags; gss_cred_id_t mcred; mcred = (spcred == NULL) ? GSS_C_NO_CREDENTIAL : spcred->mcred; mech_req_flags = req_flags; if (spcred == NULL || !spcred->no_ask_integ) mech_req_flags |= GSS_C_INTEG_FLAG; ret = gss_init_sec_context(minor_status, mcred, &sc->ctx_handle, target_name, sc->internal_mech, mech_req_flags, time_req, GSS_C_NO_CHANNEL_BINDINGS, mechtok_in, &sc->actual_mech, mechtok_out, &sc->ctx_flags, time_rec); if (ret == GSS_S_COMPLETE) { sc->mech_complete = 1; if (ret_flags != NULL) *ret_flags = sc->ctx_flags; /* * Microsoft SPNEGO implementations expect an even number of * token exchanges. So if we're sending a final token, ask for * a zero-length token back from the server. Also ask for a * token back if this is the first token or if a MIC exchange * is required. */ if (*send_token == CONT_TOKEN_SEND && mechtok_out->length == 0 && (!sc->mic_reqd || !(sc->ctx_flags & GSS_C_INTEG_FLAG))) { /* The exchange is complete. */ *negState = ACCEPT_COMPLETE; ret = GSS_S_COMPLETE; *send_token = NO_TOKEN_SEND; } else { /* Ask for one more hop. */ *negState = ACCEPT_INCOMPLETE; ret = GSS_S_CONTINUE_NEEDED; } return ret; } if (ret == GSS_S_CONTINUE_NEEDED) return ret; if (*send_token != INIT_TOKEN_SEND) { *send_token = ERROR_TOKEN_SEND; *negState = REJECT; return ret; } /* * Since this is the first token, we can fall back to later mechanisms * in the list. Since the mechanism list is expected to be short, we * can do this with recursion. If all mechanisms produce errors, the * caller should get the error from the first mech in the list. */ gssalloc_free(sc->mech_set->elements->elements); memmove(sc->mech_set->elements, sc->mech_set->elements + 1, --sc->mech_set->count * sizeof(*sc->mech_set->elements)); if (sc->mech_set->count == 0) goto fail; gss_release_buffer(&tmpmin, &sc->DER_mechTypes); if (put_mech_set(sc->mech_set, &sc->DER_mechTypes) < 0) goto fail; tmpret = init_ctx_call_init(&tmpmin, sc, spcred, target_name, req_flags, time_req, mechtok_in, actual_mech, mechtok_out, ret_flags, time_rec, negState, send_token); if (HARD_ERROR(tmpret)) goto fail; *minor_status = tmpmin; return tmpret; fail: /* Don't output token on error from first call. */ *send_token = NO_TOKEN_SEND; *negState = REJECT; return ret; }
7,910 · unique_id 108,637 · target 0
virtual void StartAsync() { this->NotifyRestartRequired(); }
7,911 · unique_id 177,397 · target 0
const CuePoint* Cues::GetFirst() const { if (m_cue_points == NULL || m_count == 0) return NULL; CuePoint* const* const pp = m_cue_points; if (pp == NULL) return NULL; CuePoint* const pCP = pp[0]; if (pCP == NULL || pCP->GetTimeCode() < 0) return NULL; return pCP; }
7,912 · unique_id 97,240 · target 0
bool WebFrameLoaderClient::shouldUseCredentialStorage(DocumentLoader*, unsigned long identifier) { return true; }
7,913 · unique_id 183,766 · target 1
FilePath ExtensionPrefs::GetExtensionPath(const std::string& extension_id) { const DictionaryValue* dict = GetExtensionPref(extension_id); std::string path; if (!dict->GetString(kPrefPath, &path)) return FilePath(); return install_directory_.Append(FilePath::FromWStringHack(UTF8ToWide(path))); }
7,914 · unique_id 25,956 · target 0
SYSCALL_DEFINE5(perf_event_open, struct perf_event_attr __user *, attr_uptr, pid_t, pid, int, cpu, int, group_fd, unsigned long, flags) { struct perf_event *group_leader = NULL, *output_event = NULL; struct perf_event *event, *sibling; struct perf_event_attr attr; struct perf_event_context *ctx; struct file *event_file = NULL; struct file *group_file = NULL; struct task_struct *task = NULL; struct pmu *pmu; int event_fd; int move_group = 0; int fput_needed = 0; int err; /* for future expandability... */ if (flags & ~PERF_FLAG_ALL) return -EINVAL; err = perf_copy_attr(attr_uptr, &attr); if (err) return err; if (!attr.exclude_kernel) { if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN)) return -EACCES; } if (attr.freq) { if (attr.sample_freq > sysctl_perf_event_sample_rate) return -EINVAL; } /* * In cgroup mode, the pid argument is used to pass the fd * opened to the cgroup directory in cgroupfs. The cpu argument * designates the cpu on which to monitor threads from that * cgroup. */ if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1)) return -EINVAL; event_fd = get_unused_fd_flags(O_RDWR); if (event_fd < 0) return event_fd; if (group_fd != -1) { group_leader = perf_fget_light(group_fd, &fput_needed); if (IS_ERR(group_leader)) { err = PTR_ERR(group_leader); goto err_fd; } group_file = group_leader->filp; if (flags & PERF_FLAG_FD_OUTPUT) output_event = group_leader; if (flags & PERF_FLAG_FD_NO_GROUP) group_leader = NULL; } if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) { task = find_lively_task_by_vpid(pid); if (IS_ERR(task)) { err = PTR_ERR(task); goto err_group_fd; } } event = perf_event_alloc(&attr, cpu, task, group_leader, NULL, NULL); if (IS_ERR(event)) { err = PTR_ERR(event); goto err_task; } if (flags & PERF_FLAG_PID_CGROUP) { err = perf_cgroup_connect(pid, event, &attr, group_leader); if (err) goto err_alloc; /* * one more event: * - that has cgroup constraint on event->cpu * - that may need work on context switch */ atomic_inc(&per_cpu(perf_cgroup_events, event->cpu)); jump_label_inc(&perf_sched_events); } /* * Special case software events and allow them to be part of * any hardware group. */ pmu = event->pmu; if (group_leader && (is_software_event(event) != is_software_event(group_leader))) { if (is_software_event(event)) { /* * If event and group_leader are not both a software * event, and event is, then group leader is not. * * Allow the addition of software events to !software * groups, this is safe because software events never * fail to schedule. */ pmu = group_leader->pmu; } else if (is_software_event(group_leader) && (group_leader->group_flags & PERF_GROUP_SOFTWARE)) { /* * In case the group is a pure software group, and we * try to add a hardware event, move the whole group to * the hardware context. 
*/ move_group = 1; } } /* * Get the target context (task or percpu): */ ctx = find_get_context(pmu, task, cpu); if (IS_ERR(ctx)) { err = PTR_ERR(ctx); goto err_alloc; } if (task) { put_task_struct(task); task = NULL; } /* * Look up the group leader (we will attach this event to it): */ if (group_leader) { err = -EINVAL; /* * Do not allow a recursive hierarchy (this new sibling * becoming part of another group-sibling): */ if (group_leader->group_leader != group_leader) goto err_context; /* * Do not allow to attach to a group in a different * task or CPU context: */ if (move_group) { if (group_leader->ctx->type != ctx->type) goto err_context; } else { if (group_leader->ctx != ctx) goto err_context; } /* * Only a group leader can be exclusive or pinned */ if (attr.exclusive || attr.pinned) goto err_context; } if (output_event) { err = perf_event_set_output(event, output_event); if (err) goto err_context; } event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, O_RDWR); if (IS_ERR(event_file)) { err = PTR_ERR(event_file); goto err_context; } if (move_group) { struct perf_event_context *gctx = group_leader->ctx; mutex_lock(&gctx->mutex); perf_remove_from_context(group_leader); list_for_each_entry(sibling, &group_leader->sibling_list, group_entry) { perf_remove_from_context(sibling); put_ctx(gctx); } mutex_unlock(&gctx->mutex); put_ctx(gctx); } event->filp = event_file; WARN_ON_ONCE(ctx->parent_ctx); mutex_lock(&ctx->mutex); if (move_group) { perf_install_in_context(ctx, group_leader, cpu); get_ctx(ctx); list_for_each_entry(sibling, &group_leader->sibling_list, group_entry) { perf_install_in_context(ctx, sibling, cpu); get_ctx(ctx); } } perf_install_in_context(ctx, event, cpu); ++ctx->generation; perf_unpin_context(ctx); mutex_unlock(&ctx->mutex); event->owner = current; mutex_lock(&current->perf_event_mutex); list_add_tail(&event->owner_entry, &current->perf_event_list); mutex_unlock(&current->perf_event_mutex); /* * Precalculate sample_data sizes */ perf_event__header_size(event); perf_event__id_header_size(event); /* * Drop the reference on the group_event after placing the * new event on the sibling_list. This ensures destruction * of the group leader will find the pointer to itself in * perf_group_detach(). */ fput_light(group_file, fput_needed); fd_install(event_fd, event_file); return event_fd; err_context: perf_unpin_context(ctx); put_ctx(ctx); err_alloc: free_event(event); err_task: if (task) put_task_struct(task); err_group_fd: fput_light(group_file, fput_needed); err_fd: put_unused_fd(event_fd); return err; }
7,915 · unique_id 72,419 · target 0
static void free_statement(pdo_stmt_t *stmt TSRMLS_DC) { if (stmt->bound_params) { zend_hash_destroy(stmt->bound_params); FREE_HASHTABLE(stmt->bound_params); stmt->bound_params = NULL; } if (stmt->bound_param_map) { zend_hash_destroy(stmt->bound_param_map); FREE_HASHTABLE(stmt->bound_param_map); stmt->bound_param_map = NULL; } if (stmt->bound_columns) { zend_hash_destroy(stmt->bound_columns); FREE_HASHTABLE(stmt->bound_columns); stmt->bound_columns = NULL; } if (stmt->methods && stmt->methods->dtor) { stmt->methods->dtor(stmt TSRMLS_CC); } if (stmt->query_string) { efree(stmt->query_string); } if (stmt->columns) { int i; struct pdo_column_data *cols = stmt->columns; for (i = 0; i < stmt->column_count; i++) { if (cols[i].name) { efree(cols[i].name); cols[i].name = NULL; } } efree(stmt->columns); stmt->columns = NULL; } if (stmt->fetch.into && stmt->default_fetch_type == PDO_FETCH_INTO) { FREE_ZVAL(stmt->fetch.into); stmt->fetch.into = NULL; } do_fetch_opt_finish(stmt, 1 TSRMLS_CC); zend_objects_store_del_ref(&stmt->database_object_handle TSRMLS_CC); if (stmt->dbh) { php_pdo_dbh_delref(stmt->dbh TSRMLS_CC); } zend_object_std_dtor(&stmt->std TSRMLS_CC); efree(stmt); }
7,916 · unique_id 25,215 · target 0
static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i) { const struct in6_addr *dest, *src; __u16 destp, srcp; int timer_active; unsigned long timer_expires; struct inet_sock *inet = inet_sk(sp); struct tcp_sock *tp = tcp_sk(sp); const struct inet_connection_sock *icsk = inet_csk(sp); struct ipv6_pinfo *np = inet6_sk(sp); dest = &np->daddr; src = &np->rcv_saddr; destp = ntohs(inet->inet_dport); srcp = ntohs(inet->inet_sport); if (icsk->icsk_pending == ICSK_TIME_RETRANS) { timer_active = 1; timer_expires = icsk->icsk_timeout; } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) { timer_active = 4; timer_expires = icsk->icsk_timeout; } else if (timer_pending(&sp->sk_timer)) { timer_active = 2; timer_expires = sp->sk_timer.expires; } else { timer_active = 0; timer_expires = jiffies; } seq_printf(seq, "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %lu %lu %u %u %d\n", i, src->s6_addr32[0], src->s6_addr32[1], src->s6_addr32[2], src->s6_addr32[3], srcp, dest->s6_addr32[0], dest->s6_addr32[1], dest->s6_addr32[2], dest->s6_addr32[3], destp, sp->sk_state, tp->write_seq-tp->snd_una, (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq), timer_active, jiffies_to_clock_t(timer_expires - jiffies), icsk->icsk_retransmits, sock_i_uid(sp), icsk->icsk_probes_out, sock_i_ino(sp), atomic_read(&sp->sk_refcnt), sp, jiffies_to_clock_t(icsk->icsk_rto), jiffies_to_clock_t(icsk->icsk_ack.ato), (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong, tp->snd_cwnd, tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh ); }
7,917 · unique_id 166,366 · target 0
size_t GLES2Util::CalcClearBufferivDataCount(int buffer) { switch (buffer) { case GL_COLOR: return 4; case GL_STENCIL: return 1; default: return 0; } }
7,918 · unique_id 17,763 · target 0
static void remove_master_func(void *res, XID id, void *devid) { struct PointerBarrierDevice *pbd; struct PointerBarrierClient *barrier; struct PointerBarrier *b; DeviceIntPtr dev; int *deviceid = devid; int rc; Time ms = GetTimeInMillis(); rc = dixLookupDevice(&dev, *deviceid, serverClient, DixSendAccess); if (rc != Success) return; b = res; barrier = container_of(b, struct PointerBarrierClient, barrier); pbd = GetBarrierDevice(barrier, *deviceid); if (pbd->hit) { BarrierEvent ev = { .header = ET_Internal, .type =ET_BarrierLeave, .length = sizeof (BarrierEvent), .time = ms, .deviceid = *deviceid, .sourceid = 0, .dx = 0, .dy = 0, .root = barrier->screen->root->drawable.id, .window = barrier->window, .dt = ms - pbd->last_timestamp, .flags = XIBarrierPointerReleased, .event_id = pbd->barrier_event_id, .barrierid = barrier->id, }; mieqEnqueue(dev, (InternalEvent *) &ev); } xorg_list_del(&pbd->entry); free(pbd); }
7,919 · unique_id 39,368 · target 0
static void __exit floppy_module_exit(void) { int drive; blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256); unregister_blkdev(FLOPPY_MAJOR, "fd"); platform_driver_unregister(&floppy_driver); destroy_workqueue(floppy_wq); for (drive = 0; drive < N_DRIVE; drive++) { del_timer_sync(&motor_off_timer[drive]); if (floppy_available(drive)) { del_gendisk(disks[drive]); device_remove_file(&floppy_device[drive].dev, &dev_attr_cmos); platform_device_unregister(&floppy_device[drive]); } blk_cleanup_queue(disks[drive]->queue); /* * These disks have not called add_disk(). Don't put down * queue reference in put_disk(). */ if (!(allowed_drive_mask & (1 << drive)) || fdc_state[FDC(drive)].version == FDC_NONE) disks[drive]->queue = NULL; put_disk(disks[drive]); } cancel_delayed_work_sync(&fd_timeout); cancel_delayed_work_sync(&fd_timer); if (atomic_read(&usage_count)) floppy_release_irq_and_dma(); /* eject disk, if any */ fd_eject(0); }
7,920 · unique_id 4,104 · target 0
SplashPath *Splash::flattenPath(SplashPath *path, SplashCoord *matrix, SplashCoord flatness) { SplashPath *fPath; SplashCoord flatness2; Guchar flag; int i; fPath = new SplashPath(); #if USE_FIXEDPOINT flatness2 = flatness; #else flatness2 = flatness * flatness; #endif i = 0; while (i < path->length) { flag = path->flags[i]; if (flag & splashPathFirst) { fPath->moveTo(path->pts[i].x, path->pts[i].y); ++i; } else { if (flag & splashPathCurve) { flattenCurve(path->pts[i-1].x, path->pts[i-1].y, path->pts[i ].x, path->pts[i ].y, path->pts[i+1].x, path->pts[i+1].y, path->pts[i+2].x, path->pts[i+2].y, matrix, flatness2, fPath); i += 3; } else { fPath->lineTo(path->pts[i].x, path->pts[i].y); ++i; } if (path->flags[i-1] & splashPathClosed) { fPath->close(); } } } return fPath; }
7,921 · unique_id 12,921 · target 0
static int nlmsg_len(const struct nlmsghdr *nlh) { return nlmsg_datalen(nlh); }
7,922 · unique_id 46,882 · target 0
int lrw_camellia_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) { struct camellia_lrw_ctx *ctx = crypto_tfm_ctx(tfm); int err; err = __camellia_setkey(&ctx->camellia_ctx, key, keylen - CAMELLIA_BLOCK_SIZE, &tfm->crt_flags); if (err) return err; return lrw_init_table(&ctx->lrw_table, key + keylen - CAMELLIA_BLOCK_SIZE); }
7,923 · unique_id 96,110 · target 0
void notify_other(int fd) { FILE* stream; int newfd = dup(fd); if (newfd == -1) errExit("dup"); stream = fdopen(newfd, "w"); fprintf(stream, "%u\n", getpid()); fflush(stream); fclose(stream); }
7,924 · unique_id 58,903 · target 0
void kvm_notify_acked_gsi(struct kvm *kvm, int gsi) { struct kvm_irq_ack_notifier *kian; hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list, link) if (kian->gsi == gsi) kian->irq_acked(kian); }
7,925 · unique_id 70,626 · target 0
evdns_request_timeout_callback(evutil_socket_t fd, short events, void *arg) { struct request *const req = (struct request *) arg; struct evdns_base *base = req->base; (void) fd; (void) events; log(EVDNS_LOG_DEBUG, "Request %p timed out", arg); EVDNS_LOCK(base); if (req->tx_count >= req->base->global_max_retransmits) { struct nameserver *ns = req->ns; /* this request has failed */ log(EVDNS_LOG_DEBUG, "Giving up on request %p; tx_count==%d", arg, req->tx_count); reply_schedule_callback(req, 0, DNS_ERR_TIMEOUT, NULL); request_finished(req, &REQ_HEAD(req->base, req->trans_id), 1); nameserver_failed(ns, "request timed out."); } else { /* retransmit it */ log(EVDNS_LOG_DEBUG, "Retransmitting request %p; tx_count==%d", arg, req->tx_count); (void) evtimer_del(&req->timeout_event); request_swap_ns(req, nameserver_pick(base)); evdns_request_transmit(req); req->ns->timedout++; if (req->ns->timedout > req->base->global_max_nameserver_timeout) { req->ns->timedout = 0; nameserver_failed(req->ns, "request timed out."); } } EVDNS_UNLOCK(base); }
7,926 · unique_id 130,918 · target 0
static void orangeMethod(const v8::FunctionCallbackInfo<v8::Value>& info) { TestObject* imp = V8TestObject::toNative(info.Holder()); imp->banana(); }
7,927 · unique_id 132,389 · target 0
void FlagsState::ConvertFlagsToSwitches(FlagsStorage* flags_storage, base::CommandLine* command_line, SentinelsMode sentinels) { if (command_line->HasSwitch(switches::kNoExperiments)) return; std::set<std::string> enabled_experiments; GetSanitizedEnabledFlagsForCurrentPlatform(flags_storage, &enabled_experiments); NameToSwitchAndValueMap name_to_switch_map; for (size_t i = 0; i < num_experiments; ++i) { const Experiment& e = experiments[i]; if (e.type == Experiment::SINGLE_VALUE) { SetFlagToSwitchMapping(e.internal_name, e.command_line_switch, e.command_line_value, &name_to_switch_map); } else if (e.type == Experiment::MULTI_VALUE) { for (int j = 0; j < e.num_choices; ++j) { SetFlagToSwitchMapping(e.NameForChoice(j), e.choices[j].command_line_switch, e.choices[j].command_line_value, &name_to_switch_map); } } else { DCHECK_EQ(e.type, Experiment::ENABLE_DISABLE_VALUE); SetFlagToSwitchMapping(e.NameForChoice(0), std::string(), std::string(), &name_to_switch_map); SetFlagToSwitchMapping(e.NameForChoice(1), e.command_line_switch, e.command_line_value, &name_to_switch_map); SetFlagToSwitchMapping(e.NameForChoice(2), e.disable_command_line_switch, e.disable_command_line_value, &name_to_switch_map); } } if (sentinels == kAddSentinels) { command_line->AppendSwitch(switches::kFlagSwitchesBegin); flags_switches_.insert( std::pair<std::string, std::string>(switches::kFlagSwitchesBegin, std::string())); } for (const std::string& experiment_name : enabled_experiments) { NameToSwitchAndValueMap::const_iterator name_to_switch_it = name_to_switch_map.find(experiment_name); if (name_to_switch_it == name_to_switch_map.end()) { NOTREACHED(); continue; } const std::pair<std::string, std::string>& switch_and_value_pair = name_to_switch_it->second; CHECK(!switch_and_value_pair.first.empty()); command_line->AppendSwitchASCII(switch_and_value_pair.first, switch_and_value_pair.second); flags_switches_[switch_and_value_pair.first] = switch_and_value_pair.second; } if (sentinels == kAddSentinels) { command_line->AppendSwitch(switches::kFlagSwitchesEnd); flags_switches_.insert( std::pair<std::string, std::string>(switches::kFlagSwitchesEnd, std::string())); } }
7,928 · unique_id 92,851 · target 0
static Bool wgt_enum_files(void *cbck, char *file_name, char *file_path, GF_FileEnumInfo *file_info) { WGTEnum *wgt = (WGTEnum *)cbck; if (!strcmp(wgt->root_file, file_path)) return 0; /*remove CVS stuff*/ if (strstr(file_path, ".#")) return 0; gf_list_add(wgt->imports, gf_strdup(file_path) ); return 0; }
7,929 · unique_id 112,396 · target 0
void Document::buildAccessKeyMap(TreeScope* scope) { ASSERT(scope); Node* rootNode = scope->rootNode(); for (Element* element = ElementTraversal::firstWithin(rootNode); element; element = ElementTraversal::next(element, rootNode)) { const AtomicString& accessKey = element->getAttribute(accesskeyAttr); if (!accessKey.isEmpty()) m_elementsByAccessKey.set(accessKey.impl(), element); for (ShadowRoot* root = element->youngestShadowRoot(); root; root = root->olderShadowRoot()) buildAccessKeyMap(root); } }
7,930 · unique_id 42,666 · target 0
static int handle_pcommit(struct kvm_vcpu *vcpu) { /* we never catch pcommit instruct for L1 guest. */ WARN_ON(1); return 1; }
7,931 · unique_id 91,124 · target 0
u32 ip_mtu_from_fib_result(struct fib_result *res, __be32 daddr) { struct fib_info *fi = res->fi; struct fib_nh *nh = &fi->fib_nh[res->nh_sel]; struct net_device *dev = nh->nh_dev; u32 mtu = 0; if (dev_net(dev)->ipv4.sysctl_ip_fwd_use_pmtu || fi->fib_metrics->metrics[RTAX_LOCK - 1] & (1 << RTAX_MTU)) mtu = fi->fib_mtu; if (likely(!mtu)) { struct fib_nh_exception *fnhe; fnhe = find_exception(nh, daddr); if (fnhe && !time_after_eq(jiffies, fnhe->fnhe_expires)) mtu = fnhe->fnhe_pmtu; } if (likely(!mtu)) mtu = min(READ_ONCE(dev->mtu), IP_MAX_MTU); return mtu - lwtunnel_headroom(nh->nh_lwtstate, mtu); }
7,932 · unique_id 11,476 · target 0
fbStore_a8 (FbBits *bits, const CARD32 *values, int x, int width, miIndexedPtr indexed) { int i; CARD8 *pixel = ((CARD8 *) bits) + x; for (i = 0; i < width; ++i) { WRITE(pixel++, READ(values + i) >> 24); } }
7,933 · unique_id 108,739 · target 0
const SkBitmap* ImageDataPlatformBackend::GetMappedBitmap() const { if (!mapped_canvas_.get()) return NULL; return &skia::GetTopDevice(*mapped_canvas_)->accessBitmap(false); }
7,934 · unique_id 157,270 · target 0
void WebMediaPlayerImpl::OnPause() { client_->RequestPause(); }
7,935 · unique_id 176,604 · target 0
xmlParseAttribute2(xmlParserCtxtPtr ctxt, const xmlChar * pref, const xmlChar * elem, const xmlChar ** prefix, xmlChar ** value, int *len, int *alloc) { const xmlChar *name; xmlChar *val, *internal_val = NULL; int normalize = 0; *value = NULL; GROW; name = xmlParseQName(ctxt, prefix); if (name == NULL) { xmlFatalErrMsg(ctxt, XML_ERR_NAME_REQUIRED, "error parsing attribute name\n"); return (NULL); } /* * get the type if needed */ if (ctxt->attsSpecial != NULL) { int type; type = (int) (long) xmlHashQLookup2(ctxt->attsSpecial, pref, elem, *prefix, name); if (type != 0) normalize = 1; } /* * read the value */ SKIP_BLANKS; if (RAW == '=') { NEXT; SKIP_BLANKS; val = xmlParseAttValueInternal(ctxt, len, alloc, normalize); if (normalize) { /* * Sometimes a second normalisation pass for spaces is needed * but that only happens if charrefs or entities refernces * have been used in the attribute value, i.e. the attribute * value have been extracted in an allocated string already. */ if (*alloc) { const xmlChar *val2; val2 = xmlAttrNormalizeSpace2(ctxt, val, len); if ((val2 != NULL) && (val2 != val)) { xmlFree(val); val = (xmlChar *) val2; } } } ctxt->instate = XML_PARSER_CONTENT; } else { xmlFatalErrMsgStr(ctxt, XML_ERR_ATTRIBUTE_WITHOUT_VALUE, "Specification mandate value for attribute %s\n", name); return (NULL); } if (*prefix == ctxt->str_xml) { /* * Check that xml:lang conforms to the specification * No more registered as an error, just generate a warning now * since this was deprecated in XML second edition */ if ((ctxt->pedantic) && (xmlStrEqual(name, BAD_CAST "lang"))) { internal_val = xmlStrndup(val, *len); if (!xmlCheckLanguageID(internal_val)) { xmlWarningMsg(ctxt, XML_WAR_LANG_VALUE, "Malformed value for xml:lang : %s\n", internal_val, NULL); } } /* * Check that xml:space conforms to the specification */ if (xmlStrEqual(name, BAD_CAST "space")) { internal_val = xmlStrndup(val, *len); if (xmlStrEqual(internal_val, BAD_CAST "default")) *(ctxt->space) = 0; else if (xmlStrEqual(internal_val, BAD_CAST "preserve")) *(ctxt->space) = 1; else { xmlWarningMsg(ctxt, XML_WAR_SPACE_VALUE, "Invalid value \"%s\" for xml:space : \"default\" or \"preserve\" expected\n", internal_val, NULL); } } if (internal_val) { xmlFree(internal_val); } } *value = val; return (name); }
7,936 · unique_id 134,724 · target 0
gfx::Size GuestViewBase::GetDefaultSize() const { if (is_full_page_plugin()) { return owner_web_contents() ->GetRenderWidgetHostView() ->GetVisibleViewportSize(); } else { return gfx::Size(guestview::kDefaultWidth, guestview::kDefaultHeight); } }
7,937 · unique_id 176,627 · target 0
xmlParseElement(xmlParserCtxtPtr ctxt) { const xmlChar *name; const xmlChar *prefix = NULL; const xmlChar *URI = NULL; xmlParserNodeInfo node_info; int line, tlen = 0; xmlNodePtr ret; int nsNr = ctxt->nsNr; if (((unsigned int) ctxt->nameNr > xmlParserMaxDepth) && ((ctxt->options & XML_PARSE_HUGE) == 0)) { xmlFatalErrMsgInt(ctxt, XML_ERR_INTERNAL_ERROR, "Excessive depth in document: %d use XML_PARSE_HUGE option\n", xmlParserMaxDepth); xmlHaltParser(ctxt); return; } /* Capture start position */ if (ctxt->record_info) { node_info.begin_pos = ctxt->input->consumed + (CUR_PTR - ctxt->input->base); node_info.begin_line = ctxt->input->line; } if (ctxt->spaceNr == 0) spacePush(ctxt, -1); else if (*ctxt->space == -2) spacePush(ctxt, -1); else spacePush(ctxt, *ctxt->space); line = ctxt->input->line; #ifdef LIBXML_SAX1_ENABLED if (ctxt->sax2) #endif /* LIBXML_SAX1_ENABLED */ name = xmlParseStartTag2(ctxt, &prefix, &URI, &tlen); #ifdef LIBXML_SAX1_ENABLED else name = xmlParseStartTag(ctxt); #endif /* LIBXML_SAX1_ENABLED */ if (ctxt->instate == XML_PARSER_EOF) return; if (name == NULL) { spacePop(ctxt); return; } namePush(ctxt, name); ret = ctxt->node; #ifdef LIBXML_VALID_ENABLED /* * [ VC: Root Element Type ] * The Name in the document type declaration must match the element * type of the root element. */ if (ctxt->validate && ctxt->wellFormed && ctxt->myDoc && ctxt->node && (ctxt->node == ctxt->myDoc->children)) ctxt->valid &= xmlValidateRoot(&ctxt->vctxt, ctxt->myDoc); #endif /* LIBXML_VALID_ENABLED */ /* * Check for an Empty Element. */ if ((RAW == '/') && (NXT(1) == '>')) { SKIP(2); if (ctxt->sax2) { if ((ctxt->sax != NULL) && (ctxt->sax->endElementNs != NULL) && (!ctxt->disableSAX)) ctxt->sax->endElementNs(ctxt->userData, name, prefix, URI); #ifdef LIBXML_SAX1_ENABLED } else { if ((ctxt->sax != NULL) && (ctxt->sax->endElement != NULL) && (!ctxt->disableSAX)) ctxt->sax->endElement(ctxt->userData, name); #endif /* LIBXML_SAX1_ENABLED */ } namePop(ctxt); spacePop(ctxt); if (nsNr != ctxt->nsNr) nsPop(ctxt, ctxt->nsNr - nsNr); if ( ret != NULL && ctxt->record_info ) { node_info.end_pos = ctxt->input->consumed + (CUR_PTR - ctxt->input->base); node_info.end_line = ctxt->input->line; node_info.node = ret; xmlParserAddNodeInfo(ctxt, &node_info); } return; } if (RAW == '>') { NEXT1; } else { xmlFatalErrMsgStrIntStr(ctxt, XML_ERR_GT_REQUIRED, "Couldn't find end of Start Tag %s line %d\n", name, line, NULL); /* * end of parsing of this node. */ nodePop(ctxt); namePop(ctxt); spacePop(ctxt); if (nsNr != ctxt->nsNr) nsPop(ctxt, ctxt->nsNr - nsNr); /* * Capture end position and add node */ if ( ret != NULL && ctxt->record_info ) { node_info.end_pos = ctxt->input->consumed + (CUR_PTR - ctxt->input->base); node_info.end_line = ctxt->input->line; node_info.node = ret; xmlParserAddNodeInfo(ctxt, &node_info); } return; } /* * Parse the content of the element: */ xmlParseContent(ctxt); if (ctxt->instate == XML_PARSER_EOF) return; if (!IS_BYTE_CHAR(RAW)) { xmlFatalErrMsgStrIntStr(ctxt, XML_ERR_TAG_NOT_FINISHED, "Premature end of data in tag %s line %d\n", name, line, NULL); /* * end of parsing of this node. */ nodePop(ctxt); namePop(ctxt); spacePop(ctxt); if (nsNr != ctxt->nsNr) nsPop(ctxt, ctxt->nsNr - nsNr); return; } /* * parse the end of tag: '</' should be here. 
*/ if (ctxt->sax2) { xmlParseEndTag2(ctxt, prefix, URI, line, ctxt->nsNr - nsNr, tlen); namePop(ctxt); } #ifdef LIBXML_SAX1_ENABLED else xmlParseEndTag1(ctxt, line); #endif /* LIBXML_SAX1_ENABLED */ /* * Capture end position and add node */ if ( ret != NULL && ctxt->record_info ) { node_info.end_pos = ctxt->input->consumed + (CUR_PTR - ctxt->input->base); node_info.end_line = ctxt->input->line; node_info.node = ret; xmlParserAddNodeInfo(ctxt, &node_info); } }
7,938 · unique_id 175,289 · target 0
failCauseToString(RIL_Errno e) { switch(e) { case RIL_E_SUCCESS: return "E_SUCCESS"; case RIL_E_RADIO_NOT_AVAILABLE: return "E_RADIO_NOT_AVAILABLE"; case RIL_E_GENERIC_FAILURE: return "E_GENERIC_FAILURE"; case RIL_E_PASSWORD_INCORRECT: return "E_PASSWORD_INCORRECT"; case RIL_E_SIM_PIN2: return "E_SIM_PIN2"; case RIL_E_SIM_PUK2: return "E_SIM_PUK2"; case RIL_E_REQUEST_NOT_SUPPORTED: return "E_REQUEST_NOT_SUPPORTED"; case RIL_E_CANCELLED: return "E_CANCELLED"; case RIL_E_OP_NOT_ALLOWED_DURING_VOICE_CALL: return "E_OP_NOT_ALLOWED_DURING_VOICE_CALL"; case RIL_E_OP_NOT_ALLOWED_BEFORE_REG_TO_NW: return "E_OP_NOT_ALLOWED_BEFORE_REG_TO_NW"; case RIL_E_SMS_SEND_FAIL_RETRY: return "E_SMS_SEND_FAIL_RETRY"; case RIL_E_SIM_ABSENT:return "E_SIM_ABSENT"; case RIL_E_ILLEGAL_SIM_OR_ME:return "E_ILLEGAL_SIM_OR_ME"; #ifdef FEATURE_MULTIMODE_ANDROID case RIL_E_SUBSCRIPTION_NOT_AVAILABLE:return "E_SUBSCRIPTION_NOT_AVAILABLE"; case RIL_E_MODE_NOT_SUPPORTED:return "E_MODE_NOT_SUPPORTED"; #endif case RIL_E_FDN_CHECK_FAILURE: return "E_FDN_CHECK_FAILURE"; case RIL_E_MISSING_RESOURCE: return "E_MISSING_RESOURCE"; case RIL_E_NO_SUCH_ELEMENT: return "E_NO_SUCH_ELEMENT"; case RIL_E_DIAL_MODIFIED_TO_USSD: return "E_DIAL_MODIFIED_TO_USSD"; case RIL_E_DIAL_MODIFIED_TO_SS: return "E_DIAL_MODIFIED_TO_SS"; case RIL_E_DIAL_MODIFIED_TO_DIAL: return "E_DIAL_MODIFIED_TO_DIAL"; case RIL_E_USSD_MODIFIED_TO_DIAL: return "E_USSD_MODIFIED_TO_DIAL"; case RIL_E_USSD_MODIFIED_TO_SS: return "E_USSD_MODIFIED_TO_SS"; case RIL_E_USSD_MODIFIED_TO_USSD: return "E_USSD_MODIFIED_TO_USSD"; case RIL_E_SS_MODIFIED_TO_DIAL: return "E_SS_MODIFIED_TO_DIAL"; case RIL_E_SS_MODIFIED_TO_USSD: return "E_SS_MODIFIED_TO_USSD"; case RIL_E_SUBSCRIPTION_NOT_SUPPORTED: return "E_SUBSCRIPTION_NOT_SUPPORTED"; case RIL_E_SS_MODIFIED_TO_SS: return "E_SS_MODIFIED_TO_SS"; case RIL_E_LCE_NOT_SUPPORTED: return "E_LCE_NOT_SUPPORTED"; case RIL_E_NO_MEMORY: return "E_NO_MEMORY"; case RIL_E_INTERNAL_ERR: return "E_INTERNAL_ERR"; case RIL_E_SYSTEM_ERR: return "E_SYSTEM_ERR"; case RIL_E_MODEM_ERR: return "E_MODEM_ERR"; case RIL_E_INVALID_STATE: return "E_INVALID_STATE"; case RIL_E_NO_RESOURCES: return "E_NO_RESOURCES"; case RIL_E_SIM_ERR: return "E_SIM_ERR"; case RIL_E_INVALID_ARGUMENTS: return "E_INVALID_ARGUMENTS"; case RIL_E_INVALID_SIM_STATE: return "E_INVALID_SIM_STATE"; case RIL_E_INVALID_MODEM_STATE: return "E_INVALID_MODEM_STATE"; case RIL_E_INVALID_CALL_ID: return "E_INVALID_CALL_ID"; case RIL_E_NO_SMS_TO_ACK: return "E_NO_SMS_TO_ACK"; case RIL_E_NETWORK_ERR: return "E_NETWORK_ERR"; case RIL_E_REQUEST_RATE_LIMITED: return "E_REQUEST_RATE_LIMITED"; case RIL_E_SIM_BUSY: return "E_SIM_BUSY"; case RIL_E_SIM_FULL: return "E_SIM_FULL"; case RIL_E_NETWORK_REJECT: return "E_NETWORK_REJECT"; case RIL_E_OPERATION_NOT_ALLOWED: return "E_OPERATION_NOT_ALLOWED"; case RIL_E_EMPTY_RECORD: "E_EMPTY_RECORD"; case RIL_E_INVALID_SMS_FORMAT: return "E_INVALID_SMS_FORMAT"; case RIL_E_ENCODING_ERR: return "E_ENCODING_ERR"; case RIL_E_INVALID_SMSC_ADDRESS: return "E_INVALID_SMSC_ADDRESS"; case RIL_E_NO_SUCH_ENTRY: return "E_NO_SUCH_ENTRY"; case RIL_E_NETWORK_NOT_READY: return "E_NETWORK_NOT_READY"; case RIL_E_NOT_PROVISIONED: return "E_NOT_PROVISIONED"; case RIL_E_NO_SUBSCRIPTION: return "E_NO_SUBSCRIPTION"; case RIL_E_NO_NETWORK_FOUND: return "E_NO_NETWORK_FOUND"; case RIL_E_DEVICE_IN_USE: return "E_DEVICE_IN_USE"; case RIL_E_ABORTED: return "E_ABORTED"; case RIL_E_OEM_ERROR_1: return "E_OEM_ERROR_1"; case RIL_E_OEM_ERROR_2: return "E_OEM_ERROR_2"; case RIL_E_OEM_ERROR_3: return 
"E_OEM_ERROR_3"; case RIL_E_OEM_ERROR_4: return "E_OEM_ERROR_4"; case RIL_E_OEM_ERROR_5: return "E_OEM_ERROR_5"; case RIL_E_OEM_ERROR_6: return "E_OEM_ERROR_6"; case RIL_E_OEM_ERROR_7: return "E_OEM_ERROR_7"; case RIL_E_OEM_ERROR_8: return "E_OEM_ERROR_8"; case RIL_E_OEM_ERROR_9: return "E_OEM_ERROR_9"; case RIL_E_OEM_ERROR_10: return "E_OEM_ERROR_10"; case RIL_E_OEM_ERROR_11: return "E_OEM_ERROR_11"; case RIL_E_OEM_ERROR_12: return "E_OEM_ERROR_12"; case RIL_E_OEM_ERROR_13: return "E_OEM_ERROR_13"; case RIL_E_OEM_ERROR_14: return "E_OEM_ERROR_14"; case RIL_E_OEM_ERROR_15: return "E_OEM_ERROR_15"; case RIL_E_OEM_ERROR_16: return "E_OEM_ERROR_16"; case RIL_E_OEM_ERROR_17: return "E_OEM_ERROR_17"; case RIL_E_OEM_ERROR_18: return "E_OEM_ERROR_18"; case RIL_E_OEM_ERROR_19: return "E_OEM_ERROR_19"; case RIL_E_OEM_ERROR_20: return "E_OEM_ERROR_20"; case RIL_E_OEM_ERROR_21: return "E_OEM_ERROR_21"; case RIL_E_OEM_ERROR_22: return "E_OEM_ERROR_22"; case RIL_E_OEM_ERROR_23: return "E_OEM_ERROR_23"; case RIL_E_OEM_ERROR_24: return "E_OEM_ERROR_24"; case RIL_E_OEM_ERROR_25: return "E_OEM_ERROR_25"; default: return "<unknown error>"; } }
7,939 · unique_id 20,521 · target 0
static void ext4_remove_li_request(struct ext4_li_request *elr) { struct ext4_sb_info *sbi; if (!elr) return; sbi = elr->lr_sbi; list_del(&elr->lr_request); sbi->s_li_request = NULL; kfree(elr); }
7,940 · unique_id 90,698 · target 0
static int incclass(Reclass *cc, Rune c) { Rune *p; for (p = cc->spans; p < cc->end; p += 2) if (p[0] <= c && c <= p[1]) return 1; return 0; }
7,941 · unique_id 113,247 · target 0
ShelfLayoutManager::ShelfLayoutManager(views::Widget* status) : root_window_(Shell::GetPrimaryRootWindow()), in_layout_(false), auto_hide_behavior_(SHELF_AUTO_HIDE_BEHAVIOR_DEFAULT), alignment_(SHELF_ALIGNMENT_BOTTOM), launcher_(NULL), status_(status), workspace_manager_(NULL), window_overlaps_shelf_(false) { Shell::GetInstance()->AddShellObserver(this); aura::client::GetActivationClient(root_window_)->AddObserver(this); }
7,942 · unique_id 72,343 · target 0
free_identity(Identity *id) { sshkey_free(id->key); free(id->provider); free(id->comment); free(id); }
7,943 · unique_id 163,562 · target 0
XmlWriter::XmlWriter() : writer_(NULL), buffer_(NULL) {}
7,944 · unique_id 16,208 · target 0
GahpClient::gt4_gram_client_refresh_credentials(const char *delegation_uri) { static const char* command = "GT4_REFRESH_CREDENTIAL"; if (server->m_commands_supported->contains_anycase(command)==FALSE) { return GAHPCLIENT_COMMAND_NOT_SUPPORTED; } ASSERT (delegation_uri && *delegation_uri); std::string reqline; int x = sprintf(reqline,"%s",escapeGahpString(delegation_uri)); ASSERT( x > 0 ); const char *buf = reqline.c_str(); if ( !is_pending(command,buf) ) { if ( m_mode == results_only ) { return GAHPCLIENT_COMMAND_NOT_SUBMITTED; } now_pending(command,buf,deleg_proxy); } Gahp_Args* result = get_pending_result(command,buf); if ( result ) { if (result->argc != 3) { EXCEPT("Bad %s Result",command); } int rc = atoi(result->argv[1]); if ( strcasecmp(result->argv[2], NULLSTRING) ) { error_string = result->argv[2]; } else { error_string = ""; } delete result; return rc; } if ( check_pending_timeout(command,buf) ) { sprintf( error_string, "%s timed out", command ); return GAHPCLIENT_COMMAND_TIMED_OUT; } return GAHPCLIENT_COMMAND_PENDING; }
7,945 · unique_id 57,109 · target 0
static int nfs4_delay(struct rpc_clnt *clnt, long *timeout) { int res = 0; might_sleep(); freezable_schedule_timeout_killable_unsafe( nfs4_update_delay(timeout)); if (fatal_signal_pending(current)) res = -ERESTARTSYS; return res; }
7,946 · unique_id 118,565 · target 0
void ResetAllFlags(FlagsStorage* flags_storage) { FlagsState::GetInstance()->ResetAllFlags(flags_storage); }
7,947 · unique_id 22,860 · target 0
static int nfs4_commit_done(struct rpc_task *task, struct nfs_write_data *data) { struct inode *inode = data->inode; if (nfs4_async_handle_error(task, NFS_SERVER(inode), NULL) == -EAGAIN) { rpc_restart_call(task); return -EAGAIN; } nfs_refresh_inode(inode, data->res.fattr); return 0; }
7,948 · unique_id 42,503 · target 0
static ssize_t new_offset_store(struct md_rdev *rdev, const char *buf, size_t len) { unsigned long long new_offset; struct mddev *mddev = rdev->mddev; if (kstrtoull(buf, 10, &new_offset) < 0) return -EINVAL; if (mddev->sync_thread || test_bit(MD_RECOVERY_RUNNING,&mddev->recovery)) return -EBUSY; if (new_offset == rdev->data_offset) /* reset is always permitted */ ; else if (new_offset > rdev->data_offset) { /* must not push array size beyond rdev_sectors */ if (new_offset - rdev->data_offset + mddev->dev_sectors > rdev->sectors) return -E2BIG; } /* Metadata worries about other space details. */ /* decreasing the offset is inconsistent with a backwards * reshape. */ if (new_offset < rdev->data_offset && mddev->reshape_backwards) return -EINVAL; /* Increasing offset is inconsistent with forwards * reshape. reshape_direction should be set to * 'backwards' first. */ if (new_offset > rdev->data_offset && !mddev->reshape_backwards) return -EINVAL; if (mddev->pers && mddev->persistent && !super_types[mddev->major_version] .allow_new_offset(rdev, new_offset)) return -E2BIG; rdev->new_data_offset = new_offset; if (new_offset > rdev->data_offset) mddev->reshape_backwards = 1; else if (new_offset < rdev->data_offset) mddev->reshape_backwards = 0; return len; }
7,949 · unique_id 107,803 · target 0
void Browser::OpenCurrentURL() { UserMetrics::RecordAction(UserMetricsAction("LoadURL"), profile_); LocationBar* location_bar = window_->GetLocationBar(); WindowOpenDisposition open_disposition = location_bar->GetWindowOpenDisposition(); if (OpenInstant(open_disposition)) return; GURL url(WideToUTF8(location_bar->GetInputString())); browser::NavigateParams params(this, url, location_bar->GetPageTransition()); params.disposition = open_disposition; params.tabstrip_add_types = TabStripModel::ADD_FORCE_INDEX | TabStripModel::ADD_INHERIT_OPENER; browser::Navigate(&params); }
7,950 · unique_id 70,622 · target 0
evdns_request_data_build(const char *const name, const size_t name_len, const u16 trans_id, const u16 type, const u16 class, u8 *const buf, size_t buf_len) { off_t j = 0; /* current offset into buf */ u16 t_; /* used by the macros */ APPEND16(trans_id); APPEND16(0x0100); /* standard query, recusion needed */ APPEND16(1); /* one question */ APPEND16(0); /* no answers */ APPEND16(0); /* no authority */ APPEND16(0); /* no additional */ j = dnsname_to_labels(buf, buf_len, j, name, name_len, NULL); if (j < 0) { return (int)j; } APPEND16(type); APPEND16(class); return (int)j; overflow: return (-1); }
7,951 · unique_id 49,602 · target 0
static void ffs_epfile_io_complete(struct usb_ep *_ep, struct usb_request *req) { ENTER(); if (likely(req->context)) { struct ffs_ep *ep = _ep->driver_data; ep->status = req->status ? req->status : req->actual; complete(req->context); } }
7,952 · unique_id 32,102 · target 0
static void dev_change_rx_flags(struct net_device *dev, int flags) { const struct net_device_ops *ops = dev->netdev_ops; if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags) ops->ndo_change_rx_flags(dev, flags); }
7,953 · unique_id 177,012 · target 0
void InputDispatcher::synthesizeCancelationEventsForAllConnectionsLocked( const CancelationOptions& options) { for (size_t i = 0; i < mConnectionsByFd.size(); i++) { synthesizeCancelationEventsForConnectionLocked( mConnectionsByFd.valueAt(i), options); } }
7,954 · unique_id 62,961 · target 0
static int handle_ept_misconfig(struct kvm_vcpu *vcpu) { int ret; gpa_t gpa; /* * A nested guest cannot optimize MMIO vmexits, because we have an * nGPA here instead of the required GPA. */ gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS); if (!is_guest_mode(vcpu) && !kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) { trace_kvm_fast_mmio(gpa); return kvm_skip_emulated_instruction(vcpu); } ret = kvm_mmu_page_fault(vcpu, gpa, PFERR_RSVD_MASK, NULL, 0); if (ret >= 0) return ret; /* It is the real ept misconfig */ WARN_ON(1); vcpu->run->exit_reason = KVM_EXIT_UNKNOWN; vcpu->run->hw.hardware_exit_reason = EXIT_REASON_EPT_MISCONFIG; return 0; }
7,955 · unique_id 119,326 · target 0
bool OmniboxEditModel::CommitSuggestedText() { const base::string16 suggestion = view_->GetGrayTextAutocompletion(); if (suggestion.empty()) return false; const base::string16 final_text = view_->GetText() + suggestion; view_->OnBeforePossibleChange(); view_->SetWindowTextAndCaretPos(final_text, final_text.length(), false, false); view_->OnAfterPossibleChange(); return true; }
7,956 · unique_id 145,122 · target 0
void setPublicSuffix(const blink::WebString& suffix) { m_suffixList.setPublicSuffix(suffix); }
7,957 · unique_id 59,812 · target 0
static void hid_restart_io(struct hid_device *hid) { struct usbhid_device *usbhid = hid->driver_data; int clear_halt = test_bit(HID_CLEAR_HALT, &usbhid->iofl); int reset_pending = test_bit(HID_RESET_PENDING, &usbhid->iofl); spin_lock_irq(&usbhid->lock); clear_bit(HID_SUSPENDED, &usbhid->iofl); usbhid_mark_busy(usbhid); if (clear_halt || reset_pending) schedule_work(&usbhid->reset_work); usbhid->retry_delay = 0; spin_unlock_irq(&usbhid->lock); if (reset_pending || !test_bit(HID_STARTED, &usbhid->iofl)) return; if (!clear_halt) { if (hid_start_in(hid) < 0) hid_io_error(hid); } spin_lock_irq(&usbhid->lock); if (usbhid->urbout && !test_bit(HID_OUT_RUNNING, &usbhid->iofl)) usbhid_restart_out_queue(usbhid); if (!test_bit(HID_CTRL_RUNNING, &usbhid->iofl)) usbhid_restart_ctrl_queue(usbhid); spin_unlock_irq(&usbhid->lock); }
7,958 · unique_id 109,229 · target 0
void InspectorOverlay::drawViewSize() { if (m_drawViewSize) evaluateInOverlay("drawViewSize", m_drawViewSizeWithGrid ? "true" : "false"); }
7,959 · unique_id 180,456 · target 1
cJSON *cJSON_DetachItemFromArray( cJSON *array, int which ) { cJSON *c = array->child; while ( c && which > 0 ) { c = c->next; --which; } if ( ! c ) return 0; if ( c->prev ) c->prev->next = c->next; if ( c->next ) c->next->prev = c->prev; if ( c == array->child ) array->child = c->next; c->prev = c->next = 0; return c; }
7,960 · unique_id 6,316 · target 0
static zend_object_value date_object_new_date(zend_class_entry *class_type TSRMLS_DC) { return date_object_new_date_ex(class_type, NULL TSRMLS_CC); }
7,961 · unique_id 107,651 · target 0
Eina_Bool ewk_view_setting_scripts_can_open_windows_set(Evas_Object* ewkView, Eina_Bool allow) { EWK_VIEW_SD_GET_OR_RETURN(ewkView, smartData, false); EWK_VIEW_PRIV_GET_OR_RETURN(smartData, priv, false); allow = !!allow; if (priv->settings.scriptsCanOpenWindows != allow) { priv->pageSettings->setJavaScriptCanOpenWindowsAutomatically(allow); priv->settings.scriptsCanOpenWindows = allow; } return true; }
7,962 · unique_id 6,650 · target 0
SchedulerObject::update(const ClassAd &ad) { MGMT_DECLARATIONS; m_stats.Pool = getPoolName(); STRING(CondorPlatform); STRING(CondorVersion); TIME_INTEGER(DaemonStartTime); TIME_INTEGER(JobQueueBirthdate); STRING(Machine); INTEGER(MaxJobsRunning); INTEGER(MonitorSelfAge); DOUBLE(MonitorSelfCPUUsage); DOUBLE(MonitorSelfImageSize); INTEGER(MonitorSelfRegisteredSocketCount); INTEGER(MonitorSelfResidentSetSize); TIME_INTEGER(MonitorSelfTime); STRING(MyAddress); STRING(Name); INTEGER(NumUsers); STRING(MyAddress); INTEGER(TotalHeldJobs); INTEGER(TotalIdleJobs); INTEGER(TotalJobAds); INTEGER(TotalRemovedJobs); INTEGER(TotalRunningJobs); m_stats.System = m_stats.Machine; if (DebugFlags & D_FULLDEBUG) { const_cast<ClassAd*>(&ad)->dPrint(D_FULLDEBUG|D_NOHEADER); } }
7,963 · unique_id 112,347 · target 0
void ResourceDispatcherHostImpl::OnCancelRequest(int request_id) { CancelRequest(filter_->child_id(), request_id, true); }
7,964 · unique_id 149,844 · target 0
void LayerTreeHost::SetLayerTreeMutator( std::unique_ptr<LayerTreeMutator> mutator) { proxy_->SetMutator(std::move(mutator)); }
7,965 · unique_id 69,288 · target 0
int ssl3_write_pending(SSL *s, int type, const unsigned char *buf, unsigned int len) { int i; SSL3_BUFFER *wb = s->rlayer.wbuf; unsigned int currbuf = 0; /* XXXX */ if ((s->rlayer.wpend_tot > (int)len) || ((s->rlayer.wpend_buf != buf) && !(s->mode & SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER)) || (s->rlayer.wpend_type != type)) { SSLerr(SSL_F_SSL3_WRITE_PENDING, SSL_R_BAD_WRITE_RETRY); return (-1); } for (;;) { /* Loop until we find a buffer we haven't written out yet */ if (SSL3_BUFFER_get_left(&wb[currbuf]) == 0 && currbuf < s->rlayer.numwpipes - 1) { currbuf++; continue; } clear_sys_error(); if (s->wbio != NULL) { s->rwstate = SSL_WRITING; i = BIO_write(s->wbio, (char *) &(SSL3_BUFFER_get_buf(&wb[currbuf]) [SSL3_BUFFER_get_offset(&wb[currbuf])]), (unsigned int)SSL3_BUFFER_get_left(&wb[currbuf])); } else { SSLerr(SSL_F_SSL3_WRITE_PENDING, SSL_R_BIO_NOT_SET); i = -1; } if (i == SSL3_BUFFER_get_left(&wb[currbuf])) { SSL3_BUFFER_set_left(&wb[currbuf], 0); SSL3_BUFFER_add_offset(&wb[currbuf], i); if (currbuf + 1 < s->rlayer.numwpipes) continue; s->rwstate = SSL_NOTHING; return (s->rlayer.wpend_ret); } else if (i <= 0) { if (SSL_IS_DTLS(s)) { /* * For DTLS, just drop it. That's kind of the whole point in * using a datagram service */ SSL3_BUFFER_set_left(&wb[currbuf], 0); } return i; } SSL3_BUFFER_add_offset(&wb[currbuf], i); SSL3_BUFFER_add_left(&wb[currbuf], -i); } }
7,966 · unique_id 157,597 · target 0
void HttpNetworkTransactionTest::CheckErrorIsPassedBack( int error, IoMode mode) { HttpRequestInfo request_info; request_info.url = GURL("https://www.example.com/"); request_info.method = "GET"; request_info.load_flags = LOAD_NORMAL; request_info.traffic_annotation = net::MutableNetworkTrafficAnnotationTag(TRAFFIC_ANNOTATION_FOR_TESTS); SSLSocketDataProvider ssl_data(mode, OK); MockWrite data_writes[] = { MockWrite(mode, error), }; StaticSocketDataProvider data(base::span<MockRead>(), data_writes); session_deps_.socket_factory->AddSocketDataProvider(&data); session_deps_.socket_factory->AddSSLSocketDataProvider(&ssl_data); std::unique_ptr<HttpNetworkSession> session(CreateSession(&session_deps_)); HttpNetworkTransaction trans(DEFAULT_PRIORITY, session.get()); TestCompletionCallback callback; int rv = trans.Start(&request_info, callback.callback(), NetLogWithSource()); if (rv == ERR_IO_PENDING) rv = callback.WaitForResult(); ASSERT_EQ(error, rv); }
7,967 · unique_id 17,008 · target 0
void WebContext::setHostMappingRules(const QStringList& rules) { DCHECK(!IsInitialized()); construct_props_->host_mapping_rules.clear(); for (QStringList::const_iterator it = rules.cbegin(); it != rules.cend(); ++it) { construct_props_->host_mapping_rules.push_back((*it).toStdString()); } }
7,968 · unique_id 157,775 · target 0
base::Optional<gfx::Size> WebContentsImpl::GetFullscreenVideoSize() { base::Optional<WebContentsObserver::MediaPlayerId> id = media_web_contents_observer_->GetFullscreenVideoMediaPlayerId(); if (id && cached_video_sizes_.count(id.value())) return base::Optional<gfx::Size>(cached_video_sizes_[id.value()]); return base::Optional<gfx::Size>(); }
7,969 · unique_id 128,059 · target 0
void AwContents::SetViewVisibility(JNIEnv* env, jobject obj, bool visible) { DCHECK_CURRENTLY_ON(BrowserThread::UI); browser_view_renderer_.SetViewVisibility(visible); }
7,970 · unique_id 2,807 · target 0
gx_device_init(gx_device * dev, const gx_device * proto, gs_memory_t * mem, bool internal) { memcpy(dev, proto, proto->params_size); dev->memory = mem; dev->retained = !internal; rc_init(dev, mem, (internal ? 0 : 1)); rc_increment(dev->icc_struct); }
7,971 · unique_id 90,789 · target 0
static void set_deblocking_bypass(HEVCContext *s, int x0, int y0, int log2_cb_size) { int cb_size = 1 << log2_cb_size; int log2_min_pu_size = s->ps.sps->log2_min_pu_size; int min_pu_width = s->ps.sps->min_pu_width; int x_end = FFMIN(x0 + cb_size, s->ps.sps->width); int y_end = FFMIN(y0 + cb_size, s->ps.sps->height); int i, j; for (j = (y0 >> log2_min_pu_size); j < (y_end >> log2_min_pu_size); j++) for (i = (x0 >> log2_min_pu_size); i < (x_end >> log2_min_pu_size); i++) s->is_pcm[i + j * min_pu_width] = 2; }
7,972 · unique_id 42,721 · target 0
static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, u32 exit_reason, u32 exit_intr_info, unsigned long exit_qualification) { /* update guest state fields: */ vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12); vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12); vmcs12->guest_rsp = kvm_register_read(vcpu, VCPU_REGS_RSP); vmcs12->guest_rip = kvm_register_read(vcpu, VCPU_REGS_RIP); vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS); vmcs12->guest_es_selector = vmcs_read16(GUEST_ES_SELECTOR); vmcs12->guest_cs_selector = vmcs_read16(GUEST_CS_SELECTOR); vmcs12->guest_ss_selector = vmcs_read16(GUEST_SS_SELECTOR); vmcs12->guest_ds_selector = vmcs_read16(GUEST_DS_SELECTOR); vmcs12->guest_fs_selector = vmcs_read16(GUEST_FS_SELECTOR); vmcs12->guest_gs_selector = vmcs_read16(GUEST_GS_SELECTOR); vmcs12->guest_ldtr_selector = vmcs_read16(GUEST_LDTR_SELECTOR); vmcs12->guest_tr_selector = vmcs_read16(GUEST_TR_SELECTOR); vmcs12->guest_es_limit = vmcs_read32(GUEST_ES_LIMIT); vmcs12->guest_cs_limit = vmcs_read32(GUEST_CS_LIMIT); vmcs12->guest_ss_limit = vmcs_read32(GUEST_SS_LIMIT); vmcs12->guest_ds_limit = vmcs_read32(GUEST_DS_LIMIT); vmcs12->guest_fs_limit = vmcs_read32(GUEST_FS_LIMIT); vmcs12->guest_gs_limit = vmcs_read32(GUEST_GS_LIMIT); vmcs12->guest_ldtr_limit = vmcs_read32(GUEST_LDTR_LIMIT); vmcs12->guest_tr_limit = vmcs_read32(GUEST_TR_LIMIT); vmcs12->guest_gdtr_limit = vmcs_read32(GUEST_GDTR_LIMIT); vmcs12->guest_idtr_limit = vmcs_read32(GUEST_IDTR_LIMIT); vmcs12->guest_es_ar_bytes = vmcs_read32(GUEST_ES_AR_BYTES); vmcs12->guest_cs_ar_bytes = vmcs_read32(GUEST_CS_AR_BYTES); vmcs12->guest_ss_ar_bytes = vmcs_read32(GUEST_SS_AR_BYTES); vmcs12->guest_ds_ar_bytes = vmcs_read32(GUEST_DS_AR_BYTES); vmcs12->guest_fs_ar_bytes = vmcs_read32(GUEST_FS_AR_BYTES); vmcs12->guest_gs_ar_bytes = vmcs_read32(GUEST_GS_AR_BYTES); vmcs12->guest_ldtr_ar_bytes = vmcs_read32(GUEST_LDTR_AR_BYTES); vmcs12->guest_tr_ar_bytes = vmcs_read32(GUEST_TR_AR_BYTES); vmcs12->guest_es_base = vmcs_readl(GUEST_ES_BASE); vmcs12->guest_cs_base = vmcs_readl(GUEST_CS_BASE); vmcs12->guest_ss_base = vmcs_readl(GUEST_SS_BASE); vmcs12->guest_ds_base = vmcs_readl(GUEST_DS_BASE); vmcs12->guest_fs_base = vmcs_readl(GUEST_FS_BASE); vmcs12->guest_gs_base = vmcs_readl(GUEST_GS_BASE); vmcs12->guest_ldtr_base = vmcs_readl(GUEST_LDTR_BASE); vmcs12->guest_tr_base = vmcs_readl(GUEST_TR_BASE); vmcs12->guest_gdtr_base = vmcs_readl(GUEST_GDTR_BASE); vmcs12->guest_idtr_base = vmcs_readl(GUEST_IDTR_BASE); vmcs12->guest_interruptibility_info = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); vmcs12->guest_pending_dbg_exceptions = vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS); if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) vmcs12->guest_activity_state = GUEST_ACTIVITY_HLT; else vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE; if (nested_cpu_has_preemption_timer(vmcs12)) { if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER) vmcs12->vmx_preemption_timer_value = vmx_get_preemption_timer_value(vcpu); hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer); } /* * In some cases (usually, nested EPT), L2 is allowed to change its * own CR3 without exiting. If it has changed it, we must keep it. * Of course, if L0 is using shadow page tables, GUEST_CR3 was defined * by L0, not L1 or L2, so we mustn't unconditionally copy it to vmcs12. * * Additionally, restore L2's PDPTR to vmcs12. */ if (enable_ept) { vmcs12->guest_cr3 = vmcs_read64(GUEST_CR3); vmcs12->guest_pdptr0 = vmcs_read64(GUEST_PDPTR0); vmcs12->guest_pdptr1 = vmcs_read64(GUEST_PDPTR1); vmcs12->guest_pdptr2 = vmcs_read64(GUEST_PDPTR2); vmcs12->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3); } if (nested_cpu_has_vid(vmcs12)) vmcs12->guest_intr_status = vmcs_read16(GUEST_INTR_STATUS); vmcs12->vm_entry_controls = (vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) | (vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE); if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS) { kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7); vmcs12->guest_ia32_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL); } /* TODO: These cannot have changed unless we have MSR bitmaps and * the relevant bit asks not to trap the change */ if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_PAT) vmcs12->guest_ia32_pat = vmcs_read64(GUEST_IA32_PAT); if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_EFER) vmcs12->guest_ia32_efer = vcpu->arch.efer; vmcs12->guest_sysenter_cs = vmcs_read32(GUEST_SYSENTER_CS); vmcs12->guest_sysenter_esp = vmcs_readl(GUEST_SYSENTER_ESP); vmcs12->guest_sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP); if (vmx_mpx_supported()) vmcs12->guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS); if (nested_cpu_has_xsaves(vmcs12)) vmcs12->xss_exit_bitmap = vmcs_read64(XSS_EXIT_BITMAP); /* update exit information fields: */ vmcs12->vm_exit_reason = exit_reason; vmcs12->exit_qualification = exit_qualification; vmcs12->vm_exit_intr_info = exit_intr_info; if ((vmcs12->vm_exit_intr_info & (INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK)) == (INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK)) vmcs12->vm_exit_intr_error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE); vmcs12->idt_vectoring_info_field = 0; vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN); vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) { /* vm_entry_intr_info_field is cleared on exit. Emulate this * instead of reading the real value. */ vmcs12->vm_entry_intr_info_field &= ~INTR_INFO_VALID_MASK; /* * Transfer the event that L0 or L1 may wanted to inject into * L2 to IDT_VECTORING_INFO_FIELD. */ vmcs12_save_pending_event(vcpu, vmcs12); } /* * Drop what we picked up for L2 via vmx_complete_interrupts. It is * preserved above and would only end up incorrectly in L1. */ vcpu->arch.nmi_injected = false; kvm_clear_exception_queue(vcpu); kvm_clear_interrupt_queue(vcpu); }
7,973
18,336
0
create_mainbar () { GUI *g = &uzbl.gui; g->mainbar = gtk_hbox_new (FALSE, 0); g->mainbar_label = gtk_label_new (""); gtk_label_set_selectable((GtkLabel *)g->mainbar_label, TRUE); gtk_label_set_ellipsize(GTK_LABEL(g->mainbar_label), PANGO_ELLIPSIZE_END); gtk_misc_set_alignment (GTK_MISC(g->mainbar_label), 0, 0); gtk_misc_set_padding (GTK_MISC(g->mainbar_label), 2, 2); gtk_box_pack_start (GTK_BOX (g->mainbar), g->mainbar_label, TRUE, TRUE, 0); g_object_connect((GObject*)g->mainbar, "signal::key-press-event", (GCallback)key_press_cb, NULL, "signal::key-release-event", (GCallback)key_release_cb, NULL, NULL); return g->mainbar; }
7,974
29,313
0
static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val, void *v) { int cpu = (long)v; val &= ~CPU_TASKS_FROZEN; switch (val) { case CPU_DYING: printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n", cpu); hardware_disable(); break; case CPU_STARTING: printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n", cpu); hardware_enable(); break; } return NOTIFY_OK; }
7,975
98,933
0
void HTMLConstructionSite::insertHTMLHtmlStartTagBeforeHTML(AtomicHTMLToken& token) { RefPtr<Element> element = HTMLHtmlElement::create(m_document); element->setAttributeMap(token.takeAtributes(), m_fragmentScriptingPermission); m_openElements.pushHTMLHtmlElement(attach(m_document, element.release())); dispatchDocumentElementAvailableIfNeeded(); }
7,976
166,556
0
void BrowserCommandController::TabRestoreServiceDestroyed( sessions::TabRestoreService* service) { service->RemoveObserver(this); }
7,977
32,502
0
static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off, u32 len) { int err; u32 i, bufoff, msgoff, maxlen, apedata; if (!tg3_flag(tp, APE_HAS_NCSI)) return 0; apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG); if (apedata != APE_SEG_SIG_MAGIC) return -ENODEV; apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS); if (!(apedata & APE_FW_STATUS_READY)) return -EAGAIN; bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) + TG3_APE_SHMEM_BASE; msgoff = bufoff + 2 * sizeof(u32); maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN); while (len) { u32 length; /* Cap xfer sizes to scratchpad limits. */ length = (len > maxlen) ? maxlen : len; len -= length; apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS); if (!(apedata & APE_FW_STATUS_READY)) return -EAGAIN; /* Wait for up to 1 msec for APE to service previous event. */ err = tg3_ape_event_lock(tp, 1000); if (err) return err; apedata = APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_SCRTCHPD_READ | APE_EVENT_STATUS_EVENT_PENDING; tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata); tg3_ape_write32(tp, bufoff, base_off); tg3_ape_write32(tp, bufoff + sizeof(u32), length); tg3_ape_unlock(tp, TG3_APE_LOCK_MEM); tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1); base_off += length; if (tg3_ape_wait_for_event(tp, 30000)) return -EAGAIN; for (i = 0; length; i += 4, length -= 4) { u32 val = tg3_ape_read32(tp, msgoff + i); memcpy(data, &val, sizeof(u32)); data++; } } return 0; }
7,978
97,876
0
void RenderView::ClearBlockedContentSettings() { for (size_t i = 0; i < arraysize(content_blocked_); ++i) content_blocked_[i] = false; }
7,979
689
0
static const char *default_charset(void) { # if defined HAVE_LIBCHARSET_H && defined HAVE_LOCALE_CHARSET return locale_charset(); # elif defined HAVE_LANGINFO_H && defined HAVE_NL_LANGINFO return nl_langinfo(CODESET); # else return ""; /* Works with (at the very least) gnu iconv... */ # endif }
7,980
185,326
1
void CastCastView::ButtonPressed(views::Button* sender, const ui::Event& event) { DCHECK(sender == stop_button_); StopCast(); }
7,981
81,856
0
ecEncCtx* wc_ecc_ctx_new(int flags, WC_RNG* rng) { return wc_ecc_ctx_new_ex(flags, rng, NULL); }
7,982
149,847
0
void LayerTreeHost::SetNeedsAnimate() { proxy_->SetNeedsAnimate(); swap_promise_manager_.NotifySwapPromiseMonitorsOfSetNeedsCommit(); }
7,983
168,146
0
void AutofillMetricsTest::PurgeUKM() { autofill_manager_->Reset(); test_ukm_recorder_.Purge(); autofill_client_.InitializeUKMSources(); }
7,984
157,691
0
explicit LoadCommittedDetailsObserver(WebContents* web_contents) : WebContentsObserver(web_contents), navigation_type_(NAVIGATION_TYPE_UNKNOWN), is_same_document_(false), is_main_frame_(false), did_replace_entry_(false) {}
7,985
66,811
0
struct MACH0_(mach_header) * MACH0_(get_hdr_from_bytes)(RBuffer *buf) { ut8 magicbytes[sizeof (ut32)] = {0}; ut8 machohdrbytes[sizeof (struct MACH0_(mach_header))] = {0}; int len; struct MACH0_(mach_header) *macho_hdr = R_NEW0 (struct MACH0_(mach_header)); bool big_endian = false; if (!macho_hdr) { return NULL; } if (r_buf_read_at (buf, 0, magicbytes, 4) < 1) { free (macho_hdr); return false; } if (r_read_le32 (magicbytes) == 0xfeedface) { big_endian = false; } else if (r_read_be32 (magicbytes) == 0xfeedface) { big_endian = true; } else if (r_read_le32 (magicbytes) == FAT_MAGIC) { big_endian = false; } else if (r_read_be32 (magicbytes) == FAT_MAGIC) { big_endian = true; } else if (r_read_le32 (magicbytes) == 0xfeedfacf) { big_endian = false; } else if (r_read_be32 (magicbytes) == 0xfeedfacf) { big_endian = true; } else { /* also extract non-mach0s */ #if 0 free (macho_hdr); return NULL; #endif } len = r_buf_read_at (buf, 0, machohdrbytes, sizeof (machohdrbytes)); if (len != sizeof(struct MACH0_(mach_header))) { free (macho_hdr); return NULL; } macho_hdr->magic = r_read_ble (&machohdrbytes[0], big_endian, 32); macho_hdr->cputype = r_read_ble (&machohdrbytes[4], big_endian, 32); macho_hdr->cpusubtype = r_read_ble (&machohdrbytes[8], big_endian, 32); macho_hdr->filetype = r_read_ble (&machohdrbytes[12], big_endian, 32); macho_hdr->ncmds = r_read_ble (&machohdrbytes[16], big_endian, 32); macho_hdr->sizeofcmds = r_read_ble (&machohdrbytes[20], big_endian, 32); macho_hdr->flags = r_read_ble (&machohdrbytes[24], big_endian, 32); #if R_BIN_MACH064 macho_hdr->reserved = r_read_ble (&machohdrbytes[28], big_endian, 32); #endif return macho_hdr; }
7,986
180,098
1
static void disk_seqf_stop(struct seq_file *seqf, void *v) { struct class_dev_iter *iter = seqf->private; /* stop is called even after start failed :-( */ if (iter) { class_dev_iter_exit(iter); kfree(iter); } }
7,987
88,619
0
void mwifiex_set_vht_params(struct mwifiex_private *priv, struct mwifiex_uap_bss_param *bss_cfg, struct cfg80211_ap_settings *params) { const u8 *vht_ie; vht_ie = cfg80211_find_ie(WLAN_EID_VHT_CAPABILITY, params->beacon.tail, params->beacon.tail_len); if (vht_ie) { memcpy(&bss_cfg->vht_cap, vht_ie + 2, sizeof(struct ieee80211_vht_cap)); priv->ap_11ac_enabled = 1; } else { priv->ap_11ac_enabled = 0; } return; }
7,988
45,202
0
int ssl3_get_cert_verify(SSL *s) { EVP_PKEY *pkey=NULL; unsigned char *p; int al,ok,ret=0; long n; int type=0,i,j; X509 *peer; const EVP_MD *md = NULL; EVP_MD_CTX mctx; EVP_MD_CTX_init(&mctx); n=s->method->ssl_get_message(s, SSL3_ST_SR_CERT_VRFY_A, SSL3_ST_SR_CERT_VRFY_B, -1, SSL3_RT_MAX_PLAIN_LENGTH, &ok); if (!ok) return((int)n); if (s->session->peer != NULL) { peer=s->session->peer; pkey=X509_get_pubkey(peer); type=X509_certificate_type(peer,pkey); } else { peer=NULL; pkey=NULL; } if (s->s3->tmp.message_type != SSL3_MT_CERTIFICATE_VERIFY) { s->s3->tmp.reuse_message=1; if ((peer != NULL) && (type & EVP_PKT_SIGN)) { al=SSL_AD_UNEXPECTED_MESSAGE; SSLerr(SSL_F_SSL3_GET_CERT_VERIFY,SSL_R_MISSING_VERIFY_MESSAGE); goto f_err; } ret=1; goto end; } if (peer == NULL) { SSLerr(SSL_F_SSL3_GET_CERT_VERIFY,SSL_R_NO_CLIENT_CERT_RECEIVED); al=SSL_AD_UNEXPECTED_MESSAGE; goto f_err; } if (!(type & EVP_PKT_SIGN)) { SSLerr(SSL_F_SSL3_GET_CERT_VERIFY,SSL_R_SIGNATURE_FOR_NON_SIGNING_CERTIFICATE); al=SSL_AD_ILLEGAL_PARAMETER; goto f_err; } if (s->s3->change_cipher_spec) { SSLerr(SSL_F_SSL3_GET_CERT_VERIFY,SSL_R_CCS_RECEIVED_EARLY); al=SSL_AD_UNEXPECTED_MESSAGE; goto f_err; } /* we now have a signature that we need to verify */ p=(unsigned char *)s->init_msg; /* Check for broken implementations of GOST ciphersuites */ /* If key is GOST and n is exactly 64, it is bare * signature without length field */ if (n==64 && (pkey->type==NID_id_GostR3410_94 || pkey->type == NID_id_GostR3410_2001) ) { i=64; } else { if (SSL_USE_SIGALGS(s)) { int rv = tls12_check_peer_sigalg(&md, s, p, pkey); if (rv == -1) { al = SSL_AD_INTERNAL_ERROR; goto f_err; } else if (rv == 0) { al = SSL_AD_DECODE_ERROR; goto f_err; } #ifdef SSL_DEBUG fprintf(stderr, "USING TLSv1.2 HASH %s\n", EVP_MD_name(md)); #endif p += 2; n -= 2; } n2s(p,i); n-=2; if (i > n) { SSLerr(SSL_F_SSL3_GET_CERT_VERIFY,SSL_R_LENGTH_MISMATCH); al=SSL_AD_DECODE_ERROR; goto f_err; } } j=EVP_PKEY_size(pkey); if ((i > j) || (n > j) || (n <= 0)) { SSLerr(SSL_F_SSL3_GET_CERT_VERIFY,SSL_R_WRONG_SIGNATURE_SIZE); al=SSL_AD_DECODE_ERROR; goto f_err; } if (SSL_USE_SIGALGS(s)) { long hdatalen = 0; void *hdata; hdatalen = BIO_get_mem_data(s->s3->handshake_buffer, &hdata); if (hdatalen <= 0) { SSLerr(SSL_F_SSL3_GET_CERT_VERIFY, ERR_R_INTERNAL_ERROR); al=SSL_AD_INTERNAL_ERROR; goto f_err; } #ifdef SSL_DEBUG fprintf(stderr, "Using TLS 1.2 with client verify alg %s\n", EVP_MD_name(md)); #endif if (!EVP_VerifyInit_ex(&mctx, md, NULL) || !EVP_VerifyUpdate(&mctx, hdata, hdatalen)) { SSLerr(SSL_F_SSL3_GET_CERT_VERIFY, ERR_R_EVP_LIB); al=SSL_AD_INTERNAL_ERROR; goto f_err; } if (EVP_VerifyFinal(&mctx, p , i, pkey) <= 0) { al=SSL_AD_DECRYPT_ERROR; SSLerr(SSL_F_SSL3_GET_CERT_VERIFY,SSL_R_BAD_SIGNATURE); goto f_err; } } else #ifndef OPENSSL_NO_RSA if (pkey->type == EVP_PKEY_RSA) { i=RSA_verify(NID_md5_sha1, s->s3->tmp.cert_verify_md, MD5_DIGEST_LENGTH+SHA_DIGEST_LENGTH, p, i, pkey->pkey.rsa); if (i < 0) { al=SSL_AD_DECRYPT_ERROR; SSLerr(SSL_F_SSL3_GET_CERT_VERIFY,SSL_R_BAD_RSA_DECRYPT); goto f_err; } if (i == 0) { al=SSL_AD_DECRYPT_ERROR; SSLerr(SSL_F_SSL3_GET_CERT_VERIFY,SSL_R_BAD_RSA_SIGNATURE); goto f_err; } } else #endif #ifndef OPENSSL_NO_DSA if (pkey->type == EVP_PKEY_DSA) { j=DSA_verify(pkey->save_type, &(s->s3->tmp.cert_verify_md[MD5_DIGEST_LENGTH]), SHA_DIGEST_LENGTH,p,i,pkey->pkey.dsa); if (j <= 0) { /* bad signature */ al=SSL_AD_DECRYPT_ERROR; SSLerr(SSL_F_SSL3_GET_CERT_VERIFY,SSL_R_BAD_DSA_SIGNATURE); goto f_err; } } else #endif #ifndef OPENSSL_NO_ECDSA if (pkey->type == EVP_PKEY_EC) { j=ECDSA_verify(pkey->save_type, &(s->s3->tmp.cert_verify_md[MD5_DIGEST_LENGTH]), SHA_DIGEST_LENGTH,p,i,pkey->pkey.ec); if (j <= 0) { /* bad signature */ al=SSL_AD_DECRYPT_ERROR; SSLerr(SSL_F_SSL3_GET_CERT_VERIFY, SSL_R_BAD_ECDSA_SIGNATURE); goto f_err; } } else #endif if (pkey->type == NID_id_GostR3410_94 || pkey->type == NID_id_GostR3410_2001) { unsigned char signature[64]; int idx; EVP_PKEY_CTX *pctx = EVP_PKEY_CTX_new(pkey,NULL); EVP_PKEY_verify_init(pctx); if (i!=64) { fprintf(stderr,"GOST signature length is %d",i); } for (idx=0;idx<64;idx++) { signature[63-idx]=p[idx]; } j=EVP_PKEY_verify(pctx,signature,64,s->s3->tmp.cert_verify_md,32); EVP_PKEY_CTX_free(pctx); if (j<=0) { al=SSL_AD_DECRYPT_ERROR; SSLerr(SSL_F_SSL3_GET_CERT_VERIFY, SSL_R_BAD_ECDSA_SIGNATURE); goto f_err; } } else { SSLerr(SSL_F_SSL3_GET_CERT_VERIFY,ERR_R_INTERNAL_ERROR); al=SSL_AD_UNSUPPORTED_CERTIFICATE; goto f_err; } ret=1; if (0) { f_err: ssl3_send_alert(s,SSL3_AL_FATAL,al); } end: if (s->s3->handshake_buffer) { BIO_free(s->s3->handshake_buffer); s->s3->handshake_buffer = NULL; s->s3->flags &= ~TLS1_FLAGS_KEEP_HANDSHAKE; } EVP_MD_CTX_cleanup(&mctx); EVP_PKEY_free(pkey); return(ret); }
7,989
72,818
0
void jas_tmr_stop(jas_tmr_t *tmr) { }
7,990
147,971
0
static void UnscopableRuntimeEnabledLongAttributeAttributeGetter(const v8::FunctionCallbackInfo<v8::Value>& info) { v8::Local<v8::Object> holder = info.Holder(); TestObject* impl = V8TestObject::ToImpl(holder); V8SetReturnValueInt(info, impl->unscopableRuntimeEnabledLongAttribute()); }
7,991
31,369
0
static int shash_finup_unaligned(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { return crypto_shash_update(desc, data, len) ?: crypto_shash_final(desc, out); }
7,992
123,854
0
bool RenderViewImpl::ForceCompositingModeEnabled() { return webkit_preferences_.force_compositing_mode; }
7,993
162,281
0
void CommandBufferProxyImpl::OrderingBarrier(int32_t put_offset) { CheckLock(); base::AutoLock lock(last_state_lock_); if (last_state_.error != gpu::error::kNoError) return; TRACE_EVENT1("gpu", "CommandBufferProxyImpl::OrderingBarrier", "put_offset", put_offset); OrderingBarrierHelper(put_offset); }
7,994
36,438
0
init_ldap_connection (cherokee_validator_ldap_t *ldap, cherokee_validator_ldap_props_t *props) { int re; int val; /* Connect */ ldap->conn = ldap_init (props->server.buf, props->port); if (ldap->conn == NULL) { LOG_ERRNO (errno, cherokee_err_critical, CHEROKEE_ERROR_VALIDATOR_LDAP_CONNECT, props->server.buf, props->port); return ret_error; } TRACE (ENTRIES, "Connected to %s:%d\n", props->server.buf, props->port); /* Set LDAP protocol version */ val = LDAP_VERSION3; re = ldap_set_option (ldap->conn, LDAP_OPT_PROTOCOL_VERSION, &val); if (re != LDAP_OPT_SUCCESS) { LOG_ERROR (CHEROKEE_ERROR_VALIDATOR_LDAP_V3, ldap_err2string(re)); return ret_error; } TRACE (ENTRIES, "LDAP protocol version %d set\n", LDAP_VERSION3); /* Secure connections */ if (props->tls) { #ifdef LDAP_OPT_X_TLS if (! cherokee_buffer_is_empty (&props->ca_file)) { re = ldap_set_option (NULL, LDAP_OPT_X_TLS_CACERTFILE, props->ca_file.buf); if (re != LDAP_OPT_SUCCESS) { LOG_CRITICAL (CHEROKEE_ERROR_VALIDATOR_LDAP_CA, props->ca_file.buf, ldap_err2string (re)); return ret_error; } } #else LOG_ERROR_S (CHEROKEE_ERROR_VALIDATOR_LDAP_STARTTLS); #endif } /* Bind */ if (cherokee_buffer_is_empty (&props->binddn)) { TRACE (ENTRIES, "anonymous bind %s", "\n"); re = ldap_simple_bind_s (ldap->conn, NULL, NULL); } else { TRACE (ENTRIES, "bind user=%s password=%s\n", props->binddn.buf, props->bindpw.buf); re = ldap_simple_bind_s (ldap->conn, props->binddn.buf, props->bindpw.buf); } if (re != LDAP_SUCCESS) { LOG_CRITICAL (CHEROKEE_ERROR_VALIDATOR_LDAP_BIND, props->server.buf, props->port, props->binddn.buf, props->bindpw.buf, ldap_err2string(re)); return ret_error; } return ret_ok; }
7,995
100,401
0
void BrowserRenderProcessHost::WidgetHidden() { if (backgrounded_) return; DCHECK_EQ(backgrounded_, (visible_widgets_ == 0)); visible_widgets_--; DCHECK_GE(visible_widgets_, 0); if (visible_widgets_ == 0) { DCHECK(!backgrounded_); SetBackgrounded(true); } }
7,996
182,079
1
static inline void VectorClamp3(DDSVector3 *value) { value->x = MinF(1.0f,MaxF(0.0f,value->x)); value->y = MinF(1.0f,MaxF(0.0f,value->y)); value->z = MinF(1.0f,MaxF(0.0f,value->z)); }
7,997
60,901
0
filesystem_info_stop (NautilusDirectory *directory) { NautilusFile *file; if (directory->details->filesystem_info_state != NULL) { file = directory->details->filesystem_info_state->file; if (file != NULL) { g_assert (NAUTILUS_IS_FILE (file)); g_assert (file->details->directory == directory); if (is_needy (file, lacks_filesystem_info, REQUEST_FILESYSTEM_INFO)) { return; } } /* The filesystem info is not wanted, so stop it. */ filesystem_info_cancel (directory); } }
7,998
48,819
0
static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb, struct rps_dev_flow **rflowp) { const struct rps_sock_flow_table *sock_flow_table; struct netdev_rx_queue *rxqueue = dev->_rx; struct rps_dev_flow_table *flow_table; struct rps_map *map; int cpu = -1; u32 tcpu; u32 hash; if (skb_rx_queue_recorded(skb)) { u16 index = skb_get_rx_queue(skb); if (unlikely(index >= dev->real_num_rx_queues)) { WARN_ONCE(dev->real_num_rx_queues > 1, "%s received packet on queue %u, but number " "of RX queues is %u\n", dev->name, index, dev->real_num_rx_queues); goto done; } rxqueue += index; } /* Avoid computing hash if RFS/RPS is not active for this rxqueue */ flow_table = rcu_dereference(rxqueue->rps_flow_table); map = rcu_dereference(rxqueue->rps_map); if (!flow_table && !map) goto done; skb_reset_network_header(skb); hash = skb_get_hash(skb); if (!hash) goto done; sock_flow_table = rcu_dereference(rps_sock_flow_table); if (flow_table && sock_flow_table) { struct rps_dev_flow *rflow; u32 next_cpu; u32 ident; /* First check into global flow table if there is a match */ ident = sock_flow_table->ents[hash & sock_flow_table->mask]; if ((ident ^ hash) & ~rps_cpu_mask) goto try_rps; next_cpu = ident & rps_cpu_mask; /* OK, now we know there is a match, * we can look at the local (per receive queue) flow table */ rflow = &flow_table->flows[hash & flow_table->mask]; tcpu = rflow->cpu; /* * If the desired CPU (where last recvmsg was done) is * different from current CPU (one in the rx-queue flow * table entry), switch if one of the following holds: * - Current CPU is unset (>= nr_cpu_ids). * - Current CPU is offline. * - The current CPU's queue tail has advanced beyond the * last packet that was enqueued using this table entry. * This guarantees that all previous packets for the flow * have been dequeued, thus preserving in order delivery. */ if (unlikely(tcpu != next_cpu) && (tcpu >= nr_cpu_ids || !cpu_online(tcpu) || ((int)(per_cpu(softnet_data, tcpu).input_queue_head - rflow->last_qtail)) >= 0)) { tcpu = next_cpu; rflow = set_rps_cpu(dev, skb, rflow, next_cpu); } if (tcpu < nr_cpu_ids && cpu_online(tcpu)) { *rflowp = rflow; cpu = tcpu; goto done; } } try_rps: if (map) { tcpu = map->cpus[reciprocal_scale(hash, map->len)]; if (cpu_online(tcpu)) { cpu = tcpu; goto done; } } done: return cpu; }
7,999