Column              Type    Range
unique_id           int64   13 – 189k
target              int64   0 – 1
code                string  lengths 20 – 241k
__index_level_0__   int64   0 – 18.9k
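The records below repeat the four fields in this order: unique_id, target, the flattened C/C++ function in code, then __index_level_0__. A minimal sketch of loading and inspecting a table with this schema via the Hugging Face `datasets` library follows; the dataset identifier is a placeholder (not the actual source of this dump), and reading target as a vulnerability label is an assumption, not something the table itself states.

```python
# Minimal sketch: load a dataset with the schema shown above and inspect it.
# "user/code-defect-data" is a hypothetical identifier, not the real source
# of this dump; treating target as a vulnerability flag is an assumption.
from collections import Counter

from datasets import load_dataset

ds = load_dataset("user/code-defect-data", split="train")

print(ds.column_names)        # ['unique_id', 'target', 'code', '__index_level_0__']
print(Counter(ds["target"]))  # label balance, e.g. Counter({0: ..., 1: ...})

for row in ds.select(range(3)):  # peek at the first few records
    print(row["unique_id"], row["target"], len(row["code"]))
```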
144,398
0
void ExtensionInstallPrompt::ShowConfirmation() { scoped_ptr<const PermissionSet> permissions_wrapper; const PermissionSet* permissions_to_display = nullptr; if (custom_permissions_.get()) { permissions_to_display = custom_permissions_.get(); } else if (extension_) { extensions::PermissionsUpdater( profile_, extensions::PermissionsUpdater::INIT_FLAG_TRANSIENT) .InitializePermissions(extension_); permissions_to_display = &extension_->permissions_data()->active_permissions(); if (prompt_->type() == DELEGATED_PERMISSIONS_PROMPT || prompt_->type() == DELEGATED_BUNDLE_PERMISSIONS_PROMPT) { const PermissionSet& optional_permissions = extensions::PermissionsParser::GetOptionalPermissions(extension_); permissions_wrapper = PermissionSet::CreateUnion(*permissions_to_display, optional_permissions); permissions_to_display = permissions_wrapper.get(); } } if (permissions_to_display && (!extension_ || !extensions::PermissionsData::ShouldSkipPermissionWarnings( extension_->id()))) { Manifest::Type type = extension_ ? extension_->GetType() : Manifest::TYPE_UNKNOWN; const extensions::PermissionMessageProvider* message_provider = extensions::PermissionMessageProvider::Get(); prompt_->SetPermissions(message_provider->GetPermissionMessages( message_provider->GetAllPermissionIDs( *permissions_to_display, type)), REGULAR_PERMISSIONS); const PermissionSet* withheld = extension_ ? &extension_->permissions_data()->withheld_permissions() : nullptr; if (withheld && !withheld->IsEmpty()) { prompt_->SetPermissions( message_provider->GetPermissionMessages( message_provider->GetAllPermissionIDs(*withheld, type)), WITHHELD_PERMISSIONS); } } switch (prompt_->type()) { case PERMISSIONS_PROMPT: case RE_ENABLE_PROMPT: case INLINE_INSTALL_PROMPT: case EXTERNAL_INSTALL_PROMPT: case INSTALL_PROMPT: case POST_INSTALL_PERMISSIONS_PROMPT: case REMOTE_INSTALL_PROMPT: case REPAIR_PROMPT: case DELEGATED_PERMISSIONS_PROMPT: { prompt_->set_extension(extension_); break; } case BUNDLE_INSTALL_PROMPT: case DELEGATED_BUNDLE_PERMISSIONS_PROMPT: { prompt_->set_bundle(bundle_); break; } case LAUNCH_PROMPT_DEPRECATED: default: NOTREACHED() << "Unknown message"; return; } prompt_->set_delegated_username(delegated_username_); prompt_->set_icon(gfx::Image::CreateFrom1xBitmap(icon_)); g_last_prompt_type_for_tests = prompt_->type(); if (AutoConfirmPrompt(delegate_)) return; if (show_params_->WasParentDestroyed()) { delegate_->InstallUIAbort(false); return; } if (show_dialog_callback_.is_null()) GetDefaultShowDialogCallback().Run(show_params_.get(), delegate_, prompt_); else show_dialog_callback_.Run(show_params_.get(), delegate_, prompt_); }
4,700
109,726
0
PassRefPtr<HTMLCollection> Document::links() { return ensureCachedCollection(DocLinks); }
4,701
84,724
0
loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg) { struct loop_info64 info64; int err = 0; if (!arg) err = -EINVAL; if (!err) err = loop_get_status(lo, &info64); if (!err && copy_to_user(arg, &info64, sizeof(info64))) err = -EFAULT; return err; }
4,702
125,661
0
void RenderViewHostImpl::OnFrameTreeUpdated(const std::string& frame_tree) { DCHECK(false); frame_tree_ = frame_tree; delegate_->DidUpdateFrameTree(this); }
4,703
34,988
0
static int __sctp_connect(struct sock* sk, struct sockaddr *kaddrs, int addrs_size) { struct sctp_sock *sp; struct sctp_endpoint *ep; struct sctp_association *asoc = NULL; struct sctp_association *asoc2; struct sctp_transport *transport; union sctp_addr to; struct sctp_af *af; sctp_scope_t scope; long timeo; int err = 0; int addrcnt = 0; int walk_size = 0; union sctp_addr *sa_addr; void *addr_buf; sp = sctp_sk(sk); ep = sp->ep; /* connect() cannot be done on a socket that is already in ESTABLISHED * state - UDP-style peeled off socket or a TCP-style socket that * is already connected. * It cannot be done even on a TCP-style listening socket. */ if (sctp_sstate(sk, ESTABLISHED) || (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))) { err = -EISCONN; goto out_free; } /* Walk through the addrs buffer and count the number of addresses. */ addr_buf = kaddrs; while (walk_size < addrs_size) { sa_addr = (union sctp_addr *)addr_buf; af = sctp_get_af_specific(sa_addr->sa.sa_family); /* If the address family is not supported or if this address * causes the address buffer to overflow return EINVAL. */ if (!af || (walk_size + af->sockaddr_len) > addrs_size) { err = -EINVAL; goto out_free; } err = sctp_verify_addr(sk, sa_addr, af->sockaddr_len); if (err) goto out_free; memcpy(&to, sa_addr, af->sockaddr_len); /* Check if there already is a matching association on the * endpoint (other than the one created here). */ asoc2 = sctp_endpoint_lookup_assoc(ep, sa_addr, &transport); if (asoc2 && asoc2 != asoc) { if (asoc2->state >= SCTP_STATE_ESTABLISHED) err = -EISCONN; else err = -EALREADY; goto out_free; } /* If we could not find a matching association on the endpoint, * make sure that there is no peeled-off association matching * the peer address even on another socket. */ if (sctp_endpoint_is_peeled_off(ep, sa_addr)) { err = -EADDRNOTAVAIL; goto out_free; } if (!asoc) { /* If a bind() or sctp_bindx() is not called prior to * an sctp_connectx() call, the system picks an * ephemeral port and will choose an address set * equivalent to binding with a wildcard address. */ if (!ep->base.bind_addr.port) { if (sctp_autobind(sk)) { err = -EAGAIN; goto out_free; } } else { /* * If an unprivileged user inherits a 1-many * style socket with open associations on a * privileged port, it MAY be permitted to * accept new associations, but it SHOULD NOT * be permitted to open new associations. */ if (ep->base.bind_addr.port < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE)) { err = -EACCES; goto out_free; } } scope = sctp_scope(sa_addr); asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL); if (!asoc) { err = -ENOMEM; goto out_free; } } /* Prime the peer's transport structures. */ transport = sctp_assoc_add_peer(asoc, sa_addr, GFP_KERNEL, SCTP_UNKNOWN); if (!transport) { err = -ENOMEM; goto out_free; } addrcnt++; addr_buf += af->sockaddr_len; walk_size += af->sockaddr_len; } err = sctp_assoc_set_bind_addr_from_ep(asoc, GFP_KERNEL); if (err < 0) { goto out_free; } err = sctp_primitive_ASSOCIATE(asoc, NULL); if (err < 0) { goto out_free; } /* Initialize sk's dport and daddr for getpeername() */ inet_sk(sk)->dport = htons(asoc->peer.port); af = sctp_get_af_specific(to.sa.sa_family); af->to_sk_daddr(&to, sk); sk->sk_err = 0; timeo = sock_sndtimeo(sk, sk->sk_socket->file->f_flags & O_NONBLOCK); err = sctp_wait_for_connect(asoc, &timeo); /* Don't free association on exit. */ asoc = NULL; out_free: SCTP_DEBUG_PRINTK("About to exit __sctp_connect() free asoc: %p" " kaddrs: %p err: %d\n", asoc, kaddrs, err); if (asoc) sctp_association_free(asoc); return err; }
4,704
114,692
0
UserResponse user_response() const { return user_response_; }
4,705
174,091
0
BpGraphicBufferConsumer::~BpGraphicBufferConsumer() {}
4,706
47,724
0
bool __netlink_ns_capable(const struct netlink_skb_parms *nsp, struct user_namespace *user_ns, int cap) { return ((nsp->flags & NETLINK_SKB_DST) || file_ns_capable(nsp->sk->sk_socket->file, user_ns, cap)) && ns_capable(user_ns, cap); }
4,707
56,494
0
static long setup_trampoline(unsigned int syscall, unsigned int __user *tramp) { int i; long err = 0; /* addi r1, r1, __SIGNAL_FRAMESIZE # Pop the dummy stackframe */ err |= __put_user(0x38210000UL | (__SIGNAL_FRAMESIZE & 0xffff), &tramp[0]); /* li r0, __NR_[rt_]sigreturn| */ err |= __put_user(0x38000000UL | (syscall & 0xffff), &tramp[1]); /* sc */ err |= __put_user(0x44000002UL, &tramp[2]); /* Minimal traceback info */ for (i=TRAMP_TRACEBACK; i < TRAMP_SIZE ;i++) err |= __put_user(0, &tramp[i]); if (!err) flush_icache_range((unsigned long) &tramp[0], (unsigned long) &tramp[TRAMP_SIZE]); return err; }
4,708
98,166
0
void AutoFillManager::FillFormField(const AutoFillProfile* profile, AutoFillType type, webkit_glue::FormField* field) { DCHECK(profile); DCHECK(field); if (type.subgroup() == AutoFillType::PHONE_NUMBER) { FillPhoneNumberField(profile, field); } else { if (field->form_control_type() == ASCIIToUTF16("select-one")) autofill::FillSelectControl(profile, type, field); else field->set_value(profile->GetFieldText(type)); } }
4,709
81,409
0
static inline void trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start, int len) { }
4,710
75,950
0
dbus_object_create_path_vrrp(void) { return g_strconcat(DBUS_VRRP_OBJECT_ROOT, #if HAVE_DECL_CLONE_NEWNET global_data->network_namespace ? "/" : "", global_data->network_namespace ? global_data->network_namespace : "", #endif global_data->instance_name ? "/" : "", global_data->instance_name ? global_data->instance_name : "", "/Vrrp", NULL); }
4,711
57,650
0
store_tabletExecute(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct aiptek *aiptek = dev_get_drvdata(dev); /* We do not care what you write to this file. Merely the action * of writing to this file triggers a tablet reprogramming. */ memcpy(&aiptek->curSetting, &aiptek->newSetting, sizeof(struct aiptek_settings)); if (aiptek_program_tablet(aiptek) < 0) return -EIO; return count; }
4,712
1,909
0
static void reds_mig_target_client_free(RedsMigTargetClient *mig_client) { RingItem *now, *next; ring_remove(&mig_client->link); reds->num_mig_target_clients--; RING_FOREACH_SAFE(now, next, &mig_client->pending_links) { RedsMigPendingLink *mig_link = SPICE_CONTAINEROF(now, RedsMigPendingLink, ring_link); ring_remove(now); free(mig_link); } free(mig_client); }
4,713
42,962
0
static void free_receive_page_frags(struct virtnet_info *vi) { int i; for (i = 0; i < vi->max_queue_pairs; i++) if (vi->rq[i].alloc_frag.page) put_page(vi->rq[i].alloc_frag.page); }
4,714
102,776
0
virtual WebGLId createProgram() { return 1; }
4,715
69,768
0
init_nodelist(void) { if (PREDICT_UNLIKELY(the_nodelist == NULL)) { the_nodelist = tor_malloc_zero(sizeof(nodelist_t)); HT_INIT(nodelist_map, &the_nodelist->nodes_by_id); the_nodelist->nodes = smartlist_new(); } }
4,716
152,742
0
std::unique_ptr<HistogramBase> BooleanHistogram::PersistentCreate( const std::string& name, const BucketRanges* ranges, HistogramBase::AtomicCount* counts, HistogramBase::AtomicCount* logged_counts, HistogramSamples::Metadata* meta, HistogramSamples::Metadata* logged_meta) { return WrapUnique(new BooleanHistogram( name, ranges, counts, logged_counts, meta, logged_meta)); }
4,717
17,683
0
ProcXFixesCreateRegionFromWindow(ClientPtr client) { RegionPtr pRegion; Bool copy = TRUE; WindowPtr pWin; int rc; REQUEST(xXFixesCreateRegionFromWindowReq); REQUEST_SIZE_MATCH(xXFixesCreateRegionFromWindowReq); LEGAL_NEW_RESOURCE(stuff->region, client); rc = dixLookupResourceByType((void **) &pWin, stuff->window, RT_WINDOW, client, DixGetAttrAccess); if (rc != Success) { client->errorValue = stuff->window; return rc; } switch (stuff->kind) { case WindowRegionBounding: pRegion = wBoundingShape(pWin); if (!pRegion) { pRegion = CreateBoundingShape(pWin); copy = FALSE; } break; case WindowRegionClip: pRegion = wClipShape(pWin); if (!pRegion) { pRegion = CreateClipShape(pWin); copy = FALSE; } break; default: client->errorValue = stuff->kind; return BadValue; } if (copy && pRegion) pRegion = XFixesRegionCopy(pRegion); if (!pRegion) return BadAlloc; if (!AddResource(stuff->region, RegionResType, (void *) pRegion)) return BadAlloc; return Success; }
4,718
37,794
0
static void nested_svm_unmap(struct page *page) { kunmap(page); kvm_release_page_dirty(page); }
4,719
25,356
0
do_translation_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) { unsigned int index; pgd_t *pgd, *pgd_k; pud_t *pud, *pud_k; pmd_t *pmd, *pmd_k; if (addr < TASK_SIZE) return do_page_fault(addr, fsr, regs); if (user_mode(regs)) goto bad_area; index = pgd_index(addr); /* * FIXME: CP15 C1 is write only on ARMv3 architectures. */ pgd = cpu_get_pgd() + index; pgd_k = init_mm.pgd + index; if (pgd_none(*pgd_k)) goto bad_area; if (!pgd_present(*pgd)) set_pgd(pgd, *pgd_k); pud = pud_offset(pgd, addr); pud_k = pud_offset(pgd_k, addr); if (pud_none(*pud_k)) goto bad_area; if (!pud_present(*pud)) set_pud(pud, *pud_k); pmd = pmd_offset(pud, addr); pmd_k = pmd_offset(pud_k, addr); /* * On ARM one Linux PGD entry contains two hardware entries (see page * tables layout in pgtable.h). We normally guarantee that we always * fill both L1 entries. But create_mapping() doesn't follow the rule. * It can create inidividual L1 entries, so here we have to call * pmd_none() check for the entry really corresponded to address, not * for the first of pair. */ index = (addr >> SECTION_SHIFT) & 1; if (pmd_none(pmd_k[index])) goto bad_area; copy_pmd(pmd, pmd_k); return 0; bad_area: do_bad_area(addr, fsr, regs); return 0; }
4,720
58,534
0
static BOOL freerdp_peer_check_fds(freerdp_peer* client) { int status; rdpRdp* rdp; rdp = client->context->rdp; status = rdp_check_fds(rdp); if (status < 0) return FALSE; return TRUE; }
4,721
181,537
1
static int search_old_relocation(struct reloc_struct_t *reloc_table, ut32 addr_to_patch, int n_reloc) { int i; for (i = 0; i < n_reloc; i++) { if (addr_to_patch == reloc_table[i].data_offset) { return i; } } return -1; }
4,722
97,015
0
long get_user_pages_longterm(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas_arg) { struct vm_area_struct **vmas = vmas_arg; struct vm_area_struct *vma_prev = NULL; long rc, i; if (!pages) return -EINVAL; if (!vmas) { vmas = kcalloc(nr_pages, sizeof(struct vm_area_struct *), GFP_KERNEL); if (!vmas) return -ENOMEM; } rc = get_user_pages(start, nr_pages, gup_flags, pages, vmas); for (i = 0; i < rc; i++) { struct vm_area_struct *vma = vmas[i]; if (vma == vma_prev) continue; vma_prev = vma; if (vma_is_fsdax(vma)) break; } /* * Either get_user_pages() failed, or the vma validation * succeeded, in either case we don't need to put_page() before * returning. */ if (i >= rc) goto out; for (i = 0; i < rc; i++) put_page(pages[i]); rc = -EOPNOTSUPP; out: if (vmas != vmas_arg) kfree(vmas); return rc; }
4,723
71,157
0
static int ceph_aes_encrypt2(const void *key, int key_len, void *dst, size_t *dst_len, const void *src1, size_t src1_len, const void *src2, size_t src2_len) { struct scatterlist sg_in[3], prealloc_sg; struct sg_table sg_out; struct crypto_skcipher *tfm = ceph_crypto_alloc_cipher(); SKCIPHER_REQUEST_ON_STACK(req, tfm); int ret; char iv[AES_BLOCK_SIZE]; size_t zero_padding = (0x10 - ((src1_len + src2_len) & 0x0f)); char pad[16]; if (IS_ERR(tfm)) return PTR_ERR(tfm); memset(pad, zero_padding, zero_padding); *dst_len = src1_len + src2_len + zero_padding; sg_init_table(sg_in, 3); sg_set_buf(&sg_in[0], src1, src1_len); sg_set_buf(&sg_in[1], src2, src2_len); sg_set_buf(&sg_in[2], pad, zero_padding); ret = setup_sgtable(&sg_out, &prealloc_sg, dst, *dst_len); if (ret) goto out_tfm; crypto_skcipher_setkey((void *)tfm, key, key_len); memcpy(iv, aes_iv, AES_BLOCK_SIZE); skcipher_request_set_tfm(req, tfm); skcipher_request_set_callback(req, 0, NULL, NULL); skcipher_request_set_crypt(req, sg_in, sg_out.sgl, src1_len + src2_len + zero_padding, iv); /* print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1, key, key_len, 1); print_hex_dump(KERN_ERR, "enc src1: ", DUMP_PREFIX_NONE, 16, 1, src1, src1_len, 1); print_hex_dump(KERN_ERR, "enc src2: ", DUMP_PREFIX_NONE, 16, 1, src2, src2_len, 1); print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1, pad, zero_padding, 1); */ ret = crypto_skcipher_encrypt(req); skcipher_request_zero(req); if (ret < 0) { pr_err("ceph_aes_crypt2 failed %d\n", ret); goto out_sg; } /* print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1, dst, *dst_len, 1); */ out_sg: teardown_sgtable(&sg_out); out_tfm: crypto_free_skcipher(tfm); return ret; }
4,724
165,372
0
void ClearShaderCacheOnIOThread(const base::FilePath& path, const base::Time begin, const base::Time end, base::OnceClosure callback) { DCHECK_CURRENTLY_ON(BrowserThread::IO); GetShaderCacheFactorySingleton()->ClearByPath( path, begin, end, base::BindOnce(&ClearedShaderCache, std::move(callback))); }
4,725
4,212
0
cff_fd_select_get( CFF_FDSelect fdselect, FT_UInt glyph_index ) { FT_Byte fd = 0; switch ( fdselect->format ) { case 0: fd = fdselect->data[glyph_index]; break; case 3: /* first, compare to cache */ if ( (FT_UInt)( glyph_index - fdselect->cache_first ) < fdselect->cache_count ) { fd = fdselect->cache_fd; break; } /* then, lookup the ranges array */ { FT_Byte* p = fdselect->data; FT_Byte* p_limit = p + fdselect->data_size; FT_Byte fd2; FT_UInt first, limit; first = FT_NEXT_USHORT( p ); do { if ( glyph_index < first ) break; fd2 = *p++; limit = FT_NEXT_USHORT( p ); if ( glyph_index < limit ) { fd = fd2; /* update cache */ fdselect->cache_first = first; fdselect->cache_count = limit-first; fdselect->cache_fd = fd2; break; } first = limit; } while ( p < p_limit ); } break; default: ; } return fd; }
4,726
46,387
0
ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd, splice_direct_actor *actor) { struct pipe_inode_info *pipe; long ret, bytes; umode_t i_mode; size_t len; int i, flags; /* * We require the input being a regular file, as we don't want to * randomly drop data for eg socket -> socket splicing. Use the * piped splicing for that! */ i_mode = file_inode(in)->i_mode; if (unlikely(!S_ISREG(i_mode) && !S_ISBLK(i_mode))) return -EINVAL; /* * neither in nor out is a pipe, setup an internal pipe attached to * 'out' and transfer the wanted data from 'in' to 'out' through that */ pipe = current->splice_pipe; if (unlikely(!pipe)) { pipe = alloc_pipe_info(); if (!pipe) return -ENOMEM; /* * We don't have an immediate reader, but we'll read the stuff * out of the pipe right after the splice_to_pipe(). So set * PIPE_READERS appropriately. */ pipe->readers = 1; current->splice_pipe = pipe; } /* * Do the splice. */ ret = 0; bytes = 0; len = sd->total_len; flags = sd->flags; /* * Don't block on output, we have to drain the direct pipe. */ sd->flags &= ~SPLICE_F_NONBLOCK; while (len) { size_t read_len; loff_t pos = sd->pos, prev_pos = pos; ret = do_splice_to(in, &pos, pipe, len, flags); if (unlikely(ret <= 0)) goto out_release; read_len = ret; sd->total_len = read_len; /* * NOTE: nonblocking mode only applies to the input. We * must not do the output in nonblocking mode as then we * could get stuck data in the internal pipe: */ ret = actor(pipe, sd); if (unlikely(ret <= 0)) { sd->pos = prev_pos; goto out_release; } bytes += ret; len -= ret; sd->pos = pos; if (ret < read_len) { sd->pos = prev_pos + ret; goto out_release; } } done: pipe->nrbufs = pipe->curbuf = 0; file_accessed(in); return bytes; out_release: /* * If we did an incomplete transfer we must release * the pipe buffers in question: */ for (i = 0; i < pipe->buffers; i++) { struct pipe_buffer *buf = pipe->bufs + i; if (buf->ops) { buf->ops->release(pipe, buf); buf->ops = NULL; } } if (!bytes) bytes = ret; goto done; }
4,727
29,500
0
SYSCALL_DEFINE3(msgctl, int, msqid, int, cmd, struct msqid_ds __user *, buf) { struct msg_queue *msq; int err, version; struct ipc_namespace *ns; if (msqid < 0 || cmd < 0) return -EINVAL; version = ipc_parse_version(&cmd); ns = current->nsproxy->ipc_ns; switch (cmd) { case IPC_INFO: case MSG_INFO: { struct msginfo msginfo; int max_id; if (!buf) return -EFAULT; /* * We must not return kernel stack data. * due to padding, it's not enough * to set all member fields. */ err = security_msg_queue_msgctl(NULL, cmd); if (err) return err; memset(&msginfo, 0, sizeof(msginfo)); msginfo.msgmni = ns->msg_ctlmni; msginfo.msgmax = ns->msg_ctlmax; msginfo.msgmnb = ns->msg_ctlmnb; msginfo.msgssz = MSGSSZ; msginfo.msgseg = MSGSEG; down_read(&msg_ids(ns).rw_mutex); if (cmd == MSG_INFO) { msginfo.msgpool = msg_ids(ns).in_use; msginfo.msgmap = atomic_read(&ns->msg_hdrs); msginfo.msgtql = atomic_read(&ns->msg_bytes); } else { msginfo.msgmap = MSGMAP; msginfo.msgpool = MSGPOOL; msginfo.msgtql = MSGTQL; } max_id = ipc_get_maxid(&msg_ids(ns)); up_read(&msg_ids(ns).rw_mutex); if (copy_to_user(buf, &msginfo, sizeof(struct msginfo))) return -EFAULT; return (max_id < 0) ? 0 : max_id; } case MSG_STAT: /* msqid is an index rather than a msg queue id */ case IPC_STAT: { struct msqid64_ds tbuf; int success_return; if (!buf) return -EFAULT; if (cmd == MSG_STAT) { msq = msg_lock(ns, msqid); if (IS_ERR(msq)) return PTR_ERR(msq); success_return = msq->q_perm.id; } else { msq = msg_lock_check(ns, msqid); if (IS_ERR(msq)) return PTR_ERR(msq); success_return = 0; } err = -EACCES; if (ipcperms(ns, &msq->q_perm, S_IRUGO)) goto out_unlock; err = security_msg_queue_msgctl(msq, cmd); if (err) goto out_unlock; memset(&tbuf, 0, sizeof(tbuf)); kernel_to_ipc64_perm(&msq->q_perm, &tbuf.msg_perm); tbuf.msg_stime = msq->q_stime; tbuf.msg_rtime = msq->q_rtime; tbuf.msg_ctime = msq->q_ctime; tbuf.msg_cbytes = msq->q_cbytes; tbuf.msg_qnum = msq->q_qnum; tbuf.msg_qbytes = msq->q_qbytes; tbuf.msg_lspid = msq->q_lspid; tbuf.msg_lrpid = msq->q_lrpid; msg_unlock(msq); if (copy_msqid_to_user(buf, &tbuf, version)) return -EFAULT; return success_return; } case IPC_SET: case IPC_RMID: err = msgctl_down(ns, msqid, cmd, buf, version); return err; default: return -EINVAL; } out_unlock: msg_unlock(msq); return err; }
4,728
110,805
0
void AutocompleteEditModel::OnWillKillFocus( gfx::NativeView view_gaining_focus) { SetSuggestedText(string16(), INSTANT_COMPLETE_NOW); InstantController* instant = controller_->GetInstant(); if (instant) instant->OnAutocompleteLostFocus(view_gaining_focus); }
4,729
119,309
0
void TranslateInfoBarDelegate::ToggleSiteBlacklist() { if (ui_delegate_.IsSiteBlacklisted()) { ui_delegate_.SetSiteBlacklist(false); } else { ui_delegate_.SetSiteBlacklist(true); infobar()->RemoveSelf(); } }
4,730
11,687
0
drive_eject_completed_cb (DBusGMethodInvocation *context, Device *device, gboolean job_was_cancelled, int status, const char *stderr, const char *stdout, gpointer user_data) { if (WEXITSTATUS (status) == 0 && !job_was_cancelled) { /* TODO: probably wait for has_media to change to FALSE */ dbus_g_method_return (context); } else { if (job_was_cancelled) { throw_error (context, ERROR_CANCELLED, "Job was cancelled"); } else { throw_error (context, ERROR_FAILED, "Error ejecting: eject exited with exit code %d: %s", WEXITSTATUS (status), stderr); } } }
4,731
182,006
1
php_http_url_t *php_http_url_parse(const char *str, size_t len, unsigned flags TSRMLS_DC) { size_t maxlen = 3 * len; struct parse_state *state = ecalloc(1, sizeof(*state) + maxlen); state->end = str + len; state->ptr = str; state->flags = flags; state->maxlen = maxlen; TSRMLS_SET_CTX(state->ts); if (!parse_scheme(state)) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Failed to parse URL scheme: '%s'", state->ptr); efree(state); return NULL; } if (!parse_hier(state)) { efree(state); return NULL; } if (!parse_query(state)) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Failed to parse URL query: '%s'", state->ptr); efree(state); return NULL; } if (!parse_fragment(state)) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Failed to parse URL fragment: '%s'", state->ptr); efree(state); return NULL; } return (php_http_url_t *) state; }
4,732
114,880
0
void TestingAutomationProvider::ExecuteJavascriptInRenderView( DictionaryValue* args, IPC::Message* reply_message) { string16 frame_xpath, javascript, extension_id, url_text; std::string error; int render_process_id, render_view_id; if (!args->GetString("frame_xpath", &frame_xpath)) { AutomationJSONReply(this, reply_message) .SendError("'frame_xpath' missing or invalid"); return; } if (!args->GetString("javascript", &javascript)) { AutomationJSONReply(this, reply_message) .SendError("'javascript' missing or invalid"); return; } if (!args->GetInteger("view.render_process_id", &render_process_id)) { AutomationJSONReply(this, reply_message) .SendError("'view.render_process_id' missing or invalid"); return; } if (!args->GetInteger("view.render_view_id", &render_view_id)) { AutomationJSONReply(this, reply_message) .SendError("'view.render_view_id' missing or invalid"); return; } RenderViewHost* rvh = RenderViewHost::FromID(render_process_id, render_view_id); if (!rvh) { AutomationJSONReply(this, reply_message).SendError( "A RenderViewHost object was not found with the given view ID."); return; } new DomOperationMessageSender(this, reply_message, true); ExecuteJavascriptInRenderViewFrame(frame_xpath, javascript, reply_message, rvh); }
4,733
154,953
0
DrawingBuffer* WebGLRenderingContextBase::GetDrawingBuffer() const { return drawing_buffer_.get(); }
4,734
114,026
0
virtual ~DummyVolumeControlDelegate() {}
4,735
23,940
0
static void age_mic_context(miccntx *cur, miccntx *old, u8 *key, int key_len, struct crypto_cipher *tfm) { /* If the current MIC context is valid and its key is the same as * the MIC register, there's nothing to do. */ if (cur->valid && (memcmp(cur->key, key, key_len) == 0)) return; /* Age current mic Context */ memcpy(old, cur, sizeof(*cur)); /* Initialize new context */ memcpy(cur->key, key, key_len); cur->window = 33; /* Window always points to the middle */ cur->rx = 0; /* Rx Sequence numbers */ cur->tx = 0; /* Tx sequence numbers */ cur->valid = 1; /* Key is now valid */ /* Give key to mic seed */ emmh32_setseed(&cur->seed, key, key_len, tfm); }
4,736
129,075
0
base::string16 ExtensionDevToolsInfoBarDelegate::GetMessageText() const { return l10n_util::GetStringFUTF16(IDS_DEV_TOOLS_INFOBAR_LABEL, base::UTF8ToUTF16(client_name_)); }
4,737
121,681
0
void ChangeListLoader::LoadDirectoryFromServerAfterRefresh( const DirectoryFetchInfo& directory_fetch_info, const FileOperationCallback& callback, const base::FilePath* directory_path, FileError error) { DCHECK(BrowserThread::CurrentlyOn(BrowserThread::UI)); DCHECK(!callback.is_null()); DVLOG(1) << "Directory loaded: " << directory_fetch_info.ToString(); callback.Run(error); if (error == FILE_ERROR_OK && !directory_path->empty()) { FOR_EACH_OBSERVER(ChangeListLoaderObserver, observers_, OnDirectoryChanged(*directory_path)); } }
4,738
103,195
0
void Browser::ShowCollectedCookiesDialog(TabContents *tab_contents) { window()->ShowCollectedCookiesDialog(tab_contents); }
4,739
125,375
0
void GetLocalFileInfoOnBlockingPool( const FilePath& local_file, GDataFileError* error, int64* file_size, std::string* content_type) { DCHECK(error); DCHECK(file_size); DCHECK(content_type); if (!net::GetMimeTypeFromExtension(local_file.Extension(), content_type)) *content_type = kMimeTypeOctetStream; *file_size = 0; *error = file_util::GetFileSize(local_file, file_size) ? GDATA_FILE_OK : GDATA_FILE_ERROR_NOT_FOUND; }
4,740
88,376
0
static void decode_sce_lfe(NeAACDecStruct *hDecoder, NeAACDecFrameInfo *hInfo, bitfile *ld, uint8_t id_syn_ele) { uint8_t channels = hDecoder->fr_channels; uint8_t tag = 0; if (channels+1 > MAX_CHANNELS) { hInfo->error = 12; return; } if (hDecoder->fr_ch_ele+1 > MAX_SYNTAX_ELEMENTS) { hInfo->error = 13; return; } /* for SCE hDecoder->element_output_channels[] is not set here because this can become 2 when some form of Parametric Stereo coding is used */ if (hDecoder->frame && hDecoder->element_id[hDecoder->fr_ch_ele] != id_syn_ele) { /* element inconsistency */ hInfo->error = 21; return; } /* save the syntax element id */ hDecoder->element_id[hDecoder->fr_ch_ele] = id_syn_ele; /* decode the element */ hInfo->error = single_lfe_channel_element(hDecoder, ld, channels, &tag); /* map output channels position to internal data channels */ if (hDecoder->element_output_channels[hDecoder->fr_ch_ele] == 2) { /* this might be faulty when pce_set is true */ hDecoder->internal_channel[channels] = channels; hDecoder->internal_channel[channels+1] = channels+1; } else { if (hDecoder->pce_set) hDecoder->internal_channel[hDecoder->pce.sce_channel[tag]] = channels; else hDecoder->internal_channel[channels] = channels; } hDecoder->fr_channels += hDecoder->element_output_channels[hDecoder->fr_ch_ele]; hDecoder->fr_ch_ele++; }
4,741
70,504
0
void re_yyset_lval (YYSTYPE * yylval_param , yyscan_t yyscanner) { struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; yylval = yylval_param; }
4,742
102,461
0
bool LayerTreeCoordinator::flushPendingLayerChanges() { if (m_waitingForUIProcess) return false; m_shouldSyncFrame = false; bool didSync = m_webPage->corePage()->mainFrame()->view()->syncCompositingStateIncludingSubframes(); m_nonCompositedContentLayer->syncCompositingStateForThisLayerOnly(); if (m_pageOverlayLayer) m_pageOverlayLayer->syncCompositingStateForThisLayerOnly(); m_rootLayer->syncCompositingStateForThisLayerOnly(); if (m_shouldSyncRootLayer) { m_webPage->send(Messages::LayerTreeCoordinatorProxy::SetRootCompositingLayer(toCoordinatedGraphicsLayer(m_rootLayer.get())->id())); m_shouldSyncRootLayer = false; } if (m_shouldSyncFrame) { didSync = true; m_webPage->send(Messages::LayerTreeCoordinatorProxy::DidRenderFrame()); m_waitingForUIProcess = true; m_shouldSyncFrame = false; } if (m_forceRepaintAsyncCallbackID) { m_webPage->send(Messages::WebPageProxy::VoidCallback(m_forceRepaintAsyncCallbackID)); m_forceRepaintAsyncCallbackID = 0; } return didSync; }
4,743
90,830
0
void gf_prompt_set_echo_off(Bool echo_off) { DWORD flags; HANDLE hStdin = GetStdHandle(STD_INPUT_HANDLE); BOOL ret = GetConsoleMode(hStdin, &flags); if (!ret) { DWORD err = GetLastError(); GF_LOG(GF_LOG_ERROR, GF_LOG_CONSOLE, ("[Console] GetConsoleMode() return with the following error code: %d\n", err)); } if (echo_off) flags &= ~ENABLE_ECHO_INPUT; else flags |= ENABLE_ECHO_INPUT; SetConsoleMode(hStdin, flags); }
4,744
87,536
0
void lodepng_decompress_settings_init(LodePNGDecompressSettings* settings) { settings->ignore_adler32 = 0; settings->custom_zlib = 0; settings->custom_inflate = 0; settings->custom_context = 0; }
4,745
118,791
0
GURL GetWindowOpenURL() { return ui_test_utils::GetTestUrl( base::FilePath(kTestDir), base::FilePath(FILE_PATH_LITERAL("window_open.html"))); }
4,746
109,642
0
void Document::didRemoveText(Node* text, unsigned offset, unsigned length) { if (!m_ranges.isEmpty()) { HashSet<Range*>::const_iterator end = m_ranges.end(); for (HashSet<Range*>::const_iterator it = m_ranges.begin(); it != end; ++it) (*it)->didRemoveText(text, offset, length); } m_markers->removeMarkers(text, offset, length); m_markers->shiftMarkers(text, offset + length, 0 - length); }
4,747
16,095
0
cdio_generic_unimplemented_eject_media (void *p_user_data) { /* Sort of a stub here. Perhaps log a message? */ return DRIVER_OP_UNSUPPORTED; }
4,748
144,014
0
png_push_restore_buffer(png_structp png_ptr, png_bytep buffer, png_size_t buffer_length) { png_ptr->current_buffer = buffer; png_ptr->current_buffer_size = buffer_length; png_ptr->buffer_size = buffer_length + png_ptr->save_buffer_size; png_ptr->current_buffer_ptr = png_ptr->current_buffer; }
4,749
106,355
0
void SyncBackendHost::SetAutofillMigrationDebugInfo( syncable::AutofillMigrationDebugInfo::PropertyToSet property_to_set, const syncable::AutofillMigrationDebugInfo& info) { return core_->syncapi()->SetAutofillMigrationDebugInfo(property_to_set, info); }
4,750
145,999
0
DrawingBufferClientRestorePixelUnpackBufferBinding() { if (!ContextGL()) return; ContextGL()->BindBuffer(GL_PIXEL_UNPACK_BUFFER, ObjectOrZero(bound_pixel_unpack_buffer_.Get())); }
4,751
146,650
0
DrawingBuffer::TextureColorBufferParameters() { ColorBufferParameters parameters; parameters.target = GL_TEXTURE_2D; if (want_alpha_channel_) { parameters.allocate_alpha_channel = true; } else if (ContextProvider() ->GetCapabilities() .emulate_rgb_buffer_with_rgba) { parameters.allocate_alpha_channel = true; } else { parameters.allocate_alpha_channel = DefaultBufferRequiresAlphaChannelToBePreserved(); } return parameters; }
4,752
28,937
0
static int arm_init(void) { int rc = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE); return rc; }
4,753
61,629
0
static int64_t mxf_timestamp_to_int64(uint64_t timestamp) { struct tm time = { 0 }; time.tm_year = (timestamp >> 48) - 1900; time.tm_mon = (timestamp >> 40 & 0xFF) - 1; time.tm_mday = (timestamp >> 32 & 0xFF); time.tm_hour = (timestamp >> 24 & 0xFF); time.tm_min = (timestamp >> 16 & 0xFF); time.tm_sec = (timestamp >> 8 & 0xFF); /* msvcrt versions of strftime calls the invalid parameter handler * (aborting the process if one isn't set) if the parameters are out * of range. */ time.tm_mon = av_clip(time.tm_mon, 0, 11); time.tm_mday = av_clip(time.tm_mday, 1, 31); time.tm_hour = av_clip(time.tm_hour, 0, 23); time.tm_min = av_clip(time.tm_min, 0, 59); time.tm_sec = av_clip(time.tm_sec, 0, 59); return (int64_t)av_timegm(&time) * 1000000; }
4,754
134,320
0
views::View* ConvertPointToViewAndGetTooltipHandler( views::View* source, views::View* dest, const gfx::Point& point_in_source) { gfx::Point dest_point(point_in_source); views::View::ConvertPointToTarget(source, dest, &dest_point); return dest->GetTooltipHandlerForPoint(dest_point); }
4,755
39,834
0
static inline void put_tty_queue(unsigned char c, struct n_tty_data *ldata) { *read_buf_addr(ldata, ldata->read_head++) = c; }
4,756
39,708
0
void path_put(struct path *path) { dput(path->dentry); mntput(path->mnt); }
4,757
27,180
0
static void xt_client_event_handler(Widget w, XtPointer client_data, XEvent *event, Boolean *cont) { XtData *toolkit = (XtData *)client_data; switch (event->type) { case ClientMessage: if (event->xclient.message_type == XInternAtom(x_display, "_XEMBED", False)) { switch (event->xclient.data.l[1]) { case XEMBED_FOCUS_IN: xt_client_simulate_focus(toolkit->form, FocusIn); break; case XEMBED_FOCUS_OUT: xt_client_simulate_focus(toolkit->form, FocusOut); break; } } break; case KeyPress: case KeyRelease: if (event->xkey.window == XtWindow(toolkit->top_widget)) { event->xkey.window = XtWindow(toolkit->form); trap_errors(); XSendEvent(XtDisplay(toolkit->form), event->xfocus.window, False, NoEventMask, event); XSync(XtDisplay(toolkit->form), False); untrap_errors(); *cont = False; } break; case ButtonRelease: send_xembed_message(XtDisplay(w), toolkit->browser_window, XEMBED_REQUEST_FOCUS, 0, 0, 0); break; } }
4,758
177,881
1
struct edid *drm_load_edid_firmware(struct drm_connector *connector) { const char *connector_name = connector->name; char *edidname, *last, *colon, *fwstr, *edidstr, *fallback = NULL; struct edid *edid; if (edid_firmware[0] == '\0') return ERR_PTR(-ENOENT); /* * If there are multiple edid files specified and separated * by commas, search through the list looking for one that * matches the connector. * * If there's one or more that doesn't specify a connector, keep * the last one found one as a fallback. */ fwstr = kstrdup(edid_firmware, GFP_KERNEL); edidstr = fwstr; while ((edidname = strsep(&edidstr, ","))) { if (strncmp(connector_name, edidname, colon - edidname)) continue; edidname = colon + 1; break; } if (*edidname != '\0') /* corner case: multiple ',' */ fallback = edidname; }
4,759
184,981
1
xmlParseEntityRef(xmlParserCtxtPtr ctxt) { const xmlChar *name; xmlEntityPtr ent = NULL; GROW; if (RAW != '&') return(NULL); NEXT; name = xmlParseName(ctxt); if (name == NULL) { xmlFatalErrMsg(ctxt, XML_ERR_NAME_REQUIRED, "xmlParseEntityRef: no name\n"); return(NULL); } if (RAW != ';') { xmlFatalErr(ctxt, XML_ERR_ENTITYREF_SEMICOL_MISSING, NULL); return(NULL); } NEXT; /* * Predefined entites override any extra definition */ if ((ctxt->options & XML_PARSE_OLDSAX) == 0) { ent = xmlGetPredefinedEntity(name); if (ent != NULL) return(ent); } /* * Increate the number of entity references parsed */ ctxt->nbentities++; /* * Ask first SAX for entity resolution, otherwise try the * entities which may have stored in the parser context. */ if (ctxt->sax != NULL) { if (ctxt->sax->getEntity != NULL) ent = ctxt->sax->getEntity(ctxt->userData, name); if ((ctxt->wellFormed == 1 ) && (ent == NULL) && (ctxt->options & XML_PARSE_OLDSAX)) ent = xmlGetPredefinedEntity(name); if ((ctxt->wellFormed == 1 ) && (ent == NULL) && (ctxt->userData==ctxt)) { ent = xmlSAX2GetEntity(ctxt, name); } } /* * [ WFC: Entity Declared ] * In a document without any DTD, a document with only an * internal DTD subset which contains no parameter entity * references, or a document with "standalone='yes'", the * Name given in the entity reference must match that in an * entity declaration, except that well-formed documents * need not declare any of the following entities: amp, lt, * gt, apos, quot. * The declaration of a parameter entity must precede any * reference to it. * Similarly, the declaration of a general entity must * precede any reference to it which appears in a default * value in an attribute-list declaration. Note that if * entities are declared in the external subset or in * external parameter entities, a non-validating processor * is not obligated to read and process their declarations; * for such documents, the rule that an entity must be * declared is a well-formedness constraint only if * standalone='yes'. */ if (ent == NULL) { if ((ctxt->standalone == 1) || ((ctxt->hasExternalSubset == 0) && (ctxt->hasPErefs == 0))) { xmlFatalErrMsgStr(ctxt, XML_ERR_UNDECLARED_ENTITY, "Entity '%s' not defined\n", name); } else { xmlErrMsgStr(ctxt, XML_WAR_UNDECLARED_ENTITY, "Entity '%s' not defined\n", name); if ((ctxt->inSubset == 0) && (ctxt->sax != NULL) && (ctxt->sax->reference != NULL)) { ctxt->sax->reference(ctxt->userData, name); } } ctxt->valid = 0; } /* * [ WFC: Parsed Entity ] * An entity reference must not contain the name of an * unparsed entity */ else if (ent->etype == XML_EXTERNAL_GENERAL_UNPARSED_ENTITY) { xmlFatalErrMsgStr(ctxt, XML_ERR_UNPARSED_ENTITY, "Entity reference to unparsed entity %s\n", name); } /* * [ WFC: No External Entity References ] * Attribute values cannot contain direct or indirect * entity references to external entities. */ else if ((ctxt->instate == XML_PARSER_ATTRIBUTE_VALUE) && (ent->etype == XML_EXTERNAL_GENERAL_PARSED_ENTITY)) { xmlFatalErrMsgStr(ctxt, XML_ERR_ENTITY_IS_EXTERNAL, "Attribute references external entity '%s'\n", name); } /* * [ WFC: No < in Attribute Values ] * The replacement text of any entity referred to directly or * indirectly in an attribute value (other than "&lt;") must * not contain a <. */ else if ((ctxt->instate == XML_PARSER_ATTRIBUTE_VALUE) && (ent != NULL) && (ent->content != NULL) && (ent->etype != XML_INTERNAL_PREDEFINED_ENTITY) && (xmlStrchr(ent->content, '<'))) { xmlFatalErrMsgStr(ctxt, XML_ERR_LT_IN_ATTRIBUTE, "'<' in entity '%s' is not allowed in attributes values\n", name); } /* * Internal check, no parameter entities here ... */ else { switch (ent->etype) { case XML_INTERNAL_PARAMETER_ENTITY: case XML_EXTERNAL_PARAMETER_ENTITY: xmlFatalErrMsgStr(ctxt, XML_ERR_ENTITY_IS_PARAMETER, "Attempt to reference the parameter entity '%s'\n", name); break; default: break; } } /* * [ WFC: No Recursion ] * A parsed entity must not contain a recursive reference * to itself, either directly or indirectly. * Done somewhere else */ return(ent); }
4,760
69,923
0
void _addReplySdsToList(client *c, sds s) { robj *tail; if (c->flags & CLIENT_CLOSE_AFTER_REPLY) { sdsfree(s); return; } if (listLength(c->reply) == 0) { listAddNodeTail(c->reply,createObject(OBJ_STRING,s)); c->reply_bytes += sdsZmallocSize(s); } else { tail = listNodeValue(listLast(c->reply)); /* Append to this object when possible. */ if (tail->ptr != NULL && tail->encoding == OBJ_ENCODING_RAW && sdslen(tail->ptr)+sdslen(s) <= PROTO_REPLY_CHUNK_BYTES) { c->reply_bytes -= sdsZmallocSize(tail->ptr); tail = dupLastObjectIfNeeded(c->reply); tail->ptr = sdscatlen(tail->ptr,s,sdslen(s)); c->reply_bytes += sdsZmallocSize(tail->ptr); sdsfree(s); } else { listAddNodeTail(c->reply,createObject(OBJ_STRING,s)); c->reply_bytes += sdsZmallocSize(s); } } asyncCloseClientOnOutputBufferLimitReached(c); }
4,761
170,630
0
void AecEnable(preproc_effect_t *effect) { webrtc::EchoControlMobile *aec = static_cast<webrtc::EchoControlMobile *>(effect->engine); ALOGV("AecEnable aec %p", aec); aec->Enable(true); }
4,762
46,243
0
static struct inode *bdev_alloc_inode(struct super_block *sb) { struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, GFP_KERNEL); if (!ei) return NULL; return &ei->vfs_inode; }
4,763
61,730
0
static int __init set_thash_entries(char *str) { ssize_t ret; if (!str) return 0; ret = kstrtoul(str, 0, &thash_entries); if (ret) return 0; return 1; }
4,764
139,551
0
static TriState StateStyle(LocalFrame& frame, CSSPropertyID property_id, const char* desired_value) { frame.GetDocument()->UpdateStyleAndLayoutIgnorePendingStylesheets(); if (frame.GetEditor().Behavior().ShouldToggleStyleBasedOnStartOfSelection()) return frame.GetEditor().SelectionStartHasStyle(property_id, desired_value) ? kTrueTriState : kFalseTriState; return frame.GetEditor().SelectionHasStyle(property_id, desired_value); }
4,765
169,006
0
void OfflinePageModelTaskified::GetPagesByNamespace( const std::string& name_space, const MultipleOfflinePageItemCallback& callback) { auto task = GetPagesTask::CreateTaskMatchingNamespace(store_.get(), callback, name_space); task_queue_.AddTask(std::move(task)); }
4,766
126,976
0
void AudioRendererHost::DoCompleteCreation( media::AudioOutputController* controller) { DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); AudioEntry* entry = LookupByController(controller); if (!entry) return; if (!peer_handle()) { NOTREACHED() << "Renderer process handle is invalid."; DeleteEntryOnError(entry); return; } base::SharedMemoryHandle foreign_memory_handle; if (!entry->shared_memory.ShareToProcess(peer_handle(), &foreign_memory_handle)) { DeleteEntryOnError(entry); return; } AudioSyncReader* reader = static_cast<AudioSyncReader*>(entry->reader.get()); #if defined(OS_WIN) base::SyncSocket::Handle foreign_socket_handle; #else base::FileDescriptor foreign_socket_handle; #endif if (!reader->PrepareForeignSocketHandle(peer_handle(), &foreign_socket_handle)) { DeleteEntryOnError(entry); return; } Send(new AudioMsg_NotifyStreamCreated( entry->stream_id, foreign_memory_handle, foreign_socket_handle, media::PacketSizeInBytes(entry->shared_memory.created_size()))); }
4,767
150,136
0
void OnDraw(bool resourceless_software_draw) { gfx::Transform identity; gfx::Rect empty_rect; client_->OnDraw(identity, empty_rect, resourceless_software_draw); }
4,768
47,429
0
static inline struct aes_ctx *blk_aes_ctx(struct crypto_blkcipher *tfm) { return aes_ctx_common(crypto_blkcipher_ctx(tfm)); }
4,769
68,127
0
static void oidc_copy_tokens_to_request_state(request_rec *r, oidc_session_t *session, const char **s_id_token, const char **s_claims) { const char *id_token = NULL, *claims = NULL; oidc_session_get(r, session, OIDC_IDTOKEN_CLAIMS_SESSION_KEY, &id_token); oidc_session_get(r, session, OIDC_CLAIMS_SESSION_KEY, &claims); oidc_debug(r, "id_token=%s claims=%s", id_token, claims); if (id_token != NULL) { oidc_request_state_set(r, OIDC_IDTOKEN_CLAIMS_SESSION_KEY, id_token); if (s_id_token != NULL) *s_id_token = id_token; } if (claims != NULL) { oidc_request_state_set(r, OIDC_CLAIMS_SESSION_KEY, claims); if (s_claims != NULL) *s_claims = claims; } }
4,770
125,121
0
void PluginServiceImpl::ForwardGetAllowedPluginForOpenChannelToPlugin( const PluginServiceFilterParams& params, const GURL& url, const std::string& mime_type, PluginProcessHost::Client* client, const std::vector<webkit::WebPluginInfo>&) { GetAllowedPluginForOpenChannelToPlugin(params.render_process_id, params.render_view_id, url, params.page_url, mime_type, client, params.resource_context); }
4,771
167,651
0
void OnSetIsInertOnUI(bool is_inert) { is_inert_ = is_inert; if (!msg_received_) { msg_received_ = true; message_loop_runner_->Quit(); } }
4,772
129,716
0
void ResourceFetcher::garbageCollectDocumentResourcesTimerFired(Timer<ResourceFetcher>* timer) { ASSERT_UNUSED(timer, timer == &m_garbageCollectDocumentResourcesTimer); garbageCollectDocumentResources(); }
4,773
38,239
0
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size) { u8 *ptr = NULL; if (k >= SKF_NET_OFF) ptr = skb_network_header(skb) + k - SKF_NET_OFF; else if (k >= SKF_LL_OFF) ptr = skb_mac_header(skb) + k - SKF_LL_OFF; if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb)) return ptr; return NULL; }
4,774
20,173
0
static int __net_init sock_inuse_init_net(struct net *net) { net->core.inuse = alloc_percpu(struct prot_inuse); return net->core.inuse ? 0 : -ENOMEM; }
4,775
123,631
0
void InspectorPageAgent::setShowScrollBottleneckRects(ErrorString* errorString, bool show) { m_state->setBoolean(PageAgentState::pageAgentShowScrollBottleneckRects, show); if (show && !forceCompositingMode(errorString)) return; m_client->setShowScrollBottleneckRects(show); }
4,776
73,719
0
BGD_DECLARE(gdImagePtr) gdImageCreateFromWebpCtx (gdIOCtx * infile) { int width, height; uint8_t *filedata = NULL; uint8_t *argb = NULL; unsigned char *read, *temp; size_t size = 0, n; gdImagePtr im; int x, y; uint8_t *p; do { temp = gdRealloc(filedata, size+GD_WEBP_ALLOC_STEP); if (temp) { filedata = temp; read = temp + size; } else { if (filedata) { gdFree(filedata); } gd_error("WebP decode: realloc failed"); return NULL; } n = gdGetBuf(read, GD_WEBP_ALLOC_STEP, infile); if (n>0 && n!=EOF) { size += n; } } while (n>0 && n!=EOF); if (WebPGetInfo(filedata,size, &width, &height) == 0) { gd_error("gd-webp cannot get webp info"); gdFree(temp); return NULL; } im = gdImageCreateTrueColor(width, height); if (!im) { gdFree(temp); return NULL; } argb = WebPDecodeARGB(filedata, size, &width, &height); if (!argb) { gd_error("gd-webp cannot allocate temporary buffer"); gdFree(temp); gdImageDestroy(im); return NULL; } for (y = 0, p = argb; y < height; y++) { for (x = 0; x < width; x++) { register uint8_t a = gdAlphaMax - (*(p++) >> 1); register uint8_t r = *(p++); register uint8_t g = *(p++); register uint8_t b = *(p++); im->tpixels[y][x] = gdTrueColorAlpha(r, g, b, a); } } /* do not use gdFree here, in case gdFree/alloc is mapped to something else than libc */ free(argb); gdFree(temp); im->saveAlphaFlag = 1; return im; }
4,777
179,017
1
static void sample_hbp_handler(struct perf_event *bp, int nmi, struct perf_sample_data *data, struct pt_regs *regs) { printk(KERN_INFO "%s value is changed\n", ksym_name); dump_stack(); printk(KERN_INFO "Dump stack from sample_hbp_handler\n"); }
4,778
155,833
0
SupervisedUserSettingsService* SupervisedUserService::GetSettingsService() { return SupervisedUserSettingsServiceFactory::GetForProfile(profile_); }
4,779
134,059
0
ExtensionRegistry* ExtensionRegistry::Get(content::BrowserContext* context) { return ExtensionRegistryFactory::GetForBrowserContext(context); }
4,780
152,106
0
void RenderFrameHostImpl::UpdateFrameFrozenState() { if (!base::FeatureList::IsEnabled(features::kFreezeFramesOnVisibility)) return; if (is_loading_) return; if (visibility_ == blink::mojom::FrameVisibility::kNotRendered) { frame_->SetLifecycleState(blink::mojom::FrameLifecycleState::kFrozen); } else if (visibility_ == blink::mojom::FrameVisibility::kRenderedOutOfViewport) { frame_->SetLifecycleState( blink::mojom::FrameLifecycleState::kFrozenAutoResumeMedia); } else { frame_->SetLifecycleState(blink::mojom::FrameLifecycleState::kRunning); } }
4,781
183,158
1
static int ext4_fill_super(struct super_block *sb, void *data, int silent) { char *orig_data = kstrdup(data, GFP_KERNEL); struct buffer_head *bh; struct ext4_super_block *es = NULL; struct ext4_sb_info *sbi; ext4_fsblk_t block; ext4_fsblk_t sb_block = get_sb_block(&data); ext4_fsblk_t logical_sb_block; unsigned long offset = 0; unsigned long journal_devnum = 0; unsigned long def_mount_opts; struct inode *root; const char *descr; int ret = -ENOMEM; int blocksize, clustersize; unsigned int db_count; unsigned int i; int needs_recovery, has_huge_files, has_bigalloc; __u64 blocks_count; int err = 0; unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO; ext4_group_t first_not_zeroed; sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); if (!sbi) goto out_free_orig; sbi->s_blockgroup_lock = kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL); if (!sbi->s_blockgroup_lock) { kfree(sbi); goto out_free_orig; } sb->s_fs_info = sbi; sbi->s_sb = sb; sbi->s_inode_readahead_blks = EXT4_DEF_INODE_READAHEAD_BLKS; sbi->s_sb_block = sb_block; if (sb->s_bdev->bd_part) sbi->s_sectors_written_start = part_stat_read(sb->s_bdev->bd_part, sectors[1]); /* Cleanup superblock name */ strreplace(sb->s_id, '/', '!'); /* -EINVAL is default */ ret = -EINVAL; blocksize = sb_min_blocksize(sb, EXT4_MIN_BLOCK_SIZE); if (!blocksize) { ext4_msg(sb, KERN_ERR, "unable to set blocksize"); goto out_fail; } /* * The ext4 superblock will not be buffer aligned for other than 1kB * block sizes. We need to calculate the offset from buffer start. */ if (blocksize != EXT4_MIN_BLOCK_SIZE) { logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE; offset = do_div(logical_sb_block, blocksize); } else { logical_sb_block = sb_block; } if (!(bh = sb_bread_unmovable(sb, logical_sb_block))) { ext4_msg(sb, KERN_ERR, "unable to read superblock"); goto out_fail; } /* * Note: s_es must be initialized as soon as possible because * some ext4 macro-instructions depend on its value */ es = (struct ext4_super_block *) (bh->b_data + offset); sbi->s_es = es; sb->s_magic = le16_to_cpu(es->s_magic); if (sb->s_magic != EXT4_SUPER_MAGIC) goto cantfind_ext4; sbi->s_kbytes_written = le64_to_cpu(es->s_kbytes_written); /* Warn if metadata_csum and gdt_csum are both set. */ if (ext4_has_feature_metadata_csum(sb) && ext4_has_feature_gdt_csum(sb)) ext4_warning(sb, "metadata_csum and uninit_bg are " "redundant flags; please run fsck."); /* Check for a known checksum algorithm */ if (!ext4_verify_csum_type(sb, es)) { ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with " "unknown checksum algorithm."); silent = 1; goto cantfind_ext4; } /* Load the checksum driver */ if (ext4_has_feature_metadata_csum(sb)) { sbi->s_chksum_driver = crypto_alloc_shash("crc32c", 0, 0); if (IS_ERR(sbi->s_chksum_driver)) { ext4_msg(sb, KERN_ERR, "Cannot load crc32c driver."); ret = PTR_ERR(sbi->s_chksum_driver); sbi->s_chksum_driver = NULL; goto failed_mount; } } /* Check superblock checksum */ if (!ext4_superblock_csum_verify(sb, es)) { ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with " "invalid superblock checksum. Run e2fsck?"); silent = 1; ret = -EFSBADCRC; goto cantfind_ext4; } /* Precompute checksum seed for all metadata */ if (ext4_has_feature_csum_seed(sb)) sbi->s_csum_seed = le32_to_cpu(es->s_checksum_seed); else if (ext4_has_metadata_csum(sb)) sbi->s_csum_seed = ext4_chksum(sbi, ~0, es->s_uuid, sizeof(es->s_uuid)); /* Set defaults before we parse the mount options */ def_mount_opts = le32_to_cpu(es->s_default_mount_opts); set_opt(sb, INIT_INODE_TABLE); if (def_mount_opts & EXT4_DEFM_DEBUG) set_opt(sb, DEBUG); if (def_mount_opts & EXT4_DEFM_BSDGROUPS) set_opt(sb, GRPID); if (def_mount_opts & EXT4_DEFM_UID16) set_opt(sb, NO_UID32); /* xattr user namespace & acls are now defaulted on */ set_opt(sb, XATTR_USER); #ifdef CONFIG_EXT4_FS_POSIX_ACL set_opt(sb, POSIX_ACL); #endif /* don't forget to enable journal_csum when metadata_csum is enabled. */ if (ext4_has_metadata_csum(sb)) set_opt(sb, JOURNAL_CHECKSUM); if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA) set_opt(sb, JOURNAL_DATA); else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED) set_opt(sb, ORDERED_DATA); else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_WBACK) set_opt(sb, WRITEBACK_DATA); if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_PANIC) set_opt(sb, ERRORS_PANIC); else if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_CONTINUE) set_opt(sb, ERRORS_CONT); else set_opt(sb, ERRORS_RO); /* block_validity enabled by default; disable with noblock_validity */ set_opt(sb, BLOCK_VALIDITY); if (def_mount_opts & EXT4_DEFM_DISCARD) set_opt(sb, DISCARD); sbi->s_resuid = make_kuid(&init_user_ns, le16_to_cpu(es->s_def_resuid)); sbi->s_resgid = make_kgid(&init_user_ns, le16_to_cpu(es->s_def_resgid)); sbi->s_commit_interval = JBD2_DEFAULT_MAX_COMMIT_AGE * HZ; sbi->s_min_batch_time = EXT4_DEF_MIN_BATCH_TIME; sbi->s_max_batch_time = EXT4_DEF_MAX_BATCH_TIME; if ((def_mount_opts & EXT4_DEFM_NOBARRIER) == 0) set_opt(sb, BARRIER); /* * enable delayed allocation by default * Use -o nodelalloc to turn it off */ if (!IS_EXT3_SB(sb) && !IS_EXT2_SB(sb) && ((def_mount_opts & EXT4_DEFM_NODELALLOC) == 0)) set_opt(sb, DELALLOC); /* * set default s_li_wait_mult for lazyinit, for the case there is * no mount option specified. */ sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT; if (!parse_options((char *) sbi->s_es->s_mount_opts, sb, &journal_devnum, &journal_ioprio, 0)) { ext4_msg(sb, KERN_WARNING, "failed to parse options in superblock: %s", sbi->s_es->s_mount_opts); } sbi->s_def_mount_opt = sbi->s_mount_opt; if (!parse_options((char *) data, sb, &journal_devnum, &journal_ioprio, 0)) goto failed_mount; if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) { printk_once(KERN_WARNING "EXT4-fs: Warning: mounting " "with data=journal disables delayed " "allocation and O_DIRECT support!\n"); if (test_opt2(sb, EXPLICIT_DELALLOC)) { ext4_msg(sb, KERN_ERR, "can't mount with " "both data=journal and delalloc"); goto failed_mount; } if (test_opt(sb, DIOREAD_NOLOCK)) { ext4_msg(sb, KERN_ERR, "can't mount with " "both data=journal and dioread_nolock"); goto failed_mount; } if (test_opt(sb, DAX)) { ext4_msg(sb, KERN_ERR, "can't mount with " "both data=journal and dax"); goto failed_mount; } if (test_opt(sb, DELALLOC)) clear_opt(sb, DELALLOC); } else { sb->s_iflags |= SB_I_CGROUPWB; } sb->s_flags = (sb->s_flags & ~MS_POSIXACL) | (test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0); if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV && (ext4_has_compat_features(sb) || ext4_has_ro_compat_features(sb) || ext4_has_incompat_features(sb))) ext4_msg(sb, KERN_WARNING, "feature flags set on rev 0 fs, " "running e2fsck is recommended"); if (es->s_creator_os == cpu_to_le32(EXT4_OS_HURD)) { set_opt2(sb, HURD_COMPAT); if (ext4_has_feature_64bit(sb)) { ext4_msg(sb, KERN_ERR, "The Hurd can't support 64-bit file systems"); goto failed_mount; } } if (IS_EXT2_SB(sb)) { if (ext2_feature_set_ok(sb)) ext4_msg(sb, KERN_INFO, "mounting ext2 file system " "using the ext4 subsystem"); else { ext4_msg(sb, KERN_ERR, "couldn't mount as ext2 due " "to feature incompatibilities"); goto failed_mount; } } if (IS_EXT3_SB(sb)) { if (ext3_feature_set_ok(sb)) ext4_msg(sb, KERN_INFO, "mounting ext3 file system " "using the ext4 subsystem"); else { ext4_msg(sb, KERN_ERR, "couldn't mount as ext3 due " "to feature incompatibilities"); goto failed_mount; } } /* * Check feature flags regardless of the revision level, since we * previously didn't change the revision level when setting the flags, * so there is a chance incompat flags are set on a rev 0 filesystem. */ if (!ext4_feature_set_ok(sb, (sb->s_flags & MS_RDONLY))) goto failed_mount; blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size); if (blocksize < EXT4_MIN_BLOCK_SIZE || blocksize > EXT4_MAX_BLOCK_SIZE) { ext4_msg(sb, KERN_ERR, "Unsupported filesystem blocksize %d", blocksize); goto failed_mount; } if (sbi->s_mount_opt & EXT4_MOUNT_DAX) { if (blocksize != PAGE_SIZE) { ext4_msg(sb, KERN_ERR, "error: unsupported blocksize for dax"); goto failed_mount; } if (!sb->s_bdev->bd_disk->fops->direct_access) { ext4_msg(sb, KERN_ERR, "error: device does not support dax"); goto failed_mount; } } if (ext4_has_feature_encrypt(sb) && es->s_encryption_level) { ext4_msg(sb, KERN_ERR, "Unsupported encryption level %d", es->s_encryption_level); goto failed_mount; } if (sb->s_blocksize != blocksize) { /* Validate the filesystem blocksize */ if (!sb_set_blocksize(sb, blocksize)) { ext4_msg(sb, KERN_ERR, "bad block size %d", blocksize); goto failed_mount; } brelse(bh); logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE; offset = do_div(logical_sb_block, blocksize); bh = sb_bread_unmovable(sb, logical_sb_block); if (!bh) { ext4_msg(sb, KERN_ERR, "Can't read superblock on 2nd try"); goto failed_mount; } es = (struct ext4_super_block *)(bh->b_data + offset); sbi->s_es = es; if (es->s_magic != cpu_to_le16(EXT4_SUPER_MAGIC)) { ext4_msg(sb, KERN_ERR, "Magic mismatch, very weird!"); goto failed_mount; } } has_huge_files = ext4_has_feature_huge_file(sb); sbi->s_bitmap_maxbytes = ext4_max_bitmap_size(sb->s_blocksize_bits, has_huge_files); sb->s_maxbytes = ext4_max_size(sb->s_blocksize_bits, has_huge_files); if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) { sbi->s_inode_size = EXT4_GOOD_OLD_INODE_SIZE; sbi->s_first_ino = EXT4_GOOD_OLD_FIRST_INO; } else { sbi->s_inode_size = le16_to_cpu(es->s_inode_size); sbi->s_first_ino = le32_to_cpu(es->s_first_ino); if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) || (!is_power_of_2(sbi->s_inode_size)) || (sbi->s_inode_size > blocksize)) { ext4_msg(sb, KERN_ERR, "unsupported inode size: %d", sbi->s_inode_size); goto failed_mount; } if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE) sb->s_time_gran = 1 << (EXT4_EPOCH_BITS - 2); } sbi->s_desc_size = le16_to_cpu(es->s_desc_size); if (ext4_has_feature_64bit(sb)) { if (sbi->s_desc_size < EXT4_MIN_DESC_SIZE_64BIT || sbi->s_desc_size > EXT4_MAX_DESC_SIZE || !is_power_of_2(sbi->s_desc_size)) { ext4_msg(sb, KERN_ERR, "unsupported descriptor size %lu", sbi->s_desc_size); goto failed_mount; } } else sbi->s_desc_size = EXT4_MIN_DESC_SIZE; sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group); sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group); if (EXT4_INODE_SIZE(sb) == 0 || EXT4_INODES_PER_GROUP(sb) == 0) goto cantfind_ext4; sbi->s_inodes_per_block = blocksize / EXT4_INODE_SIZE(sb); if (sbi->s_inodes_per_block == 0) goto cantfind_ext4; sbi->s_itb_per_group = sbi->s_inodes_per_group / sbi->s_inodes_per_block; sbi->s_desc_per_block = blocksize / EXT4_DESC_SIZE(sb); sbi->s_sbh = bh; sbi->s_mount_state = le16_to_cpu(es->s_state); sbi->s_addr_per_block_bits = ilog2(EXT4_ADDR_PER_BLOCK(sb)); sbi->s_desc_per_block_bits = ilog2(EXT4_DESC_PER_BLOCK(sb)); for (i = 0; i < 4; i++) sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]); sbi->s_def_hash_version = es->s_def_hash_version; if (ext4_has_feature_dir_index(sb)) { i = le32_to_cpu(es->s_flags); if (i & EXT2_FLAGS_UNSIGNED_HASH) sbi->s_hash_unsigned = 3; else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) { #ifdef __CHAR_UNSIGNED__ if (!(sb->s_flags & MS_RDONLY)) es->s_flags |= cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH); sbi->s_hash_unsigned = 3; #else if (!(sb->s_flags & MS_RDONLY)) es->s_flags |= cpu_to_le32(EXT2_FLAGS_SIGNED_HASH); #endif } } /* Handle clustersize */ clustersize = BLOCK_SIZE << le32_to_cpu(es->s_log_cluster_size); has_bigalloc = ext4_has_feature_bigalloc(sb); if (has_bigalloc) { if (clustersize < blocksize) { ext4_msg(sb, KERN_ERR, "cluster size (%d) smaller than " "block size (%d)", clustersize, blocksize); goto failed_mount; } sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) - le32_to_cpu(es->s_log_block_size); sbi->s_clusters_per_group = le32_to_cpu(es->s_clusters_per_group); if (sbi->s_clusters_per_group > blocksize * 8) { ext4_msg(sb, KERN_ERR, "#clusters per group too big: %lu", sbi->s_clusters_per_group); goto failed_mount; } if (sbi->s_blocks_per_group != (sbi->s_clusters_per_group * (clustersize / blocksize))) { ext4_msg(sb, KERN_ERR, "blocks per group (%lu) and " "clusters per group (%lu) inconsistent", sbi->s_blocks_per_group, sbi->s_clusters_per_group); goto failed_mount; } } else { if (clustersize != blocksize) { ext4_warning(sb, "fragment/cluster size (%d) != " "block size (%d)", clustersize, blocksize); clustersize = blocksize; } if (sbi->s_blocks_per_group > blocksize * 8) { ext4_msg(sb, KERN_ERR, "#blocks per group too big: %lu", sbi->s_blocks_per_group); goto failed_mount; } sbi->s_clusters_per_group = sbi->s_blocks_per_group; sbi->s_cluster_bits = 0; } sbi->s_cluster_ratio = clustersize / blocksize; if (sbi->s_inodes_per_group > blocksize * 8) { ext4_msg(sb, KERN_ERR, "#inodes per group too big: %lu", sbi->s_inodes_per_group); goto failed_mount; } /* Do we have standard group size of clustersize * 8 blocks ? */ if (sbi->s_blocks_per_group == clustersize << 3) set_opt2(sb, STD_GROUP_SIZE); /* * Test whether we have more sectors than will fit in sector_t, * and whether the max offset is addressable by the page cache. */ err = generic_check_addressable(sb->s_blocksize_bits, ext4_blocks_count(es)); if (err) { ext4_msg(sb, KERN_ERR, "filesystem" " too large to mount safely on this system"); if (sizeof(sector_t) < 8) ext4_msg(sb, KERN_WARNING, "CONFIG_LBDAF not enabled"); goto failed_mount; } if (EXT4_BLOCKS_PER_GROUP(sb) == 0) goto cantfind_ext4; /* check blocks count against device size */ blocks_count = sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits; if (blocks_count && ext4_blocks_count(es) > blocks_count) { ext4_msg(sb, KERN_WARNING, "bad geometry: block count %llu " "exceeds size of device (%llu blocks)", ext4_blocks_count(es), blocks_count); goto failed_mount; } /* * It makes no sense for the first data block to be beyond the end * of the filesystem. */ if (le32_to_cpu(es->s_first_data_block) >= ext4_blocks_count(es)) { ext4_msg(sb, KERN_WARNING, "bad geometry: first data " "block %u is beyond end of filesystem (%llu)", le32_to_cpu(es->s_first_data_block), ext4_blocks_count(es)); goto failed_mount; } blocks_count = (ext4_blocks_count(es) - le32_to_cpu(es->s_first_data_block) + EXT4_BLOCKS_PER_GROUP(sb) - 1); do_div(blocks_count, EXT4_BLOCKS_PER_GROUP(sb)); if (blocks_count > ((uint64_t)1<<32) - EXT4_DESC_PER_BLOCK(sb)) { ext4_msg(sb, KERN_WARNING, "groups count too large: %u " "(block count %llu, first data block %u, " "blocks per group %lu)", sbi->s_groups_count, ext4_blocks_count(es), le32_to_cpu(es->s_first_data_block), EXT4_BLOCKS_PER_GROUP(sb)); goto failed_mount; } sbi->s_groups_count = blocks_count; sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count, (EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb))); db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) / EXT4_DESC_PER_BLOCK(sb); sbi->s_group_desc = ext4_kvmalloc(db_count * sizeof(struct buffer_head *), GFP_KERNEL); if (sbi->s_group_desc == NULL) { ext4_msg(sb, KERN_ERR, "not enough memory"); ret = -ENOMEM; goto failed_mount; } bgl_lock_init(sbi->s_blockgroup_lock); for (i = 0; i < db_count; i++) { block = descriptor_loc(sb, logical_sb_block, i); sbi->s_group_desc[i] = sb_bread_unmovable(sb, block); if (!sbi->s_group_desc[i]) { ext4_msg(sb, KERN_ERR, "can't read group descriptor %d", i); db_count = i; goto failed_mount2; } } if (!ext4_check_descriptors(sb, &first_not_zeroed)) { ext4_msg(sb, KERN_ERR, "group descriptors corrupted!"); ret = -EFSCORRUPTED; goto failed_mount2; } sbi->s_gdb_count = db_count; get_random_bytes(&sbi->s_next_generation, sizeof(u32)); spin_lock_init(&sbi->s_next_gen_lock); setup_timer(&sbi->s_err_report, print_daily_error_info, (unsigned long) sb); /* Register extent status tree shrinker */ if (ext4_es_register_shrinker(sbi)) goto failed_mount3; sbi->s_stripe = ext4_get_stripe_size(sbi); sbi->s_extent_max_zeroout_kb = 32; /* * set up enough so that it can read an inode */ sb->s_op = &ext4_sops; sb->s_export_op = &ext4_export_ops; sb->s_xattr = ext4_xattr_handlers; #ifdef CONFIG_QUOTA sb->dq_op = &ext4_quota_operations; if (ext4_has_feature_quota(sb)) sb->s_qcop = &dquot_quotactl_sysfile_ops; else sb->s_qcop = &ext4_qctl_operations; sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ; #endif memcpy(sb->s_uuid, es->s_uuid, sizeof(es->s_uuid)); INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */ mutex_init(&sbi->s_orphan_lock); sb->s_root = NULL; needs_recovery = (es->s_last_orphan != 0 || ext4_has_feature_journal_needs_recovery(sb)); if (ext4_has_feature_mmp(sb) && !(sb->s_flags & MS_RDONLY)) if (ext4_multi_mount_protect(sb, le64_to_cpu(es->s_mmp_block)))
goto failed_mount3a; /* * The first inode we look at is the journal inode. Don't try * root first: it may be modified in the journal! */ if (!test_opt(sb, NOLOAD) && ext4_has_feature_journal(sb)) { if (ext4_load_journal(sb, es, journal_devnum)) goto failed_mount3a; } else if (test_opt(sb, NOLOAD) && !(sb->s_flags & MS_RDONLY) && ext4_has_feature_journal_needs_recovery(sb)) { ext4_msg(sb, KERN_ERR, "required journal recovery " "suppressed and not mounted read-only"); goto failed_mount_wq; } else { /* Nojournal mode, all journal mount options are illegal */ if (test_opt2(sb, EXPLICIT_JOURNAL_CHECKSUM)) { ext4_msg(sb, KERN_ERR, "can't mount with " "journal_checksum, fs mounted w/o journal"); goto failed_mount_wq; } if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) { ext4_msg(sb, KERN_ERR, "can't mount with " "journal_async_commit, fs mounted w/o journal"); goto failed_mount_wq; } if (sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ) { ext4_msg(sb, KERN_ERR, "can't mount with " "commit=%lu, fs mounted w/o journal", sbi->s_commit_interval / HZ); goto failed_mount_wq; } if (EXT4_MOUNT_DATA_FLAGS & (sbi->s_mount_opt ^ sbi->s_def_mount_opt)) { ext4_msg(sb, KERN_ERR, "can't mount with " "data=, fs mounted w/o journal"); goto failed_mount_wq; } sbi->s_def_mount_opt &= EXT4_MOUNT_JOURNAL_CHECKSUM; clear_opt(sb, JOURNAL_CHECKSUM); clear_opt(sb, DATA_FLAGS); sbi->s_journal = NULL; needs_recovery = 0; goto no_journal; } if (ext4_has_feature_64bit(sb) && !jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_64BIT)) { ext4_msg(sb, KERN_ERR, "Failed to set 64-bit journal feature"); goto failed_mount_wq; } if (!set_journal_csum_feature_set(sb)) { ext4_msg(sb, KERN_ERR, "Failed to set journal checksum " "feature set"); goto failed_mount_wq; } /* We have now updated the journal if required, so we can * validate the data journaling mode. */ switch (test_opt(sb, DATA_FLAGS)) { case 0: /* No mode set, assume a default based on the journal * capabilities: ORDERED_DATA if the journal can * cope, else JOURNAL_DATA */ if (jbd2_journal_check_available_features (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) set_opt(sb, ORDERED_DATA); else set_opt(sb, JOURNAL_DATA); break; case EXT4_MOUNT_ORDERED_DATA: case EXT4_MOUNT_WRITEBACK_DATA: if (!jbd2_journal_check_available_features (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) { ext4_msg(sb, KERN_ERR, "Journal does not support " "requested data journaling mode"); goto failed_mount_wq; } default: break; } set_task_ioprio(sbi->s_journal->j_task, journal_ioprio); sbi->s_journal->j_commit_callback = ext4_journal_commit_callback; no_journal: if (ext4_mballoc_ready) { sbi->s_mb_cache = ext4_xattr_create_cache(sb->s_id); if (!sbi->s_mb_cache) { ext4_msg(sb, KERN_ERR, "Failed to create an mb_cache"); goto failed_mount_wq; } } if ((DUMMY_ENCRYPTION_ENABLED(sbi) || ext4_has_feature_encrypt(sb)) && (blocksize != PAGE_CACHE_SIZE)) { ext4_msg(sb, KERN_ERR, "Unsupported blocksize for fs encryption"); goto failed_mount_wq; } if (DUMMY_ENCRYPTION_ENABLED(sbi) && !(sb->s_flags & MS_RDONLY) && !ext4_has_feature_encrypt(sb)) { ext4_set_feature_encrypt(sb); ext4_commit_super(sb, 1); } /* * Get the # of file system overhead blocks from the * superblock if present. */ if (es->s_overhead_clusters) sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters); else { err = ext4_calculate_overhead(sb); if (err) goto failed_mount_wq; } /* * The maximum number of concurrent works can be high and * concurrency isn't really necessary. Limit it to 1. 
*/ EXT4_SB(sb)->rsv_conversion_wq = alloc_workqueue("ext4-rsv-conversion", WQ_MEM_RECLAIM | WQ_UNBOUND, 1); if (!EXT4_SB(sb)->rsv_conversion_wq) { printk(KERN_ERR "EXT4-fs: failed to create workqueue\n"); ret = -ENOMEM; goto failed_mount4; } /* * The jbd2_journal_load will have done any necessary log recovery, * so we can safely mount the rest of the filesystem now. */ root = ext4_iget(sb, EXT4_ROOT_INO); if (IS_ERR(root)) { ext4_msg(sb, KERN_ERR, "get root inode failed"); ret = PTR_ERR(root); root = NULL; goto failed_mount4; } if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) { ext4_msg(sb, KERN_ERR, "corrupt root inode, run e2fsck"); iput(root); goto failed_mount4; } sb->s_root = d_make_root(root); if (!sb->s_root) { ext4_msg(sb, KERN_ERR, "get root dentry failed"); ret = -ENOMEM; goto failed_mount4; } if (ext4_setup_super(sb, es, sb->s_flags & MS_RDONLY)) sb->s_flags |= MS_RDONLY; /* determine the minimum size of new large inodes, if present */ if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE) { sbi->s_want_extra_isize = sizeof(struct ext4_inode) - EXT4_GOOD_OLD_INODE_SIZE; if (ext4_has_feature_extra_isize(sb)) { if (sbi->s_want_extra_isize < le16_to_cpu(es->s_want_extra_isize)) sbi->s_want_extra_isize = le16_to_cpu(es->s_want_extra_isize); if (sbi->s_want_extra_isize < le16_to_cpu(es->s_min_extra_isize)) sbi->s_want_extra_isize = le16_to_cpu(es->s_min_extra_isize); } } /* Check if enough inode space is available */ if (EXT4_GOOD_OLD_INODE_SIZE + sbi->s_want_extra_isize > sbi->s_inode_size) { sbi->s_want_extra_isize = sizeof(struct ext4_inode) - EXT4_GOOD_OLD_INODE_SIZE; ext4_msg(sb, KERN_INFO, "required extra inode space not" "available"); } ext4_set_resv_clusters(sb); err = ext4_setup_system_zone(sb); if (err) { ext4_msg(sb, KERN_ERR, "failed to initialize system " "zone (%d)", err); goto failed_mount4a; } ext4_ext_init(sb); err = ext4_mb_init(sb); if (err) { ext4_msg(sb, KERN_ERR, "failed to initialize mballoc (%d)", err); goto failed_mount5; } block = ext4_count_free_clusters(sb); ext4_free_blocks_count_set(sbi->s_es, EXT4_C2B(sbi, block)); err = percpu_counter_init(&sbi->s_freeclusters_counter, block, GFP_KERNEL); if (!err) { unsigned long freei = ext4_count_free_inodes(sb); sbi->s_es->s_free_inodes_count = cpu_to_le32(freei); err = percpu_counter_init(&sbi->s_freeinodes_counter, freei, GFP_KERNEL); } if (!err) err = percpu_counter_init(&sbi->s_dirs_counter, ext4_count_dirs(sb), GFP_KERNEL); if (!err) err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0, GFP_KERNEL); if (err) { ext4_msg(sb, KERN_ERR, "insufficient memory"); goto failed_mount6; } if (ext4_has_feature_flex_bg(sb)) if (!ext4_fill_flex_info(sb)) { ext4_msg(sb, KERN_ERR, "unable to initialize " "flex_bg meta info!"); goto failed_mount6; } err = ext4_register_li_request(sb, first_not_zeroed); if (err) goto failed_mount6; err = ext4_register_sysfs(sb); if (err) goto failed_mount7; #ifdef CONFIG_QUOTA /* Enable quota usage during mount. 
*/ if (ext4_has_feature_quota(sb) && !(sb->s_flags & MS_RDONLY)) { err = ext4_enable_quotas(sb); if (err) goto failed_mount8; } #endif /* CONFIG_QUOTA */ EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS; ext4_orphan_cleanup(sb, es); EXT4_SB(sb)->s_mount_state &= ~EXT4_ORPHAN_FS; if (needs_recovery) { ext4_msg(sb, KERN_INFO, "recovery complete"); ext4_mark_recovery_complete(sb, es); } if (EXT4_SB(sb)->s_journal) { if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) descr = " journalled data mode"; else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA) descr = " ordered data mode"; else descr = " writeback data mode"; } else descr = "out journal"; if (test_opt(sb, DISCARD)) { struct request_queue *q = bdev_get_queue(sb->s_bdev); if (!blk_queue_discard(q)) ext4_msg(sb, KERN_WARNING, "mounting with \"discard\" option, but " "the device does not support discard"); } if (___ratelimit(&ext4_mount_msg_ratelimit, "EXT4-fs mount")) ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. " "Opts: %s%s%s", descr, sbi->s_es->s_mount_opts, *sbi->s_es->s_mount_opts ? "; " : "", orig_data); if (es->s_error_count) mod_timer(&sbi->s_err_report, jiffies + 300*HZ); /* 5 minutes */ /* Enable message ratelimiting. Default is 10 messages per 5 secs. */ ratelimit_state_init(&sbi->s_err_ratelimit_state, 5 * HZ, 10); ratelimit_state_init(&sbi->s_warning_ratelimit_state, 5 * HZ, 10); ratelimit_state_init(&sbi->s_msg_ratelimit_state, 5 * HZ, 10); kfree(orig_data); return 0; cantfind_ext4: if (!silent) ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem"); goto failed_mount; #ifdef CONFIG_QUOTA failed_mount8: ext4_unregister_sysfs(sb); #endif failed_mount7: ext4_unregister_li_request(sb); failed_mount6: ext4_mb_release(sb); if (sbi->s_flex_groups) kvfree(sbi->s_flex_groups); percpu_counter_destroy(&sbi->s_freeclusters_counter); percpu_counter_destroy(&sbi->s_freeinodes_counter); percpu_counter_destroy(&sbi->s_dirs_counter); percpu_counter_destroy(&sbi->s_dirtyclusters_counter); failed_mount5: ext4_ext_release(sb); ext4_release_system_zone(sb); failed_mount4a: dput(sb->s_root); sb->s_root = NULL; failed_mount4: ext4_msg(sb, KERN_ERR, "mount failed"); if (EXT4_SB(sb)->rsv_conversion_wq) destroy_workqueue(EXT4_SB(sb)->rsv_conversion_wq); failed_mount_wq: if (sbi->s_journal) { jbd2_journal_destroy(sbi->s_journal); sbi->s_journal = NULL; } failed_mount3a: ext4_es_unregister_shrinker(sbi); failed_mount3: del_timer_sync(&sbi->s_err_report); if (sbi->s_mmp_tsk) kthread_stop(sbi->s_mmp_tsk); failed_mount2: for (i = 0; i < db_count; i++) brelse(sbi->s_group_desc[i]); kvfree(sbi->s_group_desc); failed_mount: if (sbi->s_chksum_driver) crypto_free_shash(sbi->s_chksum_driver); #ifdef CONFIG_QUOTA for (i = 0; i < EXT4_MAXQUOTAS; i++) kfree(sbi->s_qf_names[i]); #endif ext4_blkdev_remove(sbi); brelse(bh); out_fail: sb->s_fs_info = NULL; kfree(sbi->s_blockgroup_lock); kfree(sbi); out_free_orig: kfree(orig_data); return err ? err : ret; }
4782
76300
0
static noinline int mmc_ioctl_cdrom_read_audio(struct cdrom_device_info *cdi,
                                               void __user *arg)
{
    struct cdrom_read_audio ra;
    int lba;

    if (copy_from_user(&ra, (struct cdrom_read_audio __user *)arg,
                       sizeof(ra)))
        return -EFAULT;

    if (ra.addr_format == CDROM_MSF)
        lba = msf_to_lba(ra.addr.msf.minute,
                         ra.addr.msf.second,
                         ra.addr.msf.frame);
    else if (ra.addr_format == CDROM_LBA)
        lba = ra.addr.lba;
    else
        return -EINVAL;

    /* FIXME: we need upper bound checking, too!! */
    if (lba < 0 || ra.nframes <= 0 || ra.nframes > CD_FRAMES)
        return -EINVAL;

    return cdrom_read_cdda(cdi, ra.buf, lba, ra.nframes);
}
4783
95586
0
void Com_AppendCDKey( const char *filename ) {
    fileHandle_t f;
    char buffer[33];
    char fbuffer[MAX_OSPATH];

    Com_sprintf( fbuffer, sizeof(fbuffer), "%s/rtcwkey", filename );

    FS_SV_FOpenFileRead( fbuffer, &f );
    if ( !f ) {
        /* no key file: blank the second key slot (16 spaces plus NUL) */
        Q_strncpyz( &cl_cdkey[16], "                ", 17 );
        return;
    }

    Com_Memset( buffer, 0, sizeof( buffer ) );

    FS_Read( buffer, 16, f );
    FS_FCloseFile( f );

    if ( CL_CDKeyValidate( buffer, NULL ) ) {
        strcat( &cl_cdkey[16], buffer );
    } else {
        Q_strncpyz( &cl_cdkey[16], "                ", 17 );
    }
}
4784
141304
0
const AtomicString& Document::bgColor() const {
  return BodyAttributeValue(kBgcolorAttr);
}
4785
171113
0
static uint8_t *get_data(const camera_metadata_t *metadata) {
    return (uint8_t*)metadata + metadata->data_start;
}
4786
41713
0
static int insert_inline_extent(struct btrfs_trans_handle *trans,
                                struct btrfs_path *path, int extent_inserted,
                                struct btrfs_root *root, struct inode *inode,
                                u64 start, size_t size, size_t compressed_size,
                                int compress_type,
                                struct page **compressed_pages)
{
    struct extent_buffer *leaf;
    struct page *page = NULL;
    char *kaddr;
    unsigned long ptr;
    struct btrfs_file_extent_item *ei;
    int err = 0;
    int ret;
    size_t cur_size = size;
    unsigned long offset;

    if (compressed_size && compressed_pages)
        cur_size = compressed_size;

    inode_add_bytes(inode, size);

    if (!extent_inserted) {
        struct btrfs_key key;
        size_t datasize;

        key.objectid = btrfs_ino(inode);
        key.offset = start;
        key.type = BTRFS_EXTENT_DATA_KEY;

        datasize = btrfs_file_extent_calc_inline_size(cur_size);
        path->leave_spinning = 1;
        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                      datasize);
        if (ret) {
            err = ret;
            goto fail;
        }
    }
    leaf = path->nodes[0];
    ei = btrfs_item_ptr(leaf, path->slots[0],
                        struct btrfs_file_extent_item);
    btrfs_set_file_extent_generation(leaf, ei, trans->transid);
    btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
    btrfs_set_file_extent_encryption(leaf, ei, 0);
    btrfs_set_file_extent_other_encoding(leaf, ei, 0);
    btrfs_set_file_extent_ram_bytes(leaf, ei, size);
    ptr = btrfs_file_extent_inline_start(ei);

    if (compress_type != BTRFS_COMPRESS_NONE) {
        struct page *cpage;
        int i = 0;

        while (compressed_size > 0) {
            cpage = compressed_pages[i];
            cur_size = min_t(unsigned long, compressed_size,
                             PAGE_CACHE_SIZE);

            kaddr = kmap_atomic(cpage);
            write_extent_buffer(leaf, kaddr, ptr, cur_size);
            kunmap_atomic(kaddr);

            i++;
            ptr += cur_size;
            compressed_size -= cur_size;
        }
        btrfs_set_file_extent_compression(leaf, ei,
                                          compress_type);
    } else {
        page = find_get_page(inode->i_mapping,
                             start >> PAGE_CACHE_SHIFT);
        btrfs_set_file_extent_compression(leaf, ei, 0);
        kaddr = kmap_atomic(page);
        offset = start & (PAGE_CACHE_SIZE - 1);
        write_extent_buffer(leaf, kaddr + offset, ptr, size);
        kunmap_atomic(kaddr);
        page_cache_release(page);
    }
    btrfs_mark_buffer_dirty(leaf);
    btrfs_release_path(path);

    /*
     * we're an inline extent, so nobody can
     * extend the file past i_size without locking
     * a page we already have locked.
     *
     * We must do any isize and inode updates
     * before we unlock the pages.  Otherwise we
     * could end up racing with unlink.
     */
    BTRFS_I(inode)->disk_i_size = inode->i_size;
    ret = btrfs_update_inode(trans, root, inode);

    return ret;
fail:
    return err;
}
4787
61593
0
static int mxf_read_content_storage(void *arg, AVIOContext *pb, int tag,
                                    int size, UID uid, int64_t klv_offset)
{
    MXFContext *mxf = arg;
    switch (tag) {
    case 0x1901:
        /* free any earlier array so a duplicate tag can't leak it */
        if (mxf->packages_refs)
            av_log(mxf->fc, AV_LOG_VERBOSE, "Multiple packages_refs\n");
        av_free(mxf->packages_refs);
        return mxf_read_strong_ref_array(pb, &mxf->packages_refs,
                                         &mxf->packages_count);
    }
    return 0;
}
4788
17465
0
ProcXvPutStill(ClientPtr client)
{
    DrawablePtr pDraw;
    XvPortPtr pPort;
    GCPtr pGC;
    int status;

    REQUEST(xvPutStillReq);
    REQUEST_SIZE_MATCH(xvPutStillReq);

    VALIDATE_DRAWABLE_AND_GC(stuff->drawable, pDraw, DixWriteAccess);
    VALIDATE_XV_PORT(stuff->port, pPort, DixReadAccess);

    if (!(pPort->pAdaptor->type & XvInputMask) ||
        !(pPort->pAdaptor->type & XvStillMask)) {
        client->errorValue = stuff->port;
        return BadMatch;
    }

    status = XvdiMatchPort(pPort, pDraw);
    if (status != Success) {
        return status;
    }

    return XvdiPutStill(client, pDraw, pPort, pGC,
                        stuff->vid_x, stuff->vid_y,
                        stuff->vid_w, stuff->vid_h,
                        stuff->drw_x, stuff->drw_y,
                        stuff->drw_w, stuff->drw_h);
}
4789
11918
0
BIO *cms_EnvelopedData_init_bio(CMS_ContentInfo *cms)
{
    CMS_EncryptedContentInfo *ec;
    STACK_OF(CMS_RecipientInfo) *rinfos;
    CMS_RecipientInfo *ri;
    int i, ok = 0;
    BIO *ret;

    /* Get BIO first to set up key */
    ec = cms->d.envelopedData->encryptedContentInfo;
    ret = cms_EncryptedContent_init_bio(ec);

    /* If error or no cipher end of processing */
    if (!ret || !ec->cipher)
        return ret;

    /* Now encrypt content key according to each RecipientInfo type */
    rinfos = cms->d.envelopedData->recipientInfos;

    for (i = 0; i < sk_CMS_RecipientInfo_num(rinfos); i++) {
        ri = sk_CMS_RecipientInfo_value(rinfos, i);
        if (CMS_RecipientInfo_encrypt(cms, ri) <= 0) {
            CMSerr(CMS_F_CMS_ENVELOPEDDATA_INIT_BIO,
                   CMS_R_ERROR_SETTING_RECIPIENTINFO);
            goto err;
        }
    }
    cms_env_set_version(cms->d.envelopedData);

    ok = 1;

 err:
    /* the content-encryption key is no longer needed; wipe it */
    ec->cipher = NULL;
    OPENSSL_clear_free(ec->key, ec->keylen);
    ec->key = NULL;
    ec->keylen = 0;
    if (ok)
        return ret;
    BIO_free(ret);
    return NULL;
}
4790
116085
0
syncable::ModelTypeSet GetTypesWithEmptyProgressMarkerToken(
    syncable::ModelTypeSet types,
    sync_api::UserShare* share) {
  syncable::ScopedDirLookup lookup(share->dir_manager.get(), share->name);
  if (!lookup.good()) {
    NOTREACHED() << "ScopedDirLookup failed for "
                 << "GetTypesWithEmptyProgressMarkerToken";
    return syncable::ModelTypeSet();
  }

  syncable::ModelTypeSet result;
  for (syncable::ModelTypeSet::Iterator i = types.First(); i.Good(); i.Inc()) {
    sync_pb::DataTypeProgressMarker marker;
    lookup->GetDownloadProgress(i.Get(), &marker);
    if (marker.token().empty())
      result.Put(i.Get());
  }
  return result;
}
4791
12599
0
static int rndis_reset_response(USBNetState *s, rndis_reset_msg_type *buf)
{
    rndis_reset_cmplt_type *resp =
        rndis_queue_response(s, sizeof(rndis_reset_cmplt_type));

    if (!resp)
        return USB_RET_STALL;

    resp->MessageType = cpu_to_le32(RNDIS_RESET_CMPLT);
    resp->MessageLength = cpu_to_le32(sizeof(rndis_reset_cmplt_type));
    resp->Status = cpu_to_le32(RNDIS_STATUS_SUCCESS);
    resp->AddressingReset = cpu_to_le32(1); /* reset information */

    return 0;
}
4792
4715
0
on_user_changed_timeout (User *user)
{
    user->changed_timeout_id = 0;
    accounts_user_emit_changed (ACCOUNTS_USER (user));

    return G_SOURCE_REMOVE;
}
4793
120419
0
void PopulateForm(const std::string& field_id) {
  std::string js("document.getElementById('" + field_id + "').focus();");
  ASSERT_TRUE(content::ExecuteScript(GetRenderViewHost(), js));

  SendKeyToPageAndWait(ui::VKEY_DOWN);
  SendKeyToPopupAndWait(ui::VKEY_DOWN);
  SendKeyToPopupAndWait(ui::VKEY_RETURN);
}
4794
164081
0
GURL AppCache::GetNamespaceEntryUrl(
    const std::vector<AppCacheNamespace>& namespaces,
    const GURL& namespace_url) const {
  size_t count = namespaces.size();
  for (size_t i = 0; i < count; ++i) {
    if (namespaces[i].namespace_url == namespace_url)
      return namespaces[i].target_url;
  }
  NOTREACHED();
  return GURL();
}
4795
129093
0
const std::string& extension_id() { return extension_id_; }
4796
95394
0
static int _do_auth(pool *p, xaset_t *conf, char *u, char *pw) {
  char *cpw = NULL;
  config_rec *c;

  if (conf) {
    c = find_config(conf, CONF_PARAM, "UserPassword", FALSE);

    while (c) {
      if (strcmp(c->argv[0], u) == 0) {
        cpw = (char *) c->argv[1];
        break;
      }

      c = find_config_next(c, c->next, CONF_PARAM, "UserPassword", FALSE);
    }
  }

  if (cpw) {
    if (pr_auth_getpwnam(p, u) == NULL) {
      int xerrno = errno;

      if (xerrno == ENOENT) {
        pr_log_pri(PR_LOG_NOTICE, "no such user '%s'", u);
      }

      errno = xerrno;
      return PR_AUTH_NOPWD;
    }

    return pr_auth_check(p, cpw, u, pw);
  }

  return pr_auth_authenticate(p, u, pw);
}
4797
156179
0
void RendererSchedulerImpl::CreateTraceEventObjectSnapshotLocked() const {
  TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(
      TRACE_DISABLED_BY_DEFAULT("renderer.scheduler.debug"),
      "RendererScheduler", this, AsValueLocked(helper_.NowTicks()));
}
4798
108940
0
void RenderViewImpl::OnFindMatchRects(int current_version) {
  if (!webview())
    return;

  WebFrame* main_frame = webview()->mainFrame();
  std::vector<gfx::RectF> match_rects;

  int rects_version = main_frame->findMatchMarkersVersion();
  if (current_version != rects_version) {
    WebVector<WebFloatRect> web_match_rects;
    main_frame->findMatchRects(web_match_rects);
    match_rects.reserve(web_match_rects.size());
    for (size_t i = 0; i < web_match_rects.size(); ++i)
      match_rects.push_back(gfx::RectF(web_match_rects[i]));
  }

  gfx::RectF active_rect = main_frame->activeFindMatchRect();
  Send(new ViewHostMsg_FindMatchRects_Reply(routing_id_,
                                            rects_version,
                                            match_rects,
                                            active_rect));
}
4799