column               type    value range
unique_id            int64   13 to 189k
target               int64   0 to 1
code                 string  lengths 20 to 241k characters
__index_level_0__    int64   0 to 18.9k
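The rows below repeat the four columns in the order unique_id, target, code, __index_level_0__, one cell per line. The following is a minimal sketch of how such a flat dump could be regrouped into records; the Record class, _to_int helper, and parse_records function are illustrative names introduced here, not part of the dataset, and the cell ordering is an assumption read off the schema above.

```python
from dataclasses import dataclass
from typing import Iterable, Iterator

@dataclass
class Record:
    unique_id: int       # sample identifier (schema: int64, 13 to 189k)
    target: int          # binary label, 0 or 1
    code: str            # source-code snippet (length 20 to 241k characters)
    index_level_0: int   # original dataframe index (schema: int64, 0 to 18.9k)

def _to_int(cell: str) -> int:
    # Numeric cells in the dump use thousands separators, e.g. "89,893".
    return int(cell.replace(",", ""))

def parse_records(cells: Iterable[str]) -> Iterator[Record]:
    """Group a flat stream of cell values into records.

    Assumes each row lists its four column values in the order
    unique_id, target, code, __index_level_0__, as in the dump below.
    """
    it = iter(cells)
    # zip(it, it, it, it) consumes four consecutive cells per record.
    for unique_id, target, code, index in zip(it, it, it, it):
        yield Record(_to_int(unique_id), int(target), code, _to_int(index))
```

For example, feeding this section's raw cell lines to parse_records would yield a first record with unique_id=89893, target=0, __index_level_0__=200, holding the write_events_details sample shown below.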
89,893
0
void write_events_details(int s) { int n; char buff[80]; struct upnp_event_notify * obj; struct subscriber * sub; write(s, "Events details :\n", 17); for(obj = notifylist.lh_first; obj != NULL; obj = obj->entries.le_next) { n = snprintf(buff, sizeof(buff), " %p sub=%p state=%d s=%d\n", obj, obj->sub, obj->state, obj->s); write(s, buff, n); } write(s, "Subscribers :\n", 14); for(sub = subscriberlist.lh_first; sub != NULL; sub = sub->entries.le_next) { n = snprintf(buff, sizeof(buff), " %p timeout=%d seq=%u service=%d\n", sub, (int)sub->timeout, sub->seq, sub->service); write(s, buff, n); n = snprintf(buff, sizeof(buff), " notify=%p %s\n", sub->notify, sub->uuid); write(s, buff, n); n = snprintf(buff, sizeof(buff), " %s\n", sub->callback); write(s, buff, n); } }
200
118,299
0
void AutofillDialogViews::SetIconsForSection(DialogSection section) { FieldValueMap user_input; GetUserInput(section, &user_input); FieldIconMap field_icons = delegate_->IconsForFields(user_input); TextfieldMap* textfields = &GroupForSection(section)->textfields; for (TextfieldMap::const_iterator textfield_it = textfields->begin(); textfield_it != textfields->end(); ++textfield_it) { ServerFieldType field_type = textfield_it->first; FieldIconMap::const_iterator field_icon_it = field_icons.find(field_type); ExpandingTextfield* textfield = textfield_it->second; if (field_icon_it != field_icons.end()) textfield->SetIcon(field_icon_it->second); else textfield->SetTooltipIcon(delegate_->TooltipForField(field_type)); } }
201
100,539
0
void ChildProcessSecurityPolicy::RegisterWebSafeScheme( const std::string& scheme) { AutoLock lock(lock_); DCHECK(web_safe_schemes_.count(scheme) == 0) << "Add schemes at most once."; DCHECK(pseudo_schemes_.count(scheme) == 0) << "Web-safe implies not psuedo."; web_safe_schemes_.insert(scheme); }
202
178,003
1
SchedulerObject::_continue(std::string key, std::string &/*reason*/, std::string &text) { PROC_ID id = getProcByString(key.c_str()); if (id.cluster < 0 || id.proc < 0) { dprintf(D_FULLDEBUG, "Remove: Failed to parse id: %s\n", key.c_str()); text = "Invalid Id"; return false; } scheduler.enqueueActOnJobMyself(id,JA_CONTINUE_JOBS,true); return true; }
203
107,453
0
void* JSArray::subclassData() const { return m_storage->subclassData; }
204
99,613
0
void InterstitialPage::InterstitialPageRVHViewDelegate::StartDragging( const WebDropData& drop_data, WebDragOperationsMask allowed_operations, const SkBitmap& image, const gfx::Point& image_offset) { NOTREACHED() << "InterstitialPage does not support dragging yet."; }
205
187,683
1
make_error(png_store* volatile psIn, png_byte PNG_CONST colour_type, png_byte bit_depth, int interlace_type, int test, png_const_charp name) { png_store * volatile ps = psIn; context(ps, fault); check_interlace_type(interlace_type); Try { png_structp pp; png_infop pi; pp = set_store_for_write(ps, &pi, name); if (pp == NULL) Throw ps; png_set_IHDR(pp, pi, transform_width(pp, colour_type, bit_depth), transform_height(pp, colour_type, bit_depth), bit_depth, colour_type, interlace_type, PNG_COMPRESSION_TYPE_BASE, PNG_FILTER_TYPE_BASE); if (colour_type == 3) /* palette */ init_standard_palette(ps, pp, pi, 1U << bit_depth, 0/*do tRNS*/); /* Time for a few errors; these are in various optional chunks, the * standard tests test the standard chunks pretty well. */ # define exception__prev exception_prev_1 # define exception__env exception_env_1 Try { /* Expect this to throw: */ ps->expect_error = !error_test[test].warning; ps->expect_warning = error_test[test].warning; ps->saw_warning = 0; error_test[test].fn(pp, pi); /* Normally the error is only detected here: */ png_write_info(pp, pi); /* And handle the case where it was only a warning: */ if (ps->expect_warning && ps->saw_warning) Throw ps; /* If we get here there is a problem, we have success - no error or * no warning - when we shouldn't have success. Log an error. */ store_log(ps, pp, error_test[test].msg, 1 /*error*/); } Catch (fault) ps = fault; /* expected exit, make sure ps is not clobbered * #undef exception__prev #undef exception__env /* And clear these flags */ ps->expect_error = 0; ps->expect_warning = 0; /* Now write the whole image, just to make sure that the detected, or * undetected, errro has not created problems inside libpng. */ if (png_get_rowbytes(pp, pi) != transform_rowsize(pp, colour_type, bit_depth)) png_error(pp, "row size incorrect"); else { png_uint_32 h = transform_height(pp, colour_type, bit_depth); int npasses = png_set_interlace_handling(pp); int pass; if (npasses != npasses_from_interlace_type(pp, interlace_type)) png_error(pp, "write: png_set_interlace_handling failed"); for (pass=0; pass<npasses; ++pass) { png_uint_32 y; for (y=0; y<h; ++y) { png_byte buffer[TRANSFORM_ROWMAX]; transform_row(pp, buffer, colour_type, bit_depth, y); png_write_row(pp, buffer); } } } png_write_end(pp, pi); /* The following deletes the file that was just written. */ store_write_reset(ps); } Catch(fault) { store_write_reset(fault); } }
206
140,604
0
void OutOfProcessInstance::DocumentHasUnsupportedFeature( const std::string& feature) { std::string metric("PDF_Unsupported_"); metric += feature; if (!unsupported_features_reported_.count(metric)) { unsupported_features_reported_.insert(metric); UserMetricsRecordAction(metric); } if (!full_) return; if (told_browser_about_unsupported_feature_) return; told_browser_about_unsupported_feature_ = true; pp::PDF::HasUnsupportedFeature(this); }
207
114,652
0
void WebPluginDelegateProxy::SetFocus(bool focused) { Send(new PluginMsg_SetFocus(instance_id_, focused)); #if defined(OS_WIN) if (render_view_) render_view_->PluginFocusChanged(focused, instance_id_); #endif }
208
114,081
0
TestTarget() : accelerator_pressed_count_(0) {}
209
32,960
0
static int __sctp_connect(struct sock* sk, struct sockaddr *kaddrs, int addrs_size, sctp_assoc_t *assoc_id) { struct net *net = sock_net(sk); struct sctp_sock *sp; struct sctp_endpoint *ep; struct sctp_association *asoc = NULL; struct sctp_association *asoc2; struct sctp_transport *transport; union sctp_addr to; struct sctp_af *af; sctp_scope_t scope; long timeo; int err = 0; int addrcnt = 0; int walk_size = 0; union sctp_addr *sa_addr = NULL; void *addr_buf; unsigned short port; unsigned int f_flags = 0; sp = sctp_sk(sk); ep = sp->ep; /* connect() cannot be done on a socket that is already in ESTABLISHED * state - UDP-style peeled off socket or a TCP-style socket that * is already connected. * It cannot be done even on a TCP-style listening socket. */ if (sctp_sstate(sk, ESTABLISHED) || (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))) { err = -EISCONN; goto out_free; } /* Walk through the addrs buffer and count the number of addresses. */ addr_buf = kaddrs; while (walk_size < addrs_size) { if (walk_size + sizeof(sa_family_t) > addrs_size) { err = -EINVAL; goto out_free; } sa_addr = addr_buf; af = sctp_get_af_specific(sa_addr->sa.sa_family); /* If the address family is not supported or if this address * causes the address buffer to overflow return EINVAL. */ if (!af || (walk_size + af->sockaddr_len) > addrs_size) { err = -EINVAL; goto out_free; } port = ntohs(sa_addr->v4.sin_port); /* Save current address so we can work with it */ memcpy(&to, sa_addr, af->sockaddr_len); err = sctp_verify_addr(sk, &to, af->sockaddr_len); if (err) goto out_free; /* Make sure the destination port is correctly set * in all addresses. */ if (asoc && asoc->peer.port && asoc->peer.port != port) goto out_free; /* Check if there already is a matching association on the * endpoint (other than the one created here). */ asoc2 = sctp_endpoint_lookup_assoc(ep, &to, &transport); if (asoc2 && asoc2 != asoc) { if (asoc2->state >= SCTP_STATE_ESTABLISHED) err = -EISCONN; else err = -EALREADY; goto out_free; } /* If we could not find a matching association on the endpoint, * make sure that there is no peeled-off association matching * the peer address even on another socket. */ if (sctp_endpoint_is_peeled_off(ep, &to)) { err = -EADDRNOTAVAIL; goto out_free; } if (!asoc) { /* If a bind() or sctp_bindx() is not called prior to * an sctp_connectx() call, the system picks an * ephemeral port and will choose an address set * equivalent to binding with a wildcard address. */ if (!ep->base.bind_addr.port) { if (sctp_autobind(sk)) { err = -EAGAIN; goto out_free; } } else { /* * If an unprivileged user inherits a 1-many * style socket with open associations on a * privileged port, it MAY be permitted to * accept new associations, but it SHOULD NOT * be permitted to open new associations. */ if (ep->base.bind_addr.port < PROT_SOCK && !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) { err = -EACCES; goto out_free; } } scope = sctp_scope(&to); asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL); if (!asoc) { err = -ENOMEM; goto out_free; } err = sctp_assoc_set_bind_addr_from_ep(asoc, scope, GFP_KERNEL); if (err < 0) { goto out_free; } } /* Prime the peer's transport structures. */ transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL, SCTP_UNKNOWN); if (!transport) { err = -ENOMEM; goto out_free; } addrcnt++; addr_buf += af->sockaddr_len; walk_size += af->sockaddr_len; } /* In case the user of sctp_connectx() wants an association * id back, assign one now. 
*/ if (assoc_id) { err = sctp_assoc_set_id(asoc, GFP_KERNEL); if (err < 0) goto out_free; } err = sctp_primitive_ASSOCIATE(net, asoc, NULL); if (err < 0) { goto out_free; } /* Initialize sk's dport and daddr for getpeername() */ inet_sk(sk)->inet_dport = htons(asoc->peer.port); af = sctp_get_af_specific(sa_addr->sa.sa_family); af->to_sk_daddr(sa_addr, sk); sk->sk_err = 0; /* in-kernel sockets don't generally have a file allocated to them * if all they do is call sock_create_kern(). */ if (sk->sk_socket->file) f_flags = sk->sk_socket->file->f_flags; timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK); err = sctp_wait_for_connect(asoc, &timeo); if ((err == 0 || err == -EINPROGRESS) && assoc_id) *assoc_id = asoc->assoc_id; /* Don't free association on exit. */ asoc = NULL; out_free: SCTP_DEBUG_PRINTK("About to exit __sctp_connect() free asoc: %p" " kaddrs: %p err: %d\n", asoc, kaddrs, err); if (asoc) { /* sctp_primitive_ASSOCIATE may have added this association * To the hash table, try to unhash it, just in case, its a noop * if it wasn't hashed so we're safe */ sctp_unhash_established(asoc); sctp_association_free(asoc); } return err; }
210
7,432
0
ZEND_API int zend_ts_hash_sort(TsHashTable *ht, sort_func_t sort_func, compare_func_t compare_func, int renumber) { int retval; begin_write(ht); retval = zend_hash_sort(TS_HASH(ht), sort_func, compare_func, renumber); end_write(ht); return retval; }
211
8,992
0
static uint64_t vmxnet3_get_mac_high(MACAddr *addr) { return VMXNET3_MAKE_BYTE(0, addr->a[4]) | VMXNET3_MAKE_BYTE(1, addr->a[5]); }
212
106,498
0
void WebPageProxy::didReceiveMessage(CoreIPC::Connection* connection, CoreIPC::MessageID messageID, CoreIPC::ArgumentDecoder* arguments) { #if PLATFORM(MAC) || PLATFORM(WIN) if (messageID.is<CoreIPC::MessageClassDrawingAreaProxy>()) { m_drawingArea->didReceiveDrawingAreaProxyMessage(connection, messageID, arguments); return; } #endif if (messageID.is<CoreIPC::MessageClassDrawingAreaProxyLegacy>()) { m_drawingArea->didReceiveMessage(connection, messageID, arguments); return; } #if ENABLE(INSPECTOR) if (messageID.is<CoreIPC::MessageClassWebInspectorProxy>()) { if (WebInspectorProxy* inspector = this->inspector()) inspector->didReceiveWebInspectorProxyMessage(connection, messageID, arguments); return; } #endif #if ENABLE(FULLSCREEN_API) if (messageID.is<CoreIPC::MessageClassWebFullScreenManagerProxy>()) { fullScreenManager()->didReceiveMessage(connection, messageID, arguments); return; } #endif didReceiveWebPageProxyMessage(connection, messageID, arguments); }
213
37,781
0
static int nested_svm_exit_handled_msr(struct vcpu_svm *svm) { u32 offset, msr, value; int write, mask; if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT))) return NESTED_EXIT_HOST; msr = svm->vcpu.arch.regs[VCPU_REGS_RCX]; offset = svm_msrpm_offset(msr); write = svm->vmcb->control.exit_info_1 & 1; mask = 1 << ((2 * (msr & 0xf)) + write); if (offset == MSR_INVALID) return NESTED_EXIT_DONE; /* Offset is in 32 bit units but need in 8 bit units */ offset *= 4; if (kvm_read_guest(svm->vcpu.kvm, svm->nested.vmcb_msrpm + offset, &value, 4)) return NESTED_EXIT_DONE; return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST; }
214
35,972
0
SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid) { struct smb2_flush_req *req; struct TCP_Server_Info *server; struct cifs_ses *ses = tcon->ses; struct kvec iov[1]; int resp_buftype; int rc = 0; cifs_dbg(FYI, "Flush\n"); if (ses && (ses->server)) server = ses->server; else return -EIO; rc = small_smb2_init(SMB2_FLUSH, tcon, (void **) &req); if (rc) return rc; req->PersistentFileId = persistent_fid; req->VolatileFileId = volatile_fid; iov[0].iov_base = (char *)req; /* 4 for rfc1002 length field */ iov[0].iov_len = get_rfc1002_length(req) + 4; rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, 0); if ((rc != 0) && tcon) cifs_stats_fail_inc(tcon, SMB2_FLUSH_HE); free_rsp_buf(resp_buftype, iov[0].iov_base); return rc; }
215
180,175
1
isoent_gen_joliet_identifier(struct archive_write *a, struct isoent *isoent, struct idr *idr) { struct iso9660 *iso9660; struct isoent *np; unsigned char *p; size_t l; int r; int ffmax, parent_len; static const struct archive_rb_tree_ops rb_ops = { isoent_cmp_node_joliet, isoent_cmp_key_joliet }; if (isoent->children.cnt == 0) return (0); iso9660 = a->format_data; if (iso9660->opt.joliet == OPT_JOLIET_LONGNAME) ffmax = 206; else ffmax = 128; r = idr_start(a, idr, isoent->children.cnt, ffmax, 6, 2, &rb_ops); if (r < 0) return (r); parent_len = 1; for (np = isoent; np->parent != np; np = np->parent) parent_len += np->mb_len + 1; for (np = isoent->children.first; np != NULL; np = np->chnext) { unsigned char *dot; int ext_off, noff, weight; size_t lt; if ((int)(l = np->file->basename_utf16.length) > ffmax) l = ffmax; p = malloc((l+1)*2); if (p == NULL) { archive_set_error(&a->archive, ENOMEM, "Can't allocate memory"); return (ARCHIVE_FATAL); } memcpy(p, np->file->basename_utf16.s, l); p[l] = 0; p[l+1] = 0; np->identifier = (char *)p; lt = l; dot = p + l; weight = 0; while (lt > 0) { if (!joliet_allowed_char(p[0], p[1])) archive_be16enc(p, 0x005F); /* '_' */ else if (p[0] == 0 && p[1] == 0x2E) /* '.' */ dot = p; p += 2; lt -= 2; } ext_off = (int)(dot - (unsigned char *)np->identifier); np->ext_off = ext_off; np->ext_len = (int)l - ext_off; np->id_len = (int)l; /* * Get a length of MBS of a full-pathname. */ if ((int)np->file->basename_utf16.length > ffmax) { if (archive_strncpy_l(&iso9660->mbs, (const char *)np->identifier, l, iso9660->sconv_from_utf16be) != 0 && errno == ENOMEM) { archive_set_error(&a->archive, errno, "No memory"); return (ARCHIVE_FATAL); } np->mb_len = (int)iso9660->mbs.length; if (np->mb_len != (int)np->file->basename.length) weight = np->mb_len; } else np->mb_len = (int)np->file->basename.length; /* If a length of full-pathname is longer than 240 bytes, * it violates Joliet extensions regulation. */ if (parent_len + np->mb_len > 240) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "The regulation of Joliet extensions;" " A length of a full-pathname of `%s' is " "longer than 240 bytes, (p=%d, b=%d)", archive_entry_pathname(np->file->entry), (int)parent_len, (int)np->mb_len); return (ARCHIVE_FATAL); } /* Make an offset of the number which is used to be set * hexadecimal number to avoid duplicate identifier. */ if ((int)l == ffmax) noff = ext_off - 6; else if ((int)l == ffmax-2) noff = ext_off - 4; else if ((int)l == ffmax-4) noff = ext_off - 2; else noff = ext_off; /* Register entry to the identifier resolver. */ idr_register(idr, np, weight, noff); } /* Resolve duplicate identifier with Joliet Volume. */ idr_resolve(idr, idr_set_num_beutf16); return (ARCHIVE_OK); }
216
43,416
0
int ff_set_ref_count(H264Context *h) { int ref_count[2], list_count; int num_ref_idx_active_override_flag; ref_count[0] = h->pps.ref_count[0]; ref_count[1] = h->pps.ref_count[1]; if (h->slice_type_nos != AV_PICTURE_TYPE_I) { unsigned max[2]; max[0] = max[1] = h->picture_structure == PICT_FRAME ? 15 : 31; if (h->slice_type_nos == AV_PICTURE_TYPE_B) h->direct_spatial_mv_pred = get_bits1(&h->gb); num_ref_idx_active_override_flag = get_bits1(&h->gb); if (num_ref_idx_active_override_flag) { ref_count[0] = get_ue_golomb(&h->gb) + 1; if (h->slice_type_nos == AV_PICTURE_TYPE_B) { ref_count[1] = get_ue_golomb(&h->gb) + 1; } else ref_count[1] = 1; } if (ref_count[0]-1 > max[0] || ref_count[1]-1 > max[1]){ av_log(h->avctx, AV_LOG_ERROR, "reference overflow %u > %u or %u > %u\n", ref_count[0]-1, max[0], ref_count[1]-1, max[1]); h->ref_count[0] = h->ref_count[1] = 0; h->list_count = 0; return AVERROR_INVALIDDATA; } if (h->slice_type_nos == AV_PICTURE_TYPE_B) list_count = 2; else list_count = 1; } else { list_count = 0; ref_count[0] = ref_count[1] = 0; } if (list_count != h->list_count || ref_count[0] != h->ref_count[0] || ref_count[1] != h->ref_count[1]) { h->ref_count[0] = ref_count[0]; h->ref_count[1] = ref_count[1]; h->list_count = list_count; return 1; } return 0; }
217
45,971
0
get_default_magic(void) { static const char hmagic[] = "/.magic/magic.mgc"; static char *default_magic; char *home, *hmagicpath; #ifndef WIN32 struct stat st; if (default_magic) { free(default_magic); default_magic = NULL; } if ((home = getenv("HOME")) == NULL) return MAGIC; if (asprintf(&hmagicpath, "%s/.magic.mgc", home) < 0) return MAGIC; if (stat(hmagicpath, &st) == -1) { free(hmagicpath); if (asprintf(&hmagicpath, "%s/.magic", home) < 0) return MAGIC; if (stat(hmagicpath, &st) == -1) goto out; if (S_ISDIR(st.st_mode)) { free(hmagicpath); if (asprintf(&hmagicpath, "%s/%s", home, hmagic) < 0) return MAGIC; if (access(hmagicpath, R_OK) == -1) goto out; } } if (asprintf(&default_magic, "%s:%s", hmagicpath, MAGIC) < 0) goto out; free(hmagicpath); return default_magic; out: default_magic = NULL; free(hmagicpath); return MAGIC; #else char *hmagicp; char *tmppath = NULL; LPTSTR dllpath; hmagicpath = NULL; #define APPENDPATH() \ do { \ if (tmppath && access(tmppath, R_OK) != -1) { \ if (hmagicpath == NULL) \ hmagicpath = tmppath; \ else { \ if (asprintf(&hmagicp, "%s%c%s", hmagicpath, \ PATHSEP, tmppath) >= 0) { \ free(hmagicpath); \ hmagicpath = hmagicp; \ } \ free(tmppath); \ } \ tmppath = NULL; \ } \ } while (/*CONSTCOND*/0) if (default_magic) { free(default_magic); default_magic = NULL; } /* First, try to get user-specific magic file */ if ((home = getenv("LOCALAPPDATA")) == NULL) { if ((home = getenv("USERPROFILE")) != NULL) if (asprintf(&tmppath, "%s/Local Settings/Application Data%s", home, hmagic) < 0) tmppath = NULL; } else { if (asprintf(&tmppath, "%s%s", home, hmagic) < 0) tmppath = NULL; } APPENDPATH(); /* Second, try to get a magic file from Common Files */ if ((home = getenv("COMMONPROGRAMFILES")) != NULL) { if (asprintf(&tmppath, "%s%s", home, hmagic) >= 0) APPENDPATH(); } /* Third, try to get magic file relative to dll location */ dllpath = malloc(sizeof(*dllpath) * (MAX_PATH + 1)); dllpath[MAX_PATH] = 0; /* just in case long path gets truncated and not null terminated */ if (GetModuleFileNameA(NULL, dllpath, MAX_PATH)){ PathRemoveFileSpecA(dllpath); if (strlen(dllpath) > 3 && stricmp(&dllpath[strlen(dllpath) - 3], "bin") == 0) { if (asprintf(&tmppath, "%s/../share/misc/magic.mgc", dllpath) >= 0) APPENDPATH(); } else { if (asprintf(&tmppath, "%s/share/misc/magic.mgc", dllpath) >= 0) APPENDPATH(); else if (asprintf(&tmppath, "%s/magic.mgc", dllpath) >= 0) APPENDPATH(); } } /* Don't put MAGIC constant - it likely points to a file within MSys tree */ default_magic = hmagicpath; return default_magic; #endif }
218
88,988
0
MagickExport MagickBooleanType AutoGammaImage(Image *image, ExceptionInfo *exception) { double gamma, log_mean, mean, sans; MagickStatusType status; register ssize_t i; log_mean=log(0.5); if (image->channel_mask == DefaultChannels) { /* Apply gamma correction equally across all given channels. */ (void) GetImageMean(image,&mean,&sans,exception); gamma=log(mean*QuantumScale)/log_mean; return(LevelImage(image,0.0,(double) QuantumRange,gamma,exception)); } /* Auto-gamma each channel separately. */ status=MagickTrue; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { ChannelType channel_mask; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; channel_mask=SetImageChannelMask(image,(ChannelType) (1UL << i)); status=GetImageMean(image,&mean,&sans,exception); gamma=log(mean*QuantumScale)/log_mean; status&=LevelImage(image,0.0,(double) QuantumRange,gamma,exception); (void) SetImageChannelMask(image,channel_mask); if (status == MagickFalse) break; } return(status != 0 ? MagickTrue : MagickFalse); }
219
55,537
0
void get_iowait_load(unsigned long *nr_waiters, unsigned long *load) { struct rq *rq = this_rq(); *nr_waiters = atomic_read(&rq->nr_iowait); *load = rq->load.weight; }
220
135,943
0
PassRefPtrWillBeRawPtr<NameNodeList> ContainerNode::getElementsByName(const AtomicString& elementName) { return ensureCachedCollection<NameNodeList>(NameNodeListType, elementName); }
221
65,532
0
static void nfs4_put_deleg_lease(struct nfs4_file *fp) { struct file *filp = NULL; spin_lock(&fp->fi_lock); if (fp->fi_deleg_file && --fp->fi_delegees == 0) swap(filp, fp->fi_deleg_file); spin_unlock(&fp->fi_lock); if (filp) { vfs_setlease(filp, F_UNLCK, NULL, (void **)&fp); fput(filp); } }
222
83,264
0
struct sas_phy *sas_get_local_phy(struct domain_device *dev) { struct sas_ha_struct *ha = dev->port->ha; struct sas_phy *phy; unsigned long flags; /* a published domain device always has a valid phy, it may be * stale, but it is never NULL */ BUG_ON(!dev->phy); spin_lock_irqsave(&ha->phy_port_lock, flags); phy = dev->phy; get_device(&phy->dev); spin_unlock_irqrestore(&ha->phy_port_lock, flags); return phy; }
223
165,978
0
void GetRTCStatsOnSignalingThread( const scoped_refptr<base::SingleThreadTaskRunner>& main_thread, scoped_refptr<webrtc::PeerConnectionInterface> native_peer_connection, std::unique_ptr<blink::WebRTCStatsReportCallback> callback, blink::RTCStatsFilter filter) { TRACE_EVENT0("webrtc", "GetRTCStatsOnSignalingThread"); native_peer_connection->GetStats(RTCStatsCollectorCallbackImpl::Create( main_thread, std::move(callback), filter)); }
224
165,514
0
bool ContentSecurityPolicy::ExperimentalFeaturesEnabled() const { return RuntimeEnabledFeatures:: ExperimentalContentSecurityPolicyFeaturesEnabled(); }
225
74,349
0
static BOOLEAN CheckRunningDpc(PARANDIS_ADAPTER *pContext) { BOOLEAN bStopped; BOOLEAN bReportHang = FALSE; bStopped = 0 != InterlockedExchange(&pContext->bDPCInactive, TRUE); if (bStopped) { pContext->nDetectedInactivity++; } else { pContext->nDetectedInactivity = 0; } for (UINT i = 0; i < pContext->nPathBundles; i++) { if (pContext->pPathBundles[i].txPath.HasHWBuffersIsUse()) { if (pContext->nDetectedStoppedTx++ > 1) { DPrintf(0, ("[%s] - Suspicious Tx inactivity (%d)!\n", __FUNCTION__, pContext->pPathBundles[i].txPath.GetFreeHWBuffers())); #ifdef DBG_USE_VIRTIO_PCI_ISR_FOR_HOST_REPORT WriteVirtIODeviceByte(pContext->IODevice->addr + VIRTIO_PCI_ISR, 0); #endif break; } } } if (pContext->Limits.nPrintDiagnostic && ++pContext->Counters.nPrintDiagnostic >= pContext->Limits.nPrintDiagnostic) { pContext->Counters.nPrintDiagnostic = 0; PrintStatistics(pContext); } if (pContext->Statistics.ifHCInOctets == pContext->Counters.prevIn) { pContext->Counters.nRxInactivity++; if (pContext->Counters.nRxInactivity >= 10) { #if defined(CRASH_ON_NO_RX) ONPAUSECOMPLETEPROC proc = (ONPAUSECOMPLETEPROC)(PVOID)1; proc(pContext); #endif } } else { pContext->Counters.nRxInactivity = 0; pContext->Counters.prevIn = pContext->Statistics.ifHCInOctets; } return bReportHang; }
226
78,065
0
const char* CMSEXPORT cmsIT8GetDataRowCol(cmsHANDLE hIT8, int row, int col) { cmsIT8* it8 = (cmsIT8*) hIT8; _cmsAssert(hIT8 != NULL); return GetData(it8, row, col); }
227
21,842
0
static void set_seg_override(struct x86_emulate_ctxt *ctxt, int seg) { ctxt->has_seg_override = true; ctxt->seg_override = seg; }
228
99,412
0
RenderSandboxHostLinux::~RenderSandboxHostLinux() { if (init_) { if (HANDLE_EINTR(close(renderer_socket_)) < 0) PLOG(ERROR) << "close"; if (HANDLE_EINTR(close(childs_lifeline_fd_)) < 0) PLOG(ERROR) << "close"; } }
229
36,445
0
int snd_ctl_add(struct snd_card *card, struct snd_kcontrol *kcontrol) { struct snd_ctl_elem_id id; unsigned int idx; unsigned int count; int err = -EINVAL; if (! kcontrol) return err; if (snd_BUG_ON(!card || !kcontrol->info)) goto error; id = kcontrol->id; down_write(&card->controls_rwsem); if (snd_ctl_find_id(card, &id)) { up_write(&card->controls_rwsem); dev_err(card->dev, "control %i:%i:%i:%s:%i is already present\n", id.iface, id.device, id.subdevice, id.name, id.index); err = -EBUSY; goto error; } if (snd_ctl_find_hole(card, kcontrol->count) < 0) { up_write(&card->controls_rwsem); err = -ENOMEM; goto error; } list_add_tail(&kcontrol->list, &card->controls); card->controls_count += kcontrol->count; kcontrol->id.numid = card->last_numid + 1; card->last_numid += kcontrol->count; count = kcontrol->count; up_write(&card->controls_rwsem); for (idx = 0; idx < count; idx++, id.index++, id.numid++) snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_ADD, &id); return 0; error: snd_ctl_free_one(kcontrol); return err; }
230
18,722
0
int ip6_frag_nqueues(struct net *net) { return net->ipv6.frags.nqueues; }
231
101,718
0
TabContents* Browser::OpenAppShortcutWindow(Profile* profile, const GURL& url, bool update_shortcut) { Browser* app_browser; TabContents* tab = OpenApplicationWindow( profile, NULL, // this is a URL app. No extension. extension_misc::LAUNCH_WINDOW, url, &app_browser); if (!tab) return NULL; if (update_shortcut) { app_browser->pending_web_app_action_ = UPDATE_SHORTCUT; } return tab; }
232
181,611
1
key_ref_t key_create_or_update(key_ref_t keyring_ref, const char *type, const char *description, const void *payload, size_t plen, key_perm_t perm, unsigned long flags) { struct keyring_index_key index_key = { .description = description, }; struct key_preparsed_payload prep; struct assoc_array_edit *edit; const struct cred *cred = current_cred(); struct key *keyring, *key = NULL; key_ref_t key_ref; int ret; /* look up the key type to see if it's one of the registered kernel * types */ index_key.type = key_type_lookup(type); if (IS_ERR(index_key.type)) { key_ref = ERR_PTR(-ENODEV); goto error; } key_ref = ERR_PTR(-EINVAL); if (!index_key.type->match || !index_key.type->instantiate || (!index_key.description && !index_key.type->preparse)) goto error_put_type; keyring = key_ref_to_ptr(keyring_ref); key_check(keyring); key_ref = ERR_PTR(-ENOTDIR); if (keyring->type != &key_type_keyring) goto error_put_type; memset(&prep, 0, sizeof(prep)); prep.data = payload; prep.datalen = plen; prep.quotalen = index_key.type->def_datalen; prep.trusted = flags & KEY_ALLOC_TRUSTED; prep.expiry = TIME_T_MAX; if (index_key.type->preparse) { ret = index_key.type->preparse(&prep); if (ret < 0) { key_ref = ERR_PTR(ret); goto error_free_prep; } if (!index_key.description) index_key.description = prep.description; key_ref = ERR_PTR(-EINVAL); if (!index_key.description) goto error_free_prep; } index_key.desc_len = strlen(index_key.description); key_ref = ERR_PTR(-EPERM); if (!prep.trusted && test_bit(KEY_FLAG_TRUSTED_ONLY, &keyring->flags)) goto error_free_prep; flags |= prep.trusted ? KEY_ALLOC_TRUSTED : 0; ret = __key_link_begin(keyring, &index_key, &edit); if (ret < 0) { key_ref = ERR_PTR(ret); goto error_free_prep; } /* if we're going to allocate a new key, we're going to have * to modify the keyring */ ret = key_permission(keyring_ref, KEY_NEED_WRITE); if (ret < 0) { key_ref = ERR_PTR(ret); goto error_link_end; } /* if it's possible to update this type of key, search for an existing * key of the same type and description in the destination keyring and * update that instead if possible */ if (index_key.type->update) { key_ref = find_key_to_update(keyring_ref, &index_key); if (key_ref) goto found_matching_key; } /* if the client doesn't provide, decide on the permissions we want */ if (perm == KEY_PERM_UNDEF) { perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR; perm |= KEY_USR_VIEW; if (index_key.type->read) perm |= KEY_POS_READ; if (index_key.type == &key_type_keyring || index_key.type->update) perm |= KEY_POS_WRITE; } /* allocate a new key */ key = key_alloc(index_key.type, index_key.description, cred->fsuid, cred->fsgid, cred, perm, flags); if (IS_ERR(key)) { key_ref = ERR_CAST(key); goto error_link_end; } /* instantiate it and link it into the target keyring */ ret = __key_instantiate_and_link(key, &prep, keyring, NULL, &edit); if (ret < 0) { key_put(key); key_ref = ERR_PTR(ret); goto error_link_end; } key_ref = make_key_ref(key, is_key_possessed(keyring_ref)); error_link_end: __key_link_end(keyring, &index_key, edit); error_free_prep: if (index_key.type->preparse) index_key.type->free_preparse(&prep); error_put_type: key_type_put(index_key.type); error: return key_ref; found_matching_key: /* we found a matching key, so we're going to try to update it * - we can drop the locks first as we have the key pinned */ __key_link_end(keyring, &index_key, edit); key_ref = __key_update(key_ref, &prep); goto error_free_prep; }
233
132,728
0
void ChromotingInstance::HandlePauseVideo(const base::DictionaryValue& data) { if (!data.HasKey("pause")) { LOG(ERROR) << "Invalid pauseVideo."; return; } HandleVideoControl(data); }
234
166,462
0
explicit TestSafeBrowsingService(const SafeBrowsingTestConfiguration* config) : test_configuration_(config) { services_delegate_ = safe_browsing::ServicesDelegate::CreateForTest(this, this); }
235
4,611
0
PHP_FUNCTION(openssl_digest) { zend_bool raw_output = 0; char *data, *method; size_t data_len, method_len; const EVP_MD *mdtype; EVP_MD_CTX *md_ctx; unsigned int siglen; zend_string *sigbuf; if (zend_parse_parameters(ZEND_NUM_ARGS(), "ss|b", &data, &data_len, &method, &method_len, &raw_output) == FAILURE) { return; } mdtype = EVP_get_digestbyname(method); if (!mdtype) { php_error_docref(NULL, E_WARNING, "Unknown signature algorithm"); RETURN_FALSE; } siglen = EVP_MD_size(mdtype); sigbuf = zend_string_alloc(siglen, 0); md_ctx = EVP_MD_CTX_create(); if (EVP_DigestInit(md_ctx, mdtype) && EVP_DigestUpdate(md_ctx, (unsigned char *)data, data_len) && EVP_DigestFinal (md_ctx, (unsigned char *)ZSTR_VAL(sigbuf), &siglen)) { if (raw_output) { ZSTR_VAL(sigbuf)[siglen] = '\0'; ZSTR_LEN(sigbuf) = siglen; RETVAL_STR(sigbuf); } else { int digest_str_len = siglen * 2; zend_string *digest_str = zend_string_alloc(digest_str_len, 0); make_digest_ex(ZSTR_VAL(digest_str), (unsigned char*)ZSTR_VAL(sigbuf), siglen); ZSTR_VAL(digest_str)[digest_str_len] = '\0'; zend_string_release(sigbuf); RETVAL_STR(digest_str); } } else { php_openssl_store_errors(); zend_string_release(sigbuf); RETVAL_FALSE; } EVP_MD_CTX_destroy(md_ctx); }
236
143,479
0
CompositorImpl::CompositorImpl(CompositorClient* client, gfx::NativeWindow root_window) : frame_sink_id_(AllocateFrameSinkId()), resource_manager_(root_window), window_(NULL), surface_handle_(gpu::kNullSurfaceHandle), client_(client), needs_animate_(false), pending_frames_(0U), layer_tree_frame_sink_request_pending_(false), lock_manager_(base::ThreadTaskRunnerHandle::Get()), enable_surface_synchronization_( features::IsSurfaceSynchronizationEnabled()), enable_viz_( base::FeatureList::IsEnabled(features::kVizDisplayCompositor)), weak_factory_(this) { DCHECK(client); SetRootWindow(root_window); display::Screen::GetScreen()->AddObserver(this); }
237
18,627
0
SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size, unsigned int, flags, struct sockaddr __user *, addr, int __user *, addr_len) { struct socket *sock; struct iovec iov; struct msghdr msg; struct sockaddr_storage address; int err, err2; int fput_needed; if (size > INT_MAX) size = INT_MAX; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) goto out; msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_iovlen = 1; msg.msg_iov = &iov; iov.iov_len = size; iov.iov_base = ubuf; msg.msg_name = (struct sockaddr *)&address; msg.msg_namelen = sizeof(address); if (sock->file->f_flags & O_NONBLOCK) flags |= MSG_DONTWAIT; err = sock_recvmsg(sock, &msg, size, flags); if (err >= 0 && addr != NULL) { err2 = move_addr_to_user(&address, msg.msg_namelen, addr, addr_len); if (err2 < 0) err = err2; } fput_light(sock->file, fput_needed); out: return err; }
238
145,921
0
void IsCalloutAboveLauncherIcon(aura::Window* panel) { base::RunLoop().RunUntilIdle(); views::Widget* widget = GetCalloutWidgetForPanel(panel); Shelf* shelf = GetShelfForWindow(panel); gfx::Rect icon_bounds = shelf->GetScreenBoundsOfItemIconForWindow(panel); ASSERT_FALSE(icon_bounds.IsEmpty()); gfx::Rect panel_bounds = panel->GetBoundsInScreen(); gfx::Rect callout_bounds = widget->GetWindowBoundsInScreen(); ASSERT_FALSE(icon_bounds.IsEmpty()); EXPECT_TRUE(widget->IsVisible()); const ShelfAlignment alignment = shelf->alignment(); if (alignment == SHELF_ALIGNMENT_LEFT) EXPECT_EQ(panel_bounds.x(), callout_bounds.right()); else if (alignment == SHELF_ALIGNMENT_RIGHT) EXPECT_EQ(panel_bounds.right(), callout_bounds.x()); else EXPECT_EQ(panel_bounds.bottom(), callout_bounds.y()); if (IsHorizontal(alignment)) { EXPECT_NEAR(icon_bounds.CenterPoint().x(), widget->GetWindowBoundsInScreen().CenterPoint().x(), 1); } else { EXPECT_NEAR(icon_bounds.CenterPoint().y(), widget->GetWindowBoundsInScreen().CenterPoint().y(), 1); } }
239
88,491
0
static void clock_handler(const int fd, const short which, void *arg) { struct timeval t = {.tv_sec = 1, .tv_usec = 0}; static bool initialized = false; #if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC) static bool monotonic = false; static time_t monotonic_start; #endif if (initialized) { /* only delete the event if it's actually there. */ evtimer_del(&clockevent); } else { initialized = true; /* process_started is initialized to time() - 2. We initialize to 1 so * flush_all won't underflow during tests. */ #if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC) struct timespec ts; if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0) { monotonic = true; monotonic_start = ts.tv_sec - ITEM_UPDATE_INTERVAL - 2; } #endif } assoc_start_expand(stats_state.curr_items); if (settings.sig_hup) { settings.sig_hup = false; authfile_load(settings.auth_file); } evtimer_set(&clockevent, clock_handler, 0); event_base_set(main_base, &clockevent); evtimer_add(&clockevent, &t); #if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC) if (monotonic) { struct timespec ts; if (clock_gettime(CLOCK_MONOTONIC, &ts) == -1) return; current_time = (rel_time_t) (ts.tv_sec - monotonic_start); return; } #endif { struct timeval tv; gettimeofday(&tv, NULL); current_time = (rel_time_t) (tv.tv_sec - process_started); } }
240
59,330
0
static int build_migrate(struct sk_buff *skb, const struct xfrm_migrate *m, int num_migrate, const struct xfrm_kmaddress *k, const struct xfrm_selector *sel, const struct xfrm_encap_tmpl *encap, u8 dir, u8 type) { const struct xfrm_migrate *mp; struct xfrm_userpolicy_id *pol_id; struct nlmsghdr *nlh; int i, err; nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MIGRATE, sizeof(*pol_id), 0); if (nlh == NULL) return -EMSGSIZE; pol_id = nlmsg_data(nlh); /* copy data from selector, dir, and type to the pol_id */ memset(pol_id, 0, sizeof(*pol_id)); memcpy(&pol_id->sel, sel, sizeof(pol_id->sel)); pol_id->dir = dir; if (k != NULL) { err = copy_to_user_kmaddress(k, skb); if (err) goto out_cancel; } if (encap) { err = nla_put(skb, XFRMA_ENCAP, sizeof(*encap), encap); if (err) goto out_cancel; } err = copy_to_user_policy_type(type, skb); if (err) goto out_cancel; for (i = 0, mp = m ; i < num_migrate; i++, mp++) { err = copy_to_user_migrate(mp, skb); if (err) goto out_cancel; } nlmsg_end(skb, nlh); return 0; out_cancel: nlmsg_cancel(skb, nlh); return err; }
241
10,657
0
Ins_SANGW( void ) { /* instruction not supported anymore */ }
242
84,460
0
_nextA(int visited) { HmarkerList *hl = Currentbuf->hmarklist; BufferPoint *po; Anchor *an, *pan; int i, x, y, n = searchKeyNum(); ParsedURL url; if (Currentbuf->firstLine == NULL) return; if (!hl || hl->nmark == 0) return; an = retrieveCurrentAnchor(Currentbuf); if (visited != TRUE && an == NULL) an = retrieveCurrentForm(Currentbuf); y = Currentbuf->currentLine->linenumber; x = Currentbuf->pos; if (visited == TRUE) { n = hl->nmark; } for (i = 0; i < n; i++) { pan = an; if (an && an->hseq >= 0) { int hseq = an->hseq + 1; do { if (hseq >= hl->nmark) { if (visited == TRUE) return; an = pan; goto _end; } po = &hl->marks[hseq]; an = retrieveAnchor(Currentbuf->href, po->line, po->pos); if (visited != TRUE && an == NULL) an = retrieveAnchor(Currentbuf->formitem, po->line, po->pos); hseq++; if (visited == TRUE && an) { parseURL2(an->url, &url, baseURL(Currentbuf)); if (getHashHist(URLHist, parsedURL2Str(&url)->ptr)) { goto _end; } } } while (an == NULL || an == pan); } else { an = closest_next_anchor(Currentbuf->href, NULL, x, y); if (visited != TRUE) an = closest_next_anchor(Currentbuf->formitem, an, x, y); if (an == NULL) { if (visited == TRUE) return; an = pan; break; } x = an->start.pos; y = an->start.line; if (visited == TRUE) { parseURL2(an->url, &url, baseURL(Currentbuf)); if (getHashHist(URLHist, parsedURL2Str(&url)->ptr)) { goto _end; } } } } if (visited == TRUE) return; _end: if (an == NULL || an->hseq < 0) return; po = &hl->marks[an->hseq]; gotoLine(Currentbuf, po->line); Currentbuf->pos = po->pos; arrangeCursor(Currentbuf); displayBuffer(Currentbuf, B_NORMAL); }
243
164,039
0
void DownloadManagerImpl::ImportInProgressDownloads(uint32_t id) { for (auto& download : in_progress_downloads_) { auto item = std::move(download); if (item->GetId() == download::DownloadItem::kInvalidId) { item->SetDownloadId(id++); next_download_id_++; if (!should_persist_new_download_) in_progress_manager_->RemoveInProgressDownload(item->GetGuid()); } item->SetDelegate(this); DownloadItemUtils::AttachInfo(item.get(), GetBrowserContext(), nullptr); OnDownloadCreated(std::move(item)); } in_progress_downloads_.clear(); OnDownloadManagerInitialized(); }
244
5,181
0
PHP_FUNCTION(pg_result_error_field) { zval *result; zend_long fieldcode; PGresult *pgsql_result; pgsql_result_handle *pg_result; char *field = NULL; if (zend_parse_parameters_ex(ZEND_PARSE_PARAMS_QUIET, ZEND_NUM_ARGS(), "rl", &result, &fieldcode) == FAILURE) { RETURN_FALSE; } ZEND_FETCH_RESOURCE(pg_result, pgsql_result_handle *, result, -1, "PostgreSQL result", le_result); pgsql_result = pg_result->result; if (!pgsql_result) { RETURN_FALSE; } if (fieldcode & (PG_DIAG_SEVERITY|PG_DIAG_SQLSTATE|PG_DIAG_MESSAGE_PRIMARY|PG_DIAG_MESSAGE_DETAIL |PG_DIAG_MESSAGE_HINT|PG_DIAG_STATEMENT_POSITION #if PG_DIAG_INTERNAL_POSITION |PG_DIAG_INTERNAL_POSITION #endif #if PG_DIAG_INTERNAL_QUERY |PG_DIAG_INTERNAL_QUERY #endif |PG_DIAG_CONTEXT|PG_DIAG_SOURCE_FILE|PG_DIAG_SOURCE_LINE |PG_DIAG_SOURCE_FUNCTION)) { field = (char *)PQresultErrorField(pgsql_result, (int)fieldcode); if (field == NULL) { RETURN_NULL(); } else { RETURN_STRING(field); } } else { RETURN_FALSE; } }
245
531
0
static void pdf_run_gs_op(fz_context *ctx, pdf_processor *proc, int b) { pdf_run_processor *pr = (pdf_run_processor *)proc; pdf_gstate *gstate = pdf_flush_text(ctx, pr); gstate->fill.color_params.op = b; }
246
104,629
0
bool IsBaseCrxKey(const std::string& key) { for (size_t i = 0; i < arraysize(kBaseCrxKeys); ++i) { if (key == kBaseCrxKeys[i]) return true; } return false; }
247
175,840
0
void vp8_decoder_create_threads(VP8D_COMP *pbi) { int core_count = 0; unsigned int ithread; pbi->b_multithreaded_rd = 0; pbi->allocated_decoding_thread_count = 0; /* limit decoding threads to the max number of token partitions */ core_count = (pbi->max_threads > 8) ? 8 : pbi->max_threads; /* limit decoding threads to the available cores */ if (core_count > pbi->common.processor_core_count) core_count = pbi->common.processor_core_count; if (core_count > 1) { pbi->b_multithreaded_rd = 1; pbi->decoding_thread_count = core_count - 1; CALLOC_ARRAY(pbi->h_decoding_thread, pbi->decoding_thread_count); CALLOC_ARRAY(pbi->h_event_start_decoding, pbi->decoding_thread_count); CALLOC_ARRAY_ALIGNED(pbi->mb_row_di, pbi->decoding_thread_count, 32); CALLOC_ARRAY(pbi->de_thread_data, pbi->decoding_thread_count); for (ithread = 0; ithread < pbi->decoding_thread_count; ithread++) { sem_init(&pbi->h_event_start_decoding[ithread], 0, 0); vp8_setup_block_dptrs(&pbi->mb_row_di[ithread].mbd); pbi->de_thread_data[ithread].ithread = ithread; pbi->de_thread_data[ithread].ptr1 = (void *)pbi; pbi->de_thread_data[ithread].ptr2 = (void *) &pbi->mb_row_di[ithread]; pthread_create(&pbi->h_decoding_thread[ithread], 0, thread_decoding_proc, (&pbi->de_thread_data[ithread])); } sem_init(&pbi->h_event_end_decoding, 0, 0); pbi->allocated_decoding_thread_count = pbi->decoding_thread_count; } }
248
66,218
0
static int mailimf_parse_unwanted_msg_id(const char * message, size_t length, size_t * indx) { size_t cur_token; int r; char * word; int token_parsed; cur_token = * indx; token_parsed = TRUE; while (token_parsed) { token_parsed = FALSE; r = mailimf_word_parse(message, length, &cur_token, &word); if (r == MAILIMF_NO_ERROR) { mailimf_word_free(word); token_parsed = TRUE; } else if (r == MAILIMF_ERROR_PARSE) { /* do nothing */ } else return r; r = mailimf_semi_colon_parse(message, length, &cur_token); if (r == MAILIMF_NO_ERROR) token_parsed = TRUE; else if (r == MAILIMF_ERROR_PARSE) { /* do nothing */ } else return r; r = mailimf_comma_parse(message, length, &cur_token); if (r == MAILIMF_NO_ERROR) token_parsed = TRUE; else if (r == MAILIMF_ERROR_PARSE) { /* do nothing */ } else return r; r = mailimf_plus_parse(message, length, &cur_token); if (r == MAILIMF_NO_ERROR) token_parsed = TRUE; else if (r == MAILIMF_ERROR_PARSE) { /* do nothing */ } else return r; r = mailimf_colon_parse(message, length, &cur_token); if (r == MAILIMF_NO_ERROR) token_parsed = TRUE; else if (r == MAILIMF_ERROR_PARSE) { /* do nothing */ } else return r; r = mailimf_point_parse(message, length, &cur_token); if (r == MAILIMF_NO_ERROR) token_parsed = TRUE; else if (r == MAILIMF_ERROR_PARSE) { /* do nothing */ } else return r; r = mailimf_at_sign_parse(message, length, &cur_token); if (r == MAILIMF_NO_ERROR) token_parsed = TRUE; else if (r == MAILIMF_ERROR_PARSE) { /* do nothing */ } else return r; } * indx = cur_token; return MAILIMF_NO_ERROR; }
249
186,706
1
IDNConversionResult IDNToUnicodeWithAdjustmentsImpl( base::StringPiece host, base::OffsetAdjuster::Adjustments* adjustments, bool enable_spoof_checks) { if (adjustments) adjustments->clear(); // Convert the ASCII input to a base::string16 for ICU. base::string16 input16; input16.reserve(host.length()); input16.insert(input16.end(), host.begin(), host.end()); bool is_tld_ascii = true; size_t last_dot = host.rfind('.'); if (last_dot != base::StringPiece::npos && host.substr(last_dot).starts_with(".xn--")) { is_tld_ascii = false; } IDNConversionResult result; // Do each component of the host separately, since we enforce script matching // on a per-component basis. base::string16 out16; for (size_t component_start = 0, component_end; component_start < input16.length(); component_start = component_end + 1) { // Find the end of the component. component_end = input16.find('.', component_start); if (component_end == base::string16::npos) component_end = input16.length(); // For getting the last component. size_t component_length = component_end - component_start; size_t new_component_start = out16.length(); bool converted_idn = false; if (component_end > component_start) { // Add the substring that we just found. bool has_idn_component = false; converted_idn = IDNToUnicodeOneComponent( input16.data() + component_start, component_length, is_tld_ascii, enable_spoof_checks, &out16, &has_idn_component); result.has_idn_component |= has_idn_component; } size_t new_component_length = out16.length() - new_component_start; if (converted_idn && adjustments) { adjustments->push_back(base::OffsetAdjuster::Adjustment( component_start, component_length, new_component_length)); } // Need to add the dot we just found (if we found one). if (component_end < input16.length()) out16.push_back('.'); } result.result = out16; // Leave as punycode any inputs that spoof top domains. if (result.has_idn_component) { result.matching_top_domain = g_idn_spoof_checker.Get().GetSimilarTopDomain(out16); if (enable_spoof_checks && !result.matching_top_domain.domain.empty()) { if (adjustments) adjustments->clear(); result.result = input16; } } return result; }
250
52,955
0
static int setup_ctxt(struct qib_pportdata *ppd, int ctxt, struct file *fp, const struct qib_user_info *uinfo) { struct qib_filedata *fd = fp->private_data; struct qib_devdata *dd = ppd->dd; struct qib_ctxtdata *rcd; void *ptmp = NULL; int ret; int numa_id; assign_ctxt_affinity(fp, dd); numa_id = qib_numa_aware ? ((fd->rec_cpu_num != -1) ? cpu_to_node(fd->rec_cpu_num) : numa_node_id()) : dd->assigned_node_id; rcd = qib_create_ctxtdata(ppd, ctxt, numa_id); /* * Allocate memory for use in qib_tid_update() at open to * reduce cost of expected send setup per message segment */ if (rcd) ptmp = kmalloc(dd->rcvtidcnt * sizeof(u16) + dd->rcvtidcnt * sizeof(struct page **), GFP_KERNEL); if (!rcd || !ptmp) { qib_dev_err(dd, "Unable to allocate ctxtdata memory, failing open\n"); ret = -ENOMEM; goto bailerr; } rcd->userversion = uinfo->spu_userversion; ret = init_subctxts(dd, rcd, uinfo); if (ret) goto bailerr; rcd->tid_pg_list = ptmp; rcd->pid = current->pid; init_waitqueue_head(&dd->rcd[ctxt]->wait); strlcpy(rcd->comm, current->comm, sizeof(rcd->comm)); ctxt_fp(fp) = rcd; qib_stats.sps_ctxts++; dd->freectxts--; ret = 0; goto bail; bailerr: if (fd->rec_cpu_num != -1) __clear_bit(fd->rec_cpu_num, qib_cpulist); dd->rcd[ctxt] = NULL; kfree(rcd); kfree(ptmp); bail: return ret; }
251
101,040
0
void QuotaManagerProxy::NotifyOriginInUse( const GURL& origin) { if (!io_thread_->BelongsToCurrentThread()) { io_thread_->PostTask(FROM_HERE, NewRunnableMethod( this, &QuotaManagerProxy::NotifyOriginInUse, origin)); return; } if (manager_) manager_->NotifyOriginInUse(origin); }
252
174,214
0
status_t Camera3Device::disconnect() { ATRACE_CALL(); Mutex::Autolock il(mInterfaceLock); ALOGV("%s: E", __FUNCTION__); status_t res = OK; { Mutex::Autolock l(mLock); if (mStatus == STATUS_UNINITIALIZED) return res; if (mStatus == STATUS_ACTIVE || (mStatus == STATUS_ERROR && mRequestThread != NULL)) { res = mRequestThread->clearRepeatingRequests(); if (res != OK) { SET_ERR_L("Can't stop streaming"); } else { res = waitUntilStateThenRelock(/*active*/ false, kShutdownTimeout); if (res != OK) { SET_ERR_L("Timeout waiting for HAL to drain"); } } } if (mStatus == STATUS_ERROR) { CLOGE("Shutting down in an error state"); } if (mStatusTracker != NULL) { mStatusTracker->requestExit(); } if (mRequestThread != NULL) { mRequestThread->requestExit(); } mOutputStreams.clear(); mInputStream.clear(); } if (mRequestThread != NULL && mStatus != STATUS_ERROR) { mRequestThread->join(); } if (mStatusTracker != NULL) { mStatusTracker->join(); } camera3_device_t *hal3Device; { Mutex::Autolock l(mLock); mRequestThread.clear(); mStatusTracker.clear(); hal3Device = mHal3Device; } if (hal3Device != NULL) { ATRACE_BEGIN("camera3->close"); hal3Device->common.close(&hal3Device->common); ATRACE_END(); } { Mutex::Autolock l(mLock); mHal3Device = NULL; internalUpdateStatusLocked(STATUS_UNINITIALIZED); } ALOGV("%s: X", __FUNCTION__); return res; }
253
68,409
0
void perf_sample_event_took(u64 sample_len_ns) { u64 max_len = READ_ONCE(perf_sample_allowed_ns); u64 running_len; u64 avg_len; u32 max; if (max_len == 0) return; /* Decay the counter by 1 average sample. */ running_len = __this_cpu_read(running_sample_length); running_len -= running_len/NR_ACCUMULATED_SAMPLES; running_len += sample_len_ns; __this_cpu_write(running_sample_length, running_len); /* * Note: this will be biased artifically low until we have * seen NR_ACCUMULATED_SAMPLES. Doing it this way keeps us * from having to maintain a count. */ avg_len = running_len/NR_ACCUMULATED_SAMPLES; if (avg_len <= max_len) return; __report_avg = avg_len; __report_allowed = max_len; /* * Compute a throttle threshold 25% below the current duration. */ avg_len += avg_len / 4; max = (TICK_NSEC / 100) * sysctl_perf_cpu_time_max_percent; if (avg_len < max) max /= (u32)avg_len; else max = 1; WRITE_ONCE(perf_sample_allowed_ns, avg_len); WRITE_ONCE(max_samples_per_tick, max); sysctl_perf_event_sample_rate = max * HZ; perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate; if (!irq_work_queue(&perf_duration_work)) { early_printk("perf: interrupt took too long (%lld > %lld), lowering " "kernel.perf_event_max_sample_rate to %d\n", __report_avg, __report_allowed, sysctl_perf_event_sample_rate); } }
254
16,711
0
pdf_load_version(fz_context *ctx, pdf_document *doc) { char buf[20]; fz_seek(ctx, doc->file, 0, SEEK_SET); fz_read_line(ctx, doc->file, buf, sizeof buf); if (memcmp(buf, "%PDF-", 5) != 0) fz_throw(ctx, FZ_ERROR_GENERIC, "cannot recognize version marker"); doc->version = 10 * (fz_atof(buf+5) + 0.05f); if (doc->version < 10 || doc->version > 17) if (doc->version != 20) fz_warn(ctx, "unknown PDF version: %d.%d", doc->version / 10, doc->version % 10); }
255
154,079
0
void GLES2DecoderImpl::DoUniform1i(GLint fake_location, GLint v0) { GLenum type = 0; GLsizei count = 1; GLint real_location = -1; if (!PrepForSetUniformByLocation(fake_location, "glUniform1i", Program::kUniform1i, &real_location, &type, &count)) { return; } if (!state_.current_program->SetSamplers( state_.texture_units.size(), fake_location, 1, &v0)) { LOCAL_SET_GL_ERROR( GL_INVALID_VALUE, "glUniform1i", "texture unit out of range"); return; } api()->glUniform1iFn(real_location, v0); }
256
57,950
0
static int nf_tables_delset(struct sock *nlsk, struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const nla[]) { const struct nfgenmsg *nfmsg = nlmsg_data(nlh); struct nft_set *set; struct nft_ctx ctx; int err; if (nfmsg->nfgen_family == NFPROTO_UNSPEC) return -EAFNOSUPPORT; if (nla[NFTA_SET_TABLE] == NULL) return -EINVAL; err = nft_ctx_init_from_setattr(&ctx, skb, nlh, nla); if (err < 0) return err; set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME]); if (IS_ERR(set)) return PTR_ERR(set); if (set->flags & NFT_SET_INACTIVE) return -ENOENT; if (!list_empty(&set->bindings)) return -EBUSY; return nft_delset(&ctx, set); }
257
96,894
0
static long vmsplice_to_pipe(struct file *file, struct iov_iter *iter, unsigned int flags) { struct pipe_inode_info *pipe; long ret = 0; unsigned buf_flag = 0; if (flags & SPLICE_F_GIFT) buf_flag = PIPE_BUF_FLAG_GIFT; pipe = get_pipe_info(file); if (!pipe) return -EBADF; pipe_lock(pipe); ret = wait_for_space(pipe, flags); if (!ret) ret = iter_to_pipe(iter, pipe, buf_flag); pipe_unlock(pipe); if (ret > 0) wakeup_pipe_readers(pipe); return ret; }
258
24,094
0
static int writeWepKeyRid(struct airo_info *ai, WepKeyRid *wkr, int perm, int lock) { int rc; rc = PC4500_writerid(ai, RID_WEP_TEMP, wkr, sizeof(*wkr), lock); if (rc!=SUCCESS) airo_print_err(ai->dev->name, "WEP_TEMP set %x", rc); if (perm) { rc = PC4500_writerid(ai, RID_WEP_PERM, wkr, sizeof(*wkr), lock); if (rc!=SUCCESS) airo_print_err(ai->dev->name, "WEP_PERM set %x", rc); } return rc; }
259
170,017
0
xsltBuildVariable(xsltTransformContextPtr ctxt, xsltStylePreCompPtr castedComp, xmlNodePtr tree) { #ifdef XSLT_REFACTORED xsltStyleBasicItemVariablePtr comp = (xsltStyleBasicItemVariablePtr) castedComp; #else xsltStylePreCompPtr comp = castedComp; #endif xsltStackElemPtr elem; #ifdef WITH_XSLT_DEBUG_VARIABLE XSLT_TRACE(ctxt,XSLT_TRACE_VARIABLES,xsltGenericDebug(xsltGenericDebugContext, "Building variable %s", comp->name)); if (comp->select != NULL) XSLT_TRACE(ctxt,XSLT_TRACE_VARIABLES,xsltGenericDebug(xsltGenericDebugContext, " select %s", comp->select)); XSLT_TRACE(ctxt,XSLT_TRACE_VARIABLES,xsltGenericDebug(xsltGenericDebugContext, "\n")); #endif elem = xsltNewStackElem(ctxt); if (elem == NULL) return(NULL); elem->comp = (xsltStylePreCompPtr) comp; elem->name = comp->name; elem->select = comp->select; elem->nameURI = comp->ns; elem->tree = tree; elem->value = xsltEvalVariable(ctxt, elem, (xsltStylePreCompPtr) comp); if (elem->value != NULL) elem->computed = 1; return(elem); }
260
82,131
0
mrb_module_new(mrb_state *mrb) { struct RClass *m = (struct RClass*)mrb_obj_alloc(mrb, MRB_TT_MODULE, mrb->module_class); boot_initmod(mrb, m); return m; }
261
62,383
0
fn_printzp(netdissect_options *ndo, register const u_char *s, register u_int n, register const u_char *ep) { register int ret; register u_char c; ret = 1; /* assume truncated */ while (n > 0 && (ep == NULL || s < ep)) { n--; c = *s++; if (c == '\0') { ret = 0; break; } if (!ND_ISASCII(c)) { c = ND_TOASCII(c); ND_PRINT((ndo, "M-")); } if (!ND_ISPRINT(c)) { c ^= 0x40; /* DEL to ?, others to alpha */ ND_PRINT((ndo, "^")); } ND_PRINT((ndo, "%c", c)); } return (n == 0) ? 0 : ret; }
262
39,278
0
int security_get_reject_unknown(void) { return policydb.reject_unknown; }
263
136,775
0
const SecurityOrigin* TargetOrigin() const { return target_origin_.get(); }
264
176,139
0
VOID ixheaacd_hbe_post_anal_xprod3(ia_esbr_hbe_txposer_struct *ptr_hbe_txposer, WORD32 qmf_voc_columns, WORD32 qmf_band_idx, FLOAT32 p, WORD32 pitch_in_bins_idx) { WORD32 i, inp_band_idx, rem; FLOAT32 *out_buf = &ptr_hbe_txposer->qmf_out_buf[2][2 * qmf_band_idx]; for (; qmf_band_idx < ptr_hbe_txposer->x_over_qmf[2]; qmf_band_idx++) { FLOAT32 temp_r, temp_i; FLOAT32 temp_r1, temp_i1; const FLOAT32 *ptr_sel, *ptr_sel1; inp_band_idx = (2 * qmf_band_idx) / 3; ptr_sel = &ixheaacd_sel_case[(inp_band_idx + 1) & 3][0]; ptr_sel1 = &ixheaacd_sel_case[((inp_band_idx + 1) & 3) + 1][0]; rem = 2 * qmf_band_idx - 3 * inp_band_idx; if (rem == 0 || rem == 1) { FLOAT32 *in_buf = &ptr_hbe_txposer->qmf_in_buf[0][2 * inp_band_idx]; for (i = 0; i < qmf_voc_columns; i += 1) { WORD32 k; FLOAT32 vec_x[2 * HBE_OPER_WIN_LEN]; FLOAT32 *ptr_vec_x = &vec_x[0]; FLOAT32 x_zero_band_r, x_zero_band_i; FLOAT32 mag_scaling_fac; for (k = 0; k < (HBE_OPER_BLK_LEN_3); k += 2) { FLOAT64 base1; FLOAT64 base = 1e-17; temp_r = in_buf[0]; temp_i = in_buf[1]; in_buf += 256; base1 = base + temp_r * temp_r; base1 = base1 + temp_i * temp_i; mag_scaling_fac = (FLOAT32)(ixheaacd_cbrt_calc((FLOAT32)base1)); ptr_vec_x[0] = temp_r * mag_scaling_fac; ptr_vec_x[1] = temp_i * mag_scaling_fac; temp_r = in_buf[0]; temp_i = in_buf[1]; in_buf -= 128; temp_r1 = ptr_sel[0] * temp_r + ptr_sel[1] * temp_i; temp_i1 = ptr_sel[2] * temp_r + ptr_sel[3] * temp_i; temp_r = in_buf[0]; temp_i = in_buf[1]; temp_r1 += ptr_sel[4] * temp_r + ptr_sel[5] * temp_i; temp_i1 += ptr_sel[6] * temp_r + ptr_sel[7] * temp_i; temp_r1 *= 0.3984033437f; temp_i1 *= 0.3984033437f; base1 = base + temp_r1 * temp_r1; base1 = base1 + temp_i1 * temp_i1; mag_scaling_fac = (FLOAT32)(ixheaacd_cbrt_calc((FLOAT32)base1)); ptr_vec_x[2] = temp_r1 * mag_scaling_fac; ptr_vec_x[3] = temp_i1 * mag_scaling_fac; ptr_vec_x += 4; in_buf += 256; } ptr_vec_x = &vec_x[0]; temp_r = vec_x[2 * (HBE_ZERO_BAND_IDX - 2)]; temp_i = vec_x[(2 * (HBE_ZERO_BAND_IDX - 2)) + 1]; x_zero_band_r = temp_r * temp_r - temp_i * temp_i; x_zero_band_i = temp_r * temp_i + temp_i * temp_r; for (k = 0; k < (HBE_OPER_BLK_LEN_3); k++) { temp_r = ptr_vec_x[0] * x_zero_band_r - ptr_vec_x[1] * x_zero_band_i; temp_i = ptr_vec_x[0] * x_zero_band_i + ptr_vec_x[1] * x_zero_band_r; out_buf[0] += (temp_r * 0.4714045f); out_buf[1] += (temp_i * 0.4714045f); ptr_vec_x += 2; out_buf += 128; } ixheaacd_hbe_xprod_proc_3(ptr_hbe_txposer, qmf_band_idx, i, p, pitch_in_bins_idx); in_buf -= 128 * 11; out_buf -= 128 * 6; } } else { FLOAT32 *in_buf = &ptr_hbe_txposer->qmf_in_buf[0][2 * inp_band_idx]; FLOAT32 *in_buf1 = &ptr_hbe_txposer->qmf_in_buf[0][2 * (inp_band_idx + 1)]; for (i = 0; i < qmf_voc_columns; i++) { WORD32 k; FLOAT32 vec_x[2 * HBE_OPER_WIN_LEN]; FLOAT32 vec_x_cap[2 * HBE_OPER_WIN_LEN]; FLOAT32 x_zero_band_r, x_zero_band_i; FLOAT32 *ptr_vec_x = &vec_x[0]; FLOAT32 *ptr_vec_x_cap = &vec_x_cap[0]; FLOAT32 mag_scaling_fac; for (k = 0; k < (HBE_OPER_BLK_LEN_3); k += 2) { FLOAT32 tmp_vr, tmp_vi; FLOAT32 tmp_cr, tmp_ci; FLOAT64 base1; FLOAT64 base = 1e-17; temp_r1 = in_buf[0]; temp_i1 = in_buf[1]; temp_r = in_buf1[0]; temp_i = in_buf1[1]; base1 = base + temp_r * temp_r; base1 = base1 + temp_i * temp_i; mag_scaling_fac = (FLOAT32)(ixheaacd_cbrt_calc((FLOAT32)base1)); ptr_vec_x[0] = temp_r * mag_scaling_fac; ptr_vec_x[1] = temp_i * mag_scaling_fac; base1 = base + temp_r1 * temp_r1; base1 = base1 + temp_i1 * temp_i1; mag_scaling_fac = (FLOAT32)(ixheaacd_cbrt_calc((FLOAT32)base1)); ptr_vec_x_cap[0] = temp_r1 * mag_scaling_fac; ptr_vec_x_cap[1] = temp_i1 * mag_scaling_fac; in_buf += 256; temp_r = in_buf[0]; temp_i = in_buf[1]; temp_r1 = ptr_sel[0] * temp_r + ptr_sel[1] * temp_i; temp_i1 = ptr_sel[2] * temp_r + ptr_sel[3] * temp_i; in_buf -= 128; temp_r = in_buf[0]; temp_i = in_buf[1]; tmp_cr = temp_r1 + ptr_sel[4] * temp_r + ptr_sel[5] * temp_i; tmp_ci = temp_i1 + ptr_sel[6] * temp_r + ptr_sel[7] * temp_i; in_buf1 += 256; temp_r = in_buf1[0]; temp_i = in_buf1[1]; temp_r1 = ptr_sel1[0] * temp_r + ptr_sel1[1] * temp_i; temp_i1 = ptr_sel1[2] * temp_r + ptr_sel1[3] * temp_i; in_buf1 -= 128; temp_r = in_buf1[0]; temp_i = in_buf1[1]; tmp_vr = temp_r1 + ptr_sel1[4] * temp_r + ptr_sel1[5] * temp_i; tmp_vi = temp_i1 + ptr_sel1[6] * temp_r + ptr_sel1[7] * temp_i; tmp_cr *= 0.3984033437f; tmp_ci *= 0.3984033437f; tmp_vr *= 0.3984033437f; tmp_vi *= 0.3984033437f; base1 = base + tmp_vr * tmp_vr; base1 = base1 + tmp_vi * tmp_vi; mag_scaling_fac = (FLOAT32)(ixheaacd_cbrt_calc((FLOAT32)base1)); ptr_vec_x[2] = tmp_vr * mag_scaling_fac; ptr_vec_x[3] = tmp_vi * mag_scaling_fac; base1 = base + tmp_cr * tmp_cr; base1 = base1 + tmp_ci * tmp_ci; mag_scaling_fac = (FLOAT32)(ixheaacd_cbrt_calc((FLOAT32)base1)); ptr_vec_x_cap[2] = tmp_cr * mag_scaling_fac; ptr_vec_x_cap[3] = tmp_ci * mag_scaling_fac; in_buf += 256; in_buf1 += 256; ptr_vec_x += 4; ptr_vec_x_cap += 4; } ptr_vec_x = &vec_x[0]; ptr_vec_x_cap = &vec_x_cap[0]; temp_r = vec_x_cap[2 * (HBE_ZERO_BAND_IDX - 2)]; temp_i = vec_x_cap[2 * (HBE_ZERO_BAND_IDX - 2) + 1]; temp_r1 = vec_x[2 * (HBE_ZERO_BAND_IDX - 2)]; temp_i1 = vec_x[2 * (HBE_ZERO_BAND_IDX - 2) + 1]; x_zero_band_r = temp_r * temp_r - temp_i * temp_i; x_zero_band_i = temp_r * temp_i + temp_i * temp_r; temp_r = temp_r1 * temp_r1 - temp_i1 * temp_i1; temp_i = temp_r1 * temp_i1 + temp_i1 * temp_r1; for (k = 0; k < (HBE_OPER_BLK_LEN_3); k++) { temp_r1 = ptr_vec_x[0] * x_zero_band_r - ptr_vec_x[1] * x_zero_band_i; temp_i1 = ptr_vec_x[0] * x_zero_band_i + ptr_vec_x[1] * x_zero_band_r; temp_r1 += ptr_vec_x_cap[0] * temp_r - ptr_vec_x_cap[1] * temp_i; temp_i1 += ptr_vec_x_cap[0] * temp_i + ptr_vec_x_cap[1] * temp_r; out_buf[0] += (temp_r1 * 0.23570225f); out_buf[1] += (temp_i1 * 0.23570225f); out_buf += 128; ptr_vec_x += 2; ptr_vec_x_cap += 2; } ixheaacd_hbe_xprod_proc_3(ptr_hbe_txposer, qmf_band_idx, i, p, pitch_in_bins_idx); in_buf -= 128 * 11; in_buf1 -= 128 * 11; out_buf -= 128 * 6; } } out_buf -= (256 * qmf_voc_columns) - 2; } }
265
53,804
0
static void __init reserve_initrd(void) { }
266
96,986
0
static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask, int nid, nodemask_t *nmask) { struct page *page = NULL; if (hstate_is_gigantic(h)) return NULL; spin_lock(&hugetlb_lock); if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) goto out_unlock; spin_unlock(&hugetlb_lock); page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask); if (!page) return NULL; spin_lock(&hugetlb_lock); /* * We could have raced with the pool size change. * Double check that and simply deallocate the new page * if we would end up overcommiting the surpluses. Abuse * temporary page to workaround the nasty free_huge_page * codeflow */ if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) { SetPageHugeTemporary(page); put_page(page); page = NULL; } else { h->surplus_huge_pages++; h->surplus_huge_pages_node[page_to_nid(page)]++; } out_unlock: spin_unlock(&hugetlb_lock); return page; }
267
512
0
static void pdf_run_c(fz_context *ctx, pdf_processor *proc, float x1, float y1, float x2, float y2, float x3, float y3) { pdf_run_processor *pr = (pdf_run_processor *)proc; fz_curveto(ctx, pr->path, x1, y1, x2, y2, x3, y3); }
268
131,430
0
static void limitedToOnlyOtherAttributeAttributeGetterCallback(v8::Local<v8::String>, const v8::PropertyCallbackInfo<v8::Value>& info) { TRACE_EVENT_SET_SAMPLING_STATE("Blink", "DOMGetter"); TestObjectPythonV8Internal::limitedToOnlyOtherAttributeAttributeGetter(info); TRACE_EVENT_SET_SAMPLING_STATE("V8", "V8Execution"); }
269
167,385
0
int clear_count() const { return clear_count_; }
270
25,922
0
__bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address, int si_code) { struct mm_struct *mm = current->mm; /* * Something tried to access memory that isn't in our memory map.. * Fix it, but check if it's kernel or user first.. */ up_read(&mm->mmap_sem); __bad_area_nosemaphore(regs, error_code, address, si_code); }
271
65,451
0
find_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp) { struct nfs4_ol_stateid *lst; struct nfs4_client *clp = lo->lo_owner.so_client; lockdep_assert_held(&clp->cl_lock); list_for_each_entry(lst, &lo->lo_owner.so_stateids, st_perstateowner) { if (lst->st_stid.sc_file == fp) { atomic_inc(&lst->st_stid.sc_count); return lst; } } return NULL; }
272
53,491
0
rar_read_ahead(struct archive_read *a, size_t min, ssize_t *avail) { struct rar *rar = (struct rar *)(a->format->data); const void *h = __archive_read_ahead(a, min, avail); int ret; if (avail) { if (a->archive.read_data_is_posix_read && *avail > (ssize_t)a->archive.read_data_requested) *avail = a->archive.read_data_requested; if (*avail > rar->bytes_remaining) *avail = (ssize_t)rar->bytes_remaining; if (*avail < 0) return NULL; else if (*avail == 0 && rar->main_flags & MHD_VOLUME && rar->file_flags & FHD_SPLIT_AFTER) { ret = archive_read_format_rar_read_header(a, a->entry); if (ret == (ARCHIVE_EOF)) { rar->has_endarc_header = 1; ret = archive_read_format_rar_read_header(a, a->entry); } if (ret != (ARCHIVE_OK)) return NULL; return rar_read_ahead(a, min, avail); } } return h; }
273
89,251
0
conv_swap4bytes(const UChar* s, const UChar* end, UChar* conv) { while (s < end) { *conv++ = s[3]; *conv++ = s[2]; *conv++ = s[1]; *conv++ = s[0]; s += 4; } }
274
116,324
0
QVector2D QQuickWebViewPrivate::FlickableAxisLocker::touchVelocity(const QTouchEvent* event) { static bool touchVelocityAvailable = event->device()->capabilities().testFlag(QTouchDevice::Velocity); const QTouchEvent::TouchPoint& touchPoint = event->touchPoints().first(); if (touchVelocityAvailable) return touchPoint.velocity(); const QLineF movementLine(touchPoint.pos(), m_initialPosition); const ulong elapsed = event->timestamp() - m_time; if (!elapsed) return QVector2D(0, 0); return QVector2D(1000 * movementLine.dx() / elapsed, 1000 * movementLine.dy() / elapsed); }
275
65,685
0
static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp, struct list_head *reaplist) { struct nfs4_stid *s = &stp->st_stid; struct nfs4_client *clp = s->sc_client; lockdep_assert_held(&clp->cl_lock); WARN_ON_ONCE(!list_empty(&stp->st_locks)); if (!atomic_dec_and_test(&s->sc_count)) { wake_up_all(&close_wq); return; } idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id); list_add(&stp->st_locks, reaplist); }
276
91,240
0
static void guid_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg) { struct bmc_device *bmc = intf->bmc; if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE) || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE) || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD)) /* Not for me */ return; if (msg->msg.data[0] != 0) { /* Error from getting the GUID, the BMC doesn't have one. */ bmc->dyn_guid_set = 0; goto out; } if (msg->msg.data_len < 17) { bmc->dyn_guid_set = 0; dev_warn(intf->si_dev, "The GUID response from the BMC was too short, it was %d but should have been 17. Assuming GUID is not available.\n", msg->msg.data_len); goto out; } memcpy(bmc->fetch_guid.b, msg->msg.data + 1, 16); /* * Make sure the guid data is available before setting * dyn_guid_set. */ smp_wmb(); bmc->dyn_guid_set = 1; out: wake_up(&intf->waitq); }
277
182,573
1
static int hns_xgmac_get_sset_count(int stringset) { if (stringset == ETH_SS_STATS) return ARRAY_SIZE(g_xgmac_stats_string); return 0; }
278
46,973
0
static int ghash_async_digest(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm); struct ahash_request *cryptd_req = ahash_request_ctx(req); struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm; if (!irq_fpu_usable()) { memcpy(cryptd_req, req, sizeof(*req)); ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base); return crypto_ahash_digest(cryptd_req); } else { struct shash_desc *desc = cryptd_shash_desc(cryptd_req); struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm); desc->tfm = child; desc->flags = req->base.flags; return shash_ahash_digest(req, desc); } }
279
48,717
0
apr_status_t h2_stream_submit_pushes(h2_stream *stream, h2_headers *response) { apr_status_t status = APR_SUCCESS; apr_array_header_t *pushes; int i; pushes = h2_push_collect_update(stream, stream->request, response); if (pushes && !apr_is_empty_array(pushes)) { ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c, "h2_stream(%ld-%d): found %d push candidates", stream->session->id, stream->id, pushes->nelts); for (i = 0; i < pushes->nelts; ++i) { h2_push *push = APR_ARRAY_IDX(pushes, i, h2_push*); h2_stream *s = h2_session_push(stream->session, stream, push); if (!s) { status = APR_ECONNRESET; break; } } } return status; }
280
44,605
0
int lxc_setup(struct lxc_handler *handler) { const char *name = handler->name; struct lxc_conf *lxc_conf = handler->conf; const char *lxcpath = handler->lxcpath; if (do_rootfs_setup(lxc_conf, name, lxcpath) < 0) { ERROR("Error setting up rootfs mount after spawn"); return -1; } if (lxc_conf->inherit_ns_fd[LXC_NS_UTS] == -1) { if (setup_utsname(lxc_conf->utsname)) { ERROR("failed to setup the utsname for '%s'", name); return -1; } } if (setup_network(&lxc_conf->network)) { ERROR("failed to setup the network for '%s'", name); return -1; } if (lxc_conf->autodev > 0) { if (mount_autodev(name, &lxc_conf->rootfs, lxcpath)) { ERROR("failed to mount /dev in the container"); return -1; } } /* do automatic mounts (mainly /proc and /sys), but exclude * those that need to wait until other stuff has finished */ if (lxc_mount_auto_mounts(lxc_conf, lxc_conf->auto_mounts & ~LXC_AUTO_CGROUP_MASK, handler) < 0) { ERROR("failed to setup the automatic mounts for '%s'", name); return -1; } if (setup_mount(&lxc_conf->rootfs, lxc_conf->fstab, name)) { ERROR("failed to setup the mounts for '%s'", name); return -1; } if (!lxc_list_empty(&lxc_conf->mount_list) && setup_mount_entries(&lxc_conf->rootfs, &lxc_conf->mount_list, name)) { ERROR("failed to setup the mount entries for '%s'", name); return -1; } /* Make sure any start hooks are in the container */ if (!verify_start_hooks(lxc_conf)) return -1; if (lxc_conf->is_execute) lxc_execute_bind_init(lxc_conf); /* now mount only cgroup, if wanted; * before, /sys could not have been mounted * (is either mounted automatically or via fstab entries) */ if (lxc_mount_auto_mounts(lxc_conf, lxc_conf->auto_mounts & LXC_AUTO_CGROUP_MASK, handler) < 0) { ERROR("failed to setup the automatic mounts for '%s'", name); return -1; } if (run_lxc_hooks(name, "mount", lxc_conf, lxcpath, NULL)) { ERROR("failed to run mount hooks for container '%s'.", name); return -1; } if (lxc_conf->autodev > 0) { if (run_lxc_hooks(name, "autodev", lxc_conf, lxcpath, NULL)) { ERROR("failed to run autodev hooks for container '%s'.", name); return -1; } if (fill_autodev(&lxc_conf->rootfs)) { ERROR("failed to populate /dev in the container"); return -1; } } if (!lxc_conf->is_execute && setup_console(&lxc_conf->rootfs, &lxc_conf->console, lxc_conf->ttydir)) { ERROR("failed to setup the console for '%s'", name); return -1; } if (lxc_conf->kmsg) { if (setup_kmsg(&lxc_conf->rootfs, &lxc_conf->console)) // don't fail ERROR("failed to setup kmsg for '%s'", name); } if (!lxc_conf->is_execute && setup_dev_symlinks(&lxc_conf->rootfs)) { ERROR("failed to setup /dev symlinks for '%s'", name); return -1; } /* mount /proc if it's not already there */ if (tmp_proc_mount(lxc_conf) < 0) { ERROR("failed to LSM mount proc for '%s'", name); return -1; } if (setup_pivot_root(&lxc_conf->rootfs)) { ERROR("failed to set rootfs for '%s'", name); return -1; } if (setup_pts(lxc_conf->pts)) { ERROR("failed to setup the new pts instance"); return -1; } if (lxc_create_tty(name, lxc_conf)) { ERROR("failed to create the ttys"); return -1; } if (send_ttys_to_parent(handler) < 0) { ERROR("failure sending console info to parent"); return -1; } if (!lxc_conf->is_execute && setup_tty(lxc_conf)) { ERROR("failed to setup the ttys for '%s'", name); return -1; } if (lxc_conf->pty_names && setenv("container_ttys", lxc_conf->pty_names, 1)) SYSERROR("failed to set environment variable for container ptys"); if (setup_personality(lxc_conf->personality)) { ERROR("failed to setup personality"); return -1; } if (!lxc_list_empty(&lxc_conf->keepcaps)) { if (!lxc_list_empty(&lxc_conf->caps)) { ERROR("Simultaneously requested dropping and keeping caps"); return -1; } if (dropcaps_except(&lxc_conf->keepcaps)) { ERROR("failed to keep requested caps"); return -1; } } else if (setup_caps(&lxc_conf->caps)) { ERROR("failed to drop capabilities"); return -1; } NOTICE("'%s' is setup.", name); return 0; }
281
100,024
0
bool WebPluginImpl::handleInputEvent( const WebInputEvent& event, WebCursorInfo& cursor_info) { return delegate_->HandleInputEvent(event, &cursor_info); }
282
158,346
0
void RenderWidgetHostImpl::RendererExited(base::TerminationStatus status, int exit_code) { if (!renderer_initialized_) return; monitoring_composition_info_ = false; renderer_initialized_ = false; waiting_for_screen_rects_ack_ = false; suppress_events_until_keydown_ = false; ResetSentVisualProperties(); if (!is_hidden_) { is_hidden_ = true; if (!destroyed_) process_->UpdateClientPriority(this); } in_flight_event_count_ = 0; StopInputEventAckTimeout(); if (view_) { view_->RenderProcessGone(status, exit_code); view_.reset(); // The View should be deleted by RenderProcessGone. } SetupInputRouter(); synthetic_gesture_controller_.reset(); current_content_source_id_ = 0; frame_token_message_queue_->Reset(); }
283
33,890
0
cib_remote_inputfd(cib_t * cib) { cib_remote_opaque_t *private = cib->variant_opaque; return private->callback.socket; }
284
16,042
0
XmpPtr xmp_new_empty() { RESET_ERROR; SXMPMeta *txmp = new SXMPMeta; return (XmpPtr)txmp; }
285
133,572
0
RenderFrameHostManager* WebContentsImpl::GetRenderManagerForTesting() { return GetRenderManager(); }
286
183,180
1
static ssize_t snd_timer_user_read(struct file *file, char __user *buffer, size_t count, loff_t *offset) { struct snd_timer_user *tu; long result = 0, unit; int qhead; int err = 0; tu = file->private_data; unit = tu->tread ? sizeof(struct snd_timer_tread) : sizeof(struct snd_timer_read); spin_lock_irq(&tu->qlock); while ((long)count - result >= unit) { while (!tu->qused) { wait_queue_t wait; if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) { err = -EAGAIN; goto _error; } set_current_state(TASK_INTERRUPTIBLE); init_waitqueue_entry(&wait, current); add_wait_queue(&tu->qchange_sleep, &wait); spin_unlock_irq(&tu->qlock); schedule(); spin_lock_irq(&tu->qlock); remove_wait_queue(&tu->qchange_sleep, &wait); if (tu->disconnected) { err = -ENODEV; goto _error; } if (signal_pending(current)) { err = -ERESTARTSYS; goto _error; } } qhead = tu->qhead++; tu->qhead %= tu->queue_size; tu->qused--; spin_unlock_irq(&tu->qlock); mutex_lock(&tu->ioctl_lock); if (tu->tread) { if (copy_to_user(buffer, &tu->tqueue[qhead], sizeof(struct snd_timer_tread))) err = -EFAULT; } else { if (copy_to_user(buffer, &tu->queue[qhead], sizeof(struct snd_timer_read))) err = -EFAULT; } mutex_unlock(&tu->ioctl_lock); spin_lock_irq(&tu->qlock); if (err < 0) goto _error; result += unit; buffer += unit; } _error: spin_unlock_irq(&tu->qlock); return result > 0 ? result : err; }
287
29,911
0
static ssize_t map_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos, int cap_setid, struct uid_gid_map *map, struct uid_gid_map *parent_map) { struct seq_file *seq = file->private_data; struct user_namespace *ns = seq->private; struct uid_gid_map new_map; unsigned idx; struct uid_gid_extent *extent = NULL; unsigned long page = 0; char *kbuf, *pos, *next_line; ssize_t ret = -EINVAL; /* * The id_map_mutex serializes all writes to any given map. * * Any map is only ever written once. * * An id map fits within 1 cache line on most architectures. * * On read nothing needs to be done unless you are on an * architecture with a crazy cache coherency model like alpha. * * There is a one time data dependency between reading the * count of the extents and the values of the extents. The * desired behavior is to see the values of the extents that * were written before the count of the extents. * * To achieve this smp_wmb() is used on guarantee the write * order and smp_read_barrier_depends() is guaranteed that we * don't have crazy architectures returning stale data. * */ mutex_lock(&id_map_mutex); ret = -EPERM; /* Only allow one successful write to the map */ if (map->nr_extents != 0) goto out; /* * Adjusting namespace settings requires capabilities on the target. */ if (cap_valid(cap_setid) && !file_ns_capable(file, ns, CAP_SYS_ADMIN)) goto out; /* Get a buffer */ ret = -ENOMEM; page = __get_free_page(GFP_TEMPORARY); kbuf = (char *) page; if (!page) goto out; /* Only allow <= page size writes at the beginning of the file */ ret = -EINVAL; if ((*ppos != 0) || (count >= PAGE_SIZE)) goto out; /* Slurp in the user data */ ret = -EFAULT; if (copy_from_user(kbuf, buf, count)) goto out; kbuf[count] = '\0'; /* Parse the user data */ ret = -EINVAL; pos = kbuf; new_map.nr_extents = 0; for (;pos; pos = next_line) { extent = &new_map.extent[new_map.nr_extents]; /* Find the end of line and ensure I don't look past it */ next_line = strchr(pos, '\n'); if (next_line) { *next_line = '\0'; next_line++; if (*next_line == '\0') next_line = NULL; } pos = skip_spaces(pos); extent->first = simple_strtoul(pos, &pos, 10); if (!isspace(*pos)) goto out; pos = skip_spaces(pos); extent->lower_first = simple_strtoul(pos, &pos, 10); if (!isspace(*pos)) goto out; pos = skip_spaces(pos); extent->count = simple_strtoul(pos, &pos, 10); if (*pos && !isspace(*pos)) goto out; /* Verify there is not trailing junk on the line */ pos = skip_spaces(pos); if (*pos != '\0') goto out; /* Verify we have been given valid starting values */ if ((extent->first == (u32) -1) || (extent->lower_first == (u32) -1 )) goto out; /* Verify count is not zero and does not cause the extent to wrap */ if ((extent->first + extent->count) <= extent->first) goto out; if ((extent->lower_first + extent->count) <= extent->lower_first) goto out; /* Do the ranges in extent overlap any previous extents? */ if (mappings_overlap(&new_map, extent)) goto out; new_map.nr_extents++; /* Fail if the file contains too many extents */ if ((new_map.nr_extents == UID_GID_MAP_MAX_EXTENTS) && (next_line != NULL)) goto out; } /* Be very certaint the new map actually exists */ if (new_map.nr_extents == 0) goto out; ret = -EPERM; /* Validate the user is allowed to use user id's mapped to. */ if (!new_idmap_permitted(file, ns, cap_setid, &new_map)) goto out; /* Map the lower ids from the parent user namespace to the * kernel global id space. */ for (idx = 0; idx < new_map.nr_extents; idx++) { u32 lower_first; extent = &new_map.extent[idx]; lower_first = map_id_range_down(parent_map, extent->lower_first, extent->count); /* Fail if we can not map the specified extent to * the kernel global id space. */ if (lower_first == (u32) -1) goto out; extent->lower_first = lower_first; } /* Install the map */ memcpy(map->extent, new_map.extent, new_map.nr_extents*sizeof(new_map.extent[0])); smp_wmb(); map->nr_extents = new_map.nr_extents; *ppos = count; ret = count; out: mutex_unlock(&id_map_mutex); if (page) free_page(page); return ret; }
288
169,532
0
void AutomationInternalCustomBindings::OnAtomicUpdateFinished( ui::AXTree* tree, bool root_changed, const std::vector<ui::AXTreeDelegate::Change>& changes) { auto iter = axtree_to_tree_cache_map_.find(tree); if (iter == axtree_to_tree_cache_map_.end()) return; for (const auto change : changes) { ui::AXNode* node = change.node; switch (change.type) { case NODE_CREATED: SendTreeChangeEvent( api::automation::TREE_CHANGE_TYPE_NODECREATED, tree, node); break; case SUBTREE_CREATED: SendTreeChangeEvent( api::automation::TREE_CHANGE_TYPE_SUBTREECREATED, tree, node); break; case NODE_CHANGED: SendTreeChangeEvent( api::automation::TREE_CHANGE_TYPE_NODECHANGED, tree, node); break; } } for (int id : text_changed_node_ids_) { SendTreeChangeEvent(api::automation::TREE_CHANGE_TYPE_TEXTCHANGED, tree, tree->GetFromId(id)); } text_changed_node_ids_.clear(); }
289
126,646
0
void TabStripModel::NotifyIfActiveTabChanged(TabContents* old_contents, NotifyTypes notify_types) { TabContents* new_contents = GetTabContentsAtImpl(active_index()); if (old_contents != new_contents) { FOR_EACH_OBSERVER(TabStripModelObserver, observers_, ActiveTabChanged(old_contents, new_contents, active_index(), notify_types == NOTIFY_USER_GESTURE)); contents_data_[active_index()]->discarded = false; } }
290
184,660
1
void GoBackCrossSite() { NavigationEntry* entry = contents()->controller().GetEntryAtOffset(-1); ASSERT_TRUE(entry); contents()->controller().GoBack(); // The navigation should commit in the pending RVH. contents()->TestDidNavigate( contents()->pending_rvh(), entry->page_id(), GURL(entry->url()), content::PAGE_TRANSITION_TYPED); }
291
52,810
0
static ssize_t ib_ucm_send_info(struct ib_ucm_file *file, const char __user *inbuf, int in_len, int (*func)(struct ib_cm_id *cm_id, int status, const void *info, u8 info_len, const void *data, u8 data_len)) { struct ib_ucm_context *ctx; struct ib_ucm_info cmd; const void *data = NULL; const void *info = NULL; int result; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; result = ib_ucm_alloc_data(&data, cmd.data, cmd.data_len); if (result) goto done; result = ib_ucm_alloc_data(&info, cmd.info, cmd.info_len); if (result) goto done; ctx = ib_ucm_ctx_get(file, cmd.id); if (!IS_ERR(ctx)) { result = func(ctx->cm_id, cmd.status, info, cmd.info_len, data, cmd.data_len); ib_ucm_ctx_put(ctx); } else result = PTR_ERR(ctx); done: kfree(data); kfree(info); return result; }
292
13,702
0
int ssl3_renegotiate_check(SSL *s) { int ret = 0; if (s->s3->renegotiate) { if ((s->s3->rbuf.left == 0) && (s->s3->wbuf.left == 0) && !SSL_in_init(s)) { /* * if we are the server, and we have sent a 'RENEGOTIATE' * message, we need to go to SSL_ST_ACCEPT. */ /* SSL_ST_ACCEPT */ s->state = SSL_ST_RENEGOTIATE; s->s3->renegotiate = 0; s->s3->num_renegotiations++; s->s3->total_renegotiations++; ret = 1; } } return (ret); }
293
128,746
0
ReadableStream::ReadableStream(UnderlyingSource* source) : m_source(source) , m_isStarted(false) , m_isDraining(false) , m_isPulling(false) , m_isDisturbed(false) , m_state(Readable) { }
294
165,764
0
void SVGElement::AddedEventListener( const AtomicString& event_type, RegisteredEventListener& registered_listener) { Node::AddedEventListener(event_type, registered_listener); HeapHashSet<WeakMember<SVGElement>> instances; CollectInstancesForSVGElement(this, instances); AddEventListenerOptionsResolved* options = registered_listener.Options(); EventListener* listener = registered_listener.Callback(); for (SVGElement* element : instances) { bool result = element->Node::AddEventListenerInternal(event_type, listener, options); DCHECK(result); } }
295
129,137
0
bool CaptureOnly(const Extension* extension, const GURL& url, int tab_id) { return !extension->permissions_data()->CanAccessPage( extension, url, url, tab_id, -1, NULL) && extension->permissions_data()->CanCaptureVisiblePage(tab_id, NULL); }
296
114,177
0
BrowserGpuChannelHostFactory::AllocateSharedMemory(uint32 size) { scoped_ptr<base::SharedMemory> shm(new base::SharedMemory()); if (!shm->CreateAnonymous(size)) return scoped_ptr<base::SharedMemory>(); return shm.Pass(); }
297
149,059
0
static int checkConstraintExprNode(Walker *pWalker, Expr *pExpr){ if( pExpr->op==TK_COLUMN ){ assert( pExpr->iColumn>=0 || pExpr->iColumn==-1 ); if( pExpr->iColumn>=0 ){ if( pWalker->u.aiCol[pExpr->iColumn]>=0 ){ pWalker->eCode |= CKCNSTRNT_COLUMN; } }else{ pWalker->eCode |= CKCNSTRNT_ROWID; } } return WRC_Continue; }
298
141,087
0
MediaQueryMatcher& Document::GetMediaQueryMatcher() { if (!media_query_matcher_) media_query_matcher_ = MediaQueryMatcher::Create(*this); return *media_query_matcher_; }
299