unique_id
int64 13
189k
| target
int64 0
1
| code
stringlengths 20
241k
| __index_level_0__
int64 0
18.9k
|
---|---|---|---|
64,892 | 0 | static int rle_patch_file_size(struct iwbmpwcontext *wctx,size_t rlesize)
{
/* After RLE compression finishes, seek back into the already-written BMP
 * and patch in the sizes that were unknown up front: the total file size
 * (BITMAPFILEHEADER offset 2, if a file header was written) and the size
 * of the compressed image data (info header offset 20, biSizeImage).
 * Returns 1 on success, 0 on failure. */
iw_byte buf[4];
size_t fileheader_size;
int ret;
/* Patching requires random access to the output stream. */
if(!wctx->iodescr->seek_fn) {
iw_set_error(wctx->ctx,"Writing compressed BMP requires a seek function");
return 0;
}
if(wctx->include_file_header) {
/* Offset 2 of the 14-byte file header holds the total file size. */
ret=(*wctx->iodescr->seek_fn)(wctx->ctx,wctx->iodescr,2,SEEK_SET);
if(!ret) return 0;
iw_set_ui32le(buf,(unsigned int)(14+wctx->header_size+wctx->bitfields_size+wctx->palsize+rlesize));
iwbmp_write(wctx,buf,4);
fileheader_size = 14;
}
else {
fileheader_size = 0;
}
/* Offset 20 within the info header is the compressed image size. */
ret=(*wctx->iodescr->seek_fn)(wctx->ctx,wctx->iodescr,fileheader_size+20,SEEK_SET);
if(!ret) return 0;
iw_set_ui32le(buf,(unsigned int)rlesize);
iwbmp_write(wctx,buf,4);
/* Restore the write position to end-of-file.
 * NOTE(review): this seek's return value is ignored — presumably
 * acceptable as the final operation, but confirm callers do not
 * continue writing after a failed seek. */
(*wctx->iodescr->seek_fn)(wctx->ctx,wctx->iodescr,0,SEEK_END);
return 1;
}
| 8,600 |
40,241 | 0 | static int pn_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
/* Phonet socket ioctl handler.
 * SIOCINQ reports the length of the first queued receive skb (0 if the
 * queue is empty). SIOCPNADDRESOURCE/SIOCPNDELRESOURCE bind or unbind a
 * Phonet resource id; ids must be < 256. Anything else returns
 * -ENOIOCTLCMD so generic ioctl handling can take over. */
struct sk_buff *skb;
int answ;
switch (cmd) {
case SIOCINQ:
/* Take the socket lock so the queue head is stable while we peek. */
lock_sock(sk);
skb = skb_peek(&sk->sk_receive_queue);
answ = skb ? skb->len : 0;
release_sock(sk);
return put_user(answ, (int __user *)arg);
case SIOCPNADDRESOURCE:
case SIOCPNDELRESOURCE: {
u32 res;
if (get_user(res, (u32 __user *)arg))
return -EFAULT;
if (res >= 256)
return -EINVAL;
if (cmd == SIOCPNADDRESOURCE)
return pn_sock_bind_res(sk, res);
else
return pn_sock_unbind_res(sk, res);
}
}
return -ENOIOCTLCMD;
}
| 8,601 |
180,111 | 1 | ZEND_API void zend_objects_store_del_ref_by_handle_ex(zend_object_handle handle, const zend_object_handlers *handlers TSRMLS_DC) /* {{{ */
{
/* Drop one reference from the object stored under `handle`. When the last
 * reference is being released this runs the destructor (at most once, via
 * the destructor_called flag) and then the free_storage handler, wrapping
 * both in zend_try so a bailout inside user code cannot skip cleanup; a
 * captured bailout is re-raised at the very end via zend_bailout().
 * NOTE(review): the refcount is decremented even when the bucket is no
 * longer valid — confirm that is the intended bookkeeping for already-
 * destroyed objects. */
struct _store_object *obj;
int failure = 0;
if (!EG(objects_store).object_buckets) {
return;
}
obj = &EG(objects_store).object_buckets[handle].bucket.obj;
/* Make sure we hold a reference count during the destructor call
otherwise, when the destructor ends the storage might be freed
when the refcount reaches 0 a second time
*/
if (EG(objects_store).object_buckets[handle].valid) {
if (obj->refcount == 1) {
if (!EG(objects_store).object_buckets[handle].destructor_called) {
EG(objects_store).object_buckets[handle].destructor_called = 1;
if (obj->dtor) {
/* Late-bind handlers if the store entry never got any. */
if (handlers && !obj->handlers) {
obj->handlers = handlers;
}
zend_try {
obj->dtor(obj->object, handle TSRMLS_CC);
} zend_catch {
failure = 1;
} zend_end_try();
}
}
/* re-read the object from the object store as the store might have been reallocated in the dtor */
obj = &EG(objects_store).object_buckets[handle].bucket.obj;
if (obj->refcount == 1) {
GC_REMOVE_ZOBJ_FROM_BUFFER(obj);
if (obj->free_storage) {
zend_try {
obj->free_storage(obj->object TSRMLS_CC);
} zend_catch {
failure = 1;
} zend_end_try();
}
ZEND_OBJECTS_STORE_ADD_TO_FREE_LIST();
}
}
}
obj->refcount--;
#if ZEND_DEBUG_OBJECTS
if (obj->refcount == 0) {
fprintf(stderr, "Deallocated object id #%d\n", handle);
} else {
fprintf(stderr, "Decreased refcount of object id #%d\n", handle);
}
#endif
/* Re-raise any bailout that a destructor/free handler triggered. */
if (failure) {
zend_bailout();
}
}
/* }}} */
| 8,602 |
64,954 | 0 | static iw_tmpsample x_to_linear_sample(iw_tmpsample v, const struct iw_csdescr *csdescr)
{
/* Convert one sample from the colorspace described by csdescr into
 * linear light. Unrecognized colorspace types fall back to sRGB,
 * exactly like the explicit IW_CSTYPE_SRGB case. */
int cstype = csdescr->cstype;

if(cstype==IW_CSTYPE_LINEAR) {
/* Already linear; nothing to do. */
return v;
}
if(cstype==IW_CSTYPE_GAMMA) {
return gamma_to_linear_sample(v,csdescr->gamma);
}
if(cstype==IW_CSTYPE_REC709) {
return rec709_to_linear_sample(v);
}
/* IW_CSTYPE_SRGB and any unknown type use the sRGB transfer curve. */
return srgb_to_linear_sample(v);
}
| 8,603 |
41,776 | 0 | static int addrconf_ifid_gre(u8 *eui, struct net_device *dev)
{
/* Build an IPv6 interface identifier for a GRE device from its device
 * address, reusing the ISATAP EUI-64 construction.
 * NOTE(review): assumes dev->dev_addr holds (at least) a 32-bit value and
 * is suitably aligned for the __be32 load — confirm for all GRE setups. */
return __ipv6_isatap_ifid(eui, *(__be32 *)dev->dev_addr);
}
| 8,604 |
29,248 | 0 | ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user_kern *u)
{
/* Update an existing IPVS virtual service in place from the user-supplied
 * parameters: flags, timeout, netmask, and (if it changed) the scheduler.
 * A failed bind of the new scheduler attempts to restore the old one.
 * Returns 0 on success or a negative errno. */
struct ip_vs_scheduler *sched, *old_sched;
int ret = 0;
/*
* Lookup the scheduler, by 'u->sched_name'
*/
sched = ip_vs_scheduler_get(u->sched_name);
if (sched == NULL) {
pr_info("Scheduler module ip_vs_%s not found\n", u->sched_name);
return -ENOENT;
}
/* Default: the reference taken above is dropped at the end. */
old_sched = sched;
#ifdef CONFIG_IP_VS_IPV6
/* IPv6 netmask is a prefix length; must be 1..128. */
if (u->af == AF_INET6 && (u->netmask < 1 || u->netmask > 128)) {
ret = -EINVAL;
goto out;
}
#endif
write_lock_bh(&__ip_vs_svc_lock);
/*
* Wait until all other svc users go away.
*/
IP_VS_WAIT_WHILE(atomic_read(&svc->usecnt) > 1);
/*
* Set the flags and timeout value
*/
svc->flags = u->flags | IP_VS_SVC_F_HASHED;
svc->timeout = u->timeout * HZ;
svc->netmask = u->netmask;
old_sched = svc->scheduler;
if (sched != old_sched) {
/*
* Unbind the old scheduler
*/
if ((ret = ip_vs_unbind_scheduler(svc))) {
old_sched = sched;
goto out_unlock;
}
/*
* Bind the new scheduler
*/
if ((ret = ip_vs_bind_scheduler(svc, sched))) {
/*
* If ip_vs_bind_scheduler fails, restore the old
* scheduler.
* The main reason of failure is out of memory.
*
* The question is if the old scheduler can be
* restored all the time. TODO: if it cannot be
* restored some time, we must delete the service,
* otherwise the system may crash.
*/
ip_vs_bind_scheduler(svc, old_sched);
old_sched = sched;
goto out_unlock;
}
}
out_unlock:
write_unlock_bh(&__ip_vs_svc_lock);
#ifdef CONFIG_IP_VS_IPV6
out:
#endif
/* Drop the reference on whichever scheduler is no longer in use. */
if (old_sched)
ip_vs_scheduler_put(old_sched);
return ret;
}
| 8,605 |
96,847 | 0 | SYSCALL_DEFINE1(pipe, int __user *, fildes)
{
/* Classic pipe(2): identical to pipe2(fildes, 0), i.e. no flags. */
return do_pipe2(fildes, 0);
}
| 8,606 |
70,014 | 0 | void dictListDestructor(void *privdata, void *val)
{
/* dict value destructor for entries whose values are `list *`:
 * releases the list (and its nodes) when the entry is removed. */
list *lst = (list *) val;

DICT_NOTUSED(privdata);
listRelease(lst);
}
| 8,607 |
142,253 | 0 | void ManualFillingControllerImpl::Hide(FillingSource source) {
// Remove |source| from the set of sources requesting the filling UI and
// dismiss the view once no source is left. Autofill-driven hides are
// ignored unless the keyboard-accessory feature is enabled, because the
// accessory only tracks Autofill visibility in that configuration.
if (source == FillingSource::AUTOFILL &&
!base::FeatureList::IsEnabled(
autofill::features::kAutofillKeyboardAccessory)) {
return;
}
visible_sources_.erase(source);
if (visible_sources_.empty())
view_->Hide();
}
| 8,608 |
150,714 | 0 | base::string16 GetSoundSettingString(ContentSetting setting) {
// Map a sound ContentSetting to the user-visible Page Info string,
// treating the setting as user-sourced.
return PageInfoUI::PermissionActionToUIString(
profile(), CONTENT_SETTINGS_TYPE_SOUND, setting, default_setting_,
content_settings::SettingSource::SETTING_SOURCE_USER);
}
| 8,609 |
120,144 | 0 | static void RunCopyCallbackOnMainThread(scoped_ptr<CopyOutputRequest> request,
scoped_ptr<CopyOutputResult> result) {
// Deliver a finished copy-output result to its request; ownership of the
// result transfers to the request via Pass().
request->SendResult(result.Pass());
}
| 8,610 |
1,090 | 0 | GfxColorSpace *GfxColorSpace::parse(Object *csObj) {
// Parse a PDF color space object, which is either a bare name (the device
// spaces and Pattern) or an array whose first element names the family.
// Returns a newly allocated GfxColorSpace (caller owns it), or NULL on a
// malformed object; errors are reported through error().
GfxColorSpace *cs;
Object obj1;
cs = NULL;
if (csObj->isName()) {
if (csObj->isName("DeviceGray") || csObj->isName("G")) {
cs = new GfxDeviceGrayColorSpace();
} else if (csObj->isName("DeviceRGB") || csObj->isName("RGB")) {
cs = new GfxDeviceRGBColorSpace();
} else if (csObj->isName("DeviceCMYK") || csObj->isName("CMYK")) {
cs = new GfxDeviceCMYKColorSpace();
} else if (csObj->isName("Pattern")) {
cs = new GfxPatternColorSpace(NULL);
} else {
error(-1, "Bad color space '%s'", csObj->getName());
}
} else if (csObj->isArray()) {
// The family name is the first array element; the family-specific
// parse() consumes the rest of the array.
csObj->arrayGet(0, &obj1);
if (obj1.isName("DeviceGray") || obj1.isName("G")) {
cs = new GfxDeviceGrayColorSpace();
} else if (obj1.isName("DeviceRGB") || obj1.isName("RGB")) {
cs = new GfxDeviceRGBColorSpace();
} else if (obj1.isName("DeviceCMYK") || obj1.isName("CMYK")) {
cs = new GfxDeviceCMYKColorSpace();
} else if (obj1.isName("CalGray")) {
cs = GfxCalGrayColorSpace::parse(csObj->getArray());
} else if (obj1.isName("CalRGB")) {
cs = GfxCalRGBColorSpace::parse(csObj->getArray());
} else if (obj1.isName("Lab")) {
cs = GfxLabColorSpace::parse(csObj->getArray());
} else if (obj1.isName("ICCBased")) {
cs = GfxICCBasedColorSpace::parse(csObj->getArray());
} else if (obj1.isName("Indexed") || obj1.isName("I")) {
cs = GfxIndexedColorSpace::parse(csObj->getArray());
} else if (obj1.isName("Separation")) {
cs = GfxSeparationColorSpace::parse(csObj->getArray());
} else if (obj1.isName("DeviceN")) {
cs = GfxDeviceNColorSpace::parse(csObj->getArray());
} else if (obj1.isName("Pattern")) {
cs = GfxPatternColorSpace::parse(csObj->getArray());
} else {
error(-1, "Bad color space");
}
obj1.free();
} else {
error(-1, "Bad color space - expected name or array");
}
return cs;
}
| 8,611 |
63,152 | 0 | float MSG_ReadDeltaKeyFloat( msg_t *msg, int key, float oldV ) {
/* Delta-compressed float read: a leading 0 bit means "unchanged, reuse
 * oldV"; a 1 bit means a full 32-bit value follows, XORed with the
 * per-message delta key for obfuscation. */
floatint_t fi;

if ( !MSG_ReadBits( msg, 1 ) ) {
return oldV;
}
fi.i = MSG_ReadBits( msg, 32 ) ^ key;
return fi.f;
}
| 8,612 |
51,429 | 0 | int gdImageColorResolveAlpha (gdImagePtr im, int r, int g, int b, int a)
{
/* Resolve an RGBA color for image `im`: truecolor images just pack the
 * components; palette images return an exact palette match if one
 * exists, otherwise allocate the color in a free/open slot, and only if
 * the palette is full fall back to the closest existing color by
 * squared Euclidean distance in RGBA space. */
int c;
int ct = -1;
int op = -1;
long rd, gd, bd, ad, dist;
long mindist = 4 * 255 * 255; /* init to max poss dist */
if (im->trueColor)
{
return gdTrueColorAlpha (r, g, b, a);
}
for (c = 0; c < im->colorsTotal; c++)
{
if (im->open[c])
{
op = c; /* Save open slot */
continue; /* Color not in use */
}
if (c == im->transparent)
{
/* don't ever resolve to the color that has
* been designated as the transparent color */
continue;
}
rd = (long) (im->red[c] - r);
gd = (long) (im->green[c] - g);
bd = (long) (im->blue[c] - b);
ad = (long) (im->alpha[c] - a);
dist = rd * rd + gd * gd + bd * bd + ad * ad;
if (dist < mindist)
{
if (dist == 0)
{
return c; /* Return exact match color */
}
mindist = dist;
ct = c;
}
}
/* no exact match. We now know closest, but first try to allocate exact */
if (op == -1)
{
op = im->colorsTotal;
if (op == gdMaxColors)
{ /* No room for more colors */
return ct; /* Return closest available color */
}
im->colorsTotal++;
}
/* Claim the slot and store the exact requested color. */
im->red[op] = r;
im->green[op] = g;
im->blue[op] = b;
im->alpha[op] = a;
im->open[op] = 0;
return op; /* Return newly allocated color */
}
| 8,613 |
39,283 | 0 | int security_mls_enabled(void)
{
/* Report whether the loaded SELinux policy has MLS support enabled. */
return policydb.mls_enabled;
}
| 8,614 |
140,347 | 0 | void Editor::pasteAsPlainText(EditorCommandSource source) {
// Paste clipboard contents as plain text. A DHTML paste handler gets the
// first chance to consume the event; otherwise bail if pasting is not
// currently allowed, refresh spelling markers around the edit, and insert
// text from the general pasteboard.
if (tryDHTMLPaste(PlainTextOnly))
return;
if (!canPaste())
return;
spellChecker().updateMarkersForWordsAffectedByEditing(false);
pasteAsPlainTextWithPasteboard(Pasteboard::generalPasteboard());
}
| 8,615 |
164,404 | 0 | PermissionsData::PageAccess PermissionsData::CanRunOnPage(
const GURL& document_url,
int tab_id,
const URLPatternSet& permitted_url_patterns,
const URLPatternSet& withheld_url_patterns,
const URLPatternSet* tab_url_patterns,
std::string* error) const {
// Decide whether the extension may run on |document_url|.
// Precedence: policy-blocked hosts (non-component extensions) and
// restricted URLs deny; tab-specific grants, then regular permitted
// patterns allow; withheld patterns yield kWithheld; anything else is
// denied with an explanatory |error| when requested. Caller must hold
// runtime_lock_.
runtime_lock_.AssertAcquired();
if (location_ != Manifest::COMPONENT &&
IsPolicyBlockedHostUnsafe(document_url)) {
if (error)
*error = extension_misc::kPolicyBlockedScripting;
return PageAccess::kDenied;
}
if (IsRestrictedUrl(document_url, error))
return PageAccess::kDenied;
if (tab_url_patterns && tab_url_patterns->MatchesURL(document_url))
return PageAccess::kAllowed;
if (permitted_url_patterns.MatchesURL(document_url))
return PageAccess::kAllowed;
if (withheld_url_patterns.MatchesURL(document_url))
return PageAccess::kWithheld;
if (error) {
// With the "tabs" permission the URL may be mentioned in the message;
// without it, keep the error generic to avoid leaking the URL.
if (active_permissions_unsafe_->HasAPIPermission(APIPermission::kTab)) {
*error = ErrorUtils::FormatErrorMessage(
manifest_errors::kCannotAccessPageWithUrl, document_url.spec());
} else {
*error = manifest_errors::kCannotAccessPage;
}
}
return PageAccess::kDenied;
}
| 8,616 |
180,514 | 1 | static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
struct request_sock *req,
struct dst_entry *dst,
struct request_sock *req_unhash,
bool *own_req)
{
/* Create the child socket for an accepted IPv6 TCP connection.
 * For a v6-mapped v4 connection the work is delegated to
 * tcp_v4_syn_recv_sock and the child is then re-pointed at the mapped
 * icsk_af_ops / backlog handlers. Otherwise a native IPv6 child is
 * built: route resolved, IPv6 options and RX flags cloned from the
 * listener, MD5 key copied if present, port inherited and the socket
 * hashed. Returns the new sock, or NULL on failure (accept queue full,
 * no route, allocation failure, or port-inherit failure). */
struct inet_request_sock *ireq;
struct ipv6_pinfo *newnp;
const struct ipv6_pinfo *np = inet6_sk(sk);
struct tcp6_sock *newtcp6sk;
struct inet_sock *newinet;
struct tcp_sock *newtp;
struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
struct tcp_md5sig_key *key;
#endif
struct flowi6 fl6;
if (skb->protocol == htons(ETH_P_IP)) {
/*
* v6 mapped
*/
newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
req_unhash, own_req);
if (!newsk)
return NULL;
newtcp6sk = (struct tcp6_sock *)newsk;
inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
newinet = inet_sk(newsk);
newnp = inet6_sk(newsk);
newtp = tcp_sk(newsk);
memcpy(newnp, np, sizeof(struct ipv6_pinfo));
newnp->saddr = newsk->sk_v6_rcv_saddr;
/* The child talks IPv4 internally even though the API is IPv6. */
inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif
newnp->ipv6_ac_list = NULL;
newnp->ipv6_fl_list = NULL;
newnp->pktoptions = NULL;
newnp->opt = NULL;
newnp->mcast_oif = tcp_v6_iif(skb);
newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
if (np->repflow)
newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
/*
* No need to charge this sock to the relevant IPv6 refcnt debug socks count
* here, tcp_create_openreq_child now does this for us, see the comment in
* that function for the gory details. -acme
*/
/* It is tricky place. Until this moment IPv4 tcp
worked with IPv6 icsk.icsk_af_ops.
Sync it now.
*/
tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
return newsk;
}
ireq = inet_rsk(req);
if (sk_acceptq_is_full(sk))
goto out_overflow;
if (!dst) {
dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
if (!dst)
goto out;
}
newsk = tcp_create_openreq_child(sk, req, skb);
if (!newsk)
goto out_nonewsk;
/*
* No need to charge this sock to the relevant IPv6 refcnt debug socks
* count here, tcp_create_openreq_child now does this for us, see the
* comment in that function for the gory details. -acme
*/
newsk->sk_gso_type = SKB_GSO_TCPV6;
__ip6_dst_store(newsk, dst, NULL, NULL);
inet6_sk_rx_dst_set(newsk, skb);
newtcp6sk = (struct tcp6_sock *)newsk;
inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
newtp = tcp_sk(newsk);
newinet = inet_sk(newsk);
newnp = inet6_sk(newsk);
memcpy(newnp, np, sizeof(struct ipv6_pinfo));
newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
newnp->saddr = ireq->ir_v6_loc_addr;
newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
newsk->sk_bound_dev_if = ireq->ir_iif;
/* Now IPv6 options...
First: no IPv4 options.
*/
newinet->inet_opt = NULL;
newnp->ipv6_ac_list = NULL;
newnp->ipv6_fl_list = NULL;
/* Clone RX bits */
newnp->rxopt.all = np->rxopt.all;
newnp->pktoptions = NULL;
newnp->opt = NULL;
newnp->mcast_oif = tcp_v6_iif(skb);
newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
if (np->repflow)
newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
/* Clone native IPv6 options from listening socket (if any)
Yes, keeping reference count would be much more clever,
but we make one more one thing there: reattach optmem
to newsk.
*/
if (np->opt)
newnp->opt = ipv6_dup_options(newsk, np->opt);
inet_csk(newsk)->icsk_ext_hdr_len = 0;
if (newnp->opt)
inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
newnp->opt->opt_flen);
tcp_ca_openreq_child(newsk, dst);
tcp_sync_mss(newsk, dst_mtu(dst));
newtp->advmss = dst_metric_advmss(dst);
if (tcp_sk(sk)->rx_opt.user_mss &&
tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
tcp_initialize_rcv_mss(newsk);
newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
#ifdef CONFIG_TCP_MD5SIG
/* Copy over the MD5 key from the original socket */
key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
if (key) {
/* We're using one, so create a matching key
* on the newsk structure. If we fail to get
* memory, then we end up not copying the key
* across. Shucks.
*/
tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
AF_INET6, key->key, key->keylen,
sk_gfp_atomic(sk, GFP_ATOMIC));
}
#endif
if (__inet_inherit_port(sk, newsk) < 0) {
inet_csk_prepare_forced_close(newsk);
tcp_done(newsk);
goto out;
}
*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
if (*own_req) {
tcp_move_syn(newtp, req);
/* Clone pktoptions received with SYN, if we own the req */
if (ireq->pktopts) {
newnp->pktoptions = skb_clone(ireq->pktopts,
sk_gfp_atomic(sk, GFP_ATOMIC));
consume_skb(ireq->pktopts);
ireq->pktopts = NULL;
if (newnp->pktoptions)
skb_set_owner_r(newnp->pktoptions, newsk);
}
}
return newsk;
out_overflow:
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
dst_release(dst);
out:
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
return NULL;
}
| 8,617 |
31,662 | 0 | static int intel_alt_er(int idx)
{
if (!(x86_pmu.er_flags & ERF_HAS_RSP_1))
return idx;
if (idx == EXTRA_REG_RSP_0)
return EXTRA_REG_RSP_1;
if (idx == EXTRA_REG_RSP_1)
return EXTRA_REG_RSP_0;
return idx;
}
| 8,618 |
102,111 | 0 | SyncBackendHost::Core::DoInitializeOptions::~DoInitializeOptions() {}  // Out-of-line destructor; intentionally empty.
| 8,619 |
113,132 | 0 | DownloadManagerTest()
// Test fixture constructor: runs both the UI and FILE BrowserThreads on
// the single test message loop, and starts download ids at 0.
: ui_thread_(content::BrowserThread::UI, &message_loop_),
file_thread_(content::BrowserThread::FILE, &message_loop_),
next_download_id_(0) {
}
| 8,620 |
76,607 | 0 | static int fsck_msg_type(enum fsck_msg_id msg_id,
struct fsck_options *options)
{
int msg_type;
assert(msg_id >= 0 && msg_id < FSCK_MSG_MAX);
if (options->msg_type)
msg_type = options->msg_type[msg_id];
else {
msg_type = msg_id_info[msg_id].msg_type;
if (options->strict && msg_type == FSCK_WARN)
msg_type = FSCK_ERROR;
}
return msg_type;
}
| 8,621 |
168,945 | 0 | void DevToolsSession::AttachToAgent(
const blink::mojom::DevToolsAgentAssociatedPtr& agent) {
// (Re)attach this session to a DevTools agent over Mojo, wiring up the
// session/IO-session pipes and a connection-error handler. Messages that
// were awaiting responses are either re-dispatched immediately or, while
// sending is suspended, moved to the front of the suspended queue so
// ordering is preserved.
blink::mojom::DevToolsSessionHostAssociatedPtrInfo host_ptr_info;
binding_.Bind(mojo::MakeRequest(&host_ptr_info));
agent->AttachDevToolsSession(
std::move(host_ptr_info), mojo::MakeRequest(&session_ptr_),
mojo::MakeRequest(&io_session_ptr_), state_cookie_);
session_ptr_.set_connection_error_handler(base::BindOnce(
&DevToolsSession::MojoConnectionDestroyed, base::Unretained(this)));
if (!suspended_sending_messages_to_agent_) {
// Replay every in-flight message to the freshly attached agent.
for (const auto& pair : waiting_for_response_messages_) {
int call_id = pair.first;
const WaitingMessage& message = pair.second;
DispatchProtocolMessageToAgent(call_id, message.method, message.message);
}
} else {
// Sending is suspended: requeue the in-flight messages ahead of any
// already-suspended ones.
std::vector<SuspendedMessage> temp;
for (const auto& pair : waiting_for_response_messages_)
temp.push_back({pair.first, pair.second.method, pair.second.message});
suspended_messages_.insert(suspended_messages_.begin(), temp.begin(),
temp.end());
waiting_for_response_messages_.clear();
}
// After the first attach, sessions are stateful (non-null cookie).
if (!state_cookie_.has_value())
state_cookie_ = std::string();
}
| 8,622 |
179,189 | 1 | static int udp_v6_push_pending_frames(struct sock *sk)
{
/* Finalize and transmit the UDP datagram accumulated on the socket's
 * write queue: fill in the UDP header from the cork flow, compute the
 * checksum (UDP-Lite, hardware-offloaded, or software) and push the
 * pending frames. ENOBUFS is suppressed unless the socket asked for
 * error reporting (recverr). Per-socket pending state is always reset.
 * NOTE(review): the flow is taken from inet->cork.fl — confirm callers
 * never reach here on a corked AF_INET path where that union holds an
 * IPv4 flow. */
struct sk_buff *skb;
struct udphdr *uh;
struct udp_sock *up = udp_sk(sk);
struct inet_sock *inet = inet_sk(sk);
struct flowi6 *fl6 = &inet->cork.fl.u.ip6;
int err = 0;
int is_udplite = IS_UDPLITE(sk);
__wsum csum = 0;
/* Grab the skbuff where UDP header space exists. */
if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
goto out;
/*
* Create a UDP header
*/
uh = udp_hdr(skb);
uh->source = fl6->fl6_sport;
uh->dest = fl6->fl6_dport;
uh->len = htons(up->len);
uh->check = 0;
if (is_udplite)
csum = udplite_csum_outgoing(sk, skb);
else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr,
up->len);
goto send;
} else
csum = udp_csum_outgoing(sk, skb);
/* add protocol-dependent pseudo-header */
uh->check = csum_ipv6_magic(&fl6->saddr, &fl6->daddr,
up->len, fl6->flowi6_proto, csum);
/* An all-zero checksum means "no checksum" in UDP; substitute the
 * mangled representation of zero instead. */
if (uh->check == 0)
uh->check = CSUM_MANGLED_0;
send:
err = ip6_push_pending_frames(sk);
if (err) {
if (err == -ENOBUFS && !inet6_sk(sk)->recverr) {
UDP6_INC_STATS_USER(sock_net(sk),
UDP_MIB_SNDBUFERRORS, is_udplite);
err = 0;
}
} else
UDP6_INC_STATS_USER(sock_net(sk),
UDP_MIB_OUTDATAGRAMS, is_udplite);
out:
up->len = 0;
up->pending = 0;
return err;
}
| 8,623 |
155,583 | 0 | base::string16 AttestationPermissionRequestSheetModel::GetStepDescription()
const {
// Localized description for the WebAuthn attestation-permission step,
// parameterized with the relying party id from the dialog model.
return l10n_util::GetStringFUTF16(
IDS_WEBAUTHN_REQUEST_ATTESTATION_PERMISSION_DESC,
GetRelyingPartyIdString(dialog_model()));
}
| 8,624 |
188,203 | 1 | void SoftHEVC::onQueueFilled(OMX_U32 portIndex) {
// Main decode loop of the OMX HEVC component: drains the input/output
// port queues, feeds access units to the ittiam decoder, propagates
// timestamps through a small slot table, and handles EOS, flush and
// dynamic resolution changes. Bails out early on prior errors, pending
// port reconfiguration, or decoder-init failure.
UNUSED(portIndex);
if (mSignalledError) {
return;
}
if (mOutputPortSettingsChange != NONE) {
return;
}
if (NULL == mCodecCtx) {
if (OK != initDecoder()) {
return;
}
}
if (outputBufferWidth() != mStride) {
/* Set the run-time (dynamic) parameters */
mStride = outputBufferWidth();
setParams(mStride);
}
List<BufferInfo *> &inQueue = getPortQueue(kInputPortIndex);
List<BufferInfo *> &outQueue = getPortQueue(kOutputPortIndex);
/* If input EOS is seen and decoder is not in flush mode,
* set the decoder in flush mode.
* There can be a case where EOS is sent along with last picture data
* In that case, only after decoding that input data, decoder has to be
* put in flush. This case is handled here */
if (mReceivedEOS && !mIsInFlush) {
setFlushMode();
}
while (!outQueue.empty()) {
BufferInfo *inInfo;
OMX_BUFFERHEADERTYPE *inHeader;
BufferInfo *outInfo;
OMX_BUFFERHEADERTYPE *outHeader;
size_t timeStampIx;
inInfo = NULL;
inHeader = NULL;
// While flushing, the decoder is drained without consuming input.
if (!mIsInFlush) {
if (!inQueue.empty()) {
inInfo = *inQueue.begin();
inHeader = inInfo->mHeader;
} else {
break;
}
}
outInfo = *outQueue.begin();
outHeader = outInfo->mHeader;
outHeader->nFlags = 0;
outHeader->nTimeStamp = 0;
outHeader->nOffset = 0;
if (inHeader != NULL && (inHeader->nFlags & OMX_BUFFERFLAG_EOS)) {
mReceivedEOS = true;
// An empty EOS buffer carries no picture data; return it now and
// switch straight to flush mode.
if (inHeader->nFilledLen == 0) {
inQueue.erase(inQueue.begin());
inInfo->mOwnedByUs = false;
notifyEmptyBufferDone(inHeader);
inHeader = NULL;
setFlushMode();
}
}
/* Get a free slot in timestamp array to hold input timestamp */
{
size_t i;
timeStampIx = 0;
for (i = 0; i < MAX_TIME_STAMPS; i++) {
if (!mTimeStampsValid[i]) {
timeStampIx = i;
break;
}
}
if (inHeader != NULL) {
mTimeStampsValid[timeStampIx] = true;
mTimeStamps[timeStampIx] = inHeader->nTimeStamp;
}
}
{
ivd_video_decode_ip_t s_dec_ip;
ivd_video_decode_op_t s_dec_op;
WORD32 timeDelay, timeTaken;
size_t sizeY, sizeUV;
setDecodeArgs(&s_dec_ip, &s_dec_op, inHeader, outHeader, timeStampIx);
GETTIME(&mTimeStart, NULL);
/* Compute time elapsed between end of previous decode()
* to start of current decode() */
TIME_DIFF(mTimeEnd, mTimeStart, timeDelay);
IV_API_CALL_STATUS_T status;
status = ivdec_api_function(mCodecCtx, (void *)&s_dec_ip, (void *)&s_dec_op);
bool resChanged = (IVD_RES_CHANGED == (s_dec_op.u4_error_code & 0xFF));
GETTIME(&mTimeEnd, NULL);
/* Compute time taken for decode() */
TIME_DIFF(mTimeStart, mTimeEnd, timeTaken);
ALOGV("timeTaken=%6d delay=%6d numBytes=%6d", timeTaken, timeDelay,
s_dec_op.u4_num_bytes_consumed);
if (s_dec_op.u4_frame_decoded_flag && !mFlushNeeded) {
mFlushNeeded = true;
}
if ((inHeader != NULL) && (1 != s_dec_op.u4_frame_decoded_flag)) {
/* If the input did not contain picture data, then ignore
* the associated timestamp */
mTimeStampsValid[timeStampIx] = false;
}
// If the decoder is in the changing resolution mode and there is no output present,
// that means the switching is done and it's ready to reset the decoder and the plugin.
if (mChangingResolution && !s_dec_op.u4_output_present) {
mChangingResolution = false;
resetDecoder();
resetPlugin();
continue;
}
if (resChanged) {
mChangingResolution = true;
if (mFlushNeeded) {
setFlushMode();
}
continue;
}
if ((0 < s_dec_op.u4_pic_wd) && (0 < s_dec_op.u4_pic_ht)) {
uint32_t width = s_dec_op.u4_pic_wd;
uint32_t height = s_dec_op.u4_pic_ht;
bool portWillReset = false;
handlePortSettingsChange(&portWillReset, width, height);
if (portWillReset) {
resetDecoder();
return;
}
}
if (s_dec_op.u4_output_present) {
// A picture was produced: size it for the current output port
// dimensions and hand it to the client with its saved timestamp.
outHeader->nFilledLen = (outputBufferWidth() * outputBufferHeight() * 3) / 2;
outHeader->nTimeStamp = mTimeStamps[s_dec_op.u4_ts];
mTimeStampsValid[s_dec_op.u4_ts] = false;
outInfo->mOwnedByUs = false;
outQueue.erase(outQueue.begin());
outInfo = NULL;
notifyFillBufferDone(outHeader);
outHeader = NULL;
} else {
/* If in flush mode and no output is returned by the codec,
* then come out of flush mode */
mIsInFlush = false;
/* If EOS was recieved on input port and there is no output
* from the codec, then signal EOS on output port */
if (mReceivedEOS) {
outHeader->nFilledLen = 0;
outHeader->nFlags |= OMX_BUFFERFLAG_EOS;
outInfo->mOwnedByUs = false;
outQueue.erase(outQueue.begin());
outInfo = NULL;
notifyFillBufferDone(outHeader);
outHeader = NULL;
resetPlugin();
}
}
}
// TODO: Handle more than one picture data
if (inHeader != NULL) {
inInfo->mOwnedByUs = false;
inQueue.erase(inQueue.begin());
inInfo = NULL;
notifyEmptyBufferDone(inHeader);
inHeader = NULL;
}
}
}
| 8,625 |
17,838 | 0 | rsa_sign (gcry_sexp_t *r_sig, gcry_sexp_t s_data, gcry_sexp_t keyparms)
{
/* Produce an RSA signature over S_DATA with the secret key in KEYPARMS,
 * returning the result as an s-expression in R_SIG. The private-key
 * operation is blinded unless PUBKEY_FLAG_NO_BLINDING is set, and the
 * signature is verified against the public key before being returned to
 * defend against CRT fault attacks (Lenstra). All MPIs are released on
 * every exit path. Returns a gpg error code. */
gpg_err_code_t rc;
struct pk_encoding_ctx ctx;
gcry_mpi_t data = NULL;
RSA_secret_key sk = {NULL, NULL, NULL, NULL, NULL, NULL};
RSA_public_key pk;
gcry_mpi_t sig = NULL;
gcry_mpi_t result = NULL;
_gcry_pk_util_init_encoding_ctx (&ctx, PUBKEY_OP_SIGN,
rsa_get_nbits (keyparms));
/* Extract the data. */
rc = _gcry_pk_util_data_to_mpi (s_data, &data, &ctx);
if (rc)
goto leave;
if (DBG_CIPHER)
log_printmpi ("rsa_sign data", data);
if (mpi_is_opaque (data))
{
rc = GPG_ERR_INV_DATA;
goto leave;
}
/* Extract the key. */
rc = sexp_extract_param (keyparms, NULL, "nedp?q?u?",
&sk.n, &sk.e, &sk.d, &sk.p, &sk.q, &sk.u,
NULL);
if (rc)
goto leave;
if (DBG_CIPHER)
{
log_printmpi ("rsa_sign n", sk.n);
log_printmpi ("rsa_sign e", sk.e);
/* Never log secret-key components in FIPS mode. */
if (!fips_mode ())
{
log_printmpi ("rsa_sign d", sk.d);
log_printmpi ("rsa_sign p", sk.p);
log_printmpi ("rsa_sign q", sk.q);
log_printmpi ("rsa_sign u", sk.u);
}
}
/* Do RSA computation. */
sig = mpi_new (0);
if ((ctx.flags & PUBKEY_FLAG_NO_BLINDING))
secret (sig, data, &sk);
else
secret_blinded (sig, data, &sk, ctx.nbits);
if (DBG_CIPHER)
log_printmpi ("rsa_sign res", sig);
/* Check that the created signature is good. This detects a failure
of the CRT algorithm (Lenstra's attack on RSA's use of the CRT). */
result = mpi_new (0);
pk.n = sk.n;
pk.e = sk.e;
public (result, sig, &pk);
if (mpi_cmp (result, data))
{
rc = GPG_ERR_BAD_SIGNATURE;
goto leave;
}
/* Convert the result. */
if ((ctx.flags & PUBKEY_FLAG_FIXEDLEN))
{
/* We need to make sure to return the correct length to avoid
problems with missing leading zeroes. */
unsigned char *em;
size_t emlen = (mpi_get_nbits (sk.n)+7)/8;
rc = _gcry_mpi_to_octet_string (&em, NULL, sig, emlen);
if (!rc)
{
rc = sexp_build (r_sig, NULL, "(sig-val(rsa(s%b)))", (int)emlen, em);
xfree (em);
}
}
else
rc = sexp_build (r_sig, NULL, "(sig-val(rsa(s%M)))", sig);
leave:
_gcry_mpi_release (result);
_gcry_mpi_release (sig);
_gcry_mpi_release (sk.n);
_gcry_mpi_release (sk.e);
_gcry_mpi_release (sk.d);
_gcry_mpi_release (sk.p);
_gcry_mpi_release (sk.q);
_gcry_mpi_release (sk.u);
_gcry_mpi_release (data);
_gcry_pk_util_free_encoding_ctx (&ctx);
if (DBG_CIPHER)
log_debug ("rsa_sign => %s\n", gpg_strerror (rc));
return rc;
}
| 8,626 |
119,601 | 0 | bool endLineMatched() const { return m_endLineMatched; }  // Accessor for the cached m_endLineMatched flag.
| 8,627 |
142,953 | 0 | double HTMLMediaElement::duration() const {
// Accessor for the cached duration_ value; updated elsewhere.
return duration_;
}
| 8,628 |
23,460 | 0 | static void nfs4_xdr_enc_exchange_id(struct rpc_rqst *req,
struct xdr_stream *xdr,
struct nfs41_exchange_id_args *args)
{
/* XDR-encode an NFSv4.1 EXCHANGE_ID compound: header (with the client's
 * minor version), the EXCHANGE_ID operation itself, then the op count. */
struct compound_hdr hdr = {
.minorversion = args->client->cl_mvops->minor_version,
};
encode_compound_hdr(xdr, req, &hdr);
encode_exchange_id(xdr, args, &hdr);
encode_nops(&hdr);
}
| 8,629 |
39,939 | 0 | static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
/* splice_pipe_desc release callback: drop the reference on page i. */
put_page(spd->pages[i]);
}
| 8,630 |
174,254 | 0 | status_t Camera3Device::RequestThread::queueTrigger(
RequestTrigger trigger[],
size_t count) {
// Queue `count` triggers under the trigger mutex; stops at (and returns)
// the first failure from queueTriggerLocked, so earlier triggers in the
// array may already be queued when an error is reported.
Mutex::Autolock l(mTriggerMutex);
status_t ret;
for (size_t i = 0; i < count; ++i) {
ret = queueTriggerLocked(trigger[i]);
if (ret != OK) {
return ret;
}
}
return OK;
}
| 8,631 |
188,560 | 1 | void EncoderTest::InitializeConfig() {
// Load the codec's default encoder configuration into cfg_; a failed
// query aborts the current test via the gtest fatal assertion.
const vpx_codec_err_t res = codec_->DefaultEncoderConfig(&cfg_, 0);
ASSERT_EQ(VPX_CODEC_OK, res);
}
| 8,632 |
52,214 | 0 | PHP_FUNCTION(locale_parse)
{
/* PHP binding Locale::parseLocale(): split a locale identifier into its
 * subtags. Grandfathered tags are returned whole under the
 * "grandfathered" key; otherwise the language/script/region/variant/
 * private subtags are extracted individually. An empty input falls back
 * to the runtime default locale. Returns FALSE on bad parameters. */
const char* loc_name = NULL;
int loc_name_len = 0;
int grOffset = 0;
intl_error_reset( NULL TSRMLS_CC );
if(zend_parse_parameters( ZEND_NUM_ARGS() TSRMLS_CC, "s",
&loc_name, &loc_name_len ) == FAILURE)
{
intl_error_set( NULL, U_ILLEGAL_ARGUMENT_ERROR,
"locale_parse: unable to parse input params", 0 TSRMLS_CC );
RETURN_FALSE;
}
if(loc_name_len == 0) {
loc_name = intl_locale_get_default(TSRMLS_C);
}
array_init( return_value );
grOffset = findOffset( LOC_GRANDFATHERED , loc_name );
if( grOffset >= 0 ){
add_assoc_string( return_value , LOC_GRANDFATHERED_LANG_TAG , estrdup(loc_name) ,FALSE );
}
else{
/* Not grandfathered */
add_array_entry( loc_name , return_value , LOC_LANG_TAG TSRMLS_CC);
add_array_entry( loc_name , return_value , LOC_SCRIPT_TAG TSRMLS_CC);
add_array_entry( loc_name , return_value , LOC_REGION_TAG TSRMLS_CC);
add_array_entry( loc_name , return_value , LOC_VARIANT_TAG TSRMLS_CC);
add_array_entry( loc_name , return_value , LOC_PRIVATE_TAG TSRMLS_CC);
}
}
| 8,633 |
52,367 | 0 | ip6t_get_target_c(const struct ip6t_entry *e)
{
/* const-accepting wrapper around ip6t_get_target(); the const is cast
 * away only to reuse the non-const accessor — callers must not modify
 * the returned target through this path. */
return ip6t_get_target((struct ip6t_entry *)e);
}
| 8,634 |
182,410 | 1 | void mp_decode_to_lua_array(lua_State *L, mp_cur *c, size_t len) {
/* Decode a MessagePack array of `len` elements from cursor `c` into a
 * new Lua table pushed on the stack, using 1-based indices. Decoding
 * stops as soon as the element decoder sets c->err.
 * NOTE(review): `len` typically comes from untrusted input and there is
 * no lua_checkstack() before the per-element pushes — verify that the
 * element decoder bounds stack growth/recursion, otherwise a hostile
 * payload with a huge or deeply nested array could overflow the Lua
 * stack. */
assert(len <= UINT_MAX);
int index = 1;
lua_newtable(L);
while(len--) {
lua_pushnumber(L,index++);
mp_decode_to_lua_type(L,c);
if (c->err) return;
lua_settable(L,-3);
}
}
| 8,635 |
9,911 | 0 | void Part::selectionChanged()
{
// Keep the info panel in sync with the rows currently selected in the
// archive view.
m_infoPanel->setIndexes(m_view->selectionModel()->selectedRows());
}
| 8,636 |
72,146 | 0 | _set_batch_job_limits(slurm_msg_t *msg)
{
/* Derive per-node resource limits for a batch launch request from its
 * job credential: re-assert the credential's core-spec setting and
 * compute req->job_mem, scaling a per-CPU memory limit by the number of
 * CPUs allocated on this node (counted from the credential's core
 * bitmap, adjusted for CPUs-per-core). Optionally logs the allocation
 * when the CPU_BIND debug flag is set. */
int i;
uint32_t alloc_lps = 0, last_bit = 0;
bool cpu_log = slurm_get_debug_flags() & DEBUG_FLAG_CPU_BIND;
slurm_cred_arg_t arg;
batch_job_launch_msg_t *req = (batch_job_launch_msg_t *)msg->data;
if (slurm_cred_get_args(req->cred, &arg) != SLURM_SUCCESS)
return;
req->job_core_spec = arg.job_core_spec; /* Prevent user reset */
if (cpu_log) {
char *per_job = "";
uint32_t job_mem = arg.job_mem_limit;
if (job_mem & MEM_PER_CPU) {
job_mem &= (~MEM_PER_CPU);
per_job = "_per_CPU";
}
info("====================");
info("batch_job:%u job_mem:%uMB%s", req->job_id,
job_mem, per_job);
}
if (cpu_log || (arg.job_mem_limit & MEM_PER_CPU)) {
/* Count cores allocated to the job on the first node. */
if (arg.job_nhosts > 0) {
last_bit = arg.sockets_per_node[0] *
arg.cores_per_socket[0];
for (i=0; i<last_bit; i++) {
if (!bit_test(arg.job_core_bitmap, i))
continue;
if (cpu_log)
info("JobNode[0] CPU[%u] Job alloc",i);
alloc_lps++;
}
}
if (cpu_log)
info("====================");
if (alloc_lps == 0) {
error("_set_batch_job_limit: alloc_lps is zero");
alloc_lps = 1;
}
/* NOTE: alloc_lps is the count of allocated resources
* (typically cores). Convert to CPU count as needed */
if (last_bit < 1)
error("Batch job credential allocates no CPUs");
else {
i = conf->cpus / last_bit;
if (i > 1)
alloc_lps *= i;
}
}
if (arg.job_mem_limit & MEM_PER_CPU) {
/* Per-CPU limit: multiply by CPUs allocated on this node. */
req->job_mem = arg.job_mem_limit & (~MEM_PER_CPU);
req->job_mem *= alloc_lps;
} else
req->job_mem = arg.job_mem_limit;
slurm_cred_free_args(&arg);
}
| 8,637 |
52,909 | 0 | int uverbs_dealloc_mw(struct ib_mw *mw)
{
/* Destroy a memory window via its device's dealloc_mw verb and, on
 * success, drop the usage count it held on its protection domain. */
struct ib_pd *pd = mw->pd;
int ret;
ret = mw->device->dealloc_mw(mw);
if (!ret)
atomic_dec(&pd->usecnt);
return ret;
}
| 8,638 |
86,373 | 0 | int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, unsigned int flags)
{
pte_t *ptep, entry;
spinlock_t *ptl;
int ret;
u32 hash;
pgoff_t idx;
struct page *page = NULL;
struct page *pagecache_page = NULL;
struct hstate *h = hstate_vma(vma);
struct address_space *mapping;
int need_wait_lock = 0;
address &= huge_page_mask(h);
ptep = huge_pte_offset(mm, address, huge_page_size(h));
if (ptep) {
entry = huge_ptep_get(ptep);
if (unlikely(is_hugetlb_entry_migration(entry))) {
migration_entry_wait_huge(vma, mm, ptep);
return 0;
} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
return VM_FAULT_HWPOISON_LARGE |
VM_FAULT_SET_HINDEX(hstate_index(h));
} else {
ptep = huge_pte_alloc(mm, address, huge_page_size(h));
if (!ptep)
return VM_FAULT_OOM;
}
mapping = vma->vm_file->f_mapping;
idx = vma_hugecache_offset(h, vma, address);
/*
* Serialize hugepage allocation and instantiation, so that we don't
* get spurious allocation failures if two CPUs race to instantiate
* the same page in the page cache.
*/
hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, address);
mutex_lock(&hugetlb_fault_mutex_table[hash]);
entry = huge_ptep_get(ptep);
if (huge_pte_none(entry)) {
ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
goto out_mutex;
}
ret = 0;
/*
* entry could be a migration/hwpoison entry at this point, so this
* check prevents the kernel from going below assuming that we have
* a active hugepage in pagecache. This goto expects the 2nd page fault,
* and is_hugetlb_entry_(migration|hwpoisoned) check will properly
* handle it.
*/
if (!pte_present(entry))
goto out_mutex;
/*
* If we are going to COW the mapping later, we examine the pending
* reservations for this page now. This will ensure that any
* allocations necessary to record that reservation occur outside the
* spinlock. For private mappings, we also lookup the pagecache
* page now as it is used to determine if a reservation has been
* consumed.
*/
if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
if (vma_needs_reservation(h, vma, address) < 0) {
ret = VM_FAULT_OOM;
goto out_mutex;
}
/* Just decrements count, does not deallocate */
vma_end_reservation(h, vma, address);
if (!(vma->vm_flags & VM_MAYSHARE))
pagecache_page = hugetlbfs_pagecache_page(h,
vma, address);
}
ptl = huge_pte_lock(h, mm, ptep);
/* Check for a racing update before calling hugetlb_cow */
if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
goto out_ptl;
/*
* hugetlb_cow() requires page locks of pte_page(entry) and
* pagecache_page, so here we need take the former one
* when page != pagecache_page or !pagecache_page.
*/
page = pte_page(entry);
if (page != pagecache_page)
if (!trylock_page(page)) {
need_wait_lock = 1;
goto out_ptl;
}
get_page(page);
if (flags & FAULT_FLAG_WRITE) {
if (!huge_pte_write(entry)) {
ret = hugetlb_cow(mm, vma, address, ptep,
pagecache_page, ptl);
goto out_put_page;
}
entry = huge_pte_mkdirty(entry);
}
entry = pte_mkyoung(entry);
if (huge_ptep_set_access_flags(vma, address, ptep, entry,
flags & FAULT_FLAG_WRITE))
update_mmu_cache(vma, address, ptep);
out_put_page:
if (page != pagecache_page)
unlock_page(page);
put_page(page);
out_ptl:
spin_unlock(ptl);
if (pagecache_page) {
unlock_page(pagecache_page);
put_page(pagecache_page);
}
out_mutex:
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
/*
* Generally it's safe to hold refcount during waiting page lock. But
* here we just wait to defer the next page fault to avoid busy loop and
* the page is not used after unlocked before returning from the current
* page fault. So we are safe from accessing freed page, even if we wait
* here without taking refcount.
*/
if (need_wait_lock)
wait_on_page_locked(page);
return ret;
}
| 8,639 |
123,570 | 0 | void FreeMenuItem(const PP_Flash_MenuItem* menu_item) {
if (menu_item->name)
delete [] menu_item->name;
if (menu_item->submenu)
FreeMenu(menu_item->submenu);
}
| 8,640 |
85,310 | 0 | add_job_subscriptions(
cupsd_client_t *con, /* I - Client connection */
cupsd_job_t *job) /* I - Newly created job */
{
int i; /* Looping var */
ipp_attribute_t *prev, /* Previous attribute */
*next, /* Next attribute */
*attr; /* Current attribute */
cupsd_subscription_t *sub; /* Subscription object */
const char *recipient, /* notify-recipient-uri */
*pullmethod; /* notify-pull-method */
ipp_attribute_t *user_data; /* notify-user-data */
int interval; /* notify-time-interval */
unsigned mask; /* notify-events */
/*
* Find the first subscription group attribute; return if we have
* none...
*/
for (attr = job->attrs->attrs; attr; attr = attr->next)
if (attr->group_tag == IPP_TAG_SUBSCRIPTION)
break;
if (!attr)
return;
/*
* Process the subscription attributes in the request...
*/
while (attr)
{
recipient = NULL;
pullmethod = NULL;
user_data = NULL;
interval = 0;
mask = CUPSD_EVENT_NONE;
while (attr && attr->group_tag != IPP_TAG_ZERO)
{
if (!strcmp(attr->name, "notify-recipient-uri") &&
attr->value_tag == IPP_TAG_URI)
{
/*
* Validate the recipient scheme against the ServerBin/notifier
* directory...
*/
char notifier[1024], /* Notifier filename */
scheme[HTTP_MAX_URI], /* Scheme portion of URI */
userpass[HTTP_MAX_URI], /* Username portion of URI */
host[HTTP_MAX_URI], /* Host portion of URI */
resource[HTTP_MAX_URI]; /* Resource portion of URI */
int port; /* Port portion of URI */
recipient = attr->values[0].string.text;
if (httpSeparateURI(HTTP_URI_CODING_ALL, recipient,
scheme, sizeof(scheme), userpass, sizeof(userpass),
host, sizeof(host), &port,
resource, sizeof(resource)) < HTTP_URI_OK)
{
send_ipp_status(con, IPP_NOT_POSSIBLE,
_("Bad notify-recipient-uri \"%s\"."), recipient);
ippAddInteger(con->response, IPP_TAG_SUBSCRIPTION, IPP_TAG_ENUM,
"notify-status-code", IPP_URI_SCHEME);
return;
}
snprintf(notifier, sizeof(notifier), "%s/notifier/%s", ServerBin,
scheme);
if (access(notifier, X_OK))
{
send_ipp_status(con, IPP_NOT_POSSIBLE,
_("notify-recipient-uri URI \"%s\" uses unknown "
"scheme."), recipient);
ippAddInteger(con->response, IPP_TAG_SUBSCRIPTION, IPP_TAG_ENUM,
"notify-status-code", IPP_URI_SCHEME);
return;
}
if (!strcmp(scheme, "rss") && !check_rss_recipient(recipient))
{
send_ipp_status(con, IPP_NOT_POSSIBLE,
_("notify-recipient-uri URI \"%s\" is already used."),
recipient);
ippAddInteger(con->response, IPP_TAG_SUBSCRIPTION, IPP_TAG_ENUM,
"notify-status-code", IPP_ATTRIBUTES);
return;
}
}
else if (!strcmp(attr->name, "notify-pull-method") &&
attr->value_tag == IPP_TAG_KEYWORD)
{
pullmethod = attr->values[0].string.text;
if (strcmp(pullmethod, "ippget"))
{
send_ipp_status(con, IPP_NOT_POSSIBLE,
_("Bad notify-pull-method \"%s\"."), pullmethod);
ippAddInteger(con->response, IPP_TAG_SUBSCRIPTION, IPP_TAG_ENUM,
"notify-status-code", IPP_ATTRIBUTES);
return;
}
}
else if (!strcmp(attr->name, "notify-charset") &&
attr->value_tag == IPP_TAG_CHARSET &&
strcmp(attr->values[0].string.text, "us-ascii") &&
strcmp(attr->values[0].string.text, "utf-8"))
{
send_ipp_status(con, IPP_CHARSET,
_("Character set \"%s\" not supported."),
attr->values[0].string.text);
return;
}
else if (!strcmp(attr->name, "notify-natural-language") &&
(attr->value_tag != IPP_TAG_LANGUAGE ||
strcmp(attr->values[0].string.text, DefaultLanguage)))
{
send_ipp_status(con, IPP_CHARSET,
_("Language \"%s\" not supported."),
attr->values[0].string.text);
return;
}
else if (!strcmp(attr->name, "notify-user-data") &&
attr->value_tag == IPP_TAG_STRING)
{
if (attr->num_values > 1 || attr->values[0].unknown.length > 63)
{
send_ipp_status(con, IPP_REQUEST_VALUE,
_("The notify-user-data value is too large "
"(%d > 63 octets)."),
attr->values[0].unknown.length);
return;
}
user_data = attr;
}
else if (!strcmp(attr->name, "notify-events") &&
attr->value_tag == IPP_TAG_KEYWORD)
{
for (i = 0; i < attr->num_values; i ++)
mask |= cupsdEventValue(attr->values[i].string.text);
}
else if (!strcmp(attr->name, "notify-lease-duration"))
{
send_ipp_status(con, IPP_BAD_REQUEST,
_("The notify-lease-duration attribute cannot be "
"used with job subscriptions."));
return;
}
else if (!strcmp(attr->name, "notify-time-interval") &&
attr->value_tag == IPP_TAG_INTEGER)
interval = attr->values[0].integer;
attr = attr->next;
}
if (!recipient && !pullmethod)
break;
if (mask == CUPSD_EVENT_NONE)
mask = CUPSD_EVENT_JOB_COMPLETED;
if ((sub = cupsdAddSubscription(mask, cupsdFindDest(job->dest), job,
recipient, 0)) != NULL)
{
sub->interval = interval;
cupsdSetString(&sub->owner, job->username);
if (user_data)
{
sub->user_data_len = user_data->values[0].unknown.length;
memcpy(sub->user_data, user_data->values[0].unknown.data,
(size_t)sub->user_data_len);
}
ippAddSeparator(con->response);
ippAddInteger(con->response, IPP_TAG_SUBSCRIPTION, IPP_TAG_INTEGER,
"notify-subscription-id", sub->id);
cupsdLogMessage(CUPSD_LOG_DEBUG, "Added subscription %d for job %d",
sub->id, job->id);
}
if (attr)
attr = attr->next;
}
cupsdMarkDirty(CUPSD_DIRTY_SUBSCRIPTIONS);
/*
* Remove all of the subscription attributes from the job request...
*
* TODO: Optimize this since subscription groups have to come at the
* end of the request...
*/
for (attr = job->attrs->attrs, prev = NULL; attr; attr = next)
{
next = attr->next;
if (attr->group_tag == IPP_TAG_SUBSCRIPTION ||
attr->group_tag == IPP_TAG_ZERO)
{
/*
* Free and remove this attribute...
*/
ippDeleteAttribute(NULL, attr);
if (prev)
prev->next = next;
else
job->attrs->attrs = next;
}
else
prev = attr;
}
job->attrs->last = prev;
job->attrs->current = prev;
}
| 8,641 |
176,673 | 0 | xmlParseStringPEReference(xmlParserCtxtPtr ctxt, const xmlChar **str) {
const xmlChar *ptr;
xmlChar cur;
xmlChar *name;
xmlEntityPtr entity = NULL;
if ((str == NULL) || (*str == NULL)) return(NULL);
ptr = *str;
cur = *ptr;
if (cur != '%')
return(NULL);
ptr++;
name = xmlParseStringName(ctxt, &ptr);
if (name == NULL) {
xmlFatalErrMsg(ctxt, XML_ERR_NAME_REQUIRED,
"xmlParseStringPEReference: no name\n");
*str = ptr;
return(NULL);
}
cur = *ptr;
if (cur != ';') {
xmlFatalErr(ctxt, XML_ERR_ENTITYREF_SEMICOL_MISSING, NULL);
xmlFree(name);
*str = ptr;
return(NULL);
}
ptr++;
/*
* Increate the number of entity references parsed
*/
ctxt->nbentities++;
/*
* Request the entity from SAX
*/
if ((ctxt->sax != NULL) &&
(ctxt->sax->getParameterEntity != NULL))
entity = ctxt->sax->getParameterEntity(ctxt->userData, name);
if (ctxt->instate == XML_PARSER_EOF) {
xmlFree(name);
return(NULL);
}
if (entity == NULL) {
/*
* [ WFC: Entity Declared ]
* In a document without any DTD, a document with only an
* internal DTD subset which contains no parameter entity
* references, or a document with "standalone='yes'", ...
* ... The declaration of a parameter entity must precede
* any reference to it...
*/
if ((ctxt->standalone == 1) ||
((ctxt->hasExternalSubset == 0) && (ctxt->hasPErefs == 0))) {
xmlFatalErrMsgStr(ctxt, XML_ERR_UNDECLARED_ENTITY,
"PEReference: %%%s; not found\n", name);
} else {
/*
* [ VC: Entity Declared ]
* In a document with an external subset or external
* parameter entities with "standalone='no'", ...
* ... The declaration of a parameter entity must
* precede any reference to it...
*/
xmlWarningMsg(ctxt, XML_WAR_UNDECLARED_ENTITY,
"PEReference: %%%s; not found\n",
name, NULL);
ctxt->valid = 0;
}
xmlParserEntityCheck(ctxt, 0, NULL, 0);
} else {
/*
* Internal checking in case the entity quest barfed
*/
if ((entity->etype != XML_INTERNAL_PARAMETER_ENTITY) &&
(entity->etype != XML_EXTERNAL_PARAMETER_ENTITY)) {
xmlWarningMsg(ctxt, XML_WAR_UNDECLARED_ENTITY,
"%%%s; is not a parameter entity\n",
name, NULL);
}
}
ctxt->hasPErefs = 1;
xmlFree(name);
*str = ptr;
return(entity);
}
| 8,642 |
77,032 | 0 | ofpacts_verify_nested(const struct ofpact *a, enum ofpact_type outer_action)
{
const struct mf_field *field = ofpact_get_mf_dst(a);
if (field && field_requires_ct(field->id) && outer_action != OFPACT_CT) {
VLOG_WARN("cannot set CT fields outside of ct action");
return OFPERR_OFPBAC_BAD_SET_ARGUMENT;
}
if (a->type == OFPACT_NAT) {
if (outer_action != OFPACT_CT) {
VLOG_WARN("Cannot have NAT action outside of \"ct\" action");
return OFPERR_OFPBAC_BAD_SET_ARGUMENT;
}
return 0;
}
if (outer_action) {
ovs_assert(outer_action == OFPACT_WRITE_ACTIONS
|| outer_action == OFPACT_CT);
if (outer_action == OFPACT_CT) {
if (!field) {
return unsupported_nesting(a->type, outer_action);
} else if (!field_requires_ct(field->id)) {
VLOG_WARN("%s action doesn't support nested modification "
"of %s", ofpact_name(outer_action), field->name);
return OFPERR_OFPBAC_BAD_ARGUMENT;
}
}
}
return 0;
}
| 8,643 |
90,244 | 0 | static void sender(void *send_info,
struct ipmi_smi_msg *msg)
{
struct smi_info *smi_info = send_info;
unsigned long flags;
debug_timestamp("Enqueue");
if (smi_info->run_to_completion) {
/*
* If we are running to completion, start it. Upper
* layer will call flush_messages to clear it out.
*/
smi_info->waiting_msg = msg;
return;
}
spin_lock_irqsave(&smi_info->si_lock, flags);
/*
* The following two lines don't need to be under the lock for
* the lock's sake, but they do need SMP memory barriers to
* avoid getting things out of order. We are already claiming
* the lock, anyway, so just do it under the lock to avoid the
* ordering problem.
*/
BUG_ON(smi_info->waiting_msg);
smi_info->waiting_msg = msg;
check_start_timer_thread(smi_info);
spin_unlock_irqrestore(&smi_info->si_lock, flags);
}
| 8,644 |
183,018 | 1 | rx_cache_insert(netdissect_options *ndo,
const u_char *bp, const struct ip *ip, int dport)
{
struct rx_cache_entry *rxent;
const struct rx_header *rxh = (const struct rx_header *) bp;
if (ndo->ndo_snapend - bp + 1 <= (int)(sizeof(struct rx_header) + sizeof(int32_t)))
return;
rxent = &rx_cache[rx_cache_next];
if (++rx_cache_next >= RX_CACHE_SIZE)
rx_cache_next = 0;
rxent->callnum = EXTRACT_32BITS(&rxh->callNumber);
UNALIGNED_MEMCPY(&rxent->client, &ip->ip_src, sizeof(uint32_t));
UNALIGNED_MEMCPY(&rxent->server, &ip->ip_dst, sizeof(uint32_t));
rxent->dport = dport;
rxent->serviceId = EXTRACT_32BITS(&rxh->serviceId);
rxent->opcode = EXTRACT_32BITS(bp + sizeof(struct rx_header));
}
| 8,645 |
160,680 | 0 | void RenderFrameImpl::OnSelectAll() {
AutoResetMember<bool> handling_select_range(
this, &RenderFrameImpl::handling_select_range_, true);
frame_->ExecuteCommand(WebString::FromUTF8("SelectAll"));
}
| 8,646 |
5,281 | 0 | grow_hunkmax (void)
{
hunkmax *= 2;
assert (p_line && p_len && p_Char);
if ((p_line = (char **) realloc (p_line, hunkmax * sizeof (*p_line)))
&& (p_len = (size_t *) realloc (p_len, hunkmax * sizeof (*p_len)))
&& (p_Char = realloc (p_Char, hunkmax * sizeof (*p_Char))))
return true;
if (!using_plan_a)
xalloc_die ();
/* Don't free previous values of p_line etc.,
since some broken implementations free them for us.
Whatever is null will be allocated again from within plan_a (),
of all places. */
return false;
}
| 8,647 |
138,776 | 0 | void RenderFrameHostImpl::OnUpdateTitle(
const base::string16& title,
blink::WebTextDirection title_direction) {
if (frame_tree_node_->parent())
return;
if (title.length() > kMaxTitleChars) {
NOTREACHED() << "Renderer sent too many characters in title.";
return;
}
delegate_->UpdateTitle(
this, title, WebTextDirectionToChromeTextDirection(title_direction));
}
| 8,648 |
30,773 | 0 | __releases(ax25_list_lock)
{
spin_unlock_bh(&ax25_list_lock);
}
| 8,649 |
4,829 | 0 | EventSuppressForWindow(WindowPtr pWin, ClientPtr client,
Mask mask, Bool *checkOptional)
{
int i, freed;
if (mask & ~PropagateMask) {
client->errorValue = mask;
return BadValue;
}
if (pWin->dontPropagate)
DontPropagateRefCnts[pWin->dontPropagate]--;
if (!mask)
i = 0;
else {
for (i = DNPMCOUNT, freed = 0; --i > 0;) {
if (!DontPropagateRefCnts[i])
freed = i;
else if (mask == DontPropagateMasks[i])
break;
}
if (!i && freed) {
i = freed;
DontPropagateMasks[i] = mask;
}
}
if (i || !mask) {
pWin->dontPropagate = i;
if (i)
DontPropagateRefCnts[i]++;
if (pWin->optional) {
pWin->optional->dontPropagateMask = mask;
*checkOptional = TRUE;
}
}
else {
if (!pWin->optional && !MakeWindowOptional(pWin)) {
if (pWin->dontPropagate)
DontPropagateRefCnts[pWin->dontPropagate]++;
return BadAlloc;
}
pWin->dontPropagate = 0;
pWin->optional->dontPropagateMask = mask;
}
RecalculateDeliverableEvents(pWin);
return Success;
}
| 8,650 |
162,666 | 0 | bool CanonicalizeScheme(const base::char16* spec,
const Component& scheme,
CanonOutput* output,
Component* out_scheme) {
return DoScheme<base::char16, base::char16>(spec, scheme, output, out_scheme);
}
| 8,651 |
90,885 | 0 | MagickPrivate ssize_t FormatLocaleStringList(char *magick_restrict string,
const size_t length,const char *magick_restrict format,va_list operands)
{
ssize_t
n;
#if defined(MAGICKCORE_LOCALE_SUPPORT) && defined(MAGICKCORE_HAVE_VSNPRINTF_L)
{
locale_t
locale;
locale=AcquireCLocale();
if (locale == (locale_t) NULL)
n=(ssize_t) vsnprintf(string,length,format,operands);
else
#if defined(MAGICKCORE_WINDOWS_SUPPORT)
n=(ssize_t) vsnprintf_l(string,length,format,locale,operands);
#else
n=(ssize_t) vsnprintf_l(string,length,locale,format,operands);
#endif
}
#elif defined(MAGICKCORE_HAVE_VSNPRINTF)
#if defined(MAGICKCORE_LOCALE_SUPPORT) && defined(MAGICKCORE_HAVE_USELOCALE)
{
locale_t
locale,
previous_locale;
locale=AcquireCLocale();
if (locale == (locale_t) NULL)
n=(ssize_t) vsnprintf(string,length,format,operands);
else
{
previous_locale=uselocale(locale);
n=(ssize_t) vsnprintf(string,length,format,operands);
uselocale(previous_locale);
}
}
#else
n=(ssize_t) vsnprintf(string,length,format,operands);
#endif
#else
n=(ssize_t) vsprintf(string,format,operands);
#endif
if (n < 0)
string[length-1]='\0';
return(n);
}
| 8,652 |
25,259 | 0 | static inline int armv7_pmnc_counter_has_overflowed(unsigned long pmnc,
enum armv7_counters counter)
{
int ret = 0;
if (counter == ARMV7_CYCLE_COUNTER)
ret = pmnc & ARMV7_FLAG_C;
else if ((counter >= ARMV7_COUNTER0) && (counter <= ARMV7_COUNTER_LAST))
ret = pmnc & ARMV7_FLAG_P(counter);
else
pr_err("CPU%u checking wrong counter %d overflow status\n",
smp_processor_id(), counter);
return ret;
}
| 8,653 |
6,878 | 0 | enum act_parse_ret parse_http_set_status(const char **args, int *orig_arg, struct proxy *px,
struct act_rule *rule, char **err)
{
char *error;
rule->action = ACT_CUSTOM;
rule->action_ptr = action_http_set_status;
/* Check if an argument is available */
if (!*args[*orig_arg]) {
memprintf(err, "expects 1 argument: <status>; or 3 arguments: <status> reason <fmt>");
return ACT_RET_PRS_ERR;
}
/* convert status code as integer */
rule->arg.status.code = strtol(args[*orig_arg], &error, 10);
if (*error != '\0' || rule->arg.status.code < 100 || rule->arg.status.code > 999) {
memprintf(err, "expects an integer status code between 100 and 999");
return ACT_RET_PRS_ERR;
}
(*orig_arg)++;
/* set custom reason string */
rule->arg.status.reason = NULL; // If null, we use the default reason for the status code.
if (*args[*orig_arg] && strcmp(args[*orig_arg], "reason") == 0 &&
(*args[*orig_arg + 1] && strcmp(args[*orig_arg + 1], "if") != 0 && strcmp(args[*orig_arg + 1], "unless") != 0)) {
(*orig_arg)++;
rule->arg.status.reason = strdup(args[*orig_arg]);
(*orig_arg)++;
}
return ACT_RET_PRS_OK;
}
| 8,654 |
146,786 | 0 | HTMLLinkElement* Document::LinkManifest() const {
HTMLHeadElement* head = this->head();
if (!head)
return 0;
for (HTMLLinkElement* link_element =
Traversal<HTMLLinkElement>::FirstChild(*head);
link_element;
link_element = Traversal<HTMLLinkElement>::NextSibling(*link_element)) {
if (!link_element->RelAttribute().IsManifest())
continue;
return link_element;
}
return 0;
}
| 8,655 |
100,652 | 0 | Browser* GetBrowserForDisposition(browser::NavigateParams* params) {
if (!params->source_contents && params->browser)
params->source_contents =
params->browser->GetSelectedTabContentsWrapper();
Profile* profile =
params->browser ? params->browser->profile() : params->profile;
switch (params->disposition) {
case CURRENT_TAB:
if (!params->browser && profile) {
params->browser = Browser::GetOrCreateTabbedBrowser(profile);
}
return params->browser;
case SINGLETON_TAB:
case NEW_FOREGROUND_TAB:
case NEW_BACKGROUND_TAB:
if (params->browser && WindowCanOpenTabs(params->browser))
return params->browser;
if (profile)
return GetOrCreateBrowser(profile);
return NULL;
case NEW_POPUP: {
if (profile) {
std::string app_name;
if (!params->extension_app_id.empty()) {
app_name = web_app::GenerateApplicationNameFromExtensionId(
params->extension_app_id);
} else if (params->browser && !params->browser->app_name().empty()) {
app_name = params->browser->app_name();
} else if (params->source_contents &&
params->source_contents->extension_tab_helper()->is_app()) {
app_name = web_app::GenerateApplicationNameFromExtensionId(
params->source_contents->extension_tab_helper()->
extension_app()->id());
}
if (app_name.empty()) {
Browser::CreateParams browser_params(Browser::TYPE_POPUP, profile);
browser_params.initial_bounds = params->window_bounds;
return Browser::CreateWithParams(browser_params);
} else {
return Browser::CreateForApp(Browser::TYPE_POPUP, app_name,
params->window_bounds, profile);
}
}
return NULL;
}
case NEW_WINDOW:
if (profile) {
Browser* browser = new Browser(Browser::TYPE_TABBED, profile);
browser->InitBrowserWindow();
return browser;
}
return NULL;
case OFF_THE_RECORD:
if (profile)
return GetOrCreateBrowser(profile->GetOffTheRecordProfile());
return NULL;
case SUPPRESS_OPEN:
case SAVE_TO_DISK:
case IGNORE_ACTION:
return NULL;
default:
NOTREACHED();
}
return NULL;
}
| 8,656 |
63,127 | 0 | static void add_bit (char bit, byte *fout) {
if ((bloc&7) == 0) {
fout[(bloc>>3)] = 0;
}
fout[(bloc>>3)] |= bit << (bloc&7);
bloc++;
}
| 8,657 |
106,813 | 0 | FloatQuad RenderBox::absoluteContentQuad() const
{
IntRect rect = contentBoxRect();
return localToAbsoluteQuad(FloatRect(rect));
}
| 8,658 |
114,105 | 0 | static Ewk_View_Smart_Class* miniBrowserViewSmartClass()
{
static Ewk_View_Smart_Class ewkViewClass = EWK_VIEW_SMART_CLASS_INIT_NAME_VERSION("MiniBrowser_View");
return &ewkViewClass;
}
| 8,659 |
91,244 | 0 | static int handle_ipmb_get_msg_rsp(struct ipmi_smi *intf,
struct ipmi_smi_msg *msg)
{
struct ipmi_ipmb_addr ipmb_addr;
struct ipmi_recv_msg *recv_msg;
/*
* This is 11, not 10, because the response must contain a
* completion code.
*/
if (msg->rsp_size < 11) {
/* Message not big enough, just ignore it. */
ipmi_inc_stat(intf, invalid_ipmb_responses);
return 0;
}
if (msg->rsp[2] != 0) {
/* An error getting the response, just ignore it. */
return 0;
}
ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE;
ipmb_addr.slave_addr = msg->rsp[6];
ipmb_addr.channel = msg->rsp[3] & 0x0f;
ipmb_addr.lun = msg->rsp[7] & 3;
/*
* It's a response from a remote entity. Look up the sequence
* number and handle the response.
*/
if (intf_find_seq(intf,
msg->rsp[7] >> 2,
msg->rsp[3] & 0x0f,
msg->rsp[8],
(msg->rsp[4] >> 2) & (~1),
(struct ipmi_addr *) &ipmb_addr,
&recv_msg)) {
/*
* We were unable to find the sequence number,
* so just nuke the message.
*/
ipmi_inc_stat(intf, unhandled_ipmb_responses);
return 0;
}
memcpy(recv_msg->msg_data, &msg->rsp[9], msg->rsp_size - 9);
/*
* The other fields matched, so no need to set them, except
* for netfn, which needs to be the response that was
* returned, not the request value.
*/
recv_msg->msg.netfn = msg->rsp[4] >> 2;
recv_msg->msg.data = recv_msg->msg_data;
recv_msg->msg.data_len = msg->rsp_size - 10;
recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
if (deliver_response(intf, recv_msg))
ipmi_inc_stat(intf, unhandled_ipmb_responses);
else
ipmi_inc_stat(intf, handled_ipmb_responses);
return 0;
}
| 8,660 |
136,816 | 0 | const AtomicString& LocalDOMWindow::name() const {
if (!IsCurrentlyDisplayedInFrame())
return g_null_atom;
return GetFrame()->Tree().GetName();
}
| 8,661 |
144,127 | 0 | png_write_IHDR(png_structp png_ptr, png_uint_32 width, png_uint_32 height,
int bit_depth, int color_type, int compression_type, int filter_type,
int interlace_type)
{
#ifdef PNG_USE_LOCAL_ARRAYS
PNG_IHDR;
#endif
int ret;
png_byte buf[13]; /* Buffer to store the IHDR info */
png_debug(1, "in png_write_IHDR");
/* Check that we have valid input data from the application info */
switch (color_type)
{
case PNG_COLOR_TYPE_GRAY:
switch (bit_depth)
{
case 1:
case 2:
case 4:
case 8:
case 16: png_ptr->channels = 1; break;
default: png_error(png_ptr,
"Invalid bit depth for grayscale image");
}
break;
case PNG_COLOR_TYPE_RGB:
if (bit_depth != 8 && bit_depth != 16)
png_error(png_ptr, "Invalid bit depth for RGB image");
png_ptr->channels = 3;
break;
case PNG_COLOR_TYPE_PALETTE:
switch (bit_depth)
{
case 1:
case 2:
case 4:
case 8: png_ptr->channels = 1; break;
default: png_error(png_ptr, "Invalid bit depth for paletted image");
}
break;
case PNG_COLOR_TYPE_GRAY_ALPHA:
if (bit_depth != 8 && bit_depth != 16)
png_error(png_ptr, "Invalid bit depth for grayscale+alpha image");
png_ptr->channels = 2;
break;
case PNG_COLOR_TYPE_RGB_ALPHA:
if (bit_depth != 8 && bit_depth != 16)
png_error(png_ptr, "Invalid bit depth for RGBA image");
png_ptr->channels = 4;
break;
default:
png_error(png_ptr, "Invalid image color type specified");
}
if (compression_type != PNG_COMPRESSION_TYPE_BASE)
{
png_warning(png_ptr, "Invalid compression type specified");
compression_type = PNG_COMPRESSION_TYPE_BASE;
}
/* Write filter_method 64 (intrapixel differencing) only if
* 1. Libpng was compiled with PNG_MNG_FEATURES_SUPPORTED and
* 2. Libpng did not write a PNG signature (this filter_method is only
* used in PNG datastreams that are embedded in MNG datastreams) and
* 3. The application called png_permit_mng_features with a mask that
* included PNG_FLAG_MNG_FILTER_64 and
* 4. The filter_method is 64 and
* 5. The color_type is RGB or RGBA
*/
if (
#ifdef PNG_MNG_FEATURES_SUPPORTED
!((png_ptr->mng_features_permitted & PNG_FLAG_MNG_FILTER_64) &&
((png_ptr->mode&PNG_HAVE_PNG_SIGNATURE) == 0) &&
(color_type == PNG_COLOR_TYPE_RGB ||
color_type == PNG_COLOR_TYPE_RGB_ALPHA) &&
(filter_type == PNG_INTRAPIXEL_DIFFERENCING)) &&
#endif
filter_type != PNG_FILTER_TYPE_BASE)
{
png_warning(png_ptr, "Invalid filter type specified");
filter_type = PNG_FILTER_TYPE_BASE;
}
#ifdef PNG_WRITE_INTERLACING_SUPPORTED
if (interlace_type != PNG_INTERLACE_NONE &&
interlace_type != PNG_INTERLACE_ADAM7)
{
png_warning(png_ptr, "Invalid interlace type specified");
interlace_type = PNG_INTERLACE_ADAM7;
}
#else
interlace_type=PNG_INTERLACE_NONE;
#endif
/* Save the relevent information */
png_ptr->bit_depth = (png_byte)bit_depth;
png_ptr->color_type = (png_byte)color_type;
png_ptr->interlaced = (png_byte)interlace_type;
#ifdef PNG_MNG_FEATURES_SUPPORTED
png_ptr->filter_type = (png_byte)filter_type;
#endif
png_ptr->compression_type = (png_byte)compression_type;
png_ptr->width = width;
png_ptr->height = height;
png_ptr->pixel_depth = (png_byte)(bit_depth * png_ptr->channels);
png_ptr->rowbytes = PNG_ROWBYTES(png_ptr->pixel_depth, width);
/* Set the usr info, so any transformations can modify it */
png_ptr->usr_width = png_ptr->width;
png_ptr->usr_bit_depth = png_ptr->bit_depth;
png_ptr->usr_channels = png_ptr->channels;
/* Pack the header information into the buffer */
png_save_uint_32(buf, width);
png_save_uint_32(buf + 4, height);
buf[8] = (png_byte)bit_depth;
buf[9] = (png_byte)color_type;
buf[10] = (png_byte)compression_type;
buf[11] = (png_byte)filter_type;
buf[12] = (png_byte)interlace_type;
/* Write the chunk */
png_write_chunk(png_ptr, (png_bytep)png_IHDR, buf, (png_size_t)13);
/* Initialize zlib with PNG info */
png_ptr->zstream.zalloc = png_zalloc;
png_ptr->zstream.zfree = png_zfree;
png_ptr->zstream.opaque = (voidpf)png_ptr;
if (!(png_ptr->do_filter))
{
if (png_ptr->color_type == PNG_COLOR_TYPE_PALETTE ||
png_ptr->bit_depth < 8)
png_ptr->do_filter = PNG_FILTER_NONE;
else
png_ptr->do_filter = PNG_ALL_FILTERS;
}
if (!(png_ptr->flags & PNG_FLAG_ZLIB_CUSTOM_STRATEGY))
{
if (png_ptr->do_filter != PNG_FILTER_NONE)
png_ptr->zlib_strategy = Z_FILTERED;
else
png_ptr->zlib_strategy = Z_DEFAULT_STRATEGY;
}
if (!(png_ptr->flags & PNG_FLAG_ZLIB_CUSTOM_LEVEL))
png_ptr->zlib_level = Z_DEFAULT_COMPRESSION;
if (!(png_ptr->flags & PNG_FLAG_ZLIB_CUSTOM_MEM_LEVEL))
png_ptr->zlib_mem_level = 8;
if (!(png_ptr->flags & PNG_FLAG_ZLIB_CUSTOM_WINDOW_BITS))
png_ptr->zlib_window_bits = 15;
if (!(png_ptr->flags & PNG_FLAG_ZLIB_CUSTOM_METHOD))
png_ptr->zlib_method = 8;
ret = deflateInit2(&png_ptr->zstream, png_ptr->zlib_level,
png_ptr->zlib_method, png_ptr->zlib_window_bits,
png_ptr->zlib_mem_level, png_ptr->zlib_strategy);
if (ret != Z_OK)
{
if (ret == Z_VERSION_ERROR) png_error(png_ptr,
"zlib failed to initialize compressor -- version error");
if (ret == Z_STREAM_ERROR) png_error(png_ptr,
"zlib failed to initialize compressor -- stream error");
if (ret == Z_MEM_ERROR) png_error(png_ptr,
"zlib failed to initialize compressor -- mem error");
png_error(png_ptr, "zlib failed to initialize compressor");
}
png_ptr->zstream.next_out = png_ptr->zbuf;
png_ptr->zstream.avail_out = (uInt)png_ptr->zbuf_size;
/* libpng is not interested in zstream.data_type */
/* Set it to a predefined value, to avoid its evaluation inside zlib */
png_ptr->zstream.data_type = Z_BINARY;
png_ptr->mode = PNG_HAVE_IHDR;
}
| 8,662 |
88,038 | 0 | static const struct genl_family *genl_family_find_byid(unsigned int id)
{
return idr_find(&genl_fam_idr, id);
}
| 8,663 |
46,867 | 0 | int __camellia_setkey(struct camellia_ctx *cctx, const unsigned char *key,
unsigned int key_len, u32 *flags)
{
if (key_len != 16 && key_len != 24 && key_len != 32) {
*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
}
cctx->key_length = key_len;
switch (key_len) {
case 16:
camellia_setup128(key, cctx->key_table);
break;
case 24:
camellia_setup192(key, cctx->key_table);
break;
case 32:
camellia_setup256(key, cctx->key_table);
break;
}
return 0;
}
| 8,664 |
134,105 | 0 | void InputMethodIBus::ExtractCompositionText(
const chromeos::IBusText& text,
uint32 cursor_position,
CompositionText* out_composition) const {
out_composition->Clear();
out_composition->text = UTF8ToUTF16(text.text());
if (out_composition->text.empty())
return;
std::vector<size_t> char16_offsets;
size_t length = out_composition->text.length();
base::i18n::UTF16CharIterator char_iterator(&out_composition->text);
do {
char16_offsets.push_back(char_iterator.array_pos());
} while (char_iterator.Advance());
uint32 char_length = static_cast<uint32>(char16_offsets.size());
char16_offsets.push_back(length);
size_t cursor_offset =
char16_offsets[std::min(char_length, cursor_position)];
out_composition->selection = gfx::Range(cursor_offset);
const std::vector<chromeos::IBusText::UnderlineAttribute>&
underline_attributes = text.underline_attributes();
const std::vector<chromeos::IBusText::SelectionAttribute>&
selection_attributes = text.selection_attributes();
if (!underline_attributes.empty()) {
for (size_t i = 0; i < underline_attributes.size(); ++i) {
const uint32 start = underline_attributes[i].start_index;
const uint32 end = underline_attributes[i].end_index;
if (start >= end)
continue;
CompositionUnderline underline(
char16_offsets[start], char16_offsets[end],
SK_ColorBLACK, false /* thick */);
if (underline_attributes[i].type ==
chromeos::IBusText::IBUS_TEXT_UNDERLINE_DOUBLE)
underline.thick = true;
else if (underline_attributes[i].type ==
chromeos::IBusText::IBUS_TEXT_UNDERLINE_ERROR)
underline.color = SK_ColorRED;
out_composition->underlines.push_back(underline);
}
}
if (!selection_attributes.empty()) {
LOG_IF(ERROR, selection_attributes.size() != 1)
<< "Chrome does not support multiple selection";
for (uint32 i = 0; i < selection_attributes.size(); ++i) {
const uint32 start = selection_attributes[i].start_index;
const uint32 end = selection_attributes[i].end_index;
if (start >= end)
continue;
CompositionUnderline underline(
char16_offsets[start], char16_offsets[end],
SK_ColorBLACK, true /* thick */);
out_composition->underlines.push_back(underline);
if (underline.start_offset == cursor_offset) {
out_composition->selection.set_start(underline.end_offset);
out_composition->selection.set_end(cursor_offset);
} else if (underline.end_offset == cursor_offset) {
out_composition->selection.set_start(underline.start_offset);
out_composition->selection.set_end(cursor_offset);
}
}
}
if (out_composition->underlines.empty()) {
out_composition->underlines.push_back(CompositionUnderline(
0, length, SK_ColorBLACK, false /* thick */));
}
}
| 8,665 |
164,220 | 0 | void Verify_LoadCache_Far_Hit() {
EXPECT_TRUE(delegate()->loaded_cache_.get());
EXPECT_TRUE(delegate()->loaded_cache_->HasOneRef());
EXPECT_EQ(1, delegate()->loaded_cache_id_);
EXPECT_TRUE(delegate()->loaded_cache_->owning_group());
EXPECT_TRUE(delegate()->loaded_cache_->owning_group()->HasOneRef());
EXPECT_EQ(1, delegate()->loaded_cache_->owning_group()->group_id());
EXPECT_EQ(1, mock_quota_manager_proxy_->notify_storage_accessed_count_);
EXPECT_EQ(0, mock_quota_manager_proxy_->notify_storage_modified_count_);
delegate()->loaded_cache_ = nullptr;
EXPECT_FALSE(delegate()->loaded_group_.get());
PushNextTask(
base::BindOnce(&AppCacheStorageImplTest::Verify_LoadGroup_Far_Hit,
base::Unretained(this)));
storage()->LoadOrCreateGroup(kManifestUrl, delegate());
}
| 8,666 |
125,191 | 0 | void RenderMessageFilter::OnCacheableMetadataAvailable(
const GURL& url,
double expected_response_time,
const std::vector<char>& data) {
if (!CheckPreparsedJsCachingEnabled())
return;
net::HttpCache* cache = request_context_->GetURLRequestContext()->
http_transaction_factory()->GetCache();
DCHECK(cache);
scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(data.size()));
memcpy(buf->data(), &data.front(), data.size());
cache->WriteMetadata(
url, base::Time::FromDoubleT(expected_response_time), buf, data.size());
}
| 8,667 |
152,468 | 0 | void RenderFrameImpl::PepperCaretPositionChanged(
PepperPluginInstanceImpl* instance) {
if (instance != focused_pepper_plugin_)
return;
GetLocalRootRenderWidget()->UpdateSelectionBounds();
}
| 8,668 |
127,943 | 0 | bool BrowserViewRenderer::IsVisible() const {
return view_visible_ && (!attached_to_window_ || window_visible_);
}
| 8,669 |
46,810 | 0 | static int sha512_sparc64_init(struct shash_desc *desc)
{
struct sha512_state *sctx = shash_desc_ctx(desc);
sctx->state[0] = SHA512_H0;
sctx->state[1] = SHA512_H1;
sctx->state[2] = SHA512_H2;
sctx->state[3] = SHA512_H3;
sctx->state[4] = SHA512_H4;
sctx->state[5] = SHA512_H5;
sctx->state[6] = SHA512_H6;
sctx->state[7] = SHA512_H7;
sctx->count[0] = sctx->count[1] = 0;
return 0;
}
| 8,670 |
145,450 | 0 | void QuicStreamHost::Initialize(QuicTransportHost* transport_host,
P2PQuicStream* p2p_stream) {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
DCHECK(transport_host);
DCHECK(p2p_stream);
transport_host_ = transport_host;
p2p_stream_ = p2p_stream;
p2p_stream_->SetDelegate(this);
}
| 8,671 |
33,803 | 0 | static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync,
void *key)
{
struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);
if (!((unsigned long)key & poll->mask))
return 0;
vhost_poll_queue(poll);
return 0;
}
| 8,672 |
100,940 | 0 | void ScriptableHandle::Unref(ScriptableHandle** handle) {
if (*handle != NULL) {
(*handle)->Unref();
*handle = NULL;
}
}
| 8,673 |
86,401 | 0 | static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
{
VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
return (get_vma_private_data(vma) & flag) != 0;
}
| 8,674 |
56,663 | 0 | int ext4_force_commit(struct super_block *sb)
{
journal_t *journal;
if (sb->s_flags & MS_RDONLY)
return 0;
journal = EXT4_SB(sb)->s_journal;
return ext4_journal_force_commit(journal);
}
| 8,675 |
161,944 | 0 | void PrintRenderFrameHelper::PrepareFrameForPreviewDocument() {
reset_prep_frame_view_ = false;
if (!print_pages_params_ || CheckForCancel()) {
DidFinishPrinting(FAIL_PREVIEW);
return;
}
if (prep_frame_view_ && prep_frame_view_->IsLoadingSelection()) {
reset_prep_frame_view_ = true;
return;
}
const PrintMsg_Print_Params& print_params = print_pages_params_->params;
prep_frame_view_ = base::MakeUnique<PrepareFrameAndViewForPrint>(
print_params, print_preview_context_.source_frame(),
print_preview_context_.source_node(), ignore_css_margins_);
prep_frame_view_->CopySelectionIfNeeded(
render_frame()->GetWebkitPreferences(),
base::Bind(&PrintRenderFrameHelper::OnFramePreparedForPreviewDocument,
weak_ptr_factory_.GetWeakPtr()));
}
| 8,676 |
33,943 | 0 | void mon_st_callback(stonith_t *st, stonith_event_t *e)
{
char *desc = g_strdup_printf(
"Operation %s requested by %s for peer %s: %s (ref=%s)",
e->operation, e->origin, e->target, pcmk_strerror(e->result), e->id);
if (snmp_target) {
send_snmp_trap(e->target, NULL, e->operation, pcmk_ok, e->result, 0, desc);
}
if (crm_mail_to) {
send_smtp_trap(e->target, NULL, e->operation, pcmk_ok, e->result, 0, desc);
}
if (external_agent) {
send_custom_trap(e->target, NULL, e->operation, pcmk_ok, e->result, 0, desc);
}
g_free(desc);
}
| 8,677 |
160,921 | 0 | DOMWindow::~DOMWindow() {
DCHECK(!frame_);
}
| 8,678 |
79,230 | 0 | void StreamTcpStreamCleanup(TcpStream *stream)
{
if (stream != NULL) {
StreamTcpSackFreeList(stream);
StreamTcpReturnStreamSegments(stream);
StreamingBufferClear(&stream->sb);
}
}
| 8,679 |
64,507 | 0 | MagickExport MagickBooleanType CopyImagePixels(Image *image,
const Image *source_image,const RectangleInfo *geometry,
const OffsetInfo *offset,ExceptionInfo *exception)
{
#define CopyImageTag "Copy/Image"
CacheView
*image_view,
*source_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(source_image != (Image *) NULL);
assert(geometry != (RectangleInfo *) NULL);
assert(offset != (OffsetInfo *) NULL);
if ((offset->x < 0) || (offset->y < 0) ||
((ssize_t) (offset->x+geometry->width) > (ssize_t) image->columns) ||
((ssize_t) (offset->y+geometry->height) > (ssize_t) image->rows))
ThrowBinaryException(OptionError,"GeometryDoesNotContainImage",
image->filename);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
/*
Copy image pixels.
*/
status=MagickTrue;
progress=0;
source_view=AcquireVirtualCacheView(source_image,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(source_image,image,geometry->height,1)
#endif
for (y=0; y < (ssize_t) geometry->height; y++)
{
register const IndexPacket
*magick_restrict source_indexes;
register const PixelPacket
*magick_restrict p;
register IndexPacket
*magick_restrict indexes;
register PixelPacket
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(source_view,geometry->x,y+geometry->y,
geometry->width,1,exception);
q=GetCacheViewAuthenticPixels(image_view,offset->x,y+offset->y,
geometry->width,1,exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
source_indexes=GetCacheViewVirtualIndexQueue(source_view);
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) geometry->width; x++)
{
*q=(*p);
if (image->colorspace == CMYKColorspace)
indexes[x]=source_indexes[x];
p++;
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_CopyImagePixels)
#endif
proceed=SetImageProgress(image,CopyImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
source_view=DestroyCacheView(source_view);
return(status);
}
| 8,680 |
178,946 | 1 | static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
{
unsigned long pmnc;
struct perf_sample_data data;
struct cpu_hw_events *cpuc;
struct pt_regs *regs;
int idx;
/*
* Get and reset the IRQ flags
*/
pmnc = armv7_pmnc_getreset_flags();
/*
* Did an overflow occur?
*/
if (!armv7_pmnc_has_overflowed(pmnc))
return IRQ_NONE;
/*
* Handle the counter(s) overflow(s)
*/
regs = get_irq_regs();
perf_sample_data_init(&data, 0);
cpuc = &__get_cpu_var(cpu_hw_events);
for (idx = 0; idx <= armpmu->num_events; ++idx) {
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
if (!test_bit(idx, cpuc->active_mask))
continue;
/*
* We have a single interrupt for all counters. Check that
* each counter has overflowed before we process it.
*/
if (!armv7_pmnc_counter_has_overflowed(pmnc, idx))
continue;
hwc = &event->hw;
armpmu_event_update(event, hwc, idx, 1);
data.period = event->hw.last_period;
if (!armpmu_event_set_period(event, hwc, idx))
continue;
if (perf_event_overflow(event, 0, &data, regs))
armpmu->disable(hwc, idx);
}
/*
* Handle the pending perf events.
*
* Note: this call *must* be run with interrupts disabled. For
* platforms that can have the PMU interrupts raised as an NMI, this
* will not work.
*/
irq_work_run();
return IRQ_HANDLED;
}
| 8,681 |
49,154 | 0 | static bool __tpacket_has_room(struct packet_sock *po, int pow_off)
{
int idx, len;
len = po->rx_ring.frame_max + 1;
idx = po->rx_ring.head;
if (pow_off)
idx += len >> pow_off;
if (idx >= len)
idx -= len;
return packet_lookup_frame(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
}
| 8,682 |
180,331 | 1 | dissect_spoolss_keybuffer(tvbuff_t *tvb, int offset, packet_info *pinfo,
proto_tree *tree, dcerpc_info *di, guint8 *drep)
{
guint32 size;
int end_offset;
if (di->conformant_run)
return offset;
/* Dissect size and data */
offset = dissect_ndr_uint32(tvb, offset, pinfo, tree, di, drep,
hf_keybuffer_size, &size);
end_offset = offset + (size*2);
if (end_offset < offset) {
/*
* Overflow - make the end offset one past the end of
* the packet data, so we throw an exception (as the
* size is almost certainly too big).
*/
end_offset = tvb_reported_length_remaining(tvb, offset) + 1;
}
while (offset < end_offset)
offset = dissect_spoolss_uint16uni(
tvb, offset, pinfo, tree, drep, NULL, hf_keybuffer);
return offset;
}
| 8,683 |
145,376 | 0 | void SaveImpl(const char* name,
v8::Local<v8::Value> value,
v8::Local<v8::Context> context) {
CHECK(!value.IsEmpty() && value->IsObject()) << name;
context->Global()
->SetPrivate(context, MakeKey(name, context->GetIsolate()), value)
.FromJust();
}
| 8,684 |
19,445 | 0 | static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
struct efx_self_tests *tests,
struct ethtool_string *strings,
u64 *data)
{
struct efx_channel *channel;
unsigned int n = 0, i;
enum efx_loopback_mode mode;
efx_fill_test(n++, strings, data, &tests->phy_alive,
"phy", 0, "alive", NULL);
efx_fill_test(n++, strings, data, &tests->nvram,
"core", 0, "nvram", NULL);
efx_fill_test(n++, strings, data, &tests->interrupt,
"core", 0, "interrupt", NULL);
/* Event queues */
efx_for_each_channel(channel, efx) {
efx_fill_test(n++, strings, data,
&tests->eventq_dma[channel->channel],
EFX_CHANNEL_NAME(channel),
"eventq.dma", NULL);
efx_fill_test(n++, strings, data,
&tests->eventq_int[channel->channel],
EFX_CHANNEL_NAME(channel),
"eventq.int", NULL);
efx_fill_test(n++, strings, data,
&tests->eventq_poll[channel->channel],
EFX_CHANNEL_NAME(channel),
"eventq.poll", NULL);
}
efx_fill_test(n++, strings, data, &tests->registers,
"core", 0, "registers", NULL);
if (efx->phy_op->run_tests != NULL) {
EFX_BUG_ON_PARANOID(efx->phy_op->test_name == NULL);
for (i = 0; true; ++i) {
const char *name;
EFX_BUG_ON_PARANOID(i >= EFX_MAX_PHY_TESTS);
name = efx->phy_op->test_name(efx, i);
if (name == NULL)
break;
efx_fill_test(n++, strings, data, &tests->phy_ext[i],
"phy", 0, name, NULL);
}
}
/* Loopback tests */
for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) {
if (!(efx->loopback_modes & (1 << mode)))
continue;
n = efx_fill_loopback_test(efx,
&tests->loopback[mode], mode, n,
strings, data);
}
return n;
}
| 8,685 |
118,897 | 0 | base::TimeTicks WebContentsImpl::GetLastSelectedTime() const {
return last_selected_time_;
}
| 8,686 |
73,341 | 0 | static int ass_detect_change(ASS_Renderer *priv)
{
ASS_Image *img, *img2;
int diff;
if (priv->state.has_clips)
return 2;
img = priv->prev_images_root;
img2 = priv->images_root;
diff = 0;
while (img && diff < 2) {
ASS_Image *next, *next2;
next = img->next;
if (img2) {
int d = ass_image_compare(img, img2);
if (d > diff)
diff = d;
next2 = img2->next;
} else {
diff = 2;
break;
}
img = next;
img2 = next2;
}
if (img2)
diff = 2;
return diff;
}
| 8,687 |
3,069 | 0 | static int devicenrange(i_ctx_t * i_ctx_p, ref *space, float *ptr)
{
int i, limit, code;
PS_colour_space_t *cspace;
ref altspace;
code = array_get(imemory, space, 1, &altspace);
if (code < 0)
return code;
code = get_space_object(i_ctx_p, &altspace, &cspace);
if (code < 0)
return code;
code = cspace->numcomponents(i_ctx_p, &altspace, &limit);
if (code < 0)
return code;
for (i = 0;i < limit * 2;i+=2) {
ptr[i] = 0;
ptr[i+1] = 1;
}
return 0;
}
| 8,688 |
94,498 | 0 | static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
{
DECLARE_WAITQUEUE(wait, current);
struct rfcomm_dev *dev;
struct rfcomm_dlc *dlc;
unsigned long flags;
int err, id;
id = tty->index;
BT_DBG("tty %p id %d", tty, id);
/* We don't leak this refcount. For reasons which are not entirely
clear, the TTY layer will call our ->close() method even if the
open fails. We decrease the refcount there, and decreasing it
here too would cause breakage. */
dev = rfcomm_dev_get(id);
if (!dev)
return -ENODEV;
BT_DBG("dev %p dst %s channel %d opened %d", dev, batostr(&dev->dst),
dev->channel, dev->port.count);
spin_lock_irqsave(&dev->port.lock, flags);
if (++dev->port.count > 1) {
spin_unlock_irqrestore(&dev->port.lock, flags);
return 0;
}
spin_unlock_irqrestore(&dev->port.lock, flags);
dlc = dev->dlc;
/* Attach TTY and open DLC */
rfcomm_dlc_lock(dlc);
tty->driver_data = dev;
dev->port.tty = tty;
rfcomm_dlc_unlock(dlc);
set_bit(RFCOMM_TTY_ATTACHED, &dev->flags);
err = rfcomm_dlc_open(dlc, &dev->src, &dev->dst, dev->channel);
if (err < 0)
return err;
/* Wait for DLC to connect */
add_wait_queue(&dev->wait, &wait);
while (1) {
set_current_state(TASK_INTERRUPTIBLE);
if (dlc->state == BT_CLOSED) {
err = -dev->err;
break;
}
if (dlc->state == BT_CONNECTED)
break;
if (signal_pending(current)) {
err = -EINTR;
break;
}
tty_unlock();
schedule();
tty_lock();
}
set_current_state(TASK_RUNNING);
remove_wait_queue(&dev->wait, &wait);
if (err == 0)
device_move(dev->tty_dev, rfcomm_get_device(dev),
DPM_ORDER_DEV_AFTER_PARENT);
rfcomm_tty_copy_pending(dev);
rfcomm_dlc_unthrottle(dev->dlc);
return err;
}
| 8,689 |
55,499 | 0 | cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
{
struct rq *rq = cpu_rq(cpu);
struct sched_domain *tmp;
/* Remove the sched domains which do not contribute to scheduling. */
for (tmp = sd; tmp; ) {
struct sched_domain *parent = tmp->parent;
if (!parent)
break;
if (sd_parent_degenerate(tmp, parent)) {
tmp->parent = parent->parent;
if (parent->parent)
parent->parent->child = tmp;
/*
* Transfer SD_PREFER_SIBLING down in case of a
* degenerate parent; the spans match for this
* so the property transfers.
*/
if (parent->flags & SD_PREFER_SIBLING)
tmp->flags |= SD_PREFER_SIBLING;
destroy_sched_domain(parent, cpu);
} else
tmp = tmp->parent;
}
if (sd && sd_degenerate(sd)) {
tmp = sd;
sd = sd->parent;
destroy_sched_domain(tmp, cpu);
if (sd)
sd->child = NULL;
}
sched_domain_debug(sd, cpu);
rq_attach_root(rq, rd);
tmp = rq->sd;
rcu_assign_pointer(rq->sd, sd);
destroy_sched_domains(tmp, cpu);
update_top_cache_domain(cpu);
}
| 8,690 |
32,485 | 0 | static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
{
int res = 0;
struct tg3_fiber_aneginfo aninfo;
int status = ANEG_FAILED;
unsigned int tick;
u32 tmp;
tw32_f(MAC_TX_AUTO_NEG, 0);
tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
udelay(40);
tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
udelay(40);
memset(&aninfo, 0, sizeof(aninfo));
aninfo.flags |= MR_AN_ENABLE;
aninfo.state = ANEG_STATE_UNKNOWN;
aninfo.cur_time = 0;
tick = 0;
while (++tick < 195000) {
status = tg3_fiber_aneg_smachine(tp, &aninfo);
if (status == ANEG_DONE || status == ANEG_FAILED)
break;
udelay(1);
}
tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
tw32_f(MAC_MODE, tp->mac_mode);
udelay(40);
*txflags = aninfo.txconfig;
*rxflags = aninfo.flags;
if (status == ANEG_DONE &&
(aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
MR_LP_ADV_FULL_DUPLEX)))
res = 1;
return res;
}
| 8,691 |
161,825 | 0 | bool PlatformSensorWin::CheckSensorConfiguration(
const PlatformSensorConfiguration& configuration) {
DCHECK(task_runner_->BelongsToCurrentThread());
double minimal_reporting_interval_ms =
sensor_reader_->GetMinimalReportingIntervalMs();
if (minimal_reporting_interval_ms == 0)
return true;
double max_frequency =
base::Time::kMillisecondsPerSecond / minimal_reporting_interval_ms;
return configuration.frequency() <= max_frequency;
}
| 8,692 |
128,931 | 0 | void doWriteAlgorithmId(blink::WebCryptoAlgorithmId id)
{
switch (id) {
case blink::WebCryptoAlgorithmIdAesCbc:
return doWriteUint32(AesCbcTag);
case blink::WebCryptoAlgorithmIdHmac:
return doWriteUint32(HmacTag);
case blink::WebCryptoAlgorithmIdRsaSsaPkcs1v1_5:
return doWriteUint32(RsaSsaPkcs1v1_5Tag);
case blink::WebCryptoAlgorithmIdSha1:
return doWriteUint32(Sha1Tag);
case blink::WebCryptoAlgorithmIdSha256:
return doWriteUint32(Sha256Tag);
case blink::WebCryptoAlgorithmIdSha384:
return doWriteUint32(Sha384Tag);
case blink::WebCryptoAlgorithmIdSha512:
return doWriteUint32(Sha512Tag);
case blink::WebCryptoAlgorithmIdAesGcm:
return doWriteUint32(AesGcmTag);
case blink::WebCryptoAlgorithmIdRsaOaep:
return doWriteUint32(RsaOaepTag);
case blink::WebCryptoAlgorithmIdAesCtr:
return doWriteUint32(AesCtrTag);
case blink::WebCryptoAlgorithmIdAesKw:
return doWriteUint32(AesKwTag);
}
ASSERT_NOT_REACHED();
}
| 8,693 |
100,861 | 0 | int ClearAppCacheFunction::GetRemovalMask() const {
return BrowsingDataRemover::REMOVE_APPCACHE;
}
| 8,694 |
109,279 | 0 | static bool decodeBuffer(const char* buffer, unsigned size, const String& textEncodingName, String* result)
{
if (buffer) {
WTF::TextEncoding encoding(textEncodingName);
if (!encoding.isValid())
encoding = WindowsLatin1Encoding();
*result = encoding.decode(buffer, size);
return true;
}
return false;
}
| 8,695 |
83,521 | 0 | NSC_CONTEXT* nsc_context_new(void)
{
NSC_CONTEXT* context;
context = (NSC_CONTEXT*) calloc(1, sizeof(NSC_CONTEXT));
if (!context)
return NULL;
context->priv = (NSC_CONTEXT_PRIV*) calloc(1, sizeof(NSC_CONTEXT_PRIV));
if (!context->priv)
goto error;
context->priv->log = WLog_Get("com.freerdp.codec.nsc");
WLog_OpenAppender(context->priv->log);
context->BitmapData = NULL;
context->decode = nsc_decode;
context->encode = nsc_encode;
context->priv->PlanePool = BufferPool_New(TRUE, 0, 16);
if (!context->priv->PlanePool)
goto error;
PROFILER_CREATE(context->priv->prof_nsc_rle_decompress_data,
"nsc_rle_decompress_data")
PROFILER_CREATE(context->priv->prof_nsc_decode, "nsc_decode")
PROFILER_CREATE(context->priv->prof_nsc_rle_compress_data,
"nsc_rle_compress_data")
PROFILER_CREATE(context->priv->prof_nsc_encode, "nsc_encode")
/* Default encoding parameters */
context->ColorLossLevel = 3;
context->ChromaSubsamplingLevel = 1;
/* init optimized methods */
NSC_INIT_SIMD(context);
return context;
error:
nsc_context_free(context);
return NULL;
}
| 8,696 |
48,438 | 0 | Strsubstr(Str s, int beg, int len)
{
Str new_s;
int i;
STR_LENGTH_CHECK(s);
new_s = Strnew();
if (beg >= s->length)
return new_s;
for (i = 0; i < len && beg + i < s->length; i++)
Strcat_char(new_s, s->ptr[beg + i]);
return new_s;
}
| 8,697 |
128,620 | 0 | bool SkipConditionalFeatureEntry(const FeatureEntry& entry) {
version_info::Channel channel = chrome::GetChannel();
#if defined(OS_ANDROID)
if (!strcmp("enable-data-reduction-proxy-dev", entry.internal_name) &&
channel != version_info::Channel::BETA &&
channel != version_info::Channel::DEV) {
return true;
}
if (!strcmp("enable-data-reduction-proxy-alt", entry.internal_name) &&
channel != version_info::Channel::DEV) {
return true;
}
if (!strcmp("enable-data-reduction-proxy-carrier-test",
entry.internal_name) &&
channel != version_info::Channel::DEV &&
channel != version_info::Channel::CANARY &&
channel != version_info::Channel::UNKNOWN) {
return true;
}
#endif
if ((!strcmp("data-reduction-proxy-lo-fi", entry.internal_name) ||
!strcmp("enable-data-reduction-proxy-lo-fi-preview",
entry.internal_name)) &&
channel != version_info::Channel::BETA &&
channel != version_info::Channel::DEV &&
channel != version_info::Channel::CANARY &&
channel != version_info::Channel::UNKNOWN) {
return true;
}
return false;
}
| 8,698 |
8,167 | 0 | void Gfx::opShFill(Object args[], int numArgs) {
GfxShading *shading;
GfxPath *savedPath;
double xMin, yMin, xMax, yMax;
if (!(shading = res->lookupShading(args[0].getName(), this))) {
return;
}
savedPath = state->getPath()->copy();
saveState();
if (shading->getHasBBox()) {
shading->getBBox(&xMin, &yMin, &xMax, &yMax);
state->moveTo(xMin, yMin);
state->lineTo(xMax, yMin);
state->lineTo(xMax, yMax);
state->lineTo(xMin, yMax);
state->closePath();
state->clip();
out->clip(state);
state->clearPath();
}
state->setFillColorSpace(shading->getColorSpace()->copy());
out->updateFillColorSpace(state);
#if 1 //~tmp: turn off anti-aliasing temporarily
GBool vaa = out->getVectorAntialias();
if (vaa) {
out->setVectorAntialias(gFalse);
}
#endif
switch (shading->getType()) {
case 1:
doFunctionShFill((GfxFunctionShading *)shading);
break;
case 2:
doAxialShFill((GfxAxialShading *)shading);
break;
case 3:
doRadialShFill((GfxRadialShading *)shading);
break;
case 4:
case 5:
doGouraudTriangleShFill((GfxGouraudTriangleShading *)shading);
break;
case 6:
case 7:
doPatchMeshShFill((GfxPatchMeshShading *)shading);
break;
}
#if 1 //~tmp: turn off anti-aliasing temporarily
if (vaa) {
out->setVectorAntialias(gTrue);
}
#endif
restoreState();
state->setPath(savedPath);
delete shading;
}
| 8,699 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.