Dataset schema:

    unique_id            int64     13 to 189k
    target               int64     0 to 1
    code                 string    lengths 20 to 241k
    __index_level_0__    int64     0 to 18.9k

Each record below spans four lines: __index_level_0__ (row index), unique_id, target, then the code string (one function, with its original line breaks collapsed into a single line). The first record's row index falls before the start of this excerpt.
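As a sketch of how records in this layout are typically consumed, the snippet below loads the dataset and splits it by label. It is a minimal example, assuming a Hugging Face-style dataset; the identifier "example/labeled-functions" is a hypothetical placeholder (this excerpt does not name the source), and what the 0/1 labels denote is likewise not stated here.

    # Minimal sketch, not the canonical loader for this dataset.
    # "example/labeled-functions" is a hypothetical placeholder ID.
    from datasets import load_dataset

    ds = load_dataset("example/labeled-functions", split="train")

    # Split on the binary target column (0 or 1 per the schema above);
    # what each label means is an assumption left to the reader.
    positives = ds.filter(lambda row: row["target"] == 1)
    negatives = ds.filter(lambda row: row["target"] == 0)

    print(len(positives), "records with target=1,", len(negatives), "with target=0")
    print(positives[0]["unique_id"], positives[0]["code"][:80])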
52,935
0
void qib_device_remove(struct qib_devdata *dd) { qib_user_remove(dd); qib_diag_remove(dd); }
18,100
115,401
0
void InjectedBundlePage::didReachApplicationCacheOriginQuota(WKBundlePageRef page, WKSecurityOriginRef origin, int64_t totalBytesNeeded, const void* clientInfo) { static_cast<InjectedBundlePage*>(const_cast<void*>(clientInfo))->didReachApplicationCacheOriginQuota(origin, totalBytesNeeded); }
18,101
146,665
0
void PageInfoBubbleView::DidStartNavigation(content::NavigationHandle* handle) { GetWidget()->Close(); }
18,102
2,170
0
void red_channel_pipes_new_add_tail(RedChannel *channel, new_pipe_item_t creator, void *data) { red_channel_pipes_create_batch(channel, creator, data, red_channel_client_pipe_add_tail_no_push); }
18,103
108,510
0
void QuotaManager::GetUsageAndQuotaForEviction( const GetUsageAndQuotaForEvictionCallback& callback) { DCHECK(io_thread_->BelongsToCurrentThread()); GetUsageAndQuotaInternal( GURL(), kStorageTypeTemporary, true /* global */, callback); }
18,104
25,849
0
static struct p4_event_bind *p4_config_get_bind(u64 config) { unsigned int evnt = p4_config_unpack_event(config); struct p4_event_bind *bind = NULL; if (evnt < ARRAY_SIZE(p4_event_bind_map)) bind = &p4_event_bind_map[evnt]; return bind; }
18,105
83,496
0
do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str, struct pt_regs *regs, long error_code) { if (v8086_mode(regs)) { /* * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86. * On nmi (interrupt 2), do_trap should not be called. */ if (trapnr < X86_TRAP_UD) { if (!handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, trapnr)) return 0; } return -1; } if (!user_mode(regs)) { if (fixup_exception(regs, trapnr)) return 0; tsk->thread.error_code = error_code; tsk->thread.trap_nr = trapnr; die(str, regs, error_code); } return -1; }
18,106
179,061
1
static int inotify_release(struct inode *ignored, struct file *file) { struct fsnotify_group *group = file->private_data; struct user_struct *user = group->inotify_data.user; pr_debug("%s: group=%p\n", __func__, group); fsnotify_clear_marks_by_group(group); /* free this group, matching get was inotify_init->fsnotify_obtain_group */ fsnotify_put_group(group); atomic_dec(&user->inotify_devs); return 0; }
18,107
94,833
0
MagickExport ImageInfo *DestroyImageInfo(ImageInfo *image_info) { assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); if (image_info->size != (char *) NULL) image_info->size=DestroyString(image_info->size); if (image_info->extract != (char *) NULL) image_info->extract=DestroyString(image_info->extract); if (image_info->scenes != (char *) NULL) image_info->scenes=DestroyString(image_info->scenes); if (image_info->page != (char *) NULL) image_info->page=DestroyString(image_info->page); if (image_info->sampling_factor != (char *) NULL) image_info->sampling_factor=DestroyString( image_info->sampling_factor); if (image_info->server_name != (char *) NULL) image_info->server_name=DestroyString( image_info->server_name); if (image_info->font != (char *) NULL) image_info->font=DestroyString(image_info->font); if (image_info->texture != (char *) NULL) image_info->texture=DestroyString(image_info->texture); if (image_info->density != (char *) NULL) image_info->density=DestroyString(image_info->density); if (image_info->cache != (void *) NULL) image_info->cache=DestroyPixelCache(image_info->cache); if (image_info->profile != (StringInfo *) NULL) image_info->profile=(void *) DestroyStringInfo((StringInfo *) image_info->profile); DestroyImageOptions(image_info); image_info->signature=(~MagickCoreSignature); image_info=(ImageInfo *) RelinquishMagickMemory(image_info); return(image_info); }
18,108
34,456
0
static noinline int may_destroy_subvol(struct btrfs_root *root) { struct btrfs_path *path; struct btrfs_key key; int ret; path = btrfs_alloc_path(); if (!path) return -ENOMEM; key.objectid = root->root_key.objectid; key.type = BTRFS_ROOT_REF_KEY; key.offset = (u64)-1; ret = btrfs_search_slot(NULL, root->fs_info->tree_root, &key, path, 0, 0); if (ret < 0) goto out; BUG_ON(ret == 0); ret = 0; if (path->slots[0] > 0) { path->slots[0]--; btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); if (key.objectid == root->root_key.objectid && key.type == BTRFS_ROOT_REF_KEY) ret = -ENOTEMPTY; } out: btrfs_free_path(path); return ret; }
18,109
3,627
0
static int rsa_cms_decrypt(CMS_RecipientInfo *ri) { EVP_PKEY_CTX *pkctx; X509_ALGOR *cmsalg; int nid; int rv = -1; unsigned char *label = NULL; int labellen = 0; const EVP_MD *mgf1md = NULL, *md = NULL; RSA_OAEP_PARAMS *oaep; X509_ALGOR *maskHash; pkctx = CMS_RecipientInfo_get0_pkey_ctx(ri); if (!pkctx) return 0; if (!CMS_RecipientInfo_ktri_get0_algs(ri, NULL, NULL, &cmsalg)) return -1; nid = OBJ_obj2nid(cmsalg->algorithm); if (nid == NID_rsaEncryption) return 1; if (nid != NID_rsaesOaep) { RSAerr(RSA_F_RSA_CMS_DECRYPT, RSA_R_UNSUPPORTED_ENCRYPTION_TYPE); return -1; } /* Decode OAEP parameters */ oaep = rsa_oaep_decode(cmsalg, &maskHash); if (oaep == NULL) { RSAerr(RSA_F_RSA_CMS_DECRYPT, RSA_R_INVALID_OAEP_PARAMETERS); goto err; } mgf1md = rsa_mgf1_to_md(oaep->maskGenFunc, maskHash); if (!mgf1md) goto err; md = rsa_algor_to_md(oaep->hashFunc); if (!md) goto err; if (oaep->pSourceFunc) { X509_ALGOR *plab = oaep->pSourceFunc; if (OBJ_obj2nid(plab->algorithm) != NID_pSpecified) { RSAerr(RSA_F_RSA_CMS_DECRYPT, RSA_R_UNSUPPORTED_LABEL_SOURCE); goto err; } if (plab->parameter->type != V_ASN1_OCTET_STRING) { RSAerr(RSA_F_RSA_CMS_DECRYPT, RSA_R_INVALID_LABEL); goto err; } label = plab->parameter->value.octet_string->data; /* Stop label being freed when OAEP parameters are freed */ plab->parameter->value.octet_string->data = NULL; labellen = plab->parameter->value.octet_string->length; } if (EVP_PKEY_CTX_set_rsa_padding(pkctx, RSA_PKCS1_OAEP_PADDING) <= 0) goto err; if (EVP_PKEY_CTX_set_rsa_oaep_md(pkctx, md) <= 0) goto err; if (EVP_PKEY_CTX_set_rsa_mgf1_md(pkctx, mgf1md) <= 0) goto err; if (EVP_PKEY_CTX_set0_rsa_oaep_label(pkctx, label, labellen) <= 0) goto err; /* Carry on */ rv = 1; err: RSA_OAEP_PARAMS_free(oaep); if (maskHash) X509_ALGOR_free(maskHash); return rv; }
18,110
143,561
0
void OomInterventionTabHelper::DeclineIntervention() { RecordInterventionUserDecision(false); ResetInterfaces(); intervention_state_ = InterventionState::DECLINED; if (decider_) { DCHECK(!web_contents()->GetBrowserContext()->IsOffTheRecord()); const std::string& host = web_contents()->GetVisibleURL().host(); decider_->OnInterventionDeclined(host); } }
18,111
11,009
0
PHP_MINFO_FUNCTION(bcmath) { php_info_print_table_start(); php_info_print_table_row(2, "BCMath support", "enabled"); php_info_print_table_end(); DISPLAY_INI_ENTRIES(); }
18,112
13,918
0
void SSL_CTX_set_default_read_buffer_len(SSL_CTX *ctx, size_t len) { ctx->default_read_buf_len = len; }
18,113
83,522
0
BOOL nsc_context_reset(NSC_CONTEXT* context, UINT32 width, UINT32 height) { if (!context) return FALSE; context->width = width; context->height = height; return TRUE; }
18,114
32,581
0
static int tg3_halt_cpu(struct tg3 *tp, u32 offset) { int i; BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)); if (tg3_asic_rev(tp) == ASIC_REV_5906) { u32 val = tr32(GRC_VCPU_EXT_CTRL); tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU); return 0; } if (offset == RX_CPU_BASE) { for (i = 0; i < 10000; i++) { tw32(offset + CPU_STATE, 0xffffffff); tw32(offset + CPU_MODE, CPU_MODE_HALT); if (tr32(offset + CPU_MODE) & CPU_MODE_HALT) break; } tw32(offset + CPU_STATE, 0xffffffff); tw32_f(offset + CPU_MODE, CPU_MODE_HALT); udelay(10); } else { /* * There is only an Rx CPU for the 5750 derivative in the * BCM4785. */ if (tg3_flag(tp, IS_SSB_CORE)) return 0; for (i = 0; i < 10000; i++) { tw32(offset + CPU_STATE, 0xffffffff); tw32(offset + CPU_MODE, CPU_MODE_HALT); if (tr32(offset + CPU_MODE) & CPU_MODE_HALT) break; } } if (i >= 10000) { netdev_err(tp->dev, "%s timed out, %s CPU\n", __func__, offset == RX_CPU_BASE ? "RX" : "TX"); return -ENODEV; } /* Clear firmware's nvram arbitration. */ if (tg3_flag(tp, NVRAM)) tw32(NVRAM_SWARB, SWARB_REQ_CLR0); return 0; }
18,115
36,986
0
static struct vmcs *alloc_vmcs(void) { return alloc_vmcs_cpu(raw_smp_processor_id()); }
18,116
2,049
0
prune_bounding_set(void) { int i, rc = 0; static int bounding_set_cleared; if (bounding_set_cleared) return 0; for (i = 0; i <= CAP_LAST_CAP && rc == 0; ++i) rc = prctl(PR_CAPBSET_DROP, i); if (rc != 0) { fprintf(stderr, "Unable to clear capability bounding set: %d\n", rc); return EX_SYSERR; } ++bounding_set_cleared; return 0; }
18,117
17,619
0
SProcRenderCreateSolidFill(ClientPtr client) { REQUEST(xRenderCreateSolidFillReq); REQUEST_AT_LEAST_SIZE(xRenderCreateSolidFillReq); swaps(&stuff->length); swapl(&stuff->pid); swaps(&stuff->color.alpha); swaps(&stuff->color.red); swaps(&stuff->color.green); swaps(&stuff->color.blue); return (*ProcRenderVector[stuff->renderReqType]) (client); }
18,118
89,440
0
static int nfc_genl_dump_devices(struct sk_buff *skb, struct netlink_callback *cb) { struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0]; struct nfc_dev *dev = (struct nfc_dev *) cb->args[1]; bool first_call = false; if (!iter) { first_call = true; iter = kmalloc(sizeof(struct class_dev_iter), GFP_KERNEL); if (!iter) return -ENOMEM; cb->args[0] = (long) iter; } mutex_lock(&nfc_devlist_mutex); cb->seq = nfc_devlist_generation; if (first_call) { nfc_device_iter_init(iter); dev = nfc_device_iter_next(iter); } while (dev) { int rc; rc = nfc_genl_send_device(skb, dev, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, cb, NLM_F_MULTI); if (rc < 0) break; dev = nfc_device_iter_next(iter); } mutex_unlock(&nfc_devlist_mutex); cb->args[1] = (long) dev; return skb->len; }
18,119
150,242
0
WindowStateType TabletModeWindowState::GetType() const { return current_state_type_; }
18,120
95,144
0
static void cmd_getacl(const char *tag, const char *name) { int r, access; char *acl; char *rights, *nextid; char *freeme = NULL; mbentry_t *mbentry = NULL; char *intname = mboxname_from_external(name, &imapd_namespace, imapd_userid); r = mlookup(tag, name, intname, &mbentry); if (r == IMAP_MAILBOX_MOVED) return; if (!r) { access = cyrus_acl_myrights(imapd_authstate, mbentry->acl); if (!(access & ACL_ADMIN) && !imapd_userisadmin && !mboxname_userownsmailbox(imapd_userid, intname)) { r = (access & ACL_LOOKUP) ? IMAP_PERMISSION_DENIED : IMAP_MAILBOX_NONEXISTENT; } } imapd_check(NULL, 0); if (r) { prot_printf(imapd_out, "%s NO %s\r\n", tag, error_message(r)); mboxlist_entry_free(&mbentry); free(intname); return; } prot_printf(imapd_out, "* ACL "); prot_printastring(imapd_out, name); freeme = acl = xstrdupnull(mbentry->acl); while (acl) { rights = strchr(acl, '\t'); if (!rights) break; *rights++ = '\0'; nextid = strchr(rights, '\t'); if (!nextid) break; *nextid++ = '\0'; prot_printf(imapd_out, " "); prot_printastring(imapd_out, acl); prot_printf(imapd_out, " "); prot_printastring(imapd_out, rights); acl = nextid; } prot_printf(imapd_out, "\r\n"); prot_printf(imapd_out, "%s OK %s\r\n", tag, error_message(IMAP_OK_COMPLETED)); free(freeme); mboxlist_entry_free(&mbentry); free(intname); }
18,121
138,478
0
void HTMLScriptRunner::executeScriptsWaitingForLoad(Resource* resource) { ASSERT(!isExecutingScript()); ASSERT(hasParserBlockingScript()); ASSERT_UNUSED(resource, m_parserBlockingScript.resource() == resource); ASSERT(m_parserBlockingScript.isReady()); executeParsingBlockingScripts(); }
18,122
36,505
0
card_number_show_attr(struct device *dev, struct device_attribute *attr, char *buf) { struct snd_card *card = container_of(dev, struct snd_card, card_dev); return snprintf(buf, PAGE_SIZE, "%i\n", card->number); }
18,123
58,345
0
static void null_restart(char mode, const char *cmd) { }
18,124
48,848
0
void net_disable_timestamp(void) { #ifdef HAVE_JUMP_LABEL if (in_interrupt()) { atomic_inc(&netstamp_needed_deferred); return; } #endif static_key_slow_dec(&netstamp_needed); }
18,125
69,302
0
void ssl_set_sig_mask(uint32_t *pmask_a, SSL *s, int op) { const unsigned char *sigalgs; size_t i, sigalgslen; int have_rsa = 0, have_dsa = 0, have_ecdsa = 0; /* * Now go through all signature algorithms seeing if we support any for * RSA, DSA, ECDSA. Do this for all versions not just TLS 1.2. To keep * down calls to security callback only check if we have to. */ sigalgslen = tls12_get_psigalgs(s, 1, &sigalgs); for (i = 0; i < sigalgslen; i += 2, sigalgs += 2) { switch (sigalgs[1]) { #ifndef OPENSSL_NO_RSA case TLSEXT_signature_rsa: if (!have_rsa && tls12_sigalg_allowed(s, op, sigalgs)) have_rsa = 1; break; #endif #ifndef OPENSSL_NO_DSA case TLSEXT_signature_dsa: if (!have_dsa && tls12_sigalg_allowed(s, op, sigalgs)) have_dsa = 1; break; #endif #ifndef OPENSSL_NO_EC case TLSEXT_signature_ecdsa: if (!have_ecdsa && tls12_sigalg_allowed(s, op, sigalgs)) have_ecdsa = 1; break; #endif } } if (!have_rsa) *pmask_a |= SSL_aRSA; if (!have_dsa) *pmask_a |= SSL_aDSS; if (!have_ecdsa) *pmask_a |= SSL_aECDSA; }
18,126
52,044
0
dissect_spoolss_relstrarray(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep, int hf_index, int struct_start, char **data) { proto_item *item; proto_tree *subtree; guint32 relstr_offset, relstr_start/*, relstr_end, relstr_len*/; char *text; item = proto_tree_add_string(tree, hf_index, tvb, offset, 4, ""); subtree = proto_item_add_subtree(item, ett_RELSTR_ARRAY); offset = dissect_ndr_uint32( tvb, offset, pinfo, subtree, di, drep, hf_offset, &relstr_offset); /* A relative offset of zero is a NULL string */ relstr_start = relstr_offset + struct_start; if (relstr_offset) /*relstr_end = */dissect_spoolss_uint16uni( tvb, relstr_start, pinfo, subtree, drep, &text, hf_relative_string); else { text = g_strdup("NULL"); /*relstr_end = offset;*/ } /*relstr_len = relstr_end - relstr_start;*/ proto_item_append_text(item, "%s", text); if (data) *data = text; else g_free(text); return offset; }
18,127
63,477
0
YR_OBJECT* yr_object_lookup_field( YR_OBJECT* object, const char* field_name) { YR_STRUCTURE_MEMBER* member; assert(object != NULL); assert(object->type == OBJECT_TYPE_STRUCTURE); member = object_as_structure(object)->members; while (member != NULL) { if (strcmp(member->object->identifier, field_name) == 0) return member->object; member = member->next; } return NULL; }
18,128
76,623
0
static void put_object_name(struct fsck_options *options, struct object *obj, const char *fmt, ...) { va_list ap; struct strbuf buf = STRBUF_INIT; char *existing; if (!options->object_names) return; existing = lookup_decoration(options->object_names, obj); if (existing) return; va_start(ap, fmt); strbuf_vaddf(&buf, fmt, ap); add_decoration(options->object_names, obj, strbuf_detach(&buf, NULL)); va_end(ap); }
18,129
105,439
0
WebKitLoadStatus webkit_web_view_get_load_status(WebKitWebView* webView) { g_return_val_if_fail(WEBKIT_IS_WEB_VIEW(webView), WEBKIT_LOAD_FINISHED); WebKitWebViewPrivate* priv = webView->priv; return priv->loadStatus; }
18,130
40,318
0
static int atalk_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) { struct ddpehdr *ddp; struct sock *sock; struct atalk_iface *atif; struct sockaddr_at tosat; int origlen; __u16 len_hops; if (!net_eq(dev_net(dev), &init_net)) goto drop; /* Don't mangle buffer if shared */ if (!(skb = skb_share_check(skb, GFP_ATOMIC))) goto out; /* Size check and make sure header is contiguous */ if (!pskb_may_pull(skb, sizeof(*ddp))) goto drop; ddp = ddp_hdr(skb); len_hops = ntohs(ddp->deh_len_hops); /* Trim buffer in case of stray trailing data */ origlen = skb->len; skb_trim(skb, min_t(unsigned int, skb->len, len_hops & 1023)); /* * Size check to see if ddp->deh_len was crap * (Otherwise we'll detonate most spectacularly * in the middle of atalk_checksum() or recvmsg()). */ if (skb->len < sizeof(*ddp) || skb->len < (len_hops & 1023)) { pr_debug("AppleTalk: dropping corrupted frame (deh_len=%u, " "skb->len=%u)\n", len_hops & 1023, skb->len); goto drop; } /* * Any checksums. Note we don't do htons() on this == is assumed to be * valid for net byte orders all over the networking code... */ if (ddp->deh_sum && atalk_checksum(skb, len_hops & 1023) != ddp->deh_sum) /* Not a valid AppleTalk frame - dustbin time */ goto drop; /* Check the packet is aimed at us */ if (!ddp->deh_dnet) /* Net 0 is 'this network' */ atif = atalk_find_anynet(ddp->deh_dnode, dev); else atif = atalk_find_interface(ddp->deh_dnet, ddp->deh_dnode); if (!atif) { /* Not ours, so we route the packet via the correct * AppleTalk iface */ return atalk_route_packet(skb, dev, ddp, len_hops, origlen); } /* if IP over DDP is not selected this code will be optimized out */ if (is_ip_over_ddp(skb)) return handle_ip_over_ddp(skb); /* * Which socket - atalk_search_socket() looks for a *full match* * of the <net, node, port> tuple. */ tosat.sat_addr.s_net = ddp->deh_dnet; tosat.sat_addr.s_node = ddp->deh_dnode; tosat.sat_port = ddp->deh_dport; sock = atalk_search_socket(&tosat, atif); if (!sock) /* But not one of our sockets */ goto drop; /* Queue packet (standard) */ skb->sk = sock; if (sock_queue_rcv_skb(sock, skb) < 0) goto drop; return NET_RX_SUCCESS; drop: kfree_skb(skb); out: return NET_RX_DROP; }
18,131
181,528
1
checked_xcalloc (size_t num, size_t size) { alloc_limit_assert ("checked_xcalloc", (num *size)); return xcalloc (num, size); }
18,132
87,910
0
static bool fm10k_alloc_mapped_page(struct fm10k_ring *rx_ring, struct fm10k_rx_buffer *bi) { struct page *page = bi->page; dma_addr_t dma; /* Only page will be NULL if buffer was consumed */ if (likely(page)) return true; /* alloc new page for storage */ page = dev_alloc_page(); if (unlikely(!page)) { rx_ring->rx_stats.alloc_failed++; return false; } /* map page for use */ dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); /* if mapping failed free memory back to system since * there isn't much point in holding memory we can't use */ if (dma_mapping_error(rx_ring->dev, dma)) { __free_page(page); rx_ring->rx_stats.alloc_failed++; return false; } bi->dma = dma; bi->page = page; bi->page_offset = 0; return true; }
18,133
107,000
0
void QQuickWebView::goBack() { Q_D(QQuickWebView); d->webPageProxy->goBack(); }
18,134
181,162
1
static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, bool from_vmentry, u32 *entry_failure_code) { struct vcpu_vmx *vmx = to_vmx(vcpu); u32 exec_control, vmcs12_exec_ctrl; vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector); vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector); vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector); vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector); vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector); vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector); vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector); vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector); vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit); vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit); vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit); vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit); vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit); vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit); vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit); vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit); vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit); vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit); vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes); vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes); vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes); vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes); vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes); vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes); vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes); vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes); vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base); vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base); vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base); vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base); vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base); vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base); vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base); vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base); vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base); vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base); if (from_vmentry && (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) { kvm_set_dr(vcpu, 7, vmcs12->guest_dr7); vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl); } else { kvm_set_dr(vcpu, 7, vcpu->arch.dr7); vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl); } if (from_vmentry) { vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, vmcs12->vm_entry_intr_info_field); vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, vmcs12->vm_entry_exception_error_code); vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, vmcs12->vm_entry_instruction_len); vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, vmcs12->guest_interruptibility_info); vmx->loaded_vmcs->nmi_known_unmasked = !(vmcs12->guest_interruptibility_info & GUEST_INTR_STATE_NMI); } else { vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); } vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs); vmx_set_rflags(vcpu, vmcs12->guest_rflags); vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, vmcs12->guest_pending_dbg_exceptions); vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp); vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip); if (nested_cpu_has_xsaves(vmcs12)) vmcs_write64(XSS_EXIT_BITMAP, vmcs12->xss_exit_bitmap); vmcs_write64(VMCS_LINK_POINTER, -1ull); exec_control = vmcs12->pin_based_vm_exec_control; /* Preemption timer setting is only taken from vmcs01. */ exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER; exec_control |= vmcs_config.pin_based_exec_ctrl; if (vmx->hv_deadline_tsc == -1) exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER; /* Posted interrupts setting is only taken from vmcs12. */ if (nested_cpu_has_posted_intr(vmcs12)) { vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv; vmx->nested.pi_pending = false; vmcs_write16(POSTED_INTR_NV, POSTED_INTR_NESTED_VECTOR); } else { exec_control &= ~PIN_BASED_POSTED_INTR; } vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, exec_control); vmx->nested.preemption_timer_expired = false; if (nested_cpu_has_preemption_timer(vmcs12)) vmx_start_preemption_timer(vcpu); /* * Whether page-faults are trapped is determined by a combination of * 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF. * If enable_ept, L0 doesn't care about page faults and we should * set all of these to L1's desires. However, if !enable_ept, L0 does * care about (at least some) page faults, and because it is not easy * (if at all possible?) to merge L0 and L1's desires, we simply ask * to exit on each and every L2 page fault. This is done by setting * MASK=MATCH=0 and (see below) EB.PF=1. * Note that below we don't need special code to set EB.PF beyond the * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept, * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when * !enable_ept, EB.PF is 1, so the "or" will always be 1. */ vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, enable_ept ? vmcs12->page_fault_error_code_mask : 0); vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, enable_ept ? vmcs12->page_fault_error_code_match : 0); if (cpu_has_secondary_exec_ctrls()) { exec_control = vmx->secondary_exec_control; /* Take the following fields only from vmcs12 */ exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | SECONDARY_EXEC_ENABLE_INVPCID | SECONDARY_EXEC_RDTSCP | SECONDARY_EXEC_XSAVES | SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | SECONDARY_EXEC_APIC_REGISTER_VIRT | SECONDARY_EXEC_ENABLE_VMFUNC); if (nested_cpu_has(vmcs12, CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)) { vmcs12_exec_ctrl = vmcs12->secondary_vm_exec_control & ~SECONDARY_EXEC_ENABLE_PML; exec_control |= vmcs12_exec_ctrl; } /* All VMFUNCs are currently emulated through L0 vmexits. */ if (exec_control & SECONDARY_EXEC_ENABLE_VMFUNC) vmcs_write64(VM_FUNCTION_CONTROL, 0); if (exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) { vmcs_write64(EOI_EXIT_BITMAP0, vmcs12->eoi_exit_bitmap0); vmcs_write64(EOI_EXIT_BITMAP1, vmcs12->eoi_exit_bitmap1); vmcs_write64(EOI_EXIT_BITMAP2, vmcs12->eoi_exit_bitmap2); vmcs_write64(EOI_EXIT_BITMAP3, vmcs12->eoi_exit_bitmap3); vmcs_write16(GUEST_INTR_STATUS, vmcs12->guest_intr_status); } /* * Write an illegal value to APIC_ACCESS_ADDR. Later, * nested_get_vmcs12_pages will either fix it up or * remove the VM execution control. */ if (exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) vmcs_write64(APIC_ACCESS_ADDR, -1ull); vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control); } /* * Set host-state according to L0's settings (vmcs12 is irrelevant here) * Some constant fields are set here by vmx_set_constant_host_state(). * Other fields are different per CPU, and will be set later when * vmx_vcpu_load() is called, and when vmx_save_host_state() is called. */ vmx_set_constant_host_state(vmx); /* * Set the MSR load/store lists to match L0's settings. */ vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0); vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.nr); vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host)); vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.nr); vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest)); /* * HOST_RSP is normally set correctly in vmx_vcpu_run() just before * entry, but only if the current (host) sp changed from the value * we wrote last (vmx->host_rsp). This cache is no longer relevant * if we switch vmcs, and rather than hold a separate cache per vmcs, * here we just force the write to happen on entry. */ vmx->host_rsp = 0; exec_control = vmx_exec_control(vmx); /* L0's desires */ exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING; exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING; exec_control &= ~CPU_BASED_TPR_SHADOW; exec_control |= vmcs12->cpu_based_vm_exec_control; /* * Write an illegal value to VIRTUAL_APIC_PAGE_ADDR. Later, if * nested_get_vmcs12_pages can't fix it up, the illegal value * will result in a VM entry failure. */ if (exec_control & CPU_BASED_TPR_SHADOW) { vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, -1ull); vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold); } /* * Merging of IO bitmap not currently supported. * Rather, exit every time. */ exec_control &= ~CPU_BASED_USE_IO_BITMAPS; exec_control |= CPU_BASED_UNCOND_IO_EXITING; vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control); /* EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the * bitwise-or of what L1 wants to trap for L2, and what we want to * trap. Note that CR0.TS also needs updating - we do this later. */ update_exception_bitmap(vcpu); vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask; vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits); /* L2->L1 exit controls are emulated - the hardware exit is to L0 so * we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER * bits are further modified by vmx_set_efer() below. */ vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl); /* vmcs12's VM_ENTRY_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE are * emulated by vmx_set_efer(), below. */ vm_entry_controls_init(vmx, (vmcs12->vm_entry_controls & ~VM_ENTRY_LOAD_IA32_EFER & ~VM_ENTRY_IA32E_MODE) | (vmcs_config.vmentry_ctrl & ~VM_ENTRY_IA32E_MODE)); if (from_vmentry && (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT)) { vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat); vcpu->arch.pat = vmcs12->guest_ia32_pat; } else if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) { vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat); } set_cr4_guest_host_mask(vmx); if (from_vmentry && vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs); if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING) vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset + vmcs12->tsc_offset); else vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset); if (kvm_has_tsc_control) decache_tsc_multiplier(vmx); if (enable_vpid) { /* * There is no direct mapping between vpid02 and vpid12, the * vpid02 is per-vCPU for L0 and reused while the value of * vpid12 is changed w/ one invvpid during nested vmentry. * The vpid12 is allocated by L1 for L2, so it will not * influence global bitmap(for vpid01 and vpid02 allocation) * even if spawn a lot of nested vCPUs. */ if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02) { vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02); if (vmcs12->virtual_processor_id != vmx->nested.last_vpid) { vmx->nested.last_vpid = vmcs12->virtual_processor_id; __vmx_flush_tlb(vcpu, to_vmx(vcpu)->nested.vpid02); } } else { vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid); vmx_flush_tlb(vcpu); } } if (enable_pml) { /* * Conceptually we want to copy the PML address and index from * vmcs01 here, and then back to vmcs01 on nested vmexit. But, * since we always flush the log on each vmexit, this happens * to be equivalent to simply resetting the fields in vmcs02. */ ASSERT(vmx->pml_pg); vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg)); vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1); } if (nested_cpu_has_ept(vmcs12)) { if (nested_ept_init_mmu_context(vcpu)) { *entry_failure_code = ENTRY_FAIL_DEFAULT; return 1; } } else if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { vmx_flush_tlb_ept_only(vcpu); } /* * This sets GUEST_CR0 to vmcs12->guest_cr0, possibly modifying those * bits which we consider mandatory enabled. * The CR0_READ_SHADOW is what L2 should have expected to read given * the specifications by L1; It's not enough to take * vmcs12->cr0_read_shadow because on our cr0_guest_host_mask we we * have more bits than L1 expected. */ vmx_set_cr0(vcpu, vmcs12->guest_cr0); vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12)); vmx_set_cr4(vcpu, vmcs12->guest_cr4); vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12)); if (from_vmentry && (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) vcpu->arch.efer = vmcs12->guest_ia32_efer; else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) vcpu->arch.efer |= (EFER_LMA | EFER_LME); else vcpu->arch.efer &= ~(EFER_LMA | EFER_LME); /* Note: modifies VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */ vmx_set_efer(vcpu, vcpu->arch.efer); /* Shadow page tables on either EPT or shadow page tables. */ if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12), entry_failure_code)) return 1; if (!enable_ept) vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested; /* * L1 may access the L2's PDPTR, so save them to construct vmcs12 */ if (enable_ept) { vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0); vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1); vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2); vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3); } kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp); kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->guest_rip); return 0; }
18,135
116,393
0
GURL ChromeContentRendererClient::GetNaClContentHandlerURL( const std::string& actual_mime_type, const webkit::WebPluginInfo& plugin) { const char* kNaClPluginManifestAttribute = "nacl"; string16 nacl_attr = ASCIIToUTF16(kNaClPluginManifestAttribute); for (size_t i = 0; i < plugin.mime_types.size(); ++i) { if (plugin.mime_types[i].mime_type == actual_mime_type) { const WebPluginMimeType& content_type = plugin.mime_types[i]; for (size_t i = 0; i < content_type.additional_param_names.size(); ++i) { if (content_type.additional_param_names[i] == nacl_attr) return GURL(content_type.additional_param_values[i]); } break; } } return GURL(); }
18,136
3,506
0
irc_server_recv_cb (void *data, int fd) { struct t_irc_server *server; static char buffer[4096 + 2]; int num_read; /* make C compiler happy */ (void) fd; server = (struct t_irc_server *)data; if (!server) return WEECHAT_RC_ERROR; #ifdef HAVE_GNUTLS if (server->ssl_connected) num_read = gnutls_record_recv (server->gnutls_sess, buffer, sizeof (buffer) - 2); else #endif num_read = recv (server->sock, buffer, sizeof (buffer) - 2, 0); if (num_read > 0) { buffer[num_read] = '\0'; irc_server_msgq_add_buffer (server, buffer); irc_server_msgq_flush (); } else { #ifdef HAVE_GNUTLS if (server->ssl_connected) { if ((num_read == 0) || ((num_read != GNUTLS_E_AGAIN) && (num_read != GNUTLS_E_INTERRUPTED))) { weechat_printf (server->buffer, _("%s%s: reading data on socket: error %d %s"), weechat_prefix ("error"), IRC_PLUGIN_NAME, num_read, (num_read == 0) ? _("(connection closed by peer)") : gnutls_strerror (num_read)); weechat_printf (server->buffer, _("%s: disconnecting from server..."), IRC_PLUGIN_NAME); irc_server_disconnect (server, !server->is_connected, 1); } } else #endif { if ((num_read == 0) || ((errno != EAGAIN) && (errno != EWOULDBLOCK))) { weechat_printf (server->buffer, _("%s%s: reading data on socket: error %d %s"), weechat_prefix ("error"), IRC_PLUGIN_NAME, errno, (num_read == 0) ? _("(connection closed by peer)") : strerror (errno)); weechat_printf (server->buffer, _("%s: disconnecting from server..."), IRC_PLUGIN_NAME); irc_server_disconnect (server, !server->is_connected, 1); } } } return WEECHAT_RC_OK; }
18,137
67,230
0
static int brcmf_cfg80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *ndev, u64 reqid) { struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); struct brcmf_if *ifp = netdev_priv(ndev); brcmf_dbg(SCAN, "enter\n"); brcmf_pno_stop_sched_scan(ifp, reqid); if (cfg->int_escan_map) brcmf_notify_escan_complete(cfg, ifp, true, true); return 0; }
18,138
22,845
0
static int _nfs4_proc_lookupfh(struct nfs_server *server, const struct nfs_fh *dirfh, const struct qstr *name, struct nfs_fh *fhandle, struct nfs_fattr *fattr) { int status; struct nfs4_lookup_arg args = { .bitmask = server->attr_bitmask, .dir_fh = dirfh, .name = name, }; struct nfs4_lookup_res res = { .server = server, .fattr = fattr, .fh = fhandle, }; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP], .rpc_argp = &args, .rpc_resp = &res, }; nfs_fattr_init(fattr); dprintk("NFS call lookupfh %s\n", name->name); status = rpc_call_sync(server->client, &msg, 0); dprintk("NFS reply lookupfh: %d\n", status); return status; }
18,139
154,621
0
error::Error GLES2DecoderPassthroughImpl::DoDeleteRenderbuffers( GLsizei n, const volatile GLuint* renderbuffers) { if (n < 0) { InsertError(GL_INVALID_VALUE, "n cannot be negative."); return error::kNoError; } return DeleteHelper(n, renderbuffers, &resources_->renderbuffer_id_map, [this](GLsizei n, GLuint* renderbuffers) { api()->glDeleteRenderbuffersEXTFn(n, renderbuffers); }); }
18,140
5,472
0
static Bool Ins_SxVTL( EXEC_OPS Int aIdx1, Int aIdx2, Int aOpc, TT_UnitVector* Vec ) { Long A, B, C; if ( BOUNDS( aIdx1, CUR.zp2.n_points ) || BOUNDS( aIdx2, CUR.zp1.n_points ) ) { CUR.error = TT_Err_Invalid_Reference; return FAILURE; } A = CUR.zp1.cur_x[aIdx2] - CUR.zp2.cur_x[aIdx1]; B = CUR.zp1.cur_y[aIdx2] - CUR.zp2.cur_y[aIdx1]; if ( (aOpc & 1) != 0 ) { C = B; /* CounterClockwise rotation */ B = A; A = -C; } if ( NORMalize( A, B, Vec ) == FAILURE ) { /* When the vector is too small or zero! */ CUR.error = TT_Err_Ok; Vec->x = 0x4000; Vec->y = 0; } return SUCCESS; }
18,141
140,400
0
void TypingCommand::insertLineBreak(EditingState* editingState) { if (!canAppendNewLineFeedToSelection(endingSelection())) return; applyCommandToComposite(InsertLineBreakCommand::create(document()), editingState); if (editingState->isAborted()) return; typingAddedToOpenCommand(InsertLineBreak); }
18,142
10,534
0
static int parse_timeout(const char *target) { QemuOptsList *list; QemuOpts *opts; const char *timeout; list = qemu_find_opts("iscsi"); if (list) { opts = qemu_opts_find(list, target); if (!opts) { opts = QTAILQ_FIRST(&list->head); } if (opts) { timeout = qemu_opt_get(opts, "timeout"); if (timeout) { return atoi(timeout); } } } return 0; }
18,143
5,210
0
static void _close_pgsql_plink(zend_resource *rsrc) { PGconn *link = (PGconn *)rsrc->ptr; PGresult *res; while ((res = PQgetResult(link))) { PQclear(res); } PQfinish(link); PGG(num_persistent)--; PGG(num_links)--; }
18,144
43,209
0
static struct oz_urb_link *oz_uncancel_urb(struct oz_hcd *ozhcd, struct urb *urb) { struct oz_urb_link *urbl; list_for_each_entry(urbl, &ozhcd->urb_cancel_list, link) { if (urb == urbl->urb) { list_del_init(&urbl->link); return urbl; } } return NULL; }
18,145
36,004
0
small_smb2_init(__le16 smb2_command, struct cifs_tcon *tcon, void **request_buf) { int rc = 0; rc = smb2_reconnect(smb2_command, tcon); if (rc) return rc; /* BB eventually switch this to SMB2 specific small buf size */ *request_buf = cifs_small_buf_get(); if (*request_buf == NULL) { /* BB should we add a retry in here if not a writepage? */ return -ENOMEM; } smb2_hdr_assemble((struct smb2_hdr *) *request_buf, smb2_command, tcon); if (tcon != NULL) { #ifdef CONFIG_CIFS_STATS2 uint16_t com_code = le16_to_cpu(smb2_command); cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_sent[com_code]); #endif cifs_stats_inc(&tcon->num_smbs_sent); } return rc; }
18,146
73
0
static long timelib_parse_tz_cor(char **ptr) { char *begin = *ptr, *end; long tmp; while (isdigit(**ptr) || **ptr == ':') { ++*ptr; } end = *ptr; switch (end - begin) { case 1: case 2: return HOUR(strtol(begin, NULL, 10)); break; case 3: case 4: if (begin[1] == ':') { tmp = HOUR(strtol(begin, NULL, 10)) + strtol(begin + 2, NULL, 10); return tmp; } else if (begin[2] == ':') { tmp = HOUR(strtol(begin, NULL, 10)) + strtol(begin + 3, NULL, 10); return tmp; } else { tmp = strtol(begin, NULL, 10); return HOUR(tmp / 100) + tmp % 100; } case 5: tmp = HOUR(strtol(begin, NULL, 10)) + strtol(begin + 3, NULL, 10); return tmp; } return 0; }
18,147
147,302
0
void V8TestObject::CustomGetterLongAttributeAttributeSetterCallback( const v8::FunctionCallbackInfo<v8::Value>& info) { RUNTIME_CALL_TIMER_SCOPE_DISABLED_BY_DEFAULT(info.GetIsolate(), "Blink_TestObject_customGetterLongAttribute_Setter"); v8::Local<v8::Value> v8_value = info[0]; test_object_v8_internal::CustomGetterLongAttributeAttributeSetter(v8_value, info); }
18,148
101,599
0
void Browser::CloseWindow() { UserMetrics::RecordAction(UserMetricsAction("CloseWindow")); window_->Close(); }
18,149
129,607
0
AffineTransform LayoutSVGViewportContainer::viewportTransform() const { ASSERT(element()); if (isSVGSVGElement(*element())) { SVGSVGElement* svg = toSVGSVGElement(element()); return svg->viewBoxToViewTransform(m_viewport.width(), m_viewport.height()); } return AffineTransform(); }
18,150
101,232
0
std::string GetUpdatesResponseString( const sync_pb::GetUpdatesResponse& response) { std::string output; output.append("GetUpdatesResponse:\n"); for (int i = 0; i < response.entries_size(); i++) { output.append(SyncerProtoUtil::SyncEntityDebugString(response.entries(i))); output.append("\n"); } return output; }
18,151
60,721
0
static ssize_t tower_read (struct file *file, char __user *buffer, size_t count, loff_t *ppos) { struct lego_usb_tower *dev; size_t bytes_to_read; int i; int retval = 0; unsigned long timeout = 0; dev = file->private_data; /* lock this object */ if (mutex_lock_interruptible(&dev->lock)) { retval = -ERESTARTSYS; goto exit; } /* verify that the device wasn't unplugged */ if (dev->udev == NULL) { retval = -ENODEV; pr_err("No device or device unplugged %d\n", retval); goto unlock_exit; } /* verify that we actually have some data to read */ if (count == 0) { dev_dbg(&dev->udev->dev, "read request of 0 bytes\n"); goto unlock_exit; } if (read_timeout) { timeout = jiffies + msecs_to_jiffies(read_timeout); } /* wait for data */ tower_check_for_read_packet (dev); while (dev->read_packet_length == 0) { if (file->f_flags & O_NONBLOCK) { retval = -EAGAIN; goto unlock_exit; } retval = wait_event_interruptible_timeout(dev->read_wait, dev->interrupt_in_done, dev->packet_timeout_jiffies); if (retval < 0) { goto unlock_exit; } /* reset read timeout during read or write activity */ if (read_timeout && (dev->read_buffer_length || dev->interrupt_out_busy)) { timeout = jiffies + msecs_to_jiffies(read_timeout); } /* check for read timeout */ if (read_timeout && time_after (jiffies, timeout)) { retval = -ETIMEDOUT; goto unlock_exit; } tower_check_for_read_packet (dev); } /* copy the data from read_buffer into userspace */ bytes_to_read = min(count, dev->read_packet_length); if (copy_to_user (buffer, dev->read_buffer, bytes_to_read)) { retval = -EFAULT; goto unlock_exit; } spin_lock_irq (&dev->read_buffer_lock); dev->read_buffer_length -= bytes_to_read; dev->read_packet_length -= bytes_to_read; for (i=0; i<dev->read_buffer_length; i++) { dev->read_buffer[i] = dev->read_buffer[i+bytes_to_read]; } spin_unlock_irq (&dev->read_buffer_lock); retval = bytes_to_read; unlock_exit: /* unlock the device */ mutex_unlock(&dev->lock); exit: return retval; }
18,152
163,247
0
int will_process_called() { return will_process_called_; }
18,153
171,640
0
static int calc_audiotime(struct a2dp_config cfg, int bytes) { int chan_count = popcount(cfg.channel_flags); ASSERTC(cfg.format == AUDIO_FORMAT_PCM_16_BIT, "unsupported sample sz", cfg.format); return bytes*(1000000/(chan_count*2))/cfg.rate; }
18,154
93,835
0
virDomainInterfaceStats(virDomainPtr dom, const char *path, virDomainInterfaceStatsPtr stats, size_t size) { virConnectPtr conn; virDomainInterfaceStatsStruct stats2 = { -1, -1, -1, -1, -1, -1, -1, -1 }; VIR_DOMAIN_DEBUG(dom, "path=%s, stats=%p, size=%zi", path, stats, size); virResetLastError(); virCheckDomainReturn(dom, -1); virCheckNonNullArgGoto(path, error); virCheckNonNullArgGoto(stats, error); if (size > sizeof(stats2)) { virReportInvalidArg(size, _("size must not exceed %zu"), sizeof(stats2)); goto error; } conn = dom->conn; if (conn->driver->domainInterfaceStats) { if (conn->driver->domainInterfaceStats(dom, path, &stats2) == -1) goto error; memcpy(stats, &stats2, size); return 0; } virReportUnsupportedError(); error: virDispatchError(dom->conn); return -1; }
18,155
83,327
0
void CSoundFile::PortamentoFineMPT(ModChannel* pChn, int param) { if(m_PlayState.m_nTickCount == 0) pChn->nOldFinePortaUpDown = 0; const int tickParam = static_cast<int>((m_PlayState.m_nTickCount + 1.0) * param / m_PlayState.m_nMusicSpeed); pChn->m_PortamentoFineSteps += (param >= 0) ? tickParam - pChn->nOldFinePortaUpDown : tickParam + pChn->nOldFinePortaUpDown; if(m_PlayState.m_nTickCount + 1 == m_PlayState.m_nMusicSpeed) pChn->nOldFinePortaUpDown = static_cast<int8>(mpt::abs(param)); else pChn->nOldFinePortaUpDown = static_cast<int8>(mpt::abs(tickParam)); pChn->m_CalculateFreq = true; }
18,156
180,886
1
construct_command_line(struct manager_ctx *manager, struct server *server) { static char cmd[BUF_SIZE]; char *method = manager->method; int i; build_config(working_dir, server); if (server->method) method = server->method; memset(cmd, 0, BUF_SIZE); snprintf(cmd, BUF_SIZE, "%s -m %s --manager-address %s -f %s/.shadowsocks_%s.pid -c %s/.shadowsocks_%s.conf", executable, method, manager->manager_address, working_dir, server->port, working_dir, server->port); if (manager->acl != NULL) { int len = strlen(cmd); snprintf(cmd + len, BUF_SIZE - len, " --acl %s", manager->acl); } if (manager->timeout != NULL) { int len = strlen(cmd); snprintf(cmd + len, BUF_SIZE - len, " -t %s", manager->timeout); } #ifdef HAVE_SETRLIMIT if (manager->nofile) { int len = strlen(cmd); snprintf(cmd + len, BUF_SIZE - len, " -n %d", manager->nofile); } #endif if (manager->user != NULL) { int len = strlen(cmd); snprintf(cmd + len, BUF_SIZE - len, " -a %s", manager->user); } if (manager->verbose) { int len = strlen(cmd); snprintf(cmd + len, BUF_SIZE - len, " -v"); } if (server->mode == NULL && manager->mode == UDP_ONLY) { int len = strlen(cmd); snprintf(cmd + len, BUF_SIZE - len, " -U"); } if (server->mode == NULL && manager->mode == TCP_AND_UDP) { int len = strlen(cmd); snprintf(cmd + len, BUF_SIZE - len, " -u"); } if (server->fast_open[0] == 0 && manager->fast_open) { int len = strlen(cmd); snprintf(cmd + len, BUF_SIZE - len, " --fast-open"); } if (manager->ipv6first) { int len = strlen(cmd); snprintf(cmd + len, BUF_SIZE - len, " -6"); } if (manager->mtu) { int len = strlen(cmd); snprintf(cmd + len, BUF_SIZE - len, " --mtu %d", manager->mtu); } if (server->plugin == NULL && manager->plugin) { int len = strlen(cmd); snprintf(cmd + len, BUF_SIZE - len, " --plugin \"%s\"", manager->plugin); } if (server->plugin_opts == NULL && manager->plugin_opts) { int len = strlen(cmd); snprintf(cmd + len, BUF_SIZE - len, " --plugin-opts \"%s\"", manager->plugin_opts); } for (i = 0; i < manager->nameserver_num; i++) { int len = strlen(cmd); snprintf(cmd + len, BUF_SIZE - len, " -d %s", manager->nameservers[i]); } for (i = 0; i < manager->host_num; i++) { int len = strlen(cmd); snprintf(cmd + len, BUF_SIZE - len, " -s %s", manager->hosts[i]); } // Always enable reuse port { int len = strlen(cmd); snprintf(cmd + len, BUF_SIZE - len, " --reuse-port"); } if (verbose) { LOGI("cmd: %s", cmd); } return cmd; }
18,157
43,179
0
static void oz_hcd_complete_set_config(struct oz_port *port, struct urb *urb, u8 rcode, u8 config_num) { int rc = 0; struct usb_hcd *hcd = port->ozhcd->hcd; if (rcode == 0) { port->config_num = config_num; oz_clean_endpoints_for_config(hcd, port); if (oz_build_endpoints_for_config(hcd, port, &urb->dev->config[port->config_num-1], GFP_ATOMIC)) { rc = -ENOMEM; } } else { rc = -ENOMEM; } oz_complete_urb(hcd, urb, rc); }
18,158
76,878
0
encode_POP_QUEUE(const struct ofpact_null *null OVS_UNUSED, enum ofp_version ofp_version OVS_UNUSED, struct ofpbuf *out) { put_NXAST_POP_QUEUE(out); }
18,159
149,906
0
bool LayerTreeHostImpl::CanDraw() const { if (!compositor_frame_sink_) { TRACE_EVENT_INSTANT0("cc", "LayerTreeHostImpl::CanDraw no CompositorFrameSink", TRACE_EVENT_SCOPE_THREAD); return false; } if (active_tree_->LayerListIsEmpty()) { TRACE_EVENT_INSTANT0("cc", "LayerTreeHostImpl::CanDraw no root layer", TRACE_EVENT_SCOPE_THREAD); return false; } if (resourceless_software_draw_) return true; if (DrawViewportSize().IsEmpty()) { TRACE_EVENT_INSTANT0("cc", "LayerTreeHostImpl::CanDraw empty viewport", TRACE_EVENT_SCOPE_THREAD); return false; } if (active_tree_->ViewportSizeInvalid()) { TRACE_EVENT_INSTANT0( "cc", "LayerTreeHostImpl::CanDraw viewport size recently changed", TRACE_EVENT_SCOPE_THREAD); return false; } if (EvictedUIResourcesExist()) { TRACE_EVENT_INSTANT0( "cc", "LayerTreeHostImpl::CanDraw UI resources evicted not recreated", TRACE_EVENT_SCOPE_THREAD); return false; } return true; }
18,160
55,338
0
static int atl2_set_mac(struct net_device *netdev, void *p) { struct atl2_adapter *adapter = netdev_priv(netdev); struct sockaddr *addr = p; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; if (netif_running(netdev)) return -EBUSY; memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len); atl2_set_mac_addr(&adapter->hw); return 0; }
18,161
28,106
0
av_cold void avpriv_dsputil_init(DSPContext *c, AVCodecContext *avctx) { ff_dsputil_init(c, avctx); }
18,162
178,054
1
apprentice_load(struct magic_set *ms, const char *fn, int action) { int errs = 0; uint32_t i, j; size_t files = 0, maxfiles = 0; char **filearr = NULL; struct stat st; struct magic_map *map; struct magic_entry_set mset[MAGIC_SETS]; php_stream *dir; php_stream_dirent d; TSRMLS_FETCH(); memset(mset, 0, sizeof(mset)); ms->flags |= MAGIC_CHECK; /* Enable checks for parsed files */ if ((map = CAST(struct magic_map *, ecalloc(1, sizeof(*map)))) == NULL) { file_oomem(ms, sizeof(*map)); return NULL; } /* print silly verbose header for USG compat. */ if (action == FILE_CHECK) (void)fprintf(stderr, "%s\n", usg_hdr); /* load directory or file */ /* FIXME: Read file names and sort them to prevent non-determinism. See Debian bug #488562. */ if (php_sys_stat(fn, &st) == 0 && S_ISDIR(st.st_mode)) { int mflen; char mfn[MAXPATHLEN]; dir = php_stream_opendir((char *)fn, REPORT_ERRORS, NULL); if (!dir) { errs++; goto out; } while (php_stream_readdir(dir, &d)) { if ((mflen = snprintf(mfn, sizeof(mfn), "%s/%s", fn, d.d_name)) < 0) { file_oomem(ms, strlen(fn) + strlen(d.d_name) + 2); errs++; php_stream_closedir(dir); goto out; } if (stat(mfn, &st) == -1 || !S_ISREG(st.st_mode)) { continue; } if (files >= maxfiles) { size_t mlen; maxfiles = (maxfiles + 1) * 2; mlen = maxfiles * sizeof(*filearr); if ((filearr = CAST(char **, erealloc(filearr, mlen))) == NULL) { file_oomem(ms, mlen); efree(mfn); php_stream_closedir(dir); errs++; goto out; goto out; } } filearr[files++] = estrndup(mfn, (mflen > sizeof(mfn) - 1)? sizeof(mfn) - 1: mflen); } php_stream_closedir(dir); qsort(filearr, files, sizeof(*filearr), cmpstrp); for (i = 0; i < files; i++) { load_1(ms, action, filearr[i], &errs, mset); efree(filearr[i]); } efree(filearr); } else load_1(ms, action, fn, &errs, mset); if (errs) goto out; for (j = 0; j < MAGIC_SETS; j++) { /* Set types of tests */ for (i = 0; i < mset[j].count; ) { if (mset[j].me[i].mp->cont_level != 0) { i++; continue; } i = set_text_binary(ms, mset[j].me, mset[j].count, i); } qsort(mset[j].me, mset[j].count, sizeof(*mset[j].me), apprentice_sort); /* * Make sure that any level 0 "default" line is last * (if one exists). */ set_last_default(ms, mset[j].me, mset[j].count); /* coalesce per file arrays into a single one */ if (coalesce_entries(ms, mset[j].me, mset[j].count, &map->magic[j], &map->nmagic[j]) == -1) { errs++; goto out; } } out: for (j = 0; j < MAGIC_SETS; j++) magic_entry_free(mset[j].me, mset[j].count); if (errs) { for (j = 0; j < MAGIC_SETS; j++) { if (map->magic[j]) efree(map->magic[j]); } efree(map); return NULL; } return map; }
18,163
27,486
0
static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev) { struct ip_tunnel *t = netdev_priv(dev); struct ip_tunnel_parm *p = &t->parms; NLA_PUT_U32(skb, IFLA_GRE_LINK, p->link); NLA_PUT_BE16(skb, IFLA_GRE_IFLAGS, p->i_flags); NLA_PUT_BE16(skb, IFLA_GRE_OFLAGS, p->o_flags); NLA_PUT_BE32(skb, IFLA_GRE_IKEY, p->i_key); NLA_PUT_BE32(skb, IFLA_GRE_OKEY, p->o_key); NLA_PUT_BE32(skb, IFLA_GRE_LOCAL, p->iph.saddr); NLA_PUT_BE32(skb, IFLA_GRE_REMOTE, p->iph.daddr); NLA_PUT_U8(skb, IFLA_GRE_TTL, p->iph.ttl); NLA_PUT_U8(skb, IFLA_GRE_TOS, p->iph.tos); NLA_PUT_U8(skb, IFLA_GRE_PMTUDISC, !!(p->iph.frag_off & htons(IP_DF))); return 0; nla_put_failure: return -EMSGSIZE; }
18,164
70,406
0
static jpc_dec_cp_t *jpc_dec_cp_create(uint_fast16_t numcomps) { jpc_dec_cp_t *cp; jpc_dec_ccp_t *ccp; int compno; if (!(cp = jas_malloc(sizeof(jpc_dec_cp_t)))) { return 0; } cp->flags = 0; cp->numcomps = numcomps; cp->prgord = 0; cp->numlyrs = 0; cp->mctid = 0; cp->csty = 0; if (!(cp->ccps = jas_alloc2(cp->numcomps, sizeof(jpc_dec_ccp_t)))) { goto error; } if (!(cp->pchglist = jpc_pchglist_create())) { goto error; } for (compno = 0, ccp = cp->ccps; compno < cp->numcomps; ++compno, ++ccp) { ccp->flags = 0; ccp->numrlvls = 0; ccp->cblkwidthexpn = 0; ccp->cblkheightexpn = 0; ccp->qmfbid = 0; ccp->numstepsizes = 0; ccp->numguardbits = 0; ccp->roishift = 0; ccp->cblkctx = 0; } return cp; error: if (cp) { jpc_dec_cp_destroy(cp); } return 0; }
18,165
91,559
0
static int kvm_dev_ioctl_create_vm(unsigned long type) { int r; struct kvm *kvm; struct file *file; kvm = kvm_create_vm(type); if (IS_ERR(kvm)) return PTR_ERR(kvm); #ifdef CONFIG_KVM_MMIO r = kvm_coalesced_mmio_init(kvm); if (r < 0) goto put_kvm; #endif r = get_unused_fd_flags(O_CLOEXEC); if (r < 0) goto put_kvm; file = anon_inode_getfile("kvm-vm", &kvm_vm_fops, kvm, O_RDWR); if (IS_ERR(file)) { put_unused_fd(r); r = PTR_ERR(file); goto put_kvm; } /* * Don't call kvm_put_kvm anymore at this point; file->f_op is * already set, with ->release() being kvm_vm_release(). In error * cases it will be called by the final fput(file) and will take * care of doing kvm_put_kvm(kvm). */ if (kvm_create_vm_debugfs(kvm, r) < 0) { put_unused_fd(r); fput(file); return -ENOMEM; } kvm_uevent_notify_change(KVM_EVENT_CREATE_VM, kvm); fd_install(r, file); return r; put_kvm: kvm_put_kvm(kvm); return r; }
18,166
43,642
0
struct file *do_file_open_root(struct dentry *dentry, struct vfsmount *mnt, const char *name, const struct open_flags *op) { struct nameidata nd; struct file *file; struct filename *filename; int flags = op->lookup_flags | LOOKUP_ROOT; nd.root.mnt = mnt; nd.root.dentry = dentry; if (d_is_symlink(dentry) && op->intent & LOOKUP_OPEN) return ERR_PTR(-ELOOP); filename = getname_kernel(name); if (unlikely(IS_ERR(filename))) return ERR_CAST(filename); set_nameidata(&nd, -1, filename); file = path_openat(&nd, op, flags | LOOKUP_RCU); if (unlikely(file == ERR_PTR(-ECHILD))) file = path_openat(&nd, op, flags); if (unlikely(file == ERR_PTR(-ESTALE))) file = path_openat(&nd, op, flags | LOOKUP_REVAL); restore_nameidata(); putname(filename); return file; }
18,167
174,678
0
WORD32 ih264d_do_mmco_buffer(dpb_commands_t *ps_dpb_cmds, dpb_manager_t *ps_dpb_mgr, UWORD8 u1_numRef_frames_for_seq, /*!< num_ref_frames from active SeqParSet*/ UWORD32 u4_cur_pic_num, UWORD32 u2_u4_max_pic_num_minus1, UWORD8 u1_nal_unit_type, struct pic_buffer_t *ps_pic_buf, UWORD8 u1_buf_id, UWORD8 u1_fld_pic_flag, UWORD8 u1_curr_pic_in_err) { WORD32 i; UWORD8 u1_buf_mode, u1_marked_lt; struct dpb_info_t *ps_next_dpb; UWORD8 u1_num_gaps; UWORD8 u1_del_node = 1; UWORD8 u1_insert_st_pic = 1; WORD32 ret; UNUSED(u1_nal_unit_type); UNUSED(u2_u4_max_pic_num_minus1); u1_buf_mode = ps_dpb_cmds->u1_buf_mode; //0 - sliding window; 1 - Adaptive u1_marked_lt = 0; u1_num_gaps = ps_dpb_mgr->u1_num_gaps; if(!u1_buf_mode) { if((ps_dpb_mgr->u1_num_st_ref_bufs + ps_dpb_mgr->u1_num_lt_ref_bufs + u1_num_gaps) == u1_numRef_frames_for_seq) { UWORD8 u1_new_node_flag = 1; if((0 == ps_dpb_mgr->u1_num_st_ref_bufs) && (0 == u1_num_gaps)) { UWORD32 i4_error_code; i4_error_code = ERROR_DBP_MANAGER_T; return i4_error_code; } ps_next_dpb = ps_dpb_mgr->ps_dpb_st_head; if(ps_dpb_mgr->u1_num_st_ref_bufs > 1) { if(ps_next_dpb->i4_frame_num == (WORD32)u4_cur_pic_num) { /* Incase of filed pictures top_field has been allocated */ /* picture buffer and complementary bottom field pair comes */ /* then the sliding window mechanism should not allocate a */ /* new node */ u1_new_node_flag = 0; } for(i = 1; i < (ps_dpb_mgr->u1_num_st_ref_bufs - 1); i++) { if(ps_next_dpb == NULL) { UWORD32 i4_error_code; i4_error_code = ERROR_DBP_MANAGER_T; return i4_error_code; } if(ps_next_dpb->i4_frame_num == (WORD32)u4_cur_pic_num) { /* Incase of field pictures top_field has been allocated */ /* picture buffer and complementary bottom field pair comes */ /* then the sliding window mechanism should not allocate a */ /* new node */ u1_new_node_flag = 0; } ps_next_dpb = ps_next_dpb->ps_prev_short; } if(ps_next_dpb->ps_prev_short->ps_prev_short != NULL) { UWORD32 i4_error_code; i4_error_code = ERROR_DBP_MANAGER_T; return i4_error_code; } if(u1_new_node_flag) { if(u1_num_gaps) { ret = ih264d_delete_gap_frm_sliding(ps_dpb_mgr, ps_next_dpb->ps_prev_short->i4_frame_num, &u1_del_node); if(ret != OK) return ret; } if(u1_del_node) { ps_dpb_mgr->u1_num_st_ref_bufs--; ps_next_dpb->ps_prev_short->u1_used_as_ref = UNUSED_FOR_REF; ps_next_dpb->ps_prev_short->s_top_field.u1_reference_info = UNUSED_FOR_REF; ps_next_dpb->ps_prev_short->s_bot_field.u1_reference_info = UNUSED_FOR_REF; ih264d_free_ref_pic_mv_bufs(ps_dpb_mgr->pv_codec_handle, ps_next_dpb->ps_prev_short->u1_buf_id); ps_next_dpb->ps_prev_short->ps_pic_buf = NULL; ps_next_dpb->ps_prev_short = NULL; } } } else { if(ps_dpb_mgr->u1_num_st_ref_bufs) { ret = ih264d_delete_gap_frm_sliding(ps_dpb_mgr, ps_next_dpb->i4_frame_num, &u1_del_node); if(ret != OK) return ret; if((ps_next_dpb->i4_frame_num != (WORD32)u4_cur_pic_num) && u1_del_node) { ps_dpb_mgr->u1_num_st_ref_bufs--; ps_next_dpb->u1_used_as_ref = FALSE; ps_next_dpb->s_top_field.u1_reference_info = UNUSED_FOR_REF; ps_next_dpb->s_bot_field.u1_reference_info = UNUSED_FOR_REF; ih264d_free_ref_pic_mv_bufs(ps_dpb_mgr->pv_codec_handle, ps_next_dpb->u1_buf_id); ps_next_dpb->ps_pic_buf = NULL; ps_next_dpb->ps_prev_short = NULL; ps_dpb_mgr->ps_dpb_st_head = NULL; ps_next_dpb = NULL; } else if(ps_next_dpb->i4_frame_num == (WORD32)u4_cur_pic_num) { if(u1_curr_pic_in_err) { u1_insert_st_pic = 0; } else if(ps_dpb_mgr->u1_num_st_ref_bufs > 0) { ps_dpb_mgr->u1_num_st_ref_bufs--; ps_next_dpb->u1_used_as_ref = FALSE; ps_next_dpb->s_top_field.u1_reference_info = UNUSED_FOR_REF; ps_next_dpb->s_bot_field.u1_reference_info = UNUSED_FOR_REF; ih264d_free_ref_pic_mv_bufs(ps_dpb_mgr->pv_codec_handle, ps_next_dpb->u1_buf_id); ps_next_dpb->ps_pic_buf = NULL; ps_next_dpb = NULL; } } } else { ret = ih264d_delete_gap_frm_sliding(ps_dpb_mgr, INVALID_FRAME_NUM, &u1_del_node); if(ret != OK) return ret; if(u1_del_node) { UWORD32 i4_error_code; i4_error_code = ERROR_DBP_MANAGER_T; return i4_error_code; } } } } } else { UWORD32 u4_mmco; UWORD32 u4_diff_pic_num; WORD32 i4_pic_num; UWORD32 u4_lt_idx; WORD32 j; struct MMCParams *ps_mmc_params; for(j = 0; j < ps_dpb_cmds->u1_num_of_commands; j++) { ps_mmc_params = &ps_dpb_cmds->as_mmc_params[j]; u4_mmco = ps_mmc_params->u4_mmco; //Get MMCO switch(u4_mmco) { case MARK_ST_PICNUM_AS_NONREF: { { UWORD32 i4_cur_pic_num = u4_cur_pic_num; u4_diff_pic_num = ps_mmc_params->u4_diff_pic_num; //Get absDiffPicnumMinus1 if(u1_fld_pic_flag) i4_cur_pic_num = i4_cur_pic_num * 2 + 1; i4_pic_num = i4_cur_pic_num - (u4_diff_pic_num + 1); } if(ps_dpb_mgr->u1_num_st_ref_bufs > 0) { ret = ih264d_delete_st_node_or_make_lt(ps_dpb_mgr, i4_pic_num, MAX_REF_BUFS + 1, u1_fld_pic_flag); if(ret != OK) return ret; } else { UWORD8 u1_dummy; ret = ih264d_delete_gap_frm_mmco(ps_dpb_mgr, i4_pic_num, &u1_dummy); if(ret != OK) return ret; } break; } case MARK_LT_INDEX_AS_NONREF: { WORD32 i4_status; u4_lt_idx = ps_mmc_params->u4_lt_idx; //Get long term index ret = ih264d_delete_lt_node(ps_dpb_mgr, u4_lt_idx, u1_fld_pic_flag, 0, &i4_status); if(ret != OK) return ret; if(i4_status) { UWORD32 i4_error_code; i4_error_code = ERROR_DBP_MANAGER_T; return i4_error_code; } break; } case MARK_ST_PICNUM_AS_LT_INDEX: { { UWORD32 i4_cur_pic_num = u4_cur_pic_num; u4_diff_pic_num = ps_mmc_params->u4_diff_pic_num; //Get absDiffPicnumMinus1 if(u1_fld_pic_flag) i4_cur_pic_num = i4_cur_pic_num * 2 + 1; i4_pic_num = i4_cur_pic_num - (u4_diff_pic_num + 1); } u4_lt_idx = ps_mmc_params->u4_lt_idx; //Get long term index if(ps_dpb_mgr->u1_num_st_ref_bufs > 0) { ret = ih264d_delete_st_node_or_make_lt(ps_dpb_mgr, i4_pic_num, u4_lt_idx, u1_fld_pic_flag); if(ret != OK) return ret; } break; } case SET_MAX_LT_INDEX: { UWORD8 uc_numLT = ps_dpb_mgr->u1_num_lt_ref_bufs; u4_lt_idx = ps_mmc_params->u4_max_lt_idx_plus1; //Get Max_long_term_index_plus1 if(u4_lt_idx < ps_dpb_mgr->u1_max_lt_pic_idx_plus1 && uc_numLT > 0) { struct dpb_info_t *ps_nxtDPB; ps_nxtDPB = ps_dpb_mgr->ps_dpb_ht_head; ps_next_dpb = ps_nxtDPB->ps_prev_long; if(ps_nxtDPB->u1_lt_idx >= u4_lt_idx) { i = 0; ps_dpb_mgr->ps_dpb_ht_head = NULL; } else { for(i = 1; i < uc_numLT; i++) { if(ps_next_dpb->u1_lt_idx >= u4_lt_idx) break; ps_nxtDPB = ps_next_dpb; ps_next_dpb = ps_next_dpb->ps_prev_long; } ps_nxtDPB->ps_prev_long = NULL; //Terminate the link of the closest LTIndex that is <=Max } ps_dpb_mgr->u1_num_lt_ref_bufs = i; if(i == 0) ps_next_dpb = ps_nxtDPB; for(; i < uc_numLT; i++) { ps_nxtDPB = ps_next_dpb; ps_nxtDPB->u1_lt_idx = MAX_REF_BUFS + 1; ps_nxtDPB->u1_used_as_ref = UNUSED_FOR_REF; ps_nxtDPB->s_top_field.u1_reference_info = UNUSED_FOR_REF; ps_nxtDPB->s_bot_field.u1_reference_info = UNUSED_FOR_REF; ps_nxtDPB->ps_pic_buf = NULL; ih264d_free_ref_pic_mv_bufs(ps_dpb_mgr->pv_codec_handle, ps_nxtDPB->u1_buf_id); ps_next_dpb = ps_nxtDPB->ps_prev_long; ps_nxtDPB->ps_prev_long = NULL; } } ps_dpb_mgr->u1_max_lt_pic_idx_plus1 = u4_lt_idx; break; } case SET_LT_INDEX: { u4_lt_idx = ps_mmc_params->u4_lt_idx; //Get long term index ret = ih264d_insert_st_node(ps_dpb_mgr, ps_pic_buf, u1_buf_id, u4_cur_pic_num); if(ret != OK) return ret; ret = ih264d_delete_st_node_or_make_lt(ps_dpb_mgr, u4_cur_pic_num, u4_lt_idx, u1_fld_pic_flag); if(ret != OK) return ret; u1_marked_lt = 1; break; } default: break; } if(u4_mmco == RESET_REF_PICTURES || u4_mmco == RESET_ALL_PICTURES) { ih264d_reset_ref_bufs(ps_dpb_mgr); u4_cur_pic_num = 0; } } } if(!u1_marked_lt && u1_insert_st_pic) { ret = ih264d_insert_st_node(ps_dpb_mgr, ps_pic_buf, u1_buf_id, u4_cur_pic_num); if(ret != OK) return ret; } return OK; }
18,168
36,621
0
static bool setup_stratum_socket(struct pool *pool)
{
    struct addrinfo servinfobase, *servinfo, *hints, *p;
    char *sockaddr_url, *sockaddr_port;
    int sockd;
    int ret;

    mutex_lock(&pool->stratum_lock);
    pool->stratum_active = false;
    if (pool->sock) {
        /* FIXME: change to LOG_DEBUG if issue #88 resolved */
        applog(LOG_INFO, "Closing %s socket", get_pool_name(pool));
        CLOSESOCKET(pool->sock);
    }
    pool->sock = 0;
    mutex_unlock(&pool->stratum_lock);

    hints = &pool->stratum_hints;
    memset(hints, 0, sizeof(struct addrinfo));
    hints->ai_family = AF_UNSPEC;
    hints->ai_socktype = SOCK_STREAM;
    servinfo = &servinfobase;

    if (!pool->rpc_proxy && opt_socks_proxy) {
        pool->rpc_proxy = opt_socks_proxy;
        extract_sockaddr(pool->rpc_proxy, &pool->sockaddr_proxy_url, &pool->sockaddr_proxy_port);
        pool->rpc_proxytype = PROXY_SOCKS5;
    }

    if (pool->rpc_proxy) {
        sockaddr_url = pool->sockaddr_proxy_url;
        sockaddr_port = pool->sockaddr_proxy_port;
    } else {
        sockaddr_url = pool->sockaddr_url;
        sockaddr_port = pool->stratum_port;
    }

    ret = getaddrinfo(sockaddr_url, sockaddr_port, hints, &servinfo);
    if (ret) {
        applog(LOG_INFO, "getaddrinfo() in setup_stratum_socket() returned %i: %s",
               ret, gai_strerror(ret));
        if (!pool->probed) {
            applog(LOG_WARNING, "Failed to resolve (wrong URL?) %s:%s",
                   sockaddr_url, sockaddr_port);
            pool->probed = true;
        } else {
            applog(LOG_INFO, "Failed to getaddrinfo for %s:%s",
                   sockaddr_url, sockaddr_port);
        }
        return false;
    }

    for (p = servinfo; p != NULL; p = p->ai_next) {
        sockd = socket(p->ai_family, p->ai_socktype, p->ai_protocol);
        if (sockd == -1) {
            applog(LOG_DEBUG, "Failed socket");
            continue;
        }

        /* Iterate non blocking over entries returned by getaddrinfo
         * to cope with round robin DNS entries, finding the first one
         * we can connect to quickly. */
        noblock_socket(sockd);
        if (connect(sockd, p->ai_addr, p->ai_addrlen) == -1) {
            struct timeval tv_timeout = {1, 0};
            int selret;
            fd_set rw;

            if (!sock_connecting()) {
                CLOSESOCKET(sockd);
                applog(LOG_DEBUG, "Failed sock connect");
                continue;
            }
retry:
            FD_ZERO(&rw);
            FD_SET(sockd, &rw);
            selret = select(sockd + 1, NULL, &rw, NULL, &tv_timeout);
            if (selret > 0 && FD_ISSET(sockd, &rw)) {
                socklen_t len;
                int err, n;

                len = sizeof(err);
                n = getsockopt(sockd, SOL_SOCKET, SO_ERROR, (char *)&err, &len);
                if (!n && !err) {
                    applog(LOG_DEBUG, "Succeeded delayed connect");
                    block_socket(sockd);
                    break;
                }
            }
            if (selret < 0 && interrupted())
                goto retry;
            CLOSESOCKET(sockd);
            applog(LOG_DEBUG, "Select timeout/failed connect");
            continue;
        }
        applog(LOG_WARNING, "Succeeded immediate connect");
        block_socket(sockd);
        break;
    }

    if (p == NULL) {
        applog(LOG_INFO, "Failed to connect to stratum on %s:%s",
               sockaddr_url, sockaddr_port);
        freeaddrinfo(servinfo);
        return false;
    }
    freeaddrinfo(servinfo);

    if (pool->rpc_proxy) {
        switch (pool->rpc_proxytype) {
        case PROXY_HTTP_1_0:
            if (!http_negotiate(pool, sockd, true))
                return false;
            break;
        case PROXY_HTTP:
            if (!http_negotiate(pool, sockd, false))
                return false;
            break;
        case PROXY_SOCKS5:
        case PROXY_SOCKS5H:
            if (!socks5_negotiate(pool, sockd))
                return false;
            break;
        case PROXY_SOCKS4:
            if (!socks4_negotiate(pool, sockd, false))
                return false;
            break;
        case PROXY_SOCKS4A:
            if (!socks4_negotiate(pool, sockd, true))
                return false;
            break;
        default:
            applog(LOG_WARNING, "Unsupported proxy type for %s:%s",
                   pool->sockaddr_proxy_url, pool->sockaddr_proxy_port);
            return false;
            break;
        }
    }

    if (!pool->sockbuf) {
        pool->sockbuf = (char *)calloc(RBUFSIZE, 1);
        if (!pool->sockbuf)
            quithere(1, "Failed to calloc pool sockbuf");
        pool->sockbuf_size = RBUFSIZE;
    }

    pool->sock = sockd;
    keep_sockalive(sockd);
    return true;
}
18,169
58,565
0
static int transport_read_nonblocking(rdpTransport* transport)
{
    int status;

    status = transport_read(transport, transport->ReceiveBuffer);

    if (status <= 0)
        return status;

    Stream_Seek(transport->ReceiveBuffer, status);

    return status;
}
18,170
41,061
0
static int encrypt_and_sign_nss_2_2 (
    struct crypto_instance *instance,
    const unsigned char *buf_in,
    const size_t buf_in_len,
    unsigned char *buf_out,
    size_t *buf_out_len)
{
    if (encrypt_nss(instance,
                    buf_in, buf_in_len,
                    buf_out + sizeof(struct crypto_config_header),
                    buf_out_len) < 0) {
        return -1;
    }

    *buf_out_len += sizeof(struct crypto_config_header);

    if (hash_to_nss[instance->crypto_hash_type]) {
        if (calculate_nss_hash(instance, buf_out, *buf_out_len,
                               buf_out + *buf_out_len) < 0) {
            return -1;
        }
        *buf_out_len += hash_len[instance->crypto_hash_type];
    }

    return 0;
}
18,171
27,992
0
static void decode_refpass(Jpeg2000T1Context *t1, int width, int height,
                           int bpno)
{
    int phalf, nhalf;
    int y0, x, y;

    phalf = 1 << (bpno - 1);
    nhalf = -phalf;

    for (y0 = 0; y0 < height; y0 += 4)
        for (x = 0; x < width; x++)
            for (y = y0; y < height && y < y0 + 4; y++)
                if ((t1->flags[y + 1][x + 1] & (JPEG2000_T1_SIG | JPEG2000_T1_VIS)) == JPEG2000_T1_SIG) {
                    int ctxno = ff_jpeg2000_getrefctxno(t1->flags[y + 1][x + 1]);
                    int r = ff_mqc_decode(&t1->mqc, t1->mqc.cx_states + ctxno)
                            ? phalf : nhalf;
                    t1->data[y][x]          += t1->data[y][x] < 0 ? -r : r;
                    t1->flags[y + 1][x + 1] |= JPEG2000_T1_REF;
                }
}
18,172
42,772
0
static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
                              uint32_t guest_irq, bool set)
{
    struct kvm_kernel_irq_routing_entry *e;
    struct kvm_irq_routing_table *irq_rt;
    struct kvm_lapic_irq irq;
    struct kvm_vcpu *vcpu;
    struct vcpu_data vcpu_info;
    int idx, ret = -EINVAL;

    if (!kvm_arch_has_assigned_device(kvm) ||
        !irq_remapping_cap(IRQ_POSTING_CAP))
        return 0;

    idx = srcu_read_lock(&kvm->irq_srcu);
    irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
    BUG_ON(guest_irq >= irq_rt->nr_rt_entries);

    hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) {
        if (e->type != KVM_IRQ_ROUTING_MSI)
            continue;
        /*
         * VT-d PI cannot support posting multicast/broadcast
         * interrupts to a vCPU, we still use interrupt remapping
         * for these kind of interrupts.
         *
         * For lowest-priority interrupts, we only support
         * those with single CPU as the destination, e.g. user
         * configures the interrupts via /proc/irq or uses
         * irqbalance to make the interrupts single-CPU.
         *
         * We will support full lowest-priority interrupt later.
         */
        kvm_set_msi_irq(e, &irq);
        if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu))
            continue;

        vcpu_info.pi_desc_addr = __pa(vcpu_to_pi_desc(vcpu));
        vcpu_info.vector = irq.vector;

        trace_kvm_pi_irte_update(vcpu->vcpu_id, e->gsi,
                                 vcpu_info.vector, vcpu_info.pi_desc_addr, set);

        if (set)
            ret = irq_set_vcpu_affinity(host_irq, &vcpu_info);
        else {
            /* suppress notification event before unposting */
            pi_set_sn(vcpu_to_pi_desc(vcpu));
            ret = irq_set_vcpu_affinity(host_irq, NULL);
            pi_clear_sn(vcpu_to_pi_desc(vcpu));
        }

        if (ret < 0) {
            printk(KERN_INFO "%s: failed to update PI IRTE\n",
                   __func__);
            goto out;
        }
    }

    ret = 0;
out:
    srcu_read_unlock(&kvm->irq_srcu, idx);
    return ret;
}
18,173
116,414
0
ChromeContentRendererClient::~ChromeContentRendererClient() { }
18,174
95,490
0
void Com_TouchMemory( void ) {
    int     start, end;
    int     i, j;
    int     sum;
    memblock_t  *block;

    Z_CheckHeap();

    start = Sys_Milliseconds();

    sum = 0;

    j = hunk_low.permanent >> 2;
    for ( i = 0 ; i < j ; i+=64 ) {         // only need to touch each page
        sum += ((int *)s_hunkData)[i];
    }

    i = ( s_hunkTotal - hunk_high.permanent ) >> 2;
    j = hunk_high.permanent >> 2;
    for ( ; i < j ; i+=64 ) {               // only need to touch each page
        sum += ((int *)s_hunkData)[i];
    }

    for (block = mainzone->blocklist.next ; ; block = block->next) {
        if ( block->tag ) {
            j = block->size >> 2;
            for ( i = 0 ; i < j ; i+=64 ) { // only need to touch each page
                sum += ((int *)block)[i];
            }
        }
        if ( block->next == &mainzone->blocklist ) {
            break;          // all blocks have been hit
        }
    }

    end = Sys_Milliseconds();

    Com_Printf( "Com_TouchMemory: %i msec\n", end - start );
}
18,175
90,751
0
static int read_quant_matrix_ext(MpegEncContext *s, GetBitContext *gb)
{
    int i, j, v;

    if (get_bits1(gb)) {
        if (get_bits_left(gb) < 64*8)
            return AVERROR_INVALIDDATA;
        /* intra_quantiser_matrix */
        for (i = 0; i < 64; i++) {
            v = get_bits(gb, 8);
            j = s->idsp.idct_permutation[ff_zigzag_direct[i]];
            s->intra_matrix[j]        = v;
            s->chroma_intra_matrix[j] = v;
        }
    }

    if (get_bits1(gb)) {
        if (get_bits_left(gb) < 64*8)
            return AVERROR_INVALIDDATA;
        /* non_intra_quantiser_matrix */
        for (i = 0; i < 64; i++) {
            get_bits(gb, 8);
        }
    }

    if (get_bits1(gb)) {
        if (get_bits_left(gb) < 64*8)
            return AVERROR_INVALIDDATA;
        /* chroma_intra_quantiser_matrix */
        for (i = 0; i < 64; i++) {
            v = get_bits(gb, 8);
            j = s->idsp.idct_permutation[ff_zigzag_direct[i]];
            s->chroma_intra_matrix[j] = v;
        }
    }

    if (get_bits1(gb)) {
        if (get_bits_left(gb) < 64*8)
            return AVERROR_INVALIDDATA;
        /* chroma_non_intra_quantiser_matrix */
        for (i = 0; i < 64; i++) {
            get_bits(gb, 8);
        }
    }

    next_start_code_studio(gb);
    return 0;
}
18,176
41,023
0
cmsBool OptimizeByResampling(cmsPipeline** Lut, cmsUInt32Number Intent,
                             cmsUInt32Number* InputFormat, cmsUInt32Number* OutputFormat,
                             cmsUInt32Number* dwFlags)
{
    cmsPipeline* Src = NULL;
    cmsPipeline* Dest = NULL;
    cmsStage* mpe;
    cmsStage* CLUT;
    cmsStage *KeepPreLin = NULL, *KeepPostLin = NULL;
    int nGridPoints;
    cmsColorSpaceSignature ColorSpace, OutputColorSpace;
    cmsStage *NewPreLin = NULL;
    cmsStage *NewPostLin = NULL;
    _cmsStageCLutData* DataCLUT;
    cmsToneCurve** DataSetIn;
    cmsToneCurve** DataSetOut;
    Prelin16Data* p16;

    if (_cmsFormatterIsFloat(*InputFormat) || _cmsFormatterIsFloat(*OutputFormat))
        return FALSE;

    ColorSpace       = _cmsICCcolorSpace(T_COLORSPACE(*InputFormat));
    OutputColorSpace = _cmsICCcolorSpace(T_COLORSPACE(*OutputFormat));
    nGridPoints      = _cmsReasonableGridpointsByColorspace(ColorSpace, *dwFlags);

    if (cmsPipelineStageCount(*Lut) == 0)
        nGridPoints = 2;

    Src = *Lut;

    for (mpe = cmsPipelineGetPtrToFirstStage(Src);
         mpe != NULL;
         mpe = cmsStageNext(mpe)) {
        if (cmsStageType(mpe) == cmsSigNamedColorElemType) return FALSE;
    }

    Dest = cmsPipelineAlloc(Src ->ContextID, Src ->InputChannels, Src ->OutputChannels);
    if (!Dest) return FALSE;

    if (*dwFlags & cmsFLAGS_CLUT_PRE_LINEARIZATION) {

        cmsStage* PreLin = cmsPipelineGetPtrToFirstStage(Src);

        if (PreLin ->Type == cmsSigCurveSetElemType) {

            if (!AllCurvesAreLinear(PreLin)) {

                NewPreLin = cmsStageDup(PreLin);
                if (!cmsPipelineInsertStage(Dest, cmsAT_BEGIN, NewPreLin))
                    goto Error;

                cmsPipelineUnlinkStage(Src, cmsAT_BEGIN, &KeepPreLin);
            }
        }
    }

    CLUT = cmsStageAllocCLut16bit(Src ->ContextID, nGridPoints, Src ->InputChannels, Src->OutputChannels, NULL);
    if (CLUT == NULL) return FALSE;

    if (!cmsPipelineInsertStage(Dest, cmsAT_END, CLUT)) {
        goto Error;
    }

    if (*dwFlags & cmsFLAGS_CLUT_POST_LINEARIZATION) {

        cmsStage* PostLin = cmsPipelineGetPtrToLastStage(Src);

        if (cmsStageType(PostLin) == cmsSigCurveSetElemType) {

            if (!AllCurvesAreLinear(PostLin)) {

                NewPostLin = cmsStageDup(PostLin);
                if (!cmsPipelineInsertStage(Dest, cmsAT_END, NewPostLin))
                    goto Error;

                cmsPipelineUnlinkStage(Src, cmsAT_END, &KeepPostLin);
            }
        }
    }

    if (!cmsStageSampleCLut16bit(CLUT, XFormSampler16, (void*) Src, 0)) {
Error:

        if (KeepPreLin != NULL) {
            if (!cmsPipelineInsertStage(Src, cmsAT_BEGIN, KeepPreLin)) {
                _cmsAssert(0); // This never happens
            }
        }
        if (KeepPostLin != NULL) {
            if (!cmsPipelineInsertStage(Src, cmsAT_END, KeepPostLin)) {
                _cmsAssert(0); // This never happens
            }
        }
        cmsPipelineFree(Dest);
        return FALSE;
    }

    if (KeepPreLin != NULL) cmsStageFree(KeepPreLin);
    if (KeepPostLin != NULL) cmsStageFree(KeepPostLin);
    cmsPipelineFree(Src);

    DataCLUT = (_cmsStageCLutData*) CLUT ->Data;

    if (NewPreLin == NULL) DataSetIn = NULL;
    else DataSetIn = ((_cmsStageToneCurvesData*) NewPreLin ->Data) ->TheCurves;

    if (NewPostLin == NULL) DataSetOut = NULL;
    else DataSetOut = ((_cmsStageToneCurvesData*) NewPostLin ->Data) ->TheCurves;

    if (DataSetIn == NULL && DataSetOut == NULL) {

        _cmsPipelineSetOptimizationParameters(Dest,
                (_cmsOPTeval16Fn) DataCLUT->Params->Interpolation.Lerp16,
                DataCLUT->Params, NULL, NULL);
    }
    else {

        p16 = PrelinOpt16alloc(Dest ->ContextID,
                DataCLUT ->Params,
                Dest ->InputChannels, DataSetIn,
                Dest ->OutputChannels, DataSetOut);

        _cmsPipelineSetOptimizationParameters(Dest, PrelinEval16, (void*) p16, PrelinOpt16free, Prelin16dup);
    }

    if (Intent == INTENT_ABSOLUTE_COLORIMETRIC)
        *dwFlags |= cmsFLAGS_NOWHITEONWHITEFIXUP;

    if (!(*dwFlags & cmsFLAGS_NOWHITEONWHITEFIXUP)) {
        FixWhiteMisalignment(Dest, ColorSpace, OutputColorSpace);
    }

    *Lut = Dest;
    return TRUE;

    cmsUNUSED_PARAMETER(Intent);
}
18,177
116,689
0
void RenderViewImpl::exitFullscreen() {
  exitFullScreen();
}
18,178
52,724
0
static void snd_timer_user_copy_id(struct snd_timer_id *id,
                                   struct snd_timer *timer)
{
    id->dev_class = timer->tmr_class;
    id->dev_sclass = SNDRV_TIMER_SCLASS_NONE;
    id->card = timer->card ? timer->card->number : -1;
    id->device = timer->tmr_device;
    id->subdevice = timer->tmr_subdevice;
}
18,179
106,835
0
LayoutUnit RenderBox::computeBorderBoxLogicalHeight(LayoutUnit height) const
{
    LayoutUnit bordersPlusPadding = borderAndPaddingLogicalHeight();
    if (style()->boxSizing() == CONTENT_BOX)
        return height + bordersPlusPadding;
    return max(height, bordersPlusPadding);
}
18,180
96,804
0
static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
                                    struct pipe_inode_info *pipe,
                                    size_t len, unsigned int flags)
{
    int total, ret;
    int page_nr = 0;
    struct pipe_buffer *bufs;
    struct fuse_copy_state cs;
    struct fuse_dev *fud = fuse_get_dev(in);

    if (!fud)
        return -EPERM;

    bufs = kvmalloc_array(pipe->buffers, sizeof(struct pipe_buffer),
                          GFP_KERNEL);
    if (!bufs)
        return -ENOMEM;

    fuse_copy_init(&cs, 1, NULL);
    cs.pipebufs = bufs;
    cs.pipe = pipe;
    ret = fuse_dev_do_read(fud, in, &cs, len);
    if (ret < 0)
        goto out;

    if (pipe->nrbufs + cs.nr_segs > pipe->buffers) {
        ret = -EIO;
        goto out;
    }

    for (ret = total = 0; page_nr < cs.nr_segs; total += ret) {
        /*
         * Need to be careful about this. Having buf->ops in module
         * code can Oops if the buffer persists after module unload.
         */
        bufs[page_nr].ops = &nosteal_pipe_buf_ops;
        bufs[page_nr].flags = 0;
        ret = add_to_pipe(pipe, &bufs[page_nr++]);
        if (unlikely(ret < 0))
            break;
    }
    if (total)
        ret = total;
out:
    for (; page_nr < cs.nr_segs; page_nr++)
        put_page(bufs[page_nr].page);

    kvfree(bufs);
    return ret;
}
18,181
66,356
0
static AddressParts gen_lea_modrm_0(CPUX86State *env, DisasContext *s,
                                    int modrm)
{
    int def_seg, base, index, scale, mod, rm;
    target_long disp;
    bool havesib;

    def_seg = R_DS;
    index = -1;
    scale = 0;
    disp = 0;

    mod = (modrm >> 6) & 3;
    rm = modrm & 7;
    base = rm | REX_B(s);

    if (mod == 3) {
        /* Normally filtered out earlier, but including this path
           simplifies multi-byte nop, as well as bndcl, bndcu, bndcn.  */
        goto done;
    }

    switch (s->aflag) {
    case MO_64:
    case MO_32:
        havesib = 0;
        if (rm == 4) {
            int code = cpu_ldub_code(env, s->pc++);
            scale = (code >> 6) & 3;
            index = ((code >> 3) & 7) | REX_X(s);
            if (index == 4) {
                index = -1;  /* no index */
            }
            base = (code & 7) | REX_B(s);
            havesib = 1;
        }

        switch (mod) {
        case 0:
            if ((base & 7) == 5) {
                base = -1;
                disp = (int32_t)cpu_ldl_code(env, s->pc);
                s->pc += 4;
                if (CODE64(s) && !havesib) {
                    base = -2;
                    disp += s->pc + s->rip_offset;
                }
            }
            break;
        case 1:
            disp = (int8_t)cpu_ldub_code(env, s->pc++);
            break;
        default:
        case 2:
            disp = (int32_t)cpu_ldl_code(env, s->pc);
            s->pc += 4;
            break;
        }

        /* For correct popl handling with esp.  */
        if (base == R_ESP && s->popl_esp_hack) {
            disp += s->popl_esp_hack;
        }
        if (base == R_EBP || base == R_ESP) {
            def_seg = R_SS;
        }
        break;

    case MO_16:
        if (mod == 0) {
            if (rm == 6) {
                base = -1;
                disp = cpu_lduw_code(env, s->pc);
                s->pc += 2;
                break;
            }
        } else if (mod == 1) {
            disp = (int8_t)cpu_ldub_code(env, s->pc++);
        } else {
            disp = (int16_t)cpu_lduw_code(env, s->pc);
            s->pc += 2;
        }

        switch (rm) {
        case 0:
            base = R_EBX;
            index = R_ESI;
            break;
        case 1:
            base = R_EBX;
            index = R_EDI;
            break;
        case 2:
            base = R_EBP;
            index = R_ESI;
            def_seg = R_SS;
            break;
        case 3:
            base = R_EBP;
            index = R_EDI;
            def_seg = R_SS;
            break;
        case 4:
            base = R_ESI;
            break;
        case 5:
            base = R_EDI;
            break;
        case 6:
            base = R_EBP;
            def_seg = R_SS;
            break;
        default:
        case 7:
            base = R_EBX;
            break;
        }
        break;

    default:
        tcg_abort();
    }

 done:
    return (AddressParts){ def_seg, base, index, scale, disp };
}
18,182
40,056
0
int kvm_dev_ioctl_check_extension(long ext)
{
    int r;

    switch (ext) {
    case KVM_CAP_IRQCHIP:
    case KVM_CAP_HLT:
    case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
    case KVM_CAP_SET_TSS_ADDR:
    case KVM_CAP_EXT_CPUID:
    case KVM_CAP_EXT_EMUL_CPUID:
    case KVM_CAP_CLOCKSOURCE:
    case KVM_CAP_PIT:
    case KVM_CAP_NOP_IO_DELAY:
    case KVM_CAP_MP_STATE:
    case KVM_CAP_SYNC_MMU:
    case KVM_CAP_USER_NMI:
    case KVM_CAP_REINJECT_CONTROL:
    case KVM_CAP_IRQ_INJECT_STATUS:
    case KVM_CAP_IRQFD:
    case KVM_CAP_IOEVENTFD:
    case KVM_CAP_PIT2:
    case KVM_CAP_PIT_STATE2:
    case KVM_CAP_SET_IDENTITY_MAP_ADDR:
    case KVM_CAP_XEN_HVM:
    case KVM_CAP_ADJUST_CLOCK:
    case KVM_CAP_VCPU_EVENTS:
    case KVM_CAP_HYPERV:
    case KVM_CAP_HYPERV_VAPIC:
    case KVM_CAP_HYPERV_SPIN:
    case KVM_CAP_PCI_SEGMENT:
    case KVM_CAP_DEBUGREGS:
    case KVM_CAP_X86_ROBUST_SINGLESTEP:
    case KVM_CAP_XSAVE:
    case KVM_CAP_ASYNC_PF:
    case KVM_CAP_GET_TSC_KHZ:
    case KVM_CAP_KVMCLOCK_CTRL:
    case KVM_CAP_READONLY_MEM:
    case KVM_CAP_HYPERV_TIME:
#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
    case KVM_CAP_ASSIGN_DEV_IRQ:
    case KVM_CAP_PCI_2_3:
#endif
        r = 1;
        break;
    case KVM_CAP_COALESCED_MMIO:
        r = KVM_COALESCED_MMIO_PAGE_OFFSET;
        break;
    case KVM_CAP_VAPIC:
        r = !kvm_x86_ops->cpu_has_accelerated_tpr();
        break;
    case KVM_CAP_NR_VCPUS:
        r = KVM_SOFT_MAX_VCPUS;
        break;
    case KVM_CAP_MAX_VCPUS:
        r = KVM_MAX_VCPUS;
        break;
    case KVM_CAP_NR_MEMSLOTS:
        r = KVM_USER_MEM_SLOTS;
        break;
    case KVM_CAP_PV_MMU:    /* obsolete */
        r = 0;
        break;
#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
    case KVM_CAP_IOMMU:
        r = iommu_present(&pci_bus_type);
        break;
#endif
    case KVM_CAP_MCE:
        r = KVM_MAX_MCE_BANKS;
        break;
    case KVM_CAP_XCRS:
        r = cpu_has_xsave;
        break;
    case KVM_CAP_TSC_CONTROL:
        r = kvm_has_tsc_control;
        break;
    case KVM_CAP_TSC_DEADLINE_TIMER:
        r = boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER);
        break;
    default:
        r = 0;
        break;
    }
    return r;
}
18,183
102,932
0
void DefaultTabHandler::TabDetachedAt(TabContentsWrapper* contents,
                                      int index) {
  delegate_->AsBrowser()->TabDetachedAt(contents, index);
}
18,184
6,633
0
static int zgetfilename(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;
    uint fnlen;
    gs_const_string pfname;
    stream *s;
    byte *sbody;
    int code;

    check_ostack(1);
    check_read_type(*op, t_file);
    s = (op)->value.pfile;

    code = sfilename(s, &pfname);
    if (code < 0) {
        pfname.size = 0;
    }
    fnlen = pfname.size;
    sbody = ialloc_string(fnlen, ".getfilename");
    if (sbody == 0) {
        code = gs_note_error(gs_error_VMerror);
        return code;
    }
    memcpy(sbody, pfname.data, fnlen);
    make_string(op, a_readonly | icurrent_space, fnlen, sbody);
    return 0;
}
18,185
90,054
0
static U32 ZSTD_equivalentCParams(ZSTD_compressionParameters cParams1,
                                  ZSTD_compressionParameters cParams2)
{
    return (cParams1.hashLog  == cParams2.hashLog)
         & (cParams1.chainLog == cParams2.chainLog)
         & (cParams1.strategy == cParams2.strategy)   /* opt parser space */
         & ((cParams1.searchLength==3) == (cParams2.searchLength==3));  /* hashlog3 space */
}
18,186
77,730
0
static enum ofperr
parse_oxms(struct ofpbuf *payload, bool loose,
           struct mf_bitmap *exactp, struct mf_bitmap *maskedp)
{
    struct mf_bitmap exact = MF_BITMAP_INITIALIZER;
    struct mf_bitmap masked = MF_BITMAP_INITIALIZER;

    while (payload->size > 0) {
        const struct mf_field *field;
        enum ofperr error;
        bool hasmask;

        error = nx_pull_header(payload, NULL, &field, &hasmask);
        if (!error) {
            bitmap_set1(hasmask ? masked.bm : exact.bm, field->id);
        } else if (error != OFPERR_OFPBMC_BAD_FIELD || !loose) {
            return error;
        }
    }
    if (exactp) {
        *exactp = exact;
    } else if (!bitmap_is_all_zeros(exact.bm, MFF_N_IDS)) {
        return OFPERR_OFPBMC_BAD_MASK;
    }
    if (maskedp) {
        *maskedp = masked;
    } else if (!bitmap_is_all_zeros(masked.bm, MFF_N_IDS)) {
        return OFPERR_OFPBMC_BAD_MASK;
    }
    return 0;
}
18,187
59,191
0
static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
                                   struct bpf_insn *insn,
                                   const struct bpf_reg_state *ptr_reg,
                                   const struct bpf_reg_state *off_reg)
{
    struct bpf_reg_state *regs = cur_regs(env), *dst_reg;
    bool known = tnum_is_const(off_reg->var_off);
    s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value,
        smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
    u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
        umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
    u8 opcode = BPF_OP(insn->code);
    u32 dst = insn->dst_reg;

    dst_reg = &regs[dst];

    if (WARN_ON_ONCE(known && (smin_val != smax_val))) {
        print_verifier_state(env, env->cur_state);
        verbose(env, "verifier internal error: known but bad sbounds\n");
        return -EINVAL;
    }
    if (WARN_ON_ONCE(known && (umin_val != umax_val))) {
        print_verifier_state(env, env->cur_state);
        verbose(env, "verifier internal error: known but bad ubounds\n");
        return -EINVAL;
    }

    if (BPF_CLASS(insn->code) != BPF_ALU64) {
        /* 32-bit ALU ops on pointers produce (meaningless) scalars */
        if (!env->allow_ptr_leaks)
            verbose(env, "R%d 32-bit pointer arithmetic prohibited\n",
                    dst);
        return -EACCES;
    }

    if (ptr_reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
        if (!env->allow_ptr_leaks)
            verbose(env, "R%d pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL prohibited, null-check it first\n",
                    dst);
        return -EACCES;
    }
    if (ptr_reg->type == CONST_PTR_TO_MAP) {
        if (!env->allow_ptr_leaks)
            verbose(env, "R%d pointer arithmetic on CONST_PTR_TO_MAP prohibited\n",
                    dst);
        return -EACCES;
    }
    if (ptr_reg->type == PTR_TO_PACKET_END) {
        if (!env->allow_ptr_leaks)
            verbose(env, "R%d pointer arithmetic on PTR_TO_PACKET_END prohibited\n",
                    dst);
        return -EACCES;
    }

    /* In case of 'scalar += pointer', dst_reg inherits pointer type and id.
     * The id may be overwritten later if we create a new variable offset.
     */
    dst_reg->type = ptr_reg->type;
    dst_reg->id = ptr_reg->id;

    switch (opcode) {
    case BPF_ADD:
        /* We can take a fixed offset as long as it doesn't overflow
         * the s32 'off' field
         */
        if (known && (ptr_reg->off + smin_val ==
                      (s64)(s32)(ptr_reg->off + smin_val))) {
            /* pointer += K. Accumulate it into fixed offset */
            dst_reg->smin_value = smin_ptr;
            dst_reg->smax_value = smax_ptr;
            dst_reg->umin_value = umin_ptr;
            dst_reg->umax_value = umax_ptr;
            dst_reg->var_off = ptr_reg->var_off;
            dst_reg->off = ptr_reg->off + smin_val;
            dst_reg->range = ptr_reg->range;
            break;
        }
        /* A new variable offset is created. Note that off_reg->off
         * == 0, since it's a scalar.
         * dst_reg gets the pointer type and since some positive
         * integer value was added to the pointer, give it a new 'id'
         * if it's a PTR_TO_PACKET.
         * this creates a new 'base' pointer, off_reg (variable) gets
         * added into the variable offset, and we copy the fixed offset
         * from ptr_reg.
         */
        if (signed_add_overflows(smin_ptr, smin_val) ||
            signed_add_overflows(smax_ptr, smax_val)) {
            dst_reg->smin_value = S64_MIN;
            dst_reg->smax_value = S64_MAX;
        } else {
            dst_reg->smin_value = smin_ptr + smin_val;
            dst_reg->smax_value = smax_ptr + smax_val;
        }
        if (umin_ptr + umin_val < umin_ptr ||
            umax_ptr + umax_val < umax_ptr) {
            dst_reg->umin_value = 0;
            dst_reg->umax_value = U64_MAX;
        } else {
            dst_reg->umin_value = umin_ptr + umin_val;
            dst_reg->umax_value = umax_ptr + umax_val;
        }
        dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off);
        dst_reg->off = ptr_reg->off;
        if (reg_is_pkt_pointer(ptr_reg)) {
            dst_reg->id = ++env->id_gen;
            /* something was added to pkt_ptr, set range to zero */
            dst_reg->range = 0;
        }
        break;
    case BPF_SUB:
        if (dst_reg == off_reg) {
            /* scalar -= pointer. Creates an unknown scalar */
            if (!env->allow_ptr_leaks)
                verbose(env, "R%d tried to subtract pointer from scalar\n",
                        dst);
            return -EACCES;
        }
        /* We don't allow subtraction from FP, because (according to
         * test_verifier.c test "invalid fp arithmetic", JITs might not
         * be able to deal with it.
         */
        if (ptr_reg->type == PTR_TO_STACK) {
            if (!env->allow_ptr_leaks)
                verbose(env, "R%d subtraction from stack pointer prohibited\n",
                        dst);
            return -EACCES;
        }
        if (known && (ptr_reg->off - smin_val ==
                      (s64)(s32)(ptr_reg->off - smin_val))) {
            /* pointer -= K. Subtract it from fixed offset */
            dst_reg->smin_value = smin_ptr;
            dst_reg->smax_value = smax_ptr;
            dst_reg->umin_value = umin_ptr;
            dst_reg->umax_value = umax_ptr;
            dst_reg->var_off = ptr_reg->var_off;
            dst_reg->id = ptr_reg->id;
            dst_reg->off = ptr_reg->off - smin_val;
            dst_reg->range = ptr_reg->range;
            break;
        }
        /* A new variable offset is created. If the subtrahend is known
         * nonnegative, then any reg->range we had before is still good.
         */
        if (signed_sub_overflows(smin_ptr, smax_val) ||
            signed_sub_overflows(smax_ptr, smin_val)) {
            /* Overflow possible, we know nothing */
            dst_reg->smin_value = S64_MIN;
            dst_reg->smax_value = S64_MAX;
        } else {
            dst_reg->smin_value = smin_ptr - smax_val;
            dst_reg->smax_value = smax_ptr - smin_val;
        }
        if (umin_ptr < umax_val) {
            /* Overflow possible, we know nothing */
            dst_reg->umin_value = 0;
            dst_reg->umax_value = U64_MAX;
        } else {
            /* Cannot overflow (as long as bounds are consistent) */
            dst_reg->umin_value = umin_ptr - umax_val;
            dst_reg->umax_value = umax_ptr - umin_val;
        }
        dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off);
        dst_reg->off = ptr_reg->off;
        if (reg_is_pkt_pointer(ptr_reg)) {
            dst_reg->id = ++env->id_gen;
            /* something was added to pkt_ptr, set range to zero */
            if (smin_val < 0)
                dst_reg->range = 0;
        }
        break;
    case BPF_AND:
    case BPF_OR:
    case BPF_XOR:
        /* bitwise ops on pointers are troublesome, prohibit for now.
         * (However, in principle we could allow some cases, e.g.
         * ptr &= ~3 which would reduce min_value by 3.)
         */
        if (!env->allow_ptr_leaks)
            verbose(env, "R%d bitwise operator %s on pointer prohibited\n",
                    dst, bpf_alu_string[opcode >> 4]);
        return -EACCES;
    default:
        /* other operators (e.g. MUL,LSH) produce non-pointer results */
        if (!env->allow_ptr_leaks)
            verbose(env, "R%d pointer arithmetic with %s operator prohibited\n",
                    dst, bpf_alu_string[opcode >> 4]);
        return -EACCES;
    }

    __update_reg_bounds(dst_reg);
    __reg_deduce_bounds(dst_reg);
    __reg_bound_offset(dst_reg);
    return 0;
}
18,188
92,763
0
static void update_numa_stats(struct numa_stats *ns, int nid)
{
    int cpu;

    memset(ns, 0, sizeof(*ns));
    for_each_cpu(cpu, cpumask_of_node(nid)) {
        struct rq *rq = cpu_rq(cpu);

        ns->load += weighted_cpuload(rq);
        ns->compute_capacity += capacity_of(cpu);
    }
}
18,189
75,423
0
static int opfxch(RAsm *a, ut8 *data, const Opcode *op) {
    int l = 0;
    switch (op->operands_count) {
    case 0:
        data[l++] = 0xd9;
        data[l++] = 0xc9;
        break;
    case 1:
        if (op->operands[0].type & OT_FPUREG & ~OT_REGALL) {
            data[l++] = 0xd9;
            data[l++] = 0xc8 | op->operands[0].reg;
        } else {
            return -1;
        }
        break;
    default:
        return -1;
    }
    return l;
}
18,190
15,656
0
static void tsc210x_write(TSC210xState *s, uint16_t value)
{
    /*
     * This is a two-state state machine for reading
     * command and data every second time.
     */
    if (!s->state) {
        s->command = value >> 15;
        s->page = (value >> 11) & 0x0f;
        s->offset = (value >> 5) & 0x3f;
        s->state = 1;
    } else {
        if (s->command)
            fprintf(stderr, "tsc210x_write: SPI overrun!\n");
        else
            switch (s->page) {
            case TSC_DATA_REGISTERS_PAGE:
                tsc2102_data_register_write(s, s->offset, value);
                break;
            case TSC_CONTROL_REGISTERS_PAGE:
                tsc2102_control_register_write(s, s->offset, value);
                break;
            case TSC_AUDIO_REGISTERS_PAGE:
                tsc2102_audio_register_write(s, s->offset, value);
                break;
            default:
                hw_error("tsc210x_write: wrong memory page\n");
            }

        tsc210x_pin_update(s);
        s->state = 0;
    }
}
18,191
54,802
0
static void snd_usbmidi_raw_input(struct snd_usb_midi_in_endpoint *ep,
                                  uint8_t *buffer, int buffer_length)
{
    snd_usbmidi_input_data(ep, 0, buffer, buffer_length);
}
18,192
185,370
1
bool LayoutSVGTransformableContainer::calculateLocalTransform()
{
    SVGGraphicsElement* element = toSVGGraphicsElement(this->element());
    ASSERT(element);

    // If we are the layoutObject for a <use> element, or for any <g> element
    // inside a shadow tree created during the use/symbol/svg expansion in
    // SVGUseElement, we need to respect the translations induced by the
    // corresponding use element's x/y attributes.
    SVGUseElement* useElement = nullptr;
    if (isSVGUseElement(*element)) {
        useElement = toSVGUseElement(element);
    } else if (isSVGGElement(*element) && toSVGGElement(element)->inUseShadowTree()) {
        SVGElement* correspondingElement = element->correspondingElement();
        if (isSVGUseElement(correspondingElement))
            useElement = toSVGUseElement(correspondingElement);
    }

    if (useElement) {
        SVGLengthContext lengthContext(useElement);
        FloatSize translation(
            useElement->x()->currentValue()->value(lengthContext),
            useElement->y()->currentValue()->value(lengthContext));
        if (translation != m_additionalTranslation)
            m_needsTransformUpdate = true;
        m_additionalTranslation = translation;
    }

    if (!m_needsTransformUpdate)
        return false;

    m_localTransform = element->calculateAnimatedLocalTransform();
    m_localTransform.translate(m_additionalTranslation.width(), m_additionalTranslation.height());
    m_needsTransformUpdate = false;
    return true;
}
18,193
160,472
0
void RenderFrameHostImpl::OnShowPopup(
    const FrameHostMsg_ShowPopup_Params& params) {
  RenderViewHostDelegateView* view =
      render_view_host_->delegate_->GetDelegateView();
  if (view) {
    gfx::Point original_point(params.bounds.x(), params.bounds.y());
    gfx::Point transformed_point =
        static_cast<RenderWidgetHostViewBase*>(GetView())
            ->TransformPointToRootCoordSpace(original_point);
    gfx::Rect transformed_bounds(transformed_point.x(), transformed_point.y(),
                                 params.bounds.width(),
                                 params.bounds.height());
    view->ShowPopupMenu(this, transformed_bounds, params.item_height,
                        params.item_font_size, params.selected_item,
                        params.popup_items, params.right_aligned,
                        params.allow_multiple_selection);
  }
}
18,194
96,515
0
static int AppLayerProtoDetectTest04(void)
{
    AppLayerProtoDetectUnittestCtxBackup();
    AppLayerProtoDetectSetup();

    uint8_t l7data[] = "HTTP/1.1 200 OK\r\nServer: Apache/1.0\r\n\r\n";
    const char *buf;
    int r = 0;
    Flow f;
    AppProto pm_results[ALPROTO_MAX];
    AppLayerProtoDetectThreadCtx *alpd_tctx;

    memset(&f, 0x00, sizeof(f));
    f.protomap = FlowGetProtoMapping(IPPROTO_TCP);

    memset(pm_results, 0, sizeof(pm_results));

    buf = "200 ";
    AppLayerProtoDetectPMRegisterPatternCS(IPPROTO_TCP, ALPROTO_HTTP, buf, 13, 0, STREAM_TOCLIENT);
    AppLayerProtoDetectPrepareState();
    /* AppLayerProtoDetectGetCtxThread() should be called post
     * AppLayerProtoDetectPrepareState(), since it sets internal structures
     * which depend on the above function. */
    alpd_tctx = AppLayerProtoDetectGetCtxThread();

    if (alpd_ctx.ctx_ipp[FLOW_PROTO_TCP].ctx_pm[0].max_pat_id != 0) {
        printf("alpd_ctx.ctx_ipp[FLOW_PROTO_TCP].ctx_pm[0].max_pat_id != 0\n");
        goto end;
    }
    if (alpd_ctx.ctx_ipp[FLOW_PROTO_TCP].ctx_pm[1].max_pat_id != 1) {
        printf("alpd_ctx.ctx_ipp[FLOW_PROTO_TCP].ctx_pm[1].max_pat_id != 1\n");
        goto end;
    }
    if (alpd_ctx.ctx_ipp[FLOW_PROTO_TCP].ctx_pm[0].map != NULL) {
        printf("alpd_ctx.ctx_ipp[FLOW_PROTO_TCP].ctx_pm[0].map != NULL\n");
        goto end;
    }
    if (alpd_ctx.ctx_ipp[FLOW_PROTO_TCP].ctx_pm[1].map == NULL) {
        printf("alpd_ctx.ctx_ipp[FLOW_PROTO_TCP].ctx_pm[1].map == NULL\n");
        goto end;
    }
    if (alpd_ctx.ctx_ipp[FLOW_PROTO_TCP].ctx_pm[1].map[0]->alproto != ALPROTO_HTTP) {
        printf("alpd_ctx.ctx_ipp[FLOW_PROTO_TCP].ctx_pm[1].map[0]->alproto != ALPROTO_HTTP\n");
        goto end;
    }

    uint32_t cnt = AppLayerProtoDetectPMGetProto(alpd_tctx, &f,
                                                 l7data, sizeof(l7data),
                                                 STREAM_TOCLIENT,
                                                 IPPROTO_TCP,
                                                 pm_results);
    if (cnt != 1 && pm_results[0] != ALPROTO_HTTP) {
        printf("cnt != 1 && pm_results[0] != ALPROTO_HTTP\n");
        goto end;
    }

    r = 1;

 end:
    if (alpd_tctx != NULL)
        AppLayerProtoDetectDestroyCtxThread(alpd_tctx);
    AppLayerProtoDetectDeSetup();
    AppLayerProtoDetectUnittestCtxRestore();
    return r;
}
18,195
32,381
0
static inline void mnt_dec_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
    this_cpu_dec(mnt->mnt_pcp->mnt_writers);
#else
    mnt->mnt_writers--;
#endif
}
18,196
161,987
0
explicit PdfCompositorTestService(const std::string& creator) : PdfCompositorService(creator) {}
18,197
93,212
0
METHODDEF(JDIMENSION)
get_text_rgb_cmyk_row(j_compress_ptr cinfo, cjpeg_source_ptr sinfo)
/* This version is for reading text-format PPM files with any maxval and
   converting to CMYK */
{
  ppm_source_ptr source = (ppm_source_ptr)sinfo;
  FILE *infile = source->pub.input_file;
  register JSAMPROW ptr;
  register JSAMPLE *rescale = source->rescale;
  JDIMENSION col;
  unsigned int maxval = source->maxval;

  ptr = source->pub.buffer[0];
  if (maxval == MAXJSAMPLE) {
    for (col = cinfo->image_width; col > 0; col--) {
      JSAMPLE r = read_pbm_integer(cinfo, infile, maxval);
      JSAMPLE g = read_pbm_integer(cinfo, infile, maxval);
      JSAMPLE b = read_pbm_integer(cinfo, infile, maxval);
      rgb_to_cmyk(r, g, b, ptr, ptr + 1, ptr + 2, ptr + 3);
      ptr += 4;
    }
  } else {
    for (col = cinfo->image_width; col > 0; col--) {
      JSAMPLE r = rescale[read_pbm_integer(cinfo, infile, maxval)];
      JSAMPLE g = rescale[read_pbm_integer(cinfo, infile, maxval)];
      JSAMPLE b = rescale[read_pbm_integer(cinfo, infile, maxval)];
      rgb_to_cmyk(r, g, b, ptr, ptr + 1, ptr + 2, ptr + 3);
      ptr += 4;
    }
  }
  return 1;
}
18,198
79,492
0
static int get_description(struct NntpData *nntp_data, char *wildmat, char *msg)
{
  char buf[STRING];
  char *cmd = NULL;

  /* get newsgroup description, if possible */
  struct NntpServer *nserv = nntp_data->nserv;
  if (!wildmat)
    wildmat = nntp_data->group;
  if (nserv->hasLIST_NEWSGROUPS)
    cmd = "LIST NEWSGROUPS";
  else if (nserv->hasXGTITLE)
    cmd = "XGTITLE";
  else
    return 0;

  snprintf(buf, sizeof(buf), "%s %s\r\n", cmd, wildmat);
  int rc = nntp_fetch_lines(nntp_data, buf, sizeof(buf), msg, fetch_description, nserv);
  if (rc > 0)
  {
    mutt_error("%s: %s", cmd, buf);
  }
  return rc;
}
18,199