unique_id
int64
13
189k
target
int64
0
1
code
stringlengths
20
241k
__index_level_0__
int64
0
18.9k
51,927
0
SpoolssAddPrinterEx_r(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep _U_) { dcerpc_call_value *dcv = (dcerpc_call_value *)di->call_data; e_ctx_hnd policy_hnd; proto_item *hnd_item; guint32 status; /* Parse packet */ offset = dissect_nt_policy_hnd( tvb, offset, pinfo, tree, di, drep, hf_hnd, &policy_hnd, &hnd_item, TRUE, FALSE); offset = dissect_doserror( tvb, offset, pinfo, tree, di, drep, hf_rc, &status); if( status == 0 ){ const char *pol_name; if (dcv->se_data){ pol_name = wmem_strdup_printf(wmem_packet_scope(), "AddPrinterEx(%s)", (char *)dcv->se_data); } else { pol_name = "Unknown AddPrinterEx() handle"; } if(!pinfo->fd->flags.visited){ dcerpc_store_polhnd_name(&policy_hnd, pinfo, pol_name); } if(hnd_item) proto_item_append_text(hnd_item, ": %s", pol_name); } return offset; }
5,300
96,587
0
int mbedtls_ecp_self_test( int verbose ) { int ret; size_t i; mbedtls_ecp_group grp; mbedtls_ecp_point R, P; mbedtls_mpi m; unsigned long add_c_prev, dbl_c_prev, mul_c_prev; /* exponents especially adapted for secp192r1 */ const char *exponents[] = { "000000000000000000000000000000000000000000000001", /* one */ "FFFFFFFFFFFFFFFFFFFFFFFF99DEF836146BC9B1B4D22830", /* N - 1 */ "5EA6F389A38B8BC81E767753B15AA5569E1782E30ABE7D25", /* random */ "400000000000000000000000000000000000000000000000", /* one and zeros */ "7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF", /* all ones */ "555555555555555555555555555555555555555555555555", /* 101010... */ }; mbedtls_ecp_group_init( &grp ); mbedtls_ecp_point_init( &R ); mbedtls_ecp_point_init( &P ); mbedtls_mpi_init( &m ); /* Use secp192r1 if available, or any available curve */ #if defined(MBEDTLS_ECP_DP_SECP192R1_ENABLED) MBEDTLS_MPI_CHK( mbedtls_ecp_group_load( &grp, MBEDTLS_ECP_DP_SECP192R1 ) ); #else MBEDTLS_MPI_CHK( mbedtls_ecp_group_load( &grp, mbedtls_ecp_curve_list()->grp_id ) ); #endif if( verbose != 0 ) mbedtls_printf( " ECP test #1 (constant op_count, base point G): " ); /* Do a dummy multiplication first to trigger precomputation */ MBEDTLS_MPI_CHK( mbedtls_mpi_lset( &m, 2 ) ); MBEDTLS_MPI_CHK( mbedtls_ecp_mul( &grp, &P, &m, &grp.G, NULL, NULL ) ); add_count = 0; dbl_count = 0; mul_count = 0; MBEDTLS_MPI_CHK( mbedtls_mpi_read_string( &m, 16, exponents[0] ) ); MBEDTLS_MPI_CHK( mbedtls_ecp_mul( &grp, &R, &m, &grp.G, NULL, NULL ) ); for( i = 1; i < sizeof( exponents ) / sizeof( exponents[0] ); i++ ) { add_c_prev = add_count; dbl_c_prev = dbl_count; mul_c_prev = mul_count; add_count = 0; dbl_count = 0; mul_count = 0; MBEDTLS_MPI_CHK( mbedtls_mpi_read_string( &m, 16, exponents[i] ) ); MBEDTLS_MPI_CHK( mbedtls_ecp_mul( &grp, &R, &m, &grp.G, NULL, NULL ) ); if( add_count != add_c_prev || dbl_count != dbl_c_prev || mul_count != mul_c_prev ) { if( verbose != 0 ) mbedtls_printf( "failed (%u)\n", (unsigned int) i ); ret = 1; 
goto cleanup; } } if( verbose != 0 ) mbedtls_printf( "passed\n" ); if( verbose != 0 ) mbedtls_printf( " ECP test #2 (constant op_count, other point): " ); /* We computed P = 2G last time, use it */ add_count = 0; dbl_count = 0; mul_count = 0; MBEDTLS_MPI_CHK( mbedtls_mpi_read_string( &m, 16, exponents[0] ) ); MBEDTLS_MPI_CHK( mbedtls_ecp_mul( &grp, &R, &m, &P, NULL, NULL ) ); for( i = 1; i < sizeof( exponents ) / sizeof( exponents[0] ); i++ ) { add_c_prev = add_count; dbl_c_prev = dbl_count; mul_c_prev = mul_count; add_count = 0; dbl_count = 0; mul_count = 0; MBEDTLS_MPI_CHK( mbedtls_mpi_read_string( &m, 16, exponents[i] ) ); MBEDTLS_MPI_CHK( mbedtls_ecp_mul( &grp, &R, &m, &P, NULL, NULL ) ); if( add_count != add_c_prev || dbl_count != dbl_c_prev || mul_count != mul_c_prev ) { if( verbose != 0 ) mbedtls_printf( "failed (%u)\n", (unsigned int) i ); ret = 1; goto cleanup; } } if( verbose != 0 ) mbedtls_printf( "passed\n" ); cleanup: if( ret < 0 && verbose != 0 ) mbedtls_printf( "Unexpected error, return code = %08X\n", ret ); mbedtls_ecp_group_free( &grp ); mbedtls_ecp_point_free( &R ); mbedtls_ecp_point_free( &P ); mbedtls_mpi_free( &m ); if( verbose != 0 ) mbedtls_printf( "\n" ); return( ret ); }
5,301
126,436
0
void BrowserWindowGtk::ShowOneClickSigninBubble( const StartSyncCallback& start_sync_callback) { new OneClickSigninBubbleGtk(this, start_sync_callback); }
5,302
121,524
0
bool TabsDetectLanguageFunction::RunImpl() { int tab_id = 0; Browser* browser = NULL; WebContents* contents = NULL; if (HasOptionalArgument(0)) { EXTENSION_FUNCTION_VALIDATE(args_->GetInteger(0, &tab_id)); if (!GetTabById(tab_id, profile(), include_incognito(), &browser, NULL, &contents, NULL, &error_)) { return false; } if (!browser || !contents) return false; } else { browser = GetCurrentBrowser(); if (!browser) return false; contents = browser->tab_strip_model()->GetActiveWebContents(); if (!contents) return false; } if (contents->GetController().NeedsReload()) { error_ = keys::kCannotDetermineLanguageOfUnloadedTab; return false; } AddRef(); // Balanced in GotLanguage(). TranslateTabHelper* translate_tab_helper = TranslateTabHelper::FromWebContents(contents); if (!translate_tab_helper->language_state().original_language().empty()) { MessageLoop::current()->PostTask(FROM_HERE, base::Bind( &TabsDetectLanguageFunction::GotLanguage, this, translate_tab_helper->language_state().original_language())); return true; } registrar_.Add(this, chrome::NOTIFICATION_TAB_LANGUAGE_DETERMINED, content::Source<WebContents>(contents)); registrar_.Add( this, chrome::NOTIFICATION_TAB_CLOSING, content::Source<NavigationController>(&(contents->GetController()))); registrar_.Add( this, content::NOTIFICATION_NAV_ENTRY_COMMITTED, content::Source<NavigationController>(&(contents->GetController()))); return true; }
5,303
24,613
0
static u64 fuse_get_unique(struct fuse_conn *fc) { fc->reqctr++; /* zero is special */ if (fc->reqctr == 0) fc->reqctr = 1; return fc->reqctr; }
5,304
133,895
0
bool FormAssociatedElement::rangeOverflow() const { return false; }
5,305
152,700
0
Factory(const std::string& name, HistogramType histogram_type, HistogramBase::Sample minimum, HistogramBase::Sample maximum, uint32_t bucket_count, int32_t flags) : name_(name), histogram_type_(histogram_type), minimum_(minimum), maximum_(maximum), bucket_count_(bucket_count), flags_(flags) {}
5,306
92,302
0
cdataSectionProcessor(XML_Parser parser, const char *start, const char *end, const char **endPtr) { enum XML_Error result = doCdataSection(parser, parser->m_encoding, &start, end, endPtr, (XML_Bool)!parser->m_parsingStatus.finalBuffer); if (result != XML_ERROR_NONE) return result; if (start) { if (parser->m_parentParser) { /* we are parsing an external entity */ parser->m_processor = externalEntityContentProcessor; return externalEntityContentProcessor(parser, start, end, endPtr); } else { parser->m_processor = contentProcessor; return contentProcessor(parser, start, end, endPtr); } } return result; }
5,307
138,849
0
void TestRenderWidgetHostView::StopSpeaking() {}
5,308
35,626
0
static void fill_user_desc(struct user_desc *info, int idx, const struct desc_struct *desc) { memset(info, 0, sizeof(*info)); info->entry_number = idx; info->base_addr = get_desc_base(desc); info->limit = get_desc_limit(desc); info->seg_32bit = desc->d; info->contents = desc->type >> 2; info->read_exec_only = !(desc->type & 2); info->limit_in_pages = desc->g; info->seg_not_present = !desc->p; info->useable = desc->avl; #ifdef CONFIG_X86_64 info->lm = desc->l; #endif }
5,309
134,600
0
void OSExchangeData::SetInDragLoop(bool in_drag_loop) { provider_->SetInDragLoop(in_drag_loop); }
5,310
86,040
0
static int f2fs_clear_qf_name(struct super_block *sb, int qtype) { struct f2fs_sb_info *sbi = F2FS_SB(sb); if (sb_any_quota_loaded(sb) && sbi->s_qf_names[qtype]) { f2fs_msg(sb, KERN_ERR, "Cannot change journaled quota options" " when quota turned on"); return -EINVAL; } kfree(sbi->s_qf_names[qtype]); sbi->s_qf_names[qtype] = NULL; return 0; }
5,311
40,464
0
int ipxitf_send(struct ipx_interface *intrfc, struct sk_buff *skb, char *node) { struct ipxhdr *ipx = ipx_hdr(skb); struct net_device *dev = intrfc->if_dev; struct datalink_proto *dl = intrfc->if_dlink; char dest_node[IPX_NODE_LEN]; int send_to_wire = 1; int addr_len; ipx->ipx_tctrl = IPX_SKB_CB(skb)->ipx_tctrl; ipx->ipx_dest.net = IPX_SKB_CB(skb)->ipx_dest_net; ipx->ipx_source.net = IPX_SKB_CB(skb)->ipx_source_net; /* see if we need to include the netnum in the route list */ if (IPX_SKB_CB(skb)->last_hop.index >= 0) { __be32 *last_hop = (__be32 *)(((u8 *) skb->data) + sizeof(struct ipxhdr) + IPX_SKB_CB(skb)->last_hop.index * sizeof(__be32)); *last_hop = IPX_SKB_CB(skb)->last_hop.netnum; IPX_SKB_CB(skb)->last_hop.index = -1; } /* * We need to know how many skbuffs it will take to send out this * packet to avoid unnecessary copies. */ if (!dl || !dev || dev->flags & IFF_LOOPBACK) send_to_wire = 0; /* No non looped */ /* * See if this should be demuxed to sockets on this interface * * We want to ensure the original was eaten or that we only use * up clones. */ if (ipx->ipx_dest.net == intrfc->if_netnum) { /* * To our own node, loop and free the original. * The internal net will receive on all node address. */ if (intrfc == ipx_internal_net || !memcmp(intrfc->if_node, node, IPX_NODE_LEN)) { /* Don't charge sender */ skb_orphan(skb); /* Will charge receiver */ return ipxitf_demux_socket(intrfc, skb, 0); } /* Broadcast, loop and possibly keep to send on. */ if (!memcmp(ipx_broadcast_node, node, IPX_NODE_LEN)) { if (!send_to_wire) skb_orphan(skb); ipxitf_demux_socket(intrfc, skb, send_to_wire); if (!send_to_wire) goto out; } } /* * If the originating net is not equal to our net; this is routed * We are still charging the sender. Which is right - the driver * free will handle this fairly. 
*/ if (ipx->ipx_source.net != intrfc->if_netnum) { /* * Unshare the buffer before modifying the count in * case it's a flood or tcpdump */ skb = skb_unshare(skb, GFP_ATOMIC); if (!skb) goto out; if (++ipx->ipx_tctrl > ipxcfg_max_hops) send_to_wire = 0; } if (!send_to_wire) { kfree_skb(skb); goto out; } /* Determine the appropriate hardware address */ addr_len = dev->addr_len; if (!memcmp(ipx_broadcast_node, node, IPX_NODE_LEN)) memcpy(dest_node, dev->broadcast, addr_len); else memcpy(dest_node, &(node[IPX_NODE_LEN-addr_len]), addr_len); /* Make any compensation for differing physical/data link size */ skb = ipxitf_adjust_skbuff(intrfc, skb); if (!skb) goto out; /* set up data link and physical headers */ skb->dev = dev; skb->protocol = htons(ETH_P_IPX); /* Send it out */ dl->request(dl, skb, dest_node); out: return 0; }
5,312
181,114
1
atm_if_print(netdissect_options *ndo, const struct pcap_pkthdr *h, const u_char *p) { u_int caplen = h->caplen; u_int length = h->len; uint32_t llchdr; u_int hdrlen = 0; if (caplen < 1 || length < 1) { ND_PRINT((ndo, "%s", tstr)); return (caplen); } /* Cisco Style NLPID ? */ if (*p == LLC_UI) { if (ndo->ndo_eflag) ND_PRINT((ndo, "CNLPID ")); isoclns_print(ndo, p + 1, length - 1, caplen - 1); return hdrlen; } /* * Must have at least a DSAP, an SSAP, and the first byte of the * control field. */ if (caplen < 3 || length < 3) { ND_PRINT((ndo, "%s", tstr)); return (caplen); } /* * Extract the presumed LLC header into a variable, for quick * testing. * Then check for a header that's neither a header for a SNAP * packet nor an RFC 2684 routed NLPID-formatted PDU nor * an 802.2-but-no-SNAP IP packet. */ llchdr = EXTRACT_24BITS(p); if (llchdr != LLC_UI_HDR(LLCSAP_SNAP) && llchdr != LLC_UI_HDR(LLCSAP_ISONS) && llchdr != LLC_UI_HDR(LLCSAP_IP)) { /* * XXX - assume 802.6 MAC header from Fore driver. * * Unfortunately, the above list doesn't check for * all known SAPs, doesn't check for headers where * the source and destination SAP aren't the same, * and doesn't check for non-UI frames. It also * runs the risk of an 802.6 MAC header that happens * to begin with one of those values being * incorrectly treated as an 802.2 header. * * So is that Fore driver still around? And, if so, * is it still putting 802.6 MAC headers on ATM * packets? If so, could it be changed to use a * new DLT_IEEE802_6 value if we added it? */ if (caplen < 20 || length < 20) { ND_PRINT((ndo, "%s", tstr)); return (caplen); } if (ndo->ndo_eflag) ND_PRINT((ndo, "%08x%08x %08x%08x ", EXTRACT_32BITS(p), EXTRACT_32BITS(p+4), EXTRACT_32BITS(p+8), EXTRACT_32BITS(p+12))); p += 20; length -= 20; caplen -= 20; hdrlen += 20; } hdrlen += atm_llc_print(ndo, p, length, caplen); return (hdrlen); }
5,313
165,120
0
void RemoteFrame::Navigate(Document& origin_document, const KURL& url, bool replace_current_item, UserGestureStatus user_gesture_status) { FrameLoadRequest frame_request(&origin_document, ResourceRequest(url)); frame_request.SetReplacesCurrentItem(replace_current_item); frame_request.GetResourceRequest().SetHasUserGesture( user_gesture_status == UserGestureStatus::kActive); frame_request.GetResourceRequest().SetFrameType( IsMainFrame() ? network::mojom::RequestContextFrameType::kTopLevel : network::mojom::RequestContextFrameType::kNested); Navigate(frame_request); }
5,314
138,187
0
bool AXObject::nameFromContents() const { switch (roleValue()) { case AnchorRole: case ButtonRole: case CheckBoxRole: case DirectoryRole: case DisclosureTriangleRole: case HeadingRole: case LineBreakRole: case LinkRole: case ListBoxOptionRole: case ListItemRole: case MenuItemRole: case MenuItemCheckBoxRole: case MenuItemRadioRole: case MenuListOptionRole: case PopUpButtonRole: case RadioButtonRole: case StaticTextRole: case StatusRole: case SwitchRole: case TabRole: case ToggleButtonRole: case TreeItemRole: return true; default: return false; } }
5,315
60,176
0
R_API int r_bin_is_string(RBin *bin, ut64 va) { RBinString *string; RListIter *iter; RList *list; if (!(list = r_bin_get_strings (bin))) { return false; } r_list_foreach (list, iter, string) { if (string->vaddr == va) { return true; } if (string->vaddr > va) { return false; } } return false; }
5,316
64,380
0
static void vmw_hw_surface_destroy(struct vmw_resource *res) { struct vmw_private *dev_priv = res->dev_priv; struct vmw_surface *srf; void *cmd; if (res->func->destroy == vmw_gb_surface_destroy) { (void) vmw_gb_surface_destroy(res); return; } if (res->id != -1) { cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size()); if (unlikely(!cmd)) { DRM_ERROR("Failed reserving FIFO space for surface " "destruction.\n"); return; } vmw_surface_destroy_encode(res->id, cmd); vmw_fifo_commit(dev_priv, vmw_surface_destroy_size()); /* * used_memory_size_atomic, or separate lock * to avoid taking dev_priv::cmdbuf_mutex in * the destroy path. */ mutex_lock(&dev_priv->cmdbuf_mutex); srf = vmw_res_to_srf(res); dev_priv->used_memory_size -= res->backup_size; mutex_unlock(&dev_priv->cmdbuf_mutex); } vmw_fifo_resource_dec(dev_priv); }
5,317
144,455
0
void WebContentsImpl::CreateNewWindow( SiteInstance* source_site_instance, int32_t route_id, int32_t main_frame_route_id, int32_t main_frame_widget_route_id, const ViewHostMsg_CreateWindow_Params& params, SessionStorageNamespace* session_storage_namespace) { bool is_guest = BrowserPluginGuest::IsGuest(this); if (is_guest && BrowserPluginGuestMode::UseCrossProcessFramesForGuests()) { NOTREACHED(); } DCHECK(!params.opener_suppressed || route_id == MSG_ROUTING_NONE); scoped_refptr<SiteInstance> site_instance = params.opener_suppressed && !is_guest ? SiteInstance::CreateForURL(GetBrowserContext(), params.target_url) : source_site_instance; int render_process_id = source_site_instance->GetProcess()->GetID(); if (!HasMatchingProcess(&frame_tree_, render_process_id)) { RenderProcessHost* rph = source_site_instance->GetProcess(); base::ProcessHandle process_handle = rph->GetHandle(); if (process_handle != base::kNullProcessHandle) { RecordAction( base::UserMetricsAction("Terminate_ProcessMismatch_CreateNewWindow")); rph->Shutdown(RESULT_CODE_KILLED, false); } return; } const std::string& partition_id = GetContentClient()->browser()-> GetStoragePartitionIdForSite(GetBrowserContext(), site_instance->GetSiteURL()); StoragePartition* partition = BrowserContext::GetStoragePartition( GetBrowserContext(), site_instance.get()); DOMStorageContextWrapper* dom_storage_context = static_cast<DOMStorageContextWrapper*>(partition->GetDOMStorageContext()); SessionStorageNamespaceImpl* session_storage_namespace_impl = static_cast<SessionStorageNamespaceImpl*>(session_storage_namespace); CHECK(session_storage_namespace_impl->IsFromContext(dom_storage_context)); if (delegate_ && !delegate_->ShouldCreateWebContents( this, route_id, main_frame_route_id, main_frame_widget_route_id, params.window_container_type, params.frame_name, params.target_url, partition_id, session_storage_namespace)) { if (route_id != MSG_ROUTING_NONE && !RenderViewHost::FromID(render_process_id, route_id)) { Send(new 
ViewMsg_Close(route_id)); } ResourceDispatcherHostImpl::ResumeBlockedRequestsForRouteFromUI( GlobalFrameRoutingId(render_process_id, main_frame_route_id)); return; } CreateParams create_params(GetBrowserContext(), site_instance.get()); create_params.routing_id = route_id; create_params.main_frame_routing_id = main_frame_route_id; create_params.main_frame_widget_routing_id = main_frame_widget_route_id; create_params.main_frame_name = params.frame_name; create_params.opener_render_process_id = render_process_id; create_params.opener_render_frame_id = params.opener_render_frame_id; create_params.opener_suppressed = params.opener_suppressed; if (params.disposition == NEW_BACKGROUND_TAB) create_params.initially_hidden = true; create_params.renderer_initiated_creation = main_frame_route_id != MSG_ROUTING_NONE; WebContentsImpl* new_contents = NULL; if (!is_guest) { create_params.context = view_->GetNativeView(); create_params.initial_size = GetContainerBounds().size(); new_contents = static_cast<WebContentsImpl*>( WebContents::Create(create_params)); } else { new_contents = GetBrowserPluginGuest()->CreateNewGuestWindow(create_params); } new_contents->GetController().SetSessionStorageNamespace( partition_id, session_storage_namespace); if (!params.frame_name.empty()) new_contents->GetRenderManager()->CreateProxiesForNewNamedFrame(); if (!params.opener_suppressed) { if (!is_guest) { WebContentsView* new_view = new_contents->view_.get(); new_view->CreateViewForWidget( new_contents->GetRenderViewHost()->GetWidget(), false); } DCHECK_NE(MSG_ROUTING_NONE, route_id); pending_contents_[route_id] = new_contents; AddDestructionObserver(new_contents); } if (delegate_) { delegate_->WebContentsCreated( this, params.opener_render_frame_id, params.frame_name, params.target_url, new_contents); } if (params.opener_suppressed) { bool was_blocked = false; if (delegate_) { gfx::Rect initial_rect; delegate_->AddNewContents( this, new_contents, params.disposition, initial_rect, 
params.user_gesture, &was_blocked); } if (!was_blocked) { OpenURLParams open_params(params.target_url, Referrer(), CURRENT_TAB, ui::PAGE_TRANSITION_LINK, true /* is_renderer_initiated */); open_params.user_gesture = params.user_gesture; if (delegate_ && !is_guest && !delegate_->ShouldResumeRequestsForCreatedWindow()) { new_contents->delayed_open_url_params_.reset( new OpenURLParams(open_params)); } else { new_contents->OpenURL(open_params); } } } }
5,318
14,158
0
int __glXDisp_DestroyPixmap(__GLXclientState *cl, GLbyte *pc) { xGLXDestroyPixmapReq *req = (xGLXDestroyPixmapReq *) pc; return DoDestroyDrawable(cl, req->glxpixmap, GLX_DRAWABLE_PIXMAP); }
5,319
107,202
0
virtual bool IsDataLoaded() const { return true; }
5,320
62,720
0
static void PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info) { register ssize_t i; size_t number_children; /* Traverse any children. */ number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL; for (i=0; i < (ssize_t) number_children; i++) if (node_info->child[i] != (NodeInfo *) NULL) PruneToCubeDepth(cube_info,node_info->child[i]); if (node_info->level > cube_info->depth) PruneChild(cube_info,node_info); }
5,321
57,929
0
static ssize_t sock_write_iter(struct kiocb *iocb, struct iov_iter *from) { struct file *file = iocb->ki_filp; struct socket *sock = file->private_data; struct msghdr msg = {.msg_iter = *from}; ssize_t res; if (iocb->ki_pos != 0) return -ESPIPE; if (file->f_flags & O_NONBLOCK) msg.msg_flags = MSG_DONTWAIT; if (sock->type == SOCK_SEQPACKET) msg.msg_flags |= MSG_EOR; res = __sock_sendmsg(iocb, sock, &msg, iocb->ki_nbytes); *from = msg.msg_iter; return res; }
5,322
145,396
0
explicit DummyCryptoServerStreamHelper(quic::QuicRandom* random) : random_(random) {}
5,323
73,478
0
MagickExport ClassType GetPixelCacheStorageClass(const Cache cache) { CacheInfo *magick_restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickSignature); if (cache_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", cache_info->filename); return(cache_info->storage_class); }
5,324
31,920
0
void perf_bp_event(struct perf_event *bp, void *data) { struct perf_sample_data sample; struct pt_regs *regs = data; perf_sample_data_init(&sample, bp->attr.bp_addr, 0); if (!bp->hw.state && !perf_exclude_event(bp, regs)) perf_swevent_event(bp, 1, &sample, regs); }
5,325
140,173
0
void GaiaCookieManagerService::ForceOnCookieChangedProcessing() { GURL google_url = GaiaUrls::GetInstance()->google_url(); std::unique_ptr<net::CanonicalCookie> cookie(net::CanonicalCookie::Create( google_url, kGaiaCookieName, std::string(), "." + google_url.host(), std::string(), base::Time(), base::Time(), false, false, net::CookieSameSite::DEFAULT_MODE, false, net::COOKIE_PRIORITY_DEFAULT)); OnCookieChanged(*cookie, net::CookieStore::ChangeCause::UNKNOWN_DELETION); }
5,326
91,396
0
static bool arg_type_is_refcounted(enum bpf_arg_type type) { return type == ARG_PTR_TO_SOCKET; }
5,327
20,838
0
static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, struct kvm_vcpu_events *events) { if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING | KVM_VCPUEVENT_VALID_SIPI_VECTOR | KVM_VCPUEVENT_VALID_SHADOW)) return -EINVAL; process_nmi(vcpu); vcpu->arch.exception.pending = events->exception.injected; vcpu->arch.exception.nr = events->exception.nr; vcpu->arch.exception.has_error_code = events->exception.has_error_code; vcpu->arch.exception.error_code = events->exception.error_code; vcpu->arch.interrupt.pending = events->interrupt.injected; vcpu->arch.interrupt.nr = events->interrupt.nr; vcpu->arch.interrupt.soft = events->interrupt.soft; if (events->flags & KVM_VCPUEVENT_VALID_SHADOW) kvm_x86_ops->set_interrupt_shadow(vcpu, events->interrupt.shadow); vcpu->arch.nmi_injected = events->nmi.injected; if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING) vcpu->arch.nmi_pending = events->nmi.pending; kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked); if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR) vcpu->arch.sipi_vector = events->sipi_vector; kvm_make_request(KVM_REQ_EVENT, vcpu); return 0; }
5,328
68,621
0
archive_read_format_lha_read_data_skip(struct archive_read *a) { struct lha *lha; int64_t bytes_skipped; lha = (struct lha *)(a->format->data); if (lha->entry_unconsumed) { /* Consume as much as the decompressor actually used. */ __archive_read_consume(a, lha->entry_unconsumed); lha->entry_unconsumed = 0; } /* if we've already read to end of data, we're done. */ if (lha->end_of_entry_cleanup) return (ARCHIVE_OK); /* * If the length is at the beginning, we can skip the * compressed data much more quickly. */ bytes_skipped = __archive_read_consume(a, lha->entry_bytes_remaining); if (bytes_skipped < 0) return (ARCHIVE_FATAL); /* This entry is finished and done. */ lha->end_of_entry_cleanup = lha->end_of_entry = 1; return (ARCHIVE_OK); }
5,329
56,321
0
static double filter_cosine(const double x) { if ((x >= -1.0) && (x <= 1.0)) return ((cos(x * M_PI) + 1.0)/2.0); return 0; }
5,330
8,480
0
UINT CSoundFile::GetNumChannels() const { UINT n = 0; for (UINT i=0; i<m_nChannels; i++) if (ChnSettings[i].nVolume) n++; return n; }
5,331
49,418
0
void proc_flush_task(struct task_struct *task) { int i; struct pid *pid, *tgid; struct upid *upid; pid = task_pid(task); tgid = task_tgid(task); for (i = 0; i <= pid->level; i++) { upid = &pid->numbers[i]; proc_flush_task_mnt(upid->ns->proc_mnt, upid->nr, tgid->numbers[i].nr); } }
5,332
6,283
0
PHP_FUNCTION(localtime) { long timestamp = (long)time(NULL); zend_bool associative = 0; timelib_tzinfo *tzi; timelib_time *ts; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|lb", &timestamp, &associative) == FAILURE) { RETURN_FALSE; } tzi = get_timezone_info(TSRMLS_C); ts = timelib_time_ctor(); ts->tz_info = tzi; ts->zone_type = TIMELIB_ZONETYPE_ID; timelib_unixtime2local(ts, (timelib_sll) timestamp); array_init(return_value); if (associative) { add_assoc_long(return_value, "tm_sec", ts->s); add_assoc_long(return_value, "tm_min", ts->i); add_assoc_long(return_value, "tm_hour", ts->h); add_assoc_long(return_value, "tm_mday", ts->d); add_assoc_long(return_value, "tm_mon", ts->m - 1); add_assoc_long(return_value, "tm_year", ts->y - 1900); add_assoc_long(return_value, "tm_wday", timelib_day_of_week(ts->y, ts->m, ts->d)); add_assoc_long(return_value, "tm_yday", timelib_day_of_year(ts->y, ts->m, ts->d)); add_assoc_long(return_value, "tm_isdst", ts->dst); } else { add_next_index_long(return_value, ts->s); add_next_index_long(return_value, ts->i); add_next_index_long(return_value, ts->h); add_next_index_long(return_value, ts->d); add_next_index_long(return_value, ts->m - 1); add_next_index_long(return_value, ts->y- 1900); add_next_index_long(return_value, timelib_day_of_week(ts->y, ts->m, ts->d)); add_next_index_long(return_value, timelib_day_of_year(ts->y, ts->m, ts->d)); add_next_index_long(return_value, ts->dst); } timelib_time_dtor(ts); }
5,333
9,628
0
static char *php_session_encode(int *newlen TSRMLS_DC) /* {{{ */ { char *ret = NULL; IF_SESSION_VARS() { if (!PS(serializer)) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Unknown session.serialize_handler. Failed to encode session object"); ret = NULL; } else if (PS(serializer)->encode(&ret, newlen TSRMLS_CC) == FAILURE) { ret = NULL; } } else { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Cannot encode non-existent session"); } return ret; } /* }}} */
5,334
104,853
0
std::vector<std::string> Extension::GetDistinctHostsForDisplay( const URLPatternList& list) { return GetDistinctHosts(list, true); }
5,335
66,651
0
static int crypto_cbcmac_digest_update(struct shash_desc *pdesc, const u8 *p, unsigned int len) { struct crypto_shash *parent = pdesc->tfm; struct cbcmac_tfm_ctx *tctx = crypto_shash_ctx(parent); struct cbcmac_desc_ctx *ctx = shash_desc_ctx(pdesc); struct crypto_cipher *tfm = tctx->child; int bs = crypto_shash_digestsize(parent); u8 *dg = (u8 *)ctx + crypto_shash_descsize(parent) - bs; while (len > 0) { unsigned int l = min(len, bs - ctx->len); crypto_xor(dg + ctx->len, p, l); ctx->len +=l; len -= l; p += l; if (ctx->len == bs) { crypto_cipher_encrypt_one(tfm, dg, dg); ctx->len = 0; } } return 0; }
5,336
121,710
0
bool MediaStreamDevicesController::ShouldAlwaysAllowOrigin() const { return profile_->GetHostContentSettingsMap()->ShouldAllowAllContent( request_.security_origin, request_.security_origin, CONTENT_SETTINGS_TYPE_MEDIASTREAM); }
5,337
41,885
0
static inline int rt_scope(int ifa_scope) { if (ifa_scope & IFA_HOST) return RT_SCOPE_HOST; else if (ifa_scope & IFA_LINK) return RT_SCOPE_LINK; else if (ifa_scope & IFA_SITE) return RT_SCOPE_SITE; else return RT_SCOPE_UNIVERSE; }
5,338
82,030
0
R_API RAnalBlock *r_anal_bb_get_failbb(RAnalFunction *fcn, RAnalBlock *bb) { RListIter *iter; RAnalBlock *b; if (bb->fail == UT64_MAX) { return NULL; } if (bb->failbb) { return bb->failbb; } r_list_foreach (fcn->bbs, iter, b) { if (b->addr == bb->fail) { bb->failbb = b; b->prev = bb; return b; } } return NULL; }
5,339
6,463
0
static gboolean cache_file_is_updated( const char* cache_file, int* n_used_files, char*** used_files ) { gboolean ret = FALSE; struct stat st; #if 0 time_t cache_mtime; char** files; int n, i; #endif FILE* f; f = fopen( cache_file, "r" ); if( f ) { if( fstat( fileno(f), &st) == 0 ) { #if 0 cache_mtime = st.st_mtime; if( read_all_used_files(f, &n, &files) ) { for( i =0; i < n; ++i ) { /* files[i][0] is 'D' or 'F' indicating file type. */ if( stat( files[i] + 1, &st ) == -1 ) continue; if( st.st_mtime > cache_mtime ) break; } if( i >= n ) { ret = TRUE; *n_used_files = n; *used_files = files; } } #else ret = read_all_used_files(f, n_used_files, used_files); #endif } fclose( f ); } return ret; }
5,340
168,024
0
void FrameLoader::UpgradeInsecureRequest(ResourceRequest& resource_request, ExecutionContext* origin_context) { if (!origin_context) return; if (!(origin_context->GetSecurityContext().GetInsecureRequestPolicy() & kUpgradeInsecureRequests)) return; if (resource_request.GetFrameType() == network::mojom::RequestContextFrameType::kNested) { return; } resource_request.SetUpgradeIfInsecure(true); KURL url = resource_request.Url(); if (!url.ProtocolIs("http")) return; if (resource_request.GetFrameType() == network::mojom::RequestContextFrameType::kNone || resource_request.GetRequestContext() == WebURLRequest::kRequestContextForm || (!url.Host().IsNull() && origin_context->GetSecurityContext() .InsecureNavigationsToUpgrade() ->Contains(url.Host().Impl()->GetHash()))) { UseCounter::Count(origin_context, WebFeature::kUpgradeInsecureRequestsUpgradedRequest); url.SetProtocol("https"); if (url.Port() == 80) url.SetPort(443); resource_request.SetURL(url); } }
5,341
162,163
0
void RenderProcessHostImpl::set_render_process_host_factory( const RenderProcessHostFactory* rph_factory) { g_render_process_host_factory_ = rph_factory; }
5,342
170,772
0
virtual uint32_t getCaps() { Parcel data, reply; data.writeInterfaceToken(IHDCP::getInterfaceDescriptor()); remote()->transact(HDCP_GET_CAPS, data, &reply); return reply.readInt32(); }
5,343
69,853
0
connection_ap_process_end_not_open( relay_header_t *rh, cell_t *cell, origin_circuit_t *circ, entry_connection_t *conn, crypt_path_t *layer_hint) { node_t *exitrouter; int reason = *(cell->payload+RELAY_HEADER_SIZE); int control_reason; edge_connection_t *edge_conn = ENTRY_TO_EDGE_CONN(conn); (void) layer_hint; /* unused */ if (rh->length > 0) { if (reason == END_STREAM_REASON_TORPROTOCOL || reason == END_STREAM_REASON_DESTROY) { /* Both of these reasons could mean a failed tag * hit the exit and it complained. Do not probe. * Fail the circuit. */ circ->path_state = PATH_STATE_USE_FAILED; return -END_CIRC_REASON_TORPROTOCOL; } else if (reason == END_STREAM_REASON_INTERNAL) { /* We can't infer success or failure, since older Tors report * ENETUNREACH as END_STREAM_REASON_INTERNAL. */ } else { /* Path bias: If we get a valid reason code from the exit, * it wasn't due to tagging. * * We rely on recognized+digest being strong enough to make * tags unlikely to allow us to get tagged, yet 'recognized' * reason codes here. */ pathbias_mark_use_success(circ); } } if (rh->length == 0) { reason = END_STREAM_REASON_MISC; } control_reason = reason | END_STREAM_REASON_FLAG_REMOTE; if (edge_reason_is_retriable(reason) && /* avoid retry if rend */ !connection_edge_is_rendezvous_stream(edge_conn)) { const char *chosen_exit_digest = circ->build_state->chosen_exit->identity_digest; log_info(LD_APP,"Address '%s' refused due to '%s'. 
Considering retrying.", safe_str(conn->socks_request->address), stream_end_reason_to_string(reason)); exitrouter = node_get_mutable_by_id(chosen_exit_digest); switch (reason) { case END_STREAM_REASON_EXITPOLICY: { tor_addr_t addr; tor_addr_make_unspec(&addr); if (rh->length >= 5) { int ttl = -1; tor_addr_make_unspec(&addr); if (rh->length == 5 || rh->length == 9) { tor_addr_from_ipv4n(&addr, get_uint32(cell->payload+RELAY_HEADER_SIZE+1)); if (rh->length == 9) ttl = (int)ntohl(get_uint32(cell->payload+RELAY_HEADER_SIZE+5)); } else if (rh->length == 17 || rh->length == 21) { tor_addr_from_ipv6_bytes(&addr, (char*)(cell->payload+RELAY_HEADER_SIZE+1)); if (rh->length == 21) ttl = (int)ntohl(get_uint32(cell->payload+RELAY_HEADER_SIZE+17)); } if (tor_addr_is_null(&addr)) { log_info(LD_APP,"Address '%s' resolved to 0.0.0.0. Closing,", safe_str(conn->socks_request->address)); connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL); return 0; } if ((tor_addr_family(&addr) == AF_INET && !conn->ipv4_traffic_ok) || (tor_addr_family(&addr) == AF_INET6 && !conn->ipv6_traffic_ok)) { log_fn(LOG_PROTOCOL_WARN, LD_APP, "Got an EXITPOLICY failure on a connection with a " "mismatched family. Closing."); connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL); return 0; } if (get_options()->ClientDNSRejectInternalAddresses && tor_addr_is_internal(&addr, 0)) { log_info(LD_APP,"Address '%s' resolved to internal. 
Closing,", safe_str(conn->socks_request->address)); connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL); return 0; } client_dns_set_addressmap(conn, conn->socks_request->address, &addr, conn->chosen_exit_name, ttl); { char new_addr[TOR_ADDR_BUF_LEN]; tor_addr_to_str(new_addr, &addr, sizeof(new_addr), 1); if (strcmp(conn->socks_request->address, new_addr)) { strlcpy(conn->socks_request->address, new_addr, sizeof(conn->socks_request->address)); control_event_stream_status(conn, STREAM_EVENT_REMAP, 0); } } } /* check if he *ought* to have allowed it */ adjust_exit_policy_from_exitpolicy_failure(circ, conn, exitrouter, &addr); if (conn->chosen_exit_optional || conn->chosen_exit_retries) { /* stop wanting a specific exit */ conn->chosen_exit_optional = 0; /* A non-zero chosen_exit_retries can happen if we set a * TrackHostExits for this address under a port that the exit * relay allows, but then try the same address with a different * port that it doesn't allow to exit. We shouldn't unregister * the mapping, since it is probably still wanted on the * original port. But now we give away to the exit relay that * we probably have a TrackHostExits on it. So be it. */ conn->chosen_exit_retries = 0; tor_free(conn->chosen_exit_name); /* clears it */ } if (connection_ap_detach_retriable(conn, circ, control_reason) >= 0) return 0; /* else, conn will get closed below */ break; } case END_STREAM_REASON_CONNECTREFUSED: if (!conn->chosen_exit_optional) break; /* break means it'll close, below */ /* Else fall through: expire this circuit, clear the * chosen_exit_name field, and try again. */ case END_STREAM_REASON_RESOLVEFAILED: case END_STREAM_REASON_TIMEOUT: case END_STREAM_REASON_MISC: case END_STREAM_REASON_NOROUTE: if (client_dns_incr_failures(conn->socks_request->address) < MAX_RESOLVE_FAILURES) { /* We haven't retried too many times; reattach the connection. */ circuit_log_path(LOG_INFO,LD_APP,circ); /* Mark this circuit "unusable for new streams". 
*/ mark_circuit_unusable_for_new_conns(circ); if (conn->chosen_exit_optional) { /* stop wanting a specific exit */ conn->chosen_exit_optional = 0; tor_free(conn->chosen_exit_name); /* clears it */ } if (connection_ap_detach_retriable(conn, circ, control_reason) >= 0) return 0; /* else, conn will get closed below */ } else { log_notice(LD_APP, "Have tried resolving or connecting to address '%s' " "at %d different places. Giving up.", safe_str(conn->socks_request->address), MAX_RESOLVE_FAILURES); /* clear the failures, so it will have a full try next time */ client_dns_clear_failures(conn->socks_request->address); } break; case END_STREAM_REASON_HIBERNATING: case END_STREAM_REASON_RESOURCELIMIT: if (exitrouter) { policies_set_node_exitpolicy_to_reject_all(exitrouter); } if (conn->chosen_exit_optional) { /* stop wanting a specific exit */ conn->chosen_exit_optional = 0; tor_free(conn->chosen_exit_name); /* clears it */ } if (connection_ap_detach_retriable(conn, circ, control_reason) >= 0) return 0; /* else, will close below */ break; } /* end switch */ log_info(LD_APP,"Giving up on retrying; conn can't be handled."); } log_info(LD_APP, "Edge got end (%s) before we're connected. Marking for close.", stream_end_reason_to_string(rh->length > 0 ? reason : -1)); circuit_log_path(LOG_INFO,LD_APP,circ); /* need to test because of detach_retriable */ if (!ENTRY_TO_CONN(conn)->marked_for_close) connection_mark_unattached_ap(conn, control_reason); return 0; }
5,344
34,405
0
/*
 * Handle the BTRFS_IOC_CLONE_RANGE ioctl: copy the user-supplied argument
 * struct into kernel space and forward its fields to the generic clone
 * helper.  Returns 0 on success, -EFAULT if the argument block cannot be
 * read, or whatever btrfs_ioctl_clone() returns.
 */
static long btrfs_ioctl_clone_range(struct file *file, void __user *argp)
{
	struct btrfs_ioctl_clone_range_args range;

	/* Pull the whole argument struct from user space in one go. */
	if (copy_from_user(&range, argp, sizeof(range)))
		return -EFAULT;

	return btrfs_ioctl_clone(file, range.src_fd, range.src_offset,
				 range.src_length, range.dest_offset);
}
5,345
122,866
0
bool RenderProcessHostImpl::FastShutdownIfPossible() { if (run_renderer_in_process()) return false; // Single process mode never shutdown the renderer. if (!GetContentClient()->browser()->IsFastShutdownPossible()) return false; if (!child_process_launcher_.get() || child_process_launcher_->IsStarting() || !GetHandle()) return false; // Render process hasn't started or is probably crashed. if (!SuddenTerminationAllowed()) return false; ProcessDied(false /* already_dead */); fast_shutdown_started_ = true; return true; }
5,346
6,787
0
/*
 * Apply one configured regex filter <exp> to the HTTP response status line
 * held in channel <rtr>.  Honors previously latched allow/deny decisions
 * (TX_SVALLOW / TX_SVDENY) and the exp action (ALLOW, DENY, REPLACE; REMOVE
 * is meaningless on a status line and is ignored).  On REPLACE the status
 * line is rewritten in the buffer, re-parsed, and txn->status refreshed.
 * Returns 1 when the filter matched and latched a decision or replaced the
 * line, 0 when nothing matched, -1 on a rewrite/parse error.
 * NOTE(review): relies on global `trash` and `pmatch` scratch buffers —
 * not reentrant; confirm this runs only on the single processing path.
 */
int apply_filter_to_sts_line(struct stream *s, struct channel *rtr, struct hdr_exp *exp) { char *cur_ptr, *cur_end; int done; struct http_txn *txn = s->txn; int delta; if (unlikely(txn->flags & TX_SVDENY)) return 1; else if (unlikely(txn->flags & TX_SVALLOW) && (exp->action == ACT_ALLOW || exp->action == ACT_DENY)) return 0; else if (exp->action == ACT_REMOVE) return 0; done = 0; cur_ptr = rtr->buf->p; cur_end = cur_ptr + txn->rsp.sl.st.l; /* Now we have the status line between cur_ptr and cur_end */ if (regex_exec_match2(exp->preg, cur_ptr, cur_end-cur_ptr, MAX_MATCH, pmatch, 0)) { switch (exp->action) { case ACT_ALLOW: txn->flags |= TX_SVALLOW; done = 1; break; case ACT_DENY: txn->flags |= TX_SVDENY; done = 1; break; case ACT_REPLACE: trash.len = exp_replace(trash.str, trash.size, cur_ptr, exp->replace, pmatch); if (trash.len < 0) return -1; delta = buffer_replace2(rtr->buf, cur_ptr, cur_end, trash.str, trash.len); /* FIXME: if the user adds a newline in the replacement, the * index will not be recalculated for now, and the new line * will not be counted as a new header. */ http_msg_move_end(&txn->rsp, delta); cur_end += delta; cur_end = (char *)http_parse_stsline(&txn->rsp, HTTP_MSG_RPVER, cur_ptr, cur_end + 1, NULL, NULL); if (unlikely(!cur_end)) return -1; /* we have a full respnse and we know that we have either a CR * or an LF at <ptr>. */ txn->status = strl2ui(rtr->buf->p + txn->rsp.sl.st.c, txn->rsp.sl.st.c_l); hdr_idx_set_start(&txn->hdr_idx, txn->rsp.sl.st.l, *cur_end == '\r'); /* there is no point trying this regex on headers */ return 1; } } return done; }
5,347
49,372
0
/*
 * Per-network-namespace init hook: create the NETLINK_NETFILTER kernel
 * socket for this namespace and publish it both in the raw stash and via
 * an RCU-visible pointer for lockless readers.
 * Returns 0 on success or -ENOMEM if the socket cannot be created.
 *
 * Cleanup vs. the original: the local `err` was declared but never used;
 * it has been removed.
 */
static int __net_init nfnetlink_net_init(struct net *net)
{
	struct sock *nfnl;
	struct netlink_kernel_cfg cfg = {
		.groups	= NFNLGRP_MAX,
		.input	= nfnetlink_rcv,
#ifdef CONFIG_MODULES
		.bind	= nfnetlink_bind,
#endif
	};

	nfnl = netlink_kernel_create(net, NETLINK_NETFILTER, &cfg);
	if (!nfnl)
		return -ENOMEM;

	/* Stash the raw pointer and publish it for RCU readers. */
	net->nfnl_stash = nfnl;
	rcu_assign_pointer(net->nfnl, nfnl);
	return 0;
}
5,348
128,011
0
// JNI entry point: creates a new AwContents backed by a fresh WebContents
// and returns the native pointer to Java as a jlong.
// NOTE(review): the |browser_context| parameter is ignored — the
// WebContents is always created with AwBrowserContext::GetDefault();
// confirm whether per-context creation was ever intended here.
static jlong Init(JNIEnv* env, const JavaParamRef<jclass>&, const JavaParamRef<jobject>& browser_context) { scoped_ptr<WebContents> web_contents(content::WebContents::Create( content::WebContents::CreateParams(AwBrowserContext::GetDefault()))); return reinterpret_cast<intptr_t>(new AwContents(web_contents.Pass())); }
5,349
109,142
0
// Opens (or creates) a sandboxed filesystem for |frame|'s security origin,
// forwarding the request to the browser via the file-system dispatcher.
// Unique (opaque) origins are rejected immediately with WebFileErrorAbort.
// Ownership of |callbacks| is handed to WebFileSystemCallbackDispatcher.
void RenderViewImpl::openFileSystem( WebFrame* frame, WebFileSystem::Type type, long long size, bool create, WebFileSystemCallbacks* callbacks) { DCHECK(callbacks); WebSecurityOrigin origin = frame->document().securityOrigin(); if (origin.isUnique()) { callbacks->didFail(WebKit::WebFileErrorAbort); return; } ChildThread::current()->file_system_dispatcher()->OpenFileSystem( GURL(origin.toString()), static_cast<fileapi::FileSystemType>(type), size, create, new WebFileSystemCallbackDispatcher(callbacks)); }
5,350
123,823
0
// Worker-side implementation of webkitRequestFileSystem(): validates that
// the worker's origin may access the filesystem (else SECURITY_ERR) and
// that |type| names a valid FileSystemType (else INVALID_MODIFICATION_ERR),
// then forwards the asynchronous request to the platform LocalFileSystem.
// Errors are delivered asynchronously through |errorCallback|.
void WorkerGlobalScopeFileSystem::webkitRequestFileSystem(WorkerGlobalScope& worker, int type, long long size, PassOwnPtr<FileSystemCallback> successCallback, PassOwnPtr<ErrorCallback> errorCallback) { ExecutionContext* secureContext = worker.executionContext(); if (!secureContext->securityOrigin()->canAccessFileSystem()) { DOMFileSystem::scheduleCallback(&worker, errorCallback, FileError::create(FileError::SECURITY_ERR)); return; } FileSystemType fileSystemType = static_cast<FileSystemType>(type); if (!DOMFileSystemBase::isValidType(fileSystemType)) { DOMFileSystem::scheduleCallback(&worker, errorCallback, FileError::create(FileError::INVALID_MODIFICATION_ERR)); return; } LocalFileSystem::from(worker)->requestFileSystem(&worker, fileSystemType, size, FileSystemCallbacks::create(successCallback, errorCallback, &worker, fileSystemType)); }
5,351
88,323
0
/* Make glyph slot <i> the sole selection in the font view, scroll it into
 * view and refresh the info display.  A value of -1 means "no glyph" and
 * is ignored. */
void FVChangeChar(FontView *fv,int i) {
    if ( i==-1 )
	return;

    FVDeselectAll(fv);
    fv->b.selected[i] = true;
    fv->sel_index = 1;
    fv->end_pos = fv->pressed_pos = i;
    FVToggleCharSelected(fv,i);
    FVScrollToChar(fv,i);
    FVShowInfo(fv);
}
5,352
164,967
0
// Installs the LoaderDelegate used by resource loading.  Raw-pointer
// setter: the delegate is not owned here; the caller keeps it alive.
void ResourceDispatcherHostImpl::SetLoaderDelegate( LoaderDelegate* loader_delegate) { loader_delegate_ = loader_delegate; }
5,353
128,680
0
// Returns true when the material-design policy page was requested via the
// --enable-material-design-policy-page command-line switch.
bool MdPolicyPageEnabled() { return base::CommandLine::ForCurrentProcess()->HasSwitch( ::switches::kEnableMaterialDesignPolicyPage); }
5,354
181,508
1
/*
 * Flatten the unfolded filter meta-tree into a linear array of filter_op
 * instructions: unfold the tree, resolve virtual labels to offsets, copy
 * every real instruction (label == 0) into a growing array, and append a
 * terminating FOP_EXIT.  The array is returned through *fop; the return
 * value is the number of instructions (always >= 1 for the exit op).
 * NOTE(review): the counter is a signed int multiplied by sizeof() in
 * SAFE_REALLOC — for pathologically large scripts this could overflow;
 * confirm upstream bounds on script size.
 */
size_t compile_tree(struct filter_op **fop) { int i = 1; struct filter_op *array = NULL; struct unfold_elm *ue; BUG_IF(tree_root == NULL); fprintf(stdout, " Unfolding the meta-tree "); fflush(stdout); /* start the recursion on the tree */ unfold_blk(&tree_root); fprintf(stdout, " done.\n\n"); /* substitute the virtual labels with real offsets */ labels_to_offsets(); /* convert the tailq into an array */ TAILQ_FOREACH(ue, &unfolded_tree, next) { /* label == 0 means a real instruction */ if (ue->label == 0) { SAFE_REALLOC(array, i * sizeof(struct filter_op)); memcpy(&array[i - 1], &ue->fop, sizeof(struct filter_op)); i++; } } /* always append the exit function to a script */ SAFE_REALLOC(array, i * sizeof(struct filter_op)); array[i - 1].opcode = FOP_EXIT; /* return the pointer to the array */ *fop = array; return (i); }
5,355
44,650
0
/* Tear down an "empty" network type.  There is no interface state to undo,
 * so the only work is running the user-configured down-script, if any.
 * Returns 0 on success (or when no script is configured), -1 when the
 * script fails. */
static int shutdown_empty(struct lxc_handler *handler, struct lxc_netdev *netdev)
{
	if (!netdev->downscript)
		return 0;

	if (run_script(handler->name, "net", netdev->downscript,
		       "down", "empty", (char*) NULL))
		return -1;

	return 0;
}
5,356
99,214
0
// Handles the GTK print dialog response.  On OK: rejects printers that
// cannot accept PDF (shows an infobar), otherwise builds a GtkPrintJob
// from the previously rendered PDF file and submits it asynchronously
// (OnJobCompletedThunk fires when done, so we return early).  Cancel /
// delete-event fall through to the common completion path; any other
// response id is a programming error.
void PrintDialogGtk::OnResponse(gint response_id) { gtk_widget_hide(dialog_); switch (response_id) { case GTK_RESPONSE_OK: { GtkPrinter* printer = gtk_print_unix_dialog_get_selected_printer( GTK_PRINT_UNIX_DIALOG(dialog_)); if (!gtk_printer_accepts_pdf(printer)) { browser_->GetSelectedTabContents()->AddInfoBar( new PdfUnsupportedInfoBarDelegate(browser_)); break; } GtkPrintSettings* settings = gtk_print_unix_dialog_get_settings( GTK_PRINT_UNIX_DIALOG(dialog_)); GtkPageSetup* setup = gtk_print_unix_dialog_get_page_setup( GTK_PRINT_UNIX_DIALOG(dialog_)); GtkPrintJob* job = gtk_print_job_new(path_to_pdf_.value().c_str(), printer, settings, setup); gtk_print_job_set_source_file(job, path_to_pdf_.value().c_str(), NULL); gtk_print_job_send(job, OnJobCompletedThunk, this, NULL); g_object_unref(settings); return; } case GTK_RESPONSE_DELETE_EVENT: // Fall through. case GTK_RESPONSE_CANCEL: { break; } case GTK_RESPONSE_APPLY: default: { NOTREACHED(); } } OnJobCompleted(NULL, NULL); }
5,357
155,664
0
// The generic-error sheet is terminal: there is nothing to go back to,
// so the back button is always hidden.
bool AuthenticatorGenericErrorSheetModel::IsBackButtonVisible() const { return false; }
5,358
151,371
0
// Returns the frame's document URL as a string with any #fragment
// identifier stripped off.
String UrlForFrame(LocalFrame* frame) {
  KURL document_url = frame->GetDocument()->Url();
  document_url.RemoveFragmentIdentifier();
  return document_url.GetString();
}
5,359
148,856
0
// Commits any pending sandbox-flags / container-policy change on this
// frame and broadcasts the updated policy to every proxy host in other
// SiteInstances (the parent's own SiteInstance already knows).  Frame
// policy only exists for subframes, hence the CHECK on parent().
void RenderFrameHostManager::CommitPendingFramePolicy() { if (!frame_tree_node_->CommitPendingFramePolicy()) return; CHECK(frame_tree_node_->parent()); SiteInstance* parent_site_instance = frame_tree_node_->parent()->current_frame_host()->GetSiteInstance(); for (const auto& pair : proxy_hosts_) { if (pair.second->GetSiteInstance() != parent_site_instance) { pair.second->Send(new FrameMsg_DidUpdateFramePolicy( pair.second->GetRoutingID(), frame_tree_node_->current_replication_state().sandbox_flags, frame_tree_node_->current_replication_state().container_policy)); } } }
5,360
177,108
0
// C-style trampoline handed to the encoder library: recovers the
// SoftAVCEncoder instance from the opaque |userData| pointer and forwards
// the output-buffer allocation request to it.
static int32_t DpbAllocWrapper(void *userData,
        unsigned int sizeInMbs, unsigned int numBuffers) {
    SoftAVCEncoder *self = static_cast<SoftAVCEncoder *>(userData);
    CHECK(self != NULL);
    return self->allocOutputBuffers(sizeInMbs, numBuffers);
}
5,361
97,915
0
void RenderView::InsertCSS(const std::wstring& frame_xpath, const std::string& css, const std::string& id) { WebFrame* web_frame = GetChildFrame(frame_xpath); if (!web_frame) return; web_frame->insertStyleText(WebString::fromUTF8(css), WebString::fromUTF8(id)); }
5,362
161,668
0
// Closes every file descriptor carried by |handle|: each fd is adopted
// into a base::ScopedFD whose destructor closes it as the loop body's
// scope ends.  The ScopedFD is intentionally unused otherwise.
static void CloseGpuMemoryBufferHandle( const gfx::GpuMemoryBufferHandle& handle) { for (const auto& fd : handle.native_pixmap_handle.fds) { base::ScopedFD scoped_fd(fd.fd); } }
5,363
173,878
0
void Chapters::Edition::Clear() { while (m_atoms_count > 0) { Atom& a = m_atoms[--m_atoms_count]; a.Clear(); } delete[] m_atoms; m_atoms = NULL; m_atoms_size = 0; }
5,364
41,637
0
/*
 * get_block callback for btrfs direct I/O: map the file range starting at
 * <iblock> into <bh_result>.  Locks the extent range, looks up the extent
 * map, and either (a) falls back to buffered I/O (-ENOTBLK) for inline /
 * compressed extents or uncached ranges, (b) reuses an existing extent for
 * NOCOW/PREALLOC writes, or (c) allocates a new extent for ordinary COW
 * writes.  On a write path it also updates i_size under the extent lock
 * and adjusts the per-DIO outstanding-extent/reservation accounting stashed
 * in current->journal_info (restored before returning).  The statement
 * order (lock -> lookup -> account -> unlock) is load-bearing; do not
 * reorder.
 */
static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create) { struct extent_map *em; struct btrfs_root *root = BTRFS_I(inode)->root; struct extent_state *cached_state = NULL; struct btrfs_dio_data *dio_data = NULL; u64 start = iblock << inode->i_blkbits; u64 lockstart, lockend; u64 len = bh_result->b_size; int unlock_bits = EXTENT_LOCKED; int ret = 0; if (create) unlock_bits |= EXTENT_DIRTY; else len = min_t(u64, len, root->sectorsize); lockstart = start; lockend = start + len - 1; if (current->journal_info) { /* * Need to pull our outstanding extents and set journal_info to NULL so * that anything that needs to check if there's a transction doesn't get * confused. */ dio_data = current->journal_info; current->journal_info = NULL; } /* * If this errors out it's because we couldn't invalidate pagecache for * this range and we need to fallback to buffered. */ if (lock_extent_direct(inode, lockstart, lockend, &cached_state, create)) return -ENOTBLK; em = btrfs_get_extent(inode, NULL, 0, start, len, 0); if (IS_ERR(em)) { ret = PTR_ERR(em); goto unlock_err; } /* * Ok for INLINE and COMPRESSED extents we need to fallback on buffered * io. INLINE is special, and we could probably kludge it in here, but * it's still buffered so for safety lets just fall back to the generic * buffered path. * * For COMPRESSED we _have_ to read the entire extent in so we can * decompress it, so there will be buffering required no matter what we * do, so go ahead and fallback to buffered. * * We return -ENOTBLK because thats what makes DIO go ahead and go back * to buffered IO. Don't blame me, this is the price we pay for using * the generic code. 
*/ if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) || em->block_start == EXTENT_MAP_INLINE) { free_extent_map(em); ret = -ENOTBLK; goto unlock_err; } /* Just a good old fashioned hole, return */ if (!create && (em->block_start == EXTENT_MAP_HOLE || test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) { free_extent_map(em); goto unlock_err; } /* * We don't allocate a new extent in the following cases * * 1) The inode is marked as NODATACOW. In this case we'll just use the * existing extent. * 2) The extent is marked as PREALLOC. We're good to go here and can * just use the extent. * */ if (!create) { len = min(len, em->len - (start - em->start)); lockstart = start + len; goto unlock; } if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) || ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) && em->block_start != EXTENT_MAP_HOLE)) { int type; u64 block_start, orig_start, orig_block_len, ram_bytes; if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) type = BTRFS_ORDERED_PREALLOC; else type = BTRFS_ORDERED_NOCOW; len = min(len, em->len - (start - em->start)); block_start = em->block_start + (start - em->start); if (can_nocow_extent(inode, start, &len, &orig_start, &orig_block_len, &ram_bytes) == 1) { if (type == BTRFS_ORDERED_PREALLOC) { free_extent_map(em); em = create_pinned_em(inode, start, len, orig_start, block_start, len, orig_block_len, ram_bytes, type); if (IS_ERR(em)) { ret = PTR_ERR(em); goto unlock_err; } } ret = btrfs_add_ordered_extent_dio(inode, start, block_start, len, len, type); if (ret) { free_extent_map(em); goto unlock_err; } goto unlock; } } /* * this will cow the extent, reset the len in case we changed * it above */ len = bh_result->b_size; free_extent_map(em); em = btrfs_new_extent_direct(inode, start, len); if (IS_ERR(em)) { ret = PTR_ERR(em); goto unlock_err; } len = min(len, em->len - (start - em->start)); unlock: bh_result->b_blocknr = (em->block_start + (start - em->start)) >> inode->i_blkbits; bh_result->b_size = len; bh_result->b_bdev = em->bdev; 
set_buffer_mapped(bh_result); if (create) { if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) set_buffer_new(bh_result); /* * Need to update the i_size under the extent lock so buffered * readers will get the updated i_size when we unlock. */ if (start + len > i_size_read(inode)) i_size_write(inode, start + len); /* * If we have an outstanding_extents count still set then we're * within our reservation, otherwise we need to adjust our inode * counter appropriately. */ if (dio_data->outstanding_extents) { (dio_data->outstanding_extents)--; } else { spin_lock(&BTRFS_I(inode)->lock); BTRFS_I(inode)->outstanding_extents++; spin_unlock(&BTRFS_I(inode)->lock); } btrfs_free_reserved_data_space(inode, len); WARN_ON(dio_data->reserve < len); dio_data->reserve -= len; current->journal_info = dio_data; } /* * In the case of write we need to clear and unlock the entire range, * in the case of read we need to unlock only the end area that we * aren't using if there is any left over space. */ if (lockstart < lockend) { clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend, unlock_bits, 1, 0, &cached_state, GFP_NOFS); } else { free_extent_state(cached_state); } free_extent_map(em); return 0; unlock_err: clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend, unlock_bits, 1, 0, &cached_state, GFP_NOFS); if (dio_data) current->journal_info = dio_data; return ret; }
5,365
180,733
1
/*
 * KVM_SET_PIT2 ioctl handler: restore the emulated i8254 PIT state
 * supplied by userspace.  Copies the channel state and flags under the
 * PIT lock, then reloads the hardware-model counters.
 *
 * Fix (CVE-2015-7513): the original code only reloaded channel 0's
 * counter, leaving channels 1-2 with stale counts — a guest could then
 * trigger a divide-by-zero in the PIT emulation.  Reload all three
 * channels; the HPET-legacy "start" hint applies to channel 0 only.
 *
 * Always returns 0.
 */
static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
{
	int start = 0;
	int i;
	u32 prev_legacy, cur_legacy;

	mutex_lock(&kvm->arch.vpit->pit_state.lock);
	prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY;
	cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY;
	/* Transitioning into HPET-legacy mode (re)starts channel 0. */
	if (!prev_legacy && cur_legacy)
		start = 1;
	memcpy(&kvm->arch.vpit->pit_state.channels, &ps->channels,
	       sizeof(kvm->arch.vpit->pit_state.channels));
	kvm->arch.vpit->pit_state.flags = ps->flags;
	/* Reload counters for every channel, not just channel 0. */
	for (i = 0; i < 3; i++)
		kvm_pit_load_count(kvm, i,
				   kvm->arch.vpit->pit_state.channels[i].count,
				   start && i == 0);
	mutex_unlock(&kvm->arch.vpit->pit_state.lock);
	return 0;
}
5,366
174,283
0
// Queues an auto-exposure precapture-metering trigger, tagged with |id|,
// onto the request thread.  Returns the status from queueTrigger().
status_t Camera3Device::triggerPrecaptureMetering(uint32_t id) {
    ATRACE_CALL();
    Mutex::Autolock il(mInterfaceLock);
    ALOGV("%s: Triggering precapture metering, id %d", __FUNCTION__, id);

    RequestTrigger triggers[] = {
        // Start an AE precapture sequence...
        {
            ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER,
            ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER_START
        },
        // ...and associate it with the caller-supplied id.
        {
            ANDROID_CONTROL_AE_PRECAPTURE_ID,
            static_cast<int32_t>(id)
        }
    };

    size_t triggerCount = sizeof(triggers) / sizeof(triggers[0]);
    return mRequestThread->queueTrigger(triggers, triggerCount);
}
5,367
170,905
0
// Returns true when |index| must not be set directly by untrusted callers.
// Indexes inside the standard OMX ranges (and the Android extension ranges
// enumerated below) are always allowed; everything else is checked against
// the cached set of restricted Android extension indexes, which is lazily
// resolved from extension-name strings on first use.  Caller must hold the
// node lock (per the _l suffix convention).
bool OMXNodeInstance::isProhibitedIndex_l(OMX_INDEXTYPE index) { static const char *restricted_extensions[] = { "OMX.google.android.index.storeMetaDataInBuffers", "OMX.google.android.index.storeANWBufferInMetadata", "OMX.google.android.index.prepareForAdaptivePlayback", "OMX.google.android.index.configureVideoTunnelMode", "OMX.google.android.index.useAndroidNativeBuffer2", "OMX.google.android.index.useAndroidNativeBuffer", "OMX.google.android.index.enableAndroidNativeBuffers", "OMX.google.android.index.allocateNativeHandle", "OMX.google.android.index.getAndroidNativeBufferUsage", }; if ((index > OMX_IndexComponentStartUnused && index <= OMX_IndexParamStandardComponentRole) || (index > OMX_IndexPortStartUnused && index <= OMX_IndexParamCompBufferSupplier) || (index > OMX_IndexAudioStartUnused && index <= OMX_IndexConfigAudioChannelVolume) || (index > OMX_IndexVideoStartUnused && index <= OMX_IndexConfigVideoNalSize) || (index > OMX_IndexCommonStartUnused && index <= OMX_IndexConfigCommonTransitionEffect) || (index > (OMX_INDEXTYPE)OMX_IndexExtAudioStartUnused && index <= (OMX_INDEXTYPE)OMX_IndexParamAudioProfileQuerySupported) || (index > (OMX_INDEXTYPE)OMX_IndexExtVideoStartUnused && index <= (OMX_INDEXTYPE)OMX_IndexConfigAndroidIntraRefresh) || (index > (OMX_INDEXTYPE)OMX_IndexExtOtherStartUnused && index <= (OMX_INDEXTYPE)OMX_IndexParamConsumerUsageBits)) { return false; } if (!mQueriedProhibitedExtensions) { for (size_t i = 0; i < NELEM(restricted_extensions); ++i) { OMX_INDEXTYPE ext; if (OMX_GetExtensionIndex(mHandle, (OMX_STRING)restricted_extensions[i], &ext) == OMX_ErrorNone) { mProhibitedExtensions.add(ext); } } mQueriedProhibitedExtensions = true; } return mProhibitedExtensions.indexOf(index) >= 0; }
5,368
52,631
0
/*
 * Module init: register the per-netns PPP device ops, the /dev/ppp char
 * device, and its device class/node.  On any failure the previously
 * registered pieces are unwound in reverse order via the goto ladder.
 * A device_create() failure is deliberately tolerated (node can be made
 * manually).  Returns 0 on success or a negative errno.
 */
static int __init ppp_init(void) { int err; pr_info("PPP generic driver version " PPP_VERSION "\n"); err = register_pernet_device(&ppp_net_ops); if (err) { pr_err("failed to register PPP pernet device (%d)\n", err); goto out; } err = register_chrdev(PPP_MAJOR, "ppp", &ppp_device_fops); if (err) { pr_err("failed to register PPP device (%d)\n", err); goto out_net; } ppp_class = class_create(THIS_MODULE, "ppp"); if (IS_ERR(ppp_class)) { err = PTR_ERR(ppp_class); goto out_chrdev; } /* not a big deal if we fail here :-) */ device_create(ppp_class, NULL, MKDEV(PPP_MAJOR, 0), NULL, "ppp"); return 0; out_chrdev: unregister_chrdev(PPP_MAJOR, "ppp"); out_net: unregister_pernet_device(&ppp_net_ops); out: return err; }
5,369
91,143
0
/*
 * Bind route <rt> to the fib next-hop exception <fnhe> for <daddr>.
 * Under fnhe_lock: if the exception's generation id is stale its learned
 * data (gw/pmtu/expiry) is reset and cached routes flushed; otherwise the
 * exception data is copied into the route.  When <do_cache> is set the
 * route is stored in the appropriate (input/output) per-exception cache
 * slot, releasing any route previously cached there.  Returns true iff
 * the route was cached.
 */
static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe, __be32 daddr, const bool do_cache) { bool ret = false; spin_lock_bh(&fnhe_lock); if (daddr == fnhe->fnhe_daddr) { struct rtable __rcu **porig; struct rtable *orig; int genid = fnhe_genid(dev_net(rt->dst.dev)); if (rt_is_input_route(rt)) porig = &fnhe->fnhe_rth_input; else porig = &fnhe->fnhe_rth_output; orig = rcu_dereference(*porig); if (fnhe->fnhe_genid != genid) { fnhe->fnhe_genid = genid; fnhe->fnhe_gw = 0; fnhe->fnhe_pmtu = 0; fnhe->fnhe_expires = 0; fnhe->fnhe_mtu_locked = false; fnhe_flush_routes(fnhe); orig = NULL; } fill_route_from_fnhe(rt, fnhe); if (!rt->rt_gateway) rt->rt_gateway = daddr; if (do_cache) { dst_hold(&rt->dst); rcu_assign_pointer(*porig, rt); if (orig) { dst_dev_put(&orig->dst); dst_release(&orig->dst); } ret = true; } fnhe->fnhe_stamp = jiffies; } spin_unlock_bh(&fnhe_lock); return ret; }
5,370
92,098
0
/*
 * Free all kernel-side resources of a QP: the send-queue bookkeeping
 * arrays (wqe_head, w_list, wrid, wr_data), the receive-queue wrid array,
 * the doorbell record and the work-queue buffer itself.
 */
static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp) { kvfree(qp->sq.wqe_head); kvfree(qp->sq.w_list); kvfree(qp->sq.wrid); kvfree(qp->sq.wr_data); kvfree(qp->rq.wrid); mlx5_db_free(dev->mdev, &qp->db); mlx5_buf_free(dev->mdev, &qp->buf); }
5,371
53,481
0
/* Accessor: current absolute output position of the LZSS window. */
lzss_position(struct lzss *lzss) { return lzss->position; }
5,372
110,049
0
// Restores the selection from saved form-control state: clears every
// option's selected flag, then re-selects by value — a single match for
// single-select, or one match per saved value for multi-select (searching
// forward from the previous hit first, then wrapping, so duplicate values
// map to distinct options).  Values no longer present are skipped.
void HTMLSelectElement::restoreFormControlState(const FormControlState& state) { recalcListItems(); const Vector<HTMLElement*>& items = listItems(); size_t itemsSize = items.size(); if (!itemsSize) return; for (size_t i = 0; i < itemsSize; ++i) { if (!items[i]->hasLocalName(optionTag)) continue; toHTMLOptionElement(items[i])->setSelectedState(false); } if (!multiple()) { size_t foundIndex = searchOptionsForValue(state[0], 0, itemsSize); if (foundIndex != notFound) toHTMLOptionElement(items[foundIndex])->setSelectedState(true); } else { size_t startIndex = 0; for (size_t i = 0; i < state.valueSize(); ++i) { const String& value = state[i]; size_t foundIndex = searchOptionsForValue(value, startIndex, itemsSize); if (foundIndex == notFound) foundIndex = searchOptionsForValue(value, 0, startIndex); if (foundIndex == notFound) continue; toHTMLOptionElement(items[foundIndex])->setSelectedState(true); startIndex = foundIndex + 1; } } setOptionsChangedOnRenderer(); setNeedsValidityCheck(); }
5,373
125,219
0
// IPC handler: renderer asks to open a channel to an NPAPI plugin for
// |url| / |mime_type|.  A callback object (owned by plugin_host_clients_
// until completion) carries |reply_msg| and answers the renderer when the
// plugin service finishes the async open.
void RenderMessageFilter::OnOpenChannelToPlugin(int routing_id, const GURL& url, const GURL& policy_url, const std::string& mime_type, IPC::Message* reply_msg) { OpenChannelToNpapiPluginCallback* client = new OpenChannelToNpapiPluginCallback(this, resource_context_, reply_msg); DCHECK(!ContainsKey(plugin_host_clients_, client)); plugin_host_clients_.insert(client); plugin_service_->OpenChannelToNpapiPlugin( render_process_id_, routing_id, url, policy_url, mime_type, client); }
5,374
74,327
0
/*
 * Parse the known-hosts file into a hash table mapping hostname (owned
 * string) -> gnutls_x509_crt_t.  Each line has the form
 * "<host>:<base64-DER-cert>"; lines without a ':' are ignored.  A missing
 * file is not an error (returns an empty table).  Duplicate hosts, base64/
 * DER decode failures, and read errors free everything built so far, set
 * *error and return NULL.  The decode scratch buffer is grown lazily and
 * reused across lines.
 */
inf_gtk_certificate_manager_load_known_hosts(InfGtkCertificateManager* mgr, GError** error) { InfGtkCertificateManagerPrivate* priv; GHashTable* table; gchar* content; gsize size; GError* local_error; gchar* out_buf; gsize out_buf_len; gchar* pos; gchar* prev; gchar* next; gchar* sep; gsize len; gsize out_len; gint base64_state; guint base64_save; gnutls_datum_t data; gnutls_x509_crt_t cert; int res; priv = INF_GTK_CERTIFICATE_MANAGER_PRIVATE(mgr); table = g_hash_table_new_full( g_str_hash, g_str_equal, g_free, (GDestroyNotify)gnutls_x509_crt_deinit ); local_error = NULL; g_file_get_contents(priv->known_hosts_file, &content, &size, &local_error); if(local_error != NULL) { if(local_error->domain == G_FILE_ERROR && local_error->code == G_FILE_ERROR_NOENT) { return table; } g_propagate_prefixed_error( error, local_error, _("Failed to open known hosts file \"%s\": "), priv->known_hosts_file ); g_hash_table_destroy(table); return NULL; } out_buf = NULL; out_buf_len = 0; prev = content; for(prev = content; prev != NULL; prev = next) { pos = strchr(prev, '\n'); next = NULL; if(pos == NULL) pos = content + size; else next = pos + 1; sep = inf_gtk_certificate_manager_memrchr(prev, ':', pos - prev); if(sep == NULL) continue; /* ignore line */ *sep = '\0'; if(g_hash_table_lookup(table, prev) != NULL) { g_set_error( error, g_quark_from_static_string("INF_GTK_CERTIFICATE_MANAGER_ERROR"), INF_GTK_CERTIFICATE_MANAGER_ERROR_DUPLICATE_HOST_ENTRY, _("Certificate for host \"%s\" appears twice in " "known hosts file \"%s\""), prev, priv->known_hosts_file ); g_hash_table_destroy(table); g_free(out_buf); g_free(content); return NULL; } /* decode base64, import DER certificate */ len = (pos - (sep + 1)); out_len = len * 3 / 4; if(out_len > out_buf_len) { out_buf = g_realloc(out_buf, out_len); out_buf_len = out_len; } base64_state = 0; base64_save = 0; out_len = g_base64_decode_step( sep + 1, len, out_buf, &base64_state, &base64_save ); cert = NULL; res = gnutls_x509_crt_init(&cert); 
if(res == GNUTLS_E_SUCCESS) { data.data = out_buf; data.size = out_len; res = gnutls_x509_crt_import(cert, &data, GNUTLS_X509_FMT_DER); } if(res != GNUTLS_E_SUCCESS) { inf_gnutls_set_error(&local_error, res); g_propagate_prefixed_error( error, local_error, _("Failed to read certificate for host \"%s\" from " "known hosts file \"%s\": "), prev, priv->known_hosts_file ); if(cert != NULL) gnutls_x509_crt_deinit(cert); g_hash_table_destroy(table); g_free(out_buf); g_free(content); return NULL; } g_hash_table_insert(table, g_strdup(prev), cert); } g_free(out_buf); g_free(content); return table; }
5,375
94,354
0
/*
 * Add one source address <*psfsrc> to multicast group <pmc> in mode
 * <sfmode> (include/exclude).  Looks up the source in the singly linked
 * source list, allocating and appending a new entry when absent, then
 * bumps the per-mode refcount; the first reference in a mode fires a
 * routing multicast event.  Returns 0 or -ENOBUFS on allocation failure.
 * NOTE(review): the <delta> parameter is unused here — presumably kept
 * for signature parity with the delete path; confirm.
 */
static int ip_mc_add1_src(struct ip_mc_list *pmc, int sfmode, __be32 *psfsrc, int delta) { struct ip_sf_list *psf, *psf_prev; psf_prev = NULL; for (psf=pmc->sources; psf; psf=psf->sf_next) { if (psf->sf_inaddr == *psfsrc) break; psf_prev = psf; } if (!psf) { psf = kzalloc(sizeof(*psf), GFP_ATOMIC); if (!psf) return -ENOBUFS; psf->sf_inaddr = *psfsrc; if (psf_prev) { psf_prev->sf_next = psf; } else pmc->sources = psf; } psf->sf_count[sfmode]++; if (psf->sf_count[sfmode] == 1) { ip_rt_multicast_event(pmc->interface); } return 0; }
5,376
135,972
0
// Updates the :hover state of this node and schedules the narrowest style
// recalc that covers the change: a targeted pseudo-state invalidation when
// only descendants/siblings are affected, a local recalc when just this
// element's style depends on :hover, or a subtree recalc when ::first-letter
// interacts with hover.  No-op when the state is unchanged; without a
// layoutObject un-hovering still needs a recalc (element may have
// display:none via :hover).  Finally notifies the theme for native controls.
void ContainerNode::setHovered(bool over) { if (over == hovered()) return; Node::setHovered(over); if (!layoutObject()) { if (over) return; if (isElementNode() && toElement(this)->childrenOrSiblingsAffectedByHover() && styleChangeType() < SubtreeStyleChange) document().styleEngine().pseudoStateChangedForElement(CSSSelector::PseudoHover, *toElement(this)); else setNeedsStyleRecalc(LocalStyleChange, StyleChangeReasonForTracing::createWithExtraData(StyleChangeReason::PseudoClass, StyleChangeExtraData::Hover)); return; } if (styleChangeType() < SubtreeStyleChange) { if (computedStyle()->affectedByHover() && computedStyle()->hasPseudoStyle(FIRST_LETTER)) setNeedsStyleRecalc(SubtreeStyleChange, StyleChangeReasonForTracing::createWithExtraData(StyleChangeReason::PseudoClass, StyleChangeExtraData::Hover)); else if (isElementNode() && toElement(this)->childrenOrSiblingsAffectedByHover()) document().styleEngine().pseudoStateChangedForElement(CSSSelector::PseudoHover, *toElement(this)); else if (computedStyle()->affectedByHover()) setNeedsStyleRecalc(LocalStyleChange, StyleChangeReasonForTracing::createWithExtraData(StyleChangeReason::PseudoClass, StyleChangeExtraData::Hover)); } LayoutTheme::theme().controlStateChanged(*layoutObject(), HoverControlState); }
5,377
157,038
0
// Download-progress notification for [begin, end).  Ignored for local
// (fully buffered) sources.  Under |lock_|: reports the newly buffered
// byte range to the host (unless stopping), throttles buffer-size
// recomputation via a countdown, and refreshes loading state.  Must run
// on the render task runner.
void MultibufferDataSource::ProgressCallback(int64_t begin, int64_t end) { DVLOG(1) << __func__ << "(" << begin << ", " << end << ")"; DCHECK(render_task_runner_->BelongsToCurrentThread()); if (assume_fully_buffered()) return; base::AutoLock auto_lock(lock_); if (end > begin) { if (stop_signal_received_) return; host_->AddBufferedByteRange(begin, end); } if (buffer_size_update_counter_ > 0) { buffer_size_update_counter_--; } else { UpdateBufferSizes(); } UpdateLoadingState_Locked(false); }
5,378
79,870
0
/**
 * In-place 1-8-1 smoothing filter over the interior of a width x height
 * block of 16-bit samples (rows separated by `stride`).  A horizontal
 * pass runs first, then a vertical pass; border samples are untouched.
 * Each output is (-prev + 8*cur - next) scaled by 10923/65536 (~1/6)
 * with rounding.  The filter is causal: `prev` is always the neighbour's
 * value from *before* it was filtered, saved just prior to the store.
 */
static void filter181(int16_t *data, int width, int height, ptrdiff_t stride)
{
    /* horizontal pass: each interior row, left to right */
    for (int y = 1; y < height - 1; y++) {
        int prev_dc = data[y * stride];

        for (int x = 1; x < width - 1; x++) {
            int dc = -prev_dc
                     + data[x + y * stride] * 8
                     - data[x + 1 + y * stride];
            /* clip before the 10923 multiply so it cannot overflow int */
            dc = (av_clip(dc, INT_MIN/10923, INT_MAX/10923 - 32768) *
                  10923 + 32768) >> 16;
            prev_dc = data[x + y * stride];
            data[x + y * stride] = dc;
        }
    }

    /* vertical pass: each interior column, top to bottom */
    for (int x = 1; x < width - 1; x++) {
        int prev_dc = data[x];

        for (int y = 1; y < height - 1; y++) {
            int dc = -prev_dc
                     + data[x + y * stride] * 8
                     - data[x + (y + 1) * stride];
            dc = (av_clip(dc, INT_MIN/10923, INT_MAX/10923 - 32768) *
                  10923 + 32768) >> 16;
            prev_dc = data[x + y * stride];
            data[x + y * stride] = dc;
        }
    }
}
5,379
76,265
0
/*
 * Decide whether a CD-RAM style medium can be opened for random writes.
 * Requires defect management; then queries the random-writable feature
 * descriptor and, when the RWRT feature is reported, returns !curr (i.e.
 * nonzero means "cannot write now").  Any helper failure is propagated.
 */
static int cdrom_ram_open_write(struct cdrom_device_info *cdi) { struct rwrt_feature_desc rfd; int ret; if ((ret = cdrom_has_defect_mgt(cdi))) return ret; if ((ret = cdrom_get_random_writable(cdi, &rfd))) return ret; else if (CDF_RWRT == be16_to_cpu(rfd.feature_code)) ret = !rfd.curr; cd_dbg(CD_OPEN, "can open for random write\n"); return ret; }
5,380
17,262
0
/*
 * Binary-search a format-12 cmap's group table for *pchar_code (or, when
 * <next> is set, for the first valid character after it).  Groups are
 * (startChar, endChar, startGlyph) triples at offset 16, 12 bytes each.
 * Overflowing glyph ids and ids beyond the face's glyph count are mapped
 * to 0.  In <next> mode the cmap12 cursor (cur_charcode/cur_group) is
 * updated and *pchar_code rewritten so iteration can resume from the hit.
 * Returns the glyph index, or 0 when nothing (more) maps.
 */
tt_cmap12_char_map_binary( TT_CMap cmap, FT_UInt32* pchar_code, FT_Bool next ) { FT_UInt gindex = 0; FT_Byte* p = cmap->data + 12; FT_UInt32 num_groups = TT_PEEK_ULONG( p ); FT_UInt32 char_code = *pchar_code; FT_UInt32 start, end, start_id; FT_UInt32 max, min, mid; if ( !num_groups ) return 0; /* make compiler happy */ mid = num_groups; end = 0xFFFFFFFFUL; if ( next ) { if ( char_code >= 0xFFFFFFFFUL ) return 0; char_code++; } min = 0; max = num_groups; /* binary search */ while ( min < max ) { mid = ( min + max ) >> 1; p = cmap->data + 16 + 12 * mid; start = TT_NEXT_ULONG( p ); end = TT_NEXT_ULONG( p ); if ( char_code < start ) max = mid; else if ( char_code > end ) min = mid + 1; else { start_id = TT_PEEK_ULONG( p ); /* reject invalid glyph index */ if ( start_id > 0xFFFFFFFFUL - ( char_code - start ) ) gindex = 0; else gindex = (FT_UInt)( start_id + ( char_code - start ) ); break; } } if ( next ) { FT_Face face = cmap->cmap.charmap.face; TT_CMap12 cmap12 = (TT_CMap12)cmap; /* if `char_code' is not in any group, then `mid' is */ /* the group nearest to `char_code' */ if ( char_code > end ) { mid++; if ( mid == num_groups ) return 0; } cmap12->valid = 1; cmap12->cur_charcode = char_code; cmap12->cur_group = mid; if ( gindex >= (FT_UInt)face->num_glyphs ) gindex = 0; if ( !gindex ) { tt_cmap12_next( cmap12 ); if ( cmap12->valid ) gindex = cmap12->cur_gindex; } else cmap12->cur_gindex = gindex; *pchar_code = cmap12->cur_charcode; } return gindex; }
5,381
82,213
0
/* listen(2) syscall entry point: thin wrapper around __sys_listen(). */
SYSCALL_DEFINE2(listen, int, fd, int, backlog) { return __sys_listen(fd, backlog); }
5,382
23,016
0
/* Decode the DELEGRETURN op header from the NFSv4 compound reply. */
static int decode_delegreturn(struct xdr_stream *xdr) { return decode_op_hdr(xdr, OP_DELEGRETURN); }
5,383
109,195
0
// Fills |highlight| with the current inspector highlight, delegating to
// the overlay.
void InspectorController::getHighlight(Highlight* highlight) const { m_overlay->getHighlight(highlight); }
5,384
23,520
0
/*
 * Encode a counted array into <buf> at offset <base>: bounds-check that
 * the 4-byte length word plus array_len * elem_size fits in the buffer's
 * head + pages + tail, then delegate to the shared encode/decode worker
 * in encode mode.  Returns 0 or -EINVAL on overflow.
 */
xdr_encode_array2(struct xdr_buf *buf, unsigned int base, struct xdr_array2_desc *desc) { if ((unsigned long) base + 4 + desc->array_len * desc->elem_size > buf->head->iov_len + buf->page_len + buf->tail->iov_len) return -EINVAL; return xdr_xcode_array2(buf, base, desc, 1); }
5,385
119,264
0
bool HTMLFormElement::rendererIsNeeded(const RenderStyle& style) { if (!m_wasDemoted) return HTMLElement::rendererIsNeeded(style); ContainerNode* node = parentNode(); RenderObject* parentRenderer = node->renderer(); bool parentIsTableElementPart = (parentRenderer->isTable() && isHTMLTableElement(node)) || (parentRenderer->isTableRow() && node->hasTagName(trTag)) || (parentRenderer->isTableSection() && node->hasTagName(tbodyTag)) || (parentRenderer->isRenderTableCol() && node->hasTagName(colTag)) || (parentRenderer->isTableCell() && node->hasTagName(trTag)); if (!parentIsTableElementPart) return true; EDisplay display = style.display(); bool formIsTablePart = display == TABLE || display == INLINE_TABLE || display == TABLE_ROW_GROUP || display == TABLE_HEADER_GROUP || display == TABLE_FOOTER_GROUP || display == TABLE_ROW || display == TABLE_COLUMN_GROUP || display == TABLE_COLUMN || display == TABLE_CELL || display == TABLE_CAPTION; return formIsTablePart; }
5,386
34,050
0
__archive_write_output(struct archive_write *a, const void *buff, size_t length) { return (__archive_write_filter(a->filter_first, buff, length)); }
5,387
168,454
0
PlatformFontSkia::~PlatformFontSkia() {}
5,388
19,518
0
static unsigned int udf_count_free_table(struct super_block *sb, struct inode *table) { unsigned int accum = 0; uint32_t elen; struct kernel_lb_addr eloc; int8_t etype; struct extent_position epos; mutex_lock(&UDF_SB(sb)->s_alloc_mutex); epos.block = UDF_I(table)->i_location; epos.offset = sizeof(struct unallocSpaceEntry); epos.bh = NULL; while ((etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) accum += (elen >> table->i_sb->s_blocksize_bits); brelse(epos.bh); mutex_unlock(&UDF_SB(sb)->s_alloc_mutex); return accum; }
5,389
22,279
0
void __init proc_caches_init(void) { sighand_cachep = kmem_cache_create("sighand_cache", sizeof(struct sighand_struct), 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU| SLAB_NOTRACK, sighand_ctor); signal_cachep = kmem_cache_create("signal_cache", sizeof(struct signal_struct), 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL); files_cachep = kmem_cache_create("files_cache", sizeof(struct files_struct), 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL); fs_cachep = kmem_cache_create("fs_cache", sizeof(struct fs_struct), 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL); mm_cachep = kmem_cache_create("mm_struct", sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN, SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL); vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC); mmap_init(); }
5,390
103,343
0
void P2PSocketDispatcherHost::OnGetNetworkList(const IPC::Message& msg) { BrowserThread::PostTask( BrowserThread::FILE, FROM_HERE, NewRunnableMethod( this, &P2PSocketDispatcherHost::DoGetNetworkList, msg.routing_id())); }
5,391
32,059
0
static int __dev_close(struct net_device *dev) { const struct net_device_ops *ops = dev->netdev_ops; ASSERT_RTNL(); might_sleep(); /* * Tell people we are going down, so that they can * prepare to death, when device is still operating. */ call_netdevice_notifiers(NETDEV_GOING_DOWN, dev); clear_bit(__LINK_STATE_START, &dev->state); /* Synchronize to scheduled poll. We cannot touch poll list, * it can be even on different cpu. So just clear netif_running(). * * dev->stop() will invoke napi_disable() on all of it's * napi_struct instances on this device. */ smp_mb__after_clear_bit(); /* Commit netif_running(). */ dev_deactivate(dev); /* * Call the device specific close. This cannot fail. * Only if device is UP * * We allow it to be called even after a DETACH hot-plug * event. */ if (ops->ndo_stop) ops->ndo_stop(dev); /* * Device is now down. */ dev->flags &= ~IFF_UP; /* * Shutdown NET_DMA */ net_dmaengine_put(); return 0; }
5,392
4,895
0
SetMaskForEvent(int deviceid, Mask mask, int event) { if (deviceid < 0 || deviceid >= MAXDEVICES) FatalError("SetMaskForEvent: bogus device id"); event_filters[deviceid][event] = mask; }
5,393
106,405
0
void BlobURLRequestJob::AdvanceItem() { CloseStream(); item_index_++; current_item_offset_ = 0; }
5,394
177,159
0
void SoftMP3::onQueueFilled(OMX_U32 /* portIndex */) { if (mSignalledError || mOutputPortSettingsChange != NONE) { return; } List<BufferInfo *> &inQueue = getPortQueue(0); List<BufferInfo *> &outQueue = getPortQueue(1); while ((!inQueue.empty() || (mSawInputEos && !mSignalledOutputEos)) && !outQueue.empty()) { BufferInfo *inInfo = NULL; OMX_BUFFERHEADERTYPE *inHeader = NULL; if (!inQueue.empty()) { inInfo = *inQueue.begin(); inHeader = inInfo->mHeader; } BufferInfo *outInfo = *outQueue.begin(); OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader; outHeader->nFlags = 0; if (inHeader) { if (inHeader->nOffset == 0 && inHeader->nFilledLen) { mAnchorTimeUs = inHeader->nTimeStamp; mNumFramesOutput = 0; } if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) { mSawInputEos = true; } mConfig->pInputBuffer = inHeader->pBuffer + inHeader->nOffset; mConfig->inputBufferCurrentLength = inHeader->nFilledLen; } else { mConfig->pInputBuffer = NULL; mConfig->inputBufferCurrentLength = 0; } mConfig->inputBufferMaxLength = 0; mConfig->inputBufferUsedLength = 0; mConfig->outputFrameSize = kOutputBufferSize / sizeof(int16_t); mConfig->pOutputBuffer = reinterpret_cast<int16_t *>(outHeader->pBuffer); ERROR_CODE decoderErr; if ((decoderErr = pvmp3_framedecoder(mConfig, mDecoderBuf)) != NO_DECODING_ERROR) { ALOGV("mp3 decoder returned error %d", decoderErr); if (decoderErr != NO_ENOUGH_MAIN_DATA_ERROR && decoderErr != SIDE_INFO_ERROR) { ALOGE("mp3 decoder returned error %d", decoderErr); notify(OMX_EventError, OMX_ErrorUndefined, decoderErr, NULL); mSignalledError = true; return; } if (mConfig->outputFrameSize == 0) { mConfig->outputFrameSize = kOutputBufferSize / sizeof(int16_t); } if (decoderErr == NO_ENOUGH_MAIN_DATA_ERROR && mSawInputEos) { if (!mIsFirst) { outHeader->nOffset = 0; outHeader->nFilledLen = kPVMP3DecoderDelay * mNumChannels * sizeof(int16_t); memset(outHeader->pBuffer, 0, outHeader->nFilledLen); } outHeader->nFlags = OMX_BUFFERFLAG_EOS; mSignalledOutputEos = true; } else { 
ALOGV_IF(mIsFirst, "insufficient data for first frame, sending silence"); memset(outHeader->pBuffer, 0, mConfig->outputFrameSize * sizeof(int16_t)); if (inHeader) { mConfig->inputBufferUsedLength = inHeader->nFilledLen; } } } else if (mConfig->samplingRate != mSamplingRate || mConfig->num_channels != mNumChannels) { mSamplingRate = mConfig->samplingRate; mNumChannels = mConfig->num_channels; notify(OMX_EventPortSettingsChanged, 1, 0, NULL); mOutputPortSettingsChange = AWAITING_DISABLED; return; } if (mIsFirst) { mIsFirst = false; outHeader->nOffset = kPVMP3DecoderDelay * mNumChannels * sizeof(int16_t); outHeader->nFilledLen = mConfig->outputFrameSize * sizeof(int16_t) - outHeader->nOffset; } else if (!mSignalledOutputEos) { outHeader->nOffset = 0; outHeader->nFilledLen = mConfig->outputFrameSize * sizeof(int16_t); } outHeader->nTimeStamp = mAnchorTimeUs + (mNumFramesOutput * 1000000ll) / mSamplingRate; if (inHeader) { CHECK_GE(inHeader->nFilledLen, mConfig->inputBufferUsedLength); inHeader->nOffset += mConfig->inputBufferUsedLength; inHeader->nFilledLen -= mConfig->inputBufferUsedLength; if (inHeader->nFilledLen == 0) { inInfo->mOwnedByUs = false; inQueue.erase(inQueue.begin()); inInfo = NULL; notifyEmptyBufferDone(inHeader); inHeader = NULL; } } mNumFramesOutput += mConfig->outputFrameSize / mNumChannels; outInfo->mOwnedByUs = false; outQueue.erase(outQueue.begin()); outInfo = NULL; notifyFillBufferDone(outHeader); outHeader = NULL; } }
5,395
5,029
0
static int check_crl(X509_STORE_CTX *ctx, X509_CRL *crl) { X509 *issuer = NULL; EVP_PKEY *ikey = NULL; int ok = 0, chnum, cnum; cnum = ctx->error_depth; chnum = sk_X509_num(ctx->chain) - 1; /* if we have an alternative CRL issuer cert use that */ if (ctx->current_issuer) issuer = ctx->current_issuer; /* * Else find CRL issuer: if not last certificate then issuer is next * certificate in chain. */ else if (cnum < chnum) issuer = sk_X509_value(ctx->chain, cnum + 1); else { issuer = sk_X509_value(ctx->chain, chnum); /* If not self signed, can't check signature */ if (!ctx->check_issued(ctx, issuer, issuer)) { ctx->error = X509_V_ERR_UNABLE_TO_GET_CRL_ISSUER; ok = ctx->verify_cb(0, ctx); if (!ok) goto err; } } if (issuer) { /* * Skip most tests for deltas because they have already been done */ if (!crl->base_crl_number) { /* Check for cRLSign bit if keyUsage present */ if ((issuer->ex_flags & EXFLAG_KUSAGE) && !(issuer->ex_kusage & KU_CRL_SIGN)) { ctx->error = X509_V_ERR_KEYUSAGE_NO_CRL_SIGN; ok = ctx->verify_cb(0, ctx); if (!ok) goto err; } if (!(ctx->current_crl_score & CRL_SCORE_SCOPE)) { ctx->error = X509_V_ERR_DIFFERENT_CRL_SCOPE; ok = ctx->verify_cb(0, ctx); if (!ok) goto err; } if (!(ctx->current_crl_score & CRL_SCORE_SAME_PATH)) { if (check_crl_path(ctx, ctx->current_issuer) <= 0) { ctx->error = X509_V_ERR_CRL_PATH_VALIDATION_ERROR; ok = ctx->verify_cb(0, ctx); if (!ok) goto err; } } if (crl->idp_flags & IDP_INVALID) { ctx->error = X509_V_ERR_INVALID_EXTENSION; ok = ctx->verify_cb(0, ctx); if (!ok) goto err; } } if (!(ctx->current_crl_score & CRL_SCORE_TIME)) { ok = check_crl_time(ctx, crl, 1); if (!ok) goto err; } /* Attempt to get issuer certificate public key */ ikey = X509_get_pubkey(issuer); if (!ikey) { ctx->error = X509_V_ERR_UNABLE_TO_DECODE_ISSUER_PUBLIC_KEY; ok = ctx->verify_cb(0, ctx); if (!ok) goto err; } else { /* Verify CRL signature */ if (X509_CRL_verify(crl, ikey) <= 0) { ctx->error = X509_V_ERR_CRL_SIGNATURE_FAILURE; ok = ctx->verify_cb(0, 
ctx); if (!ok) goto err; } } } ok = 1; err: EVP_PKEY_free(ikey); return ok; }
5,396
146,287
0
void WebGLRenderingContextBase::EnableOrDisable(GLenum capability, bool enable) { if (isContextLost()) return; if (enable) ContextGL()->Enable(capability); else ContextGL()->Disable(capability); }
5,397
53,908
0
int ndp_get_log_priority(struct ndp *ndp) { return ndp->log_priority; }
5,398
49,057
0
static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg, u32 bw_cap[]) { struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg)); struct ieee80211_supported_band *band; struct ieee80211_channel *channel; struct wiphy *wiphy; struct brcmf_chanspec_list *list; struct brcmu_chan ch; int err; u8 *pbuf; u32 i, j; u32 total; u32 chaninfo; u32 index; pbuf = kzalloc(BRCMF_DCMD_MEDLEN, GFP_KERNEL); if (pbuf == NULL) return -ENOMEM; list = (struct brcmf_chanspec_list *)pbuf; err = brcmf_fil_iovar_data_get(ifp, "chanspecs", pbuf, BRCMF_DCMD_MEDLEN); if (err) { brcmf_err("get chanspecs error (%d)\n", err); goto fail_pbuf; } wiphy = cfg_to_wiphy(cfg); band = wiphy->bands[NL80211_BAND_2GHZ]; if (band) for (i = 0; i < band->n_channels; i++) band->channels[i].flags = IEEE80211_CHAN_DISABLED; band = wiphy->bands[NL80211_BAND_5GHZ]; if (band) for (i = 0; i < band->n_channels; i++) band->channels[i].flags = IEEE80211_CHAN_DISABLED; total = le32_to_cpu(list->count); for (i = 0; i < total; i++) { ch.chspec = (u16)le32_to_cpu(list->element[i]); cfg->d11inf.decchspec(&ch); if (ch.band == BRCMU_CHAN_BAND_2G) { band = wiphy->bands[NL80211_BAND_2GHZ]; } else if (ch.band == BRCMU_CHAN_BAND_5G) { band = wiphy->bands[NL80211_BAND_5GHZ]; } else { brcmf_err("Invalid channel Spec. 0x%x.\n", ch.chspec); continue; } if (!band) continue; if (!(bw_cap[band->band] & WLC_BW_40MHZ_BIT) && ch.bw == BRCMU_CHAN_BW_40) continue; if (!(bw_cap[band->band] & WLC_BW_80MHZ_BIT) && ch.bw == BRCMU_CHAN_BW_80) continue; channel = band->channels; index = band->n_channels; for (j = 0; j < band->n_channels; j++) { if (channel[j].hw_value == ch.control_ch_num) { index = j; break; } } channel[index].center_freq = ieee80211_channel_to_frequency(ch.control_ch_num, band->band); channel[index].hw_value = ch.control_ch_num; /* assuming the chanspecs order is HT20, * HT40 upper, HT40 lower, and VHT80. 
*/ if (ch.bw == BRCMU_CHAN_BW_80) { channel[index].flags &= ~IEEE80211_CHAN_NO_80MHZ; } else if (ch.bw == BRCMU_CHAN_BW_40) { brcmf_update_bw40_channel_flag(&channel[index], &ch); } else { /* enable the channel and disable other bandwidths * for now as mentioned order assure they are enabled * for subsequent chanspecs. */ channel[index].flags = IEEE80211_CHAN_NO_HT40 | IEEE80211_CHAN_NO_80MHZ; ch.bw = BRCMU_CHAN_BW_20; cfg->d11inf.encchspec(&ch); chaninfo = ch.chspec; err = brcmf_fil_bsscfg_int_get(ifp, "per_chan_info", &chaninfo); if (!err) { if (chaninfo & WL_CHAN_RADAR) channel[index].flags |= (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR); if (chaninfo & WL_CHAN_PASSIVE) channel[index].flags |= IEEE80211_CHAN_NO_IR; } } } fail_pbuf: kfree(pbuf); return err; }
5,399