unique_id: int64 (13 – 189k)
target: int64 (0 – 1)
code: string (lengths 20 – 241k)
__index_level_0__: int64 (0 – 18.9k)
64,168
0
grub_fshelp_find_file (const char *path, grub_fshelp_node_t rootnode, grub_fshelp_node_t *foundnode, int (*iterate_dir) (grub_fshelp_node_t dir, int (*hook) (const char *filename, enum grub_fshelp_filetype filetype, grub_fshelp_node_t node, void *closure), void *closure), void *closure, char *(*read_symlink) (grub_fshelp_node_t node), enum grub_fshelp_filetype expecttype) { grub_err_t err; struct grub_fshelp_find_file_closure c; c.rootnode = rootnode; c.iterate_dir = iterate_dir; c.closure = closure; c.read_symlink = read_symlink; c.symlinknest = 0; c.foundtype = GRUB_FSHELP_DIR; if (!path || path[0] != '/') { grub_error (GRUB_ERR_BAD_FILENAME, "bad filename"); return grub_errno; } err = find_file (path, rootnode, foundnode, &c); if (err) return err; /* Check if the node that was found was of the expected type. */ if (expecttype == GRUB_FSHELP_REG && c.foundtype != expecttype) return grub_error (GRUB_ERR_BAD_FILE_TYPE, "not a regular file"); else if (expecttype == GRUB_FSHELP_DIR && c.foundtype != expecttype) return grub_error (GRUB_ERR_BAD_FILE_TYPE, "not a directory"); return 0; }
6,400
53,880
0
acpi_os_write_memory(acpi_physical_address phys_addr, u64 value, u32 width) { void __iomem *virt_addr; unsigned int size = width / 8; bool unmap = false; rcu_read_lock(); virt_addr = acpi_map_vaddr_lookup(phys_addr, size); if (!virt_addr) { rcu_read_unlock(); virt_addr = acpi_os_ioremap(phys_addr, size); if (!virt_addr) return AE_BAD_ADDRESS; unmap = true; } switch (width) { case 8: writeb(value, virt_addr); break; case 16: writew(value, virt_addr); break; case 32: writel(value, virt_addr); break; case 64: writeq(value, virt_addr); break; default: BUG(); } if (unmap) iounmap(virt_addr); else rcu_read_unlock(); return AE_OK; }
6,401
186,953
1
bool AppCacheDatabase::FindEntriesForCache(int64_t cache_id, std::vector<EntryRecord>* records) { DCHECK(records && records->empty()); if (!LazyOpen(kDontCreate)) return false; static const char kSql[] = "SELECT cache_id, url, flags, response_id, response_size FROM Entries" " WHERE cache_id = ?"; sql::Statement statement(db_->GetCachedStatement(SQL_FROM_HERE, kSql)); statement.BindInt64(0, cache_id); while (statement.Step()) { records->push_back(EntryRecord()); ReadEntryRecord(statement, &records->back()); DCHECK(records->back().cache_id == cache_id); } return statement.Succeeded(); }
6,402
69,661
0
entry_guard_add_bridge_to_sample(guard_selection_t *gs, const bridge_info_t *bridge) { const uint8_t *id_digest = bridge_get_rsa_id_digest(bridge); const tor_addr_port_t *addrport = bridge_get_addr_port(bridge); tor_assert(addrport); /* make sure that the guard is not already sampled. */ if (BUG(get_sampled_guard_for_bridge(gs, bridge))) return NULL; // LCOV_EXCL_LINE return entry_guard_add_to_sample_impl(gs, id_digest, NULL, addrport); }
6,403
37,750
0
static int emulate_on_interception(struct vcpu_svm *svm) { return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE; }
6,404
36,212
0
static void *m_start(struct seq_file *m, loff_t *pos) { struct proc_mounts *p = proc_mounts(m); down_read(&namespace_sem); if (p->cached_event == p->ns->event) { void *v = p->cached_mount; if (*pos == p->cached_index) return v; if (*pos == p->cached_index + 1) { v = seq_list_next(v, &p->ns->list, &p->cached_index); return p->cached_mount = v; } } p->cached_event = p->ns->event; p->cached_mount = seq_list_start(&p->ns->list, *pos); p->cached_index = *pos; return p->cached_mount; }
6,405
129,707
0
ResourcePtr<RawResource> ResourceFetcher::fetchRawResource(FetchRequest& request) { return toRawResource(requestResource(Resource::Raw, request)); }
6,406
64,198
0
AP_DECLARE(const char *) ap_get_server_name(request_rec *r) { conn_rec *conn = r->connection; core_dir_config *d; const char *retval; d = (core_dir_config *)ap_get_core_module_config(r->per_dir_config); switch (d->use_canonical_name) { case USE_CANONICAL_NAME_ON: retval = r->server->server_hostname; break; case USE_CANONICAL_NAME_DNS: if (conn->local_host == NULL) { if (apr_getnameinfo(&conn->local_host, conn->local_addr, 0) != APR_SUCCESS) conn->local_host = apr_pstrdup(conn->pool, r->server->server_hostname); else { ap_str_tolower(conn->local_host); } } retval = conn->local_host; break; case USE_CANONICAL_NAME_OFF: case USE_CANONICAL_NAME_UNSET: retval = r->hostname ? r->hostname : r->server->server_hostname; break; default: ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(00109) "ap_get_server_name: Invalid UCN Option somehow"); retval = "localhost"; break; } return retval; }
6,407
170,012
0
xsltTemplateParamsCleanup(xsltTransformContextPtr ctxt) { xsltStackElemPtr param; for (; ctxt->varsNr > ctxt->varsBase; ctxt->varsNr--) { param = ctxt->varsTab[ctxt->varsNr -1]; /* * Free xsl:param items. * xsl:with-param items will have a level of -1 or -2. */ if (param->level >= 0) { xsltFreeStackElemList(param); } } if (ctxt->varsNr > 0) ctxt->vars = ctxt->varsTab[ctxt->varsNr - 1]; else ctxt->vars = NULL; }
6,408
118,995
0
void WebContentsImpl::OnRequestPpapiBrokerPermission( int routing_id, const GURL& url, const base::FilePath& plugin_path) { if (!delegate_) { OnPpapiBrokerPermissionResult(routing_id, false); return; } if (!delegate_->RequestPpapiBrokerPermission( this, url, plugin_path, base::Bind(&WebContentsImpl::OnPpapiBrokerPermissionResult, base::Unretained(this), routing_id))) { NOTIMPLEMENTED(); OnPpapiBrokerPermissionResult(routing_id, false); } }
6,409
173,324
0
type_message(struct file *file, png_uint_32 type, const char *what) /* Error message for a chunk; the chunk name comes from 'type' */ { if (file->global->errors) { fputs(file->file_name, stderr); type_sep(stderr); type_name(type, stderr); type_sep(stderr); fputs(what, stderr); putc('\n', stderr); } }
6,410
181,352
1
static void svc_rdma_destroy_maps(struct svcxprt_rdma *xprt) { while (!list_empty(&xprt->sc_maps)) { struct svc_rdma_req_map *map; map = list_first_entry(&xprt->sc_maps, struct svc_rdma_req_map, free); list_del(&map->free); kfree(map); } }
6,411
72,175
0
mm_answer_gss_checkmic(int sock, Buffer *m) { gss_buffer_desc gssbuf, mic; OM_uint32 ret; u_int len; if (!options.gss_authentication) fatal("%s: GSSAPI authentication not enabled", __func__); gssbuf.value = buffer_get_string(m, &len); gssbuf.length = len; mic.value = buffer_get_string(m, &len); mic.length = len; ret = ssh_gssapi_checkmic(gsscontext, &gssbuf, &mic); free(gssbuf.value); free(mic.value); buffer_clear(m); buffer_put_int(m, ret); mm_request_send(sock, MONITOR_ANS_GSSCHECKMIC, m); if (!GSS_ERROR(ret)) monitor_permit(mon_dispatch, MONITOR_REQ_GSSUSEROK, 1); return (0); }
6,412
177,604
0
ExternalFrameBufferMD5Test() : DecoderTest(GET_PARAM(::libvpx_test::kCodecFactoryParam)), md5_file_(NULL), num_buffers_(0) {}
6,413
11,985
0
int dtls1_write_bytes(SSL *s, int type, const void *buf, int len) { int i; OPENSSL_assert(len <= SSL3_RT_MAX_PLAIN_LENGTH); s->rwstate = SSL_NOTHING; i = do_dtls1_write(s, type, buf, len, 0); return i; }
6,414
61,994
0
ikev1_n_print(netdissect_options *ndo, u_char tpay _U_, const struct isakmp_gen *ext, u_int item_len, const u_char *ep, uint32_t phase _U_, uint32_t doi0 _U_, uint32_t proto0 _U_, int depth _U_) { const struct ikev1_pl_n *p; struct ikev1_pl_n n; const u_char *cp; const u_char *ep2; uint32_t doi; uint32_t proto; static const char *notify_error_str[] = { NULL, "INVALID-PAYLOAD-TYPE", "DOI-NOT-SUPPORTED", "SITUATION-NOT-SUPPORTED", "INVALID-COOKIE", "INVALID-MAJOR-VERSION", "INVALID-MINOR-VERSION", "INVALID-EXCHANGE-TYPE", "INVALID-FLAGS", "INVALID-MESSAGE-ID", "INVALID-PROTOCOL-ID", "INVALID-SPI", "INVALID-TRANSFORM-ID", "ATTRIBUTES-NOT-SUPPORTED", "NO-PROPOSAL-CHOSEN", "BAD-PROPOSAL-SYNTAX", "PAYLOAD-MALFORMED", "INVALID-KEY-INFORMATION", "INVALID-ID-INFORMATION", "INVALID-CERT-ENCODING", "INVALID-CERTIFICATE", "CERT-TYPE-UNSUPPORTED", "INVALID-CERT-AUTHORITY", "INVALID-HASH-INFORMATION", "AUTHENTICATION-FAILED", "INVALID-SIGNATURE", "ADDRESS-NOTIFICATION", "NOTIFY-SA-LIFETIME", "CERTIFICATE-UNAVAILABLE", "UNSUPPORTED-EXCHANGE-TYPE", "UNEQUAL-PAYLOAD-LENGTHS", }; static const char *ipsec_notify_error_str[] = { "RESERVED", }; static const char *notify_status_str[] = { "CONNECTED", }; static const char *ipsec_notify_status_str[] = { "RESPONDER-LIFETIME", "REPLAY-STATUS", "INITIAL-CONTACT", }; /* NOTE: these macro must be called with x in proper range */ /* 0 - 8191 */ #define NOTIFY_ERROR_STR(x) \ STR_OR_ID((x), notify_error_str) /* 8192 - 16383 */ #define IPSEC_NOTIFY_ERROR_STR(x) \ STR_OR_ID((u_int)((x) - 8192), ipsec_notify_error_str) /* 16384 - 24575 */ #define NOTIFY_STATUS_STR(x) \ STR_OR_ID((u_int)((x) - 16384), notify_status_str) /* 24576 - 32767 */ #define IPSEC_NOTIFY_STATUS_STR(x) \ STR_OR_ID((u_int)((x) - 24576), ipsec_notify_status_str) ND_PRINT((ndo,"%s:", NPSTR(ISAKMP_NPTYPE_N))); p = (const struct ikev1_pl_n *)ext; ND_TCHECK(*p); UNALIGNED_MEMCPY(&n, ext, sizeof(n)); doi = ntohl(n.doi); proto = n.prot_id; if (doi != 1) { ND_PRINT((ndo," doi=%d", doi)); ND_PRINT((ndo," proto=%d", proto)); if (ntohs(n.type) < 8192) ND_PRINT((ndo," type=%s", NOTIFY_ERROR_STR(ntohs(n.type)))); else if (ntohs(n.type) < 16384) ND_PRINT((ndo," type=%s", numstr(ntohs(n.type)))); else if (ntohs(n.type) < 24576) ND_PRINT((ndo," type=%s", NOTIFY_STATUS_STR(ntohs(n.type)))); else ND_PRINT((ndo," type=%s", numstr(ntohs(n.type)))); if (n.spi_size) { ND_PRINT((ndo," spi=")); if (!rawprint(ndo, (const uint8_t *)(p + 1), n.spi_size)) goto trunc; } return (const u_char *)(p + 1) + n.spi_size; } ND_PRINT((ndo," doi=ipsec")); ND_PRINT((ndo," proto=%s", PROTOIDSTR(proto))); if (ntohs(n.type) < 8192) ND_PRINT((ndo," type=%s", NOTIFY_ERROR_STR(ntohs(n.type)))); else if (ntohs(n.type) < 16384) ND_PRINT((ndo," type=%s", IPSEC_NOTIFY_ERROR_STR(ntohs(n.type)))); else if (ntohs(n.type) < 24576) ND_PRINT((ndo," type=%s", NOTIFY_STATUS_STR(ntohs(n.type)))); else if (ntohs(n.type) < 32768) ND_PRINT((ndo," type=%s", IPSEC_NOTIFY_STATUS_STR(ntohs(n.type)))); else ND_PRINT((ndo," type=%s", numstr(ntohs(n.type)))); if (n.spi_size) { ND_PRINT((ndo," spi=")); if (!rawprint(ndo, (const uint8_t *)(p + 1), n.spi_size)) goto trunc; } cp = (const u_char *)(p + 1) + n.spi_size; ep2 = (const u_char *)p + item_len; if (cp < ep) { switch (ntohs(n.type)) { case IPSECDOI_NTYPE_RESPONDER_LIFETIME: { const struct attrmap *map = oakley_t_map; size_t nmap = sizeof(oakley_t_map)/sizeof(oakley_t_map[0]); ND_PRINT((ndo," attrs=(")); while (cp < ep && cp < ep2) { cp = ikev1_attrmap_print(ndo, cp, ep2, map, nmap); if (cp == NULL) { 
ND_PRINT((ndo,")")); goto trunc; } } ND_PRINT((ndo,")")); break; } case IPSECDOI_NTYPE_REPLAY_STATUS: ND_PRINT((ndo," status=(")); ND_PRINT((ndo,"replay detection %sabled", EXTRACT_32BITS(cp) ? "en" : "dis")); ND_PRINT((ndo,")")); break; default: /* * XXX - fill in more types here; see, for example, * draft-ietf-ipsec-notifymsg-04. */ if (ndo->ndo_vflag > 3) { ND_PRINT((ndo," data=(")); if (!rawprint(ndo, (const uint8_t *)(cp), ep - cp)) goto trunc; ND_PRINT((ndo,")")); } else { if (!ike_show_somedata(ndo, cp, ep)) goto trunc; } break; } } return (const u_char *)ext + item_len; trunc: ND_PRINT((ndo," [|%s]", NPSTR(ISAKMP_NPTYPE_N))); return NULL; }
6,415
93,414
0
static int netdev_adjacent_sysfs_add(struct net_device *dev, struct net_device *adj_dev, struct list_head *dev_list) { char linkname[IFNAMSIZ+7]; sprintf(linkname, dev_list == &dev->adj_list.upper ? "upper_%s" : "lower_%s", adj_dev->name); return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj), linkname); }
6,416
41,366
0
int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu) { vcpu->arch.nmi_pending = false; vcpu->arch.nmi_injected = false; vcpu->arch.switch_db_regs = 0; memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db)); vcpu->arch.dr6 = DR6_FIXED_1; vcpu->arch.dr7 = DR7_FIXED_1; kvm_make_request(KVM_REQ_EVENT, vcpu); vcpu->arch.apf.msr_val = 0; kvm_clear_async_pf_completion_queue(vcpu); kvm_async_pf_hash_reset(vcpu); vcpu->arch.apf.halted = false; return kvm_x86_ops->vcpu_reset(vcpu); }
6,417
92,199
0
static void set_sig_seg(struct mlx5_rwqe_sig *sig, int size) { sig->signature = calc_sig(sig, size); }
6,418
66,451
0
static void cp2112_gpio_poll_callback(struct work_struct *work) { struct cp2112_device *dev = container_of(work, struct cp2112_device, gpio_poll_worker.work); struct irq_data *d; u8 gpio_mask; u8 virqs = (u8)dev->irq_mask; u32 irq_type; int irq, virq, ret; ret = cp2112_gpio_get_all(&dev->gc); if (ret == -ENODEV) /* the hardware has been disconnected */ return; if (ret < 0) goto exit; gpio_mask = ret; while (virqs) { virq = ffs(virqs) - 1; virqs &= ~BIT(virq); if (!dev->gc.to_irq) break; irq = dev->gc.to_irq(&dev->gc, virq); d = irq_get_irq_data(irq); if (!d) continue; irq_type = irqd_get_trigger_type(d); if (gpio_mask & BIT(virq)) { /* Level High */ if (irq_type & IRQ_TYPE_LEVEL_HIGH) handle_nested_irq(irq); if ((irq_type & IRQ_TYPE_EDGE_RISING) && !(dev->gpio_prev_state & BIT(virq))) handle_nested_irq(irq); } else { /* Level Low */ if (irq_type & IRQ_TYPE_LEVEL_LOW) handle_nested_irq(irq); if ((irq_type & IRQ_TYPE_EDGE_FALLING) && (dev->gpio_prev_state & BIT(virq))) handle_nested_irq(irq); } } dev->gpio_prev_state = gpio_mask; exit: if (dev->gpio_poll) schedule_delayed_work(&dev->gpio_poll_worker, 10); }
6,419
181,412
1
nfssvc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd_writeargs *args) { unsigned int len, hdr, dlen; struct kvec *head = rqstp->rq_arg.head; int v; p = decode_fh(p, &args->fh); if (!p) return 0; p++; /* beginoffset */ args->offset = ntohl(*p++); /* offset */ p++; /* totalcount */ len = args->len = ntohl(*p++); /* * The protocol specifies a maximum of 8192 bytes. */ if (len > NFSSVC_MAXBLKSIZE_V2) return 0; /* * Check to make sure that we got the right number of * bytes. */ hdr = (void*)p - head->iov_base; dlen = head->iov_len + rqstp->rq_arg.page_len - hdr; /* * Round the length of the data which was specified up to * the next multiple of XDR units and then compare that * against the length which was actually received. * Note that when RPCSEC/GSS (for example) is used, the * data buffer can be padded so dlen might be larger * than required. It must never be smaller. */ if (dlen < XDR_QUADLEN(len)*4) return 0; rqstp->rq_vec[0].iov_base = (void*)p; rqstp->rq_vec[0].iov_len = head->iov_len - hdr; v = 0; while (len > rqstp->rq_vec[v].iov_len) { len -= rqstp->rq_vec[v].iov_len; v++; rqstp->rq_vec[v].iov_base = page_address(rqstp->rq_pages[v]); rqstp->rq_vec[v].iov_len = PAGE_SIZE; } rqstp->rq_vec[v].iov_len = len; args->vlen = v + 1; return 1; }
6,420
24,880
0
static void *s_start(struct seq_file *m, loff_t *pos) { loff_t n = *pos; down_read(&slub_lock); if (!n) print_slabinfo_header(m); return seq_list_start(&slab_caches, *pos); }
6,421
135,743
0
AtomicString GetInputModeAttribute(Element* element) { if (!element) return AtomicString(); bool query_attribute = false; if (isHTMLInputElement(*element)) { query_attribute = toHTMLInputElement(*element).SupportsInputModeAttribute(); } else if (isHTMLTextAreaElement(*element)) { query_attribute = true; } else { element->GetDocument().UpdateStyleAndLayoutTree(); if (HasEditableStyle(*element)) query_attribute = true; } if (!query_attribute) return AtomicString(); return element->FastGetAttribute(HTMLNames::inputmodeAttr).DeprecatedLower(); }
6,422
16,009
0
bool xmp_files_can_put_xmp_cstr(XmpFilePtr xf, const char* xmp_packet, size_t len) { CHECK_PTR(xf, false); RESET_ERROR; SXMPFiles *txf = reinterpret_cast<SXMPFiles*>(xf); bool result = false; try { result = txf->CanPutXMP(xmp_packet, len); } catch(const XMP_Error & e) { set_error(e); return false; } return result; }
6,423
154,067
0
void GLES2DecoderImpl::DoSetReadbackBufferShadowAllocationINTERNAL( GLuint buffer_id, GLuint shm_id, GLuint shm_offset, GLuint size) { static const char kFunctionName[] = "glSetBufferShadowAllocationINTERNAL"; scoped_refptr<Buffer> buffer = buffer_manager()->GetBuffer(buffer_id); if (!buffer) { LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, kFunctionName, "unknown buffer"); return; } if (static_cast<GLsizeiptr>(size) != buffer->size()) { MarkContextLost(error::kGuilty); group_->LoseContexts(error::kUnknown); return; } scoped_refptr<gpu::Buffer> shm = GetSharedMemoryBuffer(shm_id); buffer->SetReadbackShadowAllocation(shm, shm_offset); writes_submitted_but_not_completed_.insert(buffer); }
6,424
21,216
0
static void copy_user_gigantic_page(struct page *dst, struct page *src, unsigned long addr, struct vm_area_struct *vma, unsigned int pages_per_huge_page) { int i; struct page *dst_base = dst; struct page *src_base = src; for (i = 0; i < pages_per_huge_page; ) { cond_resched(); copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma); i++; dst = mem_map_next(dst, dst_base, i); src = mem_map_next(src, src_base, i); } }
6,425
163,006
0
void ResetMaxCapacityBytes(size_t max_capacity_bytes) { max_capacity_bytes_ = max_capacity_bytes; Initialize(); }
6,426
46,996
0
static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct serpent_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); be128 buf[SERPENT_AVX2_PARALLEL_BLOCKS]; struct crypt_priv crypt_ctx = { .ctx = &ctx->serpent_ctx, .fpu_enabled = false, }; struct lrw_crypt_req req = { .tbuf = buf, .tbuflen = sizeof(buf), .table_ctx = &ctx->lrw_table, .crypt_ctx = &crypt_ctx, .crypt_fn = decrypt_callback, }; int ret; desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; ret = lrw_crypt(desc, dst, src, nbytes, &req); serpent_fpu_end(crypt_ctx.fpu_enabled); return ret; }
6,427
167,740
0
void WebRuntimeFeatures::EnableOrientationEvent(bool enable) { RuntimeEnabledFeatures::SetOrientationEventEnabled(enable); }
6,428
180,297
1
void gdImageCopyMerge (gdImagePtr dst, gdImagePtr src, int dstX, int dstY, int srcX, int srcY, int w, int h, int pct) { int c, dc; int x, y; int tox, toy; int ncR, ncG, ncB; toy = dstY; for (y = srcY; y < (srcY + h); y++) { tox = dstX; for (x = srcX; x < (srcX + w); x++) { int nc; c = gdImageGetPixel(src, x, y); /* Added 7/24/95: support transparent copies */ if (gdImageGetTransparent(src) == c) { tox++; continue; } /* If it's the same image, mapping is trivial */ if (dst == src) { nc = c; } else { dc = gdImageGetPixel(dst, tox, toy); ncR = (int)(gdImageRed (src, c) * (pct / 100.0) + gdImageRed (dst, dc) * ((100 - pct) / 100.0)); ncG = (int)(gdImageGreen (src, c) * (pct / 100.0) + gdImageGreen (dst, dc) * ((100 - pct) / 100.0)); ncB = (int)(gdImageBlue (src, c) * (pct / 100.0) + gdImageBlue (dst, dc) * ((100 - pct) / 100.0)); /* Find a reasonable color */ nc = gdImageColorResolve (dst, ncR, ncG, ncB); } gdImageSetPixel (dst, tox, toy, nc); tox++; } toy++; } }
6,429
63,495
0
static void test_hash_module() { uint8_t blob[] = {0x61, 0x62, 0x63, 0x64, 0x65}; assert_true_rule_blob( "import \"hash\" \ rule test { \ condition: \ hash.md5(0, filesize) == \ \"ab56b4d92b40713acc5af89985d4b786\" \ and \ hash.md5(1, filesize) == \ \"e02cfbe5502b64aa5ae9f2d0d69eaa8d\" \ and \ hash.sha1(0, filesize) == \ \"03de6c570bfe24bfc328ccd7ca46b76eadaf4334\" \ and \ hash.sha1(1, filesize) == \ \"a302d65ae4d9e768a1538d53605f203fd8e2d6e2\" \ and \ hash.sha256(0, filesize) == \ \"36bbe50ed96841d10443bcb670d6554f0a34b761be67ec9c4a8ad2c0c44ca42c\" \ and \ hash.sha256(1, filesize) == \ \"aaaaf2863e043b9df604158ad5c16ff1adaf3fd7e9fcea5dcb322b6762b3b59a\" \ }", blob); assert_true_rule_blob( "import \"hash\" \ rule test { \ condition: \ hash.md5(0, filesize) == \ \"ab56b4d92b40713acc5af89985d4b786\" \ and \ hash.md5(1, filesize) == \ \"e02cfbe5502b64aa5ae9f2d0d69eaa8d\" \ and \ hash.md5(0, filesize) == \ \"ab56b4d92b40713acc5af89985d4b786\" \ and \ hash.md5(1, filesize) == \ \"e02cfbe5502b64aa5ae9f2d0d69eaa8d\" \ }", blob); }
6,430
139,031
0
void run() { if (!m_loader) return; ExecutionContext& context = m_loader->element()->document(); InspectorInstrumentation::AsyncTask asyncTask(&context, this); if (m_scriptState->contextIsValid()) { ScriptState::Scope scope(m_scriptState.get()); m_loader->doUpdateFromElement(m_shouldBypassMainWorldCSP, m_updateBehavior, m_requestURL, m_referrerPolicy); } else { m_loader->doUpdateFromElement(m_shouldBypassMainWorldCSP, m_updateBehavior, m_requestURL, m_referrerPolicy); } }
6,431
106,688
0
static void compositionToUnderlines(const Vector<DWORD>& clauses, const Vector<BYTE>& attributes, Vector<CompositionUnderline>& underlines) { if (clauses.isEmpty()) { underlines.clear(); return; } size_t numBoundaries = clauses.size() - 1; underlines.resize(numBoundaries); for (unsigned i = 0; i < numBoundaries; ++i) { underlines[i].startOffset = clauses[i]; underlines[i].endOffset = clauses[i + 1]; BYTE attribute = attributes[clauses[i]]; underlines[i].thick = attribute == ATTR_TARGET_CONVERTED || attribute == ATTR_TARGET_NOTCONVERTED; underlines[i].color = Color::black; } }
6,432
88,778
0
static void Process_ipfix_templates(exporter_ipfix_domain_t *exporter, void *flowset_header, uint32_t size_left, FlowSource_t *fs) { ipfix_template_record_t *ipfix_template_record; void *DataPtr; uint32_t count; size_left -= 4; // subtract message header DataPtr = flowset_header + 4; ipfix_template_record = (ipfix_template_record_t *)DataPtr; count = ntohs(ipfix_template_record->FieldCount); if ( count == 0 ) { Process_ipfix_template_withdraw(exporter, DataPtr, size_left, fs); } else { Process_ipfix_template_add(exporter, DataPtr, size_left, fs); } } // End of Process_ipfix_templates
6,433
55,818
0
static int show_numa_map(struct seq_file *m, void *v, int is_pid) { struct numa_maps_private *numa_priv = m->private; struct proc_maps_private *proc_priv = &numa_priv->proc_maps; struct vm_area_struct *vma = v; struct numa_maps *md = &numa_priv->md; struct file *file = vma->vm_file; struct mm_struct *mm = vma->vm_mm; struct mm_walk walk = { .hugetlb_entry = gather_hugetlb_stats, .pmd_entry = gather_pte_stats, .private = md, .mm = mm, }; struct mempolicy *pol; char buffer[64]; int nid; if (!mm) return 0; /* Ensure we start with an empty set of numa_maps statistics. */ memset(md, 0, sizeof(*md)); pol = __get_vma_policy(vma, vma->vm_start); if (pol) { mpol_to_str(buffer, sizeof(buffer), pol); mpol_cond_put(pol); } else { mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy); } seq_printf(m, "%08lx %s", vma->vm_start, buffer); if (file) { seq_puts(m, " file="); seq_path(m, &file->f_path, "\n\t= "); } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) { seq_puts(m, " heap"); } else { pid_t tid = pid_of_stack(proc_priv, vma, is_pid); if (tid != 0) { /* * Thread stack in /proc/PID/task/TID/maps or * the main process stack. */ if (!is_pid || (vma->vm_start <= mm->start_stack && vma->vm_end >= mm->start_stack)) seq_puts(m, " stack"); else seq_printf(m, " stack:%d", tid); } } if (is_vm_hugetlb_page(vma)) seq_puts(m, " huge"); /* mmap_sem is held by m_start */ walk_page_vma(vma, &walk); if (!md->pages) goto out; if (md->anon) seq_printf(m, " anon=%lu", md->anon); if (md->dirty) seq_printf(m, " dirty=%lu", md->dirty); if (md->pages != md->anon && md->pages != md->dirty) seq_printf(m, " mapped=%lu", md->pages); if (md->mapcount_max > 1) seq_printf(m, " mapmax=%lu", md->mapcount_max); if (md->swapcache) seq_printf(m, " swapcache=%lu", md->swapcache); if (md->active < md->pages && !is_vm_hugetlb_page(vma)) seq_printf(m, " active=%lu", md->active); if (md->writeback) seq_printf(m, " writeback=%lu", md->writeback); for_each_node_state(nid, N_MEMORY) if (md->node[nid]) seq_printf(m, " N%d=%lu", nid, md->node[nid]); seq_printf(m, " kernelpagesize_kB=%lu", vma_kernel_pagesize(vma) >> 10); out: seq_putc(m, '\n'); m_cache_vma(m, vma); return 0; }
6,434
68,313
0
static void ctx_sched_out(struct perf_event_context *ctx, struct perf_cpu_context *cpuctx, enum event_type_t event_type) { int is_active = ctx->is_active; struct perf_event *event; lockdep_assert_held(&ctx->lock); if (likely(!ctx->nr_events)) { /* * See __perf_remove_from_context(). */ WARN_ON_ONCE(ctx->is_active); if (ctx->task) WARN_ON_ONCE(cpuctx->task_ctx); return; } ctx->is_active &= ~event_type; if (!(ctx->is_active & EVENT_ALL)) ctx->is_active = 0; if (ctx->task) { WARN_ON_ONCE(cpuctx->task_ctx != ctx); if (!ctx->is_active) cpuctx->task_ctx = NULL; } /* * Always update time if it was set; not only when it changes. * Otherwise we can 'forget' to update time for any but the last * context we sched out. For example: * * ctx_sched_out(.event_type = EVENT_FLEXIBLE) * ctx_sched_out(.event_type = EVENT_PINNED) * * would only update time for the pinned events. */ if (is_active & EVENT_TIME) { /* update (and stop) ctx time */ update_context_time(ctx); update_cgrp_time_from_cpuctx(cpuctx); } is_active ^= ctx->is_active; /* changed bits */ if (!ctx->nr_active || !(is_active & EVENT_ALL)) return; perf_pmu_disable(ctx->pmu); if (is_active & EVENT_PINNED) { list_for_each_entry(event, &ctx->pinned_groups, group_entry) group_sched_out(event, cpuctx, ctx); } if (is_active & EVENT_FLEXIBLE) { list_for_each_entry(event, &ctx->flexible_groups, group_entry) group_sched_out(event, cpuctx, ctx); } perf_pmu_enable(ctx->pmu); }
6,435
114,678
0
void MediaStreamImpl::OnVideoDeviceFailed(const std::string& label, int index) { DCHECK(CalledOnValidThread()); DVLOG(1) << "MediaStreamImpl::OnVideoDeviceFailed(" << label << ", " << index << ")"; NOTIMPLEMENTED(); }
6,436
140,722
0
void GLES2DecoderImpl::DoBlitFramebufferCHROMIUM( GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenum filter) { DCHECK(!ShouldDeferReads() && !ShouldDeferDraws()); if (!CheckBoundFramebuffersValid("glBlitFramebufferCHROMIUM")) { return; } state_.SetDeviceCapabilityState(GL_SCISSOR_TEST, false); ScopedRenderTo do_render(framebuffer_state_.bound_draw_framebuffer.get()); BlitFramebufferHelper( srcX0, srcY0, srcX1, srcY1, dstX0, dstY0, dstX1, dstY1, mask, filter); state_.SetDeviceCapabilityState(GL_SCISSOR_TEST, state_.enable_flags.scissor_test); }
6,437
116,754
0
void MockRenderThread::WidgetRestored() { }
6,438
143,300
0
ViewportDescription Document::viewportDescription() const { ViewportDescription appliedViewportDescription = m_viewportDescription; bool viewportMetaEnabled = settings() && settings()->viewportMetaEnabled(); if (m_legacyViewportDescription.type != ViewportDescription::UserAgentStyleSheet && viewportMetaEnabled) appliedViewportDescription = m_legacyViewportDescription; if (shouldOverrideLegacyDescription(m_viewportDescription.type)) appliedViewportDescription = m_viewportDescription; return appliedViewportDescription; }
6,439
70,101
0
static OPJ_BOOL opj_pi_next_lrcp(opj_pi_iterator_t * pi) { opj_pi_comp_t *comp = NULL; opj_pi_resolution_t *res = NULL; OPJ_UINT32 index = 0; if (!pi->first) { comp = &pi->comps[pi->compno]; res = &comp->resolutions[pi->resno]; goto LABEL_SKIP; } else { pi->first = 0; } for (pi->layno = pi->poc.layno0; pi->layno < pi->poc.layno1; pi->layno++) { for (pi->resno = pi->poc.resno0; pi->resno < pi->poc.resno1; pi->resno++) { for (pi->compno = pi->poc.compno0; pi->compno < pi->poc.compno1; pi->compno++) { comp = &pi->comps[pi->compno]; if (pi->resno >= comp->numresolutions) { continue; } res = &comp->resolutions[pi->resno]; if (!pi->tp_on) { pi->poc.precno1 = res->pw * res->ph; } for (pi->precno = pi->poc.precno0; pi->precno < pi->poc.precno1; pi->precno++) { index = pi->layno * pi->step_l + pi->resno * pi->step_r + pi->compno * pi->step_c + pi->precno * pi->step_p; if (!pi->include[index]) { pi->include[index] = 1; return OPJ_TRUE; } LABEL_SKIP: ; } } } } return OPJ_FALSE; }
6,440
176,725
0
void acquire_object(const sp<ProcessState>& proc, const flat_binder_object& obj, const void* who, size_t* outAshmemSize) { switch (obj.type) { case BINDER_TYPE_BINDER: if (obj.binder) { LOG_REFS("Parcel %p acquiring reference on local %p", who, obj.cookie); reinterpret_cast<IBinder*>(obj.cookie)->incStrong(who); } return; case BINDER_TYPE_WEAK_BINDER: if (obj.binder) reinterpret_cast<RefBase::weakref_type*>(obj.binder)->incWeak(who); return; case BINDER_TYPE_HANDLE: { const sp<IBinder> b = proc->getStrongProxyForHandle(obj.handle); if (b != NULL) { LOG_REFS("Parcel %p acquiring reference on remote %p", who, b.get()); b->incStrong(who); } return; } case BINDER_TYPE_WEAK_HANDLE: { const wp<IBinder> b = proc->getWeakProxyForHandle(obj.handle); if (b != NULL) b.get_refs()->incWeak(who); return; } case BINDER_TYPE_FD: { if ((obj.cookie != 0) && (outAshmemSize != NULL)) { struct stat st; int ret = fstat(obj.handle, &st); if (!ret && S_ISCHR(st.st_mode) && (st.st_rdev == ashmem_rdev())) { int size = ashmem_get_size_region(obj.handle); if (size > 0) { *outAshmemSize += size; } } } return; } } ALOGD("Invalid object type 0x%08x", obj.type); }
6,441
85,862
0
static void clone_endio(struct bio *bio) { blk_status_t error = bio->bi_status; struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); struct dm_io *io = tio->io; struct mapped_device *md = tio->io->md; dm_endio_fn endio = tio->ti->type->end_io; if (unlikely(error == BLK_STS_TARGET)) { if (bio_op(bio) == REQ_OP_WRITE_SAME && !bio->bi_disk->queue->limits.max_write_same_sectors) disable_write_same(md); if (bio_op(bio) == REQ_OP_WRITE_ZEROES && !bio->bi_disk->queue->limits.max_write_zeroes_sectors) disable_write_zeroes(md); } if (endio) { int r = endio(tio->ti, bio, &error); switch (r) { case DM_ENDIO_REQUEUE: error = BLK_STS_DM_REQUEUE; /*FALLTHRU*/ case DM_ENDIO_DONE: break; case DM_ENDIO_INCOMPLETE: /* The target will handle the io */ return; default: DMWARN("unimplemented target endio return value: %d", r); BUG(); } } free_tio(tio); dec_pending(io, error); }
6,442
144,215
0
void LockContentsView::RemoveUser(bool is_primary) { if (Shell::Get()->login_screen_controller()->IsAuthenticating()) return; LoginBigUserView* to_remove = is_primary ? primary_big_view_ : opt_secondary_big_view_; DCHECK(to_remove->GetCurrentUser()->can_remove); AccountId user = to_remove->GetCurrentUser()->basic_user_info->account_id; Shell::Get()->login_screen_controller()->RemoveUser(user); std::vector<mojom::LoginUserInfoPtr> new_users; if (!is_primary) new_users.push_back(primary_big_view_->GetCurrentUser()->Clone()); if (is_primary && opt_secondary_big_view_) new_users.push_back(opt_secondary_big_view_->GetCurrentUser()->Clone()); if (users_list_) { for (int i = 0; i < users_list_->user_count(); ++i) { new_users.push_back( users_list_->user_view_at(i)->current_user()->Clone()); } } data_dispatcher_->NotifyUsers(new_users); }
6,443
62,226
0
isis_print_mt_port_cap_subtlv(netdissect_options *ndo, const uint8_t *tptr, int len) { int stlv_type, stlv_len; const struct isis_subtlv_spb_mcid *subtlv_spb_mcid; int i; while (len > 2) { ND_TCHECK2(*tptr, 2); stlv_type = *(tptr++); stlv_len = *(tptr++); /* first lets see if we know the subTLVs name*/ ND_PRINT((ndo, "\n\t %s subTLV #%u, length: %u", tok2str(isis_mt_port_cap_subtlv_values, "unknown", stlv_type), stlv_type, stlv_len)); /*len -= TLV_TYPE_LEN_OFFSET;*/ len = len -2; /* Make sure the subTLV fits within the space left */ if (len < stlv_len) goto trunc; /* Make sure the entire subTLV is in the captured data */ ND_TCHECK2(*(tptr), stlv_len); switch (stlv_type) { case ISIS_SUBTLV_SPB_MCID: { if (stlv_len < ISIS_SUBTLV_SPB_MCID_MIN_LEN) goto trunc; subtlv_spb_mcid = (const struct isis_subtlv_spb_mcid *)tptr; ND_PRINT((ndo, "\n\t MCID: ")); isis_print_mcid(ndo, &(subtlv_spb_mcid->mcid)); /*tptr += SPB_MCID_MIN_LEN; len -= SPB_MCID_MIN_LEN; */ ND_PRINT((ndo, "\n\t AUX-MCID: ")); isis_print_mcid(ndo, &(subtlv_spb_mcid->aux_mcid)); /*tptr += SPB_MCID_MIN_LEN; len -= SPB_MCID_MIN_LEN; */ tptr = tptr + ISIS_SUBTLV_SPB_MCID_MIN_LEN; len = len - ISIS_SUBTLV_SPB_MCID_MIN_LEN; stlv_len = stlv_len - ISIS_SUBTLV_SPB_MCID_MIN_LEN; break; } case ISIS_SUBTLV_SPB_DIGEST: { if (stlv_len < ISIS_SUBTLV_SPB_DIGEST_MIN_LEN) goto trunc; ND_PRINT((ndo, "\n\t RES: %d V: %d A: %d D: %d", (*(tptr) >> 5), (((*tptr)>> 4) & 0x01), ((*(tptr) >> 2) & 0x03), ((*tptr) & 0x03))); tptr++; ND_PRINT((ndo, "\n\t Digest: ")); for(i=1;i<=8; i++) { ND_PRINT((ndo, "%08x ", EXTRACT_32BITS(tptr))); if (i%4 == 0 && i != 8) ND_PRINT((ndo, "\n\t ")); tptr = tptr + 4; } len = len - ISIS_SUBTLV_SPB_DIGEST_MIN_LEN; stlv_len = stlv_len - ISIS_SUBTLV_SPB_DIGEST_MIN_LEN; break; } case ISIS_SUBTLV_SPB_BVID: { while (stlv_len >= ISIS_SUBTLV_SPB_BVID_MIN_LEN) { ND_PRINT((ndo, "\n\t ECT: %08x", EXTRACT_32BITS(tptr))); tptr = tptr+4; ND_PRINT((ndo, " BVID: %d, U:%01x M:%01x ", (EXTRACT_16BITS (tptr) >> 4) , (EXTRACT_16BITS (tptr) >> 3) & 0x01, (EXTRACT_16BITS (tptr) >> 2) & 0x01)); tptr = tptr + 2; len = len - ISIS_SUBTLV_SPB_BVID_MIN_LEN; stlv_len = stlv_len - ISIS_SUBTLV_SPB_BVID_MIN_LEN; } break; } default: break; } tptr += stlv_len; len -= stlv_len; } return 0; trunc: ND_PRINT((ndo, "\n\t\t")); ND_PRINT((ndo, "%s", tstr)); return(1); }
6,444
22,950
0
static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_recovery_ops *ops) { struct inode *inode = state->inode; struct nfs_inode *nfsi = NFS_I(inode); struct file_lock *fl; int status = 0; down_write(&nfsi->rwsem); for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) { if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK))) continue; if (nfs_file_open_context(fl->fl_file)->state != state) continue; status = ops->recover_lock(state, fl); if (status >= 0) continue; switch (status) { default: printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n", __func__, status); case -NFS4ERR_EXPIRED: case -NFS4ERR_NO_GRACE: case -NFS4ERR_RECLAIM_BAD: case -NFS4ERR_RECLAIM_CONFLICT: /* kill_proc(fl->fl_pid, SIGLOST, 1); */ break; case -NFS4ERR_STALE_CLIENTID: goto out_err; } } up_write(&nfsi->rwsem); return 0; out_err: up_write(&nfsi->rwsem); return status; }
6,445
43,680
0
static int pick_link(struct nameidata *nd, struct path *link, struct inode *inode, unsigned seq) { int error; struct saved *last; if (unlikely(nd->total_link_count++ >= MAXSYMLINKS)) { path_to_nameidata(link, nd); return -ELOOP; } if (!(nd->flags & LOOKUP_RCU)) { if (link->mnt == nd->path.mnt) mntget(link->mnt); } error = nd_alloc_stack(nd); if (unlikely(error)) { if (error == -ECHILD) { if (unlikely(unlazy_link(nd, link, seq))) return -ECHILD; error = nd_alloc_stack(nd); } if (error) { path_put(link); return error; } } last = nd->stack + nd->depth++; last->link = *link; last->cookie = NULL; last->inode = inode; last->seq = seq; return 1; }
6,446
135,760
0
void InputMethodController::SetCompositionFromExistingText( const Vector<CompositionUnderline>& underlines, unsigned composition_start, unsigned composition_end) { Element* editable = GetFrame() .Selection() .ComputeVisibleSelectionInDOMTreeDeprecated() .RootEditableElement(); if (!editable) return; DCHECK(!GetDocument().NeedsLayoutTreeUpdate()); const EphemeralRange range = PlainTextRange(composition_start, composition_end).CreateRange(*editable); if (range.IsNull()) return; const Position start = range.StartPosition(); if (RootEditableElementOf(start) != editable) return; const Position end = range.EndPosition(); if (RootEditableElementOf(end) != editable) return; Clear(); AddCompositionUnderlines(underlines, editable, composition_start); has_composition_ = true; if (!composition_range_) composition_range_ = Range::Create(GetDocument()); composition_range_->setStart(range.StartPosition()); composition_range_->setEnd(range.EndPosition()); }
6,447
56,821
0
int usb_port_resume(struct usb_device *udev, pm_message_t msg) { struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent); struct usb_port *port_dev = hub->ports[udev->portnum - 1]; int port1 = udev->portnum; int status; u16 portchange, portstatus; if (!test_and_set_bit(port1, hub->child_usage_bits)) { status = pm_runtime_get_sync(&port_dev->dev); if (status < 0) { dev_dbg(&udev->dev, "can't resume usb port, status %d\n", status); return status; } } usb_lock_port(port_dev); /* Skip the initial Clear-Suspend step for a remote wakeup */ status = hub_port_status(hub, port1, &portstatus, &portchange); if (status == 0 && !port_is_suspended(hub, portstatus)) goto SuspendCleared; /* see 7.1.7.7; affects power usage, but not budgeting */ if (hub_is_superspeed(hub->hdev)) status = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_U0); else status = usb_clear_port_feature(hub->hdev, port1, USB_PORT_FEAT_SUSPEND); if (status) { dev_dbg(&port_dev->dev, "can't resume, status %d\n", status); } else { /* drive resume for USB_RESUME_TIMEOUT msec */ dev_dbg(&udev->dev, "usb %sresume\n", (PMSG_IS_AUTO(msg) ? "auto-" : "")); msleep(USB_RESUME_TIMEOUT); /* Virtual root hubs can trigger on GET_PORT_STATUS to * stop resume signaling. Then finish the resume * sequence. */ status = hub_port_status(hub, port1, &portstatus, &portchange); /* TRSMRCY = 10 msec */ msleep(10); } SuspendCleared: if (status == 0) { udev->port_is_suspended = 0; if (hub_is_superspeed(hub->hdev)) { if (portchange & USB_PORT_STAT_C_LINK_STATE) usb_clear_port_feature(hub->hdev, port1, USB_PORT_FEAT_C_PORT_LINK_STATE); } else { if (portchange & USB_PORT_STAT_C_SUSPEND) usb_clear_port_feature(hub->hdev, port1, USB_PORT_FEAT_C_SUSPEND); } } if (udev->persist_enabled && hub_is_superspeed(hub->hdev)) status = wait_for_ss_port_enable(udev, hub, &port1, &portchange, &portstatus); status = check_port_resume_type(udev, hub, port1, status, portchange, portstatus); if (status == 0) status = finish_port_resume(udev); if (status < 0) { dev_dbg(&udev->dev, "can't resume, status %d\n", status); hub_port_logical_disconnect(hub, port1); } else { /* Try to enable USB2 hardware LPM */ if (udev->usb2_hw_lpm_capable == 1) usb_set_usb2_hardware_lpm(udev, 1); /* Try to enable USB3 LTM and LPM */ usb_enable_ltm(udev); usb_unlocked_enable_lpm(udev); } usb_unlock_port(port_dev); return status; }
6,448
71,500
0
ModuleExport void UnregisterDIBImage(void) { (void) UnregisterMagickInfo("DIB"); }
6,449
159,168
0
void DownloadItemImpl::MarkAsComplete() { DCHECK_CURRENTLY_ON(BrowserThread::UI); DCHECK(AllDataSaved()); destination_info_.end_time = base::Time::Now(); TransitionTo(COMPLETE_INTERNAL); UpdateObservers(); }
6,450
73,223
0
static int jpc_poc_dumpparms(jpc_ms_t *ms, FILE *out) { jpc_poc_t *poc = &ms->parms.poc; jpc_pocpchg_t *pchg; int pchgno; for (pchgno = 0, pchg = poc->pchgs; pchgno < poc->numpchgs; ++pchgno, ++pchg) { fprintf(out, "po[%d] = %d; ", pchgno, pchg->prgord); fprintf(out, "cs[%d] = %d; ce[%d] = %d; ", pchgno, pchg->compnostart, pchgno, pchg->compnoend); fprintf(out, "rs[%d] = %d; re[%d] = %d; ", pchgno, pchg->rlvlnostart, pchgno, pchg->rlvlnoend); fprintf(out, "le[%d] = %d\n", pchgno, pchg->lyrnoend); } return 0; }
6,451
61,687
0
static void opj_tcd_code_block_enc_deallocate(opj_tcd_precinct_t * p_precinct) { OPJ_UINT32 cblkno, l_nb_code_blocks; opj_tcd_cblk_enc_t * l_code_block = p_precinct->cblks.enc; if (l_code_block) { l_nb_code_blocks = p_precinct->block_size / sizeof(opj_tcd_cblk_enc_t); for (cblkno = 0; cblkno < l_nb_code_blocks; ++cblkno) { if (l_code_block->data) { /* We refer to data - 1 since below we incremented it */ /* in opj_tcd_code_block_enc_allocate_data() */ opj_free(l_code_block->data - 1); l_code_block->data = 00; } if (l_code_block->layers) { opj_free(l_code_block->layers); l_code_block->layers = 00; } if (l_code_block->passes) { opj_free(l_code_block->passes); l_code_block->passes = 00; } ++l_code_block; } opj_free(p_precinct->cblks.enc); p_precinct->cblks.enc = 00; } }
6,452
103,366
0
BrowserFrameGtk::BrowserFrameGtk(BrowserFrame* browser_frame, BrowserView* browser_view) : views::NativeWidgetGtk(browser_frame), browser_view_(browser_view) { set_focus_on_creation(false); }
6,453
31,073
0
static inline int rtnl_vfinfo_size(const struct net_device *dev, u32 ext_filter_mask) { if (dev->dev.parent && dev_is_pci(dev->dev.parent) && (ext_filter_mask & RTEXT_FILTER_VF)) { int num_vfs = dev_num_vf(dev->dev.parent); size_t size = nla_total_size(sizeof(struct nlattr)); size += nla_total_size(num_vfs * sizeof(struct nlattr)); size += num_vfs * (nla_total_size(sizeof(struct ifla_vf_mac)) + nla_total_size(sizeof(struct ifla_vf_vlan)) + nla_total_size(sizeof(struct ifla_vf_tx_rate)) + nla_total_size(sizeof(struct ifla_vf_spoofchk))); return size; } else return 0; }
6,454
157,708
0
void WakeUp() { if (loop_) loop_->Quit(); }
6,455
168,542
0
static inline BOOLEAN guid_eq(const GUID *guid1, const GUID *guid2) { if ((guid1 != NULL) && (guid2 != NULL)) { return (memcmp(guid1, guid2, sizeof(GUID)) == 0); } return false; }
6,456
49,701
0
static int diskstats_open(struct inode *inode, struct file *file) { return seq_open(file, &diskstats_op); }
6,457
171,628
0
static int adev_dump(const audio_hw_device_t *device, int fd) { UNUSED(device); UNUSED(fd); FNLOG(); return 0; }
6,458
168,106
0
void AutocompleteSuggestionsReturned( const std::vector<base::string16>& result) { autofill_manager_->autocomplete_history_manager_->SendSuggestions(&result); }
6,459
845
0
GBool ArthurOutputDev::beginType3Char(GfxState *state, double x, double y, double dx, double dy, CharCode code, Unicode *u, int uLen) { return gFalse; }
6,460
86,634
0
void blk_insert_flush(struct request *rq) { struct request_queue *q = rq->q; unsigned int fflags = q->flush_flags; /* may change, cache */ unsigned int policy = blk_flush_policy(fflags, rq); struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx); /* * @policy now records what operations need to be done. Adjust * REQ_FLUSH and FUA for the driver. */ rq->cmd_flags &= ~REQ_FLUSH; if (!(fflags & REQ_FUA)) rq->cmd_flags &= ~REQ_FUA; /* * An empty flush handed down from a stacking driver may * translate into nothing if the underlying device does not * advertise a write-back cache. In this case, simply * complete the request. */ if (!policy) { if (q->mq_ops) blk_mq_end_request(rq, 0); else __blk_end_bidi_request(rq, 0, 0, 0); return; } BUG_ON(rq->bio != rq->biotail); /*assumes zero or single bio rq */ /* * If there's data but flush is not necessary, the request can be * processed directly without going through flush machinery. Queue * for normal execution. */ if ((policy & REQ_FSEQ_DATA) && !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) { if (q->mq_ops) { blk_mq_insert_request(rq, false, false, true); } else list_add_tail(&rq->queuelist, &q->queue_head); return; } /* * @rq should go through flush machinery. Mark it part of flush * sequence and submit for further processing. */ memset(&rq->flush, 0, sizeof(rq->flush)); INIT_LIST_HEAD(&rq->flush.list); rq->cmd_flags |= REQ_FLUSH_SEQ; rq->flush.saved_end_io = rq->end_io; /* Usually NULL */ if (q->mq_ops) { rq->end_io = mq_flush_data_end_io; spin_lock_irq(&fq->mq_flush_lock); blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0); spin_unlock_irq(&fq->mq_flush_lock); return; } rq->end_io = flush_data_end_io; blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0); }
6,461
181,821
1
monitor_init(void) { struct ssh *ssh = active_state; /* XXX */ struct monitor *mon; mon = xcalloc(1, sizeof(*mon)); monitor_openfds(mon, 1); /* Used to share zlib space across processes */ if (options.compression) { mon->m_zback = mm_create(NULL, MM_MEMSIZE); mon->m_zlib = mm_create(mon->m_zback, 20 * MM_MEMSIZE); /* Compression needs to share state across borders */ ssh_packet_set_compress_hooks(ssh, mon->m_zlib, (ssh_packet_comp_alloc_func *)mm_zalloc, (ssh_packet_comp_free_func *)mm_zfree); } return mon; }
6,462
127,456
0
void FileAPIMessageFilter::OnWrite( int request_id, const GURL& path, const GURL& blob_url, int64 offset) { DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); if (!request_context_) { NOTREACHED(); return; } FileSystemURL url(path); base::PlatformFileError error; if (!HasPermissionsForFile(url, kWriteFilePermissions, &error)) { Send(new FileSystemMsg_DidFail(request_id, error)); return; } FileSystemOperation* operation = GetNewOperation(url, request_id); if (!operation) return; operation->Write( request_context_, url, blob_url, offset, base::Bind(&FileAPIMessageFilter::DidWrite, this, request_id)); }
6,463
87,106
0
CJSON_PUBLIC(cJSON *) cJSON_CreateIntArray(const int *numbers, int count) { size_t i = 0; cJSON *n = NULL; cJSON *p = NULL; cJSON *a = NULL; if ((count < 0) || (numbers == NULL)) { return NULL; } a = cJSON_CreateArray(); for(i = 0; a && (i < (size_t)count); i++) { n = cJSON_CreateNumber(numbers[i]); if (!n) { cJSON_Delete(a); return NULL; } if(!i) { a->child = n; } else { suffix_object(p, n); } p = n; } return a; }
6,464
39,472
0
static inline void yam_rx_flag(struct net_device *dev, struct yam_port *yp) { if (yp->dcd && yp->rx_len >= 3 && yp->rx_len < YAM_MAX_FRAME) { int pkt_len = yp->rx_len - 2 + 1; /* -CRC + kiss */ struct sk_buff *skb; if ((yp->rx_crch & yp->rx_crcl) != 0xFF) { /* Bad crc */ } else { if (!(skb = dev_alloc_skb(pkt_len))) { printk(KERN_WARNING "%s: memory squeeze, dropping packet\n", dev->name); ++dev->stats.rx_dropped; } else { unsigned char *cp; cp = skb_put(skb, pkt_len); *cp++ = 0; /* KISS kludge */ memcpy(cp, yp->rx_buf, pkt_len - 1); skb->protocol = ax25_type_trans(skb, dev); netif_rx(skb); ++dev->stats.rx_packets; } } } yp->rx_len = 0; yp->rx_crcl = 0x21; yp->rx_crch = 0xf3; }
6,465
30,516
0
static void nr_info_stop(struct seq_file *seq, void *v) { spin_unlock_bh(&nr_list_lock); }
6,466
91,097
0
static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb) { struct rtnl_net_dump_cb net_cb = { .tgt_net = sock_net(skb->sk), .skb = skb, .fillargs = { .portid = NETLINK_CB(cb->skb).portid, .seq = cb->nlh->nlmsg_seq, .flags = NLM_F_MULTI, .cmd = RTM_NEWNSID, }, .idx = 0, .s_idx = cb->args[0], }; int err = 0; if (cb->strict_check) { err = rtnl_valid_dump_net_req(cb->nlh, skb->sk, &net_cb, cb); if (err < 0) goto end; } spin_lock_bh(&net_cb.tgt_net->nsid_lock); if (net_cb.fillargs.add_ref && !net_eq(net_cb.ref_net, net_cb.tgt_net) && !spin_trylock_bh(&net_cb.ref_net->nsid_lock)) { spin_unlock_bh(&net_cb.tgt_net->nsid_lock); err = -EAGAIN; goto end; } idr_for_each(&net_cb.tgt_net->netns_ids, rtnl_net_dumpid_one, &net_cb); if (net_cb.fillargs.add_ref && !net_eq(net_cb.ref_net, net_cb.tgt_net)) spin_unlock_bh(&net_cb.ref_net->nsid_lock); spin_unlock_bh(&net_cb.tgt_net->nsid_lock); cb->args[0] = net_cb.idx; end: if (net_cb.fillargs.add_ref) put_net(net_cb.tgt_net); return err < 0 ? err : skb->len; }
6,467
117,445
0
WebGLId WebGraphicsContext3DCommandBufferImpl::getPlatformTextureId() { return context_->GetParentTextureId(); }
6,468
57,848
0
static inline int writer_trylock(struct ld_semaphore *sem) { /* only wake this writer if the active part of the count can be * transitioned from 0 -> 1 */ long count = ldsem_atomic_update(LDSEM_ACTIVE_BIAS, sem); do { if ((count & LDSEM_ACTIVE_MASK) == LDSEM_ACTIVE_BIAS) return 1; if (ldsem_cmpxchg(&count, count - LDSEM_ACTIVE_BIAS, sem)) return 0; } while (1); }
6,469
149,096
0
static int codeTriggerProgram( Parse *pParse, /* The parser context */ TriggerStep *pStepList, /* List of statements inside the trigger body */ int orconf /* Conflict algorithm. (OE_Abort, etc) */ ){ TriggerStep *pStep; Vdbe *v = pParse->pVdbe; sqlite3 *db = pParse->db; assert( pParse->pTriggerTab && pParse->pToplevel ); assert( pStepList ); assert( v!=0 ); for(pStep=pStepList; pStep; pStep=pStep->pNext){ /* Figure out the ON CONFLICT policy that will be used for this step ** of the trigger program. If the statement that caused this trigger ** to fire had an explicit ON CONFLICT, then use it. Otherwise, use ** the ON CONFLICT policy that was specified as part of the trigger ** step statement. Example: ** ** CREATE TRIGGER AFTER INSERT ON t1 BEGIN; ** INSERT OR REPLACE INTO t2 VALUES(new.a, new.b); ** END; ** ** INSERT INTO t1 ... ; -- insert into t2 uses REPLACE policy ** INSERT OR IGNORE INTO t1 ... ; -- insert into t2 uses IGNORE policy */ pParse->eOrconf = (orconf==OE_Default)?pStep->orconf:(u8)orconf; assert( pParse->okConstFactor==0 ); switch( pStep->op ){ case TK_UPDATE: { sqlite3Update(pParse, targetSrcList(pParse, pStep), sqlite3ExprListDup(db, pStep->pExprList, 0), sqlite3ExprDup(db, pStep->pWhere, 0), pParse->eOrconf ); break; } case TK_INSERT: { sqlite3Insert(pParse, targetSrcList(pParse, pStep), sqlite3SelectDup(db, pStep->pSelect, 0), sqlite3IdListDup(db, pStep->pIdList), pParse->eOrconf ); break; } case TK_DELETE: { sqlite3DeleteFrom(pParse, targetSrcList(pParse, pStep), sqlite3ExprDup(db, pStep->pWhere, 0) ); break; } default: assert( pStep->op==TK_SELECT ); { SelectDest sDest; Select *pSelect = sqlite3SelectDup(db, pStep->pSelect, 0); sqlite3SelectDestInit(&sDest, SRT_Discard, 0); sqlite3Select(pParse, pSelect, &sDest); sqlite3SelectDelete(db, pSelect); break; } } if( pStep->op!=TK_SELECT ){ sqlite3VdbeAddOp0(v, OP_ResetCount); } } return 0; }
6,470
125,459
0
void RunGetFileCallbackHelper(const GetFileCallback& callback, GDataFileError* error, FilePath* file_path, std::string* mime_type, GDataFileType* file_type) { DCHECK(error); DCHECK(file_path); DCHECK(mime_type); DCHECK(file_type); if (!callback.is_null()) callback.Run(*error, *file_path, *mime_type, *file_type); }
6,471
153,605
0
GLES2Implementation::DeferErrorCallbacks::DeferErrorCallbacks( GLES2Implementation* gles2_implementation) : gles2_implementation_(gles2_implementation) { DCHECK_EQ(false, gles2_implementation_->deferring_error_callbacks_); gles2_implementation_->deferring_error_callbacks_ = true; }
6,472
79,108
0
process_principals(struct ssh *ssh, FILE *f, const char *file, const struct sshkey_cert *cert, struct sshauthopt **authoptsp) { char loc[256], *line = NULL, *cp, *ep; size_t linesize = 0; u_long linenum = 0; u_int found_principal = 0; if (authoptsp != NULL) *authoptsp = NULL; while (getline(&line, &linesize, f) != -1) { linenum++; /* Always consume entire input */ if (found_principal) continue; /* Skip leading whitespace. */ for (cp = line; *cp == ' ' || *cp == '\t'; cp++) ; /* Skip blank and comment lines. */ if ((ep = strchr(cp, '#')) != NULL) *ep = '\0'; if (!*cp || *cp == '\n') continue; snprintf(loc, sizeof(loc), "%.200s:%lu", file, linenum); if (check_principals_line(ssh, cp, cert, loc, authoptsp) == 0) found_principal = 1; } free(line); return found_principal; }
6,473
1,357
0
static inline struct nfs_inode *nfsi(struct inode *inode) { return container_of(inode, struct nfs_inode, inode); }
6,474
179,245
1
static int key_notify_policy_flush(const struct km_event *c) { struct sk_buff *skb_out; struct sadb_msg *hdr; skb_out = alloc_skb(sizeof(struct sadb_msg) + 16, GFP_ATOMIC); if (!skb_out) return -ENOBUFS; hdr = (struct sadb_msg *) skb_put(skb_out, sizeof(struct sadb_msg)); hdr->sadb_msg_type = SADB_X_SPDFLUSH; hdr->sadb_msg_seq = c->seq; hdr->sadb_msg_pid = c->portid; hdr->sadb_msg_version = PF_KEY_V2; hdr->sadb_msg_errno = (uint8_t) 0; hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t)); pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net); return 0; }
6,475
27,489
0
static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr) { struct iphdr *iph = (struct iphdr *) skb_mac_header(skb); memcpy(haddr, &iph->saddr, 4); return 4; }
6,476
146,052
0
void WebGL2RenderingContextBase::bufferData( GLenum target, MaybeShared<DOMArrayBufferView> src_data, GLenum usage, GLuint src_offset, GLuint length) { if (isContextLost()) return; void* sub_base_address = nullptr; long long sub_byte_length = 0; if (!ValidateSubSourceAndGetData(src_data.View(), src_offset, length, &sub_base_address, &sub_byte_length)) { SynthesizeGLError(GL_INVALID_VALUE, "bufferData", "srcOffset + length too large"); return; } BufferDataImpl(target, sub_byte_length, sub_base_address, usage); }
6,477
181,499
1
tgs_make_reply(krb5_context context,
               krb5_kdc_configuration *config,
               KDC_REQ_BODY *b,
               krb5_const_principal tgt_name,
               const EncTicketPart *tgt,
               const krb5_keyblock *replykey,
               int rk_is_subkey,
               const EncryptionKey *serverkey,
               const krb5_keyblock *sessionkey,
               krb5_kvno kvno,
               AuthorizationData *auth_data,
               hdb_entry_ex *server,
               krb5_principal server_principal,
               const char *server_name,
               hdb_entry_ex *client,
               krb5_principal client_principal,
               hdb_entry_ex *krbtgt,
               krb5_enctype krbtgt_etype,
               krb5_principals spp,
               const krb5_data *rspac,
               const METHOD_DATA *enc_pa_data,
               const char **e_text,
               krb5_data *reply)
{
    KDC_REP rep;
    EncKDCRepPart ek;
    EncTicketPart et;
    KDCOptions f = b->kdc_options;
    krb5_error_code ret;
    int is_weak = 0;

    memset(&rep, 0, sizeof(rep));
    memset(&et, 0, sizeof(et));
    memset(&ek, 0, sizeof(ek));

    rep.pvno = 5;
    rep.msg_type = krb_tgs_rep;

    et.authtime = tgt->authtime;
    _kdc_fix_time(&b->till);
    et.endtime = min(tgt->endtime, *b->till);
    ALLOC(et.starttime);
    *et.starttime = kdc_time;

    ret = check_tgs_flags(context, config, b, tgt, &et);
    if(ret)
        goto out;

    /* We should check the transited encoding if:
       1) the request doesn't ask not to be checked
       2) globally enforcing a check
       3) principal requires checking
       4) we allow non-check per-principal, but principal isn't marked
          as allowing this
       5) we don't globally allow this
    */

#define GLOBAL_FORCE_TRANSITED_CHECK \
    (config->trpolicy == TRPOLICY_ALWAYS_CHECK)
#define GLOBAL_ALLOW_PER_PRINCIPAL \
    (config->trpolicy == TRPOLICY_ALLOW_PER_PRINCIPAL)
#define GLOBAL_ALLOW_DISABLE_TRANSITED_CHECK \
    (config->trpolicy == TRPOLICY_ALWAYS_HONOUR_REQUEST)

    /* these will consult the database in future release */
#define PRINCIPAL_FORCE_TRANSITED_CHECK(P) 0
#define PRINCIPAL_ALLOW_DISABLE_TRANSITED_CHECK(P) 0

    ret = fix_transited_encoding(context, config,
                                 !f.disable_transited_check ||
                                 GLOBAL_FORCE_TRANSITED_CHECK ||
                                 PRINCIPAL_FORCE_TRANSITED_CHECK(server) ||
                                 !((GLOBAL_ALLOW_PER_PRINCIPAL &&
                                    PRINCIPAL_ALLOW_DISABLE_TRANSITED_CHECK(server)) ||
                                   GLOBAL_ALLOW_DISABLE_TRANSITED_CHECK),
                                 &tgt->transited, &et,
                                 krb5_principal_get_realm(context, client_principal),
                                 krb5_principal_get_realm(context, server->entry.principal),
                                 krb5_principal_get_realm(context, krbtgt->entry.principal));
    if(ret)
        goto out;

    copy_Realm(&server_principal->realm, &rep.ticket.realm);
    _krb5_principal2principalname(&rep.ticket.sname, server_principal);
    copy_Realm(&tgt_name->realm, &rep.crealm);
    /*
      if (f.request_anonymous)
          _kdc_make_anonymous_principalname (&rep.cname);
      else
    */
    copy_PrincipalName(&tgt_name->name, &rep.cname);
    rep.ticket.tkt_vno = 5;

    ek.caddr = et.caddr;

    {
        time_t life;
        life = et.endtime - *et.starttime;
        if(client && client->entry.max_life)
            life = min(life, *client->entry.max_life);
        if(server->entry.max_life)
            life = min(life, *server->entry.max_life);
        et.endtime = *et.starttime + life;
    }
    if(f.renewable_ok && tgt->flags.renewable &&
       et.renew_till == NULL && et.endtime < *b->till &&
       tgt->renew_till != NULL)
    {
        et.flags.renewable = 1;
        ALLOC(et.renew_till);
        *et.renew_till = *b->till;
    }
    if(et.renew_till){
        time_t renew;
        renew = *et.renew_till - *et.starttime;
        if(client && client->entry.max_renew)
            renew = min(renew, *client->entry.max_renew);
        if(server->entry.max_renew)
            renew = min(renew, *server->entry.max_renew);
        *et.renew_till = *et.starttime + renew;
    }

    if(et.renew_till){
        *et.renew_till = min(*et.renew_till, *tgt->renew_till);
        *et.starttime = min(*et.starttime, *et.renew_till);
        et.endtime = min(et.endtime, *et.renew_till);
    }

    *et.starttime = min(*et.starttime, et.endtime);

    if(*et.starttime == et.endtime){
        ret = KRB5KDC_ERR_NEVER_VALID;
        goto out;
    }
    if(et.renew_till && et.endtime == *et.renew_till){
        free(et.renew_till);
        et.renew_till = NULL;
        et.flags.renewable = 0;
    }

    et.flags.pre_authent = tgt->flags.pre_authent;
    et.flags.hw_authent = tgt->flags.hw_authent;
    et.flags.anonymous = tgt->flags.anonymous;
    et.flags.ok_as_delegate = server->entry.flags.ok_as_delegate;

    if(rspac->length) {
        /*
         * No not need to filter out the any PAC from the
         * auth_data since it's signed by the KDC.
         */
        ret = _kdc_tkt_add_if_relevant_ad(context, &et,
                                          KRB5_AUTHDATA_WIN2K_PAC, rspac);
        if (ret)
            goto out;
    }

    if (auth_data) {
        unsigned int i = 0;

        /* XXX check authdata */

        if (et.authorization_data == NULL) {
            et.authorization_data = calloc(1, sizeof(*et.authorization_data));
            if (et.authorization_data == NULL) {
                ret = ENOMEM;
                krb5_set_error_message(context, ret, "malloc: out of memory");
                goto out;
            }
        }
        for(i = 0; i < auth_data->len ; i++) {
            ret = add_AuthorizationData(et.authorization_data, &auth_data->val[i]);
            if (ret) {
                krb5_set_error_message(context, ret, "malloc: out of memory");
                goto out;
            }
        }

        /* Filter out type KRB5SignedPath */
        ret = find_KRB5SignedPath(context, et.authorization_data, NULL);
        if (ret == 0) {
            if (et.authorization_data->len == 1) {
                free_AuthorizationData(et.authorization_data);
                free(et.authorization_data);
                et.authorization_data = NULL;
            } else {
                AuthorizationData *ad = et.authorization_data;
                free_AuthorizationDataElement(&ad->val[ad->len - 1]);
                ad->len--;
            }
        }
    }

    ret = krb5_copy_keyblock_contents(context, sessionkey, &et.key);
    if (ret)
        goto out;
    et.crealm = tgt_name->realm;
    et.cname = tgt_name->name;

    ek.key = et.key;
    /* MIT must have at least one last_req */
    ek.last_req.val = calloc(1, sizeof(*ek.last_req.val));
    if (ek.last_req.val == NULL) {
        ret = ENOMEM;
        goto out;
    }
    ek.last_req.len = 1; /* set after alloc to avoid null deref on cleanup */
    ek.nonce = b->nonce;
    ek.flags = et.flags;
    ek.authtime = et.authtime;
    ek.starttime = et.starttime;
    ek.endtime = et.endtime;
    ek.renew_till = et.renew_till;
    ek.srealm = rep.ticket.realm;
    ek.sname = rep.ticket.sname;

    _kdc_log_timestamp(context, config, "TGS-REQ", et.authtime, et.starttime,
                       et.endtime, et.renew_till);

    /* Don't sign cross realm tickets, they can't be checked anyway */
    {
        char *r = get_krbtgt_realm(&ek.sname);

        if (r == NULL || strcmp(r, ek.srealm) == 0) {
            ret = _kdc_add_KRB5SignedPath(context,
                                          config,
                                          krbtgt,
                                          krbtgt_etype,
                                          client_principal,
                                          NULL,
                                          spp,
                                          &et);
            if (ret)
                goto out;
        }
    }

    if (enc_pa_data->len) {
        rep.padata = calloc(1, sizeof(*rep.padata));
        if (rep.padata == NULL) {
            ret = ENOMEM;
            goto out;
        }
        ret = copy_METHOD_DATA(enc_pa_data, rep.padata);
        if (ret)
            goto out;
    }

    if (krb5_enctype_valid(context, serverkey->keytype) != 0
        && _kdc_is_weak_exception(server->entry.principal, serverkey->keytype))
    {
        krb5_enctype_enable(context, serverkey->keytype);
        is_weak = 1;
    }

    /* It is somewhat unclear where the etype in the following
       encryption should come from. What we have is a session
       key in the passed tgt, and a list of preferred etypes
       *for the new ticket*. Should we pick the best possible
       etype, given the keytype in the tgt, or should we look
       at the etype list here as well?  What if the tgt
       session key is DES3 and we want a ticket with a (say)
       CAST session key. Should the DES3 etype be added to the
       etype list, even if we don't want a session key with
       DES3? */
    ret = _kdc_encode_reply(context, config, NULL, 0,
                            &rep, &et, &ek, serverkey->keytype,
                            kvno,
                            serverkey, 0, replykey, rk_is_subkey,
                            e_text, reply);
    if (is_weak)
        krb5_enctype_disable(context, serverkey->keytype);

out:
    free_TGS_REP(&rep);
    free_TransitedEncoding(&et.transited);
    if(et.starttime)
        free(et.starttime);
    if(et.renew_till)
        free(et.renew_till);
    if(et.authorization_data) {
        free_AuthorizationData(et.authorization_data);
        free(et.authorization_data);
    }
    free_LastReq(&ek.last_req);
    memset(et.key.keyvalue.data, 0, et.key.keyvalue.length);
    free_EncryptionKey(&et.key);
    return ret;
}
6,478
81,757
0
static int mpeg4_decode_profile_level(MpegEncContext *s, GetBitContext *gb)
{
    s->avctx->profile = get_bits(gb, 4);
    s->avctx->level = get_bits(gb, 4);
    if (s->avctx->profile == 0 && s->avctx->level == 8) {
        s->avctx->level = 0;
    }
    return 0;
}
6,479
121,155
0
void HTMLInputElement::handleBlurEvent() { m_inputType->handleBlurEvent(); }
6,480
29,974
0
static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
                            struct br_ip *group, unsigned char state)
{
    struct net_bridge_mdb_entry *mp;
    struct net_bridge_port_group *p;
    struct net_bridge_port_group __rcu **pp;
    struct net_bridge_mdb_htable *mdb;
    int err;

    mdb = mlock_dereference(br->mdb, br);
    mp = br_mdb_ip_get(mdb, group);
    if (!mp) {
        mp = br_multicast_new_group(br, port, group);
        err = PTR_ERR(mp);
        if (IS_ERR(mp))
            return err;
    }

    for (pp = &mp->ports;
         (p = mlock_dereference(*pp, br)) != NULL;
         pp = &p->next) {
        if (p->port == port)
            return -EEXIST;
        if ((unsigned long)p->port < (unsigned long)port)
            break;
    }

    p = br_multicast_new_port_group(port, group, *pp, state);
    if (unlikely(!p))
        return -ENOMEM;
    rcu_assign_pointer(*pp, p);

    br_mdb_notify(br->dev, port, group, RTM_NEWMDB);
    return 0;
}
6,481
73,384
0
MagickExport MagickBooleanType IsMonochromeImage(const Image *image,
    ExceptionInfo *exception)
{
    assert(image != (Image *) NULL);
    assert(image->signature == MagickSignature);
    if (image->type == BilevelType)
        return(MagickTrue);
    return(MagickFalse);
}
6,482
14,372
0
static double bessel(double x)
{
    double v = 1;
    double lastv = 0;
    double t = 1;
    int i;

    x = x * x / 4;
    for (i = 1; v != lastv; i++) {
        lastv = v;
        t *= x / (i * i);
        v += t;
    }
    return v;
}
6,483
20,954
0
static inline void clear_TF(struct kernel_vm86_regs *regs) { regs->pt.flags &= ~X86_EFLAGS_TF; }
6,484
27,194
0
int id_create(void *ptr) { static int id = 0; id_link(++id, ptr); return id; }
6,485
164,122
0
bool AppCacheDatabase::LazyOpen(bool create_if_needed) {
  if (db_)
    return true;

  if (is_disabled_)
    return false;

  bool use_in_memory_db = db_file_path_.empty();
  if (!create_if_needed &&
      (use_in_memory_db || !base::PathExists(db_file_path_))) {
    return false;
  }

  db_.reset(new sql::Database);
  meta_table_.reset(new sql::MetaTable);

  db_->set_histogram_tag("AppCache");

  bool opened = false;
  if (use_in_memory_db) {
    opened = db_->OpenInMemory();
  } else if (!base::CreateDirectory(db_file_path_.DirName())) {
    LOG(ERROR) << "Failed to create appcache directory.";
  } else {
    opened = db_->Open(db_file_path_);
    if (opened)
      db_->Preload();
  }

  if (!opened || !db_->QuickIntegrityCheck() || !EnsureDatabaseVersion()) {
    LOG(ERROR) << "Failed to open the appcache database.";
    AppCacheHistograms::CountInitResult(
        AppCacheHistograms::SQL_DATABASE_ERROR);
    if (!use_in_memory_db && DeleteExistingAndCreateNewDatabase())
      return true;
    Disable();
    return false;
  }

  AppCacheHistograms::CountInitResult(AppCacheHistograms::INIT_OK);
  was_corruption_detected_ = false;
  db_->set_error_callback(base::BindRepeating(
      &AppCacheDatabase::OnDatabaseError, base::Unretained(this)));
  return true;
}
6,486
82,545
0
static ALWAYS_INLINE void jslSingleChar() { lex->tk = (unsigned char)lex->currCh; jslGetNextCh(); }
6,487
181,337
1
int svc_rdma_recvfrom(struct svc_rqst *rqstp)
{
    struct svc_xprt *xprt = rqstp->rq_xprt;
    struct svcxprt_rdma *rdma_xprt =
        container_of(xprt, struct svcxprt_rdma, sc_xprt);
    struct svc_rdma_op_ctxt *ctxt = NULL;
    struct rpcrdma_msg *rmsgp;
    int ret = 0;

    dprintk("svcrdma: rqstp=%p\n", rqstp);

    spin_lock(&rdma_xprt->sc_rq_dto_lock);
    if (!list_empty(&rdma_xprt->sc_read_complete_q)) {
        ctxt = list_first_entry(&rdma_xprt->sc_read_complete_q,
                                struct svc_rdma_op_ctxt, list);
        list_del(&ctxt->list);
        spin_unlock(&rdma_xprt->sc_rq_dto_lock);
        rdma_read_complete(rqstp, ctxt);
        goto complete;
    } else if (!list_empty(&rdma_xprt->sc_rq_dto_q)) {
        ctxt = list_first_entry(&rdma_xprt->sc_rq_dto_q,
                                struct svc_rdma_op_ctxt, list);
        list_del(&ctxt->list);
    } else {
        atomic_inc(&rdma_stat_rq_starve);
        clear_bit(XPT_DATA, &xprt->xpt_flags);
        ctxt = NULL;
    }
    spin_unlock(&rdma_xprt->sc_rq_dto_lock);
    if (!ctxt) {
        /* This is the EAGAIN path. The svc_recv routine will
         * return -EAGAIN, the nfsd thread will go to call into
         * svc_recv again and we shouldn't be on the active
         * transport list
         */
        if (test_bit(XPT_CLOSE, &xprt->xpt_flags))
            goto defer;
        goto out;
    }
    dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p\n",
            ctxt, rdma_xprt, rqstp);
    atomic_inc(&rdma_stat_recv);

    /* Build up the XDR from the receive buffers. */
    rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);

    /* Decode the RDMA header. */
    rmsgp = (struct rpcrdma_msg *)rqstp->rq_arg.head[0].iov_base;
    ret = svc_rdma_xdr_decode_req(&rqstp->rq_arg);
    if (ret < 0)
        goto out_err;
    if (ret == 0)
        goto out_drop;
    rqstp->rq_xprt_hlen = ret;

    if (svc_rdma_is_backchannel_reply(xprt, rmsgp)) {
        ret = svc_rdma_handle_bc_reply(xprt->xpt_bc_xprt, rmsgp,
                                       &rqstp->rq_arg);
        svc_rdma_put_context(ctxt, 0);
        if (ret)
            goto repost;
        return ret;
    }

    /* Read read-list data. */
    ret = rdma_read_chunks(rdma_xprt, rmsgp, rqstp, ctxt);
    if (ret > 0) {
        /* read-list posted, defer until data received from client. */
        goto defer;
    } else if (ret < 0) {
        /* Post of read-list failed, free context. */
        svc_rdma_put_context(ctxt, 1);
        return 0;
    }

complete:
    ret = rqstp->rq_arg.head[0].iov_len +
        rqstp->rq_arg.page_len +
        rqstp->rq_arg.tail[0].iov_len;
    svc_rdma_put_context(ctxt, 0);
out:
    dprintk("svcrdma: ret=%d, rq_arg.len=%u, "
            "rq_arg.head[0].iov_base=%p, rq_arg.head[0].iov_len=%zd\n",
            ret, rqstp->rq_arg.len, rqstp->rq_arg.head[0].iov_base,
            rqstp->rq_arg.head[0].iov_len);
    rqstp->rq_prot = IPPROTO_MAX;
    svc_xprt_copy_addrs(rqstp, xprt);
    return ret;

out_err:
    svc_rdma_send_error(rdma_xprt, rmsgp, ret);
    svc_rdma_put_context(ctxt, 0);
    return 0;

defer:
    return 0;

out_drop:
    svc_rdma_put_context(ctxt, 1);

repost:
    return svc_rdma_repost_recv(rdma_xprt, GFP_KERNEL);
}
6,488
160,197
0
void PDFiumEngine::AppendBlankPages(int num_pages) {
  DCHECK_NE(num_pages, 0);

  if (!doc_)
    return;

  selection_.clear();
  pending_pages_.clear();

  while (pages_.size() > 1) {
    pages_.pop_back();
    FPDFPage_Delete(doc_, pages_.size());
  }

  std::vector<pp::Rect> page_rects;
  pp::Size page_size = GetPageSize(0);
  page_size.Enlarge(kPageShadowLeft + kPageShadowRight,
                    kPageShadowTop + kPageShadowBottom);
  pp::Size old_document_size = document_size_;
  document_size_ = pp::Size(page_size.width(), 0);
  for (int i = 0; i < num_pages; ++i) {
    if (i != 0) {
      document_size_.Enlarge(0, kPageSeparatorThickness);
    }
    pp::Rect rect(pp::Point(0, document_size_.height()), page_size);
    page_rects.push_back(rect);
    document_size_.Enlarge(0, page_size.height());
  }

  for (int i = 1; i < num_pages; ++i) {
    pp::Rect page_rect(page_rects[i]);
    page_rect.Inset(kPageShadowLeft, kPageShadowTop, kPageShadowRight,
                    kPageShadowBottom);
    double width_in_points =
        ConvertUnitDouble(page_rect.width(), kPixelsPerInch, kPointsPerInch);
    double height_in_points =
        ConvertUnitDouble(page_rect.height(), kPixelsPerInch, kPointsPerInch);
    FPDF_PAGE temp_page =
        FPDFPage_New(doc_, i, width_in_points, height_in_points);
    FPDF_ClosePage(temp_page);
    pages_.push_back(std::make_unique<PDFiumPage>(this, i, page_rect, true));
  }

  CalculateVisiblePages();
  if (document_size_ != old_document_size)
    client_->DocumentSizeUpdated(document_size_);
}
6,489
29,342
0
void kvm_make_scan_ioapic_request(struct kvm *kvm) { make_all_cpus_request(kvm, KVM_REQ_SCAN_IOAPIC); }
6,490
100,618
0
VoiceInteractionIcon() : Layer(ui::LAYER_NOT_DRAWN) {
  set_name("VoiceInteractionOverlay:ICON_LAYER");
  SetBounds(gfx::Rect(0, 0, kIconInitSizeDip, kIconInitSizeDip));
  SetFillsBoundsOpaquely(false);
  SetMasksToBounds(false);
  InitMoleculeShape();
}
6,491
117,996
0
v8::Local<v8::Context> V8Proxy::mainWorldContext(Frame* frame) { V8Proxy* proxy = retrieve(frame); if (!proxy) return v8::Local<v8::Context>(); return proxy->mainWorldContext(); }
6,492
89,630
0
setupM (gcry_cipher_hd_t c)
{
#if defined(GCM_USE_INTEL_PCLMUL) || defined(GCM_USE_ARM_PMULL)
  unsigned int features = _gcry_get_hw_features ();
#endif

  if (0)
    ;
#ifdef GCM_USE_INTEL_PCLMUL
  else if (features & HWF_INTEL_PCLMUL)
    {
      c->u_mode.gcm.ghash_fn = _gcry_ghash_intel_pclmul;
      _gcry_ghash_setup_intel_pclmul (c);
    }
#endif
#ifdef GCM_USE_ARM_PMULL
  else if (features & HWF_ARM_PMULL)
    {
      c->u_mode.gcm.ghash_fn = ghash_armv8_ce_pmull;
      ghash_setup_armv8_ce_pmull (c);
    }
#endif
#ifdef GCM_USE_ARM_NEON
  else if (features & HWF_ARM_NEON)
    {
      c->u_mode.gcm.ghash_fn = ghash_armv7_neon;
      ghash_setup_armv7_neon (c);
    }
#endif
  else
    {
      c->u_mode.gcm.ghash_fn = ghash_internal;
      fillM (c);
    }
}
6,493
55,481
0
int __sched _cond_resched(void) { if (should_resched(0)) { preempt_schedule_common(); return 1; } return 0; }
6,494
90,080
0
size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel) { DEBUGLOG(4, "ZSTD_initCStream"); return ZSTD_initCStream_srcSize(zcs, compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN); }
6,495
110,340
0
void ScrollbarOverlayChanged(pp::Scrollbar_Dev scrollbar, bool overlay) {
  if (ppp_scrollbar_ != NULL) {
    ppp_scrollbar_->OverlayChanged(plugin_->pp_instance(),
                                   scrollbar.pp_resource(),
                                   PP_FromBool(overlay));
  }
}
6,496
111,601
0
void TestLoadMetadataFromCache() {
  file_system_->LoadRootFeedFromCache(
      false,  // load_from_server
      FilePath(FILE_PATH_LITERAL("drive")),
      base::Bind(&GDataFileSystemTest::OnExpectToFindEntry,
                 FilePath(FILE_PATH_LITERAL("drive"))));
  BrowserThread::GetBlockingPool()->FlushForTesting();
  message_loop_.RunAllPending();
}
6,497
97,264
0
virtual void call(XMLTokenizer* tokenizer) { tokenizer->characters(s, len); }
6,498
5,021
0
int X509_get_pubkey_parameters(EVP_PKEY *pkey, STACK_OF(X509) *chain)
{
    EVP_PKEY *ktmp = NULL, *ktmp2;
    int i, j;

    if ((pkey != NULL) && !EVP_PKEY_missing_parameters(pkey))
        return 1;

    for (i = 0; i < sk_X509_num(chain); i++) {
        ktmp = X509_get_pubkey(sk_X509_value(chain, i));
        if (ktmp == NULL) {
            X509err(X509_F_X509_GET_PUBKEY_PARAMETERS,
                    X509_R_UNABLE_TO_GET_CERTS_PUBLIC_KEY);
            return 0;
        }
        if (!EVP_PKEY_missing_parameters(ktmp))
            break;
        else {
            EVP_PKEY_free(ktmp);
            ktmp = NULL;
        }
    }
    if (ktmp == NULL) {
        X509err(X509_F_X509_GET_PUBKEY_PARAMETERS,
                X509_R_UNABLE_TO_FIND_PARAMETERS_IN_CHAIN);
        return 0;
    }

    /* first, populate the other certs */
    for (j = i - 1; j >= 0; j--) {
        ktmp2 = X509_get_pubkey(sk_X509_value(chain, j));
        EVP_PKEY_copy_parameters(ktmp2, ktmp);
        EVP_PKEY_free(ktmp2);
    }

    if (pkey != NULL)
        EVP_PKEY_copy_parameters(pkey, ktmp);
    EVP_PKEY_free(ktmp);
    return 1;
}
6,499