unique_id (int64, 13 to 189k) | target (int64, 0 to 1) | code (string, lengths 20 to 241k) | __index_level_0__ (int64, 0 to 18.9k) |
---|---|---|---|
35,825 | 0 | static void update_pvclock_gtod(struct timekeeper *tk)
{
struct pvclock_gtod_data *vdata = &pvclock_gtod_data;
u64 boot_ns;
boot_ns = ktime_to_ns(ktime_add(tk->tkr.base_mono, tk->offs_boot));
write_seqcount_begin(&vdata->seq);
/* copy pvclock gtod data */
vdata->clock.vclock_mode = tk->tkr.clock->archdata.vclock_mode;
vdata->clock.cycle_last = tk->tkr.cycle_last;
vdata->clock.mask = tk->tkr.mask;
vdata->clock.mult = tk->tkr.mult;
vdata->clock.shift = tk->tkr.shift;
vdata->boot_ns = boot_ns;
vdata->nsec_base = tk->tkr.xtime_nsec;
write_seqcount_end(&vdata->seq);
}
| 14,800 |
146,709 | 0 | void Document::BeginLifecycleUpdatesIfRenderingReady() {
if (!IsActive())
return;
if (!IsRenderingReady())
return;
View()->BeginLifecycleUpdates();
}
| 14,801 |
36,033 | 0 | get_ticket_handler(struct ceph_auth_client *ac, int service)
{
struct ceph_x_ticket_handler *th;
struct ceph_x_info *xi = ac->private;
struct rb_node *parent = NULL, **p = &xi->ticket_handlers.rb_node;
while (*p) {
parent = *p;
th = rb_entry(parent, struct ceph_x_ticket_handler, node);
if (service < th->service)
p = &(*p)->rb_left;
else if (service > th->service)
p = &(*p)->rb_right;
else
return th;
}
/* add it */
th = kzalloc(sizeof(*th), GFP_NOFS);
if (!th)
return ERR_PTR(-ENOMEM);
th->service = service;
rb_link_node(&th->node, parent, p);
rb_insert_color(&th->node, &xi->ticket_handlers);
return th;
}
| 14,802 |
158,752 | 0 | bool GLES2DecoderImpl::ValidateCompressedTexDimensions(
const char* function_name, GLenum target, GLint level,
GLsizei width, GLsizei height, GLsizei depth, GLenum format) {
const char* error_message = "";
if (!::gpu::gles2::ValidateCompressedTexDimensions(
target, level, width, height, depth, format,
feature_info_->IsWebGLContext(), &error_message)) {
LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, function_name, error_message);
return false;
}
return true;
}
| 14,803 |
12,010 | 0 | DEFINE_RUN_ONCE_STATIC_ALT(ossl_init_no_config, ossl_init_config)
{
#ifdef OPENSSL_INIT_DEBUG
fprintf(stderr,
"OPENSSL_INIT: ossl_init_config: openssl_no_config_int()\n");
#endif
openssl_no_config_int();
config_inited = 1;
return 1;
}
| 14,804 |
187,356 | 1 | PasswordAutofillAgent::PasswordAutofillAgent(content::RenderFrame* render_frame)
: content::RenderFrameObserver(render_frame),
logging_state_active_(false),
was_username_autofilled_(false),
was_password_autofilled_(false),
weak_ptr_factory_(this) {
Send(new AutofillHostMsg_PasswordAutofillAgentConstructed(routing_id()));
}
| 14,805 |
106,426 | 0 | void BlobURLRequestJob::SetExtraRequestHeaders(
const net::HttpRequestHeaders& headers) {
std::string range_header;
if (headers.GetHeader(net::HttpRequestHeaders::kRange, &range_header)) {
std::vector<net::HttpByteRange> ranges;
if (net::HttpUtil::ParseRangeHeader(range_header, &ranges)) {
if (ranges.size() == 1) {
byte_range_set_ = true;
byte_range_ = ranges[0];
} else {
NotifyFailure(net::ERR_REQUEST_RANGE_NOT_SATISFIABLE);
}
}
}
}
| 14,806 |
134,931 | 0 | bool IsGaiaIdMigrationStarted() {
base::CommandLine* command_line = base::CommandLine::ForCurrentProcess();
if (!command_line->HasSwitch(kTestCrosGaiaIdMigration))
return false;
return command_line->GetSwitchValueASCII(kTestCrosGaiaIdMigration) ==
kTestCrosGaiaIdMigrationStarted;
}
| 14,807 |
69,390 | 0 | static int tls_process_ske_srp(SSL *s, PACKET *pkt, EVP_PKEY **pkey, int *al)
{
#ifndef OPENSSL_NO_SRP
PACKET prime, generator, salt, server_pub;
if (!PACKET_get_length_prefixed_2(pkt, &prime)
|| !PACKET_get_length_prefixed_2(pkt, &generator)
|| !PACKET_get_length_prefixed_1(pkt, &salt)
|| !PACKET_get_length_prefixed_2(pkt, &server_pub)) {
*al = SSL_AD_DECODE_ERROR;
SSLerr(SSL_F_TLS_PROCESS_SKE_SRP, SSL_R_LENGTH_MISMATCH);
return 0;
}
if ((s->srp_ctx.N =
BN_bin2bn(PACKET_data(&prime),
PACKET_remaining(&prime), NULL)) == NULL
|| (s->srp_ctx.g =
BN_bin2bn(PACKET_data(&generator),
PACKET_remaining(&generator), NULL)) == NULL
|| (s->srp_ctx.s =
BN_bin2bn(PACKET_data(&salt),
PACKET_remaining(&salt), NULL)) == NULL
|| (s->srp_ctx.B =
BN_bin2bn(PACKET_data(&server_pub),
PACKET_remaining(&server_pub), NULL)) == NULL) {
*al = SSL_AD_INTERNAL_ERROR;
SSLerr(SSL_F_TLS_PROCESS_SKE_SRP, ERR_R_BN_LIB);
return 0;
}
if (!srp_verify_server_param(s, al)) {
*al = SSL_AD_DECODE_ERROR;
SSLerr(SSL_F_TLS_PROCESS_SKE_SRP, SSL_R_BAD_SRP_PARAMETERS);
return 0;
}
/* We must check if there is a certificate */
if (s->s3->tmp.new_cipher->algorithm_auth & (SSL_aRSA | SSL_aDSS))
*pkey = X509_get0_pubkey(s->session->peer);
return 1;
#else
SSLerr(SSL_F_TLS_PROCESS_SKE_SRP, ERR_R_INTERNAL_ERROR);
*al = SSL_AD_INTERNAL_ERROR;
return 0;
#endif
}
| 14,808 |
55,315 | 0 | static inline void atl2_irq_disable(struct atl2_adapter *adapter)
{
ATL2_WRITE_REG(&adapter->hw, REG_IMR, 0);
ATL2_WRITE_FLUSH(&adapter->hw);
synchronize_irq(adapter->pdev->irq);
}
| 14,809 |
28,533 | 0 | static int qeth_core_thaw(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
if (card->discipline && card->discipline->thaw)
return card->discipline->thaw(gdev);
return 0;
}
| 14,810 |
57,831 | 0 | static inline int __ldsem_down_read_nested(struct ld_semaphore *sem,
int subclass, long timeout)
{
long count;
lockdep_acquire_read(sem, subclass, 0, _RET_IP_);
count = ldsem_atomic_update(LDSEM_READ_BIAS, sem);
if (count <= 0) {
lock_stat(sem, contended);
if (!down_read_failed(sem, count, timeout)) {
lockdep_release(sem, 1, _RET_IP_);
return 0;
}
}
lock_stat(sem, acquired);
return 1;
}
| 14,811 |
4,512 | 0 | PHP_FUNCTION(openssl_pkcs7_verify)
{
X509_STORE * store = NULL;
zval * cainfo = NULL;
STACK_OF(X509) *signers= NULL;
STACK_OF(X509) *others = NULL;
PKCS7 * p7 = NULL;
BIO * in = NULL, * datain = NULL, * dataout = NULL;
zend_long flags = 0;
char * filename;
size_t filename_len;
char * extracerts = NULL;
size_t extracerts_len = 0;
char * signersfilename = NULL;
size_t signersfilename_len = 0;
char * datafilename = NULL;
size_t datafilename_len = 0;
RETVAL_LONG(-1);
if (zend_parse_parameters(ZEND_NUM_ARGS(), "pl|papp", &filename, &filename_len,
&flags, &signersfilename, &signersfilename_len, &cainfo,
&extracerts, &extracerts_len, &datafilename, &datafilename_len) == FAILURE) {
return;
}
if (extracerts) {
others = load_all_certs_from_file(extracerts);
if (others == NULL) {
goto clean_exit;
}
}
flags = flags & ~PKCS7_DETACHED;
store = setup_verify(cainfo);
if (!store) {
goto clean_exit;
}
if (php_openssl_open_base_dir_chk(filename)) {
goto clean_exit;
}
in = BIO_new_file(filename, (flags & PKCS7_BINARY) ? "rb" : "r");
if (in == NULL) {
goto clean_exit;
}
p7 = SMIME_read_PKCS7(in, &datain);
if (p7 == NULL) {
#if DEBUG_SMIME
zend_printf("SMIME_read_PKCS7 failed\n");
#endif
goto clean_exit;
}
if (datafilename) {
if (php_openssl_open_base_dir_chk(datafilename)) {
goto clean_exit;
}
dataout = BIO_new_file(datafilename, "w");
if (dataout == NULL) {
goto clean_exit;
}
}
#if DEBUG_SMIME
zend_printf("Calling PKCS7 verify\n");
#endif
if (PKCS7_verify(p7, others, store, datain, dataout, (int)flags)) {
RETVAL_TRUE;
if (signersfilename) {
BIO *certout;
if (php_openssl_open_base_dir_chk(signersfilename)) {
goto clean_exit;
}
certout = BIO_new_file(signersfilename, "w");
if (certout) {
int i;
signers = PKCS7_get0_signers(p7, NULL, (int)flags);
for(i = 0; i < sk_X509_num(signers); i++) {
PEM_write_bio_X509(certout, sk_X509_value(signers, i));
}
BIO_free(certout);
sk_X509_free(signers);
} else {
php_error_docref(NULL, E_WARNING, "signature OK, but cannot open %s for writing", signersfilename);
RETVAL_LONG(-1);
}
}
goto clean_exit;
} else {
RETVAL_FALSE;
}
clean_exit:
X509_STORE_free(store);
BIO_free(datain);
BIO_free(in);
BIO_free(dataout);
PKCS7_free(p7);
sk_X509_free(others);
}
| 14,812 |
22,660 | 0 | wait_for_completion_interruptible_timeout(struct completion *x,
unsigned long timeout)
{
return wait_for_common(x, timeout, TASK_INTERRUPTIBLE);
}
| 14,813 |
140,295 | 0 | bool Editor::canCopy() const {
if (imageElementFromImageDocument(frame().document()))
return true;
FrameSelection& selection = frame().selection();
return selection.computeVisibleSelectionInDOMTreeDeprecated().isRange() &&
!selection.isInPasswordField();
}
| 14,814 |
4,991 | 0 | X509_VERIFY_PARAM *X509_STORE_CTX_get0_param(X509_STORE_CTX *ctx)
{
return ctx->param;
}
| 14,815 |
105,663 | 0 | void TreeView::RecursivelyDelete(NodeDetails* node) {
DCHECK(node);
HTREEITEM item = node->tree_item;
DCHECK(item);
for (HTREEITEM child = TreeView_GetChild(tree_view_, item); child ;) {
HTREEITEM next = TreeView_GetNextSibling(tree_view_, child);
RecursivelyDelete(GetNodeDetailsByTreeItem(child));
child = next;
}
TreeView_DeleteItem(tree_view_, item);
id_to_details_map_.erase(node->id);
node_to_details_map_.erase(node->node);
delete node;
}
| 14,816 |
106,330 | 0 | bool SyncBackendHost::HasUnsyncedItems() const {
DCHECK(syncapi_initialized_);
return core_->syncapi()->HasUnsyncedItems();
}
| 14,817 |
56,846 | 0 | static void abort_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
{
struct cpl_abort_req *req = cplhdr(skb);
PDBG("%s t3cdev %p\n", __func__, dev);
req->cmd = CPL_ABORT_NO_RST;
iwch_cxgb3_ofld_send(dev, skb);
}
| 14,818 |
125,476 | 0 | void GDataFileSystem::UnpinIfPinned(
const std::string& resource_id,
const std::string& md5,
bool success,
const GDataCacheEntry& cache_entry) {
DCHECK(BrowserThread::CurrentlyOn(BrowserThread::UI));
if (success && cache_entry.is_pinned())
cache_->UnpinOnUIThread(resource_id, md5, CacheOperationCallback());
}
| 14,819 |
87,737 | 0 | int hns_roce_init(struct hns_roce_dev *hr_dev)
{
int ret;
struct device *dev = hr_dev->dev;
if (hr_dev->hw->reset) {
ret = hr_dev->hw->reset(hr_dev, true);
if (ret) {
dev_err(dev, "Reset RoCE engine failed!\n");
return ret;
}
}
if (hr_dev->hw->cmq_init) {
ret = hr_dev->hw->cmq_init(hr_dev);
if (ret) {
dev_err(dev, "Init RoCE Command Queue failed!\n");
goto error_failed_cmq_init;
}
}
ret = hr_dev->hw->hw_profile(hr_dev);
if (ret) {
dev_err(dev, "Get RoCE engine profile failed!\n");
goto error_failed_cmd_init;
}
ret = hns_roce_cmd_init(hr_dev);
if (ret) {
dev_err(dev, "cmd init failed!\n");
goto error_failed_cmd_init;
}
ret = hr_dev->hw->init_eq(hr_dev);
if (ret) {
dev_err(dev, "eq init failed!\n");
goto error_failed_eq_table;
}
if (hr_dev->cmd_mod) {
ret = hns_roce_cmd_use_events(hr_dev);
if (ret) {
dev_err(dev, "Switch to event-driven cmd failed!\n");
goto error_failed_use_event;
}
}
ret = hns_roce_init_hem(hr_dev);
if (ret) {
dev_err(dev, "init HEM(Hardware Entry Memory) failed!\n");
goto error_failed_init_hem;
}
ret = hns_roce_setup_hca(hr_dev);
if (ret) {
dev_err(dev, "setup hca failed!\n");
goto error_failed_setup_hca;
}
if (hr_dev->hw->hw_init) {
ret = hr_dev->hw->hw_init(hr_dev);
if (ret) {
dev_err(dev, "hw_init failed!\n");
goto error_failed_engine_init;
}
}
ret = hns_roce_register_device(hr_dev);
if (ret)
goto error_failed_register_device;
return 0;
error_failed_register_device:
if (hr_dev->hw->hw_exit)
hr_dev->hw->hw_exit(hr_dev);
error_failed_engine_init:
hns_roce_cleanup_bitmap(hr_dev);
error_failed_setup_hca:
hns_roce_cleanup_hem(hr_dev);
error_failed_init_hem:
if (hr_dev->cmd_mod)
hns_roce_cmd_use_polling(hr_dev);
error_failed_use_event:
hr_dev->hw->cleanup_eq(hr_dev);
error_failed_eq_table:
hns_roce_cmd_cleanup(hr_dev);
error_failed_cmd_init:
if (hr_dev->hw->cmq_exit)
hr_dev->hw->cmq_exit(hr_dev);
error_failed_cmq_init:
if (hr_dev->hw->reset) {
ret = hr_dev->hw->reset(hr_dev, false);
if (ret)
dev_err(dev, "Dereset RoCE engine failed!\n");
}
return ret;
}
| 14,820 |
72,189 | 0 | mm_create(struct mm_master *mmalloc, size_t size)
{
void *address;
struct mm_master *mm;
if (mmalloc == NULL)
mm = xcalloc(1, sizeof(struct mm_master));
else
mm = mm_xmalloc(mmalloc, sizeof(struct mm_master));
/*
* If the memory map has a mm_master it can be completely
* shared including authentication between the child
* and the client.
*/
mm->mmalloc = mmalloc;
address = mmap(NULL, size, PROT_WRITE|PROT_READ, MAP_ANON|MAP_SHARED,
-1, 0);
if (address == MAP_FAILED)
fatal("mmap(%zu): %s", size, strerror(errno));
mm->address = address;
mm->size = size;
RB_INIT(&mm->rb_free);
RB_INIT(&mm->rb_allocated);
mm_make_entry(mm, &mm->rb_free, address, size);
return (mm);
}
| 14,821 |
138,314 | 0 | void InspectorAccessibilityAgent::addChildren(
AXObject& axObject,
AXObject* inspectedAXObject,
std::unique_ptr<protocol::Array<AXNodeId>>& childIds,
std::unique_ptr<protocol::Array<AXNode>>& nodes,
AXObjectCacheImpl& cache) const {
if (inspectedAXObject && inspectedAXObject->accessibilityIsIgnored() &&
&axObject == inspectedAXObject->parentObjectUnignored()) {
childIds->addItem(String::number(inspectedAXObject->axObjectID()));
return;
}
const AXObject::AXObjectVector& children = axObject.children();
for (unsigned i = 0; i < children.size(); i++) {
AXObject& childAXObject = *children[i].get();
childIds->addItem(String::number(childAXObject.axObjectID()));
if (&childAXObject == inspectedAXObject)
continue;
if (&axObject != inspectedAXObject &&
(axObject.getNode() ||
axObject.parentObjectUnignored() != inspectedAXObject)) {
continue;
}
std::unique_ptr<AXNode> childNode = buildProtocolAXObject(
childAXObject, inspectedAXObject, true, nodes, cache);
nodes->addItem(std::move(childNode));
}
}
| 14,822 |
51,759 | 0 | dissect_rpcap_startcap_request (tvbuff_t *tvb, packet_info *pinfo,
proto_tree *parent_tree, gint offset)
{
proto_tree *tree, *field_tree;
proto_item *ti, *field_ti;
guint16 flags;
ti = proto_tree_add_item (parent_tree, hf_startcap_request, tvb, offset, -1, ENC_NA);
tree = proto_item_add_subtree (ti, ett_startcap_request);
proto_tree_add_item (tree, hf_snaplen, tvb, offset, 4, ENC_BIG_ENDIAN);
offset += 4;
proto_tree_add_item (tree, hf_read_timeout, tvb, offset, 4, ENC_BIG_ENDIAN);
offset += 4;
flags = tvb_get_ntohs (tvb, offset);
field_ti = proto_tree_add_uint_format (tree, hf_flags, tvb, offset, 2, flags, "Flags");
field_tree = proto_item_add_subtree (field_ti, ett_startcap_flags);
proto_tree_add_item (field_tree, hf_flags_promisc, tvb, offset, 2, ENC_BIG_ENDIAN);
proto_tree_add_item (field_tree, hf_flags_dgram, tvb, offset, 2, ENC_BIG_ENDIAN);
proto_tree_add_item (field_tree, hf_flags_serveropen, tvb, offset, 2, ENC_BIG_ENDIAN);
proto_tree_add_item (field_tree, hf_flags_inbound, tvb, offset, 2, ENC_BIG_ENDIAN);
proto_tree_add_item (field_tree, hf_flags_outbound, tvb, offset, 2, ENC_BIG_ENDIAN);
if (flags & 0x1F) {
gchar *flagstr = wmem_strdup_printf (wmem_packet_scope(), "%s%s%s%s%s",
(flags & FLAG_PROMISC) ? ", Promiscuous" : "",
(flags & FLAG_DGRAM) ? ", Datagram" : "",
(flags & FLAG_SERVEROPEN) ? ", ServerOpen" : "",
(flags & FLAG_INBOUND) ? ", Inbound" : "",
(flags & FLAG_OUTBOUND) ? ", Outbound" : "");
proto_item_append_text (field_ti, ":%s", &flagstr[1]);
} else {
proto_item_append_text (field_ti, " (none)");
}
offset += 2;
proto_tree_add_item (tree, hf_client_port, tvb, offset, 2, ENC_BIG_ENDIAN);
offset += 2;
dissect_rpcap_filter (tvb, pinfo, tree, offset);
}
| 14,823 |
175,343 | 0 | bool SniffOgg(
const sp<DataSource> &source, String8 *mimeType, float *confidence,
sp<AMessage> *) {
char tmp[4];
if (source->readAt(0, tmp, 4) < 4 || memcmp(tmp, "OggS", 4)) {
return false;
}
mimeType->setTo(MEDIA_MIMETYPE_CONTAINER_OGG);
*confidence = 0.2f;
return true;
}
| 14,824 |
137,476 | 0 | RunLoop::Delegate::Client::Client(Delegate* outer) : outer_(outer) {}
| 14,825 |
68 | 0 | static void timelib_eat_spaces(char **ptr)
{
while (**ptr == ' ' || **ptr == '\t') {
++*ptr;
}
}
| 14,826 |
92,318 | 0 | entityValueProcessor(XML_Parser parser,
const char *s,
const char *end,
const char **nextPtr)
{
const char *start = s;
const char *next = s;
const ENCODING *enc = parser->m_encoding;
int tok;
for (;;) {
tok = XmlPrologTok(enc, start, end, &next);
if (tok <= 0) {
if (!parser->m_parsingStatus.finalBuffer && tok != XML_TOK_INVALID) {
*nextPtr = s;
return XML_ERROR_NONE;
}
switch (tok) {
case XML_TOK_INVALID:
return XML_ERROR_INVALID_TOKEN;
case XML_TOK_PARTIAL:
return XML_ERROR_UNCLOSED_TOKEN;
case XML_TOK_PARTIAL_CHAR:
return XML_ERROR_PARTIAL_CHAR;
case XML_TOK_NONE: /* start == end */
default:
break;
}
/* found end of entity value - can store it now */
return storeEntityValue(parser, enc, s, end);
}
start = next;
}
}
| 14,827 |
25,780 | 0 | static void x86_pmu_cancel_txn(struct pmu *pmu)
{
__this_cpu_and(cpu_hw_events.group_flag, ~PERF_EVENT_TXN);
/*
* Truncate the collected events.
*/
__this_cpu_sub(cpu_hw_events.n_added, __this_cpu_read(cpu_hw_events.n_txn));
__this_cpu_sub(cpu_hw_events.n_events, __this_cpu_read(cpu_hw_events.n_txn));
perf_pmu_enable(pmu);
}
| 14,828 |
64,456 | 0 | root_scan_phase(mrb_state *mrb, mrb_gc *gc)
{
size_t i, e;
if (!is_minor_gc(gc)) {
gc->gray_list = NULL;
gc->atomic_gray_list = NULL;
}
mrb_gc_mark_gv(mrb);
/* mark arena */
for (i=0,e=gc->arena_idx; i<e; i++) {
mrb_gc_mark(mrb, gc->arena[i]);
}
/* mark class hierarchy */
mrb_gc_mark(mrb, (struct RBasic*)mrb->object_class);
/* mark built-in classes */
mrb_gc_mark(mrb, (struct RBasic*)mrb->class_class);
mrb_gc_mark(mrb, (struct RBasic*)mrb->module_class);
mrb_gc_mark(mrb, (struct RBasic*)mrb->proc_class);
mrb_gc_mark(mrb, (struct RBasic*)mrb->string_class);
mrb_gc_mark(mrb, (struct RBasic*)mrb->array_class);
mrb_gc_mark(mrb, (struct RBasic*)mrb->hash_class);
mrb_gc_mark(mrb, (struct RBasic*)mrb->float_class);
mrb_gc_mark(mrb, (struct RBasic*)mrb->fixnum_class);
mrb_gc_mark(mrb, (struct RBasic*)mrb->true_class);
mrb_gc_mark(mrb, (struct RBasic*)mrb->false_class);
mrb_gc_mark(mrb, (struct RBasic*)mrb->nil_class);
mrb_gc_mark(mrb, (struct RBasic*)mrb->symbol_class);
mrb_gc_mark(mrb, (struct RBasic*)mrb->kernel_module);
mrb_gc_mark(mrb, (struct RBasic*)mrb->eException_class);
mrb_gc_mark(mrb, (struct RBasic*)mrb->eStandardError_class);
/* mark top_self */
mrb_gc_mark(mrb, (struct RBasic*)mrb->top_self);
/* mark exception */
mrb_gc_mark(mrb, (struct RBasic*)mrb->exc);
/* mark backtrace */
mrb_gc_mark(mrb, (struct RBasic*)mrb->backtrace.exc);
e = (size_t)mrb->backtrace.n;
for (i=0; i<e; i++) {
mrb_gc_mark(mrb, (struct RBasic*)mrb->backtrace.entries[i].klass);
}
/* mark pre-allocated exception */
mrb_gc_mark(mrb, (struct RBasic*)mrb->nomem_err);
mrb_gc_mark(mrb, (struct RBasic*)mrb->stack_err);
#ifdef MRB_GC_FIXED_ARENA
mrb_gc_mark(mrb, (struct RBasic*)mrb->arena_err);
#endif
mark_context(mrb, mrb->root_c);
if (mrb->root_c->fib) {
mrb_gc_mark(mrb, (struct RBasic*)mrb->root_c->fib);
}
if (mrb->root_c != mrb->c) {
mark_context(mrb, mrb->c);
}
}
| 14,829 |
152,083 | 0 | void RenderFrameHostImpl::SetAccessibilityCallbackForTesting(
const AccessibilityCallbackForTesting& callback) {
accessibility_testing_callback_ = callback;
}
| 14,830 |
72,743 | 0 | static int jas_icctxtdesc_input(jas_iccattrval_t *attrval, jas_stream_t *in,
int cnt)
{
int n;
int c;
jas_icctxtdesc_t *txtdesc = &attrval->data.txtdesc;
txtdesc->ascdata = 0;
txtdesc->ucdata = 0;
if (jas_iccgetuint32(in, &txtdesc->asclen))
goto error;
if (!(txtdesc->ascdata = jas_malloc(txtdesc->asclen)))
goto error;
if (jas_stream_read(in, txtdesc->ascdata, txtdesc->asclen) !=
JAS_CAST(int, txtdesc->asclen))
goto error;
txtdesc->ascdata[txtdesc->asclen - 1] = '\0';
if (jas_iccgetuint32(in, &txtdesc->uclangcode) ||
jas_iccgetuint32(in, &txtdesc->uclen))
goto error;
if (!(txtdesc->ucdata = jas_alloc2(txtdesc->uclen, 2)))
goto error;
if (jas_stream_read(in, txtdesc->ucdata, txtdesc->uclen * 2) !=
JAS_CAST(int, txtdesc->uclen * 2))
goto error;
if (jas_iccgetuint16(in, &txtdesc->sccode))
goto error;
if ((c = jas_stream_getc(in)) == EOF)
goto error;
txtdesc->maclen = c;
if (jas_stream_read(in, txtdesc->macdata, 67) != 67)
goto error;
txtdesc->asclen = JAS_CAST(jas_iccuint32_t, strlen(txtdesc->ascdata) + 1);
#define WORKAROUND_BAD_PROFILES
#ifdef WORKAROUND_BAD_PROFILES
n = txtdesc->asclen + txtdesc->uclen * 2 + 15 + 67;
if (n > cnt) {
return -1;
}
if (n < cnt) {
if (jas_stream_gobble(in, cnt - n) != cnt - n)
goto error;
}
#else
if (txtdesc->asclen + txtdesc->uclen * 2 + 15 + 67 != cnt)
return -1;
#endif
return 0;
error:
jas_icctxtdesc_destroy(attrval);
return -1;
}
| 14,831 |
184,426 | 1 | void ChromeMockRenderThread::OnMsgOpenChannelToExtension(
int routing_id, const std::string& source_extension_id,
const std::string& target_extension_id,
const std::string& channel_name, int* port_id) {
*port_id = 0;
}
| 14,832 |
123,701 | 0 | ScriptLoaderClient* ScriptLoader::client() const
{
if (isHTMLScriptLoader(m_element))
return toHTMLScriptElement(m_element);
if (isSVGScriptLoader(m_element))
return toSVGScriptElement(m_element);
ASSERT_NOT_REACHED();
return 0;
}
| 14,833 |
49,392 | 0 | static int lock_trace(struct task_struct *task)
{
int err = mutex_lock_killable(&task->signal->cred_guard_mutex);
if (err)
return err;
if (!ptrace_may_access(task, PTRACE_MODE_ATTACH_FSCREDS)) {
mutex_unlock(&task->signal->cred_guard_mutex);
return -EPERM;
}
return 0;
}
| 14,834 |
109,780 | 0 | void Document::registerNodeList(LiveNodeListBase* list)
{
if (list->hasIdNameCache())
m_nodeListCounts[InvalidateOnIdNameAttrChange]++;
m_nodeListCounts[list->invalidationType()]++;
if (list->isRootedAtDocument())
m_listsInvalidatedAtDocument.add(list);
}
| 14,835 |
54,317 | 0 | static void spl_ptr_llist_destroy(spl_ptr_llist *llist) /* {{{ */
{
spl_ptr_llist_element *current = llist->head, *next;
spl_ptr_llist_dtor_func dtor = llist->dtor;
while (current) {
next = current->next;
if (dtor) {
dtor(current);
}
SPL_LLIST_DELREF(current);
current = next;
}
efree(llist);
}
/* }}} */
| 14,836 |
67,021 | 0 | void ff_color_frame(AVFrame *frame, const int c[4])
{
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
int p, y, x;
av_assert0(desc->flags & AV_PIX_FMT_FLAG_PLANAR);
for (p = 0; p<desc->nb_components; p++) {
uint8_t *dst = frame->data[p];
int is_chroma = p == 1 || p == 2;
int bytes = is_chroma ? AV_CEIL_RSHIFT(frame->width, desc->log2_chroma_w) : frame->width;
int height = is_chroma ? AV_CEIL_RSHIFT(frame->height, desc->log2_chroma_h) : frame->height;
for (y = 0; y < height; y++) {
if (desc->comp[0].depth >= 9) {
for (x = 0; x<bytes; x++)
((uint16_t*)dst)[x] = c[p];
}else
memset(dst, c[p], bytes);
dst += frame->linesize[p];
}
}
}
| 14,837 |
51,010 | 0 | static long do_rmdir(int dfd, const char __user *pathname)
{
int error = 0;
struct filename *name;
struct dentry *dentry;
struct path path;
struct qstr last;
int type;
unsigned int lookup_flags = 0;
retry:
name = user_path_parent(dfd, pathname,
&path, &last, &type, lookup_flags);
if (IS_ERR(name))
return PTR_ERR(name);
switch (type) {
case LAST_DOTDOT:
error = -ENOTEMPTY;
goto exit1;
case LAST_DOT:
error = -EINVAL;
goto exit1;
case LAST_ROOT:
error = -EBUSY;
goto exit1;
}
error = mnt_want_write(path.mnt);
if (error)
goto exit1;
inode_lock_nested(path.dentry->d_inode, I_MUTEX_PARENT);
dentry = __lookup_hash(&last, path.dentry, lookup_flags);
error = PTR_ERR(dentry);
if (IS_ERR(dentry))
goto exit2;
if (!dentry->d_inode) {
error = -ENOENT;
goto exit3;
}
error = security_path_rmdir(&path, dentry);
if (error)
goto exit3;
error = vfs_rmdir(path.dentry->d_inode, dentry);
exit3:
dput(dentry);
exit2:
inode_unlock(path.dentry->d_inode);
mnt_drop_write(path.mnt);
exit1:
path_put(&path);
putname(name);
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
goto retry;
}
return error;
}
| 14,838 |
139,547 | 0 | static TriState StateJustifyRight(LocalFrame& frame, Event*) {
return StateStyle(frame, CSSPropertyTextAlign, "right");
}
| 14,839 |
65,712 | 0 | static void unhash_lockowner_locked(struct nfs4_lockowner *lo)
{
struct nfs4_client *clp = lo->lo_owner.so_client;
lockdep_assert_held(&clp->cl_lock);
list_del_init(&lo->lo_owner.so_strhash);
}
| 14,840 |
128,288 | 0 | void FrameView::notifyPageThatContentAreaWillPaint() const
{
Page* page = m_frame->page();
if (!page)
return;
contentAreaWillPaint();
if (!m_scrollableAreas)
return;
for (HashSet<ScrollableArea*>::const_iterator it = m_scrollableAreas->begin(), end = m_scrollableAreas->end(); it != end; ++it) {
ScrollableArea* scrollableArea = *it;
if (!scrollableArea->scrollbarsCanBeActive())
continue;
scrollableArea->contentAreaWillPaint();
}
}
| 14,841 |
147,051 | 0 | void WebLocalFrameImpl::ReplaceSelection(const WebString& text) {
GetFrame()->GetDocument()->UpdateStyleAndLayoutIgnorePendingStylesheets();
GetFrame()->GetEditor().ReplaceSelection(text);
}
| 14,842 |
124,167 | 0 | bool RenderViewHostManager::ShouldReuseWebUI(
const NavigationEntry* curr_entry,
const NavigationEntryImpl* new_entry) const {
NavigationControllerImpl& controller =
delegate_->GetControllerForRenderManager();
return curr_entry && web_ui_.get() &&
(WebUIControllerFactoryRegistry::GetInstance()->GetWebUIType(
controller.GetBrowserContext(), curr_entry->GetURL()) ==
WebUIControllerFactoryRegistry::GetInstance()->GetWebUIType(
controller.GetBrowserContext(), new_entry->GetURL()));
}
| 14,843 |
67,131 | 0 | nfsd(void *vrqstp)
{
struct svc_rqst *rqstp = (struct svc_rqst *) vrqstp;
struct svc_xprt *perm_sock = list_entry(rqstp->rq_server->sv_permsocks.next, typeof(struct svc_xprt), xpt_list);
struct net *net = perm_sock->xpt_net;
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
int err;
/* Lock module and set up kernel thread */
mutex_lock(&nfsd_mutex);
/* At this point, the thread shares current->fs
* with the init process. We need to create files with the
* umask as defined by the client instead of init's umask. */
if (unshare_fs_struct() < 0) {
printk("Unable to start nfsd thread: out of memory\n");
goto out;
}
current->fs->umask = 0;
/*
* thread is spawned with all signals set to SIG_IGN, re-enable
* the ones that will bring down the thread
*/
allow_signal(SIGKILL);
allow_signal(SIGHUP);
allow_signal(SIGINT);
allow_signal(SIGQUIT);
nfsdstats.th_cnt++;
mutex_unlock(&nfsd_mutex);
set_freezable();
/*
* The main request loop
*/
for (;;) {
/* Update sv_maxconn if it has changed */
rqstp->rq_server->sv_maxconn = nn->max_connections;
/*
* Find a socket with data available and call its
* recvfrom routine.
*/
while ((err = svc_recv(rqstp, 60*60*HZ)) == -EAGAIN)
;
if (err == -EINTR)
break;
validate_process_creds();
svc_process(rqstp);
validate_process_creds();
}
/* Clear signals before calling svc_exit_thread() */
flush_signals(current);
mutex_lock(&nfsd_mutex);
nfsdstats.th_cnt --;
out:
rqstp->rq_server = NULL;
/* Release the thread */
svc_exit_thread(rqstp);
nfsd_destroy(net);
/* Release module */
mutex_unlock(&nfsd_mutex);
module_put_and_exit(0);
return 0;
}
| 14,844 |
184,965 | 1 | xmlParseCDSect(xmlParserCtxtPtr ctxt) {
xmlChar *buf = NULL;
int len = 0;
int size = XML_PARSER_BUFFER_SIZE;
int r, rl;
int s, sl;
int cur, l;
int count = 0;
/* Check 2.6.0 was NXT(0) not RAW */
if (CMP9(CUR_PTR, '<', '!', '[', 'C', 'D', 'A', 'T', 'A', '[')) {
SKIP(9);
} else
return;
ctxt->instate = XML_PARSER_CDATA_SECTION;
r = CUR_CHAR(rl);
if (!IS_CHAR(r)) {
xmlFatalErr(ctxt, XML_ERR_CDATA_NOT_FINISHED, NULL);
ctxt->instate = XML_PARSER_CONTENT;
return;
}
NEXTL(rl);
s = CUR_CHAR(sl);
if (!IS_CHAR(s)) {
xmlFatalErr(ctxt, XML_ERR_CDATA_NOT_FINISHED, NULL);
ctxt->instate = XML_PARSER_CONTENT;
return;
}
NEXTL(sl);
cur = CUR_CHAR(l);
buf = (xmlChar *) xmlMallocAtomic(size * sizeof(xmlChar));
if (buf == NULL) {
xmlErrMemory(ctxt, NULL);
return;
}
while (IS_CHAR(cur) &&
((r != ']') || (s != ']') || (cur != '>'))) {
if (len + 5 >= size) {
xmlChar *tmp;
size *= 2;
tmp = (xmlChar *) xmlRealloc(buf, size * sizeof(xmlChar));
if (tmp == NULL) {
xmlFree(buf);
xmlErrMemory(ctxt, NULL);
return;
}
buf = tmp;
}
COPY_BUF(rl,buf,len,r);
r = s;
rl = sl;
s = cur;
sl = l;
count++;
if (count > 50) {
GROW;
count = 0;
}
NEXTL(l);
cur = CUR_CHAR(l);
}
buf[len] = 0;
ctxt->instate = XML_PARSER_CONTENT;
if (cur != '>') {
xmlFatalErrMsgStr(ctxt, XML_ERR_CDATA_NOT_FINISHED,
"CData section not finished\n%.50s\n", buf);
xmlFree(buf);
return;
}
NEXTL(l);
/*
* OK the buffer is to be consumed as cdata.
*/
if ((ctxt->sax != NULL) && (!ctxt->disableSAX)) {
if (ctxt->sax->cdataBlock != NULL)
ctxt->sax->cdataBlock(ctxt->userData, buf, len);
else if (ctxt->sax->characters != NULL)
ctxt->sax->characters(ctxt->userData, buf, len);
}
xmlFree(buf);
}
| 14,845 |
19,556 | 0 | static int ep_call_nested(struct nested_calls *ncalls, int max_nests,
int (*nproc)(void *, void *, int), void *priv,
void *cookie, void *ctx)
{
int error, call_nests = 0;
unsigned long flags;
struct list_head *lsthead = &ncalls->tasks_call_list;
struct nested_call_node *tncur;
struct nested_call_node tnode;
spin_lock_irqsave(&ncalls->lock, flags);
/*
* Try to see if the current task is already inside this wakeup call.
* We use a list here, since the population inside this set is always
* very much limited.
*/
list_for_each_entry(tncur, lsthead, llink) {
if (tncur->ctx == ctx &&
(tncur->cookie == cookie || ++call_nests > max_nests)) {
/*
* Ops ... loop detected or maximum nest level reached.
* We abort this wake by breaking the cycle itself.
*/
error = -1;
goto out_unlock;
}
}
/* Add the current task and cookie to the list */
tnode.ctx = ctx;
tnode.cookie = cookie;
list_add(&tnode.llink, lsthead);
spin_unlock_irqrestore(&ncalls->lock, flags);
/* Call the nested function */
error = (*nproc)(priv, cookie, call_nests);
/* Remove the current task from the list */
spin_lock_irqsave(&ncalls->lock, flags);
list_del(&tnode.llink);
out_unlock:
spin_unlock_irqrestore(&ncalls->lock, flags);
return error;
}
| 14,846 |
142,754 | 0 | void HTMLMediaElement::CancelPendingEventsAndCallbacks() {
BLINK_MEDIA_LOG << "cancelPendingEventsAndCallbacks(" << (void*)this << ")";
async_event_queue_->CancelAllEvents();
for (HTMLSourceElement* source =
Traversal<HTMLSourceElement>::FirstChild(*this);
source; source = Traversal<HTMLSourceElement>::NextSibling(*source))
source->CancelPendingErrorEvent();
}
| 14,847 |
179,955 | 1 | static noinline void key_gc_unused_keys(struct list_head *keys)
{
while (!list_empty(keys)) {
struct key *key =
list_entry(keys->next, struct key, graveyard_link);
list_del(&key->graveyard_link);
kdebug("- %u", key->serial);
key_check(key);
security_key_free(key);
/* deal with the user's key tracking and quota */
if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
spin_lock(&key->user->lock);
key->user->qnkeys--;
key->user->qnbytes -= key->quotalen;
spin_unlock(&key->user->lock);
}
atomic_dec(&key->user->nkeys);
if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
atomic_dec(&key->user->nikeys);
key_user_put(key->user);
/* now throw away the key memory */
if (key->type->destroy)
key->type->destroy(key);
kfree(key->description);
#ifdef KEY_DEBUGGING
key->magic = KEY_DEBUG_MAGIC_X;
#endif
kmem_cache_free(key_jar, key);
}
}
| 14,848 |
176,906 | 0 | bool InputDispatcher::checkInjectionPermission(const sp<InputWindowHandle>& windowHandle,
const InjectionState* injectionState) {
if (injectionState
&& (windowHandle == NULL
|| windowHandle->getInfo()->ownerUid != injectionState->injectorUid)
&& !hasInjectionPermission(injectionState->injectorPid, injectionState->injectorUid)) {
if (windowHandle != NULL) {
ALOGW("Permission denied: injecting event from pid %d uid %d to window %s "
"owned by uid %d",
injectionState->injectorPid, injectionState->injectorUid,
windowHandle->getName().string(),
windowHandle->getInfo()->ownerUid);
} else {
ALOGW("Permission denied: injecting event from pid %d uid %d",
injectionState->injectorPid, injectionState->injectorUid);
}
return false;
}
return true;
}
| 14,849 |
164,789 | 0 | static bool CheckH264(const uint8_t* buffer, int buffer_size) {
RCHECK(buffer_size > 4);
int offset = 0;
int parameter_count = 0;
while (true) {
if (!AdvanceToStartCode(buffer, buffer_size, &offset, 4, 24, 1)) {
return parameter_count > 0;
}
BitReader reader(buffer + offset, 4);
RCHECK(ReadBits(&reader, 24) == 1);
RCHECK(ReadBits(&reader, 1) == 0);
int nal_ref_idc = ReadBits(&reader, 2);
int nal_unit_type = ReadBits(&reader, 5);
switch (nal_unit_type) {
case 5: // Coded slice of an IDR picture.
RCHECK(nal_ref_idc != 0);
break;
case 6: // Supplemental enhancement information (SEI).
case 9: // Access unit delimiter.
case 10: // End of sequence.
case 11: // End of stream.
case 12: // Filler data.
RCHECK(nal_ref_idc == 0);
break;
case 7: // Sequence parameter set.
case 8: // Picture parameter set.
++parameter_count;
break;
}
offset += 4;
}
}
| 14,850 |
156,636 | 0 | void TryCreateDuplicateRequestIds(Shell* shell, bool block_loaders) {
NavigateToURL(shell, GURL("http://foo.com/simple_page.html"));
RenderFrameHostImpl* rfh = static_cast<RenderFrameHostImpl*>(
shell->web_contents()->GetMainFrame());
if (block_loaders) {
rfh->BlockRequestsForFrame();
}
const char* blocking_url = net::URLRequestSlowDownloadJob::kUnknownSizeUrl;
network::ResourceRequest request(CreateXHRRequest(blocking_url));
RenderProcessHostKillWaiter kill_waiter(rfh->GetProcess());
network::mojom::URLLoaderPtr loader1, loader2;
network::TestURLLoaderClient client1, client2;
CreateLoaderAndStart(rfh, mojo::MakeRequest(&loader1), rfh->GetRoutingID(),
kRequestIdNotPreviouslyUsed, request,
client1.CreateInterfacePtr().PassInterface());
CreateLoaderAndStart(rfh, mojo::MakeRequest(&loader2), rfh->GetRoutingID(),
kRequestIdNotPreviouslyUsed, request,
client2.CreateInterfacePtr().PassInterface());
EXPECT_EQ(bad_message::RDH_INVALID_REQUEST_ID, kill_waiter.Wait());
}
| 14,851 |
152,863 | 0 | UkmPageLoadMetricsObserver::ObservePolicy UkmPageLoadMetricsObserver::OnCommit(
content::NavigationHandle* navigation_handle,
ukm::SourceId source_id) {
const net::HttpResponseHeaders* response_headers =
navigation_handle->GetResponseHeaders();
if (response_headers)
http_response_code_ = response_headers->response_code();
page_transition_ = navigation_handle->GetPageTransition();
was_cached_ = navigation_handle->WasResponseCached();
navigation_start_ = navigation_handle->NavigationStart();
return CONTINUE_OBSERVING;
}
| 14,852 |
59,834 | 0 | static int usbhid_restart_out_queue(struct usbhid_device *usbhid)
{
struct hid_device *hid = usb_get_intfdata(usbhid->intf);
int kicked;
int r;
if (!hid || test_bit(HID_RESET_PENDING, &usbhid->iofl) ||
test_bit(HID_SUSPENDED, &usbhid->iofl))
return 0;
if ((kicked = (usbhid->outhead != usbhid->outtail))) {
hid_dbg(hid, "Kicking head %d tail %d", usbhid->outhead, usbhid->outtail);
/* Try to wake up from autosuspend... */
r = usb_autopm_get_interface_async(usbhid->intf);
if (r < 0)
return r;
/*
* If still suspended, don't submit. Submission will
* occur if/when resume drains the queue.
*/
if (test_bit(HID_SUSPENDED, &usbhid->iofl)) {
usb_autopm_put_interface_no_suspend(usbhid->intf);
return r;
}
/* Asynchronously flush queue. */
set_bit(HID_OUT_RUNNING, &usbhid->iofl);
if (hid_submit_out(hid)) {
clear_bit(HID_OUT_RUNNING, &usbhid->iofl);
usb_autopm_put_interface_async(usbhid->intf);
}
wake_up(&usbhid->wait);
}
return kicked;
}
| 14,853 |
46,822 | 0 | static void aesni_gcm_enc_avx(void *ctx, u8 *out,
const u8 *in, unsigned long plaintext_len, u8 *iv,
u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
u8 *auth_tag, unsigned long auth_tag_len)
{
if (plaintext_len < AVX_GEN2_OPTSIZE) {
aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
aad_len, auth_tag, auth_tag_len);
} else {
aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
aad_len, auth_tag, auth_tag_len);
}
}
| 14,854 |
5,365 | 0 | static void Ins_AND( INS_ARG )
{ (void)exc;
if ( args[0] != 0 && args[1] != 0 )
args[0] = 1;
else
args[0] = 0;
}
| 14,855 |
36,742 | 0 | spnego_gss_delete_sec_context(
OM_uint32 *minor_status,
gss_ctx_id_t *context_handle,
gss_buffer_t output_token)
{
OM_uint32 ret = GSS_S_COMPLETE;
spnego_gss_ctx_id_t *ctx =
(spnego_gss_ctx_id_t *)context_handle;
*minor_status = 0;
if (context_handle == NULL)
return (GSS_S_FAILURE);
if (*ctx == NULL)
return (GSS_S_COMPLETE);
/*
* If this is still an SPNEGO mech, release it locally.
*/
if ((*ctx)->magic_num == SPNEGO_MAGIC_ID) {
(void) gss_delete_sec_context(minor_status,
&(*ctx)->ctx_handle,
output_token);
(void) release_spnego_ctx(ctx);
} else {
ret = gss_delete_sec_context(minor_status,
context_handle,
output_token);
}
return (ret);
}
| 14,856 |
139,939 | 0 | bool canLoadURL(const KURL& url, const ContentType& contentType) {
DEFINE_STATIC_LOCAL(const String, codecs, ("codecs"));
String contentMIMEType = contentType.type().lower();
String contentTypeCodecs = contentType.parameter(codecs);
if (contentMIMEType.isEmpty() ||
contentMIMEType == "application/octet-stream" ||
contentMIMEType == "text/plain") {
if (url.protocolIsData())
contentMIMEType = mimeTypeFromDataURL(url.getString());
}
if (contentMIMEType.isEmpty())
return true;
if (contentMIMEType != "application/octet-stream" ||
contentTypeCodecs.isEmpty()) {
return MIMETypeRegistry::supportsMediaMIMEType(contentMIMEType,
contentTypeCodecs);
}
return false;
}
| 14,857 |
127,332 | 0 | static std::string selectionAsString(WebFrame* frame)
{
return frame->selectionAsText().utf8();
}
| 14,858 |
122,943 | 0 | void RenderWidgetHostImpl::Copy() {
Send(new ViewMsg_Copy(GetRoutingID()));
RecordAction(UserMetricsAction("Copy"));
}
| 14,859 |
161,746 | 0 | void PlatformSensorAmbientLightMac::IOServiceCallback(void* context,
io_service_t service,
natural_t message_type,
void* message_argument) {
PlatformSensorAmbientLightMac* sensor =
static_cast<PlatformSensorAmbientLightMac*>(context);
if (!sensor->ReadAndUpdate()) {
sensor->NotifySensorError();
sensor->StopSensor();
}
}
| 14,860 |
98,253 | 0 | bool WebFrame::isFrameSet() const
{
if (!m_coreFrame)
return false;
Document* document = m_coreFrame->document();
if (!document)
return false;
return document->isFrameSet();
}
| 14,861 |
178,086 | 1 | XRRGetMonitors(Display *dpy, Window window, Bool get_active, int *nmonitors)
{
XExtDisplayInfo *info = XRRFindDisplay(dpy);
xRRGetMonitorsReply rep;
xRRGetMonitorsReq *req;
int nbytes, nbytesRead, rbytes;
int nmon, noutput;
int m, o;
char *buf, *buf_head;
xRRMonitorInfo *xmon;
CARD32 *xoutput;
XRRMonitorInfo *mon = NULL;
RROutput *output;
RRCheckExtension (dpy, info, NULL);
*nmonitors = -1;
LockDisplay (dpy);
GetReq (RRGetMonitors, req);
req->reqType = info->codes->major_opcode;
req->randrReqType = X_RRGetMonitors;
req->window = window;
req->get_active = get_active;
if (!_XReply (dpy, (xReply *) &rep, 0, xFalse))
{
UnlockDisplay (dpy);
SyncHandle ();
return NULL;
return NULL;
}
nbytes = (long) rep.length << 2;
nmon = rep.nmonitors;
noutput = rep.noutputs;
rbytes = nmon * sizeof (XRRMonitorInfo) + noutput * sizeof(RROutput);
buf = buf_head = Xmalloc (nbytesRead);
mon = Xmalloc (rbytes);
if (buf == NULL || mon == NULL) {
Xfree(buf);
Xfree(mon);
_XEatDataWords (dpy, rep.length);
UnlockDisplay (dpy);
SyncHandle ();
return NULL;
}
_XReadPad(dpy, buf, nbytesRead);
output = (RROutput *) (mon + nmon);
for (m = 0; m < nmon; m++) {
xmon = (xRRMonitorInfo *) buf;
mon[m].name = xmon->name;
mon[m].primary = xmon->primary;
mon[m].automatic = xmon->automatic;
mon[m].noutput = xmon->noutput;
mon[m].x = xmon->x;
mon[m].y = xmon->y;
mon[m].width = xmon->width;
mon[m].height = xmon->height;
mon[m].mwidth = xmon->widthInMillimeters;
mon[m].mheight = xmon->heightInMillimeters;
mon[m].outputs = output;
buf += SIZEOF (xRRMonitorInfo);
xoutput = (CARD32 *) buf;
for (o = 0; o < xmon->noutput; o++)
output[o] = xoutput[o];
output += xmon->noutput;
buf += xmon->noutput * 4;
}
Xfree(buf_head);
}
| 14,862 |
43,043 | 0 | static CallInfo *growCI (lua_State *L) {
if (L->size_ci > LUAI_MAXCALLS) /* overflow while handling overflow? */
luaD_throw(L, LUA_ERRERR);
else {
luaD_reallocCI(L, 2*L->size_ci);
if (L->size_ci > LUAI_MAXCALLS)
luaG_runerror(L, "stack overflow");
}
return ++L->ci;
}
| 14,863 |
3,674 | 0 | static void lsi_bad_selection(LSIState *s, uint32_t id)
{
trace_lsi_bad_selection(id);
lsi_script_scsi_interrupt(s, 0, LSI_SIST1_STO);
lsi_disconnect(s);
}
| 14,864 |
30,651 | 0 | static int irda_listen(struct socket *sock, int backlog)
{
struct sock *sk = sock->sk;
int err = -EOPNOTSUPP;
IRDA_DEBUG(2, "%s()\n", __func__);
lock_sock(sk);
if ((sk->sk_type != SOCK_STREAM) && (sk->sk_type != SOCK_SEQPACKET) &&
(sk->sk_type != SOCK_DGRAM))
goto out;
if (sk->sk_state != TCP_LISTEN) {
sk->sk_max_ack_backlog = backlog;
sk->sk_state = TCP_LISTEN;
err = 0;
}
out:
release_sock(sk);
return err;
}
| 14,865 |
11,038 | 0 | static int push_signature(uint8 **outbuf)
{
char *lanman;
int result, tmp;
result = 0;
tmp = message_push_string(outbuf, "Unix", STR_TERMINATE);
if (tmp == -1) return -1;
result += tmp;
if (asprintf(&lanman, "Samba %s", samba_version_string()) != -1) {
tmp = message_push_string(outbuf, lanman, STR_TERMINATE);
SAFE_FREE(lanman);
}
else {
tmp = message_push_string(outbuf, "Samba", STR_TERMINATE);
}
if (tmp == -1) return -1;
result += tmp;
tmp = message_push_string(outbuf, lp_workgroup(), STR_TERMINATE);
if (tmp == -1) return -1;
result += tmp;
return result;
}
| 14,866 |
111,580 | 0 | virtual void GetEntryInfoCallback(
base::PlatformFileError error,
const FilePath& entry_path,
scoped_ptr<GDataEntryProto> entry_proto) {
last_error_ = error;
entry_proto_ = entry_proto.Pass();
}
| 14,867 |
126,256 | 0 | void Browser::UpdateSearchState(TabContents* contents) {
if (chrome::search::IsInstantExtendedAPIEnabled(profile_))
search_delegate_->OnTabActivated(contents->web_contents());
}
| 14,868 |
75,630 | 0 | static int read_int32_info (WavpackStream *wps, WavpackMetadata *wpmd)
{
int bytecnt = wpmd->byte_length;
char *byteptr = (char *)wpmd->data;
if (bytecnt != 4)
return FALSE;
wps->int32_sent_bits = *byteptr++;
wps->int32_zeros = *byteptr++;
wps->int32_ones = *byteptr++;
wps->int32_dups = *byteptr;
return TRUE;
}
| 14,869 |
51,130 | 0 | void __audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat)
{
struct audit_context *context = current->audit_context;
context->mq_getsetattr.mqdes = mqdes;
context->mq_getsetattr.mqstat = *mqstat;
context->type = AUDIT_MQ_GETSETATTR;
}
| 14,870 |
111,316 | 0 | static inline IntPoint roundTransformedPoint(const FloatPoint &point)
{
return IntPoint(static_cast<int>(floorf(point.x())), static_cast<int>(floorf(point.y())));
}
| 14,871 |
26,396 | 0 | static void watchdog_enable_all_cpus(void)
{
int cpu;
watchdog_enabled = 0;
for_each_online_cpu(cpu)
if (!watchdog_enable(cpu))
/* if any cpu succeeds, watchdog is considered
enabled for the system */
watchdog_enabled = 1;
if (!watchdog_enabled)
printk(KERN_ERR "watchdog: failed to be enabled on some cpus\n");
}
| 14,872 |
66,855 | 0 | __ref void *alloc_low_pages(unsigned int num)
{
unsigned long pfn;
int i;
if (after_bootmem) {
unsigned int order;
order = get_order((unsigned long)num << PAGE_SHIFT);
return (void *)__get_free_pages(GFP_ATOMIC | __GFP_NOTRACK |
__GFP_ZERO, order);
}
if ((pgt_buf_end + num) > pgt_buf_top || !can_use_brk_pgt) {
unsigned long ret;
if (min_pfn_mapped >= max_pfn_mapped)
panic("alloc_low_pages: ran out of memory");
ret = memblock_find_in_range(min_pfn_mapped << PAGE_SHIFT,
max_pfn_mapped << PAGE_SHIFT,
PAGE_SIZE * num , PAGE_SIZE);
if (!ret)
panic("alloc_low_pages: can not alloc memory");
memblock_reserve(ret, PAGE_SIZE * num);
pfn = ret >> PAGE_SHIFT;
} else {
pfn = pgt_buf_end;
pgt_buf_end += num;
printk(KERN_DEBUG "BRK [%#010lx, %#010lx] PGTABLE\n",
pfn << PAGE_SHIFT, (pgt_buf_end << PAGE_SHIFT) - 1);
}
for (i = 0; i < num; i++) {
void *adr;
adr = __va((pfn + i) << PAGE_SHIFT);
clear_page(adr);
}
return __va(pfn << PAGE_SHIFT);
}
| 14,873 |
6,600 | 0 | QList<Smb4KShare*> Smb4KGlobal::findInaccessibleShares()
{
QList<Smb4KShare *> inaccessibleShares;
mutex.lock();
for (Smb4KShare *s : p->mountedSharesList)
{
if (s->isInaccessible())
{
inaccessibleShares += s;
}
else
{
}
}
mutex.unlock();
return inaccessibleShares;
}
| 14,874 |
37,461 | 0 | unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
{
unsigned int nr_mmu_pages;
unsigned int nr_pages = 0;
struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
slots = kvm_memslots(kvm);
kvm_for_each_memslot(memslot, slots)
nr_pages += memslot->npages;
nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
nr_mmu_pages = max(nr_mmu_pages,
(unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
return nr_mmu_pages;
}
| 14,875 |
163,529 | 0 | blink::WebPushPermissionStatus ToPushPermission(
ContentSetting content_setting) {
switch (content_setting) {
case CONTENT_SETTING_ALLOW:
return blink::kWebPushPermissionStatusGranted;
case CONTENT_SETTING_BLOCK:
return blink::kWebPushPermissionStatusDenied;
case CONTENT_SETTING_ASK:
return blink::kWebPushPermissionStatusPrompt;
default:
break;
}
NOTREACHED();
return blink::kWebPushPermissionStatusDenied;
}
| 14,876 |
25,762 | 0 | perf_event_nmi_handler(struct notifier_block *self,
unsigned long cmd, void *__args)
{
struct die_args *args = __args;
unsigned int this_nmi;
int handled;
if (!atomic_read(&active_events))
return NOTIFY_DONE;
switch (cmd) {
case DIE_NMI:
break;
case DIE_NMIUNKNOWN:
this_nmi = percpu_read(irq_stat.__nmi_count);
if (this_nmi != __this_cpu_read(pmu_nmi.marked))
/* let the kernel handle the unknown nmi */
return NOTIFY_DONE;
/*
* This one is a PMU back-to-back nmi. Two events
* trigger 'simultaneously' raising two back-to-back
* NMIs. If the first NMI handles both, the latter
* will be empty and daze the CPU. So, we drop it to
* avoid false-positive 'unknown nmi' messages.
*/
return NOTIFY_STOP;
default:
return NOTIFY_DONE;
}
handled = x86_pmu.handle_irq(args->regs);
if (!handled)
return NOTIFY_DONE;
this_nmi = percpu_read(irq_stat.__nmi_count);
if ((handled > 1) ||
/* the next nmi could be a back-to-back nmi */
((__this_cpu_read(pmu_nmi.marked) == this_nmi) &&
(__this_cpu_read(pmu_nmi.handled) > 1))) {
/*
* We could have two subsequent back-to-back nmis: The
* first handles more than one counter, the 2nd
* handles only one counter and the 3rd handles no
* counter.
*
* This is the 2nd nmi because the previous was
* handling more than one counter. We will mark the
* next (3rd) and then drop it if unhandled.
*/
__this_cpu_write(pmu_nmi.marked, this_nmi + 1);
__this_cpu_write(pmu_nmi.handled, handled);
}
return NOTIFY_STOP;
}
| 14,877 |
21,507 | 0 | kadm5_setkey_principal_3(void *server_handle,
krb5_principal principal,
krb5_boolean keepold,
int n_ks_tuple, krb5_key_salt_tuple *ks_tuple,
krb5_keyblock *keyblocks,
int n_keys)
{
krb5_db_entry *kdb;
osa_princ_ent_rec adb;
krb5_int32 now;
kadm5_policy_ent_rec pol;
krb5_key_data *old_key_data;
int n_old_keys;
int i, j, k, kvno, ret, have_pol = 0;
#if 0
int last_pwd;
#endif
kadm5_server_handle_t handle = server_handle;
krb5_boolean similar;
krb5_keysalt keysalt;
krb5_key_data tmp_key_data;
krb5_key_data *tptr;
krb5_keyblock *act_mkey;
CHECK_HANDLE(server_handle);
krb5_clear_error_message(handle->context);
if (principal == NULL || keyblocks == NULL)
return EINVAL;
if (hist_princ && /* this will be NULL when initializing the databse */
((krb5_principal_compare(handle->context,
principal, hist_princ)) == TRUE))
return KADM5_PROTECT_PRINCIPAL;
for (i = 0; i < n_keys; i++) {
for (j = i+1; j < n_keys; j++) {
if ((ret = krb5_c_enctype_compare(handle->context,
keyblocks[i].enctype,
keyblocks[j].enctype,
&similar)))
return(ret);
if (similar) {
if (n_ks_tuple) {
if (ks_tuple[i].ks_salttype == ks_tuple[j].ks_salttype)
return KADM5_SETKEY_DUP_ENCTYPES;
} else
return KADM5_SETKEY_DUP_ENCTYPES;
}
}
}
if (n_ks_tuple && n_ks_tuple != n_keys)
return KADM5_SETKEY3_ETYPE_MISMATCH;
if ((ret = kdb_get_entry(handle, principal, &kdb, &adb)))
return(ret);
for (kvno = 0, i=0; i<kdb->n_key_data; i++)
if (kdb->key_data[i].key_data_kvno > kvno)
kvno = kdb->key_data[i].key_data_kvno;
if (keepold) {
old_key_data = kdb->key_data;
n_old_keys = kdb->n_key_data;
} else {
if (kdb->key_data != NULL)
cleanup_key_data(handle->context, kdb->n_key_data, kdb->key_data);
n_old_keys = 0;
old_key_data = NULL;
}
kdb->key_data = (krb5_key_data*)krb5_db_alloc(handle->context, NULL, (n_keys+n_old_keys)
*sizeof(krb5_key_data));
if (kdb->key_data == NULL) {
ret = ENOMEM;
goto done;
}
memset(kdb->key_data, 0, (n_keys+n_old_keys)*sizeof(krb5_key_data));
kdb->n_key_data = 0;
for (i = 0; i < n_keys; i++) {
if (n_ks_tuple) {
keysalt.type = ks_tuple[i].ks_salttype;
keysalt.data.length = 0;
keysalt.data.data = NULL;
if (ks_tuple[i].ks_enctype != keyblocks[i].enctype) {
ret = KADM5_SETKEY3_ETYPE_MISMATCH;
goto done;
}
}
memset (&tmp_key_data, 0, sizeof(tmp_key_data));
ret = krb5_dbe_find_act_mkey(handle->context, active_mkey_list, NULL,
&act_mkey);
if (ret)
goto done;
ret = krb5_dbe_encrypt_key_data(handle->context, act_mkey,
&keyblocks[i],
n_ks_tuple ? &keysalt : NULL, kvno + 1,
&tmp_key_data);
if (ret)
goto done;
tptr = &kdb->key_data[i];
tptr->key_data_ver = tmp_key_data.key_data_ver;
tptr->key_data_kvno = tmp_key_data.key_data_kvno;
for (k = 0; k < tmp_key_data.key_data_ver; k++) {
tptr->key_data_type[k] = tmp_key_data.key_data_type[k];
tptr->key_data_length[k] = tmp_key_data.key_data_length[k];
if (tmp_key_data.key_data_contents[k]) {
tptr->key_data_contents[k] = krb5_db_alloc(handle->context, NULL, tmp_key_data.key_data_length[k]);
if (tptr->key_data_contents[k] == NULL) {
int i1;
for (i1 = k; i1 < tmp_key_data.key_data_ver; i1++) {
if (tmp_key_data.key_data_contents[i1]) {
memset (tmp_key_data.key_data_contents[i1], 0, tmp_key_data.key_data_length[i1]);
free (tmp_key_data.key_data_contents[i1]);
}
}
ret = ENOMEM;
goto done;
}
memcpy (tptr->key_data_contents[k], tmp_key_data.key_data_contents[k], tmp_key_data.key_data_length[k]);
memset (tmp_key_data.key_data_contents[k], 0, tmp_key_data.key_data_length[k]);
free (tmp_key_data.key_data_contents[k]);
tmp_key_data.key_data_contents[k] = NULL;
}
}
kdb->n_key_data++;
}
/* copy old key data if necessary */
for (i = 0; i < n_old_keys; i++) {
kdb->key_data[i+n_keys] = old_key_data[i];
memset(&old_key_data[i], 0, sizeof (krb5_key_data));
kdb->n_key_data++;
}
if (old_key_data)
krb5_db_free(handle->context, old_key_data);
/* assert(kdb->n_key_data == n_keys + n_old_keys) */
kdb->attributes &= ~KRB5_KDB_REQUIRES_PWCHANGE;
if ((ret = krb5_timeofday(handle->context, &now)))
goto done;
if ((adb.aux_attributes & KADM5_POLICY)) {
if ((ret = kadm5_get_policy(handle->lhandle, adb.policy,
&pol)) != KADM5_OK)
goto done;
have_pol = 1;
#if 0
/*
* The spec says this check is overridden if the caller has
* modify privilege. The admin server therefore makes this
* check itself (in chpass_principal_wrapper, misc.c). A
* local caller implicitly has all authorization bits.
*/
if (ret = krb5_dbe_lookup_last_pwd_change(handle->context,
kdb, &last_pwd))
goto done;
if((now - last_pwd) < pol.pw_min_life &&
!(kdb->attributes & KRB5_KDB_REQUIRES_PWCHANGE)) {
ret = KADM5_PASS_TOOSOON;
goto done;
}
#endif
if (pol.pw_max_life)
kdb->pw_expiration = now + pol.pw_max_life;
else
kdb->pw_expiration = 0;
} else {
kdb->pw_expiration = 0;
}
if ((ret = krb5_dbe_update_last_pwd_change(handle->context, kdb, now)))
goto done;
/* unlock principal on this KDC */
kdb->fail_auth_count = 0;
if ((ret = kdb_put_entry(handle, kdb, &adb)))
goto done;
ret = KADM5_OK;
done:
kdb_free_entry(handle, kdb, &adb);
if (have_pol)
kadm5_free_policy_ent(handle->lhandle, &pol);
return ret;
}
| 14,878 |
10,137 | 0 | Ins_MUL( INS_ARG )
{
DO_MUL
}
| 14,879 |
74,458 | 0 | static int _regulator_get_current_limit(struct regulator_dev *rdev)
{
int ret;
mutex_lock(&rdev->mutex);
/* sanity check */
if (!rdev->desc->ops->get_current_limit) {
ret = -EINVAL;
goto out;
}
ret = rdev->desc->ops->get_current_limit(rdev);
out:
mutex_unlock(&rdev->mutex);
return ret;
}
| 14,880 |
4,944 | 0 | gst_qtdemux_handle_esds (GstQTDemux * qtdemux, QtDemuxStream * stream,
GNode * esds, GstTagList * list)
{
int len = QT_UINT32 (esds->data);
guint8 *ptr = esds->data;
guint8 *end = ptr + len;
int tag;
guint8 *data_ptr = NULL;
int data_len = 0;
guint8 object_type_id = 0;
qtdemux_dump_mem (ptr, len);
ptr += 8;
GST_DEBUG_OBJECT (qtdemux, "version/flags = %08x", QT_UINT32 (ptr));
ptr += 4;
while (ptr < end) {
tag = QT_UINT8 (ptr);
GST_DEBUG_OBJECT (qtdemux, "tag = %02x", tag);
ptr++;
len = get_size (ptr, &ptr);
GST_DEBUG_OBJECT (qtdemux, "len = %d", len);
switch (tag) {
case 0x03:
GST_DEBUG_OBJECT (qtdemux, "ID %04x", QT_UINT16 (ptr));
GST_DEBUG_OBJECT (qtdemux, "priority %04x", QT_UINT8 (ptr + 2));
ptr += 3;
break;
case 0x04:
object_type_id = QT_UINT8 (ptr);
GST_DEBUG_OBJECT (qtdemux, "object_type_id %02x", object_type_id);
GST_DEBUG_OBJECT (qtdemux, "stream_type %02x", QT_UINT8 (ptr + 1));
GST_DEBUG_OBJECT (qtdemux, "buffer_size_db %02x", QT_UINT24 (ptr + 2));
GST_DEBUG_OBJECT (qtdemux, "max bitrate %d", QT_UINT32 (ptr + 5));
GST_DEBUG_OBJECT (qtdemux, "avg bitrate %d", QT_UINT32 (ptr + 9));
ptr += 13;
break;
case 0x05:
GST_DEBUG_OBJECT (qtdemux, "data:");
qtdemux_dump_mem (ptr, len);
data_ptr = ptr;
data_len = len;
ptr += len;
break;
case 0x06:
GST_DEBUG_OBJECT (qtdemux, "data %02x", QT_UINT8 (ptr));
ptr += 1;
break;
default:
GST_ERROR_OBJECT (qtdemux, "parse error");
}
}
if (data_ptr) {
GstBuffer *buffer;
buffer = gst_buffer_new_and_alloc (data_len);
memcpy (GST_BUFFER_DATA (buffer), data_ptr, data_len);
qtdemux_dump_mem (GST_BUFFER_DATA (buffer), data_len);
GST_DEBUG_OBJECT (qtdemux, "setting codec_data from esds");
gst_caps_set_simple (stream->caps, "codec_data", GST_TYPE_BUFFER,
buffer, NULL);
gst_buffer_unref (buffer);
}
/* object_type_id in the stsd atom in mp4a tells us about AAC or plain
* MPEG audio and other formats */
switch (object_type_id) {
case 107:
/* change to mpeg1 layer 3 audio */
gst_caps_set_simple (stream->caps, "layer", G_TYPE_INT, 3,
"mpegversion", G_TYPE_INT, 1, NULL);
if (list)
gst_tag_list_add (list, GST_TAG_MERGE_REPLACE,
GST_TAG_AUDIO_CODEC, "MPEG-1 layer 3", NULL);
break;
case 0xE1:
{
GstStructure *structure;
/* QCELP, the codec_data is a riff tag (little endian) with
* more info (http://ftp.3gpp2.org/TSGC/Working/2003/2003-05-SanDiego/TSG-C-2003-05-San%20Diego/WG1/SWG12/C12-20030512-006%20=%20C12-20030217-015_Draft_Baseline%20Text%20of%20FFMS_R2.doc). */
structure = gst_caps_get_structure (stream->caps, 0);
gst_structure_set_name (structure, "audio/qcelp");
gst_structure_remove_fields (structure, "mpegversion", "framed", NULL);
if (list)
gst_tag_list_add (list, GST_TAG_MERGE_REPLACE,
GST_TAG_AUDIO_CODEC, "QCELP", NULL);
break;
}
default:
break;
}
}
| 14,881 |
3,900 | 0 | EmbedStream::EmbedStream(Stream *strA, Object *dictA,
GBool limitedA, Guint lengthA):
BaseStream(dictA, lengthA) {
str = strA;
limited = limitedA;
length = lengthA;
}
| 14,882 |
138,656 | 0 | bool RenderFrameHostImpl::CreateRenderFrame(int proxy_routing_id,
int opener_routing_id,
int parent_routing_id,
int previous_sibling_routing_id) {
TRACE_EVENT0("navigation", "RenderFrameHostImpl::CreateRenderFrame");
DCHECK(!IsRenderFrameLive()) << "Creating frame twice";
if (!GetProcess()->Init())
return false;
DCHECK(GetProcess()->HasConnection());
mojom::CreateFrameParamsPtr params = mojom::CreateFrameParams::New();
params->routing_id = routing_id_;
params->proxy_routing_id = proxy_routing_id;
params->opener_routing_id = opener_routing_id;
params->parent_routing_id = parent_routing_id;
params->previous_sibling_routing_id = previous_sibling_routing_id;
params->replication_state = frame_tree_node()->current_replication_state();
params->replication_state.sandbox_flags =
frame_tree_node()->pending_sandbox_flags();
params->frame_owner_properties =
FrameOwnerProperties(frame_tree_node()->frame_owner_properties());
params->widget_params = mojom::CreateFrameWidgetParams::New();
if (render_widget_host_) {
params->widget_params->routing_id = render_widget_host_->GetRoutingID();
params->widget_params->hidden = render_widget_host_->is_hidden();
} else {
params->widget_params->routing_id = MSG_ROUTING_NONE;
params->widget_params->hidden = true;
}
GetProcess()->GetRendererInterface()->CreateFrame(std::move(params));
if (parent_routing_id != MSG_ROUTING_NONE && render_widget_host_) {
RenderWidgetHostView* rwhv =
RenderWidgetHostViewChildFrame::Create(render_widget_host_);
rwhv->Hide();
}
if (proxy_routing_id != MSG_ROUTING_NONE) {
RenderFrameProxyHost* proxy = RenderFrameProxyHost::FromID(
GetProcess()->GetID(), proxy_routing_id);
proxy->set_render_frame_proxy_created(true);
}
SetRenderFrameCreated(true);
return true;
}
| 14,883 |
68,690 | 0 | vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_vc4_get_hang_state *get_state = data;
struct drm_vc4_get_hang_state_bo *bo_state;
struct vc4_hang_state *kernel_state;
struct drm_vc4_get_hang_state *state;
struct vc4_dev *vc4 = to_vc4_dev(dev);
unsigned long irqflags;
u32 i;
int ret = 0;
spin_lock_irqsave(&vc4->job_lock, irqflags);
kernel_state = vc4->hang_state;
if (!kernel_state) {
spin_unlock_irqrestore(&vc4->job_lock, irqflags);
return -ENOENT;
}
state = &kernel_state->user_state;
/* If the user's array isn't big enough, just return the
* required array size.
*/
if (get_state->bo_count < state->bo_count) {
get_state->bo_count = state->bo_count;
spin_unlock_irqrestore(&vc4->job_lock, irqflags);
return 0;
}
vc4->hang_state = NULL;
spin_unlock_irqrestore(&vc4->job_lock, irqflags);
/* Save the user's BO pointer, so we don't stomp it with the memcpy. */
state->bo = get_state->bo;
memcpy(get_state, state, sizeof(*state));
bo_state = kcalloc(state->bo_count, sizeof(*bo_state), GFP_KERNEL);
if (!bo_state) {
ret = -ENOMEM;
goto err_free;
}
for (i = 0; i < state->bo_count; i++) {
struct vc4_bo *vc4_bo = to_vc4_bo(kernel_state->bo[i]);
u32 handle;
ret = drm_gem_handle_create(file_priv, kernel_state->bo[i],
&handle);
if (ret) {
state->bo_count = i - 1;
goto err;
}
bo_state[i].handle = handle;
bo_state[i].paddr = vc4_bo->base.paddr;
bo_state[i].size = vc4_bo->base.base.size;
}
if (copy_to_user((void __user *)(uintptr_t)get_state->bo,
bo_state,
state->bo_count * sizeof(*bo_state)))
ret = -EFAULT;
kfree(bo_state);
err_free:
vc4_free_hang_state(dev, kernel_state);
err:
return ret;
}
| 14,884 |
102,511 | 0 | bool FileUtilProxy::GetFileInfo(
scoped_refptr<MessageLoopProxy> message_loop_proxy,
const FilePath& file_path,
GetFileInfoCallback* callback) {
return Start(FROM_HERE, message_loop_proxy, new RelayGetFileInfo(
file_path, callback));
}
| 14,885 |
46,731 | 0 | static int cbc_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
struct crypto_sparc64_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
u64 *key_end;
int err;
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
ctx->ops->load_decrypt_keys(&ctx->key[0]);
key_end = &ctx->key[ctx->expanded_key_length / sizeof(u64)];
while ((nbytes = walk.nbytes)) {
unsigned int block_len = nbytes & AES_BLOCK_MASK;
if (likely(block_len)) {
ctx->ops->cbc_decrypt(key_end,
(const u64 *) walk.src.virt.addr,
(u64 *) walk.dst.virt.addr,
block_len, (u64 *) walk.iv);
}
nbytes &= AES_BLOCK_SIZE - 1;
err = blkcipher_walk_done(desc, &walk, nbytes);
}
fprs_write(0);
return err;
}
| 14,886 |
75,241 | 0 | static void compute_accelerated_huffman(Codebook *c)
{
int i, len;
for (i=0; i < FAST_HUFFMAN_TABLE_SIZE; ++i)
c->fast_huffman[i] = -1;
len = c->sparse ? c->sorted_entries : c->entries;
#ifdef STB_VORBIS_FAST_HUFFMAN_SHORT
if (len > 32767) len = 32767; // largest possible value we can encode!
#endif
for (i=0; i < len; ++i) {
if (c->codeword_lengths[i] <= STB_VORBIS_FAST_HUFFMAN_LENGTH) {
uint32 z = c->sparse ? bit_reverse(c->sorted_codewords[i]) : c->codewords[i];
while (z < FAST_HUFFMAN_TABLE_SIZE) {
c->fast_huffman[z] = i;
z += 1 << c->codeword_lengths[i];
}
}
}
}
| 14,887 |
62,656 | 0 | _zip_dirent_size(zip_source_t *src, zip_uint16_t flags, zip_error_t *error)
{
zip_int32_t size;
bool local = (flags & ZIP_EF_LOCAL) != 0;
int i;
zip_uint8_t b[6];
zip_buffer_t *buffer;
size = local ? LENTRYSIZE : CDENTRYSIZE;
if (zip_source_seek(src, local ? 26 : 28, SEEK_CUR) < 0) {
_zip_error_set_from_source(error, src);
return -1;
}
if ((buffer = _zip_buffer_new_from_source(src, local ? 4 : 6, b, error)) == NULL) {
return -1;
}
for (i=0; i<(local ? 2 : 3); i++) {
size += _zip_buffer_get_16(buffer);
}
if (!_zip_buffer_eof(buffer)) {
zip_error_set(error, ZIP_ER_INTERNAL, 0);
_zip_buffer_free(buffer);
return -1;
}
_zip_buffer_free(buffer);
return size;
}
| 14,888 |
40,703 | 0 | static int __init sock_init(void)
{
int err;
/*
* Initialize the network sysctl infrastructure.
*/
err = net_sysctl_init();
if (err)
goto out;
/*
* Initialize skbuff SLAB cache
*/
skb_init();
/*
* Initialize the protocols module.
*/
init_inodecache();
err = register_filesystem(&sock_fs_type);
if (err)
goto out_fs;
sock_mnt = kern_mount(&sock_fs_type);
if (IS_ERR(sock_mnt)) {
err = PTR_ERR(sock_mnt);
goto out_mount;
}
/* The real protocol initialization is performed in later initcalls.
*/
#ifdef CONFIG_NETFILTER
err = netfilter_init();
if (err)
goto out;
#endif
#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING
skb_timestamping_init();
#endif
out:
return err;
out_mount:
unregister_filesystem(&sock_fs_type);
out_fs:
goto out;
}
| 14,889 |
26,560 | 0 | static void packet_mm_close(struct vm_area_struct *vma)
{
struct file *file = vma->vm_file;
struct socket *sock = file->private_data;
struct sock *sk = sock->sk;
if (sk)
atomic_dec(&pkt_sk(sk)->mapped);
}
| 14,890 |
21,673 | 0 | int aio_put_req(struct kiocb *req)
{
struct kioctx *ctx = req->ki_ctx;
int ret;
spin_lock_irq(&ctx->ctx_lock);
ret = __aio_put_req(ctx, req);
spin_unlock_irq(&ctx->ctx_lock);
return ret;
}
| 14,891 |
107,847 | 0 | void PrintWebViewHelper::PrintNode(WebNode* node,
bool script_initiated,
bool is_preview) {
Print(node->document().frame(), node, script_initiated, is_preview);
}
| 14,892 |
145,896 | 0 | bool IsSelecting() { return window_selector_controller()->IsSelecting(); }
| 14,893 |
145,303 | 0 | void PrintNativeHandler::Print(
const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() < 1)
return;
std::vector<std::string> components;
for (int i = 0; i < args.Length(); ++i)
components.push_back(*v8::String::Utf8Value(args[i]));
LOG(ERROR) << base::JoinString(components, ",");
}
| 14,894 |
14,661 | 0 | PHP_FUNCTION(money_format)
{
size_t format_len = 0;
char *format, *p, *e;
double value;
zend_bool check = 0;
zend_string *str;
ssize_t res_len;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "sd", &format, &format_len, &value) == FAILURE) {
return;
}
p = format;
e = p + format_len;
while ((p = memchr(p, '%', (e - p)))) {
if (*(p + 1) == '%') {
p += 2;
} else if (!check) {
check = 1;
p++;
} else {
php_error_docref(NULL, E_WARNING, "Only a single %%i or %%n token can be used");
RETURN_FALSE;
}
}
str = zend_string_alloc(format_len + 1024, 0);
if ((res_len = strfmon(ZSTR_VAL(str), ZSTR_LEN(str), format, value)) < 0) {
zend_string_free(str);
RETURN_FALSE;
}
ZSTR_LEN(str) = (size_t)res_len;
ZSTR_VAL(str)[ZSTR_LEN(str)] = '\0';
RETURN_NEW_STR(zend_string_truncate(str, ZSTR_LEN(str), 0));
}
| 14,895 |
176,152 | 0 | media_status_t AMediaCodecCryptoInfo_getIV(AMediaCodecCryptoInfo* ci, uint8_t *dst) {
if (!ci) {
return AMEDIA_ERROR_INVALID_OBJECT;
}
if (!dst) {
return AMEDIA_ERROR_INVALID_PARAMETER;
}
memcpy(dst, ci->iv, 16);
return AMEDIA_OK;
}
| 14,896 |
155,301 | 0 | ChromeContentBrowserClient::CreateClientCertStore(
content::ResourceContext* resource_context) {
if (!resource_context)
return nullptr;
return ProfileIOData::FromResourceContext(resource_context)
->CreateClientCertStore();
}
| 14,897 |
116,792 | 0 | void WebRTCAudioDeviceTest::DestroyChannel() {
DCHECK(content::BrowserThread::CurrentlyOn(content::BrowserThread::IO));
audio_render_host_->OnChannelClosing();
audio_render_host_->OnFilterRemoved();
audio_input_renderer_host_->OnChannelClosing();
audio_input_renderer_host_->OnFilterRemoved();
channel_.reset();
audio_render_host_ = NULL;
audio_input_renderer_host_ = NULL;
}
| 14,898 |
127,818 | 0 | void PepperMediaDeviceManager::StopEnumerateDevices(int request_id) {
enumerate_callbacks_.erase(request_id);
#if defined(ENABLE_WEBRTC)
base::MessageLoop::current()->PostTask(
FROM_HERE,
base::Bind(&PepperMediaDeviceManager::StopEnumerateDevicesDelayed,
AsWeakPtr(),
request_id));
#endif
}
| 14,899 |