unique_id (int64, 13 to 189k) | target (int64, 0 to 1) | code (string, lengths 20 to 241k) | __index_level_0__ (int64, 0 to 18.9k)
---|---|---|---|
131,437 | 0 | static void limitedWithInvalidMissingDefaultAttributeAttributeSetter(v8::Local<v8::Value> jsValue, const v8::PropertyCallbackInfo<void>& info)
{
TestObjectPython* imp = V8TestObjectPython::toNative(info.Holder());
V8TRYCATCH_FOR_V8STRINGRESOURCE_VOID(V8StringResource<>, cppValue, jsValue);
CustomElementCallbackDispatcher::CallbackDeliveryScope deliveryScope;
imp->setAttribute(HTMLNames::limitedwithinvalidmissingdefaultattributeAttr, cppValue);
}
| 5,000 |
41,150 | 0 | static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
struct inet_connection_sock *icsk = inet_csk(sk);
u32 now;
inet_csk_schedule_ack(sk);
tcp_measure_rcv_mss(sk, skb);
tcp_rcv_rtt_measure(tp);
now = tcp_time_stamp;
if (!icsk->icsk_ack.ato) {
/* The _first_ data packet received, initialize
* delayed ACK engine.
*/
tcp_incr_quickack(sk);
icsk->icsk_ack.ato = TCP_ATO_MIN;
} else {
int m = now - icsk->icsk_ack.lrcvtime;
if (m <= TCP_ATO_MIN / 2) {
/* The fastest case is the first. */
icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + TCP_ATO_MIN / 2;
} else if (m < icsk->icsk_ack.ato) {
icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + m;
if (icsk->icsk_ack.ato > icsk->icsk_rto)
icsk->icsk_ack.ato = icsk->icsk_rto;
} else if (m > icsk->icsk_rto) {
/* Too long gap. Apparently sender failed to
* restart window, so that we send ACKs quickly.
*/
tcp_incr_quickack(sk);
sk_mem_reclaim(sk);
}
}
icsk->icsk_ack.lrcvtime = now;
TCP_ECN_check_ce(tp, skb);
if (skb->len >= 128)
tcp_grow_window(sk, skb);
}
| 5,001 |
186,812 | 1 | void PlatformSensorProviderAndroid::CreateAbsoluteOrientationQuaternionSensor(
JNIEnv* env,
mojo::ScopedSharedBufferMapping mapping,
const CreateSensorCallback& callback) {
ScopedJavaLocalRef<jobject> sensor = Java_PlatformSensorProvider_createSensor(
env, j_object_,
static_cast<jint>(mojom::SensorType::ABSOLUTE_ORIENTATION_QUATERNION));
if (sensor.obj()) {
auto concrete_sensor = base::MakeRefCounted<PlatformSensorAndroid>(
mojom::SensorType::ABSOLUTE_ORIENTATION_QUATERNION, std::move(mapping),
this, sensor);
callback.Run(concrete_sensor);
} else {
auto sensor_fusion_algorithm =
std::make_unique<OrientationQuaternionFusionAlgorithmUsingEulerAngles>(
true /* absolute */);
// If this PlatformSensorFusion object is successfully initialized,
// |callback| will be run with a reference to this object.
PlatformSensorFusion::Create(std::move(mapping), this,
std::move(sensor_fusion_algorithm), callback);
}
}
| 5,002 |
64,833 | 0 | static void my_output_message(j_common_ptr cinfo)
{
return;
}
| 5,003 |
125,707 | 0 | void RenderViewHostImpl::SetAltErrorPageURL(const GURL& url) {
Send(new ViewMsg_SetAltErrorPageURL(GetRoutingID(), url));
}
| 5,004 |
181,876 | 1 | void jas_matrix_divpow2(jas_matrix_t *matrix, int n)
{
int i;
int j;
jas_seqent_t *rowstart;
int rowstep;
jas_seqent_t *data;
if (jas_matrix_numrows(matrix) > 0 && jas_matrix_numcols(matrix) > 0) {
assert(matrix->rows_);
rowstep = jas_matrix_rowstep(matrix);
for (i = matrix->numrows_, rowstart = matrix->rows_[0]; i > 0; --i,
rowstart += rowstep) {
for (j = matrix->numcols_, data = rowstart; j > 0; --j,
++data) {
*data = (*data >= 0) ? ((*data) >> n) :
(-((-(*data)) >> n));
}
}
}
}
| 5,005 |
38,016 | 0 | static inline __u32 __peek_user_per_compat(struct task_struct *child,
addr_t addr)
{
struct compat_per_struct_kernel *dummy32 = NULL;
if (addr == (addr_t) &dummy32->cr9)
/* Control bits of the active per set. */
return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
PER_EVENT_IFETCH : child->thread.per_user.control;
else if (addr == (addr_t) &dummy32->cr10)
/* Start address of the active per set. */
return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
0 : child->thread.per_user.start;
else if (addr == (addr_t) &dummy32->cr11)
/* End address of the active per set. */
return test_thread_flag(TIF_SINGLE_STEP) ?
PSW32_ADDR_INSN : child->thread.per_user.end;
else if (addr == (addr_t) &dummy32->bits)
/* Single-step bit. */
return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
0x80000000 : 0;
else if (addr == (addr_t) &dummy32->starting_addr)
/* Start address of the user specified per set. */
return (__u32) child->thread.per_user.start;
else if (addr == (addr_t) &dummy32->ending_addr)
/* End address of the user specified per set. */
return (__u32) child->thread.per_user.end;
else if (addr == (addr_t) &dummy32->perc_atmid)
/* PER code, ATMID and AI of the last PER trap */
return (__u32) child->thread.per_event.cause << 16;
else if (addr == (addr_t) &dummy32->address)
/* Address of the last PER trap */
return (__u32) child->thread.per_event.address;
else if (addr == (addr_t) &dummy32->access_id)
/* Access id of the last PER trap */
return (__u32) child->thread.per_event.paid << 24;
return 0;
}
| 5,006 |
4,401 | 0 | PHP_METHOD(Phar, hasMetadata)
{
PHAR_ARCHIVE_OBJECT();
RETURN_BOOL(phar_obj->arc.archive->metadata != NULL);
}
| 5,007 |
137,680 | 0 | void PrintPreviewDialogController::RemoveInitiator(
WebContents* initiator) {
WebContents* preview_dialog = GetPrintPreviewForContents(initiator);
DCHECK(preview_dialog);
preview_dialog_map_[preview_dialog] = nullptr;
RemoveObservers(initiator);
PrintViewManager::FromWebContents(initiator)->PrintPreviewDone();
if (content::WebUI* web_ui = preview_dialog->GetWebUI()) {
PrintPreviewUI* print_preview_ui =
static_cast<PrintPreviewUI*>(web_ui->GetController());
if (print_preview_ui)
print_preview_ui->OnInitiatorClosed();
}
}
| 5,008 |
112,374 | 0 | void ResourceDispatcherHostImpl::UnregisterDownloadedTempFile(
int child_id, int request_id) {
DeletableFilesMap& map = registered_temp_files_[child_id];
DeletableFilesMap::iterator found = map.find(request_id);
if (found == map.end())
return;
map.erase(found);
}
| 5,009 |
57,011 | 0 | static void sctp_cmd_t1_timer_update(struct sctp_association *asoc,
sctp_event_timeout_t timer,
char *name)
{
struct sctp_transport *t;
t = asoc->init_last_sent_to;
asoc->init_err_counter++;
if (t->init_sent_count > (asoc->init_cycle + 1)) {
asoc->timeouts[timer] *= 2;
if (asoc->timeouts[timer] > asoc->max_init_timeo) {
asoc->timeouts[timer] = asoc->max_init_timeo;
}
asoc->init_cycle++;
pr_debug("%s: T1[%s] timeout adjustment init_err_counter:%d"
" cycle:%d timeout:%ld\n", __func__, name,
asoc->init_err_counter, asoc->init_cycle,
asoc->timeouts[timer]);
}
}
| 5,010 |
111,900 | 0 | bool ProfileSyncService::IsSyncEnabledAndLoggedIn() {
if (IsManaged() || sync_prefs_.IsStartSuppressed())
return false;
return !signin_->GetAuthenticatedUsername().empty();
}
| 5,011 |
42,309 | 0 | sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
{
int mxsize, cmd_size, k;
int input_size, blocking;
unsigned char opcode;
Sg_device *sdp;
Sg_fd *sfp;
Sg_request *srp;
struct sg_header old_hdr;
sg_io_hdr_t *hp;
unsigned char cmnd[SG_MAX_CDB_SIZE];
if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
return -ENXIO;
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
"sg_write: count=%d\n", (int) count));
if (atomic_read(&sdp->detaching))
return -ENODEV;
if (!((filp->f_flags & O_NONBLOCK) ||
scsi_block_when_processing_errors(sdp->device)))
return -ENXIO;
if (!access_ok(VERIFY_READ, buf, count))
return -EFAULT; /* protects following copy_from_user()s + get_user()s */
if (count < SZ_SG_HEADER)
return -EIO;
if (__copy_from_user(&old_hdr, buf, SZ_SG_HEADER))
return -EFAULT;
blocking = !(filp->f_flags & O_NONBLOCK);
if (old_hdr.reply_len < 0)
return sg_new_write(sfp, filp, buf, count,
blocking, 0, 0, NULL);
if (count < (SZ_SG_HEADER + 6))
return -EIO; /* The minimum scsi command length is 6 bytes. */
if (!(srp = sg_add_request(sfp))) {
SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sdp,
"sg_write: queue full\n"));
return -EDOM;
}
buf += SZ_SG_HEADER;
__get_user(opcode, buf);
if (sfp->next_cmd_len > 0) {
cmd_size = sfp->next_cmd_len;
sfp->next_cmd_len = 0; /* reset so only this write() effected */
} else {
cmd_size = COMMAND_SIZE(opcode); /* based on SCSI command group */
if ((opcode >= 0xc0) && old_hdr.twelve_byte)
cmd_size = 12;
}
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp,
"sg_write: scsi opcode=0x%02x, cmd_size=%d\n", (int) opcode, cmd_size));
/* Determine buffer size. */
input_size = count - cmd_size;
mxsize = (input_size > old_hdr.reply_len) ? input_size : old_hdr.reply_len;
mxsize -= SZ_SG_HEADER;
input_size -= SZ_SG_HEADER;
if (input_size < 0) {
sg_remove_request(sfp, srp);
return -EIO; /* User did not pass enough bytes for this command. */
}
hp = &srp->header;
hp->interface_id = '\0'; /* indicator of old interface tunnelled */
hp->cmd_len = (unsigned char) cmd_size;
hp->iovec_count = 0;
hp->mx_sb_len = 0;
if (input_size > 0)
hp->dxfer_direction = (old_hdr.reply_len > SZ_SG_HEADER) ?
SG_DXFER_TO_FROM_DEV : SG_DXFER_TO_DEV;
else
hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : SG_DXFER_NONE;
hp->dxfer_len = mxsize;
if (hp->dxfer_direction == SG_DXFER_TO_DEV)
hp->dxferp = (char __user *)buf + cmd_size;
else
hp->dxferp = NULL;
hp->sbp = NULL;
hp->timeout = old_hdr.reply_len; /* structure abuse ... */
hp->flags = input_size; /* structure abuse ... */
hp->pack_id = old_hdr.pack_id;
hp->usr_ptr = NULL;
if (__copy_from_user(cmnd, buf, cmd_size))
return -EFAULT;
/*
* SG_DXFER_TO_FROM_DEV is functionally equivalent to SG_DXFER_FROM_DEV,
* but is is possible that the app intended SG_DXFER_TO_DEV, because there
* is a non-zero input_size, so emit a warning.
*/
if (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV) {
static char cmd[TASK_COMM_LEN];
if (strcmp(current->comm, cmd)) {
printk_ratelimited(KERN_WARNING
"sg_write: data in/out %d/%d bytes "
"for SCSI command 0x%x-- guessing "
"data in;\n program %s not setting "
"count and/or reply_len properly\n",
old_hdr.reply_len - (int)SZ_SG_HEADER,
input_size, (unsigned int) cmnd[0],
current->comm);
strcpy(cmd, current->comm);
}
}
k = sg_common_write(sfp, srp, cmnd, sfp->timeout, blocking);
return (k < 0) ? k : count;
}
| 5,012 |
77,695 | 0 | ofputil_put_ofp15_bucket(const struct ofputil_bucket *bucket,
uint32_t bucket_id, enum ofp11_group_type group_type,
struct ofpbuf *openflow, enum ofp_version ofp_version)
{
struct ofp15_bucket *ob;
size_t start, actions_start, actions_len;
start = openflow->size;
ofpbuf_put_zeros(openflow, sizeof *ob);
actions_start = openflow->size;
ofpacts_put_openflow_actions(bucket->ofpacts, bucket->ofpacts_len,
openflow, ofp_version);
actions_len = openflow->size - actions_start;
if (group_type == OFPGT11_SELECT) {
ofpprop_put_u16(openflow, OFPGBPT15_WEIGHT, bucket->weight);
}
if (bucket->watch_port != OFPP_ANY) {
ofpprop_put_be32(openflow, OFPGBPT15_WATCH_PORT,
ofputil_port_to_ofp11(bucket->watch_port));
}
if (bucket->watch_group != OFPG_ANY) {
ofpprop_put_u32(openflow, OFPGBPT15_WATCH_GROUP, bucket->watch_group);
}
ob = ofpbuf_at_assert(openflow, start, sizeof *ob);
ob->len = htons(openflow->size - start);
ob->action_array_len = htons(actions_len);
ob->bucket_id = htonl(bucket_id);
}
| 5,013 |
118,523 | 0 | void RenderFrameImpl::frameFocused() {
Send(new FrameHostMsg_FrameFocused(routing_id_));
}
| 5,014 |
76,019 | 0 | vrrp_nopreempt_handler(__attribute__((unused)) vector_t *strvec)
{
vrrp_t *vrrp = LIST_TAIL_DATA(vrrp_data->vrrp);
vrrp->nopreempt = 1;
}
| 5,015 |
20,675 | 0 | static int emulator_intercept(struct x86_emulate_ctxt *ctxt,
struct x86_instruction_info *info,
enum x86_intercept_stage stage)
{
return kvm_x86_ops->check_intercept(emul_to_vcpu(ctxt), info, stage);
}
| 5,016 |
184,336 | 1 | void FileSystemOperation::GetUsageAndQuotaThenRunTask(
const GURL& origin, FileSystemType type,
const base::Closure& task,
const base::Closure& error_callback) {
quota::QuotaManagerProxy* quota_manager_proxy =
file_system_context()->quota_manager_proxy();
if (!quota_manager_proxy ||
!file_system_context()->GetQuotaUtil(type)) {
// If we don't have the quota manager or the requested filesystem type
// does not support quota, we should be able to let it go.
operation_context_.set_allowed_bytes_growth(kint64max);
task.Run();
return;
}
TaskParamsForDidGetQuota params;
params.origin = origin;
params.type = type;
params.task = task;
params.error_callback = error_callback;
DCHECK(quota_manager_proxy);
DCHECK(quota_manager_proxy->quota_manager());
quota_manager_proxy->quota_manager()->GetUsageAndQuota(
origin,
FileSystemTypeToQuotaStorageType(type),
base::Bind(&FileSystemOperation::DidGetUsageAndQuotaAndRunTask,
base::Unretained(this), params));
}
| 5,017 |
182,102 | 1 | rar_read_ahead(struct archive_read *a, size_t min, ssize_t *avail)
{
struct rar *rar = (struct rar *)(a->format->data);
const void *h = __archive_read_ahead(a, min, avail);
int ret;
if (avail)
{
if (a->archive.read_data_is_posix_read && *avail > (ssize_t)a->archive.read_data_requested)
*avail = a->archive.read_data_requested;
if (*avail > rar->bytes_remaining)
*avail = (ssize_t)rar->bytes_remaining;
if (*avail < 0)
return NULL;
else if (*avail == 0 && rar->main_flags & MHD_VOLUME &&
rar->file_flags & FHD_SPLIT_AFTER)
{
ret = archive_read_format_rar_read_header(a, a->entry);
if (ret == (ARCHIVE_EOF))
{
rar->has_endarc_header = 1;
ret = archive_read_format_rar_read_header(a, a->entry);
}
if (ret != (ARCHIVE_OK))
return NULL;
return rar_read_ahead(a, min, avail);
}
}
return h;
}
| 5,018 |
45,783 | 0 | static int gcm_hash_update(struct aead_request *req,
struct crypto_gcm_req_priv_ctx *pctx,
crypto_completion_t compl,
struct scatterlist *src,
unsigned int len)
{
struct ahash_request *ahreq = &pctx->u.ahreq;
ahash_request_set_callback(ahreq, aead_request_flags(req),
compl, req);
ahash_request_set_crypt(ahreq, src, NULL, len);
return crypto_ahash_update(ahreq);
}
| 5,019 |
30,223 | 0 | int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
int remove, int reset)
{
return ftrace_set_addr(ops, ip, remove, reset, 1);
}
| 5,020 |
120,121 | 0 | bool Layer::NeedMoreUpdates() {
return false;
}
| 5,021 |
86,035 | 0 | void write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
enum iostat_type io_type)
{
struct f2fs_io_info fio = {
.sbi = sbi,
.type = META,
.op = REQ_OP_WRITE,
.op_flags = REQ_SYNC | REQ_META | REQ_PRIO,
.old_blkaddr = page->index,
.new_blkaddr = page->index,
.page = page,
.encrypted_page = NULL,
.in_list = false,
};
if (unlikely(page->index >= MAIN_BLKADDR(sbi)))
fio.op_flags &= ~REQ_META;
set_page_writeback(page);
f2fs_submit_page_write(&fio);
f2fs_update_iostat(sbi, io_type, F2FS_BLKSIZE);
}
| 5,022 |
98,175 | 0 | void AutoFillManager::ParseForms(
const std::vector<webkit_glue::FormData>& forms) {
for (std::vector<FormData>::const_iterator iter =
forms.begin();
iter != forms.end(); ++iter) {
scoped_ptr<FormStructure> form_structure(new FormStructure(*iter));
if (!form_structure->ShouldBeParsed())
continue;
DeterminePossibleFieldTypes(form_structure.get());
form_structures_.push_back(form_structure.release());
}
if (!form_structures_.empty() && !disable_download_manager_requests_)
download_manager_.StartQueryRequest(form_structures_);
}
| 5,023 |
164,037 | 0 | service_manager::Connector* DownloadManagerImpl::GetServiceManagerConnector() {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
if (auto* connection = ServiceManagerConnection::GetForProcess())
return connection->GetConnector();
return nullptr;
}
| 5,024 |
114,151 | 0 | HKEY GetHKEYFromString(const std::wstring &name) {
if (L"HKLM" == name)
return HKEY_LOCAL_MACHINE;
else if (L"HKCR" == name)
return HKEY_CLASSES_ROOT;
else if (L"HKCC" == name)
return HKEY_CURRENT_CONFIG;
else if (L"HKCU" == name)
return HKEY_CURRENT_USER;
else if (L"HKU" == name)
return HKEY_USERS;
return NULL;
}
| 5,025 |
5,373 | 0 | static void Ins_DEPTH( INS_ARG )
{
args[0] = CUR.top;
}
| 5,026 |
18,537 | 0 | static int __ext4_ext_check(const char *function, unsigned int line,
struct inode *inode, struct ext4_extent_header *eh,
int depth)
{
const char *error_msg;
int max = 0;
if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
error_msg = "invalid magic";
goto corrupted;
}
if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
error_msg = "unexpected eh_depth";
goto corrupted;
}
if (unlikely(eh->eh_max == 0)) {
error_msg = "invalid eh_max";
goto corrupted;
}
max = ext4_ext_max_entries(inode, depth);
if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
error_msg = "too large eh_max";
goto corrupted;
}
if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
error_msg = "invalid eh_entries";
goto corrupted;
}
if (!ext4_valid_extent_entries(inode, eh, depth)) {
error_msg = "invalid extent entries";
goto corrupted;
}
/* Verify checksum on non-root extent tree nodes */
if (ext_depth(inode) != depth &&
!ext4_extent_block_csum_verify(inode, eh)) {
error_msg = "extent tree corrupted";
goto corrupted;
}
return 0;
corrupted:
ext4_error_inode(inode, function, line, 0,
"bad header/extent: %s - magic %x, "
"entries %u, max %u(%u), depth %u(%u)",
error_msg, le16_to_cpu(eh->eh_magic),
le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
max, le16_to_cpu(eh->eh_depth), depth);
return -EIO;
}
| 5,027 |
94,494 | 0 | static void rfcomm_tty_copy_pending(struct rfcomm_dev *dev)
{
struct tty_struct *tty = dev->port.tty;
struct sk_buff *skb;
int inserted = 0;
if (!tty)
return;
BT_DBG("dev %p tty %p", dev, tty);
rfcomm_dlc_lock(dev->dlc);
while ((skb = skb_dequeue(&dev->pending))) {
inserted += tty_insert_flip_string(tty, skb->data, skb->len);
kfree_skb(skb);
}
rfcomm_dlc_unlock(dev->dlc);
if (inserted > 0)
tty_flip_buffer_push(tty);
}
| 5,028 |
43,483 | 0 | static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
be128 buf[8];
struct lrw_crypt_req req = {
.tbuf = buf,
.tbuflen = sizeof(buf),
.table_ctx = &ctx->lrw_table,
.crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
.crypt_fn = lrw_xts_decrypt_callback,
};
int ret;
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
kernel_fpu_begin();
ret = lrw_crypt(desc, dst, src, nbytes, &req);
kernel_fpu_end();
return ret;
}
| 5,029 |
53,451 | 0 | delay(int us)
{
struct timeval tv;
tv.tv_sec = 0;
tv.tv_usec = us;
(void) select(1, (fd_set *) 0, (fd_set *) 0, (fd_set *) 0, &tv);
return 1;
}
| 5,030 |
22,980 | 0 | static int decode_attr_fileid(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *fileid)
{
__be32 *p;
*fileid = 0;
if (unlikely(bitmap[0] & (FATTR4_WORD0_FILEID - 1U)))
return -EIO;
if (likely(bitmap[0] & FATTR4_WORD0_FILEID)) {
READ_BUF(8);
READ64(*fileid);
bitmap[0] &= ~FATTR4_WORD0_FILEID;
}
dprintk("%s: fileid=%Lu\n", __func__, (unsigned long long)*fileid);
return 0;
}
| 5,031 |
19,918 | 0 | static void nfs4_init_opendata_res(struct nfs4_opendata *p)
{
p->o_res.f_attr = &p->f_attr;
p->o_res.dir_attr = &p->dir_attr;
p->o_res.seqid = p->o_arg.seqid;
p->c_res.seqid = p->c_arg.seqid;
p->o_res.server = p->o_arg.server;
nfs_fattr_init(&p->f_attr);
nfs_fattr_init(&p->dir_attr);
nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name);
}
| 5,032 |
125,117 | 0 | PpapiPluginProcessHost* PluginServiceImpl::FindPpapiBrokerProcess(
const FilePath& broker_path) {
for (PpapiBrokerProcessHostIterator iter; !iter.Done(); ++iter) {
if (iter->plugin_path() == broker_path)
return *iter;
}
return NULL;
}
| 5,033 |
87,091 | 0 | CJSON_PUBLIC(void) cJSON_AddItemToArray(cJSON *array, cJSON *item)
{
add_item_to_array(array, item);
}
| 5,034 |
39,282 | 0 | int security_member_sid(u32 ssid,
u32 tsid,
u16 tclass,
u32 *out_sid)
{
return security_compute_sid(ssid, tsid, tclass, AVTAB_MEMBER, NULL,
out_sid, false);
}
| 5,035 |
23,138 | 0 | static int nfs4_xdr_enc_link(struct rpc_rqst *req, __be32 *p, const struct nfs4_link_arg *args)
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
.nops = 7,
};
int status;
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
encode_compound_hdr(&xdr, &hdr);
if ((status = encode_putfh(&xdr, args->fh)) != 0)
goto out;
if ((status = encode_savefh(&xdr)) != 0)
goto out;
if ((status = encode_putfh(&xdr, args->dir_fh)) != 0)
goto out;
if ((status = encode_link(&xdr, args->name)) != 0)
goto out;
if ((status = encode_getfattr(&xdr, args->bitmask)) != 0)
goto out;
if ((status = encode_restorefh(&xdr)) != 0)
goto out;
status = encode_getfattr(&xdr, args->bitmask);
out:
return status;
}
| 5,036 |
142,762 | 0 | void HTMLMediaElement::CloseMediaSource() {
if (!media_source_)
return;
media_source_->Close();
media_source_ = nullptr;
}
| 5,037 |
173,751 | 0 | static int handle_rename(struct fuse* fuse, struct fuse_handler* handler,
const struct fuse_in_header* hdr, const struct fuse_rename_in* req,
const char* old_name, const char* new_name)
{
struct node* old_parent_node;
struct node* new_parent_node;
struct node* child_node;
char old_parent_path[PATH_MAX];
char new_parent_path[PATH_MAX];
char old_child_path[PATH_MAX];
char new_child_path[PATH_MAX];
const char* new_actual_name;
int res;
pthread_mutex_lock(&fuse->global->lock);
old_parent_node = lookup_node_and_path_by_id_locked(fuse, hdr->nodeid,
old_parent_path, sizeof(old_parent_path));
new_parent_node = lookup_node_and_path_by_id_locked(fuse, req->newdir,
new_parent_path, sizeof(new_parent_path));
TRACE("[%d] RENAME %s->%s @ %"PRIx64" (%s) -> %"PRIx64" (%s)\n", handler->token,
old_name, new_name,
hdr->nodeid, old_parent_node ? old_parent_node->name : "?",
req->newdir, new_parent_node ? new_parent_node->name : "?");
if (!old_parent_node || !new_parent_node) {
res = -ENOENT;
goto lookup_error;
}
if (!check_caller_access_to_name(fuse, hdr, old_parent_node, old_name, W_OK)) {
res = -EACCES;
goto lookup_error;
}
if (!check_caller_access_to_name(fuse, hdr, new_parent_node, new_name, W_OK)) {
res = -EACCES;
goto lookup_error;
}
child_node = lookup_child_by_name_locked(old_parent_node, old_name);
if (!child_node || get_node_path_locked(child_node,
old_child_path, sizeof(old_child_path)) < 0) {
res = -ENOENT;
goto lookup_error;
}
acquire_node_locked(child_node);
pthread_mutex_unlock(&fuse->global->lock);
/* Special case for renaming a file where destination is same path
* differing only by case. In this case we don't want to look for a case
* insensitive match. This allows commands like "mv foo FOO" to work as expected.
*/
int search = old_parent_node != new_parent_node
|| strcasecmp(old_name, new_name);
if (!(new_actual_name = find_file_within(new_parent_path, new_name,
new_child_path, sizeof(new_child_path), search))) {
res = -ENOENT;
goto io_error;
}
TRACE("[%d] RENAME %s->%s\n", handler->token, old_child_path, new_child_path);
res = rename(old_child_path, new_child_path);
if (res < 0) {
res = -errno;
goto io_error;
}
pthread_mutex_lock(&fuse->global->lock);
res = rename_node_locked(child_node, new_name, new_actual_name);
if (!res) {
remove_node_from_parent_locked(child_node);
derive_permissions_locked(fuse, new_parent_node, child_node);
derive_permissions_recursive_locked(fuse, child_node);
add_node_to_parent_locked(child_node, new_parent_node);
}
goto done;
io_error:
pthread_mutex_lock(&fuse->global->lock);
done:
release_node_locked(child_node);
lookup_error:
pthread_mutex_unlock(&fuse->global->lock);
return res;
}
| 5,038 |
158,949 | 0 | int PDFiumEngine::GetMostVisiblePage() {
if (in_flight_visible_page_)
return *in_flight_visible_page_;
base::AutoReset<bool> defer_page_unload_guard(&defer_page_unload_, true);
CalculateVisiblePages();
return most_visible_page_;
}
| 5,039 |
6,231 | 0 | static void virtio_gpu_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
vdc->realize = virtio_gpu_device_realize;
vdc->unrealize = virtio_gpu_device_unrealize;
vdc->get_config = virtio_gpu_get_config;
vdc->set_config = virtio_gpu_set_config;
vdc->get_features = virtio_gpu_get_features;
vdc->set_features = virtio_gpu_set_features;
vdc->reset = virtio_gpu_reset;
dc->props = virtio_gpu_properties;
dc->vmsd = &vmstate_virtio_gpu;
dc->hotpluggable = false;
}
| 5,040 |
79,517 | 0 | static void print_gss_error(OM_uint32 err_maj, OM_uint32 err_min)
{
OM_uint32 maj_stat, min_stat;
OM_uint32 msg_ctx = 0;
gss_buffer_desc status_string;
char buf_maj[512];
char buf_min[512];
do
{
maj_stat = gss_display_status(&min_stat, err_maj, GSS_C_GSS_CODE,
GSS_C_NO_OID, &msg_ctx, &status_string);
if (GSS_ERROR(maj_stat))
break;
size_t status_len = status_string.length;
if (status_len >= sizeof(buf_maj))
status_len = sizeof(buf_maj) - 1;
strncpy(buf_maj, (char *) status_string.value, status_len);
buf_maj[status_len] = '\0';
gss_release_buffer(&min_stat, &status_string);
maj_stat = gss_display_status(&min_stat, err_min, GSS_C_MECH_CODE,
GSS_C_NULL_OID, &msg_ctx, &status_string);
if (!GSS_ERROR(maj_stat))
{
status_len = status_string.length;
if (status_len >= sizeof(buf_min))
status_len = sizeof(buf_min) - 1;
strncpy(buf_min, (char *) status_string.value, status_len);
buf_min[status_len] = '\0';
gss_release_buffer(&min_stat, &status_string);
}
} while (!GSS_ERROR(maj_stat) && msg_ctx != 0);
mutt_debug(2, "((%s:%d )(%s:%d))\n", buf_maj, err_maj, buf_min, err_min);
}
| 5,041 |
103,115 | 0 | bool Browser::GetSavedMaximizedState() const {
if (CommandLine::ForCurrentProcess()->HasSwitch(switches::kStartMaximized))
return true;
if (maximized_state_ == MAXIMIZED_STATE_MAXIMIZED)
return true;
if (maximized_state_ == MAXIMIZED_STATE_UNMAXIMIZED)
return false;
gfx::Rect restored_bounds;
bool maximized = false;
WindowSizer::GetBrowserWindowBounds(app_name_, restored_bounds, this,
&restored_bounds, &maximized);
return maximized;
}
| 5,042 |
66,397 | 0 | static void gen_reset_hflag(DisasContext *s, uint32_t mask)
{
if (s->flags & mask) {
TCGv_i32 t = tcg_temp_new_i32();
tcg_gen_ld_i32(t, cpu_env, offsetof(CPUX86State, hflags));
tcg_gen_andi_i32(t, t, ~mask);
tcg_gen_st_i32(t, cpu_env, offsetof(CPUX86State, hflags));
tcg_temp_free_i32(t);
s->flags &= ~mask;
}
}
| 5,043 |
81,047 | 0 | static u8 vmx_msr_bitmap_mode(struct kvm_vcpu *vcpu)
{
u8 mode = 0;
if (cpu_has_secondary_exec_ctrls() &&
(vmcs_read32(SECONDARY_VM_EXEC_CONTROL) &
SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE)) {
mode |= MSR_BITMAP_MODE_X2APIC;
if (enable_apicv && kvm_vcpu_apicv_active(vcpu))
mode |= MSR_BITMAP_MODE_X2APIC_APICV;
}
if (is_long_mode(vcpu))
mode |= MSR_BITMAP_MODE_LM;
return mode;
}
| 5,044 |
31,991 | 0 | static void ring_buffer_attach(struct perf_event *event,
struct ring_buffer *rb)
{
unsigned long flags;
if (!list_empty(&event->rb_entry))
return;
spin_lock_irqsave(&rb->event_lock, flags);
if (!list_empty(&event->rb_entry))
goto unlock;
list_add(&event->rb_entry, &rb->event_list);
unlock:
spin_unlock_irqrestore(&rb->event_lock, flags);
}
| 5,045 |
40,097 | 0 | static void free_ioctx_reqs(struct percpu_ref *ref)
{
struct kioctx *ctx = container_of(ref, struct kioctx, reqs);
INIT_WORK(&ctx->free_work, free_ioctx);
schedule_work(&ctx->free_work);
}
| 5,046 |
87,282 | 0 | static inline double GetStopColorOffset(const GradientInfo *gradient,
const ssize_t x,const ssize_t y)
{
switch (gradient->type)
{
case UndefinedGradient:
case LinearGradient:
{
double
gamma,
length,
offset,
scale;
PointInfo
p,
q;
const SegmentInfo
*gradient_vector;
gradient_vector=(&gradient->gradient_vector);
p.x=gradient_vector->x2-gradient_vector->x1;
p.y=gradient_vector->y2-gradient_vector->y1;
q.x=(double) x-gradient_vector->x1;
q.y=(double) y-gradient_vector->y1;
length=sqrt(q.x*q.x+q.y*q.y);
gamma=sqrt(p.x*p.x+p.y*p.y)*length;
gamma=PerceptibleReciprocal(gamma);
scale=p.x*q.x+p.y*q.y;
offset=gamma*scale*length;
return(offset);
}
case RadialGradient:
{
PointInfo
v;
if (gradient->spread == RepeatSpread)
{
v.x=(double) x-gradient->center.x;
v.y=(double) y-gradient->center.y;
return(sqrt(v.x*v.x+v.y*v.y));
}
v.x=(double) (((x-gradient->center.x)*cos(DegreesToRadians(
gradient->angle)))+((y-gradient->center.y)*sin(DegreesToRadians(
gradient->angle))))*PerceptibleReciprocal(gradient->radii.x);
v.y=(double) (((x-gradient->center.x)*sin(DegreesToRadians(
gradient->angle)))-((y-gradient->center.y)*cos(DegreesToRadians(
gradient->angle))))*PerceptibleReciprocal(gradient->radii.y);
return(sqrt(v.x*v.x+v.y*v.y));
}
}
return(0.0);
}
| 5,047 |
155,695 | 0 | void WebXrControllerInputMock::OnFrameSubmitted(
device_test::mojom::SubmittedFrameDataPtr frame_data,
device_test::mojom::XRTestHook::OnFrameSubmittedCallback callback) {
num_submitted_frames_++;
if (wait_loop_ && target_submitted_frames_ == num_submitted_frames_) {
wait_loop_->Quit();
}
std::move(callback).Run();
}
| 5,048 |
84,471 | 0 | chkURLBuffer(Buffer *buf)
{
static char *url_like_pat[] = {
"https?://[a-zA-Z0-9][a-zA-Z0-9:%\\-\\./?=~_\\&+@#,\\$;]*[a-zA-Z0-9_/=\\-]",
"file:/[a-zA-Z0-9:%\\-\\./=_\\+@#,\\$;]*",
#ifdef USE_GOPHER
"gopher://[a-zA-Z0-9][a-zA-Z0-9:%\\-\\./_]*",
#endif /* USE_GOPHER */
"ftp://[a-zA-Z0-9][a-zA-Z0-9:%\\-\\./=_+@#,\\$]*[a-zA-Z0-9_/]",
#ifdef USE_NNTP
"news:[^<> ][^<> ]*",
"nntp://[a-zA-Z0-9][a-zA-Z0-9:%\\-\\./_]*",
#endif /* USE_NNTP */
#ifndef USE_W3MMAILER /* see also chkExternalURIBuffer() */
"mailto:[^<> ][^<> ]*@[a-zA-Z0-9][a-zA-Z0-9\\-\\._]*[a-zA-Z0-9]",
#endif
#ifdef INET6
"https?://[a-zA-Z0-9:%\\-\\./_@]*\\[[a-fA-F0-9:][a-fA-F0-9:\\.]*\\][a-zA-Z0-9:%\\-\\./?=~_\\&+@#,\\$;]*",
"ftp://[a-zA-Z0-9:%\\-\\./_@]*\\[[a-fA-F0-9:][a-fA-F0-9:\\.]*\\][a-zA-Z0-9:%\\-\\./=_+@#,\\$]*",
#endif /* INET6 */
NULL
};
int i;
for (i = 0; url_like_pat[i]; i++) {
reAnchor(buf, url_like_pat[i]);
}
#ifdef USE_EXTERNAL_URI_LOADER
chkExternalURIBuffer(buf);
#endif
buf->check_url |= CHK_URL;
}
| 5,049 |
125,014 | 0 | LayoutUnit RenderFlexibleBox::flowAwareMarginBeforeForChild(RenderBox* child) const
{
switch (transformedWritingMode()) {
case TopToBottomWritingMode:
return child->marginTop();
case BottomToTopWritingMode:
return child->marginBottom();
case LeftToRightWritingMode:
return child->marginLeft();
case RightToLeftWritingMode:
return child->marginRight();
}
ASSERT_NOT_REACHED();
return marginTop();
}
| 5,050 |
147,936 | 0 | void V8TestObject::ToStringMethodCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
RUNTIME_CALL_TIMER_SCOPE_DISABLED_BY_DEFAULT(info.GetIsolate(), "Blink_TestObject_toString");
test_object_v8_internal::ToStringMethod(info);
}
| 5,051 |
61,606 | 0 | static void mxf_read_pixel_layout(AVIOContext *pb, MXFDescriptor *descriptor)
{
int code, value, ofs = 0;
char layout[16] = {0}; /* not for printing, may end up not terminated on purpose */
do {
code = avio_r8(pb);
value = avio_r8(pb);
av_log(NULL, AV_LOG_TRACE, "pixel layout: code %#x\n", code);
if (ofs <= 14) {
layout[ofs++] = code;
layout[ofs++] = value;
} else
break; /* don't read byte by byte on sneaky files filled with lots of non-zeroes */
} while (code != 0); /* SMPTE 377M E.2.46 */
ff_mxf_decode_pixel_layout(layout, &descriptor->pix_fmt);
}
| 5,052 |
171,803 | 0 | bt_status_t btif_hh_connect(bt_bdaddr_t *bd_addr)
{
btif_hh_device_t *dev;
btif_hh_added_device_t *added_dev = NULL;
char bda_str[20];
int i;
BD_ADDR *bda = (BD_ADDR*)bd_addr;
CHECK_BTHH_INIT();
dev = btif_hh_find_dev_by_bda(bd_addr);
BTIF_TRACE_DEBUG("Connect _hh");
sprintf(bda_str, "%02X:%02X:%02X:%02X:%02X:%02X",
(*bda)[0], (*bda)[1], (*bda)[2], (*bda)[3], (*bda)[4], (*bda)[5]);
if (dev == NULL && btif_hh_cb.device_num >= BTIF_HH_MAX_HID) {
BTIF_TRACE_WARNING("%s: Error, exceeded the maximum supported HID device number %d",
__FUNCTION__, BTIF_HH_MAX_HID);
return BT_STATUS_FAIL;
}
for (i = 0; i < BTIF_HH_MAX_ADDED_DEV; i++) {
if (memcmp(&(btif_hh_cb.added_devices[i].bd_addr), bd_addr, BD_ADDR_LEN) == 0) {
added_dev = &btif_hh_cb.added_devices[i];
BTIF_TRACE_WARNING("%s: Device %s already added, attr_mask = 0x%x",
__FUNCTION__, bda_str, added_dev->attr_mask);
}
}
if (added_dev != NULL) {
if (added_dev->dev_handle == BTA_HH_INVALID_HANDLE) {
BTIF_TRACE_ERROR("%s: Error, device %s added but addition failed", __FUNCTION__, bda_str);
memset(&(added_dev->bd_addr), 0, 6);
added_dev->dev_handle = BTA_HH_INVALID_HANDLE;
return BT_STATUS_FAIL;
}
}
/* Not checking the NORMALLY_Connectible flags from sdp record, and anyways sending this
request from host, for subsequent user initiated connection. If the remote is not in
pagescan mode, we will do 2 retries to connect before giving up */
tBTA_SEC sec_mask = BTUI_HH_SECURITY;
btif_hh_cb.status = BTIF_HH_DEV_CONNECTING;
BTA_HhOpen(*bda, BTA_HH_PROTO_RPT_MODE, sec_mask);
HAL_CBACK(bt_hh_callbacks, connection_state_cb, bd_addr, BTHH_CONN_STATE_CONNECTING);
return BT_STATUS_SUCCESS;
}
| 5,053 |
8,622 | 0 | static inline void vmsvga_update_rect(struct vmsvga_state_s *s,
int x, int y, int w, int h)
{
DisplaySurface *surface = qemu_console_surface(s->vga.con);
int line;
int bypl;
int width;
int start;
uint8_t *src;
uint8_t *dst;
if (!vmsvga_verify_rect(surface, __func__, x, y, w, h)) {
/* go for a fullscreen update as fallback */
x = 0;
y = 0;
w = surface_width(surface);
h = surface_height(surface);
}
bypl = surface_stride(surface);
width = surface_bytes_per_pixel(surface) * w;
start = surface_bytes_per_pixel(surface) * x + bypl * y;
src = s->vga.vram_ptr + start;
dst = surface_data(surface) + start;
for (line = h; line > 0; line--, src += bypl, dst += bypl) {
memcpy(dst, src, width);
}
dpy_gfx_update(s->vga.con, x, y, w, h);
}
| 5,054 |
170,564 | 0 | int32_t EqualizerGetBandFreqRange(EffectContext *pContext __unused, int32_t band, uint32_t *pLow,
uint32_t *pHi){
*pLow = bandFreqRange[band][0];
*pHi = bandFreqRange[band][1];
return 0;
}
| 5,055 |
69,736 | 0 | guards_retry_optimistic(const or_options_t *options)
{
if (! entry_list_is_constrained(options))
return 0;
mark_primary_guards_maybe_reachable(get_guard_selection_info());
return 1;
}
| 5,056 |
28,165 | 0 | static void put_pixels_clamped4_c(const int16_t *block, uint8_t *av_restrict pixels,
int line_size)
{
int i;
/* read the pixels */
for(i=0;i<4;i++) {
pixels[0] = av_clip_uint8(block[0]);
pixels[1] = av_clip_uint8(block[1]);
pixels[2] = av_clip_uint8(block[2]);
pixels[3] = av_clip_uint8(block[3]);
pixels += line_size;
block += 8;
}
}
| 5,057 |
162,364 | 0 | MojoAudioOutputStreamTest()
: foreign_socket_(std::make_unique<TestCancelableSyncSocket>()),
client_binding_(&client_, mojo::MakeRequest(&client_ptr_)) {}
| 5,058 |
15,979 | 0 | ImportSingleTIFF_SShort ( const TIFF_Manager::TagInfo & tagInfo, const bool nativeEndian,
SXMPMeta * xmp, const char * xmpNS, const char * xmpProp )
{
try { // Don't let errors with one stop the others.
XMP_Int16 binValue = *((XMP_Int16*)tagInfo.dataPtr);
if ( ! nativeEndian ) Flip2 ( &binValue );
char strValue[20];
snprintf ( strValue, sizeof(strValue), "%hd", binValue ); // AUDIT: Using sizeof(strValue) is safe.
xmp->SetProperty ( xmpNS, xmpProp, strValue );
} catch ( ... ) {
}
} // ImportSingleTIFF_SShort
| 5,059 |
90,643 | 0 | zipx_bzip2_init(struct archive_read *a, struct zip *zip)
{
int r;
/* Deallocate already existing BZ2 decompression context if it
* exists. */
if(zip->bzstream_valid) {
BZ2_bzDecompressEnd(&zip->bzstream);
zip->bzstream_valid = 0;
}
/* Allocate a new BZ2 decompression context. */
memset(&zip->bzstream, 0, sizeof(bz_stream));
r = BZ2_bzDecompressInit(&zip->bzstream, 0, 1);
if(r != BZ_OK) {
archive_set_error(&(a->archive), ARCHIVE_ERRNO_MISC,
"bzip2 initialization failed(%d)",
r);
return ARCHIVE_FAILED;
}
/* Mark the bzstream field to be released in cleanup phase. */
zip->bzstream_valid = 1;
/* (Re)allocate the buffer that will contain decompressed bytes. */
free(zip->uncompressed_buffer);
zip->uncompressed_buffer_size = 256 * 1024;
zip->uncompressed_buffer =
(uint8_t*) malloc(zip->uncompressed_buffer_size);
if (zip->uncompressed_buffer == NULL) {
archive_set_error(&a->archive, ENOMEM,
"No memory for bzip2 decompression");
return ARCHIVE_FATAL;
}
/* Initialization done. */
zip->decompress_init = 1;
return ARCHIVE_OK;
}
| 5,060 |
163,133 | 0 | void HTMLFrameOwnerElement::DispatchLoad() {
DispatchScopedEvent(Event::Create(EventTypeNames::load));
}
| 5,061 |
37,211 | 0 | static __always_inline unsigned long vmcs_readl(unsigned long field)
{
unsigned long value;
asm volatile (__ex_clear(ASM_VMX_VMREAD_RDX_RAX, "%0")
: "=a"(value) : "d"(field) : "cc");
return value;
}
| 5,062 |
608 | 0 | int pdf_xobject_isolated(fz_context *ctx, pdf_xobject *xobj)
{
pdf_obj *group = pdf_dict_get(ctx, xobj->obj, PDF_NAME_Group);
if (group)
return pdf_to_bool(ctx, pdf_dict_get(ctx, group, PDF_NAME_I));
return 0;
}
| 5,063 |
32,336 | 0 | int copy_mount_string(const void __user *data, char **where)
{
char *tmp;
if (!data) {
*where = NULL;
return 0;
}
tmp = strndup_user(data, PAGE_SIZE);
if (IS_ERR(tmp))
return PTR_ERR(tmp);
*where = tmp;
return 0;
}
| 5,064 |
28,971 | 0 | struct kvm_vcpu __percpu **kvm_get_running_vcpus(void)
{
return &kvm_arm_running_vcpu;
}
| 5,065 |
95,860 | 0 | void *Sys_LoadGameDll(const char *name,
intptr_t (QDECL **entryPoint)(intptr_t, ...),
intptr_t (*systemcalls)(intptr_t, ...))
{
void *libHandle;
void (*dllEntry)(intptr_t (*syscallptr)(intptr_t, ...));
assert(name);
Com_Printf( "Loading DLL file: %s\n", name);
libHandle = Sys_LoadLibrary(name);
if(!libHandle)
{
Com_Printf("Sys_LoadGameDll(%s) failed:\n\"%s\"\n", name, Sys_LibraryError());
return NULL;
}
dllEntry = Sys_LoadFunction( libHandle, "dllEntry" );
*entryPoint = Sys_LoadFunction( libHandle, "vmMain" );
if ( !*entryPoint || !dllEntry )
{
Com_Printf ( "Sys_LoadGameDll(%s) failed to find vmMain function:\n\"%s\" !\n", name, Sys_LibraryError( ) );
Sys_UnloadLibrary(libHandle);
return NULL;
}
Com_Printf ( "Sys_LoadGameDll(%s) found vmMain function at %p\n", name, *entryPoint );
dllEntry( systemcalls );
return libHandle;
}
| 5,066 |
165,012 | 0 | bool HTMLCanvasElement::IsAccelerated() const {
return context_ && context_->IsAccelerated();
}
| 5,067 |
38,220 | 0 | static inline void hb_waiters_dec(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
atomic_dec(&hb->waiters);
#endif
}
| 5,068 |
63,095 | 0 | static void platform_device_release(struct device *dev)
{
struct platform_object *pa = container_of(dev, struct platform_object,
pdev.dev);
of_device_node_put(&pa->pdev.dev);
kfree(pa->pdev.dev.platform_data);
kfree(pa->pdev.mfd_cell);
kfree(pa->pdev.resource);
kfree(pa->pdev.driver_override);
kfree(pa);
}
| 5,069 |
16,411 | 0 | CStarter::WriteRecoveryFile( ClassAd *recovery_ad )
{
MyString tmp_file;
FILE *tmp_fp;
if ( recovery_ad == NULL ) {
return;
}
if ( m_recoveryFile.Length() == 0 ) {
m_recoveryFile.sprintf( "%s%cdir_%ld.recover", Execute,
DIR_DELIM_CHAR, (long)daemonCore->getpid() );
}
tmp_file.sprintf( "%s.tmp", m_recoveryFile.Value() );
tmp_fp = safe_fcreate_replace_if_exists( tmp_file.Value(), "w" );
if ( tmp_fp == NULL ) {
dprintf( D_ALWAYS, "Failed to open recovery file %s\n", tmp_file.Value() );
return;
}
if ( recovery_ad->fPrint( tmp_fp ) == FALSE ) {
dprintf( D_ALWAYS, "Failed to write recovery file\n" );
fclose( tmp_fp );
return;
}
if ( fclose( tmp_fp ) != 0 ) {
dprintf( D_ALWAYS, "Failed close recovery file\n" );
unlink( tmp_file.Value() );
return;
}
if ( rotate_file( tmp_file.Value(), m_recoveryFile.Value() ) != 0 ) {
dprintf( D_ALWAYS, "Failed to rename recovery file\n" );
unlink( tmp_file.Value() );
}
}
| 5,070 |
13,983 | 0 | gsicc_new_devicen(gsicc_manager_t *icc_manager)
{
/* Allocate a new deviceN ICC profile entry in the deviceN list */
gsicc_devicen_entry_t *device_n_entry =
gs_alloc_struct(icc_manager->memory, gsicc_devicen_entry_t,
&st_gsicc_devicen_entry, "gsicc_new_devicen");
if (device_n_entry == NULL)
return gs_throw(gs_error_VMerror, "insufficient memory to allocate device n profile");
device_n_entry->next = NULL;
device_n_entry->iccprofile = NULL;
/* Check if we already have one in the manager */
if ( icc_manager->device_n == NULL ) {
/* First one. Need to allocate the DeviceN main object */
icc_manager->device_n = gs_alloc_struct(icc_manager->memory,
gsicc_devicen_t, &st_gsicc_devicen, "gsicc_new_devicen");
if (icc_manager->device_n == NULL)
return gs_throw(gs_error_VMerror, "insufficient memory to allocate device n profile");
icc_manager->device_n->head = device_n_entry;
icc_manager->device_n->final = device_n_entry;
icc_manager->device_n->count = 1;
return 0;
} else {
/* We have one or more in the list. */
icc_manager->device_n->final->next = device_n_entry;
icc_manager->device_n->final = device_n_entry;
icc_manager->device_n->count++;
return 0;
}
}
| 5,071 |
185,056 | 1 | void GpuProcessHostUIShim::OnAcceleratedSurfaceNew(
const GpuHostMsg_AcceleratedSurfaceNew_Params& params) {
RenderWidgetHostViewPort* view = GetRenderWidgetHostViewFromSurfaceID(
params.surface_id);
if (!view)
return;
view->AcceleratedSurfaceNew(
params.width, params.height, params.surface_handle);
}
| 5,072 |
100,547 | 0 | bool has_extension_bindings() const {
return BindingsPolicy::is_extension_enabled(enabled_bindings_);
}
| 5,073 |
123,606 | 0 | bool InspectorPageAgent::deviceMetricsChanged(int width, int height, double deviceScaleFactor, bool emulateViewport, bool fitWindow, double fontScaleFactor, bool textAutosizing)
{
int currentWidth = static_cast<int>(m_state->getLong(PageAgentState::pageAgentScreenWidthOverride));
int currentHeight = static_cast<int>(m_state->getLong(PageAgentState::pageAgentScreenHeightOverride));
double currentDeviceScaleFactor = m_state->getDouble(PageAgentState::pageAgentDeviceScaleFactorOverride, 1);
bool currentEmulateViewport = m_state->getBoolean(PageAgentState::pageAgentEmulateViewport);
bool currentFitWindow = m_state->getBoolean(PageAgentState::pageAgentFitWindow);
double currentFontScaleFactor = m_state->getDouble(PageAgentState::fontScaleFactor, 1);
bool currentTextAutosizing = m_state->getBoolean(PageAgentState::pageAgentTextAutosizingOverride);
return width != currentWidth || height != currentHeight || deviceScaleFactor != currentDeviceScaleFactor || emulateViewport != currentEmulateViewport || fitWindow != currentFitWindow || fontScaleFactor != currentFontScaleFactor || textAutosizing != currentTextAutosizing;
}
| 5,074 |
21,172 | 0 | static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
{
if (!do_swap_account)
return 0;
return cgroup_add_files(cont, ss, memsw_cgroup_files,
ARRAY_SIZE(memsw_cgroup_files));
};
| 5,075 |
141,026 | 0 | void Document::DidRemoveText(const CharacterData& text,
unsigned offset,
unsigned length) {
for (Range* range : ranges_)
range->DidRemoveText(text, offset, length);
}
| 5,076 |
46,849 | 0 | static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
return glue_cbc_decrypt_128bit(&camellia_dec_cbc, desc, dst, src,
nbytes);
}
| 5,077 |
92,620 | 0 | static void numa_group_count_active_nodes(struct numa_group *numa_group)
{
unsigned long faults, max_faults = 0;
int nid, active_nodes = 0;
for_each_online_node(nid) {
faults = group_faults_cpu(numa_group, nid);
if (faults > max_faults)
max_faults = faults;
}
for_each_online_node(nid) {
faults = group_faults_cpu(numa_group, nid);
if (faults * ACTIVE_NODE_FRACTION > max_faults)
active_nodes++;
}
numa_group->max_faults_cpu = max_faults;
numa_group->active_nodes = active_nodes;
}
| 5,078 |
100,047 | 0 | void AppendFormattedHost(const GURL& url,
const std::wstring& languages,
std::wstring* output,
url_parse::Parsed* new_parsed,
size_t* offset_for_adjustment) {
DCHECK(output);
const url_parse::Component& host =
url.parsed_for_possibly_invalid_spec().host;
if (host.is_nonempty()) {
int new_host_begin = static_cast<int>(output->length());
if (new_parsed)
new_parsed->host.begin = new_host_begin;
size_t offset_past_current_output =
(!offset_for_adjustment ||
(*offset_for_adjustment == std::wstring::npos) ||
(*offset_for_adjustment < output->length())) ?
std::wstring::npos : (*offset_for_adjustment - output->length());
size_t* offset_into_host =
(offset_past_current_output >= static_cast<size_t>(host.len)) ?
NULL : &offset_past_current_output;
const std::string& spec = url.possibly_invalid_spec();
DCHECK(host.begin >= 0 &&
((spec.length() == 0 && host.begin == 0) ||
host.begin < static_cast<int>(spec.length())));
output->append(net::IDNToUnicode(&spec[host.begin],
static_cast<size_t>(host.len), languages, offset_into_host));
int new_host_len = static_cast<int>(output->length()) - new_host_begin;
if (new_parsed)
new_parsed->host.len = new_host_len;
if (offset_into_host) {
*offset_for_adjustment = (*offset_into_host == std::wstring::npos) ?
std::wstring::npos : (new_host_begin + *offset_into_host);
} else if (offset_past_current_output != std::wstring::npos) {
*offset_for_adjustment += new_host_len - host.len;
}
} else if (new_parsed) {
new_parsed->host.reset();
}
}
| 5,079 |
24,014 | 0 | static inline void checkThrottle(struct airo_info *ai)
{
int i;
/* Old hardware had a limit on encryption speed */
if (ai->config.authType != AUTH_OPEN && maxencrypt) {
for(i=0; i<8; i++) {
if (ai->config.rates[i] > maxencrypt) {
ai->config.rates[i] = 0;
}
}
}
}
| 5,080 |
117,159 | 0 | CustomInspectorTest()
: InspectorTest()
, m_inspectorWindow(0)
{
}
| 5,081 |
67,719 | 0 | struct sk_buff *sock_dequeue_err_skb(struct sock *sk)
{
struct sk_buff_head *q = &sk->sk_error_queue;
struct sk_buff *skb, *skb_next = NULL;
bool icmp_next = false;
unsigned long flags;
spin_lock_irqsave(&q->lock, flags);
skb = __skb_dequeue(q);
if (skb && (skb_next = skb_peek(q)))
icmp_next = is_icmp_err_skb(skb_next);
spin_unlock_irqrestore(&q->lock, flags);
if (is_icmp_err_skb(skb) && !icmp_next)
sk->sk_err = 0;
if (skb_next)
sk->sk_error_report(sk);
return skb;
}
| 5,082 |
22,893 | 0 | nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
{
struct nfs_open_context *ctx;
struct nfs4_state *state;
unsigned long timeout = NFS4_LOCK_MINTIMEOUT;
int status;
/* verify open state */
ctx = nfs_file_open_context(filp);
state = ctx->state;
if (request->fl_start < 0 || request->fl_end < 0)
return -EINVAL;
if (IS_GETLK(cmd))
return nfs4_proc_getlk(state, F_GETLK, request);
if (!(IS_SETLK(cmd) || IS_SETLKW(cmd)))
return -EINVAL;
if (request->fl_type == F_UNLCK)
return nfs4_proc_unlck(state, cmd, request);
do {
status = nfs4_proc_setlk(state, cmd, request);
if ((status != -EAGAIN) || IS_SETLK(cmd))
break;
timeout = nfs4_set_lock_task_retry(timeout);
status = -ERESTARTSYS;
if (signalled())
break;
} while(status < 0);
return status;
}
| 5,083 |
142,772 | 0 | void HTMLMediaElement::DeferLoad() {
DCHECK(!deferred_load_timer_.IsActive());
DCHECK_EQ(deferred_load_state_, kNotDeferred);
ChangeNetworkStateFromLoadingToIdle();
deferred_load_timer_.StartOneShot(TimeDelta(), FROM_HERE);
deferred_load_state_ = kWaitingForStopDelayingLoadEventTask;
}
| 5,084 |
40,451 | 0 | static int ipxitf_demux_socket(struct ipx_interface *intrfc,
struct sk_buff *skb, int copy)
{
struct ipxhdr *ipx = ipx_hdr(skb);
struct sock *sock1 = NULL, *sock2 = NULL;
struct sk_buff *skb1 = NULL, *skb2 = NULL;
int rc;
if (intrfc == ipx_primary_net && ntohs(ipx->ipx_dest.sock) == 0x451)
sock1 = ncp_connection_hack(intrfc, ipx);
if (!sock1)
/* No special socket found, forward the packet the normal way */
sock1 = ipxitf_find_socket(intrfc, ipx->ipx_dest.sock);
/*
* We need to check if there is a primary net and if
* this is addressed to one of the *SPECIAL* sockets because
* these need to be propagated to the primary net.
* The *SPECIAL* socket list contains: 0x452(SAP), 0x453(RIP) and
* 0x456(Diagnostic).
*/
if (ipx_primary_net && intrfc != ipx_primary_net) {
const int dsock = ntohs(ipx->ipx_dest.sock);
if (dsock == 0x452 || dsock == 0x453 || dsock == 0x456)
/* The appropriate thing to do here is to dup the
* packet and route to the primary net interface via
* ipxitf_send; however, we'll cheat and just demux it
* here. */
sock2 = ipxitf_find_socket(ipx_primary_net,
ipx->ipx_dest.sock);
}
/*
* If there is nothing to do return. The kfree will cancel any charging.
*/
rc = 0;
if (!sock1 && !sock2) {
if (!copy)
kfree_skb(skb);
goto out;
}
/*
* This next segment of code is a little awkward, but it sets it up
* so that the appropriate number of copies of the SKB are made and
* that skb1 and skb2 point to it (them) so that it (they) can be
* demuxed to sock1 and/or sock2. If we are unable to make enough
* copies, we do as much as is possible.
*/
if (copy)
skb1 = skb_clone(skb, GFP_ATOMIC);
else
skb1 = skb;
rc = -ENOMEM;
if (!skb1)
goto out_put;
/* Do we need 2 SKBs? */
if (sock1 && sock2)
skb2 = skb_clone(skb1, GFP_ATOMIC);
else
skb2 = skb1;
if (sock1)
ipxitf_def_skb_handler(sock1, skb1);
if (!skb2)
goto out_put;
if (sock2)
ipxitf_def_skb_handler(sock2, skb2);
rc = 0;
out_put:
if (sock1)
sock_put(sock1);
if (sock2)
sock_put(sock2);
out:
return rc;
}
| 5,085 |
161,913 | 0 | bool PrintRenderFrameHelper::PrintPreviewContext::HasSelection() {
return IsModifiable() && source_frame()->HasSelection();
}
| 5,086 |
87,064 | 0 | static int oidc_discovery(request_rec *r, oidc_cfg *cfg) {
oidc_debug(r, "enter");
/* obtain the URL we're currently accessing, to be stored in the state/session */
char *current_url = oidc_get_current_url(r);
const char *method = oidc_original_request_method(r, cfg, FALSE);
/* generate CSRF token */
char *csrf = NULL;
if (oidc_proto_generate_nonce(r, &csrf, 8) == FALSE)
return HTTP_INTERNAL_SERVER_ERROR;
char *path_scopes = oidc_dir_cfg_path_scope(r);
char *path_auth_request_params = oidc_dir_cfg_path_auth_request_params(r);
char *discover_url = oidc_cfg_dir_discover_url(r);
/* see if there's an external discovery page configured */
if (discover_url != NULL) {
/* yes, assemble the parameters for external discovery */
char *url = apr_psprintf(r->pool, "%s%s%s=%s&%s=%s&%s=%s&%s=%s",
discover_url,
strchr(discover_url, OIDC_CHAR_QUERY) != NULL ?
OIDC_STR_AMP :
OIDC_STR_QUERY,
OIDC_DISC_RT_PARAM, oidc_util_escape_string(r, current_url),
OIDC_DISC_RM_PARAM, method,
OIDC_DISC_CB_PARAM,
oidc_util_escape_string(r, oidc_get_redirect_uri(r, cfg)),
OIDC_CSRF_NAME, oidc_util_escape_string(r, csrf));
if (path_scopes != NULL)
url = apr_psprintf(r->pool, "%s&%s=%s", url, OIDC_DISC_SC_PARAM,
oidc_util_escape_string(r, path_scopes));
if (path_auth_request_params != NULL)
url = apr_psprintf(r->pool, "%s&%s=%s", url, OIDC_DISC_AR_PARAM,
oidc_util_escape_string(r, path_auth_request_params));
/* log what we're about to do */
oidc_debug(r, "redirecting to external discovery page: %s", url);
/* set CSRF cookie */
oidc_util_set_cookie(r, OIDC_CSRF_NAME, csrf, -1,
cfg->cookie_same_site ?
OIDC_COOKIE_EXT_SAME_SITE_STRICT :
NULL);
/* see if we need to preserve POST parameters through Javascript/HTML5 storage */
if (oidc_post_preserve_javascript(r, url, NULL, NULL) == TRUE)
return DONE;
/* do the actual redirect to an external discovery page */
oidc_util_hdr_out_location_set(r, url);
return HTTP_MOVED_TEMPORARILY;
}
/* get a list of all providers configured in the metadata directory */
apr_array_header_t *arr = NULL;
if (oidc_metadata_list(r, cfg, &arr) == FALSE)
return oidc_util_html_send_error(r, cfg->error_template,
"Configuration Error",
"No configured providers found, contact your administrator",
HTTP_UNAUTHORIZED);
/* assemble a where-are-you-from IDP discovery HTML page */
const char *s = " <h3>Select your OpenID Connect Identity Provider</h3>\n";
/* list all configured providers in there */
int i;
for (i = 0; i < arr->nelts; i++) {
const char *issuer = ((const char**) arr->elts)[i];
char *href = apr_psprintf(r->pool,
"%s?%s=%s&%s=%s&%s=%s&%s=%s",
oidc_get_redirect_uri(r, cfg), OIDC_DISC_OP_PARAM,
oidc_util_escape_string(r, issuer),
OIDC_DISC_RT_PARAM, oidc_util_escape_string(r, current_url),
OIDC_DISC_RM_PARAM, method,
OIDC_CSRF_NAME, csrf);
if (path_scopes != NULL)
href = apr_psprintf(r->pool, "%s&%s=%s", href,
OIDC_DISC_SC_PARAM, oidc_util_escape_string(r, path_scopes));
if (path_auth_request_params != NULL)
href = apr_psprintf(r->pool, "%s&%s=%s", href,
OIDC_DISC_AR_PARAM,
oidc_util_escape_string(r, path_auth_request_params));
char *display =
(strstr(issuer, "https://") == NULL) ?
apr_pstrdup(r->pool, issuer) :
apr_pstrdup(r->pool, issuer + strlen("https://"));
/* strip port number */
/* point back to the redirect_uri, where the selection is handled, with an IDP selection and return_to URL */
s = apr_psprintf(r->pool, "%s<p><a href=\"%s\">%s</a></p>\n", s, href,
display);
}
/* add an option to enter an account or issuer name for dynamic OP discovery */
s = apr_psprintf(r->pool, "%s<form method=\"get\" action=\"%s\">\n", s,
oidc_get_redirect_uri(r, cfg));
s = apr_psprintf(r->pool,
"%s<p><input type=\"hidden\" name=\"%s\" value=\"%s\"><p>\n", s,
OIDC_DISC_RT_PARAM, current_url);
s = apr_psprintf(r->pool,
"%s<p><input type=\"hidden\" name=\"%s\" value=\"%s\"><p>\n", s,
OIDC_DISC_RM_PARAM, method);
s = apr_psprintf(r->pool,
"%s<p><input type=\"hidden\" name=\"%s\" value=\"%s\"><p>\n", s,
OIDC_CSRF_NAME, csrf);
if (path_scopes != NULL)
s = apr_psprintf(r->pool,
"%s<p><input type=\"hidden\" name=\"%s\" value=\"%s\"><p>\n", s,
OIDC_DISC_SC_PARAM, path_scopes);
if (path_auth_request_params != NULL)
s = apr_psprintf(r->pool,
"%s<p><input type=\"hidden\" name=\"%s\" value=\"%s\"><p>\n", s,
OIDC_DISC_AR_PARAM, path_auth_request_params);
s =
apr_psprintf(r->pool,
"%s<p>Or enter your account name (eg. "[email protected]", or an IDP identifier (eg. "mitreid.org"):</p>\n",
s);
s = apr_psprintf(r->pool,
"%s<p><input type=\"text\" name=\"%s\" value=\"%s\"></p>\n", s,
OIDC_DISC_OP_PARAM, "");
s = apr_psprintf(r->pool,
"%s<p><input type=\"submit\" value=\"Submit\"></p>\n", s);
s = apr_psprintf(r->pool, "%s</form>\n", s);
oidc_util_set_cookie(r, OIDC_CSRF_NAME, csrf, -1,
cfg->cookie_same_site ? OIDC_COOKIE_EXT_SAME_SITE_STRICT : NULL);
char *javascript = NULL, *javascript_method = NULL;
char *html_head =
"<style type=\"text/css\">body {text-align: center}</style>";
if (oidc_post_preserve_javascript(r, NULL, &javascript,
&javascript_method) == TRUE)
html_head = apr_psprintf(r->pool, "%s%s", html_head, javascript);
/* now send the HTML contents to the user agent */
return oidc_util_html_send(r, "OpenID Connect Provider Discovery",
html_head, javascript_method, s, DONE);
}
| 5,087 |
100,183 | 0 | bool BrowserInit::LaunchWithProfile::OpenStartupURLs(
bool is_process_startup,
const std::vector<GURL>& urls_to_open) {
SessionStartupPref pref = GetSessionStartupPref(command_line_, profile_);
if (is_process_startup &&
command_line_.HasSwitch(switches::kTestingChannelID) &&
!command_line_.HasSwitch(switches::kRestoreLastSession) &&
browser_defaults::kDefaultSessionStartupType !=
SessionStartupPref::DEFAULT) {
return false;
}
switch (pref.type) {
case SessionStartupPref::LAST:
if (!is_process_startup)
return false;
if (!profile_->DidLastSessionExitCleanly() &&
!command_line_.HasSwitch(switches::kRestoreLastSession)) {
return false;
}
SessionRestore::RestoreSessionSynchronously(profile_, urls_to_open);
return true;
case SessionStartupPref::URLS:
if (urls_to_open.empty()) {
if (pref.urls.empty()) {
std::vector<GURL> urls;
urls.push_back(GURL(chrome::kChromeUINewTabURL));
OpenURLsInBrowser(NULL, is_process_startup, urls);
return true;
}
OpenURLsInBrowser(NULL, is_process_startup, pref.urls);
return true;
}
return false;
default:
return false;
}
}
| 5,088 |
4,563 | 0 | static int php_openssl_load_rand_file(const char * file, int *egdsocket, int *seeded) /* {{{ */
{
char buffer[MAXPATHLEN];
*egdsocket = 0;
*seeded = 0;
if (file == NULL) {
file = RAND_file_name(buffer, sizeof(buffer));
#ifdef HAVE_RAND_EGD
} else if (RAND_egd(file) > 0) {
/* if the given filename is an EGD socket, don't
* write anything back to it */
*egdsocket = 1;
return SUCCESS;
#endif
}
if (file == NULL || !RAND_load_file(file, -1)) {
if (RAND_status() == 0) {
php_error_docref(NULL, E_WARNING, "unable to load random state; not enough random data!");
return FAILURE;
}
return FAILURE;
}
*seeded = 1;
return SUCCESS;
}
/* }}} */
| 5,089 |
149,073 | 0 | static void clearSelect(sqlite3 *db, Select *p, int bFree){
while( p ){
Select *pPrior = p->pPrior;
sqlite3ExprListDelete(db, p->pEList);
sqlite3SrcListDelete(db, p->pSrc);
sqlite3ExprDelete(db, p->pWhere);
sqlite3ExprListDelete(db, p->pGroupBy);
sqlite3ExprDelete(db, p->pHaving);
sqlite3ExprListDelete(db, p->pOrderBy);
sqlite3ExprDelete(db, p->pLimit);
sqlite3ExprDelete(db, p->pOffset);
if( p->pWith ) sqlite3WithDelete(db, p->pWith);
if( bFree ) sqlite3DbFree(db, p);
p = pPrior;
bFree = 1;
}
}
| 5,090 |
73,550 | 0 | static void DestroyQuantumPixels(QuantumInfo *quantum_info)
{
register ssize_t
i;
ssize_t
extent;
assert(quantum_info != (QuantumInfo *) NULL);
assert(quantum_info->signature == MagickCoreSignature);
assert(quantum_info->pixels != (unsigned char **) NULL);
extent=(ssize_t) quantum_info->extent;
for (i=0; i < (ssize_t) quantum_info->number_threads; i++)
if (quantum_info->pixels[i] != (unsigned char *) NULL)
{
/*
Did we overrun our quantum buffer?
*/
assert(quantum_info->pixels[i][extent] == QuantumSignature);
quantum_info->pixels[i]=(unsigned char *) RelinquishMagickMemory(
quantum_info->pixels[i]);
}
quantum_info->pixels=(unsigned char **) RelinquishMagickMemory(
quantum_info->pixels);
}
| 5,091 |
67,634 | 0 | int fscrypt_decrypt_page(const struct inode *inode, struct page *page,
unsigned int len, unsigned int offs, u64 lblk_num)
{
if (!(inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES))
BUG_ON(!PageLocked(page));
return fscrypt_do_page_crypto(inode, FS_DECRYPT, lblk_num, page, page,
len, offs, GFP_NOFS);
}
| 5,092 |
65,797 | 0 | nfsd4_encode_create(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_create *create)
{
struct xdr_stream *xdr = &resp->xdr;
__be32 *p;
if (!nfserr) {
p = xdr_reserve_space(xdr, 20);
if (!p)
return nfserr_resource;
encode_cinfo(p, &create->cr_cinfo);
nfserr = nfsd4_encode_bitmap(xdr, create->cr_bmval[0],
create->cr_bmval[1], create->cr_bmval[2]);
}
return nfserr;
}
| 5,093 |
50,147 | 0 | ZEND_API void * __zend_realloc(void *p, size_t len)
{
p = realloc(p, len);
if (EXPECTED(p)) {
return p;
}
zend_out_of_memory();
}
| 5,094 |
91,321 | 0 | static void send_panic_events(struct ipmi_smi *intf, char *str)
{
struct kernel_ipmi_msg msg;
unsigned char data[16];
struct ipmi_system_interface_addr *si;
struct ipmi_addr addr;
char *p = str;
struct ipmi_ipmb_addr *ipmb;
int j;
if (ipmi_send_panic_event == IPMI_SEND_PANIC_EVENT_NONE)
return;
si = (struct ipmi_system_interface_addr *) &addr;
si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
si->channel = IPMI_BMC_CHANNEL;
si->lun = 0;
/* Fill in an event telling that we have failed. */
msg.netfn = 0x04; /* Sensor or Event. */
msg.cmd = 2; /* Platform event command. */
msg.data = data;
msg.data_len = 8;
data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */
data[1] = 0x03; /* This is for IPMI 1.0. */
data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */
/*
* Put a few breadcrumbs in. Hopefully later we can add more things
* to make the panic events more useful.
*/
if (str) {
data[3] = str[0];
data[6] = str[1];
data[7] = str[2];
}
/* Send the event announcing the panic. */
ipmi_panic_request_and_wait(intf, &addr, &msg);
/*
* On every interface, dump a bunch of OEM event holding the
* string.
*/
if (ipmi_send_panic_event != IPMI_SEND_PANIC_EVENT_STRING || !str)
return;
/*
* intf_num is used as an marker to tell if the
* interface is valid. Thus we need a read barrier to
* make sure data fetched before checking intf_num
* won't be used.
*/
smp_rmb();
/*
* First job here is to figure out where to send the
* OEM events. There's no way in IPMI to send OEM
* events using an event send command, so we have to
* find the SEL to put them in and stick them in
* there.
*/
/* Get capabilities from the get device id. */
intf->local_sel_device = 0;
intf->local_event_generator = 0;
intf->event_receiver = 0;
/* Request the device info from the local MC. */
msg.netfn = IPMI_NETFN_APP_REQUEST;
msg.cmd = IPMI_GET_DEVICE_ID_CMD;
msg.data = NULL;
msg.data_len = 0;
intf->null_user_handler = device_id_fetcher;
ipmi_panic_request_and_wait(intf, &addr, &msg);
if (intf->local_event_generator) {
/* Request the event receiver from the local MC. */
msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST;
msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD;
msg.data = NULL;
msg.data_len = 0;
intf->null_user_handler = event_receiver_fetcher;
ipmi_panic_request_and_wait(intf, &addr, &msg);
}
intf->null_user_handler = NULL;
/*
* Validate the event receiver. The low bit must not
* be 1 (it must be a valid IPMB address), it cannot
* be zero, and it must not be my address.
*/
if (((intf->event_receiver & 1) == 0)
&& (intf->event_receiver != 0)
&& (intf->event_receiver != intf->addrinfo[0].address)) {
/*
* The event receiver is valid, send an IPMB
* message.
*/
ipmb = (struct ipmi_ipmb_addr *) &addr;
ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
ipmb->channel = 0; /* FIXME - is this right? */
ipmb->lun = intf->event_receiver_lun;
ipmb->slave_addr = intf->event_receiver;
} else if (intf->local_sel_device) {
/*
* The event receiver was not valid (or was
* me), but I am an SEL device, just dump it
* in my SEL.
*/
si = (struct ipmi_system_interface_addr *) &addr;
si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
si->channel = IPMI_BMC_CHANNEL;
si->lun = 0;
} else
return; /* No where to send the event. */
msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
msg.data = data;
msg.data_len = 16;
j = 0;
while (*p) {
int size = strlen(p);
if (size > 11)
size = 11;
data[0] = 0;
data[1] = 0;
data[2] = 0xf0; /* OEM event without timestamp. */
data[3] = intf->addrinfo[0].address;
data[4] = j++; /* sequence # */
/*
* Always give 11 bytes, so strncpy will fill
* it with zeroes for me.
*/
strncpy(data+5, p, 11);
p += size;
ipmi_panic_request_and_wait(intf, &addr, &msg);
}
}
| 5,095 |
37,494 | 0 | static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, int nr)
{
kvm->arch.n_used_mmu_pages += nr;
percpu_counter_add(&kvm_total_used_mmu_pages, nr);
}
| 5,096 |
11,351 | 0 | fbCombineConjointOutReverseU (CARD32 *dest, const CARD32 *src, int width)
{
fbCombineConjointGeneralU (dest, src, width, CombineBOut);
}
| 5,097 |
94,672 | 0 | int i2d_PKCS7_bio(BIO *bp, PKCS7 *p7)
{
return ASN1_item_i2d_bio(ASN1_ITEM_rptr(PKCS7), bp, p7);
}
| 5,098 |
10,617 | 0 | Ins_JMPR( TT_ExecContext exc,
FT_Long* args )
{
if ( args[0] == 0 && exc->args == 0 )
{
exc->error = FT_THROW( Bad_Argument );
return;
}
exc->IP += args[0];
if ( exc->IP < 0 ||
( exc->callTop > 0 &&
exc->IP > exc->callStack[exc->callTop - 1].Def->end ) )
{
exc->error = FT_THROW( Bad_Argument );
return;
}
exc->step_ins = FALSE;
if ( args[0] < 0 )
{
if ( ++exc->neg_jump_counter > exc->neg_jump_counter_max )
exc->error = FT_THROW( Execution_Too_Long );
}
}
| 5,099 |
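
The header at the top of this table describes its four columns: `unique_id` (int64), `target` (int64, 0 or 1), `code` (string containing a C/C++ function), and `__index_level_0__` (int64). Below is a minimal sketch of how a table with this schema could be loaded and inspected, assuming it is published as a Hugging Face dataset; the dataset path `user/dataset-name` is a placeholder, not the real identifier.

```python
# Minimal sketch: load a dataset with the schema shown in the header above and
# inspect one row. "user/dataset-name" is a placeholder path, not the real one.
from datasets import load_dataset

ds = load_dataset("user/dataset-name", split="train")

# Column names should match the header: unique_id, target, code, __index_level_0__
print(ds.column_names)

# Each row pairs an integer id and a 0/1 target label with a C/C++ source snippet.
row = ds[0]
print(row["unique_id"], row["target"], len(row["code"]))
```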