unique_id (int64, 13–189k) | target (int64, 0–1) | code (string, lengths 20–241k) | __index_level_0__ (int64, 0–18.9k) |
---|---|---|---|
29,834 | 0 | cifs_sb_tlink(struct cifs_sb_info *cifs_sb)
{
int ret;
kuid_t fsuid = current_fsuid();
struct tcon_link *tlink, *newtlink;
if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
return cifs_get_tlink(cifs_sb_master_tlink(cifs_sb));
spin_lock(&cifs_sb->tlink_tree_lock);
tlink = tlink_rb_search(&cifs_sb->tlink_tree, fsuid);
if (tlink)
cifs_get_tlink(tlink);
spin_unlock(&cifs_sb->tlink_tree_lock);
if (tlink == NULL) {
newtlink = kzalloc(sizeof(*tlink), GFP_KERNEL);
if (newtlink == NULL)
return ERR_PTR(-ENOMEM);
newtlink->tl_uid = fsuid;
newtlink->tl_tcon = ERR_PTR(-EACCES);
set_bit(TCON_LINK_PENDING, &newtlink->tl_flags);
set_bit(TCON_LINK_IN_TREE, &newtlink->tl_flags);
cifs_get_tlink(newtlink);
spin_lock(&cifs_sb->tlink_tree_lock);
/* was one inserted after previous search? */
tlink = tlink_rb_search(&cifs_sb->tlink_tree, fsuid);
if (tlink) {
cifs_get_tlink(tlink);
spin_unlock(&cifs_sb->tlink_tree_lock);
kfree(newtlink);
goto wait_for_construction;
}
tlink = newtlink;
tlink_rb_insert(&cifs_sb->tlink_tree, tlink);
spin_unlock(&cifs_sb->tlink_tree_lock);
} else {
wait_for_construction:
ret = wait_on_bit(&tlink->tl_flags, TCON_LINK_PENDING,
cifs_sb_tcon_pending_wait,
TASK_INTERRUPTIBLE);
if (ret) {
cifs_put_tlink(tlink);
return ERR_PTR(ret);
}
/* if it's good, return it */
if (!IS_ERR(tlink->tl_tcon))
return tlink;
/* return error if we tried this already recently */
if (time_before(jiffies, tlink->tl_time + TLINK_ERROR_EXPIRE)) {
cifs_put_tlink(tlink);
return ERR_PTR(-EACCES);
}
if (test_and_set_bit(TCON_LINK_PENDING, &tlink->tl_flags))
goto wait_for_construction;
}
tlink->tl_tcon = cifs_construct_tcon(cifs_sb, fsuid);
clear_bit(TCON_LINK_PENDING, &tlink->tl_flags);
wake_up_bit(&tlink->tl_flags, TCON_LINK_PENDING);
if (IS_ERR(tlink->tl_tcon)) {
cifs_put_tlink(tlink);
return ERR_PTR(-EACCES);
}
return tlink;
}
| 16,200 |
171,417 | 0 | static int do_explicit_dump(pid_t tid, bool dump_backtrace) {
fprintf(stdout, "Sending request to dump task %d.\n", tid);
if (dump_backtrace) {
fflush(stdout);
if (dump_backtrace_to_file(tid, fileno(stdout)) < 0) {
fputs("Error dumping backtrace.\n", stderr);
return 1;
}
} else {
char tombstone_path[PATH_MAX];
if (dump_tombstone(tid, tombstone_path, sizeof(tombstone_path)) < 0) {
fputs("Error dumping tombstone.\n", stderr);
return 1;
}
fprintf(stderr, "Tombstone written to: %s\n", tombstone_path);
}
return 0;
}
| 16,201 |
22,121 | 0 | static int ptrace_do_wait(struct task_struct *tsk, int *notask_error,
enum pid_type type, struct pid *pid, int options,
struct siginfo __user *infop, int __user *stat_addr,
struct rusage __user *ru)
{
struct task_struct *p;
/*
* Traditionally we see ptrace'd stopped tasks regardless of options.
*/
options |= WUNTRACED;
list_for_each_entry(p, &tsk->ptraced, ptrace_entry) {
int ret = wait_consider_task(tsk, 1, p, notask_error,
type, pid, options,
infop, stat_addr, ru);
if (ret)
return ret;
}
return 0;
}
| 16,202 |
132,103 | 0 | bool LayoutBlockFlow::mustDiscardMarginBefore() const
{
return style()->marginBeforeCollapse() == MDISCARD || (m_rareData && m_rareData->m_discardMarginBefore);
}
| 16,203 |
106,213 | 0 | void setJSTestObjLongLongSequenceAttr(ExecState* exec, JSObject* thisObject, JSValue value)
{
JSTestObj* castedThis = jsCast<JSTestObj*>(thisObject);
TestObj* impl = static_cast<TestObj*>(castedThis->impl());
impl->setLongLongSequenceAttr(toNativeArray<long long>(exec, value));
}
| 16,204 |
134,114 | 0 | void InputMethodIBus::OnTextInputTypeChanged(const TextInputClient* client) {
if (IsTextInputClientFocused(client)) {
ResetContext();
UpdateContextFocusState();
if (previous_textinput_type_ != client->GetTextInputType())
OnInputMethodChanged();
}
InputMethodBase::OnTextInputTypeChanged(client);
}
| 16,205 |
97,490 | 0 | void FrameLoader::setOpener(Frame* opener)
{
if (m_opener)
m_opener->loader()->m_openedFrames.remove(m_frame);
if (opener)
opener->loader()->m_openedFrames.add(m_frame);
m_opener = opener;
if (m_frame->document()) {
m_frame->document()->initSecurityContext();
m_frame->domWindow()->setSecurityOrigin(m_frame->document()->securityOrigin());
}
}
| 16,206 |
43,428 | 0 | SYSCALL_DEFINE5(execveat,
int, fd, const char __user *, filename,
const char __user *const __user *, argv,
const char __user *const __user *, envp,
int, flags)
{
int lookup_flags = (flags & AT_EMPTY_PATH) ? LOOKUP_EMPTY : 0;
return do_execveat(fd,
getname_flags(filename, lookup_flags, NULL),
argv, envp, flags);
}
| 16,207 |
175,636 | 0 | void SoftAACEncoder::onQueueFilled(OMX_U32 portIndex) {
if (mSignalledError) {
return;
}
List<BufferInfo *> &inQueue = getPortQueue(0);
List<BufferInfo *> &outQueue = getPortQueue(1);
if (!mSentCodecSpecificData) {
if (outQueue.empty()) {
return;
}
BufferInfo *outInfo = *outQueue.begin();
OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader;
outHeader->nFilledLen = sizeof(mAudioSpecificConfigData);
outHeader->nFlags = OMX_BUFFERFLAG_CODECCONFIG;
uint8_t *out = outHeader->pBuffer + outHeader->nOffset;
memcpy(out, mAudioSpecificConfigData, sizeof(mAudioSpecificConfigData));
#if 0
ALOGI("sending codec specific data.");
hexdump(out, sizeof(mAudioSpecificConfigData));
#endif
outQueue.erase(outQueue.begin());
outInfo->mOwnedByUs = false;
notifyFillBufferDone(outHeader);
mSentCodecSpecificData = true;
}
size_t numBytesPerInputFrame =
mNumChannels * kNumSamplesPerFrame * sizeof(int16_t);
for (;;) {
while (mInputSize < numBytesPerInputFrame) {
if (mSawInputEOS || inQueue.empty()) {
return;
}
BufferInfo *inInfo = *inQueue.begin();
OMX_BUFFERHEADERTYPE *inHeader = inInfo->mHeader;
const void *inData = inHeader->pBuffer + inHeader->nOffset;
size_t copy = numBytesPerInputFrame - mInputSize;
if (copy > inHeader->nFilledLen) {
copy = inHeader->nFilledLen;
}
if (mInputFrame == NULL) {
mInputFrame = new int16_t[kNumSamplesPerFrame * mNumChannels];
}
if (mInputSize == 0) {
mInputTimeUs = inHeader->nTimeStamp;
}
memcpy((uint8_t *)mInputFrame + mInputSize, inData, copy);
mInputSize += copy;
inHeader->nOffset += copy;
inHeader->nFilledLen -= copy;
inHeader->nTimeStamp +=
(copy * 1000000ll / mSampleRate)
/ (mNumChannels * sizeof(int16_t));
if (inHeader->nFilledLen == 0) {
if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) {
ALOGV("saw input EOS");
mSawInputEOS = true;
memset((uint8_t *)mInputFrame + mInputSize,
0,
numBytesPerInputFrame - mInputSize);
mInputSize = numBytesPerInputFrame;
}
inQueue.erase(inQueue.begin());
inInfo->mOwnedByUs = false;
notifyEmptyBufferDone(inHeader);
inData = NULL;
inHeader = NULL;
inInfo = NULL;
}
}
if (outQueue.empty()) {
return;
}
BufferInfo *outInfo = *outQueue.begin();
OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader;
VO_CODECBUFFER inputData;
memset(&inputData, 0, sizeof(inputData));
inputData.Buffer = (unsigned char *)mInputFrame;
inputData.Length = numBytesPerInputFrame;
CHECK(VO_ERR_NONE ==
mApiHandle->SetInputData(mEncoderHandle, &inputData));
VO_CODECBUFFER outputData;
memset(&outputData, 0, sizeof(outputData));
VO_AUDIO_OUTPUTINFO outputInfo;
memset(&outputInfo, 0, sizeof(outputInfo));
uint8_t *outPtr = (uint8_t *)outHeader->pBuffer + outHeader->nOffset;
size_t outAvailable = outHeader->nAllocLen - outHeader->nOffset;
VO_U32 ret = VO_ERR_NONE;
size_t nOutputBytes = 0;
do {
outputData.Buffer = outPtr;
outputData.Length = outAvailable - nOutputBytes;
ret = mApiHandle->GetOutputData(
mEncoderHandle, &outputData, &outputInfo);
if (ret == VO_ERR_NONE) {
outPtr += outputData.Length;
nOutputBytes += outputData.Length;
}
} while (ret != VO_ERR_INPUT_BUFFER_SMALL);
outHeader->nFilledLen = nOutputBytes;
outHeader->nFlags = OMX_BUFFERFLAG_ENDOFFRAME;
if (mSawInputEOS) {
outHeader->nFlags = OMX_BUFFERFLAG_EOS;
}
outHeader->nTimeStamp = mInputTimeUs;
#if 0
ALOGI("sending %d bytes of data (time = %lld us, flags = 0x%08lx)",
nOutputBytes, mInputTimeUs, outHeader->nFlags);
hexdump(outHeader->pBuffer + outHeader->nOffset, outHeader->nFilledLen);
#endif
outQueue.erase(outQueue.begin());
outInfo->mOwnedByUs = false;
notifyFillBufferDone(outHeader);
outHeader = NULL;
outInfo = NULL;
mInputSize = 0;
}
}
| 16,208 |
147,720 | 0 | void V8TestObject::RaisesExceptionLongAttributeAttributeGetterCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
RUNTIME_CALL_TIMER_SCOPE_DISABLED_BY_DEFAULT(info.GetIsolate(), "Blink_TestObject_raisesExceptionLongAttribute_Getter");
test_object_v8_internal::RaisesExceptionLongAttributeAttributeGetter(info);
}
| 16,209 |
158,663 | 0 | void BackRenderbuffer::Create() {
ScopedGLErrorSuppressor suppressor("BackRenderbuffer::Create",
decoder_->state_.GetErrorState());
Destroy();
api()->glGenRenderbuffersEXTFn(1, &id_);
}
| 16,210 |
188,174 | 1 | void install_local_socket(asocket* s) {
adb_mutex_lock(&socket_list_lock);
s->id = local_socket_next_id++;
// Socket ids should never be 0.
if (local_socket_next_id == 0) {
local_socket_next_id = 1;
}
insert_local_socket(s, &local_socket_list);
adb_mutex_unlock(&socket_list_lock);
}
| 16,211 |
56,225 | 0 | compat_user_backtrace(struct compat_frame_tail __user *tail,
struct perf_callchain_entry *entry)
{
struct compat_frame_tail buftail;
unsigned long err;
/* Also check accessibility of one struct frame_tail beyond */
if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
return NULL;
pagefault_disable();
err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
pagefault_enable();
if (err)
return NULL;
perf_callchain_store(entry, buftail.lr);
/*
* Frame pointers should strictly progress back up the stack
* (towards higher addresses).
*/
if (tail + 1 >= (struct compat_frame_tail __user *)
compat_ptr(buftail.fp))
return NULL;
return (struct compat_frame_tail __user *)compat_ptr(buftail.fp) - 1;
}
| 16,212 |
179,226 | 1 | void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
struct br_ip *group, int type)
{
struct br_mdb_entry entry;
entry.ifindex = port->dev->ifindex;
entry.addr.proto = group->proto;
entry.addr.u.ip4 = group->u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
entry.addr.u.ip6 = group->u.ip6;
#endif
__br_mdb_notify(dev, &entry, type);
}
| 16,213 |
13,327 | 0 | pdf14_pop_transparency_mask(pdf14_ctx *ctx, gs_gstate *pgs, gx_device *dev)
{
pdf14_buf *tos = ctx->stack;
byte *new_data_buf;
int icc_match;
cmm_profile_t *des_profile = tos->parent_color_info_procs->icc_profile; /* If set, this should be a gray profile */
cmm_profile_t *src_profile;
gsicc_rendering_param_t rendering_params;
gsicc_link_t *icc_link;
gsicc_rendering_param_t render_cond;
cmm_dev_profile_t *dev_profile;
dev_proc(dev, get_profile)(dev, &dev_profile);
gsicc_extract_profile(GS_UNKNOWN_TAG, dev_profile, &src_profile,
&render_cond);
ctx->smask_depth -= 1;
/* icc_match == -1 means old non-icc code.
icc_match == 0 means use icc code
icc_match == 1 mean no conversion needed */
if ( des_profile != NULL && src_profile != NULL ) {
icc_match = (des_profile->hashcode == src_profile->hashcode);
} else {
icc_match = -1;
}
if_debug1m('v', ctx->memory, "[v]pdf14_pop_transparency_mask, idle=%d\n",
tos->idle);
ctx->stack = tos->saved;
tos->saved = NULL; /* To avoid issues with GC */
if (tos->mask_stack) {
/* During the soft mask push, the mask_stack was copied (not moved) from
the ctx to the tos mask_stack. We are done with this now so it is safe to
just set to NULL. However, before we do that we must perform
rc decrement to match the increment that occured was made. Also,
if this is the last ref count of the rc_mask, we should free the
buffer now since no other groups need it. */
rc_decrement(tos->mask_stack->rc_mask,
"pdf14_pop_transparency_mask(tos->mask_stack->rc_mask)");
if (tos->mask_stack->rc_mask) {
if (tos->mask_stack->rc_mask->rc.ref_count == 1){
rc_decrement(tos->mask_stack->rc_mask,
"pdf14_pop_transparency_mask(tos->mask_stack->rc_mask)");
}
}
tos->mask_stack = NULL;
}
if (tos->data == NULL ) {
/* This can occur in clist rendering if the soft mask does
not intersect the current band. It would be nice to
catch this earlier and just avoid creating the structure
to begin with. For now we need to delete the structure
that was created. Only delete if the alpha value is 255 */
if (tos->alpha == 255) {
pdf14_buf_free(tos, ctx->memory);
if (ctx->mask_stack != NULL) {
pdf14_free_mask_stack(ctx, ctx->memory);
}
} else {
/* Assign as mask buffer */
if (ctx->mask_stack != NULL) {
pdf14_free_mask_stack(ctx, ctx->memory);
}
ctx->mask_stack = pdf14_mask_element_new(ctx->memory);
ctx->mask_stack->rc_mask = pdf14_rcmask_new(ctx->memory);
ctx->mask_stack->rc_mask->mask_buf = tos;
}
ctx->smask_blend = false; /* just in case */
} else {
/* If we are already in the source space then there is no reason
to do the transformation */
/* Lets get this to a monochrome buffer and map it to a luminance only value */
/* This will reduce our memory. We won't reuse the existing one, due */
/* Due to the fact that on certain systems we may have issues recovering */
/* the data after a resize */
new_data_buf = gs_alloc_bytes(ctx->memory, tos->planestride,
"pdf14_pop_transparency_mask");
if (new_data_buf == NULL)
return_error(gs_error_VMerror);
/* Initialize with 0. Need to do this since in Smask_Luminosity_Mapping
we won't be filling everything during the remap if it had not been
written into by the PDF14 fill rect */
memset(new_data_buf, 0, tos->planestride);
/* If the subtype was alpha, then just grab the alpha channel now
and we are all done */
if (tos->SMask_SubType == TRANSPARENCY_MASK_Alpha) {
ctx->smask_blend = false; /* not used in this case */
smask_copy(tos->rect.q.y - tos->rect.p.y,
tos->rect.q.x - tos->rect.p.x,
tos->rowstride,
(tos->data)+tos->planestride, new_data_buf);
#if RAW_DUMP
/* Dump the current buffer to see what we have. */
dump_raw_buffer(tos->rect.q.y-tos->rect.p.y,
tos->rowstride, tos->n_planes,
tos->planestride, tos->rowstride,
"SMask_Pop_Alpha(Mask_Plane1)",tos->data);
global_index++;
#endif
} else {
if ( icc_match == 1 || tos->n_chan == 2) {
#if RAW_DUMP
/* Dump the current buffer to see what we have. */
dump_raw_buffer(tos->rect.q.y-tos->rect.p.y,
tos->rowstride, tos->n_planes,
tos->planestride, tos->rowstride,
"SMask_Pop_Lum(Mask_Plane0)",tos->data);
global_index++;
#endif
/* There is no need to color convert. Data is already gray scale.
We just need to copy the gray plane. However it is
possible that the soft mask could have a soft mask which
would end us up with some alpha blending information
(Bug691803). In fact, according to the spec, the alpha
blending has to occur. See FTS test fts_26_2601.pdf
for an example of this. Softmask buffer is intialized
with BG values. It would be nice to keep track if buffer
ever has a alpha value not 1 so that we could detect and
avoid this blend if not needed. */
smask_blend(tos->data, tos->rect.q.x - tos->rect.p.x,
tos->rect.q.y - tos->rect.p.y, tos->rowstride,
tos->planestride);
#if RAW_DUMP
/* Dump the current buffer to see what we have. */
dump_raw_buffer(tos->rect.q.y-tos->rect.p.y,
tos->rowstride, tos->n_planes,
tos->planestride, tos->rowstride,
"SMask_Pop_Lum_Post_Blend",tos->data);
global_index++;
#endif
smask_copy(tos->rect.q.y - tos->rect.p.y,
tos->rect.q.x - tos->rect.p.x,
tos->rowstride, tos->data, new_data_buf);
} else {
if ( icc_match == -1 ) {
/* The slow old fashioned way */
smask_luminosity_mapping(tos->rect.q.y - tos->rect.p.y ,
tos->rect.q.x - tos->rect.p.x,tos->n_chan,
tos->rowstride, tos->planestride,
tos->data, new_data_buf, ctx->additive, tos->SMask_SubType);
} else {
/* ICC case where we use the CMM */
/* Request the ICC link for the transform that we will need to use */
rendering_params.black_point_comp = gsBLACKPTCOMP_OFF;
rendering_params.graphics_type_tag = GS_IMAGE_TAG;
rendering_params.override_icc = false;
rendering_params.preserve_black = gsBKPRESNOTSPECIFIED;
rendering_params.rendering_intent = gsPERCEPTUAL;
rendering_params.cmm = gsCMM_DEFAULT;
icc_link = gsicc_get_link_profile(pgs, dev, des_profile,
src_profile, &rendering_params, pgs->memory, false);
smask_icc(dev, tos->rect.q.y - tos->rect.p.y,
tos->rect.q.x - tos->rect.p.x,tos->n_chan,
tos->rowstride, tos->planestride,
tos->data, new_data_buf, icc_link);
/* Release the link */
gsicc_release_link(icc_link);
}
}
}
/* Free the old object, NULL test was above */
gs_free_object(ctx->memory, tos->data, "pdf14_pop_transparency_mask");
tos->data = new_data_buf;
/* Data is single channel now */
tos->n_chan = 1;
tos->n_planes = 1;
/* Assign as reference counted mask buffer */
if (ctx->mask_stack != NULL) {
/* In this case, the source file is wacky as it already had a
softmask and now is getting a replacement. We need to clean
up the softmask stack before doing this free and creating
a new stack. Bug 693312 */
pdf14_free_mask_stack(ctx, ctx->memory);
}
ctx->mask_stack = pdf14_mask_element_new(ctx->memory);
if (ctx->mask_stack == NULL)
return gs_note_error(gs_error_VMerror);
ctx->mask_stack->rc_mask = pdf14_rcmask_new(ctx->memory);
if (ctx->mask_stack->rc_mask == NULL)
return gs_note_error(gs_error_VMerror);
ctx->mask_stack->rc_mask->mask_buf = tos;
}
return 0;
}
| 16,214 |
175,456 | 0 | static size_t in_get_buffer_size(const struct audio_stream *stream)
{
struct stream_in *in = (struct stream_in *)stream;
return get_input_buffer_size(in->requested_rate,
in_get_format(stream),
audio_channel_count_from_in_mask(in->main_channels),
in->usecase_type,
in->devices);
}
| 16,215 |
175,061 | 0 | void SoundPool::addToStopList(SoundChannel* channel)
{
Mutex::Autolock lock(&mRestartLock);
if (!mQuit) {
mStop.push_back(channel);
mCondition.signal();
}
}
| 16,216 |
21,851 | 0 | static void write_register_operand(struct operand *op)
{
/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
switch (op->bytes) {
case 1:
*(u8 *)op->addr.reg = (u8)op->val;
break;
case 2:
*(u16 *)op->addr.reg = (u16)op->val;
break;
case 4:
*op->addr.reg = (u32)op->val;
break; /* 64b: zero-extend */
case 8:
*op->addr.reg = op->val;
break;
}
}
| 16,217 |
18,079 | 0 | XcursorLibraryLoadImage (const char *file, const char *theme, int size)
{
FILE *f = NULL;
XcursorImage *image = NULL;
if (!file)
return NULL;
if (theme)
f = XcursorScanTheme (theme, file);
if (!f)
f = XcursorScanTheme ("default", file);
if (f == XCURSOR_SCAN_CORE)
return NULL;
if (f)
{
image = XcursorFileLoadImage (f, size);
fclose (f);
}
return image;
}
| 16,218 |
185,546 | 1 | bool ChromeContentUtilityClient::OnMessageReceived(
const IPC::Message& message) {
if (filter_messages_ && !ContainsKey(message_id_whitelist_, message.type()))
return false;
bool handled = true;
IPC_BEGIN_MESSAGE_MAP(ChromeContentUtilityClient, message)
IPC_MESSAGE_HANDLER(ChromeUtilityMsg_DecodeImage, OnDecodeImage)
#if defined(OS_CHROMEOS)
IPC_MESSAGE_HANDLER(ChromeUtilityMsg_RobustJPEGDecodeImage,
OnRobustJPEGDecodeImage)
#endif // defined(OS_CHROMEOS)
IPC_MESSAGE_HANDLER(ChromeUtilityMsg_PatchFileBsdiff,
OnPatchFileBsdiff)
IPC_MESSAGE_HANDLER(ChromeUtilityMsg_PatchFileCourgette,
OnPatchFileCourgette)
IPC_MESSAGE_HANDLER(ChromeUtilityMsg_StartupPing, OnStartupPing)
#if defined(FULL_SAFE_BROWSING)
IPC_MESSAGE_HANDLER(ChromeUtilityMsg_AnalyzeZipFileForDownloadProtection,
OnAnalyzeZipFileForDownloadProtection)
#endif
#if defined(ENABLE_EXTENSIONS)
IPC_MESSAGE_HANDLER(ChromeUtilityMsg_ParseMediaMetadata,
OnParseMediaMetadata)
#endif
#if defined(OS_CHROMEOS)
IPC_MESSAGE_HANDLER(ChromeUtilityMsg_CreateZipFile, OnCreateZipFile)
#endif
IPC_MESSAGE_UNHANDLED(handled = false)
IPC_END_MESSAGE_MAP()
for (Handlers::iterator it = handlers_.begin();
!handled && it != handlers_.end(); ++it) {
handled = (*it)->OnMessageReceived(message);
}
return handled;
}
| 16,219 |
50,353 | 0 | int jffs2_init_acl_pre(struct inode *dir_i, struct inode *inode, umode_t *i_mode)
{
struct posix_acl *default_acl, *acl;
int rc;
cache_no_acl(inode);
rc = posix_acl_create(dir_i, i_mode, &default_acl, &acl);
if (rc)
return rc;
if (default_acl) {
set_cached_acl(inode, ACL_TYPE_DEFAULT, default_acl);
posix_acl_release(default_acl);
}
if (acl) {
set_cached_acl(inode, ACL_TYPE_ACCESS, acl);
posix_acl_release(acl);
}
return 0;
}
| 16,220 |
119,137 | 0 | bool CancelableSyncSocket::CreatePair(CancelableSyncSocket* socket_a,
CancelableSyncSocket* socket_b) {
return CreatePairImpl(&socket_a->handle_, &socket_b->handle_, true);
}
| 16,221 |
175,075 | 0 | void SoundChannel::init(SoundPool* soundPool)
{
mSoundPool = soundPool;
}
| 16,222 |
58,026 | 0 | int nft_register_set(struct nft_set_ops *ops)
{
nfnl_lock(NFNL_SUBSYS_NFTABLES);
list_add_tail_rcu(&ops->list, &nf_tables_set_ops);
nfnl_unlock(NFNL_SUBSYS_NFTABLES);
return 0;
}
| 16,223 |
39,157 | 0 | static void nfs_flush_error(struct nfs_pageio_descriptor *desc,
struct nfs_pgio_header *hdr)
{
set_bit(NFS_IOHDR_REDO, &hdr->flags);
while (!list_empty(&hdr->rpc_list)) {
struct nfs_write_data *data = list_first_entry(&hdr->rpc_list,
struct nfs_write_data, list);
list_del(&data->list);
nfs_writedata_release(data);
}
desc->pg_completion_ops->error_cleanup(&desc->pg_list);
}
| 16,224 |
99,669 | 0 | VaapiVideoDecodeAccelerator::InputBuffer::~InputBuffer() {
}
| 16,225 |
23,836 | 0 | static void addr_hash_set(u32 *mask, const u8 *addr)
{
int n = ether_crc(ETH_ALEN, addr) >> 26;
mask[n >> 5] |= (1 << (n & 31));
}
| 16,226 |
80,056 | 0 | GF_Box *dmax_New()
{
ISOM_DECL_BOX_ALLOC(GF_DMAXBox, GF_ISOM_BOX_TYPE_DMAX);
return (GF_Box *)tmp;
}
| 16,227 |
92,859 | 0 | static u32 FFD_RegisterMimeTypes(const GF_InputService *plug) {
u32 i;
for (i = 0 ; FFD_MIME_TYPES[i]; i+=3)
gf_service_register_mime(plug, FFD_MIME_TYPES[i], FFD_MIME_TYPES[i+1], FFD_MIME_TYPES[i+2]);
return i/3;
}
| 16,228 |
94,744 | 0 | MagickExport void DestroyImageProperties(Image *image)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image->filename);
if (image->properties != (void *) NULL)
image->properties=(void *) DestroySplayTree((SplayTreeInfo *)
image->properties);
}
| 16,229 |
178,105 | 1 | static ssize_t eth_rx(NetClientState *nc, const uint8_t *buf, size_t size)
{
struct xlx_ethlite *s = qemu_get_nic_opaque(nc);
unsigned int rxbase = s->rxbuf * (0x800 / 4);
/* DA filter. */
if (!(buf[0] & 0x80) && memcmp(&s->conf.macaddr.a[0], buf, 6))
return size;
if (s->regs[rxbase + R_RX_CTRL0] & CTRL_S) {
D(qemu_log("ethlite lost packet %x\n", s->regs[R_RX_CTRL0]));
return -1;
}
D(qemu_log("%s %zd rxbase=%x\n", __func__, size, rxbase));
memcpy(&s->regs[rxbase + R_RX_BUF0], buf, size);
s->regs[rxbase + R_RX_CTRL0] |= CTRL_S;
/* If c_rx_pingpong was set flip buffers. */
s->rxbuf ^= s->c_rx_pingpong;
return size;
}
| 16,230 |
28,600 | 0 | void qeth_print_status_message(struct qeth_card *card)
{
switch (card->info.type) {
case QETH_CARD_TYPE_OSD:
case QETH_CARD_TYPE_OSM:
case QETH_CARD_TYPE_OSX:
/* VM will use a non-zero first character
* to indicate a HiperSockets like reporting
* of the level OSA sets the first character to zero
* */
if (!card->info.mcl_level[0]) {
sprintf(card->info.mcl_level, "%02x%02x",
card->info.mcl_level[2],
card->info.mcl_level[3]);
card->info.mcl_level[QETH_MCL_LENGTH] = 0;
break;
}
/* fallthrough */
case QETH_CARD_TYPE_IQD:
if ((card->info.guestlan) ||
(card->info.mcl_level[0] & 0x80)) {
card->info.mcl_level[0] = (char) _ebcasc[(__u8)
card->info.mcl_level[0]];
card->info.mcl_level[1] = (char) _ebcasc[(__u8)
card->info.mcl_level[1]];
card->info.mcl_level[2] = (char) _ebcasc[(__u8)
card->info.mcl_level[2]];
card->info.mcl_level[3] = (char) _ebcasc[(__u8)
card->info.mcl_level[3]];
card->info.mcl_level[QETH_MCL_LENGTH] = 0;
}
break;
default:
memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1);
}
if (card->info.portname_required)
qeth_print_status_with_portname(card);
else
qeth_print_status_no_portname(card);
}
| 16,231 |
60,920 | 0 | istr_set_insert (GHashTable *table,
const char *istr)
{
char *key;
key = g_strdup (istr);
g_hash_table_replace (table, key, key);
}
| 16,232 |
89,774 | 0 | drop_cap_bounding_set (bool drop_all)
{
if (!drop_all)
prctl_caps (requested_caps, TRUE, FALSE);
else
{
uint32_t no_caps[2] = {0, 0};
prctl_caps (no_caps, TRUE, FALSE);
}
}
| 16,233 |
179,084 | 1 | static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
shm_rmid(ns, shp);
shm_unlock(shp);
if (!is_file_hugepages(shp->shm_file))
shmem_lock(shp->shm_file, 0, shp->mlock_user);
else if (shp->mlock_user)
user_shm_unlock(file_inode(shp->shm_file)->i_size,
shp->mlock_user);
fput (shp->shm_file);
ipc_rcu_putref(shp, shm_rcu_free);
}
| 16,234 |
10,479 | 0 | static union mfi_sgl *megasas_sgl_next(MegasasCmd *cmd,
union mfi_sgl *sgl)
{
uint8_t *next = (uint8_t *)sgl;
if (megasas_frame_is_ieee_sgl(cmd)) {
next += sizeof(struct mfi_sg_skinny);
} else if (megasas_frame_is_sgl64(cmd)) {
next += sizeof(struct mfi_sg64);
} else {
next += sizeof(struct mfi_sg32);
}
if (next >= (uint8_t *)cmd->frame + cmd->pa_size) {
return NULL;
}
return (union mfi_sgl *)next;
}
| 16,235 |
117,963 | 0 | Frame* ScriptController::retrieveFrameForCurrentContext()
{
return V8Proxy::retrieveFrameForCurrentContext();
}
| 16,236 |
168,356 | 0 | bool BrowserView::ShouldShowWindowIcon() const {
return ShouldShowWindowTitle();
}
| 16,237 |
131,964 | 0 | static void voidMethodStringArgLongArgMethodCallback(const v8::FunctionCallbackInfo<v8::Value>& info)
{
TRACE_EVENT_SET_SAMPLING_STATE("Blink", "DOMMethod");
TestObjectPythonV8Internal::voidMethodStringArgLongArgMethod(info);
TRACE_EVENT_SET_SAMPLING_STATE("V8", "V8Execution");
}
| 16,238 |
112,164 | 0 | DictionaryValue* ChromiumExtensionActivityToValue(
const sync_pb::ChromiumExtensionsActivity& proto) {
DictionaryValue* value = new DictionaryValue();
SET_STR(extension_id);
SET_INT32(bookmark_writes_since_last_commit);
return value;
}
| 16,239 |
20,625 | 0 | int kvm_highest_pending_irq(struct kvm_vcpu *vcpu)
{
struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
if (vpd->irr[0] & (1UL << NMI_VECTOR))
return NMI_VECTOR;
if (vpd->irr[0] & (1UL << ExtINT_VECTOR))
return ExtINT_VECTOR;
return find_highest_bits((int *)&vpd->irr[0]);
}
| 16,240 |
82,586 | 0 | void jswrap_graphics_setRotation(JsVar *parent, int rotation, bool reflect) {
JsGraphics gfx; if (!graphicsGetFromVar(&gfx, parent)) return;
gfx.data.flags &= (JsGraphicsFlags)~(JSGRAPHICSFLAGS_SWAP_XY | JSGRAPHICSFLAGS_INVERT_X | JSGRAPHICSFLAGS_INVERT_Y);
switch (rotation) {
case 0:
break;
case 1:
gfx.data.flags |= JSGRAPHICSFLAGS_SWAP_XY | JSGRAPHICSFLAGS_INVERT_X;
break;
case 2:
gfx.data.flags |= JSGRAPHICSFLAGS_INVERT_X | JSGRAPHICSFLAGS_INVERT_Y;
break;
case 3:
gfx.data.flags |= JSGRAPHICSFLAGS_SWAP_XY | JSGRAPHICSFLAGS_INVERT_Y;
break;
}
if (reflect) {
if (gfx.data.flags & JSGRAPHICSFLAGS_SWAP_XY)
gfx.data.flags ^= JSGRAPHICSFLAGS_INVERT_Y;
else
gfx.data.flags ^= JSGRAPHICSFLAGS_INVERT_X;
}
graphicsSetVar(&gfx);
}
| 16,241 |
107,408 | 0 | void Scrollbar::updateThumb()
{
#ifdef THUMB_POSITION_AFFECTS_BUTTONS
invalidate();
#else
theme()->invalidateParts(this, ForwardTrackPart | BackTrackPart | ThumbPart);
#endif
}
| 16,242 |
158,270 | 0 | void RenderWidgetHostImpl::ForwardMouseEvent(const WebMouseEvent& mouse_event) {
if (GetView()->IsInVR() &&
(is_in_gesture_scroll_[blink::kWebGestureDeviceTouchpad] ||
is_in_touchpad_gesture_fling_)) {
return;
}
ForwardMouseEventWithLatencyInfo(mouse_event,
ui::LatencyInfo(ui::SourceEventType::MOUSE));
if (owner_delegate_)
owner_delegate_->RenderWidgetDidForwardMouseEvent(mouse_event);
}
| 16,243 |
70,148 | 0 | TIFFReadDirEntryCheckRangeLongSlong8(int64 value)
{
if ((value < 0) || (value > (int64) TIFF_UINT32_MAX))
return(TIFFReadDirEntryErrRange);
else
return(TIFFReadDirEntryErrOk);
}
| 16,244 |
69,295 | 0 | int tls1_final_finish_mac(SSL *s, const char *str, int slen, unsigned char *out)
{
int hashlen;
unsigned char hash[EVP_MAX_MD_SIZE];
if (!ssl3_digest_cached_records(s, 0))
return 0;
hashlen = ssl_handshake_hash(s, hash, sizeof(hash));
if (hashlen == 0)
return 0;
if (!tls1_PRF(s, str, slen, hash, hashlen, NULL, 0, NULL, 0, NULL, 0,
s->session->master_key, s->session->master_key_length,
out, TLS1_FINISH_MAC_LENGTH))
return 0;
OPENSSL_cleanse(hash, hashlen);
return TLS1_FINISH_MAC_LENGTH;
}
| 16,245 |
64,963 | 0 | IW_IMPL(const char*) iw_get_option(struct iw_context *ctx, const char *name)
{
int i;
for(i=0; i<ctx->req.options_count; i++) {
if(ctx->req.options[i].name && !strcmp(ctx->req.options[i].name, name)) {
return ctx->req.options[i].val;
}
}
return NULL;
}
| 16,246 |
13,159 | 0 | xps_identify_font_encoding(fz_font *font, int idx, int *pid, int *eid)
{
FT_Face face = font->ft_face;
*pid = face->charmaps[idx]->platform_id;
*eid = face->charmaps[idx]->encoding_id;
}
| 16,247 |
105,884 | 0 | JSObject* createReferenceError(JSGlobalObject* globalObject, const UString& message)
{
ASSERT(!message.isEmpty());
return ErrorInstance::create(globalObject->globalData(), globalObject->referenceErrorConstructor()->errorStructure(), message);
}
| 16,248 |
154,480 | 0 | bool GLES2DecoderPassthroughImpl::MakeCurrent() {
if (!context_.get())
return false;
if (WasContextLost()) {
LOG(ERROR) << " GLES2DecoderPassthroughImpl: Trying to make lost context "
"current.";
return false;
}
if (!context_->MakeCurrent(surface_.get())) {
LOG(ERROR)
<< " GLES2DecoderPassthroughImpl: Context lost during MakeCurrent.";
MarkContextLost(error::kMakeCurrentFailed);
group_->LoseContexts(error::kUnknown);
return false;
}
DCHECK_EQ(api(), gl::g_current_gl_context);
if (CheckResetStatus()) {
LOG(ERROR) << " GLES2DecoderPassthroughImpl: Context reset detected after "
"MakeCurrent.";
group_->LoseContexts(error::kUnknown);
return false;
}
ProcessReadPixels(false);
ProcessQueries(false);
resources_->DestroyPendingTextures(/*has_context=*/true);
return true;
}
| 16,249 |
49,290 | 0 | static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
u8 type, u8 code, int offset, __be32 info)
{
const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
struct net *net = dev_net(skb->dev);
struct request_sock *fastopen;
struct ipv6_pinfo *np;
struct tcp_sock *tp;
__u32 seq, snd_una;
struct sock *sk;
bool fatal;
int err;
sk = __inet6_lookup_established(net, &tcp_hashinfo,
&hdr->daddr, th->dest,
&hdr->saddr, ntohs(th->source),
skb->dev->ifindex);
if (!sk) {
__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
ICMP6_MIB_INERRORS);
return;
}
if (sk->sk_state == TCP_TIME_WAIT) {
inet_twsk_put(inet_twsk(sk));
return;
}
seq = ntohl(th->seq);
fatal = icmpv6_err_convert(type, code, &err);
if (sk->sk_state == TCP_NEW_SYN_RECV)
return tcp_req_err(sk, seq, fatal);
bh_lock_sock(sk);
if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
if (sk->sk_state == TCP_CLOSE)
goto out;
if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
goto out;
}
tp = tcp_sk(sk);
/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
fastopen = tp->fastopen_rsk;
snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
if (sk->sk_state != TCP_LISTEN &&
!between(seq, snd_una, tp->snd_nxt)) {
__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
goto out;
}
np = inet6_sk(sk);
if (type == NDISC_REDIRECT) {
struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
if (dst)
dst->ops->redirect(dst, sk, skb);
goto out;
}
if (type == ICMPV6_PKT_TOOBIG) {
/* We are not interested in TCP_LISTEN and open_requests
* (SYN-ACKs send out by Linux are always <576bytes so
* they should go through unfragmented).
*/
if (sk->sk_state == TCP_LISTEN)
goto out;
if (!ip6_sk_accept_pmtu(sk))
goto out;
tp->mtu_info = ntohl(info);
if (!sock_owned_by_user(sk))
tcp_v6_mtu_reduced(sk);
else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
&tp->tsq_flags))
sock_hold(sk);
goto out;
}
/* Might be for an request_sock */
switch (sk->sk_state) {
case TCP_SYN_SENT:
case TCP_SYN_RECV:
/* Only in fast or simultaneous open. If a fast open socket is
* is already accepted it is treated as a connected one below.
*/
if (fastopen && !fastopen->sk)
break;
if (!sock_owned_by_user(sk)) {
sk->sk_err = err;
sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
tcp_done(sk);
} else
sk->sk_err_soft = err;
goto out;
}
if (!sock_owned_by_user(sk) && np->recverr) {
sk->sk_err = err;
sk->sk_error_report(sk);
} else
sk->sk_err_soft = err;
out:
bh_unlock_sock(sk);
sock_put(sk);
}
| 16,250 |
128,842 | 0 | void SVGDocumentExtensions::addTimeContainer(SVGSVGElement* element)
{
m_timeContainers.add(element);
}
| 16,251 |
104,370 | 0 | PassRefPtr<StylePropertySet> CSSComputedStyleDeclaration::makeMutable()
{
return copy();
}
| 16,252 |
80,080 | 0 | GF_Err edts_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_EditBox *ptr = (GF_EditBox *)s;
if (ptr->editList && gf_list_count(ptr->editList->entryList)) {
e = gf_isom_box_write_header(s, bs);
if (e) return e;
e = gf_isom_box_write((GF_Box *) ptr->editList, bs);
if (e) return e;
}
return GF_OK;
}
| 16,253 |
101,391 | 0 | BaseNode::~BaseNode() {}
| 16,254 |
6,556 | 0 | void Smb4KGlobal::clearHostsList()
{
mutex.lock();
while ( !p->hostsList.isEmpty() )
{
delete p->hostsList.takeFirst();
}
mutex.unlock();
}
| 16,255 |
62,419 | 0 | handle_reassoc_request(netdissect_options *ndo,
const u_char *p, u_int length)
{
struct mgmt_body_t pbody;
int offset = 0;
int ret;
memset(&pbody, 0, sizeof(pbody));
if (!ND_TTEST2(*p, IEEE802_11_CAPINFO_LEN + IEEE802_11_LISTENINT_LEN +
IEEE802_11_AP_LEN))
return 0;
if (length < IEEE802_11_CAPINFO_LEN + IEEE802_11_LISTENINT_LEN +
IEEE802_11_AP_LEN)
return 0;
pbody.capability_info = EXTRACT_LE_16BITS(p);
offset += IEEE802_11_CAPINFO_LEN;
length -= IEEE802_11_CAPINFO_LEN;
pbody.listen_interval = EXTRACT_LE_16BITS(p+offset);
offset += IEEE802_11_LISTENINT_LEN;
length -= IEEE802_11_LISTENINT_LEN;
memcpy(&pbody.ap, p+offset, IEEE802_11_AP_LEN);
offset += IEEE802_11_AP_LEN;
length -= IEEE802_11_AP_LEN;
ret = parse_elements(ndo, &pbody, p, offset, length);
PRINT_SSID(pbody);
ND_PRINT((ndo, " AP : %s", etheraddr_string(ndo, pbody.ap )));
return ret;
}
| 16,256 |
88,630 | 0 | static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
{
struct dwc3 *dwc = dep->dwc;
if (!dep->endpoint.desc) {
dev_err(dwc->dev, "%s: can't queue to disabled endpoint\n",
dep->name);
return -ESHUTDOWN;
}
if (WARN(req->dep != dep, "request %pK belongs to '%s'\n",
&req->request, req->dep->name))
return -EINVAL;
pm_runtime_get(dwc->dev);
req->request.actual = 0;
req->request.status = -EINPROGRESS;
req->direction = dep->direction;
req->epnum = dep->number;
trace_dwc3_ep_queue(req);
list_add_tail(&req->list, &dep->pending_list);
/*
* NOTICE: Isochronous endpoints should NEVER be prestarted. We must
* wait for a XferNotReady event so we will know what's the current
* (micro-)frame number.
*
* Without this trick, we are very, very likely gonna get Bus Expiry
* errors which will force us issue EndTransfer command.
*/
if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
if ((dep->flags & DWC3_EP_PENDING_REQUEST)) {
if (dep->flags & DWC3_EP_TRANSFER_STARTED) {
dwc3_stop_active_transfer(dwc, dep->number, true);
dep->flags = DWC3_EP_ENABLED;
} else {
u32 cur_uf;
cur_uf = __dwc3_gadget_get_frame(dwc);
__dwc3_gadget_start_isoc(dwc, dep, cur_uf);
dep->flags &= ~DWC3_EP_PENDING_REQUEST;
}
return 0;
}
if ((dep->flags & DWC3_EP_BUSY) &&
!(dep->flags & DWC3_EP_MISSED_ISOC))
goto out;
return 0;
}
out:
return __dwc3_gadget_kick_transfer(dep);
}
| 16,257 |
48,617 | 0 | void vfio_pci_intx_mask(struct vfio_pci_device *vdev)
{
struct pci_dev *pdev = vdev->pdev;
unsigned long flags;
spin_lock_irqsave(&vdev->irqlock, flags);
/*
* Masking can come from interrupt, ioctl, or config space
* via INTx disable. The latter means this can get called
* even when not using intx delivery. In this case, just
* try to have the physical bit follow the virtual bit.
*/
if (unlikely(!is_intx(vdev))) {
if (vdev->pci_2_3)
pci_intx(pdev, 0);
} else if (!vdev->ctx[0].masked) {
/*
* Can't use check_and_mask here because we always want to
* mask, not just when something is pending.
*/
if (vdev->pci_2_3)
pci_intx(pdev, 0);
else
disable_irq_nosync(pdev->irq);
vdev->ctx[0].masked = true;
}
spin_unlock_irqrestore(&vdev->irqlock, flags);
}
| 16,258 |
187,015 | 1 | ChromeContentBrowserClient::CreateThrottlesForNavigation(
content::NavigationHandle* handle) {
std::vector<std::unique_ptr<content::NavigationThrottle>> throttles;
// MetricsNavigationThrottle requires that it runs before NavigationThrottles
// that may delay or cancel navigations, so only NavigationThrottles that
// don't delay or cancel navigations (e.g. throttles that are only observing
// callbacks without affecting navigation behavior) should be added before
// MetricsNavigationThrottle.
if (handle->IsInMainFrame()) {
throttles.push_back(
page_load_metrics::MetricsNavigationThrottle::Create(handle));
}
#if BUILDFLAG(ENABLE_PLUGINS)
std::unique_ptr<content::NavigationThrottle> flash_url_throttle =
FlashDownloadInterception::MaybeCreateThrottleFor(handle);
if (flash_url_throttle)
throttles.push_back(std::move(flash_url_throttle));
#endif
#if BUILDFLAG(ENABLE_SUPERVISED_USERS)
std::unique_ptr<content::NavigationThrottle> supervised_user_throttle =
SupervisedUserNavigationThrottle::MaybeCreateThrottleFor(handle);
if (supervised_user_throttle)
throttles.push_back(std::move(supervised_user_throttle));
#endif
#if defined(OS_ANDROID)
// TODO(davidben): This is insufficient to integrate with prerender properly.
// https://crbug.com/370595
prerender::PrerenderContents* prerender_contents =
prerender::PrerenderContents::FromWebContents(handle->GetWebContents());
if (!prerender_contents && handle->IsInMainFrame()) {
throttles.push_back(
navigation_interception::InterceptNavigationDelegate::CreateThrottleFor(
handle));
}
throttles.push_back(InterceptOMADownloadNavigationThrottle::Create(handle));
#elif BUILDFLAG(ENABLE_EXTENSIONS)
if (handle->IsInMainFrame()) {
// Redirect some navigations to apps that have registered matching URL
// handlers ('url_handlers' in the manifest).
auto url_to_app_throttle =
PlatformAppNavigationRedirector::MaybeCreateThrottleFor(handle);
if (url_to_app_throttle)
throttles.push_back(std::move(url_to_app_throttle));
}
if (base::FeatureList::IsEnabled(features::kDesktopPWAWindowing)) {
if (base::FeatureList::IsEnabled(features::kDesktopPWAsLinkCapturing)) {
auto bookmark_app_experimental_throttle =
extensions::BookmarkAppExperimentalNavigationThrottle::
MaybeCreateThrottleFor(handle);
if (bookmark_app_experimental_throttle)
throttles.push_back(std::move(bookmark_app_experimental_throttle));
} else if (!base::FeatureList::IsEnabled(
features::kDesktopPWAsStayInWindow)) {
// Only add the bookmark app navigation throttle if the stay in
// window flag is not set, as the navigation throttle controls
// opening out of scope links in the browser.
auto bookmark_app_throttle =
extensions::BookmarkAppNavigationThrottle::MaybeCreateThrottleFor(
handle);
if (bookmark_app_throttle)
throttles.push_back(std::move(bookmark_app_throttle));
}
}
if (base::FeatureList::IsEnabled(
features::kMimeHandlerViewInCrossProcessFrame)) {
auto plugin_frame_attach_throttle =
extensions::ExtensionsGuestViewMessageFilter::MaybeCreateThrottle(
handle);
if (plugin_frame_attach_throttle)
throttles.push_back(std::move(plugin_frame_attach_throttle));
}
#endif
#if defined(OS_CHROMEOS)
// Check if we need to add merge session throttle. This throttle will postpone
// loading of main frames.
if (handle->IsInMainFrame()) {
// Add interstitial page while merge session process (cookie reconstruction
// from OAuth2 refresh token in ChromeOS login) is still in progress while
// we are attempting to load a google property.
if (merge_session_throttling_utils::ShouldAttachNavigationThrottle() &&
!merge_session_throttling_utils::AreAllSessionMergedAlready() &&
handle->GetURL().SchemeIsHTTPOrHTTPS()) {
throttles.push_back(MergeSessionNavigationThrottle::Create(handle));
}
auto url_to_apps_throttle =
chromeos::AppsNavigationThrottle::MaybeCreate(handle);
if (url_to_apps_throttle)
throttles.push_back(std::move(url_to_apps_throttle));
}
#endif
#if BUILDFLAG(ENABLE_EXTENSIONS)
throttles.push_back(
std::make_unique<extensions::ExtensionNavigationThrottle>(handle));
std::unique_ptr<content::NavigationThrottle> user_script_throttle =
extensions::ExtensionsBrowserClient::Get()
->GetUserScriptListener()
->CreateNavigationThrottle(handle);
if (user_script_throttle)
throttles.push_back(std::move(user_script_throttle));
#endif
#if BUILDFLAG(ENABLE_SUPERVISED_USERS)
std::unique_ptr<content::NavigationThrottle> supervised_user_nav_throttle =
SupervisedUserGoogleAuthNavigationThrottle::MaybeCreate(handle);
if (supervised_user_nav_throttle)
throttles.push_back(std::move(supervised_user_nav_throttle));
#endif
content::WebContents* web_contents = handle->GetWebContents();
if (auto* subresource_filter_client =
ChromeSubresourceFilterClient::FromWebContents(web_contents)) {
subresource_filter_client->MaybeAppendNavigationThrottles(handle,
&throttles);
}
#if !defined(OS_ANDROID)
// BackgroundTabNavigationThrottle is used by TabManager, which is only
// enabled on non-Android platforms.
std::unique_ptr<content::NavigationThrottle>
background_tab_navigation_throttle = resource_coordinator::
BackgroundTabNavigationThrottle::MaybeCreateThrottleFor(handle);
if (background_tab_navigation_throttle)
throttles.push_back(std::move(background_tab_navigation_throttle));
#endif
#if defined(SAFE_BROWSING_DB_LOCAL)
std::unique_ptr<content::NavigationThrottle>
password_protection_navigation_throttle =
safe_browsing::MaybeCreateNavigationThrottle(handle);
if (password_protection_navigation_throttle) {
throttles.push_back(std::move(password_protection_navigation_throttle));
}
#endif
std::unique_ptr<content::NavigationThrottle> pdf_iframe_throttle =
PDFIFrameNavigationThrottle::MaybeCreateThrottleFor(handle);
if (pdf_iframe_throttle)
throttles.push_back(std::move(pdf_iframe_throttle));
std::unique_ptr<content::NavigationThrottle> tab_under_throttle =
TabUnderNavigationThrottle::MaybeCreate(handle);
if (tab_under_throttle)
throttles.push_back(std::move(tab_under_throttle));
throttles.push_back(std::make_unique<PolicyBlacklistNavigationThrottle>(
handle, handle->GetWebContents()->GetBrowserContext()));
if (base::FeatureList::IsEnabled(features::kSSLCommittedInterstitials)) {
throttles.push_back(std::make_unique<SSLErrorNavigationThrottle>(
handle,
std::make_unique<CertificateReportingServiceCertReporter>(web_contents),
base::Bind(&SSLErrorHandler::HandleSSLError)));
}
std::unique_ptr<content::NavigationThrottle> https_upgrade_timing_throttle =
TypedNavigationTimingThrottle::MaybeCreateThrottleFor(handle);
if (https_upgrade_timing_throttle)
throttles.push_back(std::move(https_upgrade_timing_throttle));
#if !defined(OS_ANDROID)
std::unique_ptr<content::NavigationThrottle> devtools_throttle =
DevToolsWindow::MaybeCreateNavigationThrottle(handle);
if (devtools_throttle)
throttles.push_back(std::move(devtools_throttle));
std::unique_ptr<content::NavigationThrottle> new_tab_page_throttle =
NewTabPageNavigationThrottle::MaybeCreateThrottleFor(handle);
if (new_tab_page_throttle)
throttles.push_back(std::move(new_tab_page_throttle));
std::unique_ptr<content::NavigationThrottle>
google_password_manager_throttle =
GooglePasswordManagerNavigationThrottle::MaybeCreateThrottleFor(
handle);
if (google_password_manager_throttle)
throttles.push_back(std::move(google_password_manager_throttle));
#endif
std::unique_ptr<content::NavigationThrottle> previews_lite_page_throttle =
PreviewsLitePageDecider::MaybeCreateThrottleFor(handle);
if (previews_lite_page_throttle)
throttles.push_back(std::move(previews_lite_page_throttle));
if (base::FeatureList::IsEnabled(safe_browsing::kCommittedSBInterstitials)) {
throttles.push_back(
std::make_unique<safe_browsing::SafeBrowsingNavigationThrottle>(
handle));
}
#if defined(OS_WIN) || defined(OS_MACOSX) || \
(defined(OS_LINUX) && !defined(OS_CHROMEOS))
std::unique_ptr<content::NavigationThrottle> browser_switcher_throttle =
browser_switcher::BrowserSwitcherNavigationThrottle ::
MaybeCreateThrottleFor(handle);
if (browser_switcher_throttle)
throttles.push_back(std::move(browser_switcher_throttle));
#endif
return throttles;
}
| 16,259 |
172,018 | 0 | static void btsock_l2cap_free(l2cap_socket *sock)
{
pthread_mutex_lock(&state_lock);
btsock_l2cap_free_l(sock);
pthread_mutex_unlock(&state_lock);
}
| 16,260 |
99,026 | 0 | bool WebGraphicsContext3DDefaultImpl::supportsMapSubCHROMIUM()
{
return false;
}
| 16,261 |
8,554 | 0 | static int ssh_channelcmp(void *av, void *bv)
{
struct ssh_channel *a = (struct ssh_channel *) av;
struct ssh_channel *b = (struct ssh_channel *) bv;
if (a->localid < b->localid)
return -1;
if (a->localid > b->localid)
return +1;
return 0;
}
| 16,262 |
141,599 | 0 | std::unique_ptr<EventMatcher> EventBindings::ParseEventMatcher(
std::unique_ptr<base::DictionaryValue> filter) {
return base::WrapUnique(new EventMatcher(
std::move(filter), context()->GetRenderFrame()->GetRoutingID()));
}
| 16,263 |
54,896 | 0 | static void add_pending_tree(struct rev_info *revs, struct tree *tree)
{
add_pending_object(revs, &tree->object, "");
}
| 16,264 |
52,906 | 0 | void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr)
{
struct ib_uevent_object *uobj;
uobj = container_of(event->element.srq->uobject,
struct ib_uevent_object, uobject);
ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
event->event, &uobj->event_list,
&uobj->events_reported);
}
| 16,265 |
98,828 | 0 | void AppendMatchingCookiesToList(
net::CookieStore* cookie_store, const std::string& store_id,
const GURL& url, const DictionaryValue* details,
const Extension* extension,
ListValue* match_list) {
net::CookieMonster::CookieList all_cookies = GetCookieListFromStore(
cookie_store, url);
net::CookieMonster::CookieList::const_iterator it;
for (it = all_cookies.begin(); it != all_cookies.end(); ++it) {
GURL cookie_domain_url = GetURLFromCookiePair(*it);
if (!extension->HasHostPermission(cookie_domain_url))
continue;
extension_cookies_helpers::MatchFilter filter(details);
if (filter.MatchesCookie(*it))
match_list->Append(CreateCookieValue(*it, store_id));
}
}
| 16,266 |
136,799 | 0 | double LocalDOMWindow::devicePixelRatio() const {
if (!GetFrame())
return 0.0;
return GetFrame()->DevicePixelRatio();
}
| 16,267 |
7,163 | 0 | cf2_hintmask_isNew( const CF2_HintMask hintmask )
{
return hintmask->isNew;
}
| 16,268 |
73,621 | 0 | static Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
ExceptionInfo *exception)
{
CacheInfo
*restrict cache_info;
MagickBooleanType
destroy,
status;
static MagickSizeType
cpu_throttle = 0,
cycles = 0,
time_limit = 0;
static time_t
cache_timestamp = 0;
status=MagickTrue;
LockSemaphoreInfo(image->semaphore);
if (cpu_throttle == 0)
cpu_throttle=GetMagickResourceLimit(ThrottleResource);
if ((cpu_throttle != MagickResourceInfinity) && ((cycles++ % 32) == 0))
MagickDelay(cpu_throttle);
if (time_limit == 0)
{
/*
Set the expire time in seconds.
*/
time_limit=GetMagickResourceLimit(TimeResource);
cache_timestamp=time((time_t *) NULL);
}
if ((time_limit != MagickResourceInfinity) &&
((MagickSizeType) (time((time_t *) NULL)-cache_timestamp) >= time_limit))
{
#if defined(ECANCELED)
errno=ECANCELED;
#endif
ThrowFatalException(ResourceLimitFatalError,"TimeLimitExceeded");
}
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
destroy=MagickFalse;
if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
{
LockSemaphoreInfo(cache_info->semaphore);
if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
{
CacheInfo
*clone_info;
Image
clone_image;
/*
Clone pixel cache.
*/
clone_image=(*image);
clone_image.semaphore=AllocateSemaphoreInfo();
clone_image.reference_count=1;
clone_image.cache=ClonePixelCache(cache_info);
clone_info=(CacheInfo *) clone_image.cache;
status=OpenPixelCache(&clone_image,IOMode,exception);
if (status != MagickFalse)
{
if (clone != MagickFalse)
status=ClonePixelCacheRepository(clone_info,cache_info,
exception);
if (status != MagickFalse)
{
if (cache_info->reference_count == 1)
cache_info->nexus_info=(NexusInfo **) NULL;
destroy=MagickTrue;
image->cache=clone_image.cache;
}
}
DestroySemaphoreInfo(&clone_image.semaphore);
}
UnlockSemaphoreInfo(cache_info->semaphore);
}
if (destroy != MagickFalse)
cache_info=(CacheInfo *) DestroyPixelCache(cache_info);
if (status != MagickFalse)
{
/*
Ensure the image matches the pixel cache morphology.
*/
image->type=UndefinedType;
if (ValidatePixelCacheMorphology(image) == MagickFalse)
{
status=OpenPixelCache(image,IOMode,exception);
cache_info=(CacheInfo *) image->cache;
if (cache_info->type == DiskCache)
(void) ClosePixelCacheOnDisk(cache_info);
}
}
UnlockSemaphoreInfo(image->semaphore);
if (status == MagickFalse)
return((Cache) NULL);
return(image->cache);
}
| 16,269 |
108,541 | 0 | bool SetTemporaryGlobalOverrideQuotaOnDBThread(int64* new_quota,
QuotaDatabase* database) {
DCHECK(database);
if (!database->SetQuotaConfigValue(
QuotaDatabase::kTemporaryQuotaOverrideKey, *new_quota)) {
*new_quota = -1;
return false;
}
return true;
}
| 16,270 |
4,833 | 0 | FixUpEventFromWindow(SpritePtr pSprite,
xEvent *xE, WindowPtr pWin, Window child, Bool calcChild)
{
int evtype;
if (calcChild)
child = FindChildForEvent(pSprite, pWin);
if ((evtype = xi2_get_type(xE))) {
xXIDeviceEvent *event = (xXIDeviceEvent *) xE;
switch (evtype) {
case XI_RawKeyPress:
case XI_RawKeyRelease:
case XI_RawButtonPress:
case XI_RawButtonRelease:
case XI_RawMotion:
case XI_RawTouchBegin:
case XI_RawTouchUpdate:
case XI_RawTouchEnd:
case XI_DeviceChanged:
case XI_HierarchyChanged:
case XI_PropertyEvent:
case XI_BarrierHit:
case XI_BarrierLeave:
return;
default:
break;
}
event->root = RootWindow(pSprite)->drawable.id;
event->event = pWin->drawable.id;
if (evtype == XI_TouchOwnership) {
event->child = child;
return;
}
if (pSprite->hot.pScreen == pWin->drawable.pScreen) {
event->event_x = event->root_x - double_to_fp1616(pWin->drawable.x);
event->event_y = event->root_y - double_to_fp1616(pWin->drawable.y);
event->child = child;
}
else {
event->event_x = 0;
event->event_y = 0;
event->child = None;
}
if (event->evtype == XI_Enter || event->evtype == XI_Leave ||
event->evtype == XI_FocusIn || event->evtype == XI_FocusOut)
((xXIEnterEvent *) event)->same_screen =
(pSprite->hot.pScreen == pWin->drawable.pScreen);
}
else {
XE_KBPTR.root = RootWindow(pSprite)->drawable.id;
XE_KBPTR.event = pWin->drawable.id;
if (pSprite->hot.pScreen == pWin->drawable.pScreen) {
XE_KBPTR.sameScreen = xTrue;
XE_KBPTR.child = child;
XE_KBPTR.eventX = XE_KBPTR.rootX - pWin->drawable.x;
XE_KBPTR.eventY = XE_KBPTR.rootY - pWin->drawable.y;
}
else {
XE_KBPTR.sameScreen = xFalse;
XE_KBPTR.child = None;
XE_KBPTR.eventX = 0;
XE_KBPTR.eventY = 0;
}
}
}
| 16,271 |
170,342 | 0 | static bool AdjustChannelsAndRate(uint32_t fourcc, uint32_t *channels, uint32_t *rate) {
if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_AMR_NB, FourCC2MIME(fourcc))) {
*channels = 1;
*rate = 8000;
return true;
} else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_AMR_WB, FourCC2MIME(fourcc))) {
*channels = 1;
*rate = 16000;
return true;
}
return false;
}
| 16,272 |
125,936 | 0 | DragTargetDropAckNotificationObserver::DragTargetDropAckNotificationObserver(
AutomationProvider* automation,
IPC::Message* reply_message)
: automation_(automation->AsWeakPtr()),
reply_message_(reply_message) {
registrar_.Add(
this,
content::NOTIFICATION_RENDER_VIEW_HOST_DID_RECEIVE_DRAG_TARGET_DROP_ACK,
content::NotificationService::AllSources());
registrar_.Add(
this,
chrome::NOTIFICATION_APP_MODAL_DIALOG_SHOWN,
content::NotificationService::AllSources());
}
| 16,273 |
178,625 | 1 | static int nbd_negotiate_drop_sync(QIOChannel *ioc, size_t size)
{
ssize_t ret;
uint8_t *buffer = g_malloc(MIN(65536, size));
while (size > 0) {
size_t count = MIN(65536, size);
ret = nbd_negotiate_read(ioc, buffer, count);
if (ret < 0) {
g_free(buffer);
return ret;
}
size -= count;
}
g_free(buffer);
return 0;
}
| 16,274 |
100,653 | 0 | int GetIndexOfSingletonTab(browser::NavigateParams* params) {
if (params->disposition != SINGLETON_TAB)
return -1;
GURL rewritten_url(params->url);
bool reverse_on_redirect = false;
BrowserURLHandler::GetInstance()->RewriteURLIfNecessary(
&rewritten_url,
params->browser->profile(),
&reverse_on_redirect);
int start_index = std::max(0, params->browser->active_index());
int tab_count = params->browser->tab_count();
for (int i = 0; i < tab_count; ++i) {
int tab_index = (start_index + i) % tab_count;
TabContentsWrapper* tab =
params->browser->GetTabContentsWrapperAt(tab_index);
url_canon::Replacements<char> replacements;
if (params->ref_behavior == browser::NavigateParams::IGNORE_REF)
replacements.ClearRef();
if (params->path_behavior == browser::NavigateParams::IGNORE_AND_NAVIGATE ||
params->path_behavior == browser::NavigateParams::IGNORE_AND_STAY_PUT) {
replacements.ClearPath();
replacements.ClearQuery();
}
if (CompareURLsWithReplacements(tab->tab_contents()->GetURL(),
params->url, replacements) ||
CompareURLsWithReplacements(tab->tab_contents()->GetURL(),
rewritten_url, replacements)) {
params->target_contents = tab;
return tab_index;
}
}
return -1;
}
| 16,275 |
26,860 | 0 | static struct dentry *proc_lookupfdinfo(struct inode *dir,
struct dentry *dentry,
struct nameidata *nd)
{
return proc_lookupfd_common(dir, dentry, proc_fdinfo_instantiate);
}
| 16,276 |
157,245 | 0 | void WebMediaPlayerImpl::OnAudioConfigChange(const AudioDecoderConfig& config) {
DCHECK(main_task_runner_->BelongsToCurrentThread());
DCHECK_NE(ready_state_, WebMediaPlayer::kReadyStateHaveNothing);
const bool codec_change =
pipeline_metadata_.audio_decoder_config.codec() != config.codec();
pipeline_metadata_.audio_decoder_config = config;
if (observer_)
observer_->OnMetadataChanged(pipeline_metadata_);
if (codec_change)
UpdateSecondaryProperties();
}
| 16,277 |
158,903 | 0 | bool PDFiumEngine::CheckPageAvailable(int index, std::vector<int>* pending) {
if (!doc_)
return false;
const int num_pages = static_cast<int>(pages_.size());
if (index < num_pages && pages_[index]->available())
return true;
if (!FPDFAvail_IsPageAvail(fpdf_availability_, index, &download_hints_)) {
if (!base::ContainsValue(*pending, index))
pending->push_back(index);
return false;
}
if (index < num_pages)
pages_[index]->set_available(true);
if (default_page_size_.IsEmpty())
default_page_size_ = GetPageSize(index);
return true;
}
| 16,278 |
57,972 | 0 | static int nf_tables_getgen(struct sock *nlsk, struct sk_buff *skb,
const struct nlmsghdr *nlh,
const struct nlattr * const nla[])
{
struct net *net = sock_net(skb->sk);
struct sk_buff *skb2;
int err;
skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
if (skb2 == NULL)
return -ENOMEM;
err = nf_tables_fill_gen_info(skb2, net, NETLINK_CB(skb).portid,
nlh->nlmsg_seq);
if (err < 0)
goto err;
return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
err:
kfree_skb(skb2);
return err;
}
| 16,279 |
138,663 | 0 | void RenderFrameHostImpl::DidSelectPopupMenuItem(int selected_index) {
Send(new FrameMsg_SelectPopupMenuItem(routing_id_, selected_index));
}
| 16,280 |
79,736 | 0 | png_handle_iCCP(png_structrp png_ptr, png_inforp info_ptr, png_uint_32 length)
/* Note: this does not properly handle profiles that are > 64K under DOS */
{
png_const_charp errmsg = NULL; /* error message output, or no error */
int finished = 0; /* crc checked */
png_debug(1, "in png_handle_iCCP");
if ((png_ptr->mode & PNG_HAVE_IHDR) == 0)
png_chunk_error(png_ptr, "missing IHDR");
else if ((png_ptr->mode & (PNG_HAVE_IDAT|PNG_HAVE_PLTE)) != 0)
{
png_crc_finish(png_ptr, length);
png_chunk_benign_error(png_ptr, "out of place");
return;
}
/* Consistent with all the above colorspace handling an obviously *invalid*
* chunk is just ignored, so does not invalidate the color space. An
* alternative is to set the 'invalid' flags at the start of this routine
* and only clear them in they were not set before and all the tests pass.
*/
/* The keyword must be at least one character and there is a
* terminator (0) byte and the compression method byte, and the
* 'zlib' datastream is at least 11 bytes.
*/
if (length < 14)
{
png_crc_finish(png_ptr, length);
png_chunk_benign_error(png_ptr, "too short");
return;
}
/* If a colorspace error has already been output skip this chunk */
if ((png_ptr->colorspace.flags & PNG_COLORSPACE_INVALID) != 0)
{
png_crc_finish(png_ptr, length);
return;
}
/* Only one sRGB or iCCP chunk is allowed, use the HAVE_INTENT flag to detect
* this.
*/
if ((png_ptr->colorspace.flags & PNG_COLORSPACE_HAVE_INTENT) == 0)
{
uInt read_length, keyword_length;
char keyword[81];
/* Find the keyword; the keyword plus separator and compression method
* bytes can be at most 81 characters long.
*/
read_length = 81; /* maximum */
if (read_length > length)
read_length = (uInt)length;
png_crc_read(png_ptr, (png_bytep)keyword, read_length);
length -= read_length;
/* The minimum 'zlib' stream is assumed to be just the 2 byte header,
* 5 bytes minimum 'deflate' stream, and the 4 byte checksum.
*/
if (length < 11)
{
png_crc_finish(png_ptr, length);
png_chunk_benign_error(png_ptr, "too short");
return;
}
keyword_length = 0;
while (keyword_length < 80 && keyword_length < read_length &&
keyword[keyword_length] != 0)
++keyword_length;
/* TODO: make the keyword checking common */
if (keyword_length >= 1 && keyword_length <= 79)
{
/* We only understand '0' compression - deflate - so if we get a
* different value we can't safely decode the chunk.
*/
if (keyword_length+1 < read_length &&
keyword[keyword_length+1] == PNG_COMPRESSION_TYPE_BASE)
{
read_length -= keyword_length+2;
if (png_inflate_claim(png_ptr, png_iCCP) == Z_OK)
{
Byte profile_header[132]={0};
Byte local_buffer[PNG_INFLATE_BUF_SIZE];
png_alloc_size_t size = (sizeof profile_header);
png_ptr->zstream.next_in = (Bytef*)keyword + (keyword_length+2);
png_ptr->zstream.avail_in = read_length;
(void)png_inflate_read(png_ptr, local_buffer,
(sizeof local_buffer), &length, profile_header, &size,
0/*finish: don't, because the output is too small*/);
if (size == 0)
{
/* We have the ICC profile header; do the basic header checks.
*/
const png_uint_32 profile_length =
png_get_uint_32(profile_header);
if (png_icc_check_length(png_ptr, &png_ptr->colorspace,
keyword, profile_length) != 0)
{
/* The length is apparently ok, so we can check the 132
* byte header.
*/
if (png_icc_check_header(png_ptr, &png_ptr->colorspace,
keyword, profile_length, profile_header,
png_ptr->color_type) != 0)
{
/* Now read the tag table; a variable size buffer is
* needed at this point, allocate one for the whole
* profile. The header check has already validated
* that none of this stuff will overflow.
*/
const png_uint_32 tag_count = png_get_uint_32(
profile_header+128);
png_bytep profile = png_read_buffer(png_ptr,
profile_length, 2/*silent*/);
if (profile != NULL)
{
memcpy(profile, profile_header,
(sizeof profile_header));
size = 12 * tag_count;
(void)png_inflate_read(png_ptr, local_buffer,
(sizeof local_buffer), &length,
profile + (sizeof profile_header), &size, 0);
/* Still expect a buffer error because we expect
* there to be some tag data!
*/
if (size == 0)
{
if (png_icc_check_tag_table(png_ptr,
&png_ptr->colorspace, keyword, profile_length,
profile) != 0)
{
/* The profile has been validated for basic
* security issues, so read the whole thing in.
*/
size = profile_length - (sizeof profile_header)
- 12 * tag_count;
(void)png_inflate_read(png_ptr, local_buffer,
(sizeof local_buffer), &length,
profile + (sizeof profile_header) +
12 * tag_count, &size, 1/*finish*/);
if (length > 0 && !(png_ptr->flags &
PNG_FLAG_BENIGN_ERRORS_WARN))
errmsg = "extra compressed data";
/* But otherwise allow extra data: */
else if (size == 0)
{
if (length > 0)
{
/* This can be handled completely, so
* keep going.
*/
png_chunk_warning(png_ptr,
"extra compressed data");
}
png_crc_finish(png_ptr, length);
finished = 1;
# if defined(PNG_sRGB_SUPPORTED) && PNG_sRGB_PROFILE_CHECKS >= 0
/* Check for a match against sRGB */
png_icc_set_sRGB(png_ptr,
&png_ptr->colorspace, profile,
png_ptr->zstream.adler);
# endif
/* Steal the profile for info_ptr. */
if (info_ptr != NULL)
{
png_free_data(png_ptr, info_ptr,
PNG_FREE_ICCP, 0);
info_ptr->iccp_name = png_voidcast(char*,
png_malloc_base(png_ptr,
keyword_length+1));
if (info_ptr->iccp_name != NULL)
{
memcpy(info_ptr->iccp_name, keyword,
keyword_length+1);
info_ptr->iccp_proflen =
profile_length;
info_ptr->iccp_profile = profile;
png_ptr->read_buffer = NULL; /*steal*/
info_ptr->free_me |= PNG_FREE_ICCP;
info_ptr->valid |= PNG_INFO_iCCP;
}
else
{
png_ptr->colorspace.flags |=
PNG_COLORSPACE_INVALID;
errmsg = "out of memory";
}
}
/* else the profile remains in the read
* buffer which gets reused for subsequent
* chunks.
*/
if (info_ptr != NULL)
png_colorspace_sync(png_ptr, info_ptr);
if (errmsg == NULL)
{
png_ptr->zowner = 0;
return;
}
}
if (errmsg == NULL)
errmsg = png_ptr->zstream.msg;
}
/* else png_icc_check_tag_table output an error */
}
else /* profile truncated */
errmsg = png_ptr->zstream.msg;
}
else
errmsg = "out of memory";
}
/* else png_icc_check_header output an error */
}
/* else png_icc_check_length output an error */
}
else /* profile truncated */
errmsg = png_ptr->zstream.msg;
/* Release the stream */
png_ptr->zowner = 0;
}
else /* png_inflate_claim failed */
errmsg = png_ptr->zstream.msg;
}
else
errmsg = "bad compression method"; /* or missing */
}
else
errmsg = "bad keyword";
}
else
errmsg = "too many profiles";
/* Failure: the reason is in 'errmsg' */
if (finished == 0)
png_crc_finish(png_ptr, length);
png_ptr->colorspace.flags |= PNG_COLORSPACE_INVALID;
png_colorspace_sync(png_ptr, info_ptr);
if (errmsg != NULL) /* else already output */
png_chunk_benign_error(png_ptr, errmsg);
}
| 16,281 |
63,121 | 0 | int Huff_Receive (node_t *node, int *ch, byte *fin) {
while (node && node->symbol == INTERNAL_NODE) {
if (get_bit(fin)) {
node = node->right;
} else {
node = node->left;
}
}
if (!node) {
return 0;
}
return (*ch = node->symbol);
}
| 16,282 |
79,696 | 0 | R_API RBinJavaCPTypeObj *r_bin_java_class_cp_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz) {
ut8 tag = buffer[0];
int quick_check = r_bin_java_quick_check (R_BIN_JAVA_CP_CLASS, tag, sz, "Class");
if (quick_check > 0) {
return NULL;
}
RBinJavaCPTypeObj *obj = R_NEW0 (RBinJavaCPTypeObj);
if (obj) {
obj->tag = tag;
obj->metas = R_NEW0 (RBinJavaMetaInfo);
obj->metas->type_info = (void *) &R_BIN_JAVA_CP_METAS[tag];
obj->info.cp_class.name_idx = R_BIN_JAVA_USHORT (buffer, 1);
}
return obj;
}
| 16,283 |
149,177 | 0 | void LockScreenMediaControlsView::MediaSessionPositionChanged(
const base::Optional<media_session::MediaPosition>& position) {
if (hide_controls_timer_->IsRunning())
return;
position_ = position;
if (!position.has_value()) {
if (progress_->GetVisible()) {
progress_->SetVisible(false);
Layout();
}
return;
}
progress_->UpdateProgress(*position);
if (!progress_->GetVisible()) {
progress_->SetVisible(true);
Layout();
}
}
| 16,284 |
14,104 | 0 | SProcRenderCreateRadialGradient (ClientPtr client)
{
register int n;
int len;
REQUEST (xRenderCreateRadialGradientReq);
REQUEST_AT_LEAST_SIZE (xRenderCreateRadialGradientReq);
swaps(&stuff->length, n);
swapl(&stuff->pid, n);
swapl(&stuff->inner.x, n);
swapl(&stuff->inner.y, n);
swapl(&stuff->outer.x, n);
swapl(&stuff->outer.y, n);
swapl(&stuff->inner_radius, n);
swapl(&stuff->outer_radius, n);
swapl(&stuff->nStops, n);
len = (client->req_len << 2) - sizeof(xRenderCreateRadialGradientReq);
if (stuff->nStops > UINT32_MAX/(sizeof(xFixed) + sizeof(xRenderColor)))
return BadLength;
if (len != stuff->nStops*(sizeof(xFixed) + sizeof(xRenderColor)))
return BadLength;
swapStops(stuff+1, stuff->nStops);
return (*ProcRenderVector[stuff->renderReqType]) (client);
}
| 16,285 |
81,400 | 0 | void trace_free_pid_list(struct trace_pid_list *pid_list)
{
vfree(pid_list->pids);
kfree(pid_list);
}
| 16,286 |
125,229 | 0 | RenderMessageCompletionCallback(RenderMessageFilter* filter,
IPC::Message* reply_msg)
: filter_(filter),
reply_msg_(reply_msg) {
}
| 16,287 |
175,600 | 0 | status_t NuMediaExtractor::setDataSource(
const sp<IMediaHTTPService> &httpService,
const char *path,
const KeyedVector<String8, String8> *headers) {
Mutex::Autolock autoLock(mLock);
if (mImpl != NULL) {
return -EINVAL;
}
sp<DataSource> dataSource =
DataSource::CreateFromURI(httpService, path, headers);
if (dataSource == NULL) {
return -ENOENT;
}
mIsWidevineExtractor = false;
if (!strncasecmp("widevine://", path, 11)) {
String8 mimeType;
float confidence;
sp<AMessage> dummy;
bool success = SniffWVM(dataSource, &mimeType, &confidence, &dummy);
if (!success
|| strcasecmp(
mimeType.string(), MEDIA_MIMETYPE_CONTAINER_WVM)) {
return ERROR_UNSUPPORTED;
}
sp<WVMExtractor> extractor = new WVMExtractor(dataSource);
extractor->setAdaptiveStreamingMode(true);
mImpl = extractor;
mIsWidevineExtractor = true;
} else {
mImpl = MediaExtractor::Create(dataSource);
}
if (mImpl == NULL) {
return ERROR_UNSUPPORTED;
}
sp<MetaData> fileMeta = mImpl->getMetaData();
const char *containerMime;
if (fileMeta != NULL
&& fileMeta->findCString(kKeyMIMEType, &containerMime)
&& !strcasecmp(containerMime, "video/wvm")) {
static_cast<WVMExtractor *>(mImpl.get())->setCryptoPluginMode(true);
} else if (mImpl->getDrmFlag()) {
mImpl.clear();
mImpl = NULL;
return ERROR_UNSUPPORTED;
}
status_t err = updateDurationAndBitrate();
if (err == OK) {
mDataSource = dataSource;
}
return OK;
}
| 16,288 |
84,517 | 0 | mouse_scroll_line(void)
{
if (relative_wheel_scroll)
return (relative_wheel_scroll_ratio * LASTLINE + 99) / 100;
else
return fixed_wheel_scroll_count;
}
| 16,289 |
95,977 | 0 | void CL_PacketEvent( netadr_t from, msg_t *msg ) {
int headerBytes;
clc.lastPacketTime = cls.realtime;
if ( msg->cursize >= 4 && *(int *)msg->data == -1 ) {
CL_ConnectionlessPacket( from, msg );
return;
}
if ( clc.state < CA_CONNECTED ) {
return; // can't be a valid sequenced packet
}
if ( msg->cursize < 4 ) {
Com_Printf ("%s: Runt packet\n", NET_AdrToStringwPort( from ));
return;
}
if ( !NET_CompareAdr( from, clc.netchan.remoteAddress ) ) {
Com_DPrintf ("%s:sequenced packet without connection\n"
, NET_AdrToStringwPort( from ) );
return;
}
if (!CL_Netchan_Process( &clc.netchan, msg) ) {
return; // out of order, duplicated, etc
}
headerBytes = msg->readcount;
clc.serverMessageSequence = LittleLong( *(int *)msg->data );
clc.lastPacketTime = cls.realtime;
CL_ParseServerMessage( msg );
if ( clc.demorecording && !clc.demowaiting ) {
CL_WriteDemoMessage( msg, headerBytes );
}
}
| 16,290 |
46,399 | 0 | static long vmsplice_to_user(struct file *file, const struct iovec __user *uiov,
unsigned long nr_segs, unsigned int flags)
{
struct pipe_inode_info *pipe;
struct splice_desc sd;
long ret;
struct iovec iovstack[UIO_FASTIOV];
struct iovec *iov = iovstack;
struct iov_iter iter;
ssize_t count = 0;
pipe = get_pipe_info(file);
if (!pipe)
return -EBADF;
ret = rw_copy_check_uvector(READ, uiov, nr_segs,
ARRAY_SIZE(iovstack), iovstack, &iov);
if (ret <= 0)
return ret;
iov_iter_init(&iter, READ, iov, nr_segs, count);
sd.len = 0;
sd.total_len = count;
sd.flags = flags;
sd.u.data = &iter;
sd.pos = 0;
pipe_lock(pipe);
ret = __splice_from_pipe(pipe, &sd, pipe_to_user);
pipe_unlock(pipe);
if (iov != iovstack)
kfree(iov);
return ret;
}
| 16,291 |
63,597 | 0 | gsm_xsmp_server_new (GsmStore *client_store)
{
if (xsmp_server_object != NULL) {
g_object_ref (xsmp_server_object);
} else {
xsmp_server_object = g_object_new (GSM_TYPE_XSMP_SERVER,
"client-store", client_store,
NULL);
g_object_add_weak_pointer (xsmp_server_object,
(gpointer *) &xsmp_server_object);
}
return GSM_XSMP_SERVER (xsmp_server_object);
}
| 16,292 |
126,060 | 0 | V8HeapStatsObserver::V8HeapStatsObserver(
AutomationProvider* automation,
IPC::Message* reply_message,
base::ProcessId renderer_id)
: automation_(automation->AsWeakPtr()),
reply_message_(reply_message),
renderer_id_(renderer_id) {
registrar_.Add(
this,
chrome::NOTIFICATION_RENDERER_V8_HEAP_STATS_COMPUTED,
content::NotificationService::AllSources());
}
| 16,293 |
149,680 | 0 | base::string16 SaveCardBubbleControllerImpl::GetWindowTitle() const {
return l10n_util::GetStringUTF16(
is_uploading_ ? IDS_AUTOFILL_SAVE_CARD_PROMPT_TITLE_TO_CLOUD
: IDS_AUTOFILL_SAVE_CARD_PROMPT_TITLE_LOCAL);
}
| 16,294 |
118,882 | 0 | const NavigationControllerImpl& WebContentsImpl::GetController() const {
return controller_;
}
| 16,295 |
75,984 | 0 | static_track_group_group_handler(vector_t *strvec)
{
static_track_group_t *tgroup = LIST_TAIL_DATA(vrrp_data->static_track_groups);
if (tgroup->iname) {
report_config_error(CONFIG_GENERAL_ERROR, "Group list already specified for sync group %s", tgroup->gname);
skip_block(true);
return;
}
tgroup->iname = read_value_block(strvec);
if (!tgroup->iname)
report_config_error(CONFIG_GENERAL_ERROR, "Warning - track group %s has empty group block", tgroup->gname);
}
| 16,296 |
20,841 | 0 | static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
u64 mcg_cap)
{
int r;
unsigned bank_num = mcg_cap & 0xff, bank;
r = -EINVAL;
if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS)
goto out;
if (mcg_cap & ~(KVM_MCE_CAP_SUPPORTED | 0xff | 0xff0000))
goto out;
r = 0;
vcpu->arch.mcg_cap = mcg_cap;
/* Init IA32_MCG_CTL to all 1s */
if (mcg_cap & MCG_CTL_P)
vcpu->arch.mcg_ctl = ~(u64)0;
/* Init IA32_MCi_CTL to all 1s */
for (bank = 0; bank < bank_num; bank++)
vcpu->arch.mce_banks[bank*4] = ~(u64)0;
out:
return r;
}
| 16,297 |
187,984 | 1 | int SoundPool::load(int fd, int64_t offset, int64_t length, int priority __unused)
{
ALOGV("load: fd=%d, offset=%" PRId64 ", length=%" PRId64 ", priority=%d",
fd, offset, length, priority);
Mutex::Autolock lock(&mLock);
sp<Sample> sample = new Sample(++mNextSampleID, fd, offset, length);
mSamples.add(sample->sampleID(), sample);
doLoad(sample);
return sample->sampleID();
}
| 16,298 |
139,974 | 0 | DocumentElementSetMap& documentToElementSetMap() {
DEFINE_STATIC_LOCAL(DocumentElementSetMap, map, (new DocumentElementSetMap));
return map;
}
| 16,299 |