unique_id (int64: 13 – 189k) | target (int64: 0 – 1) | code (string: 20 – 241k chars) | __index_level_0__ (int64: 0 – 18.9k)
---|---|---|---|
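The header describes a labeled code corpus: `unique_id` identifies the row, `target` is a binary label (the rows marked 1 below are functions that later received security fixes, which suggests a vulnerability-detection dataset, though the dump itself does not say so), `code` holds the raw function text, and `__index_level_0__` is a leftover pandas index column. Below is a minimal sketch of loading and inspecting such a split with pandas; the file name `train.parquet` and the vulnerable/non-vulnerable reading of `target` are assumptions, not given by the dump.

```python
# Minimal sketch: load one split of this dataset and inspect it.
# Assumptions (not stated by the dump): the split is stored as
# "train.parquet" and target == 1 marks the positive (e.g. vulnerable) class.
import pandas as pd

df = pd.read_parquet("train.parquet")  # columns: unique_id, target, code, __index_level_0__

print(df["target"].value_counts())      # label balance of the binary target
print(df["code"].str.len().describe())  # code length spread (20 .. ~241k chars per the header)

positives = df[df["target"] == 1]       # rows labeled 1
print(positives[["unique_id", "code"]].head())
```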
1,904 | 0 | static void reds_mig_switch(void)
{
if (!reds->mig_spice) {
spice_warning("reds_mig_switch called without migrate_info set");
return;
}
main_channel_migrate_switch(reds->main_channel, reds->mig_spice);
reds_mig_release();
}
| 13,300 |
15,116 | 0 | PHP_FUNCTION(imagecreatefromxbm)
{
_php_image_create_from(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHP_GDIMG_TYPE_XBM, "XBM", gdImageCreateFromXbm, NULL);
}
| 13,301 |
44,251 | 0 | int BN_GF2m_arr2poly(const int p[], BIGNUM *a)
{
int i;
bn_check_top(a);
BN_zero(a);
for (i = 0; p[i] != -1; i++) {
if (BN_set_bit(a, p[i]) == 0)
return 0;
}
bn_check_top(a);
return 1;
}
| 13,302 |
163,402 | 0 | bool RenderThreadImpl::Send(IPC::Message* msg) {
bool pumping_events = false;
if (msg->is_sync()) {
if (msg->is_caller_pumping_messages()) {
pumping_events = true;
}
}
std::unique_ptr<blink::scheduler::RendererScheduler::RendererPauseHandle>
renderer_paused_handle;
if (pumping_events) {
renderer_paused_handle = renderer_scheduler_->PauseRenderer();
WebView::WillEnterModalLoop();
}
bool rv = ChildThreadImpl::Send(msg);
if (pumping_events)
WebView::DidExitModalLoop();
return rv;
}
| 13,303 |
118,956 | 0 | void WebContentsImpl::LostMouseLock() {
if (delegate_)
delegate_->LostMouseLock();
}
| 13,304 |
6,755 | 0 | static void ide_sector_read_cb(void *opaque, int ret)
{
IDEState *s = opaque;
int n;
s->pio_aiocb = NULL;
s->status &= ~BUSY_STAT;
if (ret == -ECANCELED) {
return;
}
block_acct_done(blk_get_stats(s->blk), &s->acct);
if (ret != 0) {
if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO |
IDE_RETRY_READ)) {
return;
}
}
n = s->nsector;
if (n > s->req_nb_sectors) {
n = s->req_nb_sectors;
}
/* Allow the guest to read the io_buffer */
ide_transfer_start(s, s->io_buffer, n * BDRV_SECTOR_SIZE, ide_sector_read);
ide_set_irq(s->bus);
ide_set_sector(s, ide_get_sector(s) + n);
s->nsector -= n;
s->io_buffer_offset += 512 * n;
}
| 13,305 |
60,554 | 0 | int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
bool data)
{
ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
struct kvmppc_pte pte;
int rc;
vcpu->stat.ld++;
rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
XLATE_READ, &pte);
if (rc)
return rc;
*eaddr = pte.raddr;
if (!pte.may_read)
return -EPERM;
if (!data && !pte.may_execute)
return -ENOEXEC;
/* Magic page override */
if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
!(kvmppc_get_msr(vcpu) & MSR_PR)) {
void *magic = vcpu->arch.shared;
magic += pte.eaddr & 0xfff;
memcpy(ptr, magic, size);
return EMULATE_DONE;
}
if (kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size))
return EMULATE_DO_MMIO;
return EMULATE_DONE;
}
| 13,306 |
108,590 | 0 | void Shell::OnForwardButtonClicked(GtkWidget* widget) {
GoBackOrForward(1);
}
| 13,307 |
40,979 | 0 | int SearchMLUEntry(cmsMLU* mlu, cmsUInt16Number LanguageCode, cmsUInt16Number CountryCode)
{
int i;
if (mlu == NULL) return -1;
for (i=0; i < mlu ->UsedEntries; i++) {
if (mlu ->Entries[i].Country == CountryCode &&
mlu ->Entries[i].Language == LanguageCode) return i;
}
return -1;
}
| 13,308 |
188,191 | 1 | bool InputWindowInfo::frameContainsPoint(int32_t x, int32_t y) const {
return x >= frameLeft && x <= frameRight
&& y >= frameTop && y <= frameBottom;
}
| 13,309 |
138,390 | 0 | std::unique_ptr<service_manager::Service> CreateDataDecoderService() {
content::UtilityThread::Get()->EnsureBlinkInitialized();
return data_decoder::DataDecoderService::Create();
}
| 13,310 |
88,819 | 0 | static int compat_setdrvprm(int drive,
struct compat_floppy_drive_params __user *arg)
{
struct compat_floppy_drive_params v;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (copy_from_user(&v, arg, sizeof(struct compat_floppy_drive_params)))
return -EFAULT;
mutex_lock(&floppy_mutex);
UDP->cmos = v.cmos;
UDP->max_dtr = v.max_dtr;
UDP->hlt = v.hlt;
UDP->hut = v.hut;
UDP->srt = v.srt;
UDP->spinup = v.spinup;
UDP->spindown = v.spindown;
UDP->spindown_offset = v.spindown_offset;
UDP->select_delay = v.select_delay;
UDP->rps = v.rps;
UDP->tracks = v.tracks;
UDP->timeout = v.timeout;
UDP->interleave_sect = v.interleave_sect;
UDP->max_errors = v.max_errors;
UDP->flags = v.flags;
UDP->read_track = v.read_track;
memcpy(UDP->autodetect, v.autodetect, sizeof(v.autodetect));
UDP->checkfreq = v.checkfreq;
UDP->native_format = v.native_format;
mutex_unlock(&floppy_mutex);
return 0;
}
| 13,311 |
85,037 | 0 | static CURLcode smtp_connect(struct connectdata *conn, bool *done)
{
CURLcode result = CURLE_OK;
struct smtp_conn *smtpc = &conn->proto.smtpc;
struct pingpong *pp = &smtpc->pp;
*done = FALSE; /* default to not done yet */
/* We always support persistent connections in SMTP */
connkeep(conn, "SMTP default");
/* Set the default response time-out */
pp->response_time = RESP_TIMEOUT;
pp->statemach_act = smtp_statemach_act;
pp->endofresp = smtp_endofresp;
pp->conn = conn;
/* Initialize the SASL storage */
Curl_sasl_init(&smtpc->sasl, &saslsmtp);
/* Initialise the pingpong layer */
Curl_pp_init(pp);
/* Parse the URL options */
result = smtp_parse_url_options(conn);
if(result)
return result;
/* Parse the URL path */
result = smtp_parse_url_path(conn);
if(result)
return result;
/* Start off waiting for the server greeting response */
state(conn, SMTP_SERVERGREET);
result = smtp_multi_statemach(conn, done);
return result;
}
| 13,312 |
107,523 | 0 | void ewk_view_editor_client_contents_changed(Evas_Object* ewkView)
{
evas_object_smart_callback_call(ewkView, "editorclient,contents,changed", 0);
}
| 13,313 |
172,226 | 0 | static int uipc_check_fd_locked(tUIPC_CH_ID ch_id)
{
if (ch_id >= UIPC_CH_NUM)
return -1;
if (SAFE_FD_ISSET(uipc_main.ch[ch_id].srvfd, &uipc_main.read_set))
{
BTIF_TRACE_EVENT("INCOMING CONNECTION ON CH %d", ch_id);
uipc_main.ch[ch_id].fd = accept_server_socket(uipc_main.ch[ch_id].srvfd);
BTIF_TRACE_EVENT("NEW FD %d", uipc_main.ch[ch_id].fd);
if ((uipc_main.ch[ch_id].fd > 0) && uipc_main.ch[ch_id].cback)
{
/* if we have a callback we should add this fd to the active set
and notify user with callback event */
BTIF_TRACE_EVENT("ADD FD %d TO ACTIVE SET", uipc_main.ch[ch_id].fd);
FD_SET(uipc_main.ch[ch_id].fd, &uipc_main.active_set);
uipc_main.max_fd = MAX(uipc_main.max_fd, uipc_main.ch[ch_id].fd);
}
if (uipc_main.ch[ch_id].fd < 0)
{
BTIF_TRACE_ERROR("FAILED TO ACCEPT CH %d (%s)", ch_id, strerror(errno));
return -1;
}
if (uipc_main.ch[ch_id].cback)
uipc_main.ch[ch_id].cback(ch_id, UIPC_OPEN_EVT);
}
if (SAFE_FD_ISSET(uipc_main.ch[ch_id].fd, &uipc_main.read_set))
{
if (uipc_main.ch[ch_id].cback)
uipc_main.ch[ch_id].cback(ch_id, UIPC_RX_DATA_READY_EVT);
}
return 0;
}
| 13,314 |
111,972 | 0 | void VerifyPrefSync() {
ASSERT_TRUE(BooleanPrefMatches(prefs::kShowHomeButton));
ChangeBooleanPref(0, prefs::kShowHomeButton);
ASSERT_TRUE(GetClient(0)->AwaitMutualSyncCycleCompletion(GetClient(1)));
ASSERT_TRUE(BooleanPrefMatches(prefs::kShowHomeButton));
}
| 13,315 |
70,734 | 0 | evutil_getenv_(const char *varname)
{
if (evutil_issetugid())
return NULL;
return getenv(varname);
}
| 13,316 |
143,123 | 0 | HTMLCollection* Document::anchors()
{
return ensureCachedCollection<HTMLCollection>(DocAnchors);
}
| 13,317 |
123,733 | 0 | bool CreateTemporaryFileInDir(const FilePath& dir, FilePath* temp_file) {
base::ThreadRestrictions::AssertIOAllowed(); // For call to close().
int fd = CreateAndOpenFdForTemporaryFile(dir, temp_file);
return ((fd >= 0) && !HANDLE_EINTR(close(fd)));
}
| 13,318 |
122,791 | 0 | GpuProcessHost::GpuProcessHost(int host_id, GpuProcessKind kind)
: host_id_(host_id),
valid_(true),
in_process_(false),
software_rendering_(false),
kind_(kind),
process_launched_(false) {
if (CommandLine::ForCurrentProcess()->HasSwitch(switches::kSingleProcess) ||
CommandLine::ForCurrentProcess()->HasSwitch(switches::kInProcessGPU))
in_process_ = true;
DCHECK(!in_process_ || g_gpu_process_hosts[kind] == NULL);
g_gpu_process_hosts[kind] = this;
BrowserThread::PostTask(
BrowserThread::UI,
FROM_HERE,
base::Bind(base::IgnoreResult(&GpuProcessHostUIShim::Create), host_id));
process_.reset(new BrowserChildProcessHostImpl(PROCESS_TYPE_GPU, this));
}
| 13,319 |
30,767 | 0 | int bt_sock_register(int proto, const struct net_proto_family *ops)
{
int err = 0;
if (proto < 0 || proto >= BT_MAX_PROTO)
return -EINVAL;
write_lock(&bt_proto_lock);
if (bt_proto[proto])
err = -EEXIST;
else
bt_proto[proto] = ops;
write_unlock(&bt_proto_lock);
return err;
}
| 13,320 |
3,291 | 0 | stack_param_read(iparam_list * plist, const ref * pkey, iparam_loc * ploc)
{
stack_param_list *const splist = (stack_param_list *) plist;
ref_stack_t *pstack = splist->pstack;
/* This implementation is slow, but it probably doesn't matter. */
uint index = splist->skip + 1;
uint count = splist->count;
for (; count; count--, index += 2) {
const ref *p = ref_stack_index(pstack, index);
if (r_has_type(p, t_name) && name_eq(p, pkey)) {
ploc->pvalue = ref_stack_index(pstack, index - 1);
ploc->presult = &plist->results[count - 1];
*ploc->presult = 1;
return 0;
}
}
return 1;
}
| 13,321 |
92,780 | 0 | update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
{
long delta_avg, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum;
unsigned long runnable_load_avg, load_avg;
u64 runnable_load_sum, load_sum = 0;
s64 delta_sum;
if (!runnable_sum)
return;
gcfs_rq->prop_runnable_sum = 0;
if (runnable_sum >= 0) {
/*
* Add runnable; clip at LOAD_AVG_MAX. Reflects that until
* the CPU is saturated running == runnable.
*/
runnable_sum += se->avg.load_sum;
runnable_sum = min(runnable_sum, (long)LOAD_AVG_MAX);
} else {
/*
* Estimate the new unweighted runnable_sum of the gcfs_rq by
* assuming all tasks are equally runnable.
*/
if (scale_load_down(gcfs_rq->load.weight)) {
load_sum = div_s64(gcfs_rq->avg.load_sum,
scale_load_down(gcfs_rq->load.weight));
}
/* But make sure to not inflate se's runnable */
runnable_sum = min(se->avg.load_sum, load_sum);
}
/*
* runnable_sum can't be lower than running_sum
* As running_sum is scaled with CPU capacity whereas the runnable sum
* is not, we rescale running_sum first
*/
running_sum = se->avg.util_sum /
arch_scale_cpu_capacity(NULL, cpu_of(rq_of(cfs_rq)));
runnable_sum = max(runnable_sum, running_sum);
load_sum = (s64)se_weight(se) * runnable_sum;
load_avg = div_s64(load_sum, LOAD_AVG_MAX);
delta_sum = load_sum - (s64)se_weight(se) * se->avg.load_sum;
delta_avg = load_avg - se->avg.load_avg;
se->avg.load_sum = runnable_sum;
se->avg.load_avg = load_avg;
add_positive(&cfs_rq->avg.load_avg, delta_avg);
add_positive(&cfs_rq->avg.load_sum, delta_sum);
runnable_load_sum = (s64)se_runnable(se) * runnable_sum;
runnable_load_avg = div_s64(runnable_load_sum, LOAD_AVG_MAX);
delta_sum = runnable_load_sum - se_weight(se) * se->avg.runnable_load_sum;
delta_avg = runnable_load_avg - se->avg.runnable_load_avg;
se->avg.runnable_load_sum = runnable_sum;
se->avg.runnable_load_avg = runnable_load_avg;
if (se->on_rq) {
add_positive(&cfs_rq->avg.runnable_load_avg, delta_avg);
add_positive(&cfs_rq->avg.runnable_load_sum, delta_sum);
}
}
| 13,322 |
164,546 | 0 | static const char *columnTypeImpl(
NameContext *pNC,
#ifndef SQLITE_ENABLE_COLUMN_METADATA
Expr *pExpr
#else
Expr *pExpr,
const char **pzOrigDb,
const char **pzOrigTab,
const char **pzOrigCol
#endif
){
char const *zType = 0;
int j;
#ifdef SQLITE_ENABLE_COLUMN_METADATA
char const *zOrigDb = 0;
char const *zOrigTab = 0;
char const *zOrigCol = 0;
#endif
assert( pExpr!=0 );
assert( pNC->pSrcList!=0 );
assert( pExpr->op!=TK_AGG_COLUMN ); /* This routine runs before aggregates
** are processed */
switch( pExpr->op ){
case TK_COLUMN: {
/* The expression is a column. Locate the table the column is being
** extracted from in NameContext.pSrcList. This table may be real
** database table or a subquery.
*/
Table *pTab = 0; /* Table structure column is extracted from */
Select *pS = 0; /* Select the column is extracted from */
int iCol = pExpr->iColumn; /* Index of column in pTab */
while( pNC && !pTab ){
SrcList *pTabList = pNC->pSrcList;
for(j=0;j<pTabList->nSrc && pTabList->a[j].iCursor!=pExpr->iTable;j++);
if( j<pTabList->nSrc ){
pTab = pTabList->a[j].pTab;
pS = pTabList->a[j].pSelect;
}else{
pNC = pNC->pNext;
}
}
if( pTab==0 ){
/* At one time, code such as "SELECT new.x" within a trigger would
** cause this condition to run. Since then, we have restructured how
** trigger code is generated and so this condition is no longer
** possible. However, it can still be true for statements like
** the following:
**
** CREATE TABLE t1(col INTEGER);
** SELECT (SELECT t1.col) FROM t1;
**
** when columnType() is called on the expression "t1.col" in the
** sub-select. In this case, set the column type to NULL, even
** though it should really be "INTEGER".
**
** This is not a problem, as the column type of "t1.col" is never
** used. When columnType() is called on the expression
** "(SELECT t1.col)", the correct type is returned (see the TK_SELECT
** branch below. */
break;
}
assert( pTab && pExpr->y.pTab==pTab );
if( pS ){
/* The "table" is actually a sub-select or a view in the FROM clause
** of the SELECT statement. Return the declaration type and origin
** data for the result-set column of the sub-select.
*/
if( iCol>=0 && iCol<pS->pEList->nExpr ){
/* If iCol is less than zero, then the expression requests the
** rowid of the sub-select or view. This expression is legal (see
** test case misc2.2.2) - it always evaluates to NULL.
*/
NameContext sNC;
Expr *p = pS->pEList->a[iCol].pExpr;
sNC.pSrcList = pS->pSrc;
sNC.pNext = pNC;
sNC.pParse = pNC->pParse;
zType = columnType(&sNC, p,&zOrigDb,&zOrigTab,&zOrigCol);
}
}else{
/* A real table or a CTE table */
assert( !pS );
#ifdef SQLITE_ENABLE_COLUMN_METADATA
if( iCol<0 ) iCol = pTab->iPKey;
assert( iCol==XN_ROWID || (iCol>=0 && iCol<pTab->nCol) );
if( iCol<0 ){
zType = "INTEGER";
zOrigCol = "rowid";
}else{
zOrigCol = pTab->aCol[iCol].zName;
zType = sqlite3ColumnType(&pTab->aCol[iCol],0);
}
zOrigTab = pTab->zName;
if( pNC->pParse && pTab->pSchema ){
int iDb = sqlite3SchemaToIndex(pNC->pParse->db, pTab->pSchema);
zOrigDb = pNC->pParse->db->aDb[iDb].zDbSName;
}
#else
assert( iCol==XN_ROWID || (iCol>=0 && iCol<pTab->nCol) );
if( iCol<0 ){
zType = "INTEGER";
}else{
zType = sqlite3ColumnType(&pTab->aCol[iCol],0);
}
#endif
}
break;
}
#ifndef SQLITE_OMIT_SUBQUERY
case TK_SELECT: {
/* The expression is a sub-select. Return the declaration type and
** origin info for the single column in the result set of the SELECT
** statement.
*/
NameContext sNC;
Select *pS = pExpr->x.pSelect;
Expr *p = pS->pEList->a[0].pExpr;
assert( ExprHasProperty(pExpr, EP_xIsSelect) );
sNC.pSrcList = pS->pSrc;
sNC.pNext = pNC;
sNC.pParse = pNC->pParse;
zType = columnType(&sNC, p, &zOrigDb, &zOrigTab, &zOrigCol);
break;
}
#endif
}
#ifdef SQLITE_ENABLE_COLUMN_METADATA
if( pzOrigDb ){
assert( pzOrigTab && pzOrigCol );
*pzOrigDb = zOrigDb;
*pzOrigTab = zOrigTab;
*pzOrigCol = zOrigCol;
}
#endif
return zType;
}
| 13,323 |
15,477 | 0 | ensure_extension (struct http_stat *hs, const char *ext, int *dt)
{
char *last_period_in_local_filename = strrchr (hs->local_file, '.');
char shortext[8];
int len;
shortext[0] = '\0';
len = strlen (ext);
if (len == 5)
{
memcpy (shortext, ext, len - 1);
shortext[len - 1] = '\0';
}
if (last_period_in_local_filename == NULL
|| !(0 == strcasecmp (last_period_in_local_filename, shortext)
|| 0 == strcasecmp (last_period_in_local_filename, ext)))
{
int local_filename_len = strlen (hs->local_file);
/* Resize the local file, allowing for ".html" preceded by
optional ".NUMBER". */
hs->local_file = xrealloc (hs->local_file,
local_filename_len + 24 + len);
strcpy (hs->local_file + local_filename_len, ext);
/* If clobbering is not allowed and the file, as named,
exists, tack on ".NUMBER.html" instead. */
if (!ALLOW_CLOBBER && file_exists_p (hs->local_file, NULL))
{
int ext_num = 1;
do
sprintf (hs->local_file + local_filename_len,
".%d%s", ext_num++, ext);
while (file_exists_p (hs->local_file, NULL));
}
*dt |= ADDED_HTML_EXTENSION;
}
}
| 13,324 |
1,674 | 0 | gx_dc_is_pattern1_color_with_trans(const gx_device_color *pdevc)
{
if (!(pdevc->type == &gx_dc_pattern || pdevc->type == &gx_dc_pattern_trans)) {
return(false);
}
return(gx_pattern1_get_transptr(pdevc) != NULL);
}
| 13,325 |
73,664 | 0 | static MagickBooleanType WritePixelCacheIndexes(CacheInfo *cache_info,
NexusInfo *restrict nexus_info,ExceptionInfo *exception)
{
MagickOffsetType
count,
offset;
MagickSizeType
extent,
length;
register const IndexPacket
*restrict p;
register ssize_t
y;
size_t
rows;
if (cache_info->active_index_channel == MagickFalse)
return(MagickFalse);
if (nexus_info->authentic_pixel_cache != MagickFalse)
return(MagickTrue);
offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
nexus_info->region.x;
length=(MagickSizeType) nexus_info->region.width*sizeof(IndexPacket);
rows=nexus_info->region.height;
extent=(MagickSizeType) length*rows;
p=nexus_info->indexes;
y=0;
switch (cache_info->type)
{
case MemoryCache:
case MapCache:
{
register IndexPacket
*restrict q;
/*
Write indexes to memory.
*/
if ((cache_info->columns == nexus_info->region.width) &&
(extent == (MagickSizeType) ((size_t) extent)))
{
length=extent;
rows=1UL;
}
q=cache_info->indexes+offset;
for (y=0; y < (ssize_t) rows; y++)
{
(void) memcpy(q,p,(size_t) length);
p+=nexus_info->region.width;
q+=cache_info->columns;
}
break;
}
case DiskCache:
{
/*
Write indexes to disk.
*/
LockSemaphoreInfo(cache_info->file_semaphore);
if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
{
ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
cache_info->cache_filename);
UnlockSemaphoreInfo(cache_info->file_semaphore);
return(MagickFalse);
}
if ((cache_info->columns == nexus_info->region.width) &&
(extent <= MagickMaxBufferExtent))
{
length=extent;
rows=1UL;
}
extent=(MagickSizeType) cache_info->columns*cache_info->rows;
for (y=0; y < (ssize_t) rows; y++)
{
count=WritePixelCacheRegion(cache_info,cache_info->offset+extent*
sizeof(PixelPacket)+offset*sizeof(*p),length,(const unsigned char *)
p);
if ((MagickSizeType) count < length)
break;
p+=nexus_info->region.width;
offset+=cache_info->columns;
}
if (IsFileDescriptorLimitExceeded() != MagickFalse)
(void) ClosePixelCacheOnDisk(cache_info);
UnlockSemaphoreInfo(cache_info->file_semaphore);
break;
}
case DistributedCache:
{
RectangleInfo
region;
/*
Write indexes to distributed cache.
*/
LockSemaphoreInfo(cache_info->file_semaphore);
region=nexus_info->region;
if ((cache_info->columns != nexus_info->region.width) ||
(extent > MagickMaxBufferExtent))
region.height=1UL;
else
{
length=extent;
rows=1UL;
}
for (y=0; y < (ssize_t) rows; y++)
{
count=WriteDistributePixelCacheIndexes((DistributeCacheInfo *)
cache_info->server_info,&region,length,(const unsigned char *) p);
if (count != (MagickOffsetType) length)
break;
p+=nexus_info->region.width;
region.y++;
}
UnlockSemaphoreInfo(cache_info->file_semaphore);
break;
}
default:
break;
}
if (y < (ssize_t) rows)
{
ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
cache_info->cache_filename);
return(MagickFalse);
}
if ((cache_info->debug != MagickFalse) &&
(CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
(void) LogMagickEvent(CacheEvent,GetMagickModule(),
"%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
nexus_info->region.width,(double) nexus_info->region.height,(double)
nexus_info->region.x,(double) nexus_info->region.y);
return(MagickTrue);
}
| 13,326 |
121,055 | 0 | int HttpStreamParser::ReadResponseHeaders(const CompletionCallback& callback) {
DCHECK(io_state_ == STATE_REQUEST_SENT || io_state_ == STATE_DONE);
DCHECK(callback_.is_null());
DCHECK(!callback.is_null());
if (io_state_ == STATE_DONE)
return ERR_CONNECTION_CLOSED;
int result = OK;
io_state_ = STATE_READ_HEADERS;
if (read_buf_->offset() > 0) {
result = read_buf_->offset() - read_buf_unused_offset_;
read_buf_->set_offset(read_buf_unused_offset_);
}
if (result > 0)
io_state_ = STATE_READ_HEADERS_COMPLETE;
result = DoLoop(result);
if (result == ERR_IO_PENDING)
callback_ = callback;
return result > 0 ? OK : result;
}
| 13,327 |
56,557 | 0 | static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
{
int ret, retries = 0;
struct page *page;
pgoff_t index;
struct inode *inode = mapping->host;
handle_t *handle;
index = pos >> PAGE_CACHE_SHIFT;
if (ext4_nonda_switch(inode->i_sb)) {
*fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
return ext4_write_begin(file, mapping, pos,
len, flags, pagep, fsdata);
}
*fsdata = (void *)0;
trace_ext4_da_write_begin(inode, pos, len, flags);
if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
ret = ext4_da_write_inline_data_begin(mapping, inode,
pos, len, flags,
pagep, fsdata);
if (ret < 0)
return ret;
if (ret == 1)
return 0;
}
/*
* grab_cache_page_write_begin() can take a long time if the
* system is thrashing due to memory pressure, or if the page
* is being written back. So grab it first before we start
* the transaction handle. This also allows us to allocate
* the page (if needed) without using GFP_NOFS.
*/
retry_grab:
page = grab_cache_page_write_begin(mapping, index, flags);
if (!page)
return -ENOMEM;
unlock_page(page);
/*
* With delayed allocation, we don't log the i_disksize update
* if there is delayed block allocation. But we still need
* to journalling the i_disksize update if writes to the end
* of file which has an already mapped buffer.
*/
retry_journal:
handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
ext4_da_write_credits(inode, pos, len));
if (IS_ERR(handle)) {
page_cache_release(page);
return PTR_ERR(handle);
}
lock_page(page);
if (page->mapping != mapping) {
/* The page got truncated from under us */
unlock_page(page);
page_cache_release(page);
ext4_journal_stop(handle);
goto retry_grab;
}
/* In case writeback began while the page was unlocked */
wait_for_stable_page(page);
#ifdef CONFIG_EXT4_FS_ENCRYPTION
ret = ext4_block_write_begin(page, pos, len,
ext4_da_get_block_prep);
#else
ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep);
#endif
if (ret < 0) {
unlock_page(page);
ext4_journal_stop(handle);
/*
* block_write_begin may have instantiated a few blocks
* outside i_size. Trim these off again. Don't need
* i_size_read because we hold i_mutex.
*/
if (pos + len > inode->i_size)
ext4_truncate_failed_write(inode);
if (ret == -ENOSPC &&
ext4_should_retry_alloc(inode->i_sb, &retries))
goto retry_journal;
page_cache_release(page);
return ret;
}
*pagep = page;
return ret;
}
| 13,328 |
171,119 | 0 | int sort_camera_metadata(camera_metadata_t *dst) {
if (dst == NULL) return ERROR;
if (dst->flags & FLAG_SORTED) return OK;
qsort(get_entries(dst), dst->entry_count,
sizeof(camera_metadata_buffer_entry_t),
compare_entry_tags);
dst->flags |= FLAG_SORTED;
assert(validate_camera_metadata_structure(dst, NULL) == OK);
return OK;
}
| 13,329 |
14,798 | 0 | ftp_nlist(ftpbuf_t *ftp, const char *path TSRMLS_DC)
{
return ftp_genlist(ftp, "NLST", path TSRMLS_CC);
}
| 13,330 |
69,584 | 0 | intro_point_accepted_intro_count(rend_intro_point_t *intro)
{
return intro->accepted_introduce2_count;
}
| 13,331 |
27,608 | 0 | void seq_copy_to_input(unsigned char *event_rec, int len)
{
unsigned long flags;
/*
* Verify that the len is valid for the current mode.
*/
if (len != 4 && len != 8)
return;
if ((seq_mode == SEQ_1) != (len == 4))
return;
if (iqlen >= (SEQ_MAX_QUEUE - 1))
return; /* Overflow */
spin_lock_irqsave(&lock,flags);
memcpy(&iqueue[iqtail * IEV_SZ], event_rec, len);
iqlen++;
iqtail = (iqtail + 1) % SEQ_MAX_QUEUE;
wake_up(&midi_sleeper);
spin_unlock_irqrestore(&lock,flags);
}
| 13,332 |
154,444 | 0 | bool GLES2DecoderPassthroughImpl::FlushErrors() {
bool had_error = false;
GLenum error = glGetError();
while (error != GL_NO_ERROR) {
errors_.insert(error);
had_error = true;
if (error == GL_OUT_OF_MEMORY && !WasContextLost() &&
lose_context_when_out_of_memory_) {
error::ContextLostReason other = error::kOutOfMemory;
if (CheckResetStatus()) {
other = error::kUnknown;
} else {
MarkContextLost(error::kOutOfMemory);
}
group_->LoseContexts(other);
break;
}
error = glGetError();
}
return had_error;
}
| 13,333 |
138,517 | 0 | void WorkerThread::shutdown()
{
ASSERT(isCurrentThread());
{
MutexLocker lock(m_threadStateMutex);
ASSERT(!m_shutdown);
m_shutdown = true;
}
PlatformThreadData::current().threadTimers().setSharedTimer(nullptr);
workerGlobalScope()->dispose();
willDestroyIsolate();
workerReportingProxy().willDestroyWorkerGlobalScope();
#if !ENABLE(OILPAN)
ASSERT(m_workerGlobalScope->hasOneRef());
#endif
m_workerGlobalScope->notifyContextDestroyed();
m_workerGlobalScope = nullptr;
backingThread().removeTaskObserver(m_microtaskRunner.get());
backingThread().shutdown();
destroyIsolate();
m_microtaskRunner = nullptr;
workerReportingProxy().workerThreadTerminated();
m_terminationEvent->signal();
PlatformThreadData::current().destroy();
}
| 13,334 |
111,948 | 0 | void ProfileSyncService::UpdateSelectedTypesHistogram(
bool sync_everything, const syncable::ModelTypeSet chosen_types) const {
if (!HasSyncSetupCompleted() ||
sync_everything != sync_prefs_.HasKeepEverythingSynced()) {
UMA_HISTOGRAM_BOOLEAN("Sync.SyncEverything", sync_everything);
}
const syncable::ModelType model_types[] = {
syncable::APPS,
syncable::AUTOFILL,
syncable::BOOKMARKS,
syncable::EXTENSIONS,
syncable::PASSWORDS,
syncable::PREFERENCES,
syncable::SESSIONS,
syncable::THEMES,
syncable::TYPED_URLS
};
const browser_sync::user_selectable_type::UserSelectableSyncType
user_selectable_types[] = {
browser_sync::user_selectable_type::APPS,
browser_sync::user_selectable_type::AUTOFILL,
browser_sync::user_selectable_type::BOOKMARKS,
browser_sync::user_selectable_type::EXTENSIONS,
browser_sync::user_selectable_type::PASSWORDS,
browser_sync::user_selectable_type::PREFERENCES,
browser_sync::user_selectable_type::SESSIONS,
browser_sync::user_selectable_type::THEMES,
browser_sync::user_selectable_type::TYPED_URLS
};
COMPILE_ASSERT(17 == syncable::MODEL_TYPE_COUNT,
UpdateCustomConfigHistogram);
COMPILE_ASSERT(arraysize(model_types) ==
browser_sync::user_selectable_type::SELECTABLE_DATATYPE_COUNT,
UpdateCustomConfigHistogram);
COMPILE_ASSERT(arraysize(model_types) == arraysize(user_selectable_types),
UpdateCustomConfigHistogram);
if (!sync_everything) {
const syncable::ModelTypeSet current_types = GetPreferredDataTypes();
for (size_t i = 0; i < arraysize(model_types); ++i) {
const syncable::ModelType type = model_types[i];
if (chosen_types.Has(type) &&
(!HasSyncSetupCompleted() || !current_types.Has(type))) {
UMA_HISTOGRAM_ENUMERATION(
"Sync.CustomSync",
user_selectable_types[i],
browser_sync::user_selectable_type::SELECTABLE_DATATYPE_COUNT + 1);
}
}
}
}
| 13,335 |
12,544 | 0 | listEntry(struct rx_call *call, afs_int32 aid, struct prcheckentry *aentry,
afs_int32 *cid)
{
afs_int32 code;
struct ubik_trans *tt;
afs_int32 temp;
struct prentry tentry;
code = Initdb();
if (code != PRSUCCESS)
return code;
code = ubik_BeginTransReadAny(dbase, UBIK_READTRANS, &tt);
if (code)
return code;
code = ubik_SetLock(tt, 1, 1, LOCKREAD);
if (code)
ABORT_WITH(tt, code);
code = read_DbHeader(tt);
if (code)
ABORT_WITH(tt, code);
code = WhoIsThis(call, tt, cid);
if (code)
ABORT_WITH(tt, PRPERM);
temp = FindByID(tt, aid);
if (!temp)
ABORT_WITH(tt, PRNOENT);
code = pr_ReadEntry(tt, 0, temp, &tentry);
if (code != 0)
ABORT_WITH(tt, code);
if (!AccessOK(tt, *cid, &tentry, PRP_STATUS_MEM, PRP_STATUS_ANY))
ABORT_WITH(tt, PRPERM);
aentry->flags = tentry.flags >> PRIVATE_SHIFT;
if (aentry->flags == 0) {
if (tentry.flags & PRGRP)
aentry->flags = prp_group_default >> PRIVATE_SHIFT;
else
aentry->flags = prp_user_default >> PRIVATE_SHIFT;
}
aentry->owner = tentry.owner;
aentry->id = tentry.id;
strncpy(aentry->name, tentry.name, PR_MAXNAMELEN);
aentry->creator = tentry.creator;
aentry->ngroups = tentry.ngroups;
aentry->nusers = tentry.nusers;
aentry->count = tentry.count;
memset(aentry->reserved, 0, sizeof(aentry->reserved));
code = ubik_EndTrans(tt);
if (code)
return code;
return PRSUCCESS;
}
| 13,336 |
23,155 | 0 | nfs4_xdr_enc_setacl(struct rpc_rqst *req, __be32 *p, struct nfs_setaclargs *args)
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
.nops = 2,
};
int status;
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
encode_compound_hdr(&xdr, &hdr);
status = encode_putfh(&xdr, args->fh);
if (status)
goto out;
status = encode_setacl(&xdr, args);
out:
return status;
}
| 13,337 |
3,139 | 0 | static int setcmykspace(i_ctx_t * i_ctx_p, ref *r, int *stage, int *cont, int CIESubst)
{
os_ptr op = osp;
gs_color_space *pcs;
int code=0;
ref stref;
do {
switch (*stage) {
case 0:
if (istate->use_cie_color.value.boolval && !CIESubst) {
byte *body;
ref *nosubst;
code = dict_find_string(systemdict, "NOSUBSTDEVICECOLORS", &nosubst);
if (code != 0) {
if (!r_has_type(nosubst, t_boolean))
return_error(gs_error_typecheck);
}
if (code != 0 && nosubst->value.boolval) {
*stage = 4;
*cont = 1;
body = ialloc_string(32, "string");
if (body == 0)
return_error(gs_error_VMerror);
memcpy(body, "/DefaultCMYK ..nosubstdevicetest",32);
make_string(&stref, a_all | icurrent_space, 32, body);
r_set_attrs(&stref, a_executable);
esp++;
ref_assign(esp, &stref);
return o_push_estack;
} else {
*stage = 2;
*cont = 1;
body = ialloc_string(47, "string");
if (body == 0)
return_error(gs_error_VMerror);
memcpy(body, "{/DefaultCMYK /ColorSpace findresource} stopped", 47);
make_string(&stref, a_all | icurrent_space, 47, body);
r_set_attrs(&stref, a_executable);
esp++;
ref_assign(esp, &stref);
return o_push_estack;
}
}
/* fall through */
case 1:
pcs = gs_cspace_new_DeviceCMYK(imemory);
if (pcs == NULL)
return_error(gs_error_VMerror);
code = gs_setcolorspace(igs, pcs);
if (code >= 0) {
gs_client_color *pcc = gs_currentcolor_inline(igs);
cs_adjust_color_count(igs, -1); /* not strictly necessary */
pcc->paint.values[0] = 0;
pcc->paint.values[1] = 0;
pcc->paint.values[2] = 0;
pcc->paint.values[3] = 1;
pcc->pattern = 0; /* for GC */
gx_unset_dev_color(igs);
}
rc_decrement_only_cs(pcs, "zsetdevcspace");
*cont = 0;
*stage = 0;
break;
case 2:
if (!r_has_type(op, t_boolean))
return_error(gs_error_typecheck);
if (op->value.boolval) {
/* Failed to find the /DefaultCMYK CSA, so give up and
* just use DeviceCMYK
*/
pop(1);
*stage = 1;
break;
}
pop(1);
*stage = 3;
code = setcolorspace_nosubst(i_ctx_p);
if (code != 0)
return code;
break;
case 3:
/* We end up here after setting the DefaultGray CIE space
* We've finished setting the gray color space, so we
* just exit now
*/
*cont = 0;
*stage = 0;
break;
case 4:
/* We come here if /UseCIEColor is true, and NOSUBSTDEVICECOLORS
* is also true. We will have a boolean on the stack, if its true
* then we need to set the space (also on the stack), invoke
* .includecolorspace, and set /DeviceGray, otherwise we just need
* to set DeviceGray. See gs-cspace.ps.
*/
if (!r_has_type(op, t_boolean))
return_error(gs_error_typecheck);
pop(1);
*stage = 1;
*cont = 1;
if (op->value.boolval) {
*stage = 5;
code = setcolorspace_nosubst(i_ctx_p);
if (code != 0)
return code;
}
break;
case 5:
/* After stage 4 above, if we had to set a color space, we come
* here. Now we need to use .includecolorspace to register the space
* with any high-level devices which want it.
*/
*stage = 1;
*cont = 1;
code = zincludecolorspace(i_ctx_p);
if (code != 0)
return code;
break;
}
} while (*stage);
return code;
}
| 13,338 |
123,099 | 0 | void RenderWidgetHostViewAndroid::GetScreenInfo(WebKit::WebScreenInfo* result) {
RenderWidgetHostViewBase::GetDefaultScreenInfo(result);
}
| 13,339 |
19,627 | 0 | static void nf_ct_frag6_evictor(void)
{
local_bh_disable();
inet_frag_evictor(&nf_init_frags, &nf_frags);
local_bh_enable();
}
| 13,340 |
48,996 | 0 | struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
struct device *busdev,
bool p2pdev_forced)
{
struct net_device *ndev = brcmf_get_ifp(drvr, 0)->ndev;
struct brcmf_cfg80211_info *cfg;
struct wiphy *wiphy;
struct cfg80211_ops *ops;
struct brcmf_cfg80211_vif *vif;
struct brcmf_if *ifp;
s32 err = 0;
s32 io_type;
u16 *cap = NULL;
if (!ndev) {
brcmf_err("ndev is invalid\n");
return NULL;
}
ops = kmemdup(&brcmf_cfg80211_ops, sizeof(*ops), GFP_KERNEL);
if (!ops)
return NULL;
ifp = netdev_priv(ndev);
#ifdef CONFIG_PM
if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_WOWL_GTK))
ops->set_rekey_data = brcmf_cfg80211_set_rekey_data;
#endif
wiphy = wiphy_new(ops, sizeof(struct brcmf_cfg80211_info));
if (!wiphy) {
brcmf_err("Could not allocate wiphy device\n");
return NULL;
}
memcpy(wiphy->perm_addr, drvr->mac, ETH_ALEN);
set_wiphy_dev(wiphy, busdev);
cfg = wiphy_priv(wiphy);
cfg->wiphy = wiphy;
cfg->ops = ops;
cfg->pub = drvr;
init_vif_event(&cfg->vif_event);
INIT_LIST_HEAD(&cfg->vif_list);
vif = brcmf_alloc_vif(cfg, NL80211_IFTYPE_STATION);
if (IS_ERR(vif))
goto wiphy_out;
vif->ifp = ifp;
vif->wdev.netdev = ndev;
ndev->ieee80211_ptr = &vif->wdev;
SET_NETDEV_DEV(ndev, wiphy_dev(cfg->wiphy));
err = wl_init_priv(cfg);
if (err) {
brcmf_err("Failed to init iwm_priv (%d)\n", err);
brcmf_free_vif(vif);
goto wiphy_out;
}
ifp->vif = vif;
/* determine d11 io type before wiphy setup */
err = brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_VERSION, &io_type);
if (err) {
brcmf_err("Failed to get D11 version (%d)\n", err);
goto priv_out;
}
cfg->d11inf.io_type = (u8)io_type;
brcmu_d11_attach(&cfg->d11inf);
err = brcmf_setup_wiphy(wiphy, ifp);
if (err < 0)
goto priv_out;
brcmf_dbg(INFO, "Registering custom regulatory\n");
wiphy->reg_notifier = brcmf_cfg80211_reg_notifier;
wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG;
wiphy_apply_custom_regulatory(wiphy, &brcmf_regdom);
/* firmware defaults to 40MHz disabled in 2G band. We signal
* cfg80211 here that we do and have it decide we can enable
* it. But first check if device does support 2G operation.
*/
if (wiphy->bands[NL80211_BAND_2GHZ]) {
cap = &wiphy->bands[NL80211_BAND_2GHZ]->ht_cap.cap;
*cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
}
err = wiphy_register(wiphy);
if (err < 0) {
brcmf_err("Could not register wiphy device (%d)\n", err);
goto priv_out;
}
/* If cfg80211 didn't disable 40MHz HT CAP in wiphy_register(),
* setup 40MHz in 2GHz band and enable OBSS scanning.
*/
if (cap && (*cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)) {
err = brcmf_enable_bw40_2g(cfg);
if (!err)
err = brcmf_fil_iovar_int_set(ifp, "obss_coex",
BRCMF_OBSS_COEX_AUTO);
else
*cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
}
/* p2p might require that "if-events" get processed by fweh. So
* activate the already registered event handlers now and activate
* the rest when initialization has completed. drvr->config needs to
* be assigned before activating events.
*/
drvr->config = cfg;
err = brcmf_fweh_activate_events(ifp);
if (err) {
brcmf_err("FWEH activation failed (%d)\n", err);
goto wiphy_unreg_out;
}
err = brcmf_p2p_attach(cfg, p2pdev_forced);
if (err) {
brcmf_err("P2P initilisation failed (%d)\n", err);
goto wiphy_unreg_out;
}
err = brcmf_btcoex_attach(cfg);
if (err) {
brcmf_err("BT-coex initialisation failed (%d)\n", err);
brcmf_p2p_detach(&cfg->p2p);
goto wiphy_unreg_out;
}
if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_TDLS)) {
err = brcmf_fil_iovar_int_set(ifp, "tdls_enable", 1);
if (err) {
brcmf_dbg(INFO, "TDLS not enabled (%d)\n", err);
wiphy->flags &= ~WIPHY_FLAG_SUPPORTS_TDLS;
} else {
brcmf_fweh_register(cfg->pub, BRCMF_E_TDLS_PEER_EVENT,
brcmf_notify_tdls_peer_event);
}
}
/* (re-) activate FWEH event handling */
err = brcmf_fweh_activate_events(ifp);
if (err) {
brcmf_err("FWEH activation failed (%d)\n", err);
goto wiphy_unreg_out;
}
/* Fill in some of the advertised nl80211 supported features */
if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_SCAN_RANDOM_MAC)) {
wiphy->features |= NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR;
#ifdef CONFIG_PM
if (wiphy->wowlan &&
wiphy->wowlan->flags & WIPHY_WOWLAN_NET_DETECT)
wiphy->features |= NL80211_FEATURE_ND_RANDOM_MAC_ADDR;
#endif
}
return cfg;
wiphy_unreg_out:
wiphy_unregister(cfg->wiphy);
priv_out:
wl_deinit_priv(cfg);
brcmf_free_vif(vif);
ifp->vif = NULL;
wiphy_out:
brcmf_free_wiphy(wiphy);
kfree(ops);
return NULL;
}
| 13,341 |
18,980 | 0 | static inline int cookie_check(struct sk_buff *skb, __u32 cookie)
{
const struct iphdr *iph = ip_hdr(skb);
const struct tcphdr *th = tcp_hdr(skb);
__u32 seq = ntohl(th->seq) - 1;
__u32 mssind = check_tcp_syn_cookie(cookie, iph->saddr, iph->daddr,
th->source, th->dest, seq,
jiffies / (HZ * 60),
COUNTER_TRIES);
return mssind < ARRAY_SIZE(msstab) ? msstab[mssind] : 0;
}
| 13,342 |
45,019 | 0 | static int check_dr_read(struct x86_emulate_ctxt *ctxt)
{
int dr = ctxt->modrm_reg;
u64 cr4;
if (dr > 7)
return emulate_ud(ctxt);
cr4 = ctxt->ops->get_cr(ctxt, 4);
if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
return emulate_ud(ctxt);
if (check_dr7_gd(ctxt)) {
ulong dr6;
ctxt->ops->get_dr(ctxt, 6, &dr6);
dr6 &= ~15;
dr6 |= DR6_BD | DR6_RTM;
ctxt->ops->set_dr(ctxt, 6, dr6);
return emulate_db(ctxt);
}
return X86EMUL_CONTINUE;
}
| 13,343 |
115,675 | 0 | MATCHER_P2(EqualsMouseEvent, x, y, "") {
return arg.x() == x && arg.y() == y;
}
| 13,344 |
61,163 | 0 | activate_parameters_install_free (ActivateParametersInstall *parameters_install)
{
if (parameters_install->slot)
{
g_object_remove_weak_pointer (G_OBJECT (parameters_install->slot), (gpointer *) &parameters_install->slot);
}
if (parameters_install->parent_window)
{
g_object_remove_weak_pointer (G_OBJECT (parameters_install->parent_window), (gpointer *) &parameters_install->parent_window);
}
if (parameters_install->proxy != NULL)
{
g_object_unref (parameters_install->proxy);
}
nautilus_file_unref (parameters_install->file);
nautilus_file_list_free (parameters_install->files);
g_free (parameters_install->activation_directory);
g_free (parameters_install->uri);
g_free (parameters_install);
}
| 13,345 |
179,452 | 1 | static struct mount *clone_mnt(struct mount *old, struct dentry *root,
int flag)
{
struct super_block *sb = old->mnt.mnt_sb;
struct mount *mnt;
int err;
mnt = alloc_vfsmnt(old->mnt_devname);
if (!mnt)
return ERR_PTR(-ENOMEM);
if (flag & (CL_SLAVE | CL_PRIVATE | CL_SHARED_TO_SLAVE))
mnt->mnt_group_id = 0; /* not a peer of original */
else
mnt->mnt_group_id = old->mnt_group_id;
if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) {
err = mnt_alloc_group_id(mnt);
if (err)
goto out_free;
}
mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~(MNT_WRITE_HOLD|MNT_MARKED);
/* Don't allow unprivileged users to change mount flags */
if ((flag & CL_UNPRIVILEGED) && (mnt->mnt.mnt_flags & MNT_READONLY))
mnt->mnt.mnt_flags |= MNT_LOCK_READONLY;
/* Don't allow unprivileged users to reveal what is under a mount */
if ((flag & CL_UNPRIVILEGED) && list_empty(&old->mnt_expire))
mnt->mnt.mnt_flags |= MNT_LOCKED;
atomic_inc(&sb->s_active);
mnt->mnt.mnt_sb = sb;
mnt->mnt.mnt_root = dget(root);
mnt->mnt_mountpoint = mnt->mnt.mnt_root;
mnt->mnt_parent = mnt;
lock_mount_hash();
list_add_tail(&mnt->mnt_instance, &sb->s_mounts);
unlock_mount_hash();
if ((flag & CL_SLAVE) ||
((flag & CL_SHARED_TO_SLAVE) && IS_MNT_SHARED(old))) {
list_add(&mnt->mnt_slave, &old->mnt_slave_list);
mnt->mnt_master = old;
CLEAR_MNT_SHARED(mnt);
} else if (!(flag & CL_PRIVATE)) {
if ((flag & CL_MAKE_SHARED) || IS_MNT_SHARED(old))
list_add(&mnt->mnt_share, &old->mnt_share);
if (IS_MNT_SLAVE(old))
list_add(&mnt->mnt_slave, &old->mnt_slave);
mnt->mnt_master = old->mnt_master;
}
if (flag & CL_MAKE_SHARED)
set_mnt_shared(mnt);
/* stick the duplicate mount on the same expiry list
* as the original if that was on one */
if (flag & CL_EXPIRE) {
if (!list_empty(&old->mnt_expire))
list_add(&mnt->mnt_expire, &old->mnt_expire);
}
return mnt;
out_free:
mnt_free_id(mnt);
free_vfsmnt(mnt);
return ERR_PTR(err);
}
| 13,346 |
46,625 | 0 | static void __exit aes_mod_exit(void)
{
crypto_unregister_alg(&ccm_aes_alg);
}
| 13,347 |
180,662 | 1 | int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
{
struct super_block *sb = inode->i_sb;
ext4_lblk_t first_block, stop_block;
struct address_space *mapping = inode->i_mapping;
loff_t first_block_offset, last_block_offset;
handle_t *handle;
unsigned int credits;
int ret = 0;
if (!S_ISREG(inode->i_mode))
return -EOPNOTSUPP;
trace_ext4_punch_hole(inode, offset, length, 0);
/*
* Write out all dirty pages to avoid race conditions
* Then release them.
*/
if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
ret = filemap_write_and_wait_range(mapping, offset,
offset + length - 1);
if (ret)
return ret;
}
mutex_lock(&inode->i_mutex);
/* No need to punch hole beyond i_size */
if (offset >= inode->i_size)
goto out_mutex;
/*
* If the hole extends beyond i_size, set the hole
* to end after the page that contains i_size
*/
if (offset + length > inode->i_size) {
length = inode->i_size +
PAGE_CACHE_SIZE - (inode->i_size & (PAGE_CACHE_SIZE - 1)) -
offset;
}
if (offset & (sb->s_blocksize - 1) ||
(offset + length) & (sb->s_blocksize - 1)) {
/*
* Attach jinode to inode for jbd2 if we do any zeroing of
* partial block
*/
ret = ext4_inode_attach_jinode(inode);
if (ret < 0)
goto out_mutex;
}
first_block_offset = round_up(offset, sb->s_blocksize);
last_block_offset = round_down((offset + length), sb->s_blocksize) - 1;
/* Now release the pages and zero block aligned part of pages*/
if (last_block_offset > first_block_offset)
truncate_pagecache_range(inode, first_block_offset,
last_block_offset);
/* Wait all existing dio workers, newcomers will block on i_mutex */
ext4_inode_block_unlocked_dio(inode);
inode_dio_wait(inode);
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
credits = ext4_writepage_trans_blocks(inode);
else
credits = ext4_blocks_for_truncate(inode);
handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
ext4_std_error(sb, ret);
goto out_dio;
}
ret = ext4_zero_partial_blocks(handle, inode, offset,
length);
if (ret)
goto out_stop;
first_block = (offset + sb->s_blocksize - 1) >>
EXT4_BLOCK_SIZE_BITS(sb);
stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);
/* If there are no blocks to remove, return now */
if (first_block >= stop_block)
goto out_stop;
down_write(&EXT4_I(inode)->i_data_sem);
ext4_discard_preallocations(inode);
ret = ext4_es_remove_extent(inode, first_block,
stop_block - first_block);
if (ret) {
up_write(&EXT4_I(inode)->i_data_sem);
goto out_stop;
}
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
ret = ext4_ext_remove_space(inode, first_block,
stop_block - 1);
else
ret = ext4_ind_remove_space(handle, inode, first_block,
stop_block);
up_write(&EXT4_I(inode)->i_data_sem);
if (IS_SYNC(inode))
ext4_handle_sync(handle);
/* Now release the pages again to reduce race window */
if (last_block_offset > first_block_offset)
truncate_pagecache_range(inode, first_block_offset,
last_block_offset);
inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
ext4_mark_inode_dirty(handle, inode);
out_stop:
ext4_journal_stop(handle);
out_dio:
ext4_inode_resume_unlocked_dio(inode);
out_mutex:
mutex_unlock(&inode->i_mutex);
return ret;
}
| 13,348 |
112,711 | 0 | PassRefPtr<ArchiveResource> DocumentLoader::subresource(const KURL& url) const
{
if (!isCommitted())
return 0;
CachedResource* resource = m_cachedResourceLoader->cachedResource(url);
if (!resource || !resource->isLoaded())
return archiveResourceForURL(url);
if (resource->type() == CachedResource::MainResource)
return 0;
if (!resource->makePurgeable(false))
return 0;
ResourceBuffer* data = resource->resourceBuffer();
if (!data)
return 0;
return ArchiveResource::create(data->sharedBuffer(), url, resource->response());
}
| 13,349 |
161,364 | 0 | double timeDelta(base::TimeTicks time,
base::TimeTicks start,
double invalid_value = -1) {
return time.is_null() ? invalid_value : (time - start).InMillisecondsF();
}
| 13,350 |
91,341 | 0 | static MagickBooleanType InvokePDFDelegate(const MagickBooleanType verbose,
const char *command,char *message,ExceptionInfo *exception)
{
int
status;
#define ExecuteGhostscriptCommand(command,status) \
{ \
status=ExternalDelegateCommand(MagickFalse,verbose,command,message, \
exception); \
if (status == 0) \
return(MagickTrue); \
if (status < 0) \
return(MagickFalse); \
(void) ThrowMagickException(exception,GetMagickModule(),DelegateError, \
"FailedToExecuteCommand","`%s' (%d)",command,status); \
return(MagickFalse); \
}
#if defined(MAGICKCORE_GS_DELEGATE) || defined(MAGICKCORE_WINDOWS_SUPPORT)
#define SetArgsStart(command,args_start) \
if (args_start == (const char *) NULL) \
{ \
if (*command != '"') \
args_start=strchr(command,' '); \
else \
{ \
args_start=strchr(command+1,'"'); \
if (args_start != (const char *) NULL) \
args_start++; \
} \
}
char
**argv,
*errors;
const char
*args_start = (const char *) NULL;
const GhostInfo
*ghost_info;
gs_main_instance
*interpreter;
gsapi_revision_t
revision;
int
argc,
code;
register ssize_t
i;
#if defined(MAGICKCORE_WINDOWS_SUPPORT)
ghost_info=NTGhostscriptDLLVectors();
#else
GhostInfo
ghost_info_struct;
ghost_info=(&ghost_info_struct);
(void) memset(&ghost_info_struct,0,sizeof(ghost_info_struct));
ghost_info_struct.delete_instance=(void (*)(gs_main_instance *))
gsapi_delete_instance;
ghost_info_struct.exit=(int (*)(gs_main_instance *)) gsapi_exit;
ghost_info_struct.new_instance=(int (*)(gs_main_instance **,void *))
gsapi_new_instance;
ghost_info_struct.init_with_args=(int (*)(gs_main_instance *,int,char **))
gsapi_init_with_args;
ghost_info_struct.run_string=(int (*)(gs_main_instance *,const char *,int,
int *)) gsapi_run_string;
ghost_info_struct.set_stdio=(int (*)(gs_main_instance *,int (*)(void *,char *,
int),int (*)(void *,const char *,int),int (*)(void *, const char *, int)))
gsapi_set_stdio;
ghost_info_struct.revision=(int (*)(gsapi_revision_t *,int)) gsapi_revision;
#endif
if (ghost_info == (GhostInfo *) NULL)
ExecuteGhostscriptCommand(command,status);
if ((ghost_info->revision)(&revision,sizeof(revision)) != 0)
revision.revision=0;
if (verbose != MagickFalse)
{
(void) fprintf(stdout,"[ghostscript library %.2f]",(double)
revision.revision/100.0);
SetArgsStart(command,args_start);
(void) fputs(args_start,stdout);
}
errors=(char *) NULL;
status=(ghost_info->new_instance)(&interpreter,(void *) &errors);
if (status < 0)
ExecuteGhostscriptCommand(command,status);
code=0;
argv=StringToArgv(command,&argc);
if (argv == (char **) NULL)
{
(ghost_info->delete_instance)(interpreter);
return(MagickFalse);
}
(void) (ghost_info->set_stdio)(interpreter,(int (MagickDLLCall *)(void *,
char *,int)) NULL,PDFDelegateMessage,PDFDelegateMessage);
status=(ghost_info->init_with_args)(interpreter,argc-1,argv+1);
if (status == 0)
status=(ghost_info->run_string)(interpreter,"systemdict /start get exec\n",
0,&code);
(ghost_info->exit)(interpreter);
(ghost_info->delete_instance)(interpreter);
for (i=0; i < (ssize_t) argc; i++)
argv[i]=DestroyString(argv[i]);
argv=(char **) RelinquishMagickMemory(argv);
if (status != 0)
{
SetArgsStart(command,args_start);
if (status == -101) /* quit */
(void) FormatLocaleString(message,MagickPathExtent,
"[ghostscript library %.2f]%s: %s",(double) revision.revision/100.0,
args_start,errors);
else
{
(void) ThrowMagickException(exception,GetMagickModule(),DelegateError,
"PDFDelegateFailed","`[ghostscript library %.2f]%s': %s",(double)
revision.revision/100.0,args_start,errors);
if (errors != (char *) NULL)
errors=DestroyString(errors);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Ghostscript returns status %d, exit code %d",status,code);
return(MagickFalse);
}
}
if (errors != (char *) NULL)
errors=DestroyString(errors);
return(MagickTrue);
#else
ExecuteGhostscriptCommand(command,status);
#endif
}
| 13,351 |
96,600 | 0 | static int bin_imports(RCore *r, int mode, int va, const char *name) {
RBinInfo *info = r_bin_get_info (r->bin);
int bin_demangle = r_config_get_i (r->config, "bin.demangle");
bool keep_lib = r_config_get_i (r->config, "bin.demangle.libs");
RBinImport *import;
RListIter *iter;
bool lit = info ? info->has_lit: false;
char *str;
int i = 0;
if (!info) {
return false;
}
RList *imports = r_bin_get_imports (r->bin);
int cdsz = info? (info->bits == 64? 8: info->bits == 32? 4: info->bits == 16 ? 4: 0): 0;
if (IS_MODE_JSON (mode)) {
r_cons_print ("[");
} else if (IS_MODE_RAD (mode)) {
r_cons_println ("fs imports");
} else if (IS_MODE_NORMAL (mode)) {
r_cons_println ("[Imports]");
r_cons_println ("Num Vaddr Bind Type Name");
}
r_list_foreach (imports, iter, import) {
if (name && strcmp (import->name, name)) {
continue;
}
char *symname = strdup (import->name);
ut64 addr = lit ? r_core_bin_impaddr (r->bin, va, symname): 0;
if (bin_demangle) {
char *dname = r_bin_demangle (r->bin->cur, NULL, symname, addr, keep_lib);
if (dname) {
free (symname);
symname = r_str_newf ("sym.imp.%s", dname);
free (dname);
}
}
if (r->bin->prefix) {
char *prname = r_str_newf ("%s.%s", r->bin->prefix, symname);
free (symname);
symname = prname;
}
if (IS_MODE_SET (mode)) {
if (strstr (symname, ".dll_") && cdsz) {
r_meta_add (r->anal, R_META_TYPE_DATA, addr, addr + cdsz, NULL);
}
} else if (IS_MODE_SIMPLE (mode) || IS_MODE_SIMPLEST (mode)) {
r_cons_println (symname);
} else if (IS_MODE_JSON (mode)) {
str = r_str_escape_utf8_for_json (symname, -1);
str = r_str_replace (str, "\"", "\\\"", 1);
r_cons_printf ("%s{\"ordinal\":%d,"
"\"bind\":\"%s\","
"\"type\":\"%s\",",
iter->p ? "," : "",
import->ordinal,
import->bind,
import->type);
if (import->classname && import->classname[0]) {
r_cons_printf ("\"classname\":\"%s\","
"\"descriptor\":\"%s\",",
import->classname,
import->descriptor);
}
r_cons_printf ("\"name\":\"%s\",\"plt\":%"PFMT64d"}",
str, addr);
free (str);
} else if (IS_MODE_RAD (mode)) {
} else {
const char *bind = r_str_get (import->bind);
const char *type = r_str_get (import->type);
#if 0
r_cons_printf ("ordinal=%03d plt=0x%08"PFMT64x" bind=%s type=%s",
import->ordinal, addr, bind, type);
if (import->classname && import->classname[0]) {
r_cons_printf (" classname=%s", import->classname);
}
r_cons_printf (" name=%s", symname);
if (import->descriptor && import->descriptor[0]) {
r_cons_printf (" descriptor=%s", import->descriptor);
}
r_cons_newline ();
#else
r_cons_printf ("%4d 0x%08"PFMT64x" %7s %7s ",
import->ordinal, addr, bind, type);
if (import->classname && import->classname[0]) {
r_cons_printf ("%s.", import->classname);
}
r_cons_printf ("%s", symname);
if (import->descriptor && import->descriptor[0]) {
r_cons_printf (" descriptor=%s", import->descriptor);
}
r_cons_newline ();
#endif
}
R_FREE (symname);
i++;
}
if (IS_MODE_JSON (mode)) {
r_cons_print ("]");
} else if (IS_MODE_NORMAL (mode)) {
}
#if MYDB
osymbols = NULL;
sdb_free (mydb);
mydb = NULL;
#endif
return true;
}
| 13,352 |
40,976 | 0 | cmsBool GrowMLUpool(cmsMLU* mlu)
{
cmsUInt32Number size;
void *NewPtr;
if (mlu == NULL) return FALSE;
if (mlu ->PoolSize == 0)
size = 256;
else
size = mlu ->PoolSize * 2;
if (size < mlu ->PoolSize) return FALSE;
NewPtr = _cmsRealloc(mlu ->ContextID, mlu ->MemPool, size);
if (NewPtr == NULL) return FALSE;
mlu ->MemPool = NewPtr;
mlu ->PoolSize = size;
return TRUE;
}
| 13,353 |
34,301 | 0 | static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
{
struct inode *inode = ordered_extent->inode;
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans = NULL;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
struct extent_state *cached_state = NULL;
int compress_type = 0;
int ret;
bool nolock;
nolock = btrfs_is_free_space_inode(inode);
if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) {
ret = -EIO;
goto out;
}
if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */
btrfs_ordered_update_i_size(inode, 0, ordered_extent);
if (nolock)
trans = btrfs_join_transaction_nolock(root);
else
trans = btrfs_join_transaction(root);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
trans = NULL;
goto out;
}
trans->block_rsv = &root->fs_info->delalloc_block_rsv;
ret = btrfs_update_inode_fallback(trans, root, inode);
if (ret) /* -ENOMEM or corruption */
btrfs_abort_transaction(trans, root, ret);
goto out;
}
lock_extent_bits(io_tree, ordered_extent->file_offset,
ordered_extent->file_offset + ordered_extent->len - 1,
0, &cached_state);
if (nolock)
trans = btrfs_join_transaction_nolock(root);
else
trans = btrfs_join_transaction(root);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
trans = NULL;
goto out_unlock;
}
trans->block_rsv = &root->fs_info->delalloc_block_rsv;
if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
compress_type = ordered_extent->compress_type;
if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
BUG_ON(compress_type);
ret = btrfs_mark_extent_written(trans, inode,
ordered_extent->file_offset,
ordered_extent->file_offset +
ordered_extent->len);
} else {
BUG_ON(root == root->fs_info->tree_root);
ret = insert_reserved_file_extent(trans, inode,
ordered_extent->file_offset,
ordered_extent->start,
ordered_extent->disk_len,
ordered_extent->len,
ordered_extent->len,
compress_type, 0, 0,
BTRFS_FILE_EXTENT_REG);
}
unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
ordered_extent->file_offset, ordered_extent->len,
trans->transid);
if (ret < 0) {
btrfs_abort_transaction(trans, root, ret);
goto out_unlock;
}
add_pending_csums(trans, inode, ordered_extent->file_offset,
&ordered_extent->list);
btrfs_ordered_update_i_size(inode, 0, ordered_extent);
ret = btrfs_update_inode_fallback(trans, root, inode);
if (ret) { /* -ENOMEM or corruption */
btrfs_abort_transaction(trans, root, ret);
goto out_unlock;
}
ret = 0;
out_unlock:
unlock_extent_cached(io_tree, ordered_extent->file_offset,
ordered_extent->file_offset +
ordered_extent->len - 1, &cached_state, GFP_NOFS);
out:
if (root != root->fs_info->tree_root)
btrfs_delalloc_release_metadata(inode, ordered_extent->len);
if (trans)
btrfs_end_transaction(trans, root);
if (ret)
clear_extent_uptodate(io_tree, ordered_extent->file_offset,
ordered_extent->file_offset +
ordered_extent->len - 1, NULL, GFP_NOFS);
/*
* This needs to be done to make sure anybody waiting knows we are done
* updating everything for this ordered extent.
*/
btrfs_remove_ordered_extent(inode, ordered_extent);
/* once for us */
btrfs_put_ordered_extent(ordered_extent);
/* once for the tree */
btrfs_put_ordered_extent(ordered_extent);
return ret;
}
| 13,354 |
68,421 | 0 | static void put_ctx(struct perf_event_context *ctx)
{
if (atomic_dec_and_test(&ctx->refcount)) {
if (ctx->parent_ctx)
put_ctx(ctx->parent_ctx);
if (ctx->task && ctx->task != TASK_TOMBSTONE)
put_task_struct(ctx->task);
call_rcu(&ctx->rcu_head, free_ctx);
}
}
| 13,355 |
112,276 | 0 | void ChromeRenderMessageFilter::OnOpenChannelToTab(
int routing_id, int tab_id, const std::string& extension_id,
const std::string& channel_name, int* port_id) {
int port2_id;
ExtensionMessageService::AllocatePortIdPair(port_id, &port2_id);
BrowserThread::PostTask(
BrowserThread::UI, FROM_HERE,
base::Bind(&ChromeRenderMessageFilter::OpenChannelToTabOnUIThread, this,
render_process_id_, routing_id, port2_id, tab_id, extension_id,
channel_name));
}
| 13,356 |
27,137 | 0 | invoke_NPN_RemoveProperty(PluginInstance *plugin, NPObject *npobj, NPIdentifier propertyName)
{
npw_return_val_if_fail(rpc_method_invoke_possible(g_rpc_connection), false);
int error = rpc_method_invoke(g_rpc_connection,
RPC_METHOD_NPN_REMOVE_PROPERTY,
RPC_TYPE_NPW_PLUGIN_INSTANCE, plugin,
RPC_TYPE_NP_OBJECT, npobj,
RPC_TYPE_NP_IDENTIFIER, &propertyName,
RPC_TYPE_INVALID);
if (error != RPC_ERROR_NO_ERROR) {
npw_perror("NPN_RemoveProperty() invoke", error);
return false;
}
uint32_t ret;
error = rpc_method_wait_for_reply(g_rpc_connection,
RPC_TYPE_UINT32, &ret,
RPC_TYPE_INVALID);
if (error != RPC_ERROR_NO_ERROR) {
npw_perror("NPN_RemoveProperty() wait for reply", error);
return false;
}
return ret;
}
| 13,357 |
185,105 | 1 | void GpuVideoDecodeAccelerator::OnDecode(
base::SharedMemoryHandle handle, int32 id, int32 size) {
DCHECK(video_decode_accelerator_.get());
video_decode_accelerator_->Decode(media::BitstreamBuffer(id, handle, size));
}
| 13,358 |
49,203 | 0 | static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg,
size_t len)
{
struct sock *sk = sock->sk;
DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name);
struct sk_buff *skb = NULL;
struct net_device *dev;
struct sockcm_cookie sockc;
__be16 proto = 0;
int err;
int extra_len = 0;
/*
* Get and verify the address.
*/
if (saddr) {
if (msg->msg_namelen < sizeof(struct sockaddr))
return -EINVAL;
if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
proto = saddr->spkt_protocol;
} else
return -ENOTCONN; /* SOCK_PACKET must be sent giving an address */
/*
* Find the device first to size check it
*/
saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0;
retry:
rcu_read_lock();
dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
err = -ENODEV;
if (dev == NULL)
goto out_unlock;
err = -ENETDOWN;
if (!(dev->flags & IFF_UP))
goto out_unlock;
/*
* You may not queue a frame bigger than the mtu. This is the lowest level
* raw protocol and you must do your own fragmentation at this level.
*/
if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
if (!netif_supports_nofcs(dev)) {
err = -EPROTONOSUPPORT;
goto out_unlock;
}
extra_len = 4; /* We're doing our own CRC */
}
err = -EMSGSIZE;
if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
goto out_unlock;
if (!skb) {
size_t reserved = LL_RESERVED_SPACE(dev);
int tlen = dev->needed_tailroom;
unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;
rcu_read_unlock();
skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
if (skb == NULL)
return -ENOBUFS;
/* FIXME: Save some space for broken drivers that write a hard
* header at transmission time by themselves. PPP is the notable
* one here. This should really be fixed at the driver level.
*/
skb_reserve(skb, reserved);
skb_reset_network_header(skb);
/* Try to align data part correctly */
if (hhlen) {
skb->data -= hhlen;
skb->tail -= hhlen;
if (len < hhlen)
skb_reset_network_header(skb);
}
err = memcpy_from_msg(skb_put(skb, len), msg, len);
if (err)
goto out_free;
goto retry;
}
if (!dev_validate_header(dev, skb->data, len)) {
err = -EINVAL;
goto out_unlock;
}
if (len > (dev->mtu + dev->hard_header_len + extra_len) &&
!packet_extra_vlan_len_allowed(dev, skb)) {
err = -EMSGSIZE;
goto out_unlock;
}
sockc.tsflags = sk->sk_tsflags;
if (msg->msg_controllen) {
err = sock_cmsg_send(sk, msg, &sockc);
if (unlikely(err))
goto out_unlock;
}
skb->protocol = proto;
skb->dev = dev;
skb->priority = sk->sk_priority;
skb->mark = sk->sk_mark;
sock_tx_timestamp(sk, sockc.tsflags, &skb_shinfo(skb)->tx_flags);
if (unlikely(extra_len == 4))
skb->no_fcs = 1;
skb_probe_transport_header(skb, 0);
dev_queue_xmit(skb);
rcu_read_unlock();
return len;
out_unlock:
rcu_read_unlock();
out_free:
kfree_skb(skb);
return err;
}
| 13,359 |
80,858 | 0 | GF_Err stsz_dump(GF_Box *a, FILE * trace)
{
GF_SampleSizeBox *p;
u32 i;
p = (GF_SampleSizeBox *)a;
if (a->type == GF_ISOM_BOX_TYPE_STSZ) {
gf_isom_box_dump_start(a, "SampleSizeBox", trace);
}
else {
gf_isom_box_dump_start(a, "CompactSampleSizeBox", trace);
}
fprintf(trace, "SampleCount=\"%d\"", p->sampleCount);
if (a->type == GF_ISOM_BOX_TYPE_STSZ) {
if (p->sampleSize) {
fprintf(trace, " ConstantSampleSize=\"%d\"", p->sampleSize);
}
} else {
fprintf(trace, " SampleSizeBits=\"%d\"", p->sampleSize);
}
fprintf(trace, ">\n");
if ((a->type != GF_ISOM_BOX_TYPE_STSZ) || !p->sampleSize) {
if (!p->sizes && p->size) {
fprintf(trace, "<!--WARNING: No Sample Size indications-->\n");
} else {
for (i=0; i<p->sampleCount; i++) {
fprintf(trace, "<SampleSizeEntry Size=\"%d\"/>\n", p->sizes[i]);
}
}
}
if (!p->size) {
fprintf(trace, "<SampleSizeEntry Size=\"\"/>\n");
}
gf_isom_box_dump_done((a->type == GF_ISOM_BOX_TYPE_STSZ) ? "SampleSizeBox" : "CompactSampleSizeBox", a, trace);
return GF_OK;
}
| 13,360 |
67,092 | 0 | RBinWasmObj *r_bin_wasm_init (RBinFile *arch) {
RBinWasmObj *bin = R_NEW0 (RBinWasmObj);
if (!bin) {
return NULL;
}
if (!(bin->buf = r_buf_new ())) {
free (bin);
return NULL;
}
bin->size = (ut32)arch->buf->length;
if (!r_buf_set_bytes (bin->buf, arch->buf->buf, bin->size)) {
r_bin_wasm_destroy (arch);
free (bin);
return NULL;
}
bin->g_sections = r_bin_wasm_get_sections (bin);
bin->g_types = r_bin_wasm_get_types (bin);
bin->g_imports = r_bin_wasm_get_imports (bin);
bin->g_exports = r_bin_wasm_get_exports (bin);
bin->g_tables = r_bin_wasm_get_tables (bin);
bin->g_memories = r_bin_wasm_get_memories (bin);
bin->g_globals = r_bin_wasm_get_globals (bin);
bin->g_codes = r_bin_wasm_get_codes (bin);
bin->g_datas = r_bin_wasm_get_datas (bin);
bin->entrypoint = r_bin_wasm_get_entrypoint (bin);
return bin;
}
| 13,361 |
115,664 | 0 | void ClientSession::OnConnectionOpened(
protocol::ConnectionToClient* connection) {
DCHECK_EQ(connection_.get(), connection);
authenticated_ = true;
event_handler_->OnSessionAuthenticated(this);
}
| 13,362 |
76,293 | 0 | int media_changed(struct cdrom_device_info *cdi, int queue)
{
unsigned int mask = (1 << (queue & 1));
int ret = !!(cdi->mc_flags & mask);
bool changed;
if (!CDROM_CAN(CDC_MEDIA_CHANGED))
return ret;
/* changed since last call? */
if (cdi->ops->check_events) {
BUG_ON(!queue); /* shouldn't be called from VFS path */
cdrom_update_events(cdi, DISK_EVENT_MEDIA_CHANGE);
changed = cdi->ioctl_events & DISK_EVENT_MEDIA_CHANGE;
cdi->ioctl_events = 0;
} else
changed = cdi->ops->media_changed(cdi, CDSL_CURRENT);
if (changed) {
cdi->mc_flags = 0x3; /* set bit on both queues */
ret |= 1;
cdi->media_written = 0;
}
cdi->mc_flags &= ~mask; /* clear bit */
return ret;
}
| 13,363 |
27,485 | 0 | static void ipgre_fb_tunnel_init(struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
struct iphdr *iph = &tunnel->parms.iph;
struct ipgre_net *ign = net_generic(dev_net(dev), ipgre_net_id);
tunnel->dev = dev;
strcpy(tunnel->parms.name, dev->name);
iph->version = 4;
iph->protocol = IPPROTO_GRE;
iph->ihl = 5;
tunnel->hlen = sizeof(struct iphdr) + 4;
dev_hold(dev);
ign->tunnels_wc[0] = tunnel;
}
| 13,364 |
171,459 | 0 | static void set_error_detail(vpx_codec_alg_priv_t *ctx,
const char *const error) {
ctx->base.err_detail = error;
}
| 13,365 |
51,014 | 0 | static void drop_links(struct nameidata *nd)
{
int i = nd->depth;
while (i--) {
struct saved *last = nd->stack + i;
do_delayed_call(&last->done);
clear_delayed_call(&last->done);
}
}
| 13,366 |
153,369 | 0 | void TabAnimationDelegate::AnimationProgressed(
const gfx::Animation* animation) {
tab_->SetVisible(tab_strip_->ShouldTabBeVisible(tab_));
}
| 13,367 |
122,472 | 0 | void InspectorController::flushPendingFrontendMessages()
{
m_agents.flushPendingFrontendMessages();
}
| 13,368 |
50,520 | 0 | static void perf_set_shadow_time(struct perf_event *event,
struct perf_event_context *ctx,
u64 tstamp)
{
/*
* use the correct time source for the time snapshot
*
* We could get by without this by leveraging the
* fact that to get to this function, the caller
* has most likely already called update_context_time()
* and update_cgrp_time_xx() and thus both timestamps
* are identical (or very close). Given that tstamp is
* already adjusted for cgroup, we could say that:
* tstamp - ctx->timestamp
* is equivalent to
* tstamp - cgrp->timestamp.
*
* Then, in perf_output_read(), the calculation would
* work with no changes because:
* - event is guaranteed scheduled in
* - no scheduled out in between
* - thus the timestamp would be the same
*
* But this is a bit hairy.
*
* So instead, we have an explicit cgroup call to remain
* within the time source all along. We believe it
* is cleaner and simpler to understand.
*/
if (is_cgroup_event(event))
perf_cgroup_set_shadow_time(event, tstamp);
else
event->shadow_ctx_time = tstamp - ctx->timestamp;
}
| 13,369 |
31,942 | 0 | static void perf_event_for_each(struct perf_event *event,
void (*func)(struct perf_event *))
{
struct perf_event_context *ctx = event->ctx;
struct perf_event *sibling;
WARN_ON_ONCE(ctx->parent_ctx);
mutex_lock(&ctx->mutex);
event = event->group_leader;
perf_event_for_each_child(event, func);
list_for_each_entry(sibling, &event->sibling_list, group_entry)
perf_event_for_each_child(sibling, func);
mutex_unlock(&ctx->mutex);
}
| 13,370 |
57,152 | 0 | nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata)
{
struct nfs4_layoutreturn *lrp = calldata;
dprintk("--> %s\n", __func__);
nfs41_setup_sequence(lrp->clp->cl_session,
&lrp->args.seq_args,
&lrp->res.seq_res,
task);
}
| 13,371 |
160,570 | 0 | RenderFrameImpl::CreateWorkerFetchContext() {
blink::WebServiceWorkerNetworkProvider* web_provider =
frame_->GetDocumentLoader()->GetServiceWorkerNetworkProvider();
DCHECK(web_provider);
ServiceWorkerNetworkProvider* provider =
ServiceWorkerNetworkProvider::FromWebServiceWorkerNetworkProvider(
web_provider);
mojom::ServiceWorkerWorkerClientRequest service_worker_client_request;
mojom::ServiceWorkerContainerHostPtrInfo container_host_ptr_info;
ServiceWorkerProviderContext* provider_context = provider->context();
if (provider_context) {
service_worker_client_request =
provider_context->CreateWorkerClientRequest();
if (ServiceWorkerUtils::IsServicificationEnabled())
container_host_ptr_info = provider_context->CloneContainerHostPtrInfo();
}
std::unique_ptr<WorkerFetchContextImpl> worker_fetch_context =
std::make_unique<WorkerFetchContextImpl>(
std::move(service_worker_client_request),
std::move(container_host_ptr_info), GetLoaderFactoryBundle()->Clone(),
GetContentClient()->renderer()->CreateURLLoaderThrottleProvider(
URLLoaderThrottleProviderType::kWorker));
worker_fetch_context->set_parent_frame_id(routing_id_);
worker_fetch_context->set_site_for_cookies(
frame_->GetDocument().SiteForCookies());
worker_fetch_context->set_is_secure_context(
frame_->GetDocument().IsSecureContext());
worker_fetch_context->set_service_worker_provider_id(provider->provider_id());
worker_fetch_context->set_is_controlled_by_service_worker(
provider->IsControlledByServiceWorker());
worker_fetch_context->set_origin_url(
GURL(frame_->GetDocument().Url()).GetOrigin());
{
SCOPED_UMA_HISTOGRAM_TIMER(
"RenderFrameObservers.WillCreateWorkerFetchContext");
for (auto& observer : observers_)
observer.WillCreateWorkerFetchContext(worker_fetch_context.get());
}
return std::move(worker_fetch_context);
}
| 13,372 |
138,685 | 0 | int RenderFrameHostImpl::GetFrameTreeNodeId() {
return frame_tree_node_->frame_tree_node_id();
}
| 13,373 |
12,419 | 0 | SPL_METHOD(MultipleIterator, next)
{
spl_SplObjectStorage *intern;
spl_SplObjectStorageElement *element;
zval *it;
intern = (spl_SplObjectStorage*)zend_object_store_get_object(getThis() TSRMLS_CC);
if (zend_parse_parameters_none() == FAILURE) {
return;
}
zend_hash_internal_pointer_reset_ex(&intern->storage, &intern->pos);
while (zend_hash_get_current_data_ex(&intern->storage, (void**)&element, &intern->pos) == SUCCESS && !EG(exception)) {
it = element->obj;
zend_call_method_with_0_params(&it, Z_OBJCE_P(it), &Z_OBJCE_P(it)->iterator_funcs.zf_next, "next", NULL);
zend_hash_move_forward_ex(&intern->storage, &intern->pos);
}
}
| 13,374 |
113,348 | 0 | void VideoRendererBase::AttemptRead_Locked() {
lock_.AssertAcquired();
DCHECK_NE(kEnded, state_);
if (pending_read_ ||
NumFrames_Locked() == limits::kMaxVideoFrames ||
(!ready_frames_.empty() && ready_frames_.back()->IsEndOfStream()) ||
state_ == kFlushingDecoder ||
state_ == kFlushing) {
return;
}
pending_read_ = true;
decoder_->Read(base::Bind(&VideoRendererBase::FrameReady, this));
}
| 13,375 |
116,046 | 0 | string16 GetUrlWithLang(const GURL& url) {
return ASCIIToUTF16(google_util::AppendGoogleLocaleParam(url).spec());
}
| 13,376 |
53,626 | 0 | int cdc_ncm_rx_verify_ndp16(struct sk_buff *skb_in, int ndpoffset)
{
struct usbnet *dev = netdev_priv(skb_in->dev);
struct usb_cdc_ncm_ndp16 *ndp16;
int ret = -EINVAL;
if ((ndpoffset + sizeof(struct usb_cdc_ncm_ndp16)) > skb_in->len) {
netif_dbg(dev, rx_err, dev->net, "invalid NDP offset <%u>\n",
ndpoffset);
goto error;
}
ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb_in->data + ndpoffset);
if (le16_to_cpu(ndp16->wLength) < USB_CDC_NCM_NDP16_LENGTH_MIN) {
netif_dbg(dev, rx_err, dev->net, "invalid DPT16 length <%u>\n",
le16_to_cpu(ndp16->wLength));
goto error;
}
ret = ((le16_to_cpu(ndp16->wLength) -
sizeof(struct usb_cdc_ncm_ndp16)) /
sizeof(struct usb_cdc_ncm_dpe16));
ret--; /* we process NDP entries except for the last one */
if ((sizeof(struct usb_cdc_ncm_ndp16) +
ret * (sizeof(struct usb_cdc_ncm_dpe16))) > skb_in->len) {
netif_dbg(dev, rx_err, dev->net, "Invalid nframes = %d\n", ret);
ret = -EINVAL;
}
error:
return ret;
}
| 13,377 |
57,944 | 0 | static int nf_tables_check_loops(const struct nft_ctx *ctx,
const struct nft_chain *chain)
{
const struct nft_rule *rule;
const struct nft_expr *expr, *last;
const struct nft_set *set;
struct nft_set_binding *binding;
struct nft_set_iter iter;
if (ctx->chain == chain)
return -ELOOP;
list_for_each_entry(rule, &chain->rules, list) {
nft_rule_for_each_expr(expr, last, rule) {
const struct nft_data *data = NULL;
int err;
if (!expr->ops->validate)
continue;
err = expr->ops->validate(ctx, expr, &data);
if (err < 0)
return err;
if (data == NULL)
continue;
switch (data->verdict) {
case NFT_JUMP:
case NFT_GOTO:
err = nf_tables_check_loops(ctx, data->chain);
if (err < 0)
return err;
default:
break;
}
}
}
list_for_each_entry(set, &ctx->table->sets, list) {
if (!(set->flags & NFT_SET_MAP) ||
set->dtype != NFT_DATA_VERDICT)
continue;
list_for_each_entry(binding, &set->bindings, list) {
if (binding->chain != chain)
continue;
iter.skip = 0;
iter.count = 0;
iter.err = 0;
iter.fn = nf_tables_loop_check_setelem;
set->ops->walk(ctx, set, &iter);
if (iter.err < 0)
return iter.err;
}
}
return 0;
}
| 13,378 |
58,404 | 0 | static inline void _gdScaleCol (gdImagePtr pSrc, unsigned int src_width, gdImagePtr pRes, unsigned int dst_width, unsigned int dst_height, unsigned int uCol, LineContribType *contrib)
{
unsigned int y;
for (y = 0; y < dst_height - 1; y++) {
register unsigned char r = 0, g = 0, b = 0, a = 0;
const int iLeft = contrib->ContribRow[y].Left;
const int iRight = contrib->ContribRow[y].Right;
int i;
/* Accumulate each channel */
for (i = iLeft; i <= iRight; i++) {
const int pCurSrc = pSrc->tpixels[i][uCol];
const int i_iLeft = i - iLeft;
r += (unsigned char)(contrib->ContribRow[y].Weights[i_iLeft] * (double)(gdTrueColorGetRed(pCurSrc)));
g += (unsigned char)(contrib->ContribRow[y].Weights[i_iLeft] * (double)(gdTrueColorGetGreen(pCurSrc)));
b += (unsigned char)(contrib->ContribRow[y].Weights[i_iLeft] * (double)(gdTrueColorGetBlue(pCurSrc)));
a += (unsigned char)(contrib->ContribRow[y].Weights[i_iLeft] * (double)(gdTrueColorGetAlpha(pCurSrc)));
}
pRes->tpixels[y][uCol] = gdTrueColorAlpha(r, g, b, a);
}
}
| 13,379 |
67,294 | 0 | struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
{
struct dentry *p;
for (p = p2; !IS_ROOT(p); p = p->d_parent) {
if (p->d_parent == p1)
return p;
}
return NULL;
}
| 13,380 |
145,444 | 0 | quic::QuicConnection* server_connection() {
return server_peer_->quic_transport()->connection();
}
| 13,381 |
93,218 | 0 | METHODDEF(JDIMENSION)
get_raw_row(j_compress_ptr cinfo, cjpeg_source_ptr sinfo)
/* This version is for reading raw-byte-format files with maxval = MAXJSAMPLE.
* In this case we just read right into the JSAMPLE buffer!
* Note that same code works for PPM and PGM files.
*/
{
ppm_source_ptr source = (ppm_source_ptr)sinfo;
if (!ReadOK(source->pub.input_file, source->iobuffer, source->buffer_width))
ERREXIT(cinfo, JERR_INPUT_EOF);
return 1;
}
| 13,382 |
122,065 | 0 | void ProfileDependencyManager::AddEdge(ProfileKeyedBaseFactory* depended,
ProfileKeyedBaseFactory* dependee) {
edges_.insert(std::make_pair(depended, dependee));
destruction_order_.clear();
}
| 13,383 |
115,944 | 0 | void ewk_frame_redirect_provisional_load(Evas_Object* ewkFrame)
{
evas_object_smart_callback_call(ewkFrame, "redirect,load,provisional", 0);
}
| 13,384 |
13,431 | 0 | void js_getglobal(js_State *J, const char *name)
{
jsR_getproperty(J, J->G, name);
}
| 13,385 |
11,360 | 0 | fbCombineDisjointAtopReverseC (CARD32 *dest, CARD32 *src, CARD32 *mask, int width)
{
fbCombineDisjointGeneralC (dest, src, mask, width, CombineBAtop);
}
| 13,386 |
88,296 | 0 | poolCopyStringN(STRING_POOL *pool, const XML_Char *s, int n) {
if (! pool->ptr && ! poolGrow(pool)) {
/* The following line is unreachable given the current usage of
* poolCopyStringN(). Currently it is called from exactly one
* place to copy the text of a simple general entity. By that
* point, the name of the entity is already stored in the pool, so
* pool->ptr cannot be NULL.
*
* If poolCopyStringN() is used elsewhere as it well might be,
* this line may well become executable again. Regardless, this
* sort of check shouldn't be removed lightly, so we just exclude
* it from the coverage statistics.
*/
return NULL; /* LCOV_EXCL_LINE */
}
for (; n > 0; --n, s++) {
if (! poolAppendChar(pool, *s))
return NULL;
}
s = pool->start;
poolFinish(pool);
return s;
}
| 13,387 |
22,920 | 0 | __nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
{
struct nfs4_lock_state *pos;
list_for_each_entry(pos, &state->lock_states, ls_locks) {
if (pos->ls_owner != fl_owner)
continue;
atomic_inc(&pos->ls_count);
return pos;
}
return NULL;
}
| 13,388 |
57,793 | 0 | static void process_smi_save_seg_32(struct kvm_vcpu *vcpu, char *buf, int n)
{
struct kvm_segment seg;
int offset;
kvm_get_segment(vcpu, &seg, n);
put_smstate(u32, buf, 0x7fa8 + n * 4, seg.selector);
if (n < 3)
offset = 0x7f84 + n * 12;
else
offset = 0x7f2c + (n - 3) * 12;
put_smstate(u32, buf, offset + 8, seg.base);
put_smstate(u32, buf, offset + 4, seg.limit);
put_smstate(u32, buf, offset, process_smi_get_segment_flags(&seg));
}
| 13,389 |
124,543 | 0 | static inline RenderObject* findFirstLetterBlock(RenderBlock* start)
{
RenderObject* firstLetterBlock = start;
while (true) {
bool canHaveFirstLetterRenderer = firstLetterBlock->style()->hasPseudoStyle(FIRST_LETTER)
&& firstLetterBlock->canHaveGeneratedChildren()
&& (!firstLetterBlock->isFlexibleBox() || firstLetterBlock->isRenderButton());
if (canHaveFirstLetterRenderer)
return firstLetterBlock;
RenderObject* parentBlock = firstLetterBlock->parent();
if (firstLetterBlock->isReplaced() || !parentBlock || parentBlock->firstChild() != firstLetterBlock ||
(!parentBlock->isRenderBlockFlow() && !parentBlock->isRenderButton()))
return 0;
firstLetterBlock = parentBlock;
}
return 0;
}
| 13,390 |
129,602 | 0 | LayoutSVGViewportContainer::LayoutSVGViewportContainer(SVGElement* node)
: LayoutSVGContainer(node)
, m_isLayoutSizeChanged(false)
, m_needsTransformUpdate(true)
{
}
| 13,391 |
38,666 | 0 | void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq)
{
ath_txq_lock(sc, txq);
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
int idx = txq->txq_tailidx;
while (!list_empty(&txq->txq_fifo[idx])) {
ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx]);
INCR(idx, ATH_TXFIFO_DEPTH);
}
txq->txq_tailidx = idx;
}
txq->axq_link = NULL;
txq->axq_tx_inprogress = false;
ath_drain_txq_list(sc, txq, &txq->axq_q);
ath_txq_unlock_complete(sc, txq);
}
| 13,392 |
147,834 | 0 | static void SetterCallWithExecutionContextStringAttributeAttributeSetter(
v8::Local<v8::Value> v8_value, const v8::FunctionCallbackInfo<v8::Value>& info) {
v8::Isolate* isolate = info.GetIsolate();
ALLOW_UNUSED_LOCAL(isolate);
v8::Local<v8::Object> holder = info.Holder();
ALLOW_UNUSED_LOCAL(holder);
TestObject* impl = V8TestObject::ToImpl(holder);
V8StringResource<> cpp_value = v8_value;
if (!cpp_value.Prepare())
return;
ExecutionContext* execution_context = ExecutionContext::ForRelevantRealm(info);
impl->setSetterCallWithExecutionContextStringAttribute(execution_context, cpp_value);
}
| 13,393 |
101,931 | 0 | PrintingMessageFilter::~PrintingMessageFilter() {
}
| 13,394 |
21,475 | 0 | build_path_from_dentry(struct dentry *direntry)
{
struct dentry *temp;
int namelen;
int dfsplen;
char *full_path;
char dirsep;
struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
unsigned seq;
dirsep = CIFS_DIR_SEP(cifs_sb);
if (tcon->Flags & SMB_SHARE_IS_IN_DFS)
dfsplen = strnlen(tcon->treeName, MAX_TREE_SIZE + 1);
else
dfsplen = 0;
cifs_bp_rename_retry:
namelen = dfsplen;
seq = read_seqbegin(&rename_lock);
rcu_read_lock();
for (temp = direntry; !IS_ROOT(temp);) {
namelen += (1 + temp->d_name.len);
temp = temp->d_parent;
if (temp == NULL) {
cERROR(1, "corrupt dentry");
rcu_read_unlock();
return NULL;
}
}
rcu_read_unlock();
full_path = kmalloc(namelen+1, GFP_KERNEL);
if (full_path == NULL)
return full_path;
full_path[namelen] = 0; /* trailing null */
rcu_read_lock();
for (temp = direntry; !IS_ROOT(temp);) {
spin_lock(&temp->d_lock);
namelen -= 1 + temp->d_name.len;
if (namelen < 0) {
spin_unlock(&temp->d_lock);
break;
} else {
full_path[namelen] = dirsep;
strncpy(full_path + namelen + 1, temp->d_name.name,
temp->d_name.len);
cFYI(0, "name: %s", full_path + namelen);
}
spin_unlock(&temp->d_lock);
temp = temp->d_parent;
if (temp == NULL) {
cERROR(1, "corrupt dentry");
rcu_read_unlock();
kfree(full_path);
return NULL;
}
}
rcu_read_unlock();
if (namelen != dfsplen || read_seqretry(&rename_lock, seq)) {
cFYI(1, "did not end path lookup where expected. namelen=%d "
"dfsplen=%d", namelen, dfsplen);
/* presumably this is only possible if racing with a rename
of one of the parent directories (we can not lock the dentries
above us to prevent this, but retrying should be harmless) */
kfree(full_path);
goto cifs_bp_rename_retry;
}
/* DIR_SEP already set for byte 0 / vs \ but not for
subsequent slashes in prepath which currently must
be entered the right way - not sure if there is an alternative
since the '\' is a valid posix character so we can not switch
those safely to '/' if any are found in the middle of the prepath */
/* BB test paths to Windows with '/' in the midst of prepath */
if (dfsplen) {
strncpy(full_path, tcon->treeName, dfsplen);
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) {
int i;
for (i = 0; i < dfsplen; i++) {
if (full_path[i] == '\\')
full_path[i] = '/';
}
}
}
return full_path;
}
| 13,395 |
97,096 | 0 | bool AreURLsInPageNavigation(const GURL& existing_url, const GURL& new_url) {
if (existing_url == new_url || !new_url.has_ref())
return false;
url_canon::Replacements<char> replacements;
replacements.ClearRef();
return existing_url.ReplaceComponents(replacements) ==
new_url.ReplaceComponents(replacements);
}
| 13,396 |
60,581 | 0 | int snd_seq_create_kernel_client(struct snd_card *card, int client_index,
const char *name_fmt, ...)
{
struct snd_seq_client *client;
va_list args;
if (snd_BUG_ON(in_interrupt()))
return -EBUSY;
if (card && client_index >= SNDRV_SEQ_CLIENTS_PER_CARD)
return -EINVAL;
if (card == NULL && client_index >= SNDRV_SEQ_GLOBAL_CLIENTS)
return -EINVAL;
if (mutex_lock_interruptible(&register_mutex))
return -ERESTARTSYS;
if (card) {
client_index += SNDRV_SEQ_GLOBAL_CLIENTS
+ card->number * SNDRV_SEQ_CLIENTS_PER_CARD;
if (client_index >= SNDRV_SEQ_DYNAMIC_CLIENTS_BEGIN)
client_index = -1;
}
/* empty write queue as default */
client = seq_create_client1(client_index, 0);
if (client == NULL) {
mutex_unlock(&register_mutex);
return -EBUSY; /* failure code */
}
usage_alloc(&client_usage, 1);
client->accept_input = 1;
client->accept_output = 1;
client->data.kernel.card = card;
va_start(args, name_fmt);
vsnprintf(client->name, sizeof(client->name), name_fmt, args);
va_end(args);
client->type = KERNEL_CLIENT;
mutex_unlock(&register_mutex);
/* make others aware this new client */
snd_seq_system_client_ev_client_start(client->number);
/* return client number to caller */
return client->number;
}
| 13,397 |
131,368 | 0 | static void doubleMethodMethodCallback(const v8::FunctionCallbackInfo<v8::Value>& info)
{
TRACE_EVENT_SET_SAMPLING_STATE("Blink", "DOMMethod");
TestObjectPythonV8Internal::doubleMethodMethod(info);
TRACE_EVENT_SET_SAMPLING_STATE("V8", "V8Execution");
}
| 13,398 |
102,486 | 0 | void LayerTreeCoordinator::setVisibleContentsRect(const IntRect& rect, float scale, const FloatPoint& trajectoryVector)
{
bool contentsRectDidChange = rect != m_visibleContentsRect;
bool contentsScaleDidChange = scale != m_contentsScale;
toCoordinatedGraphicsLayer(m_nonCompositedContentLayer.get())->setVisibleContentRectTrajectoryVector(trajectoryVector);
if (contentsRectDidChange || contentsScaleDidChange) {
m_visibleContentsRect = rect;
m_contentsScale = scale;
HashSet<WebCore::CoordinatedGraphicsLayer*>::iterator end = m_registeredLayers.end();
for (HashSet<WebCore::CoordinatedGraphicsLayer*>::iterator it = m_registeredLayers.begin(); it != end; ++it) {
if (contentsScaleDidChange)
(*it)->setContentsScale(scale);
if (contentsRectDidChange)
(*it)->adjustVisibleRect();
}
}
scheduleLayerFlush();
if (m_webPage->useFixedLayout())
m_webPage->setFixedVisibleContentRect(rect);
if (contentsRectDidChange)
m_shouldSendScrollPositionUpdate = true;
}
| 13,399 |