unique_id (int64, 13 to 189k) | target (int64, 0 to 1) | code (string, lengths 20 to 241k) | __index_level_0__ (int64, 0 to 18.9k)
---|---|---|---|
47,072 | 0 | static int __init twofish_init(void)
{
u64 xcr0;
if (!cpu_has_avx || !cpu_has_osxsave) {
printk(KERN_INFO "AVX instructions are not detected.\n");
return -ENODEV;
}
xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
if ((xcr0 & (XSTATE_SSE | XSTATE_YMM)) != (XSTATE_SSE | XSTATE_YMM)) {
printk(KERN_INFO "AVX detected but unusable.\n");
return -ENODEV;
}
return crypto_register_algs(twofish_algs, ARRAY_SIZE(twofish_algs));
}
| 7,000 |
13,542 | 0 | static void Free_PairPos1( HB_PairPosFormat1* ppf1,
HB_UShort format1,
HB_UShort format2 )
{
HB_UShort n, count;
HB_PairSet* ps;
if ( ppf1->PairSet )
{
count = ppf1->PairSetCount;
ps = ppf1->PairSet;
for ( n = 0; n < count; n++ )
Free_PairSet( &ps[n], format1, format2 );
FREE( ps );
}
}
| 7,001 |
97,871 | 0 | void RenderView::CapturePageInfo(int load_id, bool preliminary_capture) {
if (load_id != page_id_)
return; // this capture call is no longer relevant due to navigation
if (load_id == last_indexed_page_id_)
return; // we already indexed this page
if (!webview())
return;
WebFrame* main_frame = webview()->mainFrame();
if (!main_frame)
return;
if (main_frame->isViewSourceModeEnabled())
return;
WebDataSource* ds = main_frame->dataSource();
if (ds && ds->hasUnreachableURL())
return;
if (!preliminary_capture)
last_indexed_page_id_ = load_id;
GURL url(main_frame->url());
if (url.is_empty())
return;
string16 contents;
CaptureText(main_frame, &contents);
if (contents.size()) {
base::TimeTicks begin_time = base::TimeTicks::Now();
std::string language = DetermineTextLanguage(contents);
UMA_HISTOGRAM_MEDIUM_TIMES("Renderer4.LanguageDetection",
base::TimeTicks::Now() - begin_time);
WebKit::WebDocument document = main_frame->document();
Send(new ViewHostMsg_PageContents(routing_id_, url, load_id, contents,
language, IsPageTranslatable(&document)));
}
OnCaptureThumbnail();
}
| 7,002 |
184,733 | 1 | GesturePoint::GesturePoint()
: first_touch_time_(0.0),
last_touch_time_(0.0),
last_tap_time_(0.0),
velocity_calculator_(kBufferedPoints) {
}
| 7,003 |
184,195 | 1 | void InfoBarContainer::ChangeTabContents(TabContents* contents) {
registrar_.RemoveAll();
RemoveAllChildViews(false);
tab_contents_ = contents;
if (tab_contents_) {
UpdateInfoBars();
Source<TabContents> tc_source(tab_contents_);
registrar_.Add(this, NotificationType::TAB_CONTENTS_INFOBAR_ADDED,
tc_source);
registrar_.Add(this, NotificationType::TAB_CONTENTS_INFOBAR_REMOVED,
tc_source);
registrar_.Add(this, NotificationType::TAB_CONTENTS_INFOBAR_REPLACED,
tc_source);
}
}
| 7,004 |
107,379 | 0 | bool GetFindBarWindowInfoForBrowser(
Browser* browser, gfx::Point* position, bool* fully_visible) {
FindBarTesting* find_bar =
browser->GetFindBarController()->find_bar()->GetFindBarTesting();
return find_bar->GetFindBarWindowInfo(position, fully_visible);
}
| 7,005 |
153,579 | 0 | void GLES2Implementation::BufferSubData(GLenum target,
GLintptr offset,
GLsizeiptr size,
const void* data) {
GPU_CLIENT_SINGLE_THREAD_CHECK();
GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glBufferSubData("
<< GLES2Util::GetStringBufferTarget(target) << ", "
<< offset << ", " << size << ", "
<< static_cast<const void*>(data) << ")");
BufferSubDataHelper(target, offset, size, data);
CheckGLError();
}
| 7,006 |
1,373 | 0 | static uint32_t *rpc_add_credentials(uint32_t *p)
{
int hl;
int hostnamelen = 0;
/* Here's the executive summary on authentication requirements of the
* various NFS server implementations: Linux accepts both AUTH_NONE
* and AUTH_UNIX authentication (also accepts an empty hostname field
* in the AUTH_UNIX scheme). *BSD refuses AUTH_NONE, but accepts
* AUTH_UNIX (also accepts an empty hostname field in the AUTH_UNIX
* scheme). To be safe, use AUTH_UNIX and pass the hostname if we have
* it (if the BOOTP/DHCP reply didn't give one, just use an empty
* hostname). */
hl = (hostnamelen + 3) & ~3;
/* Provide an AUTH_UNIX credential. */
*p++ = htonl(1); /* AUTH_UNIX */
*p++ = htonl(hl+20); /* auth length */
*p++ = htonl(0); /* stamp */
*p++ = htonl(hostnamelen); /* hostname string */
if (hostnamelen & 3)
*(p + hostnamelen / 4) = 0; /* add zero padding */
/* memcpy(p, hostname, hostnamelen); */ /* empty hostname */
p += hl / 4;
*p++ = 0; /* uid */
*p++ = 0; /* gid */
*p++ = 0; /* auxiliary gid list */
/* Provide an AUTH_NONE verifier. */
*p++ = 0; /* AUTH_NONE */
*p++ = 0; /* auth length */
return p;
}
| 7,007 |
25,290 | 0 | xscale1pmu_write_counter(int counter, u32 val)
{
switch (counter) {
case XSCALE_CYCLE_COUNTER:
asm volatile("mcr p14, 0, %0, c1, c0, 0" : : "r" (val));
break;
case XSCALE_COUNTER0:
asm volatile("mcr p14, 0, %0, c2, c0, 0" : : "r" (val));
break;
case XSCALE_COUNTER1:
asm volatile("mcr p14, 0, %0, c3, c0, 0" : : "r" (val));
break;
}
}
| 7,008 |
150,947 | 0 | void Bluetooth::RequestDeviceCallback(
ScriptPromiseResolver* resolver,
mojom::blink::WebBluetoothResult result,
mojom::blink::WebBluetoothDevicePtr device) {
if (!resolver->GetExecutionContext() ||
resolver->GetExecutionContext()->IsContextDestroyed()) {
return;
}
if (result == mojom::blink::WebBluetoothResult::SUCCESS) {
BluetoothDevice* bluetooth_device = GetBluetoothDeviceRepresentingDevice(
std::move(device), resolver->GetExecutionContext());
resolver->Resolve(bluetooth_device);
} else {
resolver->Reject(BluetoothError::CreateDOMException(result));
}
}
| 7,009 |
105,101 | 0 | PassRefPtr<Range> Range::cloneRange(ExceptionCode& ec) const
{
if (!m_start.container()) {
ec = INVALID_STATE_ERR;
return 0;
}
return Range::create(m_ownerDocument, m_start.container(), m_start.offset(), m_end.container(), m_end.offset());
}
| 7,010 |
89,789 | 0 | usage (int ecode, FILE *out)
{
fprintf (out, "usage: %s [OPTIONS...] [--] COMMAND [ARGS...]\n\n", argv0);
fprintf (out,
" --help Print this help\n"
" --version Print version\n"
" --args FD Parse NUL-separated args from FD\n"
" --unshare-all Unshare every namespace we support by default\n"
" --share-net Retain the network namespace (can only combine with --unshare-all)\n"
" --unshare-user Create new user namespace (may be automatically implied if not setuid)\n"
" --unshare-user-try Create new user namespace if possible else continue by skipping it\n"
" --unshare-ipc Create new ipc namespace\n"
" --unshare-pid Create new pid namespace\n"
" --unshare-net Create new network namespace\n"
" --unshare-uts Create new uts namespace\n"
" --unshare-cgroup Create new cgroup namespace\n"
" --unshare-cgroup-try Create new cgroup namespace if possible else continue by skipping it\n"
" --uid UID Custom uid in the sandbox (requires --unshare-user)\n"
" --gid GID Custom gid in the sandbox (requires --unshare-user)\n"
" --hostname NAME Custom hostname in the sandbox (requires --unshare-uts)\n"
" --chdir DIR Change directory to DIR\n"
" --setenv VAR VALUE Set an environment variable\n"
" --unsetenv VAR Unset an environment variable\n"
" --lock-file DEST Take a lock on DEST while sandbox is running\n"
" --sync-fd FD Keep this fd open while sandbox is running\n"
" --bind SRC DEST Bind mount the host path SRC on DEST\n"
" --bind-try SRC DEST Equal to --bind but ignores non-existent SRC\n"
" --dev-bind SRC DEST Bind mount the host path SRC on DEST, allowing device access\n"
" --dev-bind-try SRC DEST Equal to --dev-bind but ignores non-existent SRC\n"
" --ro-bind SRC DEST Bind mount the host path SRC readonly on DEST\n"
" --ro-bind-try SRC DEST Equal to --ro-bind but ignores non-existent SRC\n"
" --remount-ro DEST Remount DEST as readonly; does not recursively remount\n"
" --exec-label LABEL Exec label for the sandbox\n"
" --file-label LABEL File label for temporary sandbox content\n"
" --proc DEST Mount new procfs on DEST\n"
" --dev DEST Mount new dev on DEST\n"
" --tmpfs DEST Mount new tmpfs on DEST\n"
" --mqueue DEST Mount new mqueue on DEST\n"
" --dir DEST Create dir at DEST\n"
" --file FD DEST Copy from FD to destination DEST\n"
" --bind-data FD DEST Copy from FD to file which is bind-mounted on DEST\n"
" --ro-bind-data FD DEST Copy from FD to file which is readonly bind-mounted on DEST\n"
" --symlink SRC DEST Create symlink at DEST with target SRC\n"
" --seccomp FD Load and use seccomp rules from FD\n"
" --block-fd FD Block on FD until some data to read is available\n"
" --userns-block-fd FD Block on FD until the user namespace is ready\n"
" --info-fd FD Write information about the running container to FD\n"
" --json-status-fd FD Write container status to FD as multiple JSON documents\n"
" --new-session Create a new terminal session\n"
" --die-with-parent Kills with SIGKILL child process (COMMAND) when bwrap or bwrap's parent dies.\n"
" --as-pid-1 Do not install a reaper process with PID=1\n"
" --cap-add CAP Add cap CAP when running as privileged user\n"
" --cap-drop CAP Drop cap CAP when running as privileged user\n"
);
exit (ecode);
}
| 7,011 |
49,893 | 0 | phar_entry_data *phar_get_or_create_entry_data(char *fname, int fname_len, char *path, int path_len, const char *mode, char allow_dir, char **error, int security) /* {{{ */
{
phar_archive_data *phar;
phar_entry_info *entry, etemp;
phar_entry_data *ret;
const char *pcr_error;
char is_dir;
#ifdef PHP_WIN32
phar_unixify_path_separators(path, path_len);
#endif
is_dir = (path_len && path[path_len - 1] == '/') ? 1 : 0;
if (FAILURE == phar_get_archive(&phar, fname, fname_len, NULL, 0, error)) {
return NULL;
}
if (FAILURE == phar_get_entry_data(&ret, fname, fname_len, path, path_len, mode, allow_dir, error, security)) {
return NULL;
} else if (ret) {
return ret;
}
if (phar_path_check(&path, &path_len, &pcr_error) > pcr_is_ok) {
if (error) {
spprintf(error, 0, "phar error: invalid path \"%s\" contains %s", path, pcr_error);
}
return NULL;
}
if (phar->is_persistent && FAILURE == phar_copy_on_write(&phar)) {
if (error) {
spprintf(error, 4096, "phar error: file \"%s\" in phar \"%s\" cannot be created, could not make cached phar writeable", path, fname);
}
return NULL;
}
/* create a new phar data holder */
ret = (phar_entry_data *) emalloc(sizeof(phar_entry_data));
/* create an entry, this is a new file */
memset(&etemp, 0, sizeof(phar_entry_info));
etemp.filename_len = path_len;
etemp.fp_type = PHAR_MOD;
etemp.fp = php_stream_fopen_tmpfile();
if (!etemp.fp) {
if (error) {
spprintf(error, 0, "phar error: unable to create temporary file");
}
efree(ret);
return NULL;
}
etemp.fp_refcount = 1;
if (allow_dir == 2) {
etemp.is_dir = 1;
etemp.flags = etemp.old_flags = PHAR_ENT_PERM_DEF_DIR;
} else {
etemp.flags = etemp.old_flags = PHAR_ENT_PERM_DEF_FILE;
}
if (is_dir) {
etemp.filename_len--; /* strip trailing / */
path_len--;
}
phar_add_virtual_dirs(phar, path, path_len);
etemp.is_modified = 1;
etemp.timestamp = time(0);
etemp.is_crc_checked = 1;
etemp.phar = phar;
etemp.filename = estrndup(path, path_len);
etemp.is_zip = phar->is_zip;
if (phar->is_tar) {
etemp.is_tar = phar->is_tar;
etemp.tar_type = etemp.is_dir ? TAR_DIR : TAR_FILE;
}
if (NULL == (entry = zend_hash_str_add_mem(&phar->manifest, etemp.filename, path_len, (void*)&etemp, sizeof(phar_entry_info)))) {
php_stream_close(etemp.fp);
if (error) {
spprintf(error, 0, "phar error: unable to add new entry \"%s\" to phar \"%s\"", etemp.filename, phar->fname);
}
efree(ret);
efree(etemp.filename);
return NULL;
}
if (!entry) {
php_stream_close(etemp.fp);
efree(etemp.filename);
efree(ret);
return NULL;
}
++(phar->refcount);
ret->phar = phar;
ret->fp = entry->fp;
ret->position = ret->zero = 0;
ret->for_write = 1;
ret->is_zip = entry->is_zip;
ret->is_tar = entry->is_tar;
ret->internal_file = entry;
return ret;
}
/* }}} */
| 7,012 |
91,672 | 0 | static void encap_finish(void)
{
hash_clean(encap_hash, (void (*)(void *))encap_free);
hash_free(encap_hash);
encap_hash = NULL;
#if ENABLE_BGP_VNC
hash_clean(vnc_hash, (void (*)(void *))encap_free);
hash_free(vnc_hash);
vnc_hash = NULL;
#endif
}
| 7,013 |
142,094 | 0 | CGaiaCredentialBase::UIProcessInfo::~UIProcessInfo() {}
| 7,014 |
67,630 | 0 | MODRET set_userdirroot(cmd_rec *cmd) {
int bool = -1;
config_rec *c = NULL;
CHECK_ARGS(cmd, 1);
CHECK_CONF(cmd, CONF_ANON);
bool = get_boolean(cmd, 1);
if (bool == -1)
CONF_ERROR(cmd, "expected Boolean parameter");
c = add_config_param(cmd->argv[0], 1, NULL);
c->argv[0] = pcalloc(c->pool, sizeof(unsigned char));
*((unsigned char *) c->argv[0]) = bool;
return PR_HANDLED(cmd);
}
| 7,015 |
187,948 | 1 | void impeg2d_dec_pic_data_thread(dec_state_t *ps_dec)
{
WORD32 i4_continue_decode;
WORD32 i4_cur_row, temp;
UWORD32 u4_bits_read;
WORD32 i4_dequeue_job;
IMPEG2D_ERROR_CODES_T e_error;
i4_cur_row = ps_dec->u2_mb_y + 1;
i4_continue_decode = 1;
i4_dequeue_job = 1;
do
{
if(i4_cur_row > ps_dec->u2_num_vert_mb)
{
i4_continue_decode = 0;
break;
}
{
if((ps_dec->i4_num_cores> 1) && (i4_dequeue_job))
{
job_t s_job;
IV_API_CALL_STATUS_T e_ret;
UWORD8 *pu1_buf;
e_ret = impeg2_jobq_dequeue(ps_dec->pv_jobq, &s_job, sizeof(s_job), 1, 1);
if(e_ret != IV_SUCCESS)
break;
if(CMD_PROCESS == s_job.i4_cmd)
{
pu1_buf = ps_dec->pu1_inp_bits_buf + s_job.i4_bistream_ofst;
impeg2d_bit_stream_init(&(ps_dec->s_bit_stream), pu1_buf,
(ps_dec->u4_num_inp_bytes - s_job.i4_bistream_ofst) + 8);
i4_cur_row = s_job.i2_start_mb_y;
ps_dec->i4_start_mb_y = s_job.i2_start_mb_y;
ps_dec->i4_end_mb_y = s_job.i2_end_mb_y;
ps_dec->u2_mb_x = 0;
ps_dec->u2_mb_y = ps_dec->i4_start_mb_y;
ps_dec->u2_num_mbs_left = (ps_dec->i4_end_mb_y - ps_dec->i4_start_mb_y) * ps_dec->u2_num_horiz_mb;
}
else
{
WORD32 start_row;
WORD32 num_rows;
start_row = s_job.i2_start_mb_y << 4;
num_rows = MIN((s_job.i2_end_mb_y << 4), ps_dec->u2_vertical_size);
num_rows -= start_row;
impeg2d_format_convert(ps_dec, ps_dec->ps_disp_pic,
ps_dec->ps_disp_frm_buf,
start_row, num_rows);
break;
}
}
e_error = impeg2d_dec_slice(ps_dec);
if ((IMPEG2D_ERROR_CODES_T)IVD_ERROR_NONE != e_error)
{
impeg2d_next_start_code(ps_dec);
}
}
/* Detecting next slice start code */
while(1)
{
u4_bits_read = impeg2d_bit_stream_nxt(&ps_dec->s_bit_stream,START_CODE_LEN);
temp = u4_bits_read & 0xFF;
i4_continue_decode = (((u4_bits_read >> 8) == 0x01) && (temp) && (temp <= 0xAF));
if(i4_continue_decode)
{
/* If the slice is from the same row, then continue decoding without dequeue */
if((temp - 1) == i4_cur_row)
{
i4_dequeue_job = 0;
break;
}
if(temp < ps_dec->i4_end_mb_y)
{
i4_cur_row = ps_dec->u2_mb_y;
}
else
{
i4_dequeue_job = 1;
}
break;
}
else
break;
}
}while(i4_continue_decode);
if(ps_dec->i4_num_cores > 1)
{
while(1)
{
job_t s_job;
IV_API_CALL_STATUS_T e_ret;
e_ret = impeg2_jobq_dequeue(ps_dec->pv_jobq, &s_job, sizeof(s_job), 1, 1);
if(e_ret != IV_SUCCESS)
break;
if(CMD_FMTCONV == s_job.i4_cmd)
{
WORD32 start_row;
WORD32 num_rows;
start_row = s_job.i2_start_mb_y << 4;
num_rows = MIN((s_job.i2_end_mb_y << 4), ps_dec->u2_vertical_size);
num_rows -= start_row;
impeg2d_format_convert(ps_dec, ps_dec->ps_disp_pic,
ps_dec->ps_disp_frm_buf,
start_row, num_rows);
}
}
}
else
{
if((NULL != ps_dec->ps_disp_pic) && ((0 == ps_dec->u4_share_disp_buf) || (IV_YUV_420P != ps_dec->i4_chromaFormat)))
impeg2d_format_convert(ps_dec, ps_dec->ps_disp_pic,
ps_dec->ps_disp_frm_buf,
0, ps_dec->u2_vertical_size);
}
}
| 7,016 |
151,637 | 0 | ukm::UkmRecorder* ChromePaymentRequestDelegate::GetUkmRecorder() {
return g_browser_process->ukm_recorder();
}
| 7,017 |
103,242 | 0 | void WebSocketJob::CloseInternal() {
if (spdy_websocket_stream_.get())
spdy_websocket_stream_->Close();
if (socket_.get())
socket_->Close();
}
| 7,018 |
158,688 | 0 | void GLES2DecoderImpl::DoReleaseTexImage2DCHROMIUM(
GLenum target, GLint image_id) {
TRACE_EVENT0("gpu", "GLES2DecoderImpl::DoReleaseTexImage2DCHROMIUM");
TextureRef* texture_ref =
texture_manager()->GetTextureInfoForTargetUnlessDefault(&state_, target);
if (!texture_ref) {
LOCAL_SET_GL_ERROR(
GL_INVALID_OPERATION,
"glReleaseTexImage2DCHROMIUM", "no texture bound");
return;
}
gl::GLImage* image = image_manager()->LookupImage(image_id);
if (!image) {
LOCAL_SET_GL_ERROR(
GL_INVALID_OPERATION,
"glReleaseTexImage2DCHROMIUM", "no image found with the given ID");
return;
}
Texture::ImageState image_state;
if (texture_ref->texture()->GetLevelImage(target, 0, &image_state) != image)
return;
if (image_state == Texture::BOUND) {
ScopedGLErrorSuppressor suppressor(
"GLES2DecoderImpl::DoReleaseTexImage2DCHROMIUM", GetErrorState());
image->ReleaseTexImage(target);
texture_manager()->SetLevelInfo(texture_ref, target, 0, GL_RGBA, 0, 0, 1, 0,
GL_RGBA, GL_UNSIGNED_BYTE, gfx::Rect());
}
texture_manager()->SetLevelImage(texture_ref, target, 0, nullptr,
Texture::UNBOUND);
}
| 7,019 |
113,176 | 0 | explicit FadeInAnimationDelegate(views::View* view) : view_(view) {}
| 7,020 |
75,347 | 0 | packet_stats(const tcpreplay_stats_t *stats)
{
struct timeval diff;
COUNTER diff_us;
COUNTER bytes_sec = 0;
u_int32_t bytes_sec_10ths = 0;
COUNTER mb_sec = 0;
u_int32_t mb_sec_100ths = 0;
u_int32_t mb_sec_1000ths = 0;
COUNTER pkts_sec = 0;
u_int32_t pkts_sec_100ths = 0;
timersub(&stats->end_time, &stats->start_time, &diff);
diff_us = TIMEVAL_TO_MICROSEC(&diff);
if (diff_us && stats->pkts_sent && stats->bytes_sent) {
COUNTER bytes_sec_X10;
COUNTER pkts_sec_X100;
COUNTER mb_sec_X1000;
COUNTER mb_sec_X100;
if (stats->bytes_sent > 1000 * 1000 * 1000 && diff_us > 1000 * 1000) {
bytes_sec_X10 = (stats->bytes_sent * 10 * 1000) / (diff_us / 1000);
pkts_sec_X100 = (stats->pkts_sent * 100 * 1000) / (diff_us / 1000);
} else {
bytes_sec_X10 = (stats->bytes_sent * 10 * 1000 * 1000) / diff_us;
pkts_sec_X100 = (stats->pkts_sent * 100 * 1000 * 1000) / diff_us;
}
bytes_sec = bytes_sec_X10 / 10;
bytes_sec_10ths = bytes_sec_X10 % 10;
mb_sec_X1000 = (bytes_sec * 8) / 1000;
mb_sec_X100 = mb_sec_X1000 / 10;
mb_sec = mb_sec_X1000 / 1000;
mb_sec_100ths = mb_sec_X100 % 100;
mb_sec_1000ths = mb_sec_X1000 % 1000;
pkts_sec = pkts_sec_X100 / 100;
pkts_sec_100ths = pkts_sec_X100 % 100;
}
if (diff_us >= 1000 * 1000)
printf("Actual: " COUNTER_SPEC " packets (" COUNTER_SPEC " bytes) sent in %zd.%02zd seconds\n",
stats->pkts_sent, stats->bytes_sent, (ssize_t)diff.tv_sec, (ssize_t)(diff.tv_usec / (10 * 1000)));
else
printf("Actual: " COUNTER_SPEC " packets (" COUNTER_SPEC " bytes) sent in %zd.%06zd seconds\n",
stats->pkts_sent, stats->bytes_sent, (ssize_t)diff.tv_sec, (ssize_t)diff.tv_usec);
if (mb_sec >= 1)
printf("Rated: %llu.%1u Bps, %llu.%02u Mbps, %llu.%02u pps\n",
bytes_sec, bytes_sec_10ths, mb_sec, mb_sec_100ths, pkts_sec, pkts_sec_100ths);
else
printf("Rated: %llu.%1u Bps, %llu.%03u Mbps, %llu.%02u pps\n",
bytes_sec, bytes_sec_10ths, mb_sec, mb_sec_1000ths, pkts_sec, pkts_sec_100ths);
fflush(NULL);
if (stats->failed)
printf("Failed write attempts: " COUNTER_SPEC "\n",
stats->failed);
}
| 7,021 |
112,296 | 0 | void ShellWindow::RequestMediaAccessPermission(
content::WebContents* web_contents,
const content::MediaStreamRequest* request,
const content::MediaResponseCallback& callback) {
content::MediaStreamDevices devices;
content::MediaStreamDeviceMap::const_iterator iter =
request->devices.find(content::MEDIA_STREAM_DEVICE_TYPE_AUDIO_CAPTURE);
if (iter != request->devices.end() &&
extension()->HasAPIPermission(ExtensionAPIPermission::kAudioCapture) &&
!iter->second.empty()) {
devices.push_back(iter->second[0]);
}
iter = request->devices.find(content::MEDIA_STREAM_DEVICE_TYPE_VIDEO_CAPTURE);
if (iter != request->devices.end() &&
extension()->HasAPIPermission(ExtensionAPIPermission::kVideoCapture) &&
!iter->second.empty()) {
devices.push_back(iter->second[0]);
}
callback.Run(devices);
}
| 7,022 |
179,898 | 1 | int main(int argc, char *argv[])
{
int ret;
struct lxc_lock *lock;
lock = lxc_newlock(NULL, NULL);
if (!lock) {
fprintf(stderr, "%d: failed to get unnamed lock\n", __LINE__);
exit(1);
}
ret = lxclock(lock, 0);
if (ret) {
fprintf(stderr, "%d: failed to take unnamed lock (%d)\n", __LINE__, ret);
exit(1);
}
ret = lxcunlock(lock);
if (ret) {
fprintf(stderr, "%d: failed to put unnamed lock (%d)\n", __LINE__, ret);
exit(1);
}
lxc_putlock(lock);
lock = lxc_newlock("/var/lib/lxc", mycontainername);
if (!lock) {
fprintf(stderr, "%d: failed to get lock\n", __LINE__);
exit(1);
}
struct stat sb;
char *pathname = RUNTIME_PATH "/lock/lxc/var/lib/lxc/";
ret = stat(pathname, &sb);
if (ret != 0) {
fprintf(stderr, "%d: filename %s not created\n", __LINE__,
pathname);
exit(1);
}
lxc_putlock(lock);
test_two_locks();
fprintf(stderr, "all tests passed\n");
exit(ret);
}
| 7,023 |
35,453 | 0 | static void ttusbdecfe_release(struct dvb_frontend* fe)
{
struct ttusbdecfe_state* state = (struct ttusbdecfe_state*) fe->demodulator_priv;
kfree(state);
}
| 7,024 |
60,697 | 0 | static int sctp_setsockopt_pr_supported(struct sock *sk,
char __user *optval,
unsigned int optlen)
{
struct sctp_assoc_value params;
struct sctp_association *asoc;
int retval = -EINVAL;
if (optlen != sizeof(params))
goto out;
if (copy_from_user(&params, optval, optlen)) {
retval = -EFAULT;
goto out;
}
asoc = sctp_id2assoc(sk, params.assoc_id);
if (asoc) {
asoc->prsctp_enable = !!params.assoc_value;
} else if (!params.assoc_id) {
struct sctp_sock *sp = sctp_sk(sk);
sp->ep->prsctp_enable = !!params.assoc_value;
} else {
goto out;
}
retval = 0;
out:
return retval;
}
| 7,025 |
43,318 | 0 | int CLASS median4 (int *p)
{
int min, max, sum, i;
min = max = sum = p[0];
for (i=1; i < 4; i++) {
sum += p[i];
if (min > p[i]) min = p[i];
if (max < p[i]) max = p[i];
}
return (sum - min - max) >> 1;
}
| 7,026 |
115,712 | 0 | void ScreenRecorder::DoStopOnEncodeThread(const base::Closure& done_task) {
DCHECK_EQ(encode_loop_, MessageLoop::current());
encoder_stopped_ = true;
capture_loop_->PostTask(FROM_HERE, done_task);
}
| 7,027 |
136,541 | 0 | void PaintController::ResetCurrentListIndices() {
next_item_to_match_ = 0;
next_item_to_index_ = 0;
next_chunk_to_match_ = 0;
under_invalidation_checking_begin_ = 0;
under_invalidation_checking_end_ = 0;
skipped_probable_under_invalidation_count_ = 0;
}
| 7,028 |
140,765 | 0 | bool GLES2DecoderImpl::FormsTextureCopyingFeedbackLoop(
TextureRef* texture, GLint level) {
Framebuffer* framebuffer = features().chromium_framebuffer_multisample ?
framebuffer_state_.bound_read_framebuffer.get() :
framebuffer_state_.bound_draw_framebuffer.get();
if (!framebuffer)
return false;
const Framebuffer::Attachment* attachment = framebuffer->GetAttachment(
GL_COLOR_ATTACHMENT0);
if (!attachment)
return false;
return attachment->FormsFeedbackLoop(texture, level);
}
| 7,029 |
114,564 | 0 | WebPluginProxy::~WebPluginProxy() {
#if defined(USE_X11)
if (windowless_shm_pixmaps_[0] != None)
XFreePixmap(ui::GetXDisplay(), windowless_shm_pixmaps_[0]);
if (windowless_shm_pixmaps_[1] != None)
XFreePixmap(ui::GetXDisplay(), windowless_shm_pixmaps_[1]);
#endif
#if defined(OS_MACOSX)
if (accelerated_surface_.get())
accelerated_surface_.reset();
#endif
if (plugin_element_)
WebBindings::releaseObject(plugin_element_);
if (window_npobject_)
WebBindings::releaseObject(window_npobject_);
}
| 7,030 |
14,828 | 0 | cvt_flip(int type, int flip)
{
if (flip == 0)
return type;
switch (type) {
case FILE_BESHORT:
return FILE_LESHORT;
case FILE_BELONG:
return FILE_LELONG;
case FILE_BEDATE:
return FILE_LEDATE;
case FILE_BELDATE:
return FILE_LELDATE;
case FILE_BEQUAD:
return FILE_LEQUAD;
case FILE_BEQDATE:
return FILE_LEQDATE;
case FILE_BEQLDATE:
return FILE_LEQLDATE;
case FILE_BEQWDATE:
return FILE_LEQWDATE;
case FILE_LESHORT:
return FILE_BESHORT;
case FILE_LELONG:
return FILE_BELONG;
case FILE_LEDATE:
return FILE_BEDATE;
case FILE_LELDATE:
return FILE_BELDATE;
case FILE_LEQUAD:
return FILE_BEQUAD;
case FILE_LEQDATE:
return FILE_BEQDATE;
case FILE_LEQLDATE:
return FILE_BEQLDATE;
case FILE_LEQWDATE:
return FILE_BEQWDATE;
case FILE_BEFLOAT:
return FILE_LEFLOAT;
case FILE_LEFLOAT:
return FILE_BEFLOAT;
case FILE_BEDOUBLE:
return FILE_LEDOUBLE;
case FILE_LEDOUBLE:
return FILE_BEDOUBLE;
default:
return type;
}
}
| 7,031 |
1,439 | 0 | static void ct_build_clabel(const struct nf_conntrack *ct, struct nethdr *n)
{
const struct nfct_bitmask *b;
uint32_t *words;
unsigned int wordcount, i, maxbit;
if (!nfct_attr_is_set(ct, ATTR_CONNLABELS))
return;
b = nfct_get_attr(ct, ATTR_CONNLABELS);
maxbit = nfct_bitmask_maxbit(b);
for (i=0; i <= maxbit; i++) {
if (nfct_bitmask_test_bit(b, i))
break;
}
if (i > maxbit)
return;
wordcount = (nfct_bitmask_maxbit(b) / 32) + 1;
words = put_header(n, NTA_LABELS, wordcount * sizeof(*words));
for (i=0; i < wordcount; i++) {
int bit = 31;
uint32_t tmp = 0;
do {
if (nfct_bitmask_test_bit(b, (32 * i) + bit))
tmp |= (1 << bit);
} while (--bit >= 0);
words[i] = htonl(tmp);
}
}
| 7,032 |
183,576 | 1 | void WebPluginDelegateProxy::OnGetWindowScriptNPObject(
int route_id, bool* success, intptr_t* npobject_ptr) {
*success = false;
NPObject* npobject = NULL;
if (plugin_)
npobject = plugin_->GetWindowScriptNPObject();
if (!npobject)
return;
// The stub will delete itself when the proxy tells it that it's released, or
// otherwise when the channel is closed.
window_script_object_ = (new NPObjectStub(
npobject, channel_host_.get(), route_id, 0, page_url_))->AsWeakPtr();
*success = true;
*npobject_ptr = reinterpret_cast<intptr_t>(npobject);
}
| 7,033 |
57,190 | 0 | int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo)
{
struct rpc_task *task;
struct nfs4_get_lease_time_args args;
struct nfs4_get_lease_time_res res = {
.lr_fsinfo = fsinfo,
};
struct nfs4_get_lease_time_data data = {
.args = &args,
.res = &res,
.clp = clp,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME],
.rpc_argp = &args,
.rpc_resp = &res,
};
struct rpc_task_setup task_setup = {
.rpc_client = clp->cl_rpcclient,
.rpc_message = &msg,
.callback_ops = &nfs4_get_lease_time_ops,
.callback_data = &data,
.flags = RPC_TASK_TIMEOUT,
};
int status;
nfs4_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0);
nfs4_set_sequence_privileged(&args.la_seq_args);
dprintk("--> %s\n", __func__);
task = rpc_run_task(&task_setup);
if (IS_ERR(task))
status = PTR_ERR(task);
else {
status = task->tk_status;
rpc_put_task(task);
}
dprintk("<-- %s return %d\n", __func__, status);
return status;
}
| 7,034 |
55,863 | 0 | static inline ssize_t do_tty_write(
ssize_t (*write)(struct tty_struct *, struct file *, const unsigned char *, size_t),
struct tty_struct *tty,
struct file *file,
const char __user *buf,
size_t count)
{
ssize_t ret, written = 0;
unsigned int chunk;
ret = tty_write_lock(tty, file->f_flags & O_NDELAY);
if (ret < 0)
return ret;
/*
* We chunk up writes into a temporary buffer. This
* simplifies low-level drivers immensely, since they
* don't have locking issues and user mode accesses.
*
* But if TTY_NO_WRITE_SPLIT is set, we should use a
* big chunk-size..
*
* The default chunk-size is 2kB, because the NTTY
* layer has problems with bigger chunks. It will
* claim to be able to handle more characters than
* it actually does.
*
* FIXME: This can probably go away now except that 64K chunks
* are too likely to fail unless switched to vmalloc...
*/
chunk = 2048;
if (test_bit(TTY_NO_WRITE_SPLIT, &tty->flags))
chunk = 65536;
if (count < chunk)
chunk = count;
/* write_buf/write_cnt is protected by the atomic_write_lock mutex */
if (tty->write_cnt < chunk) {
unsigned char *buf_chunk;
if (chunk < 1024)
chunk = 1024;
buf_chunk = kmalloc(chunk, GFP_KERNEL);
if (!buf_chunk) {
ret = -ENOMEM;
goto out;
}
kfree(tty->write_buf);
tty->write_cnt = chunk;
tty->write_buf = buf_chunk;
}
/* Do the write .. */
for (;;) {
size_t size = count;
if (size > chunk)
size = chunk;
ret = -EFAULT;
if (copy_from_user(tty->write_buf, buf, size))
break;
ret = write(tty, file, tty->write_buf, size);
if (ret <= 0)
break;
written += ret;
buf += ret;
count -= ret;
if (!count)
break;
ret = -ERESTARTSYS;
if (signal_pending(current))
break;
cond_resched();
}
if (written) {
tty_update_time(&file_inode(file)->i_mtime);
ret = written;
}
out:
tty_write_unlock(tty);
return ret;
}
| 7,035 |
43,550 | 0 | static int sctp_getsockopt_recvrcvinfo(struct sock *sk, int len,
char __user *optval,
int __user *optlen)
{
int val = 0;
if (len < sizeof(int))
return -EINVAL;
len = sizeof(int);
if (sctp_sk(sk)->recvrcvinfo)
val = 1;
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &val, len))
return -EFAULT;
return 0;
}
| 7,036 |
71,720 | 0 | static void SVGComment(void *context,const xmlChar *value)
{
SVGInfo
*svg_info;
/*
A comment has been parsed.
*/
(void) LogMagickEvent(CoderEvent,GetMagickModule()," SAX.comment(%s)",
value);
svg_info=(SVGInfo *) context;
if (svg_info->comment != (char *) NULL)
(void) ConcatenateString(&svg_info->comment,"\n");
(void) ConcatenateString(&svg_info->comment,(const char *) value);
}
| 7,037 |
68,301 | 0 | static inline int __pmu_filter_match(struct perf_event *event)
{
struct pmu *pmu = event->pmu;
return pmu->filter_match ? pmu->filter_match(event) : 1;
}
| 7,038 |
147,179 | 0 | void V8TestObject::ActivityLoggingGetterForIsolatedWorldsPerWorldBindingsLongAttributeAttributeSetterCallback(
const v8::FunctionCallbackInfo<v8::Value>& info) {
RUNTIME_CALL_TIMER_SCOPE_DISABLED_BY_DEFAULT(info.GetIsolate(), "Blink_TestObject_activityLoggingGetterForIsolatedWorldsPerWorldBindingsLongAttribute_Setter");
v8::Local<v8::Value> v8_value = info[0];
test_object_v8_internal::ActivityLoggingGetterForIsolatedWorldsPerWorldBindingsLongAttributeAttributeSetter(v8_value, info);
}
| 7,039 |
54,093 | 0 | static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh)
{
struct net *net = sock_net(skb->sk);
struct nlattr *tb[IFA_MAX+1];
struct in_device *in_dev;
struct ifaddrmsg *ifm;
struct in_ifaddr *ifa, **ifap;
int err = -EINVAL;
ASSERT_RTNL();
err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv4_policy);
if (err < 0)
goto errout;
ifm = nlmsg_data(nlh);
in_dev = inetdev_by_index(net, ifm->ifa_index);
if (!in_dev) {
err = -ENODEV;
goto errout;
}
for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
ifap = &ifa->ifa_next) {
if (tb[IFA_LOCAL] &&
ifa->ifa_local != nla_get_in_addr(tb[IFA_LOCAL]))
continue;
if (tb[IFA_LABEL] && nla_strcmp(tb[IFA_LABEL], ifa->ifa_label))
continue;
if (tb[IFA_ADDRESS] &&
(ifm->ifa_prefixlen != ifa->ifa_prefixlen ||
!inet_ifa_match(nla_get_in_addr(tb[IFA_ADDRESS]), ifa)))
continue;
if (ipv4_is_multicast(ifa->ifa_address))
ip_mc_config(net->ipv4.mc_autojoin_sk, false, ifa);
__inet_del_ifa(in_dev, ifap, 1, nlh, NETLINK_CB(skb).portid);
return 0;
}
err = -EADDRNOTAVAIL;
errout:
return err;
}
| 7,040 |
90,638 | 0 | zip_read_data_zipx_lzma_alone(struct archive_read *a, const void **buff,
size_t *size, int64_t *offset)
{
struct zip* zip = (struct zip *)(a->format->data);
int ret;
lzma_ret lz_ret;
const void* compressed_buf;
ssize_t bytes_avail, in_bytes, to_consume;
(void) offset; /* UNUSED */
/* Initialize decompressor if not yet initialized. */
if (!zip->decompress_init) {
ret = zipx_lzma_alone_init(a, zip);
if (ret != ARCHIVE_OK)
return (ret);
}
/* Fetch more compressed data. The same note as in deflate handler applies
* here as well:
*
* Note: '1' here is a performance optimization. Recall that the
* decompression layer returns a count of available bytes; asking for more
* than that forces the decompressor to combine reads by copying data.
*/
compressed_buf = __archive_read_ahead(a, 1, &bytes_avail);
if (bytes_avail < 0) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Truncated lzma file body");
return (ARCHIVE_FATAL);
}
/* Set decompressor parameters. */
in_bytes = zipmin(zip->entry_bytes_remaining, bytes_avail);
zip->zipx_lzma_stream.next_in = compressed_buf;
zip->zipx_lzma_stream.avail_in = in_bytes;
zip->zipx_lzma_stream.total_in = 0;
zip->zipx_lzma_stream.next_out = zip->uncompressed_buffer;
zip->zipx_lzma_stream.avail_out =
/* These lzma_alone streams lack end of stream marker, so let's make
* sure the unpacker won't try to unpack more than it's supposed to. */
zipmin((int64_t) zip->uncompressed_buffer_size,
zip->entry->uncompressed_size -
zip->entry_uncompressed_bytes_read);
zip->zipx_lzma_stream.total_out = 0;
/* Perform the decompression. */
lz_ret = lzma_code(&zip->zipx_lzma_stream, LZMA_RUN);
switch(lz_ret) {
case LZMA_DATA_ERROR:
archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
"lzma data error (error %d)", (int) lz_ret);
return (ARCHIVE_FATAL);
case LZMA_OK:
break;
default:
archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
"lzma unknown error %d", (int) lz_ret);
return (ARCHIVE_FATAL);
}
to_consume = zip->zipx_lzma_stream.total_in;
/* Update pointers. */
__archive_read_consume(a, to_consume);
zip->entry_bytes_remaining -= to_consume;
zip->entry_compressed_bytes_read += to_consume;
zip->entry_uncompressed_bytes_read += zip->zipx_lzma_stream.total_out;
if(zip->entry_bytes_remaining == 0) {
zip->end_of_entry = 1;
}
/* Return values. */
*size = zip->zipx_lzma_stream.total_out;
*buff = zip->uncompressed_buffer;
/* Behave the same way as during deflate decompression. */
ret = consume_optional_marker(a, zip);
if (ret != ARCHIVE_OK)
return (ret);
/* Free lzma decoder handle because we'll no longer need it. */
if(zip->end_of_entry) {
lzma_end(&zip->zipx_lzma_stream);
zip->zipx_lzma_valid = 0;
}
/* If we're here, then we're good! */
return (ARCHIVE_OK);
}
| 7,041 |
103,494 | 0 | void ExtensionServiceBackend::LoadSingleExtensionWithFileAccess(
const FilePath& extension_path,
bool allow_file_access,
bool prompt_for_plugins) {
CHECK(BrowserThread::CurrentlyOn(BrowserThread::FILE));
int flags = allow_file_access ?
Extension::ALLOW_FILE_ACCESS : Extension::NO_FLAGS;
if (Extension::ShouldDoStrictErrorChecking(Extension::LOAD))
flags |= Extension::STRICT_ERROR_CHECKS;
std::string error;
scoped_refptr<const Extension> extension(extension_file_util::LoadExtension(
extension_path,
Extension::LOAD,
flags,
&error));
if (!extension) {
BrowserThread::PostTask(BrowserThread::UI, FROM_HERE,
NewRunnableMethod(
this,
&ExtensionServiceBackend::ReportExtensionLoadError,
extension_path, error));
return;
}
BrowserThread::PostTask(BrowserThread::UI, FROM_HERE,
NewRunnableMethod(
this,
&ExtensionServiceBackend::OnLoadSingleExtension,
extension, prompt_for_plugins));
}
| 7,042 |
173,855 | 0 | virtual status_t getState(
node_id node, OMX_STATETYPE* state) {
Parcel data, reply;
data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
data.writeInt32((int32_t)node);
remote()->transact(GET_STATE, data, &reply);
*state = static_cast<OMX_STATETYPE>(reply.readInt32());
return reply.readInt32();
}
| 7,043 |
169,639 | 0 | std::string TestURLLoader::TestUntrustedCrossOriginRequest() {
pp::URLRequestInfo request(instance_);
std::string cross_origin_url = GetReachableCrossOriginURL("test_case.html");
request.SetURL(cross_origin_url);
request.SetAllowCrossOriginRequests(true);
int32_t rv = OpenUntrusted(request, NULL);
if (rv != PP_OK)
return ReportError(
"Untrusted, intended cross-origin request failed", rv);
PASS();
}
| 7,044 |
123,437 | 0 | void ChromeDownloadManagerDelegate::OnTargetPathDetermined(
int32 download_id,
const content::DownloadTargetCallback& callback,
DownloadItem::TargetDisposition disposition,
content::DownloadDangerType danger_type,
const FilePath& target_path) {
DCHECK(BrowserThread::CurrentlyOn(BrowserThread::UI));
FilePath intermediate_path;
DownloadItem* download =
download_manager_->GetDownload(download_id);
if (!download || (download->GetState() != DownloadItem::IN_PROGRESS))
return;
if (!target_path.empty()) {
intermediate_path = GetIntermediatePath(target_path, danger_type);
if (disposition == DownloadItem::TARGET_DISPOSITION_PROMPT &&
!download->IsTemporary())
last_download_path_ = target_path.DirName();
}
callback.Run(target_path, disposition, danger_type, intermediate_path);
}
| 7,045 |
74,230 | 0 | record_timing_stats(
const char *text /* text message */
)
{
static unsigned int flshcnt;
l_fp now;
u_long day;
if (!stats_control)
return;
get_systime(&now);
filegen_setup(&timingstats, now.l_ui);
day = now.l_ui / 86400 + MJD_1900;
now.l_ui %= 86400;
if (timingstats.fp != NULL) {
fprintf(timingstats.fp, "%lu %s %s\n", day, lfptoa(&now,
3), text);
if (++flshcnt % 100 == 0)
fflush(timingstats.fp);
}
}
| 7,046 |
100,739 | 0 | xmlParseNCNameComplex(xmlParserCtxtPtr ctxt) {
int len = 0, l;
int c;
int count = 0;
#ifdef DEBUG
nbParseNCNameComplex++;
#endif
/*
* Handler for more complex cases
*/
GROW;
c = CUR_CHAR(l);
if ((c == ' ') || (c == '>') || (c == '/') || /* accelerators */
(!xmlIsNameStartChar(ctxt, c) || (c == ':'))) {
return(NULL);
}
while ((c != ' ') && (c != '>') && (c != '/') && /* test bigname.xml */
(xmlIsNameChar(ctxt, c) && (c != ':'))) {
if (count++ > 100) {
count = 0;
GROW;
}
len += l;
NEXTL(l);
c = CUR_CHAR(l);
}
return(xmlDictLookup(ctxt->dict, ctxt->input->cur - len, len));
}
| 7,047 |
153,516 | 0 | float GM2TabStyle::GetHoverOpacity() const {
const float range_start = float{GetStandardWidth()};
const float range_end = float{GetMinimumInactiveWidth()};
const float value_in_range = float{tab_->width()};
const float t = (value_in_range - range_start) / (range_end - range_start);
return tab_->controller()->GetHoverOpacityForTab(t * t);
}
| 7,048 |
78,275 | 0 | static int coolkey_check_sw(sc_card_t *card, unsigned int sw1, unsigned int sw2)
{
sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL,
"sw1 = 0x%02x, sw2 = 0x%02x\n", sw1, sw2);
if (sw1 == 0x90)
return SC_SUCCESS;
if (sw1 == 0x9c) {
if (sw2 == 0xff) {
/* shouldn't happen on a production applet, 0x9cff is a debugging error code */
return SC_ERROR_INTERNAL;
}
if (sw2 >= coolkey_number_of_error_codes) {
return SC_ERROR_UNKNOWN;
}
return coolkey_error_codes[sw2].sc_error;
}
/* iso error */
return sc_get_iso7816_driver()->ops->check_sw(card, sw1, sw2);
}
| 7,049 |
152,726 | 0 | HistogramType LinearHistogram::GetHistogramType() const {
return LINEAR_HISTOGRAM;
}
| 7,050 |
93,556 | 0 | static void *ip6mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct ipmr_vif_iter *iter = seq->private;
struct net *net = seq_file_net(seq);
struct mr6_table *mrt = iter->mrt;
++*pos;
if (v == SEQ_START_TOKEN)
return ip6mr_vif_seq_idx(net, iter, 0);
while (++iter->ct < mrt->maxvif) {
if (!MIF_EXISTS(mrt, iter->ct))
continue;
return &mrt->vif6_table[iter->ct];
}
return NULL;
}
| 7,051 |
21,246 | 0 | static inline int is_cow_mapping(vm_flags_t flags)
{
return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
}
| 7,052 |
44,860 | 0 | int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
{
struct super_block *sb = inode->i_sb;
ext4_lblk_t punch_start, punch_stop;
handle_t *handle;
unsigned int credits;
loff_t new_size, ioffset;
int ret;
/* Collapse range works only on fs block size aligned offsets. */
if (offset & (EXT4_CLUSTER_SIZE(sb) - 1) ||
len & (EXT4_CLUSTER_SIZE(sb) - 1))
return -EINVAL;
if (!S_ISREG(inode->i_mode))
return -EINVAL;
trace_ext4_collapse_range(inode, offset, len);
punch_start = offset >> EXT4_BLOCK_SIZE_BITS(sb);
punch_stop = (offset + len) >> EXT4_BLOCK_SIZE_BITS(sb);
/* Call ext4_force_commit to flush all data in case of data=journal. */
if (ext4_should_journal_data(inode)) {
ret = ext4_force_commit(inode->i_sb);
if (ret)
return ret;
}
/*
* Need to round down offset to be aligned with page size boundary
* for page size > block size.
*/
ioffset = round_down(offset, PAGE_SIZE);
/* Write out all dirty pages */
ret = filemap_write_and_wait_range(inode->i_mapping, ioffset,
LLONG_MAX);
if (ret)
return ret;
/* Take mutex lock */
mutex_lock(&inode->i_mutex);
/*
* There is no need to overlap collapse range with EOF, in which case
* it is effectively a truncate operation
*/
if (offset + len >= i_size_read(inode)) {
ret = -EINVAL;
goto out_mutex;
}
/* Currently just for extent based files */
if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
ret = -EOPNOTSUPP;
goto out_mutex;
}
truncate_pagecache(inode, ioffset);
/* Wait for existing dio to complete */
ext4_inode_block_unlocked_dio(inode);
inode_dio_wait(inode);
credits = ext4_writepage_trans_blocks(inode);
handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
goto out_dio;
}
down_write(&EXT4_I(inode)->i_data_sem);
ext4_discard_preallocations(inode);
ret = ext4_es_remove_extent(inode, punch_start,
EXT_MAX_BLOCKS - punch_start);
if (ret) {
up_write(&EXT4_I(inode)->i_data_sem);
goto out_stop;
}
ret = ext4_ext_remove_space(inode, punch_start, punch_stop - 1);
if (ret) {
up_write(&EXT4_I(inode)->i_data_sem);
goto out_stop;
}
ext4_discard_preallocations(inode);
ret = ext4_ext_shift_extents(inode, handle, punch_stop,
punch_stop - punch_start);
if (ret) {
up_write(&EXT4_I(inode)->i_data_sem);
goto out_stop;
}
new_size = i_size_read(inode) - len;
i_size_write(inode, new_size);
EXT4_I(inode)->i_disksize = new_size;
up_write(&EXT4_I(inode)->i_data_sem);
if (IS_SYNC(inode))
ext4_handle_sync(handle);
inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
ext4_mark_inode_dirty(handle, inode);
out_stop:
ext4_journal_stop(handle);
out_dio:
ext4_inode_resume_unlocked_dio(inode);
out_mutex:
mutex_unlock(&inode->i_mutex);
return ret;
}
| 7,053 |
19,855 | 0 | static bool nfs41_match_stateid(const nfs4_stateid *s1,
const nfs4_stateid *s2)
{
if (memcmp(s1->other, s2->other, sizeof(s1->other)) != 0)
return false;
if (s1->seqid == s2->seqid)
return true;
if (s1->seqid == 0 || s2->seqid == 0)
return true;
return false;
}
| 7,054 |
93,966 | 0 | xfs_start_page_writeback(
struct page *page,
int clear_dirty)
{
ASSERT(PageLocked(page));
ASSERT(!PageWriteback(page));
/*
* if the page was not fully cleaned, we need to ensure that the higher
* layers come back to it correctly. That means we need to keep the page
* dirty, and for WB_SYNC_ALL writeback we need to ensure the
* PAGECACHE_TAG_TOWRITE index mark is not removed so another attempt to
* write this page in this writeback sweep will be made.
*/
if (clear_dirty) {
clear_page_dirty_for_io(page);
set_page_writeback(page);
} else
set_page_writeback_keepwrite(page);
unlock_page(page);
}
| 7,055 |
150,545 | 0 | std::string DoGetCurrentNetworkID(
network::NetworkConnectionTracker* network_connection_tracker) {
while (true) {
auto connection_type = GetConnectionType(network_connection_tracker);
std::string ssid_mccmnc;
switch (connection_type) {
case network::mojom::ConnectionType::CONNECTION_UNKNOWN:
case network::mojom::ConnectionType::CONNECTION_NONE:
case network::mojom::ConnectionType::CONNECTION_BLUETOOTH:
case network::mojom::ConnectionType::CONNECTION_ETHERNET:
break;
case network::mojom::ConnectionType::CONNECTION_WIFI:
#if defined(OS_ANDROID)
ssid_mccmnc = net::GetWifiSSID();
#endif
break;
case network::mojom::ConnectionType::CONNECTION_2G:
case network::mojom::ConnectionType::CONNECTION_3G:
case network::mojom::ConnectionType::CONNECTION_4G:
#if defined(OS_ANDROID)
ssid_mccmnc = net::android::GetTelephonyNetworkOperator();
#endif
break;
}
if (connection_type == GetConnectionType(network_connection_tracker)) {
if (connection_type >= network::mojom::ConnectionType::CONNECTION_2G &&
connection_type <= network::mojom::ConnectionType::CONNECTION_4G) {
return "cell," + ssid_mccmnc;
}
return base::NumberToString(static_cast<int>(connection_type)) + "," +
ssid_mccmnc;
}
}
NOTREACHED();
}
| 7,056 |
1,366 | 0 | static int nfs_mount_reply(unsigned char *pkt, unsigned len)
{
int ret;
ret = rpc_check_reply(pkt, 1);
if (ret)
return ret;
memcpy(dirfh, pkt + sizeof(struct rpc_reply) + 4, NFS_FHSIZE);
return 0;
}
| 7,057 |
127,903 | 0 | void DocumentThreadableLoader::handlePreflightResponse(const ResourceResponse& response)
{
String accessControlErrorDescription;
if (!passesAccessControlCheck(response, effectiveAllowCredentials(), getSecurityOrigin(), accessControlErrorDescription, m_requestContext)) {
handlePreflightFailure(response.url().getString(), "Response to preflight request doesn't pass access control check: " + accessControlErrorDescription);
return;
}
if (!passesPreflightStatusCheck(response, accessControlErrorDescription)) {
handlePreflightFailure(response.url().getString(), accessControlErrorDescription);
return;
}
OwnPtr<CrossOriginPreflightResultCacheItem> preflightResult = adoptPtr(new CrossOriginPreflightResultCacheItem(effectiveAllowCredentials()));
if (!preflightResult->parse(response, accessControlErrorDescription)
|| !preflightResult->allowsCrossOriginMethod(m_actualRequest.httpMethod(), accessControlErrorDescription)
|| !preflightResult->allowsCrossOriginHeaders(m_actualRequest.httpHeaderFields(), accessControlErrorDescription)) {
handlePreflightFailure(response.url().getString(), accessControlErrorDescription);
return;
}
CrossOriginPreflightResultCache::shared().appendEntry(getSecurityOrigin()->toString(), m_actualRequest.url(), preflightResult.release());
}
| 7,058 |
25,479 | 0 | static int fpr_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
#ifdef CONFIG_VSX
double buf[33];
int i;
#endif
flush_fp_to_thread(target);
#ifdef CONFIG_VSX
/* copy to local buffer then write that out */
i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
if (i)
return i;
for (i = 0; i < 32 ; i++)
target->thread.TS_FPR(i) = buf[i];
memcpy(&target->thread.fpscr, &buf[32], sizeof(double));
return 0;
#else
BUILD_BUG_ON(offsetof(struct thread_struct, fpscr) !=
offsetof(struct thread_struct, TS_FPR(32)));
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.fpr, 0, -1);
#endif
}
| 7,059 |
182,905 | 1 | void gdImageGifCtx(gdImagePtr im, gdIOCtxPtr out)
{
gdImagePtr pim = 0, tim = im;
int interlace, BitsPerPixel;
interlace = im->interlace;
if (im->trueColor) {
/* Expensive, but the only way that produces an
acceptable result: mix down to a palette
based temporary image. */
pim = gdImageCreatePaletteFromTrueColor(im, 1, 256);
if (!pim) {
return;
}
tim = pim;
}
BitsPerPixel = colorstobpp(tim->colorsTotal);
/* All set, let's do it. */
GIFEncode(
out, tim->sx, tim->sy, tim->interlace, 0, tim->transparent, BitsPerPixel,
tim->red, tim->green, tim->blue, tim);
if (pim) {
/* Destroy palette based temporary image. */
gdImageDestroy( pim);
}
}
| 7,060 |
158,703 | 0 | const CompressedFormatInfo* GetCompressedFormatInfo(GLenum format) {
for (size_t i = 0; i < arraysize(kCompressedFormatInfoArray); i++) {
if (kCompressedFormatInfoArray[i].format == format) {
return &kCompressedFormatInfoArray[i];
}
}
return nullptr;
}
| 7,061 |
182,119 | 1 | int _our_safe_pcap_next_ex(pcap_t *pcap, struct pcap_pkthdr **pkthdr,
const u_char **pktdata, const char *funcname,
const int line, const char *file)
{
int res = pcap_next_ex(pcap, pkthdr, pktdata);
if (*pktdata && *pkthdr) {
if ((*pkthdr)->len > MAXPACKET) {
fprintf(stderr, "safe_pcap_next_ex ERROR: Invalid packet length in %s:%s() line %d: %u is greater than maximum %u\n",
file, funcname, line, (*pkthdr)->len, MAXPACKET);
exit(-1);
}
if ((*pkthdr)->len < (*pkthdr)->caplen) {
fprintf(stderr, "safe_pcap_next_ex ERROR: Invalid packet length in %s:%s() line %d: packet length %u is less than capture length %u\n",
file, funcname, line, (*pkthdr)->len, (*pkthdr)->caplen);
exit(-1);
}
}
return res;
}
| 7,062 |
162,084 | 0 | void ForwardRequest(const char* service_name,
mojo::InterfaceRequest<Interface> request) {
service_manager::Connector* connector =
ServiceManagerConnection::GetForProcess()->GetConnector();
connector->BindInterface(service_name, std::move(request));
}
| 7,063 |
169,498 | 0 | MockCacheVisitor() {}
| 7,064 |
166,773 | 0 | void LargeObjectPage::RemoveFromHeap() {
static_cast<LargeObjectArena*>(Arena())->FreeLargeObjectPage(this);
}
| 7,065 |
103,637 | 0 | static void AppendParams(const std::vector<string16>& additional_names,
const std::vector<string16>& additional_values,
WebVector<WebString>* existing_names,
WebVector<WebString>* existing_values) {
DCHECK(additional_names.size() == additional_values.size());
DCHECK(existing_names->size() == existing_values->size());
size_t existing_size = existing_names->size();
size_t total_size = existing_size + additional_names.size();
WebVector<WebString> names(total_size);
WebVector<WebString> values(total_size);
for (size_t i = 0; i < existing_size; ++i) {
names[i] = (*existing_names)[i];
values[i] = (*existing_values)[i];
}
for (size_t i = 0; i < additional_names.size(); ++i) {
names[existing_size + i] = additional_names[i];
values[existing_size + i] = additional_values[i];
}
existing_names->swap(names);
existing_values->swap(values);
}
| 7,066 |
130,882 | 0 | static void methodWithOptionalStringIsNullStringMethodCallback(const v8::FunctionCallbackInfo<v8::Value>& info)
{
TRACE_EVENT_SET_SAMPLING_STATE("Blink", "DOMMethod");
TestObjectV8Internal::methodWithOptionalStringIsNullStringMethod(info);
TRACE_EVENT_SET_SAMPLING_STATE("V8", "V8Execution");
}
| 7,067 |
79,742 | 0 | png_handle_sCAL(png_structrp png_ptr, png_inforp info_ptr, png_uint_32 length)
{
png_bytep buffer;
size_t i;
int state;
png_debug(1, "in png_handle_sCAL");
if ((png_ptr->mode & PNG_HAVE_IHDR) == 0)
png_chunk_error(png_ptr, "missing IHDR");
else if ((png_ptr->mode & PNG_HAVE_IDAT) != 0)
{
png_crc_finish(png_ptr, length);
png_chunk_benign_error(png_ptr, "out of place");
return;
}
else if (info_ptr != NULL && (info_ptr->valid & PNG_INFO_sCAL) != 0)
{
png_crc_finish(png_ptr, length);
png_chunk_benign_error(png_ptr, "duplicate");
return;
}
/* Need unit type, width, \0, height: minimum 4 bytes */
else if (length < 4)
{
png_crc_finish(png_ptr, length);
png_chunk_benign_error(png_ptr, "invalid");
return;
}
png_debug1(2, "Allocating and reading sCAL chunk data (%u bytes)",
length + 1);
buffer = png_read_buffer(png_ptr, length+1, 2/*silent*/);
if (buffer == NULL)
{
png_chunk_benign_error(png_ptr, "out of memory");
png_crc_finish(png_ptr, length);
return;
}
png_crc_read(png_ptr, buffer, length);
buffer[length] = 0; /* Null terminate the last string */
if (png_crc_finish(png_ptr, 0) != 0)
return;
/* Validate the unit. */
if (buffer[0] != 1 && buffer[0] != 2)
{
png_chunk_benign_error(png_ptr, "invalid unit");
return;
}
/* Validate the ASCII numbers, need two ASCII numbers separated by
* a '\0' and they need to fit exactly in the chunk data.
*/
i = 1;
state = 0;
if (png_check_fp_number((png_const_charp)buffer, length, &state, &i) == 0 ||
i >= length || buffer[i++] != 0)
png_chunk_benign_error(png_ptr, "bad width format");
else if (PNG_FP_IS_POSITIVE(state) == 0)
png_chunk_benign_error(png_ptr, "non-positive width");
else
{
size_t heighti = i;
state = 0;
if (png_check_fp_number((png_const_charp)buffer, length,
&state, &i) == 0 || i != length)
png_chunk_benign_error(png_ptr, "bad height format");
else if (PNG_FP_IS_POSITIVE(state) == 0)
png_chunk_benign_error(png_ptr, "non-positive height");
else
/* This is the (only) success case. */
png_set_sCAL_s(png_ptr, info_ptr, buffer[0],
(png_charp)buffer+1, (png_charp)buffer+heighti);
}
}
| 7,068 |
67,464 | 0 | void unlock_rename(struct dentry *p1, struct dentry *p2)
{
inode_unlock(p1->d_inode);
if (p1 != p2) {
inode_unlock(p2->d_inode);
mutex_unlock(&p1->d_sb->s_vfs_rename_mutex);
}
}
| 7,069 |
127,207 | 0 | void DesktopSessionWin::OnSessionDetached() {
DCHECK(main_task_runner_->BelongsToCurrentThread());
DCHECK(launcher_.get() != NULL);
launcher_.reset();
}
| 7,070 |
86,247 | 0 | static int pcrypt_aead_decrypt(struct aead_request *req)
{
int err;
struct pcrypt_request *preq = aead_request_ctx(req);
struct aead_request *creq = pcrypt_request_ctx(preq);
struct padata_priv *padata = pcrypt_request_padata(preq);
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
u32 flags = aead_request_flags(req);
memset(padata, 0, sizeof(struct padata_priv));
padata->parallel = pcrypt_aead_dec;
padata->serial = pcrypt_aead_serial;
aead_request_set_tfm(creq, ctx->child);
aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
pcrypt_aead_done, req);
aead_request_set_crypt(creq, req->src, req->dst,
req->cryptlen, req->iv);
aead_request_set_ad(creq, req->assoclen);
err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pdecrypt);
if (!err)
return -EINPROGRESS;
return err;
}
| 7,071 |
120,572 | 0 | void Element::normalizeAttributes()
{
if (!hasAttributes())
return;
for (unsigned i = 0; i < attributeCount(); ++i) {
if (RefPtr<Attr> attr = attrIfExists(attributeItem(i)->name()))
attr->normalize();
}
}
| 7,072 |
20,894 | 0 | static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
const void *v)
{
int handled = 0;
int n;
do {
n = min(len, 8);
if (!(vcpu->arch.apic &&
!kvm_iodevice_write(&vcpu->arch.apic->dev, addr, n, v))
&& kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, addr, n, v))
break;
handled += n;
addr += n;
len -= n;
v += n;
} while (len);
return handled;
}
| 7,073 |
51,406 | 0 | static void _gdImageFillTiled(gdImagePtr im, int x, int y, int nc)
{
int i, l, x1, x2, dy;
int oc; /* old pixel value */
int wx2,wy2;
/* stack of filled segments */
struct seg *stack;
struct seg *sp;
char **pts;
if (!im->tile) {
return;
}
wx2=im->sx;wy2=im->sy;
nc = gdImageTileGet(im,x,y);
pts = (char **) ecalloc(im->sy + 1, sizeof(char *));
for (i = 0; i < im->sy + 1; i++) {
pts[i] = (char *) ecalloc(im->sx + 1, sizeof(char));
}
stack = (struct seg *)safe_emalloc(sizeof(struct seg), ((int)(im->sy*im->sx)/4), 1);
sp = stack;
oc = gdImageGetPixel(im, x, y);
/* required! */
FILL_PUSH(y,x,x,1);
/* seed segment (popped 1st) */
FILL_PUSH(y+1, x, x, -1);
while (sp>stack) {
FILL_POP(y, x1, x2, dy);
for (x=x1; x>=0 && (!pts[y][x] && gdImageGetPixel(im,x,y)==oc); x--) {
nc = gdImageTileGet(im,x,y);
pts[y][x] = 1;
gdImageSetPixel(im,x, y, nc);
}
if (x>=x1) {
goto skip;
}
l = x+1;
/* leak on left? */
if (l<x1) {
FILL_PUSH(y, l, x1-1, -dy);
}
x = x1+1;
do {
for(; x<wx2 && (!pts[y][x] && gdImageGetPixel(im,x, y)==oc); x++) {
nc = gdImageTileGet(im,x,y);
pts[y][x] = 1;
gdImageSetPixel(im, x, y, nc);
}
FILL_PUSH(y, l, x-1, dy);
/* leak on right? */
if (x>x2+1) {
FILL_PUSH(y, x2+1, x-1, -dy);
}
skip: for(x++; x<=x2 && (pts[y][x] || gdImageGetPixel(im,x, y)!=oc); x++);
l = x;
} while (x<=x2);
}
for(i = 0; i < im->sy + 1; i++) {
efree(pts[i]);
}
efree(pts);
efree(stack);
}
| 7,074 |
7,111 | 0 | tt_cmap14_init( TT_CMap14 cmap,
FT_Byte* table )
{
cmap->cmap.data = table;
table += 6;
cmap->num_selectors = FT_PEEK_ULONG( table );
cmap->max_results = 0;
cmap->results = NULL;
return FT_Err_Ok;
}
| 7,075 |
105,409 | 0 | void webkit_web_view_copy_clipboard(WebKitWebView* webView)
{
g_return_if_fail(WEBKIT_IS_WEB_VIEW(webView));
if (webkit_web_view_can_copy_clipboard(webView))
g_signal_emit(webView, webkit_web_view_signals[COPY_CLIPBOARD], 0);
}
| 7,076 |
82,277 | 0 | mrb_obj_clone(mrb_state *mrb, mrb_value self)
{
struct RObject *p;
mrb_value clone;
if (mrb_immediate_p(self)) {
mrb_raisef(mrb, E_TYPE_ERROR, "can't clone %S", self);
}
if (mrb_type(self) == MRB_TT_SCLASS) {
mrb_raise(mrb, E_TYPE_ERROR, "can't clone singleton class");
}
p = (struct RObject*)mrb_obj_alloc(mrb, mrb_type(self), mrb_obj_class(mrb, self));
p->c = mrb_singleton_class_clone(mrb, self);
mrb_field_write_barrier(mrb, (struct RBasic*)p, (struct RBasic*)p->c);
clone = mrb_obj_value(p);
init_copy(mrb, clone, self);
return clone;
}
| 7,077 |
15,826 | 0 | static void virtio_net_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
dc->props = virtio_net_properties;
set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
vdc->realize = virtio_net_device_realize;
vdc->unrealize = virtio_net_device_unrealize;
vdc->get_config = virtio_net_get_config;
vdc->set_config = virtio_net_set_config;
vdc->get_features = virtio_net_get_features;
vdc->set_features = virtio_net_set_features;
vdc->bad_features = virtio_net_bad_features;
vdc->reset = virtio_net_reset;
vdc->set_status = virtio_net_set_status;
vdc->guest_notifier_mask = virtio_net_guest_notifier_mask;
vdc->guest_notifier_pending = virtio_net_guest_notifier_pending;
}
| 7,078 |
109,599 | 0 | PassRefPtr<Attr> Document::createAttributeNS(const String& namespaceURI, const String& qualifiedName, ExceptionState& es, bool shouldIgnoreNamespaceChecks)
{
String prefix, localName;
if (!parseQualifiedName(qualifiedName, prefix, localName, es))
return 0;
QualifiedName qName(prefix, localName, namespaceURI);
if (!shouldIgnoreNamespaceChecks && !hasValidNamespaceForAttributes(qName)) {
es.throwUninformativeAndGenericDOMException(NamespaceError);
return 0;
}
return Attr::create(*this, qName, emptyString());
}
| 7,079 |
26,071 | 0 | void perf_event_enable(struct perf_event *event)
{
struct perf_event_context *ctx = event->ctx;
struct task_struct *task = ctx->task;
if (!task) {
/*
* Enable the event on the cpu that it's on
*/
cpu_function_call(event->cpu, __perf_event_enable, event);
return;
}
raw_spin_lock_irq(&ctx->lock);
if (event->state >= PERF_EVENT_STATE_INACTIVE)
goto out;
/*
* If the event is in error state, clear that first.
* That way, if we see the event in error state below, we
* know that it has gone back into error state, as distinct
* from the task having been scheduled away before the
* cross-call arrived.
*/
if (event->state == PERF_EVENT_STATE_ERROR)
event->state = PERF_EVENT_STATE_OFF;
retry:
if (!ctx->is_active) {
__perf_event_mark_enabled(event, ctx);
goto out;
}
raw_spin_unlock_irq(&ctx->lock);
if (!task_function_call(task, __perf_event_enable, event))
return;
raw_spin_lock_irq(&ctx->lock);
/*
* If the context is active and the event is still off,
* we need to retry the cross-call.
*/
if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF) {
/*
* task could have been flipped by a concurrent
* perf_event_context_sched_out()
*/
task = ctx->task;
goto retry;
}
out:
raw_spin_unlock_irq(&ctx->lock);
}
| 7,080 |
58,216 | 0 | void scheduler_ipi(void)
{
/*
* Fold TIF_NEED_RESCHED into the preempt_count; anybody setting
* TIF_NEED_RESCHED remotely (for the first time) will also send
* this IPI.
*/
preempt_fold_need_resched();
if (llist_empty(&this_rq()->wake_list)
&& !tick_nohz_full_cpu(smp_processor_id())
&& !got_nohz_idle_kick())
return;
/*
* Not all reschedule IPI handlers call irq_enter/irq_exit, since
* traditionally all their work was done from the interrupt return
* path. Now that we actually do some work, we need to make sure
* we do call them.
*
* Some archs already do call them, luckily irq_enter/exit nest
* properly.
*
* Arguably we should visit all archs and update all handlers,
* however a fair share of IPIs are still resched only so this would
* somewhat pessimize the simple resched case.
*/
irq_enter();
tick_nohz_full_check();
sched_ttwu_pending();
/*
* Check if someone kicked us for doing the nohz idle load balance.
*/
if (unlikely(got_nohz_idle_kick())) {
this_rq()->idle_balance = 1;
raise_softirq_irqoff(SCHED_SOFTIRQ);
}
irq_exit();
}
| 7,081 |
46,094 | 0 | bool_t auth_gssapi_seal_seq(
gss_ctx_id_t context,
uint32_t seq_num,
gss_buffer_t out_buf)
{
gss_buffer_desc in_buf;
OM_uint32 gssstat, minor_stat;
uint32_t nl_seq_num;
nl_seq_num = htonl(seq_num);
in_buf.length = sizeof(uint32_t);
in_buf.value = (char *) &nl_seq_num;
gssstat = gss_seal(&minor_stat, context, 0, GSS_C_QOP_DEFAULT,
&in_buf, NULL, out_buf);
if (gssstat != GSS_S_COMPLETE) {
PRINTF(("gssapi_seal_seq: failed\n"));
AUTH_GSSAPI_DISPLAY_STATUS(("sealing sequence number",
gssstat, minor_stat));
return FALSE;
}
return TRUE;
}
| 7,082 |
161,438 | 0 | void GotUsageAndQuotaDataCallback(
std::unique_ptr<StorageHandler::GetUsageAndQuotaCallback> callback,
blink::mojom::QuotaStatusCode code,
int64_t usage,
int64_t quota,
base::flat_map<storage::QuotaClient::ID, int64_t> usage_breakdown) {
DCHECK_CURRENTLY_ON(BrowserThread::IO);
BrowserThread::PostTask(
BrowserThread::UI, FROM_HERE,
base::BindOnce(ReportUsageAndQuotaDataOnUIThread,
base::Passed(std::move(callback)), code, usage, quota,
std::move(usage_breakdown)));
}
| 7,083 |
12,888 | 0 | int EVP_EncodeBlock(unsigned char *t, const unsigned char *f, int dlen)
{
int i, ret = 0;
unsigned long l;
for (i = dlen; i > 0; i -= 3) {
if (i >= 3) {
l = (((unsigned long)f[0]) << 16L) |
(((unsigned long)f[1]) << 8L) | f[2];
*(t++) = conv_bin2ascii(l >> 18L);
*(t++) = conv_bin2ascii(l >> 12L);
*(t++) = conv_bin2ascii(l >> 6L);
*(t++) = conv_bin2ascii(l);
} else {
l = ((unsigned long)f[0]) << 16L;
if (i == 2)
l |= ((unsigned long)f[1] << 8L);
*(t++) = conv_bin2ascii(l >> 18L);
*(t++) = conv_bin2ascii(l >> 12L);
*(t++) = (i == 1) ? '=' : conv_bin2ascii(l >> 6L);
*(t++) = '=';
}
ret += 4;
f += 3;
}
*t = '\0';
return (ret);
}
| 7,084 |
119,581 | 0 | void RenderBlock::computeBlockDirectionPositionsForLine(RootInlineBox* lineBox, BidiRun* firstRun, GlyphOverflowAndFallbackFontsMap& textBoxDataMap,
VerticalPositionCache& verticalPositionCache)
{
setLogicalHeight(lineBox->alignBoxesInBlockDirection(logicalHeight(), textBoxDataMap, verticalPositionCache));
for (BidiRun* r = firstRun; r; r = r->next()) {
ASSERT(r->m_box);
if (!r->m_box)
continue; // Skip runs with no line boxes.
if (r->m_object->isOutOfFlowPositioned())
r->m_box->setLogicalTop(logicalHeight());
if (r->m_object->isText())
toRenderText(r->m_object)->positionLineBox(r->m_box);
else if (r->m_object->isBox())
toRenderBox(r->m_object)->positionLineBox(r->m_box);
}
lineBox->markDirty(false);
}
| 7,085 |
70,780 | 0 | static int build_feed_streams(void)
{
FFServerStream *stream, *feed;
int i, fd;
/* gather all streams */
for(stream = config.first_stream; stream; stream = stream->next) {
feed = stream->feed;
if (!feed)
continue;
if (stream->is_feed) {
for(i=0;i<stream->nb_streams;i++)
stream->feed_streams[i] = i;
continue;
}
/* we handle a stream coming from a feed */
for(i=0;i<stream->nb_streams;i++)
stream->feed_streams[i] = add_av_stream(feed, stream->streams[i]);
}
/* create feed files if needed */
for(feed = config.first_feed; feed; feed = feed->next_feed) {
if (avio_check(feed->feed_filename, AVIO_FLAG_READ) > 0) {
AVFormatContext *s = NULL;
int matches = 0;
/* See if it matches */
if (avformat_open_input(&s, feed->feed_filename, NULL, NULL) < 0) {
http_log("Deleting feed file '%s' as it appears "
"to be corrupt\n",
feed->feed_filename);
goto drop;
}
/* set buffer size */
if (ffio_set_buf_size(s->pb, FFM_PACKET_SIZE) < 0) {
http_log("Failed to set buffer size\n");
avformat_close_input(&s);
goto bail;
}
/* Now see if it matches */
if (s->nb_streams != feed->nb_streams) {
http_log("Deleting feed file '%s' as stream counts "
"differ (%d != %d)\n",
feed->feed_filename, s->nb_streams, feed->nb_streams);
goto drop;
}
matches = 1;
for(i=0;i<s->nb_streams;i++) {
AVStream *ss;
LayeredAVStream *sf;
sf = feed->streams[i];
ss = s->streams[i];
if (sf->index != ss->index || sf->id != ss->id) {
http_log("Index & Id do not match for stream %d (%s)\n",
i, feed->feed_filename);
matches = 0;
break;
}
matches = check_codec_match (sf, ss, i);
if (!matches)
break;
}
drop:
if (s)
avformat_close_input(&s);
if (!matches) {
if (feed->readonly) {
http_log("Unable to delete read-only feed file '%s'\n",
feed->feed_filename);
goto bail;
}
unlink(feed->feed_filename);
}
}
if (avio_check(feed->feed_filename, AVIO_FLAG_WRITE) <= 0) {
AVFormatContext *s = avformat_alloc_context();
if (!s) {
http_log("Failed to allocate context\n");
goto bail;
}
if (feed->readonly) {
http_log("Unable to create feed file '%s' as it is "
"marked readonly\n",
feed->feed_filename);
avformat_free_context(s);
goto bail;
}
/* only write the header of the ffm file */
if (avio_open(&s->pb, feed->feed_filename, AVIO_FLAG_WRITE) < 0) {
http_log("Could not open output feed file '%s'\n",
feed->feed_filename);
avformat_free_context(s);
goto bail;
}
s->oformat = feed->fmt;
for (i = 0; i<feed->nb_streams; i++) {
AVStream *st = avformat_new_stream(s, NULL); // FIXME free this
if (!st) {
http_log("Failed to allocate stream\n");
goto bail;
}
unlayer_stream(st, feed->streams[i]);
}
if (avformat_write_header(s, NULL) < 0) {
http_log("Container doesn't support the required parameters\n");
avio_closep(&s->pb);
s->streams = NULL;
s->nb_streams = 0;
avformat_free_context(s);
goto bail;
}
/* XXX: need better API */
av_freep(&s->priv_data);
avio_closep(&s->pb);
s->streams = NULL;
s->nb_streams = 0;
avformat_free_context(s);
}
/* get feed size and write index */
fd = open(feed->feed_filename, O_RDONLY);
if (fd < 0) {
http_log("Could not open output feed file '%s'\n",
feed->feed_filename);
goto bail;
}
feed->feed_write_index = FFMAX(ffm_read_write_index(fd),
FFM_PACKET_SIZE);
feed->feed_size = lseek(fd, 0, SEEK_END);
/* ensure that we do not wrap before the end of file */
if (feed->feed_max_size && feed->feed_max_size < feed->feed_size)
feed->feed_max_size = feed->feed_size;
close(fd);
}
return 0;
bail:
return -1;
}
| 7,086 |
53,462 | 0 | archive_read_format_rar_options(struct archive_read *a,
const char *key, const char *val)
{
struct rar *rar;
int ret = ARCHIVE_FAILED;
rar = (struct rar *)(a->format->data);
if (strcmp(key, "hdrcharset") == 0) {
if (val == NULL || val[0] == 0)
archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
"rar: hdrcharset option needs a character-set name");
else {
rar->opt_sconv =
archive_string_conversion_from_charset(
&a->archive, val, 0);
if (rar->opt_sconv != NULL)
ret = ARCHIVE_OK;
else
ret = ARCHIVE_FATAL;
}
return (ret);
}
/* Note: The "warn" return is just to inform the options
* supervisor that we didn't handle it. It will generate
* a suitable error if no one used this option. */
return (ARCHIVE_WARN);
}
| 7,087 |
13,930 | 0 | gs_get_colorname_string(const gs_memory_t *mem, gs_separation_name colorname_index,
unsigned char **ppstr, unsigned int *pname_size)
{
ref nref;
name_index_ref(mem, colorname_index, &nref);
name_string_ref(mem, &nref, &nref);
return obj_string_data(mem, &nref, (const unsigned char**) ppstr, pname_size);
}
| 7,088 |
149,807 | 0 | bool LayerTreeHost::IsThreaded() const {
DCHECK(compositor_mode_ != CompositorMode::THREADED ||
task_runner_provider_->HasImplThread());
return compositor_mode_ == CompositorMode::THREADED;
}
| 7,089 |
73,843 | 0 | php_http_url_t *php_http_url_from_zval(zval *value, unsigned flags TSRMLS_DC)
{
zval *zcpy;
php_http_url_t *purl;
switch (Z_TYPE_P(value)) {
case IS_ARRAY:
case IS_OBJECT:
purl = php_http_url_from_struct(HASH_OF(value));
break;
default:
zcpy = php_http_ztyp(IS_STRING, value);
purl = php_http_url_parse(Z_STRVAL_P(zcpy), Z_STRLEN_P(zcpy), flags TSRMLS_CC);
zval_ptr_dtor(&zcpy);
}
return purl;
}
| 7,090 |
43,476 | 0 | static int cbc_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
struct blkcipher_walk walk;
int err;
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
kernel_fpu_begin();
while ((nbytes = walk.nbytes)) {
aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
nbytes & AES_BLOCK_MASK, walk.iv);
nbytes &= AES_BLOCK_SIZE - 1;
err = blkcipher_walk_done(desc, &walk, nbytes);
}
kernel_fpu_end();
return err;
}
| 7,091 |
154,668 | 0 | error::Error GLES2DecoderPassthroughImpl::DoGenTextures(
GLsizei n,
volatile GLuint* textures) {
return GenHelper(n, textures, &resources_->texture_id_map,
[this](GLsizei n, GLuint* textures) {
api()->glGenTexturesFn(n, textures);
});
}
| 7,092 |
48,350 | 0 | void t2p_read_tiff_init(T2P* t2p, TIFF* input){
tdir_t directorycount=0;
tdir_t i=0;
uint16 pagen=0;
uint16 paged=0;
uint16 xuint16=0;
directorycount=TIFFNumberOfDirectories(input);
t2p->tiff_pages = (T2P_PAGE*) _TIFFmalloc(TIFFSafeMultiply(tmsize_t,directorycount,sizeof(T2P_PAGE)));
if(t2p->tiff_pages==NULL){
TIFFError(
TIFF2PDF_MODULE,
"Can't allocate " TIFF_SIZE_FORMAT " bytes of memory for tiff_pages array, %s",
(TIFF_SIZE_T) directorycount * sizeof(T2P_PAGE),
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
_TIFFmemset( t2p->tiff_pages, 0x00, directorycount * sizeof(T2P_PAGE));
t2p->tiff_tiles = (T2P_TILES*) _TIFFmalloc(TIFFSafeMultiply(tmsize_t,directorycount,sizeof(T2P_TILES)));
if(t2p->tiff_tiles==NULL){
TIFFError(
TIFF2PDF_MODULE,
"Can't allocate " TIFF_SIZE_FORMAT " bytes of memory for tiff_tiles array, %s",
(TIFF_SIZE_T) directorycount * sizeof(T2P_TILES),
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
_TIFFmemset( t2p->tiff_tiles, 0x00, directorycount * sizeof(T2P_TILES));
for(i=0;i<directorycount;i++){
uint32 subfiletype = 0;
if(!TIFFSetDirectory(input, i)){
TIFFError(
TIFF2PDF_MODULE,
"Can't set directory %u of input file %s",
i,
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
if(TIFFGetField(input, TIFFTAG_PAGENUMBER, &pagen, &paged)){
if((pagen>paged) && (paged != 0)){
t2p->tiff_pages[t2p->tiff_pagecount].page_number =
paged;
} else {
t2p->tiff_pages[t2p->tiff_pagecount].page_number =
pagen;
}
goto ispage2;
}
if(TIFFGetField(input, TIFFTAG_SUBFILETYPE, &subfiletype)){
if ( ((subfiletype & FILETYPE_PAGE) != 0)
|| (subfiletype == 0)){
goto ispage;
} else {
goto isnotpage;
}
}
if(TIFFGetField(input, TIFFTAG_OSUBFILETYPE, &subfiletype)){
if ((subfiletype == OFILETYPE_IMAGE)
|| (subfiletype == OFILETYPE_PAGE)
|| (subfiletype == 0) ){
goto ispage;
} else {
goto isnotpage;
}
}
ispage:
t2p->tiff_pages[t2p->tiff_pagecount].page_number=t2p->tiff_pagecount;
ispage2:
t2p->tiff_pages[t2p->tiff_pagecount].page_directory=i;
if(TIFFIsTiled(input)){
t2p->tiff_pages[t2p->tiff_pagecount].page_tilecount =
TIFFNumberOfTiles(input);
}
t2p->tiff_pagecount++;
isnotpage:
(void)0;
}
qsort((void*) t2p->tiff_pages, t2p->tiff_pagecount,
sizeof(T2P_PAGE), t2p_cmp_t2p_page);
for(i=0;i<t2p->tiff_pagecount;i++){
t2p->pdf_xrefcount += 5;
TIFFSetDirectory(input, t2p->tiff_pages[i].page_directory );
if((TIFFGetField(input, TIFFTAG_PHOTOMETRIC, &xuint16)
&& (xuint16==PHOTOMETRIC_PALETTE))
|| TIFFGetField(input, TIFFTAG_INDEXED, &xuint16)) {
t2p->tiff_pages[i].page_extra++;
t2p->pdf_xrefcount++;
}
#ifdef ZIP_SUPPORT
if (TIFFGetField(input, TIFFTAG_COMPRESSION, &xuint16)) {
if( (xuint16== COMPRESSION_DEFLATE ||
xuint16== COMPRESSION_ADOBE_DEFLATE) &&
((t2p->tiff_pages[i].page_tilecount != 0)
|| TIFFNumberOfStrips(input)==1) &&
(t2p->pdf_nopassthrough==0) ){
if(t2p->pdf_minorversion<2){t2p->pdf_minorversion=2;}
}
}
#endif
if (TIFFGetField(input, TIFFTAG_TRANSFERFUNCTION,
&(t2p->tiff_transferfunction[0]),
&(t2p->tiff_transferfunction[1]),
&(t2p->tiff_transferfunction[2]))) {
if((t2p->tiff_transferfunction[1] != (float*) NULL) &&
(t2p->tiff_transferfunction[2] != (float*) NULL) &&
(t2p->tiff_transferfunction[1] !=
t2p->tiff_transferfunction[0])) {
t2p->tiff_transferfunctioncount = 3;
t2p->tiff_pages[i].page_extra += 4;
t2p->pdf_xrefcount += 4;
} else {
t2p->tiff_transferfunctioncount = 1;
t2p->tiff_pages[i].page_extra += 2;
t2p->pdf_xrefcount += 2;
}
if(t2p->pdf_minorversion < 2)
t2p->pdf_minorversion = 2;
} else {
t2p->tiff_transferfunctioncount=0;
}
if( TIFFGetField(
input,
TIFFTAG_ICCPROFILE,
&(t2p->tiff_iccprofilelength),
&(t2p->tiff_iccprofile)) != 0){
t2p->tiff_pages[i].page_extra++;
t2p->pdf_xrefcount++;
if(t2p->pdf_minorversion<3){t2p->pdf_minorversion=3;}
}
t2p->tiff_tiles[i].tiles_tilecount=
t2p->tiff_pages[i].page_tilecount;
if( (TIFFGetField(input, TIFFTAG_PLANARCONFIG, &xuint16) != 0)
&& (xuint16 == PLANARCONFIG_SEPARATE ) ){
if( !TIFFGetField(input, TIFFTAG_SAMPLESPERPIXEL, &xuint16) )
{
TIFFError(
TIFF2PDF_MODULE,
"Missing SamplesPerPixel, %s",
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
if( (t2p->tiff_tiles[i].tiles_tilecount % xuint16) != 0 )
{
TIFFError(
TIFF2PDF_MODULE,
"Invalid tile count, %s",
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
t2p->tiff_tiles[i].tiles_tilecount/= xuint16;
}
if( t2p->tiff_tiles[i].tiles_tilecount > 0){
t2p->pdf_xrefcount +=
(t2p->tiff_tiles[i].tiles_tilecount -1)*2;
TIFFGetField(input,
TIFFTAG_TILEWIDTH,
&( t2p->tiff_tiles[i].tiles_tilewidth) );
TIFFGetField(input,
TIFFTAG_TILELENGTH,
&( t2p->tiff_tiles[i].tiles_tilelength) );
t2p->tiff_tiles[i].tiles_tiles =
(T2P_TILE*) _TIFFmalloc(TIFFSafeMultiply(tmsize_t,t2p->tiff_tiles[i].tiles_tilecount,
sizeof(T2P_TILE)) );
if( t2p->tiff_tiles[i].tiles_tiles == NULL){
TIFFError(
TIFF2PDF_MODULE,
"Can't allocate " TIFF_SIZE_FORMAT " bytes of memory for t2p_read_tiff_init, %s",
(TIFF_SIZE_T) t2p->tiff_tiles[i].tiles_tilecount * sizeof(T2P_TILE),
TIFFFileName(input));
t2p->t2p_error = T2P_ERR_ERROR;
return;
}
}
}
return;
}
| 7,093 |
145,490 | 0 | void ResourceDispatcherHostImpl::DidReceiveResponse(ResourceLoader* loader) {
ResourceRequestInfoImpl* info = loader->GetRequestInfo();
net::URLRequest* request = loader->request();
if (request->was_fetched_via_proxy() &&
request->was_fetched_via_spdy() &&
request->url().SchemeIs(url::kHttpScheme)) {
scheduler_->OnReceivedSpdyProxiedHttpResponse(
info->GetChildID(), info->GetRouteID());
}
if (request->response_info().async_revalidation_required) {
DCHECK(async_revalidation_manager_);
async_revalidation_manager_->BeginAsyncRevalidation(*request,
scheduler_.get());
}
int render_process_id, render_frame_host;
if (!info->GetAssociatedRenderFrame(&render_process_id, &render_frame_host))
return;
if (info->IsDownload())
return;
scoped_ptr<ResourceRequestDetails> detail(new ResourceRequestDetails(
request, GetCertID(request, info->GetChildID())));
BrowserThread::PostTask(
BrowserThread::UI, FROM_HERE,
base::Bind(
&NotifyResponseOnUI,
render_process_id, render_frame_host, base::Passed(&detail)));
}
| 7,094 |
19,471 | 0 | static void efx_fill_test(unsigned int test_index,
struct ethtool_string *strings, u64 *data,
int *test, const char *unit_format, int unit_id,
const char *test_format, const char *test_id)
{
struct ethtool_string unit_str, test_str;
/* Fill data value, if applicable */
if (data)
data[test_index] = *test;
/* Fill string, if applicable */
if (strings) {
if (strchr(unit_format, '%'))
snprintf(unit_str.name, sizeof(unit_str.name),
unit_format, unit_id);
else
strcpy(unit_str.name, unit_format);
snprintf(test_str.name, sizeof(test_str.name),
test_format, test_id);
snprintf(strings[test_index].name,
sizeof(strings[test_index].name),
"%-6s %-24s", unit_str.name, test_str.name);
}
}
| 7,095 |
113,826 | 0 | void BlacklistAddOneDll(const wchar_t* module_name,
bool check_in_browser,
sandbox::TargetPolicy* policy) {
HMODULE module = check_in_browser ? ::GetModuleHandleW(module_name) : NULL;
if (!module) {
std::wstring name(module_name);
size_t period = name.rfind(L'.');
DCHECK_NE(std::string::npos, period);
DCHECK_LE(3U, (name.size() - period));
if (period <= 8)
return;
for (int ix = 0; ix < 3; ++ix) {
const wchar_t suffix[] = {'~', ('1' + ix), 0};
std::wstring alt_name = name.substr(0, 6) + suffix;
alt_name += name.substr(period, name.size());
if (check_in_browser) {
module = ::GetModuleHandleW(alt_name.c_str());
if (!module)
return;
if (!IsExpandedModuleName(module, module_name))
return;
}
policy->AddDllToUnload(alt_name.c_str());
}
}
policy->AddDllToUnload(module_name);
DVLOG(1) << "dll to unload found: " << module_name;
return;
}
| 7,096 |
54,065 | 0 | static void devinet_sysctl_unregister(struct in_device *idev)
{
}
| 7,097 |
115,880 | 0 | static bool _ewk_frame_rect_is_negative_value(const WebCore::IntRect& rect)
{
return (rect.x() < 0 || rect.y() < 0);
}
| 7,098 |
777 | 0 | poppler_page_get_crop_box (PopplerPage *page, PopplerRectangle *rect)
{
PDFRectangle* cropBox = page->page->getCropBox ();
rect->x1 = cropBox->x1;
rect->x2 = cropBox->x2;
rect->y1 = cropBox->y1;
rect->y2 = cropBox->y2;
}
| 7,099 |