unique_id
int64 13
189k
| target
int64 0
1
| code
stringlengths 20
241k
| __index_level_0__
int64 0
18.9k
|
---|---|---|---|
173,029 | 0 | test_one_file(struct display *dp, const char *filename)
{
/* First cache the file and update the display original file
* information for the new file.
*/
dp->operation = "cache file";
dp->transforms = 0;
display_cache_file(dp, filename);
update_display(dp);
/* First test: if there are options that should be ignored for this file
* verify that they really are ignored.
*/
if (dp->ignored_transforms != 0)
{
read_png(dp, &dp->original_file, "ignored transforms",
dp->ignored_transforms);
/* The result should be identical to the original_rows */
if (!compare_read(dp, 0/*transforms applied*/))
return; /* no point testing more */
}
#ifdef PNG_WRITE_SUPPORTED
/* Second test: write the original PNG data out to a new file (to test the
* write side) then read the result back in and make sure that it hasn't
* changed.
*/
dp->operation = "write";
write_png(dp, dp->original_ip, 0/*transforms*/);
read_png(dp, &dp->written_file, NULL, 0/*transforms*/);
if (!compare_read(dp, 0/*transforms applied*/))
return;
#endif
/* Third test: the active options. Test each in turn, or, with the
* EXHAUSTIVE option, test all possible combinations.
*/
{
/* Use unsigned int here because the code below to increment through all
* the possibilities exhaustively has to use a compare and that must be
* unsigned, because some transforms are negative on a 16-bit system.
*/
unsigned int active = dp->active_transforms;
const int exhaustive = (dp->options & EXHAUSTIVE) != 0;
unsigned int current = first_transform(active);
unsigned int bad_transforms = 0;
unsigned int bad_combo = ~0U; /* bitwise AND of failing transforms */
unsigned int bad_combo_list = 0; /* bitwise OR of failures */
for (;;)
{
read_png(dp, &dp->original_file, "active transforms", current);
/* If this involved any irreversible transformations then if we write
* it out with just the reversible transformations and read it in again
* with the same transforms we should get the same thing. At present
* this isn't done - it just seems like a waste of time and it would
* require two sets of read png_struct/png_info.
*
* If there were no irreversible transformations then if we write it
* out and read it back in again (without the reversible transforms)
* we should get back to the place where we started.
*/
#ifdef PNG_WRITE_SUPPORTED
if ((current & write_transforms) == current)
{
/* All transforms reversible: write the PNG with the transformations
* reversed, then read it back in with no transformations. The
* result should be the same as the original apart from the loss of
* low order bits because of the SHIFT/sBIT transform.
*/
dp->operation = "reversible transforms";
write_png(dp, dp->read_ip, current);
/* And if this is read back in, because all the transformations were
* reversible, the result should be the same.
*/
read_png(dp, &dp->written_file, NULL, 0);
if (!compare_read(dp, current/*for the SHIFT/sBIT transform*/))
{
/* This set of transforms failed. If a single bit is set - if
* there is just one transform - don't include this in further
* 'exhaustive' tests. Notice that each transform is tested on
* its own before testing combos in the exhaustive case.
*/
if (is_combo(current))
{
bad_combo &= current;
bad_combo_list |= current;
}
else
bad_transforms |= current;
}
}
#endif
/* Now move to the next transform */
if (exhaustive) /* all combinations */
{
unsigned int next = current;
do
{
if (next == read_transforms) /* Everything tested */
goto combo;
++next;
} /* skip known bad combos if the relevant option is set; skip
* combos involving known bad single transforms in all cases.
*/
while ( (next & read_transforms) <= current
|| (next & active) == 0 /* skip cases that do nothing */
|| (next & bad_transforms) != 0
|| skip_transform(dp, next));
assert((next & read_transforms) == next);
current = next;
}
else /* one at a time */
{
active &= ~current;
if (active == 0)
goto combo;
current = first_transform(active);
}
}
combo:
if (dp->options & FIND_BAD_COMBOS)
{
/* bad_combos identifies the combos that occur in all failing cases;
* bad_combo_list identifies transforms that do not prevent the
* failure.
*/
if (bad_combo != ~0U)
printf("%s[0x%x]: PROBLEM: 0x%x[0x%x] ANTIDOTE: 0x%x\n",
dp->filename, active, bad_combo, bad_combo_list,
rw_transforms & ~bad_combo_list);
else
printf("%s: no %sbad combos found\n", dp->filename,
(dp->options & SKIP_BUGS) ? "additional " : "");
}
}
}
| 16,000 |
131,393 | 0 | static void floatArrayAttributeAttributeSetterCallback(v8::Local<v8::String>, v8::Local<v8::Value> jsValue, const v8::PropertyCallbackInfo<void>& info)
{
TRACE_EVENT_SET_SAMPLING_STATE("Blink", "DOMSetter");
TestObjectPythonV8Internal::floatArrayAttributeAttributeSetter(jsValue, info);
TRACE_EVENT_SET_SAMPLING_STATE("V8", "V8Execution");
}
| 16,001 |
148,072 | 0 | void V8TestObject::VoidMethodDefaultUndefinedStringArgMethodCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
RUNTIME_CALL_TIMER_SCOPE_DISABLED_BY_DEFAULT(info.GetIsolate(), "Blink_TestObject_voidMethodDefaultUndefinedStringArg");
test_object_v8_internal::VoidMethodDefaultUndefinedStringArgMethod(info);
}
| 16,002 |
109,063 | 0 | WebURLError RenderViewImpl::cannotHandleRequestError(
WebFrame* frame, const WebURLRequest& request) {
NOTREACHED(); // Since we said we can handle all requests.
return WebURLError();
}
| 16,003 |
99,114 | 0 | void ResourceDispatcherHost::RemoveObserver(Observer* obs) {
observer_list_.RemoveObserver(obs);
}
| 16,004 |
159,481 | 0 | void Pack<WebGLImageConversion::kDataFormatRGB8,
WebGLImageConversion::kAlphaDoPremultiply,
uint8_t,
uint8_t>(const uint8_t* source,
uint8_t* destination,
unsigned pixels_per_row) {
for (unsigned i = 0; i < pixels_per_row; ++i) {
float scale_factor = source[3] / 255.0f;
uint8_t source_r =
static_cast<uint8_t>(static_cast<float>(source[0]) * scale_factor);
uint8_t source_g =
static_cast<uint8_t>(static_cast<float>(source[1]) * scale_factor);
uint8_t source_b =
static_cast<uint8_t>(static_cast<float>(source[2]) * scale_factor);
destination[0] = source_r;
destination[1] = source_g;
destination[2] = source_b;
source += 4;
destination += 3;
}
}
| 16,005 |
23,065 | 0 | static int encode_getattr_one(struct xdr_stream *xdr, uint32_t bitmap)
{
__be32 *p;
RESERVE_SPACE(12);
WRITE32(OP_GETATTR);
WRITE32(1);
WRITE32(bitmap);
return 0;
}
| 16,006 |
121,285 | 0 | void HTMLInputElement::subtreeHasChanged()
{
m_inputType->subtreeHasChanged();
calculateAndAdjustDirectionality();
}
| 16,007 |
172,204 | 0 | virtual void SetUp() {
AllocationTestHarness::SetUp();
pipe(pipefd);
done = semaphore_new(0);
}
| 16,008 |
56,813 | 0 | struct usb_device *usb_hub_find_child(struct usb_device *hdev,
int port1)
{
struct usb_hub *hub = usb_hub_to_struct_hub(hdev);
if (port1 < 1 || port1 > hdev->maxchild)
return NULL;
return hub->ports[port1 - 1]->child;
}
| 16,009 |
143,453 | 0 | void setUrlToLoad(const String& value, URLReplacement replacement)
{
if (replacement == DisallowURLReplacement && !m_urlToLoad.isEmpty())
return;
String url = stripLeadingAndTrailingHTMLSpaces(value);
if (url.isEmpty())
return;
m_urlToLoad = url;
}
| 16,010 |
154,007 | 0 | void GLES2DecoderImpl::DoGetBufferParameteriv(GLenum target,
GLenum pname,
GLint* params,
GLsizei params_size) {
buffer_manager()->ValidateAndDoGetBufferParameteriv(
&state_, error_state_.get(), target, pname, params);
}
| 16,011 |
142,455 | 0 | void ShelfLayoutManager::MaybeUpdateShelfBackground(AnimationChangeType type) {
const ShelfBackgroundType new_background_type(GetShelfBackgroundType());
if (new_background_type == shelf_background_type_)
return;
shelf_background_type_ = new_background_type;
for (auto& observer : observers_)
observer.OnBackgroundUpdated(shelf_background_type_, type);
}
| 16,012 |
17,961 | 0 | kex_new(struct ssh *ssh, char *proposal[PROPOSAL_MAX], struct kex **kexp)
{
struct kex *kex;
int r;
*kexp = NULL;
if ((kex = calloc(1, sizeof(*kex))) == NULL)
return SSH_ERR_ALLOC_FAIL;
if ((kex->peer = sshbuf_new()) == NULL ||
(kex->my = sshbuf_new()) == NULL) {
r = SSH_ERR_ALLOC_FAIL;
goto out;
}
if ((r = kex_prop2buf(kex->my, proposal)) != 0)
goto out;
kex->done = 0;
kex_reset_dispatch(ssh);
r = 0;
*kexp = kex;
out:
if (r != 0)
kex_free(kex);
return r;
}
| 16,013 |
13,553 | 0 | static HB_Error Get_Anchor( GPOS_Instance* gpi,
HB_Anchor* an,
HB_UShort glyph_index,
HB_Fixed* x_value,
HB_Fixed* y_value )
{
HB_Error error = HB_Err_Ok;
#ifdef HB_SUPPORT_MULTIPLE_MASTER
HB_GPOSHeader* gpos = gpi->gpos;
#endif
HB_UShort ap;
HB_Short pixel_value;
HB_UShort x_ppem, y_ppem;
HB_16Dot16 x_scale, y_scale;
x_ppem = gpi->font->x_ppem;
y_ppem = gpi->font->y_ppem;
x_scale = gpi->font->x_scale;
y_scale = gpi->font->y_scale;
switch ( an->PosFormat )
{
case 0:
/* The special case of an empty AnchorTable */
default:
return HB_Err_Not_Covered;
case 1:
*x_value = x_scale * an->af.af1.XCoordinate / 0x10000;
*y_value = y_scale * an->af.af1.YCoordinate / 0x10000;
break;
case 2:
if ( !gpi->dvi )
{
hb_uint32 n_points = 0;
ap = an->af.af2.AnchorPoint;
if (!gpi->font->klass->getPointInOutline)
goto no_contour_point;
error = gpi->font->klass->getPointInOutline(gpi->font, glyph_index, gpi->load_flags, ap, x_value, y_value, &n_points);
if (error)
return error;
/* if n_points is set to zero, we use the design coordinate value pair.
* This can happen e.g. for sbit glyphs. */
if (!n_points)
goto no_contour_point;
}
else
{
no_contour_point:
*x_value = x_scale * an->af.af3.XCoordinate / 0x10000;
*y_value = y_scale * an->af.af3.YCoordinate / 0x10000;
}
break;
case 3:
if ( !gpi->dvi )
{
_HB_OPEN_Get_Device( an->af.af3.DeviceTables[AF3_X_DEVICE_TABLE], x_ppem, &pixel_value );
*x_value = pixel_value << 6;
_HB_OPEN_Get_Device( an->af.af3.DeviceTables[AF3_Y_DEVICE_TABLE], y_ppem, &pixel_value );
*y_value = pixel_value << 6;
}
else
*x_value = *y_value = 0;
*x_value += x_scale * an->af.af3.XCoordinate / 0x10000;
*y_value += y_scale * an->af.af3.YCoordinate / 0x10000;
break;
case 4:
#ifdef HB_SUPPORT_MULTIPLE_MASTER
error = (gpos->mmfunc)( gpi->font, an->af.af4.XIdAnchor,
x_value, gpos->data );
if ( error )
return error;
error = (gpos->mmfunc)( gpi->font, an->af.af4.YIdAnchor,
y_value, gpos->data );
if ( error )
return error;
break;
#else
return ERR(HB_Err_Not_Covered);
#endif
}
return error;
}
| 16,014 |
188,517 | 1 | static void die(const char *fmt, ...) {
va_list ap;
va_start(ap, fmt);
vprintf(fmt, ap);
if(fmt[strlen(fmt)-1] != '\n')
printf("\n");
exit(EXIT_FAILURE);
}
| 16,015 |
39,753 | 0 | static struct dentry *proc_fdinfo_instantiate(struct inode *dir,
struct dentry *dentry, struct task_struct *task, const void *ptr)
{
unsigned fd = *(unsigned *)ptr;
struct inode *inode;
struct proc_inode *ei;
struct dentry *error = ERR_PTR(-ENOENT);
inode = proc_pid_make_inode(dir->i_sb, task);
if (!inode)
goto out;
ei = PROC_I(inode);
ei->fd = fd;
inode->i_mode = S_IFREG | S_IRUSR;
inode->i_fop = &proc_fdinfo_file_operations;
dentry->d_op = &tid_fd_dentry_operations;
d_add(dentry, inode);
/* Close the race of the process dying before we return the dentry */
if (tid_fd_revalidate(dentry, NULL))
error = NULL;
out:
return error;
}
| 16,016 |
107,310 | 0 | void Browser::OnStartDownload(DownloadItem* download, TabContents* tab) {
if (!window())
return;
#if defined(OS_CHROMEOS)
if (download->is_extension_install()) {
ExtensionService* service = profile_->GetExtensionService();
if (service && service->IsDownloadFromGallery(download->url(),
download->referrer_url())) {
return;
}
}
std::string arg = download->full_path().DirName().value();
FileBrowseUI::OpenPopup(profile_,
arg,
FileBrowseUI::kPopupWidth,
FileBrowseUI::kPopupHeight);
#else
window()->GetDownloadShelf()->AddDownload(new DownloadItemModel(download));
if (download->total_bytes() <= 0)
return;
if (download->is_extension_install() &&
!ExtensionService::IsDownloadFromMiniGallery(download->url()))
return;
TabContents* current_tab = GetSelectedTabContents();
if (platform_util::IsVisible(current_tab->GetNativeView()) &&
ui::Animation::ShouldRenderRichAnimation()) {
DownloadStartedAnimation::Show(current_tab);
}
#endif
if (tab->controller().IsInitialNavigation() &&
GetConstrainingContents(tab) == tab && tab_count() > 1) {
CloseContents(tab);
}
}
| 16,017 |
83,376 | 0 | static void ssl_write_extended_ms_ext( mbedtls_ssl_context *ssl,
unsigned char *buf, size_t *olen )
{
unsigned char *p = buf;
const unsigned char *end = ssl->out_msg + MBEDTLS_SSL_MAX_CONTENT_LEN;
*olen = 0;
if( ssl->conf->extended_ms == MBEDTLS_SSL_EXTENDED_MS_DISABLED ||
ssl->conf->max_minor_ver == MBEDTLS_SSL_MINOR_VERSION_0 )
{
return;
}
MBEDTLS_SSL_DEBUG_MSG( 3, ( "client hello, adding extended_master_secret "
"extension" ) );
if( end < p || (size_t)( end - p ) < 4 )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "buffer too small" ) );
return;
}
*p++ = (unsigned char)( ( MBEDTLS_TLS_EXT_EXTENDED_MASTER_SECRET >> 8 ) & 0xFF );
*p++ = (unsigned char)( ( MBEDTLS_TLS_EXT_EXTENDED_MASTER_SECRET ) & 0xFF );
*p++ = 0x00;
*p++ = 0x00;
*olen = 4;
}
| 16,018 |
48,988 | 0 | void brcmf_abort_scanning(struct brcmf_cfg80211_info *cfg)
{
struct escan_info *escan = &cfg->escan_info;
set_bit(BRCMF_SCAN_STATUS_ABORT, &cfg->scan_status);
if (cfg->scan_request) {
escan->escan_state = WL_ESCAN_STATE_IDLE;
brcmf_notify_escan_complete(cfg, escan->ifp, true, true);
}
clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);
clear_bit(BRCMF_SCAN_STATUS_ABORT, &cfg->scan_status);
}
| 16,019 |
11,735 | 0 | linux_lvm2_lv_set_name_completed_cb (DBusGMethodInvocation *context,
Device *device,
gboolean job_was_cancelled,
int status,
const char *stderr,
const char *stdout,
gpointer user_data)
{
if (WEXITSTATUS (status) == 0 && !job_was_cancelled)
{
dbus_g_method_return (context);
}
else
{
if (job_was_cancelled)
{
throw_error (context, ERROR_CANCELLED, "Job was cancelled");
}
else
{
throw_error (context,
ERROR_FAILED,
"Error setting name for LVM2 Logical Volume: lvrename exited with exit code %d: %s",
WEXITSTATUS (status),
stderr);
}
}
}
| 16,020 |
185,268 | 1 | void LinkChangeSerializerMarkupAccumulator::appendAttribute(StringBuilder& result, Element* element, const Attribute& attribute, Namespaces* namespaces)
{
if (m_replaceLinks && element->isURLAttribute(attribute) && !element->isJavaScriptURLAttribute(attribute)) {
String completeURL = m_document->completeURL(attribute.value());
if (m_replaceLinks->contains(completeURL)) {
// FIXME: Refactor MarkupAccumulator so it is easier to append an attribute like this.
result.append(' ');
result.append(attribute.name().toString());
result.appendLiteral("=\"");
if (!m_directoryName.isEmpty()) {
result.appendLiteral("./");
result.append(m_directoryName);
result.append('/');
}
result.append(m_replaceLinks->get(completeURL));
result.appendLiteral("\"");
return;
}
}
MarkupAccumulator::appendAttribute(result, element, attribute, namespaces);
}
| 16,021 |
36,710 | 0 | get_req_flags(unsigned char **buff_in, OM_uint32 bodysize,
OM_uint32 *req_flags)
{
unsigned int len;
if (**buff_in != (CONTEXT | 0x01))
return (0);
if (g_get_tag_and_length(buff_in, (CONTEXT | 0x01),
bodysize, &len) < 0)
return GSS_S_DEFECTIVE_TOKEN;
if (*(*buff_in)++ != BIT_STRING)
return GSS_S_DEFECTIVE_TOKEN;
if (*(*buff_in)++ != BIT_STRING_LENGTH)
return GSS_S_DEFECTIVE_TOKEN;
if (*(*buff_in)++ != BIT_STRING_PADDING)
return GSS_S_DEFECTIVE_TOKEN;
*req_flags = (OM_uint32) (*(*buff_in)++ >> 1);
return (0);
}
| 16,022 |
131,194 | 0 | static void activityLoggingGetterForIsolatedWorldsPerWorldBindingsLongAttributeAttributeSetterCallbackForMainWorld(v8::Local<v8::String>, v8::Local<v8::Value> jsValue, const v8::PropertyCallbackInfo<void>& info)
{
TRACE_EVENT_SET_SAMPLING_STATE("Blink", "DOMSetter");
TestObjectPythonV8Internal::activityLoggingGetterForIsolatedWorldsPerWorldBindingsLongAttributeAttributeSetterForMainWorld(jsValue, info);
TRACE_EVENT_SET_SAMPLING_STATE("V8", "V8Execution");
}
| 16,023 |
2,494 | 0 | bool smbXcli_session_is_authenticated(struct smbXcli_session *session)
{
const DATA_BLOB *application_key;
if (session->conn == NULL) {
return false;
}
/*
* If we have an application key we had a session key negotiated
* at auth time.
*/
if (session->conn->protocol >= PROTOCOL_SMB2_02) {
application_key = &session->smb2->application_key;
} else {
application_key = &session->smb1.application_key;
}
if (application_key->length == 0) {
return false;
}
return true;
}
| 16,024 |
114,755 | 0 | void OutOfProcessPPAPITest::SetUpCommandLine(CommandLine* command_line) {
PPAPITest::SetUpCommandLine(command_line);
command_line->AppendSwitch(switches::kPpapiOutOfProcess);
}
| 16,025 |
167,593 | 0 | int32_t SiteInstanceImpl::GetId() {
return id_;
}
| 16,026 |
126,723 | 0 | void BrowserView::ChildPreferredSizeChanged(View* child) {
Layout();
}
| 16,027 |
75,310 | 0 | int stb_vorbis_get_frame_short(stb_vorbis *f, int num_c, short **buffer, int num_samples)
{
float **output;
int len = stb_vorbis_get_frame_float(f, NULL, &output);
if (len > num_samples) len = num_samples;
if (len)
convert_samples_short(num_c, buffer, 0, f->channels, output, 0, len);
return len;
}
| 16,028 |
112 | 0 | PHP_FUNCTION(openssl_pkey_export_to_file)
{
struct php_x509_request req;
zval ** zpkey, * args = NULL;
char * passphrase = NULL; int passphrase_len = 0;
char * filename = NULL; int filename_len = 0;
long key_resource = -1;
EVP_PKEY * key;
BIO * bio_out = NULL;
const EVP_CIPHER * cipher;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "Zs|s!a!", &zpkey, &filename, &filename_len, &passphrase, &passphrase_len, &args) == FAILURE) {
return;
}
RETVAL_FALSE;
if (strlen(filename) != filename_len) {
return;
}
key = php_openssl_evp_from_zval(zpkey, 0, passphrase, 0, &key_resource TSRMLS_CC);
if (key == NULL) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "cannot get key from parameter 1");
RETURN_FALSE;
}
if (php_openssl_safe_mode_chk(filename TSRMLS_CC)) {
RETURN_FALSE;
}
PHP_SSL_REQ_INIT(&req);
if (PHP_SSL_REQ_PARSE(&req, args) == SUCCESS) {
bio_out = BIO_new_file(filename, "w");
if (passphrase && req.priv_key_encrypt) {
cipher = (EVP_CIPHER *) EVP_des_ede3_cbc();
} else {
cipher = NULL;
}
if (PEM_write_bio_PrivateKey(bio_out, key, cipher, (unsigned char *)passphrase, passphrase_len, NULL, NULL)) {
/* Success!
* If returning the output as a string, do so now */
RETVAL_TRUE;
}
}
PHP_SSL_REQ_DISPOSE(&req);
if (key_resource == -1 && key) {
EVP_PKEY_free(key);
}
if (bio_out) {
BIO_free(bio_out);
}
}
| 16,029 |
184,799 | 1 | static v8::Handle<v8::Value> acceptTransferListCallback(const v8::Arguments& args)
{
INC_STATS("DOM.TestSerializedScriptValueInterface.acceptTransferList");
if (args.Length() < 1)
return V8Proxy::throwNotEnoughArgumentsError();
TestSerializedScriptValueInterface* imp = V8TestSerializedScriptValueInterface::toNative(args.Holder());
MessagePortArray messagePortArrayTransferList;
ArrayBufferArray arrayBufferArrayTransferList;
if (args.Length() > 1) {
if (!extractTransferables(args[1], messagePortArrayTransferList, arrayBufferArrayTransferList))
return V8Proxy::throwTypeError("Could not extract transferables");
}
bool dataDidThrow = false;
RefPtr<SerializedScriptValue> data = SerializedScriptValue::create(args[0], &messagePortArrayTransferList, &arrayBufferArrayTransferList, dataDidThrow, args.GetIsolate());
if (dataDidThrow)
return v8::Undefined();
if (args.Length() <= 1) {
imp->acceptTransferList(data);
return v8::Handle<v8::Value>();
}
imp->acceptTransferList(data, messagePortArrayTransferList);
return v8::Handle<v8::Value>();
}
| 16,030 |
126,923 | 0 | void BrowserTabStripController::SetTabDataAt(content::WebContents* web_contents,
int model_index) {
TabRendererData data;
SetTabRendererDataFromModel(web_contents, model_index, &data, EXISTING_TAB);
tabstrip_->SetTabData(model_index, data);
}
| 16,031 |
69,022 | 0 | static void FLTReplacePropertyName(FilterEncodingNode *psFilterNode,
const char *pszOldName,
const char *pszNewName)
{
if (psFilterNode && pszOldName && pszNewName) {
if (psFilterNode->eType == FILTER_NODE_TYPE_PROPERTYNAME) {
if (psFilterNode->pszValue &&
strcasecmp(psFilterNode->pszValue, pszOldName) == 0) {
msFree(psFilterNode->pszValue);
psFilterNode->pszValue = msStrdup(pszNewName);
}
}
if (psFilterNode->psLeftNode)
FLTReplacePropertyName(psFilterNode->psLeftNode, pszOldName,
pszNewName);
if (psFilterNode->psRightNode)
FLTReplacePropertyName(psFilterNode->psRightNode, pszOldName,
pszNewName);
}
}
| 16,032 |
148,636 | 0 | void SkiaOutputSurfaceImpl::DiscardBackbuffer() {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
auto callback = base::BindOnce(&SkiaOutputSurfaceImplOnGpu::DiscardBackbuffer,
base::Unretained(impl_on_gpu_.get()));
task_sequence_->ScheduleOrRetainTask(std::move(callback),
std::vector<gpu::SyncToken>());
}
| 16,033 |
181,622 | 1 | entry_guard_obeys_restriction(const entry_guard_t *guard,
const entry_guard_restriction_t *rst)
{
tor_assert(guard);
if (! rst)
return 1; // No restriction? No problem.
return tor_memneq(guard->identity, rst->exclude_id, DIGEST_LEN);
}
| 16,034 |
127,802 | 0 | void PepperMediaDeviceManager::CloseDevice(const std::string& label) {
#if defined(ENABLE_WEBRTC)
GetMediaStreamDispatcher()->CloseDevice(label);
#endif
}
| 16,035 |
32,968 | 0 | static int sctp_autobind(struct sock *sk)
{
union sctp_addr autoaddr;
struct sctp_af *af;
__be16 port;
/* Initialize a local sockaddr structure to INADDR_ANY. */
af = sctp_sk(sk)->pf->af;
port = htons(inet_sk(sk)->inet_num);
af->inaddr_any(&autoaddr, port);
return sctp_do_bind(sk, &autoaddr, af->sockaddr_len);
}
| 16,036 |
50,526 | 0 | static int __init perf_workqueue_init(void)
{
perf_wq = create_singlethread_workqueue("perf");
WARN(!perf_wq, "failed to create perf workqueue\n");
return perf_wq ? 0 : -1;
}
| 16,037 |
17,981 | 0 | ssh_packet_log_type(u_char type)
{
switch (type) {
case SSH2_MSG_CHANNEL_DATA:
case SSH2_MSG_CHANNEL_EXTENDED_DATA:
case SSH2_MSG_CHANNEL_WINDOW_ADJUST:
return 0;
default:
return 1;
}
}
| 16,038 |
158,844 | 0 | bool Browser::PreHandleGestureEvent(content::WebContents* source,
const blink::WebGestureEvent& event) {
#if defined(OS_MACOSX)
if (event.GetType() == blink::WebInputEvent::kGestureDoubleTap) {
content::BrowserPluginGuestManager* guest_manager =
source->GetBrowserContext()->GetGuestManager();
if (guest_manager) {
const content::WebContents* guest_contents =
guest_manager->GetFullPageGuest(source);
if (guest_contents) {
const extensions::Extension* extension =
extensions::ProcessManager::Get(guest_contents->GetBrowserContext())
->GetExtensionForWebContents(guest_contents);
if (extension && extension->id() == extension_misc::kPdfExtensionId)
return true;
}
}
}
#endif // defined(OS_MACOSX)
if (app_name() == DevToolsWindow::kDevToolsApp)
return blink::WebInputEvent::IsPinchGestureEventType(event.GetType());
return false;
}
| 16,039 |
60,592 | 0 | static int snd_seq_ioctl_get_queue_info(struct snd_seq_client *client,
void *arg)
{
struct snd_seq_queue_info *info = arg;
struct snd_seq_queue *q;
q = queueptr(info->queue);
if (q == NULL)
return -EINVAL;
memset(info, 0, sizeof(*info));
info->queue = q->queue;
info->owner = q->owner;
info->locked = q->locked;
strlcpy(info->name, q->name, sizeof(info->name));
queuefree(q);
return 0;
}
| 16,040 |
19,605 | 0 | void exit_creds(struct task_struct *tsk)
{
struct cred *cred;
kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
atomic_read(&tsk->cred->usage),
read_cred_subscribers(tsk->cred));
cred = (struct cred *) tsk->real_cred;
tsk->real_cred = NULL;
validate_creds(cred);
alter_cred_subscribers(cred, -1);
put_cred(cred);
cred = (struct cred *) tsk->cred;
tsk->cred = NULL;
validate_creds(cred);
alter_cred_subscribers(cred, -1);
put_cred(cred);
cred = (struct cred *) tsk->replacement_session_keyring;
if (cred) {
tsk->replacement_session_keyring = NULL;
validate_creds(cred);
put_cred(cred);
}
}
| 16,041 |
17,317 | 0 | cpl_flush ()
{
struct cpelement *cpe, *p;
for (cpe = coproc_list.head; cpe; )
{
p = cpe;
cpe = cpe->next;
coproc_dispose (p->coproc);
cpe_dispose (p);
}
coproc_list.head = coproc_list.tail = 0;
coproc_list.ncoproc = 0;
}
| 16,042 |
54,470 | 0 | static int do_move_page_to_node_array(struct mm_struct *mm,
struct page_to_node *pm,
int migrate_all)
{
int err;
struct page_to_node *pp;
LIST_HEAD(pagelist);
down_read(&mm->mmap_sem);
/*
* Build a list of pages to migrate
*/
for (pp = pm; pp->node != MAX_NUMNODES; pp++) {
struct vm_area_struct *vma;
struct page *page;
err = -EFAULT;
vma = find_vma(mm, pp->addr);
if (!vma || pp->addr < vma->vm_start || !vma_migratable(vma))
goto set_status;
/* FOLL_DUMP to ignore special (like zero) pages */
page = follow_page(vma, pp->addr,
FOLL_GET | FOLL_SPLIT | FOLL_DUMP);
err = PTR_ERR(page);
if (IS_ERR(page))
goto set_status;
err = -ENOENT;
if (!page)
goto set_status;
pp->page = page;
err = page_to_nid(page);
if (err == pp->node)
/*
* Node already in the right place
*/
goto put_and_set;
err = -EACCES;
if (page_mapcount(page) > 1 &&
!migrate_all)
goto put_and_set;
if (PageHuge(page)) {
if (PageHead(page))
isolate_huge_page(page, &pagelist);
goto put_and_set;
}
err = isolate_lru_page(page);
if (!err) {
list_add_tail(&page->lru, &pagelist);
inc_zone_page_state(page, NR_ISOLATED_ANON +
page_is_file_cache(page));
}
put_and_set:
/*
* Either remove the duplicate refcount from
* isolate_lru_page() or drop the page ref if it was
* not isolated.
*/
put_page(page);
set_status:
pp->status = err;
}
err = 0;
if (!list_empty(&pagelist)) {
err = migrate_pages(&pagelist, new_page_node, NULL,
(unsigned long)pm, MIGRATE_SYNC, MR_SYSCALL);
if (err)
putback_movable_pages(&pagelist);
}
up_read(&mm->mmap_sem);
return err;
}
| 16,043 |
113,046 | 0 | bool DownloadItemImpl::GetOpenWhenComplete() const {
return open_when_complete_;
}
| 16,044 |
164,110 | 0 | bool AppCacheDatabase::FindOnlineWhiteListForCache(
int64_t cache_id,
std::vector<OnlineWhiteListRecord>* records) {
DCHECK(records && records->empty());
if (!LazyOpen(kDontCreate))
return false;
static const char kSql[] =
"SELECT cache_id, namespace_url, is_pattern FROM OnlineWhiteLists"
" WHERE cache_id = ?";
sql::Statement statement(db_->GetCachedStatement(SQL_FROM_HERE, kSql));
statement.BindInt64(0, cache_id);
while (statement.Step()) {
records->push_back(OnlineWhiteListRecord());
this->ReadOnlineWhiteListRecord(statement, &records->back());
DCHECK(records->back().cache_id == cache_id);
}
return statement.Succeeded();
}
| 16,045 |
96,788 | 0 | void fuse_abort_conn(struct fuse_conn *fc)
{
struct fuse_iqueue *fiq = &fc->iq;
spin_lock(&fc->lock);
if (fc->connected) {
struct fuse_dev *fud;
struct fuse_req *req, *next;
LIST_HEAD(to_end);
unsigned int i;
/* Background queuing checks fc->connected under bg_lock */
spin_lock(&fc->bg_lock);
fc->connected = 0;
spin_unlock(&fc->bg_lock);
fuse_set_initialized(fc);
list_for_each_entry(fud, &fc->devices, entry) {
struct fuse_pqueue *fpq = &fud->pq;
spin_lock(&fpq->lock);
fpq->connected = 0;
list_for_each_entry_safe(req, next, &fpq->io, list) {
req->out.h.error = -ECONNABORTED;
spin_lock(&req->waitq.lock);
set_bit(FR_ABORTED, &req->flags);
if (!test_bit(FR_LOCKED, &req->flags)) {
set_bit(FR_PRIVATE, &req->flags);
__fuse_get_request(req);
list_move(&req->list, &to_end);
}
spin_unlock(&req->waitq.lock);
}
for (i = 0; i < FUSE_PQ_HASH_SIZE; i++)
list_splice_tail_init(&fpq->processing[i],
&to_end);
spin_unlock(&fpq->lock);
}
spin_lock(&fc->bg_lock);
fc->blocked = 0;
fc->max_background = UINT_MAX;
flush_bg_queue(fc);
spin_unlock(&fc->bg_lock);
spin_lock(&fiq->waitq.lock);
fiq->connected = 0;
list_for_each_entry(req, &fiq->pending, list)
clear_bit(FR_PENDING, &req->flags);
list_splice_tail_init(&fiq->pending, &to_end);
while (forget_pending(fiq))
kfree(dequeue_forget(fiq, 1, NULL));
wake_up_all_locked(&fiq->waitq);
spin_unlock(&fiq->waitq.lock);
kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
end_polls(fc);
wake_up_all(&fc->blocked_waitq);
spin_unlock(&fc->lock);
end_requests(fc, &to_end);
} else {
spin_unlock(&fc->lock);
}
}
| 16,046 |
136,268 | 0 | static void parseSourceList(CSPSourceList& sourceList, String& sources)
{
Vector<UChar> characters;
sources.appendTo(characters);
sourceList.parse(characters.data(), characters.data() + characters.size());
}
| 16,047 |
62,172 | 0 | void BrowseForFolder(void) {
BROWSEINFOW bi;
LPITEMIDLIST pidl;
WCHAR *wpath;
size_t i;
HRESULT hr;
IShellItem *psi = NULL;
IShellItem *si_path = NULL; // Automatically freed
IFileOpenDialog *pfod = NULL;
WCHAR *fname;
char* tmp_path = NULL;
dialog_showing++;
if (nWindowsVersion >= WINDOWS_VISTA) {
INIT_VISTA_SHELL32;
if (IS_VISTA_SHELL32_AVAILABLE) {
hr = CoCreateInstance(&CLSID_FileOpenDialog, NULL, CLSCTX_INPROC,
&IID_IFileOpenDialog, (LPVOID)&pfod);
if (FAILED(hr)) {
uprintf("CoCreateInstance for FileOpenDialog failed: error %X\n", hr);
pfod = NULL; // Just in case
goto fallback;
}
hr = pfod->lpVtbl->SetOptions(pfod, FOS_PICKFOLDERS);
if (FAILED(hr)) {
uprintf("Failed to set folder option for FileOpenDialog: error %X\n", hr);
goto fallback;
}
wpath = utf8_to_wchar(szFolderPath);
fname = NULL;
if ((wpath != NULL) && (wcslen(wpath) >= 1)) {
for (i = wcslen(wpath) - 1; i != 0; i--) {
if (wpath[i] == L'\\') {
wpath[i] = 0;
fname = &wpath[i + 1];
break;
}
}
}
hr = (*pfSHCreateItemFromParsingName)(wpath, NULL, &IID_IShellItem, (LPVOID)&si_path);
if (SUCCEEDED(hr)) {
if (wpath != NULL) {
pfod->lpVtbl->SetFolder(pfod, si_path);
}
if (fname != NULL) {
pfod->lpVtbl->SetFileName(pfod, fname);
}
}
safe_free(wpath);
hr = pfod->lpVtbl->Show(pfod, hMainDialog);
if (SUCCEEDED(hr)) {
hr = pfod->lpVtbl->GetResult(pfod, &psi);
if (SUCCEEDED(hr)) {
psi->lpVtbl->GetDisplayName(psi, SIGDN_FILESYSPATH, &wpath);
tmp_path = wchar_to_utf8(wpath);
CoTaskMemFree(wpath);
if (tmp_path == NULL) {
uprintf("Could not convert path\n");
} else {
static_strcpy(szFolderPath, tmp_path);
safe_free(tmp_path);
}
} else {
uprintf("Failed to set folder option for FileOpenDialog: error %X\n", hr);
}
} else if ((hr & 0xFFFF) != ERROR_CANCELLED) {
uprintf("Could not show FileOpenDialog: error %X\n", hr);
goto fallback;
}
pfod->lpVtbl->Release(pfod);
dialog_showing--;
return;
}
fallback:
if (pfod != NULL) {
pfod->lpVtbl->Release(pfod);
}
}
INIT_XP_SHELL32;
memset(&bi, 0, sizeof(BROWSEINFOW));
bi.hwndOwner = hMainDialog;
bi.lpszTitle = utf8_to_wchar(lmprintf(MSG_106));
bi.lpfn = BrowseInfoCallback;
bi.ulFlags = BIF_RETURNFSANCESTORS | BIF_RETURNONLYFSDIRS |
BIF_DONTGOBELOWDOMAIN | BIF_EDITBOX | 0x00000200;
pidl = SHBrowseForFolderW(&bi);
if (pidl != NULL) {
CoTaskMemFree(pidl);
}
safe_free(bi.lpszTitle);
dialog_showing--;
}
| 16,048 |
103,798 | 0 | void RenderView::OnAccessibilityDoDefaultAction(int acc_obj_id) {
if (!accessibility_.get())
return;
WebAccessibilityObject obj = accessibility_->getObjectById(acc_obj_id);
if (!obj.isValid())
return;
obj.performDefaultAction();
}
| 16,049 |
110,677 | 0 | bool TextureManager::TextureInfo::MarkMipmapsGenerated(
const FeatureInfo* feature_info) {
if (!CanGenerateMipmaps(feature_info)) {
return false;
}
for (size_t ii = 0; ii < level_infos_.size(); ++ii) {
const TextureInfo::LevelInfo& info1 = level_infos_[ii][0];
GLsizei width = info1.width;
GLsizei height = info1.height;
GLsizei depth = info1.depth;
GLenum target = target_ == GL_TEXTURE_2D ? GL_TEXTURE_2D :
FaceIndexToGLTarget(ii);
int num_mips = ComputeMipMapCount(width, height, depth);
for (int level = 1; level < num_mips; ++level) {
width = std::max(1, width >> 1);
height = std::max(1, height >> 1);
depth = std::max(1, depth >> 1);
SetLevelInfo(feature_info,
target,
level,
info1.internal_format,
width,
height,
depth,
info1.border,
info1.format,
info1.type,
true);
}
}
return true;
}
| 16,050 |
187,525 | 1 | OMX_ERRORTYPE omx_video::use_input_buffer(
OMX_IN OMX_HANDLETYPE hComp,
OMX_INOUT OMX_BUFFERHEADERTYPE** bufferHdr,
OMX_IN OMX_U32 port,
OMX_IN OMX_PTR appData,
OMX_IN OMX_U32 bytes,
OMX_IN OMX_U8* buffer)
{
(void) hComp;
OMX_ERRORTYPE eRet = OMX_ErrorNone;
unsigned i = 0;
unsigned char *buf_addr = NULL;
DEBUG_PRINT_HIGH("use_input_buffer: port = %u appData = %p bytes = %u buffer = %p",(unsigned int)port,appData,(unsigned int)bytes,buffer);
if (bytes != m_sInPortDef.nBufferSize) {
DEBUG_PRINT_ERROR("ERROR: use_input_buffer: Size Mismatch!! "
"bytes[%u] != Port.nBufferSize[%u]", (unsigned int)bytes, (unsigned int)m_sInPortDef.nBufferSize);
return OMX_ErrorBadParameter;
}
if (!m_inp_mem_ptr) {
input_use_buffer = true;
m_inp_mem_ptr = (OMX_BUFFERHEADERTYPE*) \
calloc( (sizeof(OMX_BUFFERHEADERTYPE)), m_sInPortDef.nBufferCountActual);
if (m_inp_mem_ptr == NULL) {
DEBUG_PRINT_ERROR("ERROR: calloc() Failed for m_inp_mem_ptr");
return OMX_ErrorInsufficientResources;
}
DEBUG_PRINT_LOW("Successfully allocated m_inp_mem_ptr = %p", m_inp_mem_ptr);
m_pInput_pmem = (struct pmem *) calloc(sizeof (struct pmem), m_sInPortDef.nBufferCountActual);
if (m_pInput_pmem == NULL) {
DEBUG_PRINT_ERROR("ERROR: calloc() Failed for m_pInput_pmem");
return OMX_ErrorInsufficientResources;
}
#ifdef USE_ION
m_pInput_ion = (struct venc_ion *) calloc(sizeof (struct venc_ion), m_sInPortDef.nBufferCountActual);
if (m_pInput_ion == NULL) {
DEBUG_PRINT_ERROR("ERROR: calloc() Failed for m_pInput_ion");
return OMX_ErrorInsufficientResources;
}
#endif
for (i=0; i< m_sInPortDef.nBufferCountActual; i++) {
m_pInput_pmem[i].fd = -1;
#ifdef USE_ION
m_pInput_ion[i].ion_device_fd =-1;
m_pInput_ion[i].fd_ion_data.fd =-1;
m_pInput_ion[i].ion_alloc_data.handle = 0;
#endif
}
}
for (i=0; i< m_sInPortDef.nBufferCountActual; i++) {
if (BITMASK_ABSENT(&m_inp_bm_count,i)) {
break;
}
}
if (i < m_sInPortDef.nBufferCountActual) {
*bufferHdr = (m_inp_mem_ptr + i);
BITMASK_SET(&m_inp_bm_count,i);
(*bufferHdr)->pBuffer = (OMX_U8 *)buffer;
(*bufferHdr)->nSize = sizeof(OMX_BUFFERHEADERTYPE);
(*bufferHdr)->nVersion.nVersion = OMX_SPEC_VERSION;
(*bufferHdr)->nAllocLen = m_sInPortDef.nBufferSize;
(*bufferHdr)->pAppPrivate = appData;
(*bufferHdr)->nInputPortIndex = PORT_INDEX_IN;
if (!m_use_input_pmem) {
#ifdef USE_ION
#ifdef _MSM8974_
m_pInput_ion[i].ion_device_fd = alloc_map_ion_memory(m_sInPortDef.nBufferSize,
&m_pInput_ion[i].ion_alloc_data,
&m_pInput_ion[i].fd_ion_data,0);
#else
m_pInput_ion[i].ion_device_fd = alloc_map_ion_memory(m_sInPortDef.nBufferSize,
&m_pInput_ion[i].ion_alloc_data,
&m_pInput_ion[i].fd_ion_data,ION_FLAG_CACHED);
#endif
if (m_pInput_ion[i].ion_device_fd < 0) {
DEBUG_PRINT_ERROR("ERROR:ION device open() Failed");
return OMX_ErrorInsufficientResources;
}
m_pInput_pmem[i].fd = m_pInput_ion[i].fd_ion_data.fd;
#else
m_pInput_pmem[i].fd = open (MEM_DEVICE,O_RDWR);
if (m_pInput_pmem[i].fd == 0) {
m_pInput_pmem[i].fd = open (MEM_DEVICE,O_RDWR);
}
if (m_pInput_pmem[i] .fd < 0) {
DEBUG_PRINT_ERROR("ERROR: /dev/pmem_adsp open() Failed");
return OMX_ErrorInsufficientResources;
}
#endif
m_pInput_pmem[i].size = m_sInPortDef.nBufferSize;
m_pInput_pmem[i].offset = 0;
m_pInput_pmem[i].buffer = (OMX_U8 *)SECURE_BUFPTR;
if(!secure_session) {
m_pInput_pmem[i].buffer = (unsigned char *)mmap(
NULL,m_pInput_pmem[i].size,PROT_READ|PROT_WRITE,
MAP_SHARED,m_pInput_pmem[i].fd,0);
if (m_pInput_pmem[i].buffer == MAP_FAILED) {
DEBUG_PRINT_ERROR("ERROR: mmap() Failed");
close(m_pInput_pmem[i].fd);
#ifdef USE_ION
free_ion_memory(&m_pInput_ion[i]);
#endif
return OMX_ErrorInsufficientResources;
}
}
} else {
OMX_QCOM_PLATFORM_PRIVATE_PMEM_INFO *pParam = reinterpret_cast<OMX_QCOM_PLATFORM_PRIVATE_PMEM_INFO *>((*bufferHdr)->pAppPrivate);
DEBUG_PRINT_LOW("Inside qcom_ext with luma:(fd:%lu,offset:0x%x)", pParam->pmem_fd, (unsigned)pParam->offset);
if (pParam) {
m_pInput_pmem[i].fd = pParam->pmem_fd;
m_pInput_pmem[i].offset = pParam->offset;
m_pInput_pmem[i].size = m_sInPortDef.nBufferSize;
m_pInput_pmem[i].buffer = (unsigned char *)buffer;
DEBUG_PRINT_LOW("DBG:: pParam->pmem_fd = %u, pParam->offset = %u",
(unsigned int)pParam->pmem_fd, (unsigned int)pParam->offset);
} else {
DEBUG_PRINT_ERROR("ERROR: Invalid AppData given for PMEM i/p UseBuffer case");
return OMX_ErrorBadParameter;
}
}
DEBUG_PRINT_LOW("use_inp:: bufhdr = %p, pBuffer = %p, m_pInput_pmem[i].buffer = %p",
(*bufferHdr), (*bufferHdr)->pBuffer, m_pInput_pmem[i].buffer);
if ( dev_use_buf(&m_pInput_pmem[i],PORT_INDEX_IN,i) != true) {
DEBUG_PRINT_ERROR("ERROR: dev_use_buf() Failed for i/p buf");
return OMX_ErrorInsufficientResources;
}
} else {
DEBUG_PRINT_ERROR("ERROR: All buffers are already used, invalid use_buf call for "
"index = %u", i);
eRet = OMX_ErrorInsufficientResources;
}
return eRet;
}
| 16,051 |
137,533 | 0 | bool PrintWebViewHelper::GetPrintFrame(blink::WebLocalFrame** frame) {
DCHECK(frame);
blink::WebView* webView = render_view()->GetWebView();
DCHECK(webView);
if (!webView)
return false;
blink::WebLocalFrame* focusedFrame =
webView->focusedFrame()->toWebLocalFrame();
*frame = focusedFrame->hasSelection()
? focusedFrame
: webView->mainFrame()->toWebLocalFrame();
return true;
}
| 16,052 |
125,799 | 0 | void LogBytes(const std::vector<CharType>& data, std::string* out) {
#if defined(OS_WIN)
for (size_t i = 0; i < data.size(); ++i)
out->push_back(data[i]);
#else
static const size_t kMaxBytesToLog = 100;
for (size_t i = 0; i < std::min(data.size(), kMaxBytesToLog); ++i) {
if (isprint(data[i]))
out->push_back(data[i]);
else
out->append(StringPrintf("[%02X]", static_cast<unsigned char>(data[i])));
}
if (data.size() > kMaxBytesToLog) {
out->append(
StringPrintf(" and %u more bytes",
static_cast<unsigned>(data.size() - kMaxBytesToLog)));
}
#endif
}
| 16,053 |
123,610 | 0 | void InspectorPageAgent::didResizeMainFrame()
{
#if !OS(ANDROID)
if (m_enabled && m_state->getBoolean(PageAgentState::showSizeOnResize))
m_overlay->showAndHideViewSize(m_state->getBoolean(PageAgentState::showGridOnResize));
#endif
m_frontend->frameResized();
}
| 16,054 |
142,504 | 0 | void EndScroll(bool is_fling, float velocity_y) {
IncreaseTimestamp();
ui::GestureEventDetails event_details =
is_fling
? ui::GestureEventDetails(ui::ET_SCROLL_FLING_START, 0, velocity_y)
: ui::GestureEventDetails(ui::ET_GESTURE_SCROLL_END);
ui::GestureEvent event =
ui::GestureEvent(current_point_.x(), current_point_.y(), ui::EF_NONE,
timestamp_, event_details);
GetShelfLayoutManager()->ProcessGestureEvent(event);
}
| 16,055 |
133,725 | 0 | static bool semicolonSeparatedValueContainsJavaScriptURL(const String& value)
{
Vector<String> valueList;
value.split(';', valueList);
for (size_t i = 0; i < valueList.size(); ++i) {
if (protocolIsJavaScript(valueList[i]))
return true;
}
return false;
}
| 16,056 |
69,162 | 0 | ZEND_API void ZEND_FASTCALL zend_hash_apply(HashTable *ht, apply_func_t apply_func)
{
uint32_t idx;
Bucket *p;
int result;
IS_CONSISTENT(ht);
HT_ASSERT(GC_REFCOUNT(ht) == 1);
HASH_PROTECT_RECURSION(ht);
for (idx = 0; idx < ht->nNumUsed; idx++) {
p = ht->arData + idx;
if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
result = apply_func(&p->val);
if (result & ZEND_HASH_APPLY_REMOVE) {
_zend_hash_del_el(ht, HT_IDX_TO_HASH(idx), p);
}
if (result & ZEND_HASH_APPLY_STOP) {
break;
}
}
HASH_UNPROTECT_RECURSION(ht);
}
| 16,057 |
54,476 | 0 | int migrate_huge_page_move_mapping(struct address_space *mapping,
struct page *newpage, struct page *page)
{
int expected_count;
void **pslot;
spin_lock_irq(&mapping->tree_lock);
pslot = radix_tree_lookup_slot(&mapping->page_tree,
page_index(page));
expected_count = 2 + page_has_private(page);
if (page_count(page) != expected_count ||
radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
spin_unlock_irq(&mapping->tree_lock);
return -EAGAIN;
}
if (!page_freeze_refs(page, expected_count)) {
spin_unlock_irq(&mapping->tree_lock);
return -EAGAIN;
}
set_page_memcg(newpage, page_memcg(page));
newpage->index = page->index;
newpage->mapping = page->mapping;
get_page(newpage);
radix_tree_replace_slot(pslot, newpage);
page_unfreeze_refs(page, expected_count - 1);
spin_unlock_irq(&mapping->tree_lock);
return MIGRATEPAGE_SUCCESS;
}
| 16,058 |
95,327 | 0 | psf_d2s_clip_array (const double *src, short *dest, int count, int normalize)
{ double normfact, scaled_value ;
normfact = normalize ? (1.0 * 0x8000) : 1.0 ;
while (--count >= 0)
{ scaled_value = src [count] * normfact ;
if (CPU_CLIPS_POSITIVE == 0 && scaled_value >= (1.0 * 0x7FFF))
{ dest [count] = 0x7FFF ;
continue ;
} ;
if (CPU_CLIPS_NEGATIVE == 0 && scaled_value <= (-8.0 * 0x1000))
{ dest [count] = 0x8000 ;
continue ;
} ;
dest [count] = lrint (scaled_value) ;
} ;
return ;
} /* psf_d2s_clip_array */
| 16,059 |
91,748 | 0 | COMPS_HSList * comps_mrtree_get(COMPS_MRTree * rt, const char * key) {
COMPS_HSList * subnodes;
COMPS_HSListItem * it = NULL;
COMPS_MRTreeData * rtdata;
unsigned int offset, len, x;
char found, ended;
len = strlen(key);
offset = 0;
subnodes = rt->subnodes;
while (offset != len) {
found = 0;
for (it = subnodes->first; it != NULL; it=it->next) {
if (((COMPS_MRTreeData*)it->data)->key[0] == key[offset]) {
found = 1;
break;
}
}
if (!found)
return NULL;
rtdata = (COMPS_MRTreeData*)it->data;
for (x=1; ;x++) {
ended=0;
if (rtdata->key[x] == 0) ended += 1;
if (x == len - offset) ended += 2;
if (ended != 0) break;
if (key[offset+x] != rtdata->key[x]) break;
}
if (ended == 3) return rtdata->data;
else if (ended == 1) offset+=x;
else return NULL;
subnodes = ((COMPS_MRTreeData*)it->data)->subnodes;
}
if (it)
return ((COMPS_MRTreeData*)it->data)->data;
else return NULL;
}
| 16,060 |
78,371 | 0 | aes128_encrypt_ecb(const unsigned char *key, int keysize,
const unsigned char *input, size_t length, unsigned char *output)
{
unsigned char iv[EVP_MAX_IV_LENGTH] = { 0 };
return openssl_enc(EVP_aes_128_ecb(), key, iv, input, length, output);
}
| 16,061 |
374 | 0 | fz_drop_link_key(fz_context *ctx, void *key_)
{
fz_link_key *key = (fz_link_key *)key_;
if (fz_drop_imp(ctx, key, &key->refs))
fz_free(ctx, key);
}
| 16,062 |
83,501 | 0 | int is_valid_bugaddr(unsigned long addr)
{
unsigned short ud;
if (addr < TASK_SIZE_MAX)
return 0;
if (probe_kernel_address((unsigned short *)addr, ud))
return 0;
return ud == INSN_UD0 || ud == INSN_UD2;
}
| 16,063 |
110,225 | 0 | bool NaClProcessHost::Send(IPC::Message* msg) {
return process_->Send(msg);
}
| 16,064 |
38,577 | 0 | void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata,
unsigned long exp_time)
{
struct ieee80211_local *local = sdata->local;
struct sta_info *sta, *tmp;
mutex_lock(&local->sta_mtx);
list_for_each_entry_safe(sta, tmp, &local->sta_list, list) {
if (sdata != sta->sdata)
continue;
if (time_after(jiffies, sta->last_rx + exp_time)) {
sta_dbg(sta->sdata, "expiring inactive STA %pM\n",
sta->sta.addr);
if (ieee80211_vif_is_mesh(&sdata->vif) &&
test_sta_flag(sta, WLAN_STA_PS_STA))
atomic_dec(&sdata->u.mesh.ps.num_sta_ps);
WARN_ON(__sta_info_destroy(sta));
}
}
mutex_unlock(&local->sta_mtx);
}
| 16,065 |
68,171 | 0 | static void ip_cmsg_recv_retopts(struct msghdr *msg, struct sk_buff *skb)
{
unsigned char optbuf[sizeof(struct ip_options) + 40];
struct ip_options *opt = (struct ip_options *)optbuf;
if (IPCB(skb)->opt.optlen == 0)
return;
if (ip_options_echo(opt, skb)) {
msg->msg_flags |= MSG_CTRUNC;
return;
}
ip_options_undo(opt);
put_cmsg(msg, SOL_IP, IP_RETOPTS, opt->optlen, opt->__data);
}
| 16,066 |
12,496 | 0 | bus_activation_ref (BusActivation *activation)
{
_dbus_assert (activation->refcount > 0);
activation->refcount += 1;
return activation;
}
| 16,067 |
54,382 | 0 | static PHP_NAMED_FUNCTION(zif_zip_close)
{
zval * zip;
zip_rsrc *z_rsrc = NULL;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "r", &zip) == FAILURE) {
return;
}
if ((z_rsrc = (zip_rsrc *)zend_fetch_resource(Z_RES_P(zip), le_zip_dir_name, le_zip_dir)) == NULL) {
RETURN_FALSE;
}
/* really close the zip will break BC :-D */
zend_list_close(Z_RES_P(zip));
}
| 16,068 |
64,424 | 0 | incremental_gc_step(mrb_state *mrb, mrb_gc *gc)
{
size_t limit = 0, result = 0;
limit = (GC_STEP_SIZE/100) * gc->step_ratio;
while (result < limit) {
result += incremental_gc(mrb, gc, limit);
if (gc->state == MRB_GC_STATE_ROOT)
break;
}
gc->threshold = gc->live + GC_STEP_SIZE;
}
| 16,069 |
107,552 | 0 | void ewk_view_load_show(Evas_Object* ewkView)
{
DBG("ewkView=%p", ewkView);
evas_object_smart_callback_call(ewkView, "load,newwindow,show", 0);
}
| 16,070 |
27,408 | 0 | ip6_tnl_dev_uninit(struct net_device *dev)
{
struct ip6_tnl *t = netdev_priv(dev);
struct net *net = dev_net(dev);
struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
if (dev == ip6n->fb_tnl_dev) {
spin_lock_bh(&ip6_tnl_lock);
ip6n->tnls_wc[0] = NULL;
spin_unlock_bh(&ip6_tnl_lock);
} else {
ip6_tnl_unlink(ip6n, t);
}
ip6_tnl_dst_reset(t);
dev_put(dev);
}
| 16,071 |
172,085 | 0 | static inline void set_poll(poll_slot_t* ps, int fd, int type, int flags, uint32_t user_id)
{
ps->pfd.fd = fd;
ps->user_id = user_id;
if(ps->type != 0 && ps->type != type)
APPL_TRACE_ERROR("poll socket type should not changed! type was:%d, type now:%d", ps->type, type);
ps->type = type;
ps->flags = flags;
ps->pfd.events = flags2pevents(flags);
ps->pfd.revents = 0;
}
| 16,072 |
62,119 | 0 | static MagickBooleanType IsMNG(const unsigned char *magick,const size_t length)
{
if (length < 8)
return(MagickFalse);
if (memcmp(magick,"\212MNG\r\n\032\n",8) == 0)
return(MagickTrue);
return(MagickFalse);
}
| 16,073 |
124,763 | 0 | void RenderBlockFlow::setCollapsedBottomMargin(const MarginInfo& marginInfo)
{
if (marginInfo.canCollapseWithMarginAfter() && !marginInfo.canCollapseWithMarginBefore()) {
if (marginInfo.discardMargin()) {
setMustDiscardMarginAfter();
return;
}
setMaxMarginAfterValues(max(maxPositiveMarginAfter(), marginInfo.positiveMargin()), max(maxNegativeMarginAfter(), marginInfo.negativeMargin()));
if (!marginInfo.hasMarginAfterQuirk())
setHasMarginAfterQuirk(false);
if (marginInfo.hasMarginAfterQuirk() && !marginAfter()) {
setHasMarginAfterQuirk(true);
}
}
}
| 16,074 |
20,890 | 0 | static void vapic_exit(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
int idx;
if (!apic || !apic->vapic_addr)
return;
idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm_release_page_dirty(apic->vapic_page);
mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
srcu_read_unlock(&vcpu->kvm->srcu, idx);
}
| 16,075 |
87,376 | 0 | Mat_VarRead4(mat_t *mat,matvar_t *matvar)
{
int err;
size_t nelems = 1;
err = SafeMulDims(matvar, &nelems);
if ( err ) {
Mat_Critical("Integer multiplication overflow");
return;
}
(void)fseek((FILE*)mat->fp,matvar->internal->datapos,SEEK_SET);
switch ( matvar->class_type ) {
case MAT_C_DOUBLE:
matvar->data_size = sizeof(double);
err = SafeMul(&matvar->nbytes, nelems, matvar->data_size);
if ( err ) {
Mat_Critical("Integer multiplication overflow");
return;
}
if ( matvar->isComplex ) {
mat_complex_split_t *complex_data = ComplexMalloc(matvar->nbytes);
if ( NULL != complex_data ) {
matvar->data = complex_data;
ReadDoubleData(mat, (double*)complex_data->Re, matvar->data_type, nelems);
ReadDoubleData(mat, (double*)complex_data->Im, matvar->data_type, nelems);
}
else {
Mat_Critical("Couldn't allocate memory for the complex data");
}
} else {
matvar->data = malloc(matvar->nbytes);
if ( NULL != matvar->data ) {
ReadDoubleData(mat, (double*)matvar->data, matvar->data_type, nelems);
}
else {
Mat_Critical("Couldn't allocate memory for the data");
}
}
/* Update data type to match format of matvar->data */
matvar->data_type = MAT_T_DOUBLE;
break;
case MAT_C_CHAR:
matvar->data_size = 1;
matvar->nbytes = nelems;
matvar->data = malloc(matvar->nbytes);
if ( NULL != matvar->data ) {
ReadUInt8Data(mat, (mat_uint8_t*)matvar->data, matvar->data_type, nelems);
}
else {
Mat_Critical("Couldn't allocate memory for the data");
}
matvar->data_type = MAT_T_UINT8;
break;
case MAT_C_SPARSE:
matvar->data_size = sizeof(mat_sparse_t);
matvar->data = malloc(matvar->data_size);
if ( NULL != matvar->data ) {
double tmp;
int i;
mat_sparse_t* sparse;
long fpos;
enum matio_types data_type = MAT_T_DOUBLE;
/* matvar->dims[1] either is 3 for real or 4 for complex sparse */
matvar->isComplex = matvar->dims[1] == 4 ? 1 : 0;
sparse = (mat_sparse_t*)matvar->data;
sparse->nir = matvar->dims[0] - 1;
sparse->nzmax = sparse->nir;
sparse->ir = (mat_int32_t*)malloc(sparse->nir*sizeof(mat_int32_t));
if ( sparse->ir != NULL ) {
ReadInt32Data(mat, sparse->ir, data_type, sparse->nir);
for ( i = 0; i < sparse->nir; i++ )
sparse->ir[i] = sparse->ir[i] - 1;
} else {
free(matvar->data);
matvar->data = NULL;
Mat_Critical("Couldn't allocate memory for the sparse row array");
return;
}
ReadDoubleData(mat, &tmp, data_type, 1);
matvar->dims[0] = (size_t)tmp;
fpos = ftell((FILE*)mat->fp);
if ( fpos == -1L ) {
free(sparse->ir);
free(matvar->data);
matvar->data = NULL;
Mat_Critical("Couldn't determine file position");
return;
}
(void)fseek((FILE*)mat->fp,sparse->nir*Mat_SizeOf(data_type),
SEEK_CUR);
ReadDoubleData(mat, &tmp, data_type, 1);
if ( tmp > INT_MAX-1 || tmp < 0 ) {
free(sparse->ir);
free(matvar->data);
matvar->data = NULL;
Mat_Critical("Invalid column dimension for sparse matrix");
return;
}
matvar->dims[1] = (size_t)tmp;
(void)fseek((FILE*)mat->fp,fpos,SEEK_SET);
if ( matvar->dims[1] > INT_MAX-1 ) {
free(sparse->ir);
free(matvar->data);
matvar->data = NULL;
Mat_Critical("Invalid column dimension for sparse matrix");
return;
}
sparse->njc = (int)matvar->dims[1] + 1;
sparse->jc = (mat_int32_t*)malloc(sparse->njc*sizeof(mat_int32_t));
if ( sparse->jc != NULL ) {
mat_int32_t *jc;
jc = (mat_int32_t*)malloc(sparse->nir*sizeof(mat_int32_t));
if ( jc != NULL ) {
int j = 0;
sparse->jc[0] = 0;
ReadInt32Data(mat, jc, data_type, sparse->nir);
for ( i = 1; i < sparse->njc-1; i++ ) {
while ( j < sparse->nir && jc[j] <= i )
j++;
sparse->jc[i] = j;
}
free(jc);
/* terminating nnz */
sparse->jc[sparse->njc-1] = sparse->nir;
} else {
free(sparse->jc);
free(sparse->ir);
free(matvar->data);
matvar->data = NULL;
Mat_Critical("Couldn't allocate memory for the sparse index array");
return;
}
} else {
free(sparse->ir);
free(matvar->data);
matvar->data = NULL;
Mat_Critical("Couldn't allocate memory for the sparse index array");
return;
}
ReadDoubleData(mat, &tmp, data_type, 1);
sparse->ndata = sparse->nir;
data_type = matvar->data_type;
if ( matvar->isComplex ) {
mat_complex_split_t *complex_data =
ComplexMalloc(sparse->ndata*Mat_SizeOf(data_type));
if ( NULL != complex_data ) {
sparse->data = complex_data;
#if defined(EXTENDED_SPARSE)
switch ( data_type ) {
case MAT_T_DOUBLE:
ReadDoubleData(mat, (double*)complex_data->Re,
data_type, sparse->ndata);
ReadDoubleData(mat, &tmp, data_type, 1);
ReadDoubleData(mat, (double*)complex_data->Im,
data_type, sparse->ndata);
ReadDoubleData(mat, &tmp, data_type, 1);
break;
case MAT_T_SINGLE:
{
float tmp2;
ReadSingleData(mat, (float*)complex_data->Re,
data_type, sparse->ndata);
ReadSingleData(mat, &tmp2, data_type, 1);
ReadSingleData(mat, (float*)complex_data->Im,
data_type, sparse->ndata);
ReadSingleData(mat, &tmp2, data_type, 1);
break;
}
case MAT_T_INT32:
{
mat_int32_t tmp2;
ReadInt32Data(mat, (mat_int32_t*)complex_data->Re,
data_type, sparse->ndata);
ReadInt32Data(mat, &tmp2, data_type, 1);
ReadInt32Data(mat, (mat_int32_t*)complex_data->Im,
data_type, sparse->ndata);
ReadInt32Data(mat, &tmp2, data_type, 1);
break;
}
case MAT_T_INT16:
{
mat_int16_t tmp2;
ReadInt16Data(mat, (mat_int16_t*)complex_data->Re,
data_type, sparse->ndata);
ReadInt16Data(mat, &tmp2, data_type, 1);
ReadInt16Data(mat, (mat_int16_t*)complex_data->Im,
data_type, sparse->ndata);
ReadInt16Data(mat, &tmp2, data_type, 1);
break;
}
case MAT_T_UINT16:
{
mat_uint16_t tmp2;
ReadUInt16Data(mat, (mat_uint16_t*)complex_data->Re,
data_type, sparse->ndata);
ReadUInt16Data(mat, &tmp2, data_type, 1);
ReadUInt16Data(mat, (mat_uint16_t*)complex_data->Im,
data_type, sparse->ndata);
ReadUInt16Data(mat, &tmp2, data_type, 1);
break;
}
case MAT_T_UINT8:
{
mat_uint8_t tmp2;
ReadUInt8Data(mat, (mat_uint8_t*)complex_data->Re,
data_type, sparse->ndata);
ReadUInt8Data(mat, &tmp2, data_type, 1);
ReadUInt8Data(mat, (mat_uint8_t*)complex_data->Im,
data_type, sparse->ndata);
ReadUInt8Data(mat, &tmp2, data_type, 1);
break;
}
default:
free(complex_data->Re);
free(complex_data->Im);
free(complex_data);
free(sparse->jc);
free(sparse->ir);
free(matvar->data);
matvar->data = NULL;
Mat_Critical("Mat_VarRead4: %d is not a supported data type for "
"extended sparse", data_type);
return;
}
#else
ReadDoubleData(mat, (double*)complex_data->Re,
data_type, sparse->ndata);
ReadDoubleData(mat, &tmp, data_type, 1);
ReadDoubleData(mat, (double*)complex_data->Im,
data_type, sparse->ndata);
ReadDoubleData(mat, &tmp, data_type, 1);
#endif
}
else {
free(sparse->jc);
free(sparse->ir);
free(matvar->data);
matvar->data = NULL;
Mat_Critical("Couldn't allocate memory for the complex sparse data");
return;
}
} else {
sparse->data = malloc(sparse->ndata*Mat_SizeOf(data_type));
if ( sparse->data != NULL ) {
#if defined(EXTENDED_SPARSE)
switch ( data_type ) {
case MAT_T_DOUBLE:
ReadDoubleData(mat, (double*)sparse->data,
data_type, sparse->ndata);
ReadDoubleData(mat, &tmp, data_type, 1);
break;
case MAT_T_SINGLE:
{
float tmp2;
ReadSingleData(mat, (float*)sparse->data,
data_type, sparse->ndata);
ReadSingleData(mat, &tmp2, data_type, 1);
break;
}
case MAT_T_INT32:
{
mat_int32_t tmp2;
ReadInt32Data(mat, (mat_int32_t*)sparse->data,
data_type, sparse->ndata);
ReadInt32Data(mat, &tmp2, data_type, 1);
break;
}
case MAT_T_INT16:
{
mat_int16_t tmp2;
ReadInt16Data(mat, (mat_int16_t*)sparse->data,
data_type, sparse->ndata);
ReadInt16Data(mat, &tmp2, data_type, 1);
break;
}
case MAT_T_UINT16:
{
mat_uint16_t tmp2;
ReadUInt16Data(mat, (mat_uint16_t*)sparse->data,
data_type, sparse->ndata);
ReadUInt16Data(mat, &tmp2, data_type, 1);
break;
}
case MAT_T_UINT8:
{
mat_uint8_t tmp2;
ReadUInt8Data(mat, (mat_uint8_t*)sparse->data,
data_type, sparse->ndata);
ReadUInt8Data(mat, &tmp2, data_type, 1);
break;
}
default:
free(sparse->data);
free(sparse->jc);
free(sparse->ir);
free(matvar->data);
matvar->data = NULL;
Mat_Critical("Mat_VarRead4: %d is not a supported data type for "
"extended sparse", data_type);
return;
}
#else
ReadDoubleData(mat, (double*)sparse->data, data_type, sparse->ndata);
ReadDoubleData(mat, &tmp, data_type, 1);
#endif
} else {
free(sparse->jc);
free(sparse->ir);
free(matvar->data);
matvar->data = NULL;
Mat_Critical("Couldn't allocate memory for the sparse data");
return;
}
}
break;
}
else {
Mat_Critical("Couldn't allocate memory for the data");
return;
}
default:
Mat_Critical("MAT V4 data type error");
return;
}
return;
}
| 16,076 |
112,616 | 0 | void Document::webkitDidEnterFullScreenForElement(Element*)
{
if (!m_fullScreenElement)
return;
if (!attached() || inPageCache())
return;
m_fullScreenElement->didBecomeFullscreenElement();
m_fullScreenChangeDelayTimer.startOneShot(0);
}
| 16,077 |
138,797 | 0 | void RenderFrameHostImpl::SendJavaScriptDialogReply(
IPC::Message* reply_msg,
bool success,
const base::string16& user_input) {
FrameHostMsg_RunJavaScriptDialog::WriteReplyParams(reply_msg, success,
user_input);
Send(reply_msg);
}
| 16,078 |
170,745 | 0 | void OMXNodeInstance::onMessage(const omx_message &msg) {
const sp<GraphicBufferSource>& bufferSource(getGraphicBufferSource());
if (msg.type == omx_message::FILL_BUFFER_DONE) {
OMX_BUFFERHEADERTYPE *buffer =
findBufferHeader(msg.u.extended_buffer_data.buffer);
{
Mutex::Autolock _l(mDebugLock);
mOutputBuffersWithCodec.remove(buffer);
CLOG_BUMPED_BUFFER(
FBD, WITH_STATS(FULL_BUFFER(msg.u.extended_buffer_data.buffer, buffer)));
unbumpDebugLevel_l(kPortIndexOutput);
}
BufferMeta *buffer_meta =
static_cast<BufferMeta *>(buffer->pAppPrivate);
buffer_meta->CopyFromOMX(buffer);
if (bufferSource != NULL) {
bufferSource->codecBufferFilled(buffer);
omx_message newMsg = msg;
newMsg.u.extended_buffer_data.timestamp = buffer->nTimeStamp;
mObserver->onMessage(newMsg);
return;
}
} else if (msg.type == omx_message::EMPTY_BUFFER_DONE) {
OMX_BUFFERHEADERTYPE *buffer =
findBufferHeader(msg.u.buffer_data.buffer);
{
Mutex::Autolock _l(mDebugLock);
mInputBuffersWithCodec.remove(buffer);
CLOG_BUMPED_BUFFER(
EBD, WITH_STATS(EMPTY_BUFFER(msg.u.buffer_data.buffer, buffer)));
}
if (bufferSource != NULL) {
bufferSource->codecBufferEmptied(buffer);
return;
}
}
mObserver->onMessage(msg);
}
| 16,079 |
66,027 | 0 | int yr_arena_create(
size_t initial_size,
int flags,
YR_ARENA** arena)
{
YR_ARENA* new_arena;
YR_ARENA_PAGE* new_page;
*arena = NULL;
new_arena = (YR_ARENA*) yr_malloc(sizeof(YR_ARENA));
if (new_arena == NULL)
return ERROR_INSUFFICIENT_MEMORY;
new_page = _yr_arena_new_page(initial_size);
if (new_page == NULL)
{
yr_free(new_arena);
return ERROR_INSUFFICIENT_MEMORY;
}
new_arena->page_list_head = new_page;
new_arena->current_page = new_page;
new_arena->flags = flags | ARENA_FLAGS_COALESCED;
*arena = new_arena;
return ERROR_SUCCESS;
}
| 16,080 |
179,191 | 1 | static void br_multicast_del_pg(struct net_bridge *br,
struct net_bridge_port_group *pg)
{
struct net_bridge_mdb_htable *mdb;
struct net_bridge_mdb_entry *mp;
struct net_bridge_port_group *p;
struct net_bridge_port_group __rcu **pp;
mdb = mlock_dereference(br->mdb, br);
mp = br_mdb_ip_get(mdb, &pg->addr);
if (WARN_ON(!mp))
return;
for (pp = &mp->ports;
(p = mlock_dereference(*pp, br)) != NULL;
pp = &p->next) {
if (p != pg)
continue;
rcu_assign_pointer(*pp, p->next);
hlist_del_init(&p->mglist);
del_timer(&p->timer);
call_rcu_bh(&p->rcu, br_multicast_free_pg);
if (!mp->ports && !mp->mglist &&
netif_running(br->dev))
mod_timer(&mp->timer, jiffies);
return;
}
WARN_ON(1);
}
| 16,081 |
39,406 | 0 | static void print_errors(void)
{
DPRINT("");
if (ST0 & ST0_ECE) {
pr_cont("Recalibrate failed!");
} else if (ST2 & ST2_CRC) {
pr_cont("data CRC error");
tell_sector();
} else if (ST1 & ST1_CRC) {
pr_cont("CRC error");
tell_sector();
} else if ((ST1 & (ST1_MAM | ST1_ND)) ||
(ST2 & ST2_MAM)) {
if (!probing) {
pr_cont("sector not found");
tell_sector();
} else
pr_cont("probe failed...");
} else if (ST2 & ST2_WC) { /* seek error */
pr_cont("wrong cylinder");
} else if (ST2 & ST2_BC) { /* cylinder marked as bad */
pr_cont("bad cylinder");
} else {
pr_cont("unknown error. ST[0..2] are: 0x%x 0x%x 0x%x",
ST0, ST1, ST2);
tell_sector();
}
pr_cont("\n");
}
| 16,082 |
17,026 | 0 | void OxideQQuickWebViewPrivate::CloseRequested() {
Q_Q(OxideQQuickWebView);
emit q->closeRequested();
}
| 16,083 |
59,664 | 0 | static int csnmp_config_add_data_values(data_definition_t *dd,
oconfig_item_t *ci) {
if (ci->values_num < 1) {
WARNING("snmp plugin: `Values' needs at least one argument.");
return (-1);
}
for (int i = 0; i < ci->values_num; i++)
if (ci->values[i].type != OCONFIG_TYPE_STRING) {
WARNING("snmp plugin: `Values' needs only string argument.");
return (-1);
}
sfree(dd->values);
dd->values_len = 0;
dd->values = malloc(sizeof(*dd->values) * ci->values_num);
if (dd->values == NULL)
return (-1);
dd->values_len = (size_t)ci->values_num;
for (int i = 0; i < ci->values_num; i++) {
dd->values[i].oid_len = MAX_OID_LEN;
if (NULL == snmp_parse_oid(ci->values[i].value.string, dd->values[i].oid,
&dd->values[i].oid_len)) {
ERROR("snmp plugin: snmp_parse_oid (%s) failed.",
ci->values[i].value.string);
free(dd->values);
dd->values = NULL;
dd->values_len = 0;
return (-1);
}
}
return (0);
} /* int csnmp_config_add_data_instance */
| 16,084 |
80,691 | 0 | GF_Err blnk_dump(GF_Box *a, FILE * trace)
{
GF_TextBlinkBox*p = (GF_TextBlinkBox*)a;
gf_isom_box_dump_start(a, "TextBlinkBox", trace);
fprintf(trace, "start_charoffset=\"%d\" end_charoffset=\"%d\">\n", p->startcharoffset, p->endcharoffset);
gf_isom_box_dump_done("TextBlinkBox", a, trace);
return GF_OK;
}
| 16,085 |
55,861 | 0 | static void do_SAK_work(struct work_struct *work)
{
struct tty_struct *tty =
container_of(work, struct tty_struct, SAK_work);
__do_SAK(tty);
}
| 16,086 |
97,949 | 0 | void RenderView::OnExecuteCode(const ViewMsg_ExecuteCode_Params& params) {
WebFrame* main_frame = webview() ? webview()->mainFrame() : NULL;
if (!main_frame) {
Send(new ViewMsg_ExecuteCodeFinished(routing_id_, params.request_id,
false));
return;
}
WebDataSource* ds = main_frame->dataSource();
NavigationState* navigation_state = NavigationState::FromDataSource(ds);
if (!navigation_state->user_script_idle_scheduler()->has_run()) {
pending_code_execution_queue_.push(
linked_ptr<ViewMsg_ExecuteCode_Params>(
new ViewMsg_ExecuteCode_Params(params)));
return;
}
ExecuteCodeImpl(main_frame, params);
}
| 16,087 |
56,981 | 0 | static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_req *req)
{
struct inode *inode = req->inode;
struct fuse_inode *fi = get_fuse_inode(inode);
mapping_set_error(inode->i_mapping, req->out.h.error);
spin_lock(&fc->lock);
while (req->misc.write.next) {
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_write_in *inarg = &req->misc.write.in;
struct fuse_req *next = req->misc.write.next;
req->misc.write.next = next->misc.write.next;
next->misc.write.next = NULL;
next->ff = fuse_file_get(req->ff);
list_add(&next->writepages_entry, &fi->writepages);
/*
* Skip fuse_flush_writepages() to make it easy to crop requests
* based on primary request size.
*
* 1st case (trivial): there are no concurrent activities using
* fuse_set/release_nowrite. Then we're on safe side because
* fuse_flush_writepages() would call fuse_send_writepage()
* anyway.
*
* 2nd case: someone called fuse_set_nowrite and it is waiting
* now for completion of all in-flight requests. This happens
* rarely and no more than once per page, so this should be
* okay.
*
* 3rd case: someone (e.g. fuse_do_setattr()) is in the middle
* of fuse_set_nowrite..fuse_release_nowrite section. The fact
* that fuse_set_nowrite returned implies that all in-flight
* requests were completed along with all of their secondary
* requests. Further primary requests are blocked by negative
* writectr. Hence there cannot be any in-flight requests and
* no invocations of fuse_writepage_end() while we're in
* fuse_set_nowrite..fuse_release_nowrite section.
*/
fuse_send_writepage(fc, next, inarg->offset + inarg->size);
}
fi->writectr--;
fuse_writepage_finish(fc, req);
spin_unlock(&fc->lock);
fuse_writepage_free(fc, req);
}
| 16,088 |
20,998 | 0 | static void smaps_pte_entry(pte_t ptent, unsigned long addr,
unsigned long ptent_size, struct mm_walk *walk)
{
struct mem_size_stats *mss = walk->private;
struct vm_area_struct *vma = mss->vma;
struct page *page;
int mapcount;
if (is_swap_pte(ptent)) {
mss->swap += ptent_size;
return;
}
if (!pte_present(ptent))
return;
page = vm_normal_page(vma, addr, ptent);
if (!page)
return;
if (PageAnon(page))
mss->anonymous += ptent_size;
mss->resident += ptent_size;
/* Accumulate the size in pages that have been accessed. */
if (pte_young(ptent) || PageReferenced(page))
mss->referenced += ptent_size;
mapcount = page_mapcount(page);
if (mapcount >= 2) {
if (pte_dirty(ptent) || PageDirty(page))
mss->shared_dirty += ptent_size;
else
mss->shared_clean += ptent_size;
mss->pss += (ptent_size << PSS_SHIFT) / mapcount;
} else {
if (pte_dirty(ptent) || PageDirty(page))
mss->private_dirty += ptent_size;
else
mss->private_clean += ptent_size;
mss->pss += (ptent_size << PSS_SHIFT);
}
}
| 16,089 |
/*
 * Compute the resulting value-tracking state of @dst_reg after an
 * ALU/ALU64 operation with scalar @src_reg: the signed/unsigned
 * min/max bounds and the known-bits tnum in var_off.
 *
 * The tracking is deliberately conservative: whenever overflow or an
 * unanalyzable combination is possible, the affected bounds collapse
 * to the full range and are only refined afterwards from var_off.
 * Always returns 0; "we know nothing" outcomes are encoded in the
 * register state, not in the return code.
 */
static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
				      struct bpf_insn *insn,
				      struct bpf_reg_state *dst_reg,
				      struct bpf_reg_state src_reg)
{
	struct bpf_reg_state *regs = cur_regs(env);
	u8 opcode = BPF_OP(insn->code);
	bool src_known, dst_known;
	s64 smin_val, smax_val;
	u64 umin_val, umax_val;
	u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;

	if (insn_bitness == 32) {
		/* Relevant for 32-bit RSH: Information can propagate towards
		 * LSB, so it isn't sufficient to only truncate the output to
		 * 32 bits.
		 */
		coerce_reg_to_size(dst_reg, 4);
		coerce_reg_to_size(&src_reg, 4);
	}

	smin_val = src_reg.smin_value;
	smax_val = src_reg.smax_value;
	umin_val = src_reg.umin_value;
	umax_val = src_reg.umax_value;
	src_known = tnum_is_const(src_reg.var_off);
	dst_known = tnum_is_const(dst_reg->var_off);

	/* A const var_off with non-singleton or inverted bounds means the
	 * state is internally inconsistent (e.g. produced by dead code).
	 */
	if ((src_known && (smin_val != smax_val || umin_val != umax_val)) ||
	    smin_val > smax_val || umin_val > umax_val) {
		/* Taint dst register if offset had invalid bounds derived from
		 * e.g. dead branches.
		 */
		__mark_reg_unknown(dst_reg);
		return 0;
	}

	/* Only ADD, SUB and AND are tracked precisely enough to keep bounds
	 * when the source value is not a known constant.
	 */
	if (!src_known &&
	    opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
		__mark_reg_unknown(dst_reg);
		return 0;
	}

	switch (opcode) {
	case BPF_ADD:
		if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
		    signed_add_overflows(dst_reg->smax_value, smax_val)) {
			dst_reg->smin_value = S64_MIN;
			dst_reg->smax_value = S64_MAX;
		} else {
			dst_reg->smin_value += smin_val;
			dst_reg->smax_value += smax_val;
		}
		if (dst_reg->umin_value + umin_val < umin_val ||
		    dst_reg->umax_value + umax_val < umax_val) {
			dst_reg->umin_value = 0;
			dst_reg->umax_value = U64_MAX;
		} else {
			dst_reg->umin_value += umin_val;
			dst_reg->umax_value += umax_val;
		}
		dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
		break;
	case BPF_SUB:
		if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
		    signed_sub_overflows(dst_reg->smax_value, smin_val)) {
			/* Overflow possible, we know nothing */
			dst_reg->smin_value = S64_MIN;
			dst_reg->smax_value = S64_MAX;
		} else {
			dst_reg->smin_value -= smax_val;
			dst_reg->smax_value -= smin_val;
		}
		if (dst_reg->umin_value < umax_val) {
			/* Overflow possible, we know nothing */
			dst_reg->umin_value = 0;
			dst_reg->umax_value = U64_MAX;
		} else {
			/* Cannot overflow (as long as bounds are consistent) */
			dst_reg->umin_value -= umax_val;
			dst_reg->umax_value -= umin_val;
		}
		dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off);
		break;
	case BPF_MUL:
		dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off);
		if (smin_val < 0 || dst_reg->smin_value < 0) {
			/* Ain't nobody got time to multiply that sign */
			__mark_reg_unbounded(dst_reg);
			__update_reg_bounds(dst_reg);
			break;
		}
		/* Both values are positive, so we can work with unsigned and
		 * copy the result to signed (unless it exceeds S64_MAX).
		 */
		if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) {
			/* Potential overflow, we know nothing */
			__mark_reg_unbounded(dst_reg);
			/* (except what we can learn from the var_off) */
			__update_reg_bounds(dst_reg);
			break;
		}
		dst_reg->umin_value *= umin_val;
		dst_reg->umax_value *= umax_val;
		if (dst_reg->umax_value > S64_MAX) {
			/* Overflow possible, we know nothing */
			dst_reg->smin_value = S64_MIN;
			dst_reg->smax_value = S64_MAX;
		} else {
			dst_reg->smin_value = dst_reg->umin_value;
			dst_reg->smax_value = dst_reg->umax_value;
		}
		break;
	case BPF_AND:
		if (src_known && dst_known) {
			__mark_reg_known(dst_reg, dst_reg->var_off.value &
						  src_reg.var_off.value);
			break;
		}
		/* We get our minimum from the var_off, since that's inherently
		 * bitwise.  Our maximum is the minimum of the operands' maxima.
		 */
		dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off);
		dst_reg->umin_value = dst_reg->var_off.value;
		dst_reg->umax_value = min(dst_reg->umax_value, umax_val);
		if (dst_reg->smin_value < 0 || smin_val < 0) {
			/* Lose signed bounds when ANDing negative numbers,
			 * ain't nobody got time for that.
			 */
			dst_reg->smin_value = S64_MIN;
			dst_reg->smax_value = S64_MAX;
		} else {
			/* ANDing two positives gives a positive, so safe to
			 * cast result into s64.
			 */
			dst_reg->smin_value = dst_reg->umin_value;
			dst_reg->smax_value = dst_reg->umax_value;
		}
		/* We may learn something more from the var_off */
		__update_reg_bounds(dst_reg);
		break;
	case BPF_OR:
		if (src_known && dst_known) {
			__mark_reg_known(dst_reg, dst_reg->var_off.value |
						  src_reg.var_off.value);
			break;
		}
		/* We get our maximum from the var_off, and our minimum is the
		 * maximum of the operands' minima
		 */
		dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off);
		dst_reg->umin_value = max(dst_reg->umin_value, umin_val);
		dst_reg->umax_value = dst_reg->var_off.value |
				      dst_reg->var_off.mask;
		if (dst_reg->smin_value < 0 || smin_val < 0) {
			/* Lose signed bounds when ORing negative numbers,
			 * ain't nobody got time for that.
			 */
			dst_reg->smin_value = S64_MIN;
			dst_reg->smax_value = S64_MAX;
		} else {
			/* ORing two positives gives a positive, so safe to
			 * cast result into s64.
			 */
			dst_reg->smin_value = dst_reg->umin_value;
			dst_reg->smax_value = dst_reg->umax_value;
		}
		/* We may learn something more from the var_off */
		__update_reg_bounds(dst_reg);
		break;
	case BPF_LSH:
		if (umax_val >= insn_bitness) {
			/* Shifts greater than 31 or 63 are undefined.
			 * This includes shifts by a negative number.
			 */
			mark_reg_unknown(env, regs, insn->dst_reg);
			break;
		}
		/* We lose all sign bit information (except what we can pick
		 * up from var_off)
		 */
		dst_reg->smin_value = S64_MIN;
		dst_reg->smax_value = S64_MAX;
		/* If we might shift our top bit out, then we know nothing */
		if (dst_reg->umax_value > 1ULL << (63 - umax_val)) {
			dst_reg->umin_value = 0;
			dst_reg->umax_value = U64_MAX;
		} else {
			dst_reg->umin_value <<= umin_val;
			dst_reg->umax_value <<= umax_val;
		}
		dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val);
		/* We may learn something more from the var_off */
		__update_reg_bounds(dst_reg);
		break;
	case BPF_RSH:
		if (umax_val >= insn_bitness) {
			/* Shifts greater than 31 or 63 are undefined.
			 * This includes shifts by a negative number.
			 */
			mark_reg_unknown(env, regs, insn->dst_reg);
			break;
		}
		/* BPF_RSH is an unsigned shift.  If the value in dst_reg might
		 * be negative, then either:
		 * 1) src_reg might be zero, so the sign bit of the result is
		 *    unknown, so we lose our signed bounds
		 * 2) it's known negative, thus the unsigned bounds capture the
		 *    signed bounds
		 * 3) the signed bounds cross zero, so they tell us nothing
		 *    about the result
		 * If the value in dst_reg is known nonnegative, then again the
		 * unsigned bounts capture the signed bounds.
		 * Thus, in all cases it suffices to blow away our signed bounds
		 * and rely on inferring new ones from the unsigned bounds and
		 * var_off of the result.
		 */
		dst_reg->smin_value = S64_MIN;
		dst_reg->smax_value = S64_MAX;
		dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val);
		dst_reg->umin_value >>= umax_val;
		dst_reg->umax_value >>= umin_val;
		/* We may learn something more from the var_off */
		__update_reg_bounds(dst_reg);
		break;
	case BPF_ARSH:
		if (umax_val >= insn_bitness) {
			/* Shifts greater than 31 or 63 are undefined.
			 * This includes shifts by a negative number.
			 */
			mark_reg_unknown(env, regs, insn->dst_reg);
			break;
		}
		/* Upon reaching here, src_known is true and
		 * umax_val is equal to umin_val.
		 */
		dst_reg->smin_value >>= umin_val;
		dst_reg->smax_value >>= umin_val;
		dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val);
		/* blow away the dst_reg umin_value/umax_value and rely on
		 * dst_reg var_off to refine the result.
		 */
		dst_reg->umin_value = 0;
		dst_reg->umax_value = U64_MAX;
		__update_reg_bounds(dst_reg);
		break;
	default:
		mark_reg_unknown(env, regs, insn->dst_reg);
		break;
	}

	if (BPF_CLASS(insn->code) != BPF_ALU64) {
		/* 32-bit ALU ops are (32,32)->32 */
		coerce_reg_to_size(dst_reg, 4);
	}

	/* Cross-refine: signed bounds from unsigned and vice versa, then
	 * fold the final bounds back into var_off.
	 */
	__reg_deduce_bounds(dst_reg);
	__reg_bound_offset(dst_reg);
	return 0;
}
| 16,090 |
/*
 * Detach @svc from the IPVS service hash table and destroy it.
 *
 * The entry is unhashed under the __ip_vs_svc_lock write lock so no new
 * lookup can find it, then we wait until the last user reference
 * (usecnt) is gone before __ip_vs_del_service() frees it.
 * NOTE(review): IP_VS_WAIT_WHILE is defined outside this view —
 * presumably it releases/retakes the lock while waiting; confirm, since
 * a plain busy-wait under a write lock would deadlock against readers.
 */
static void ip_vs_unlink_service(struct ip_vs_service *svc)
{
	/*
	 * Unhash it from the service table
	 */
	write_lock_bh(&__ip_vs_svc_lock);
	ip_vs_svc_unhash(svc);
	/*
	 * Wait until all the svc users go away.
	 */
	IP_VS_WAIT_WHILE(atomic_read(&svc->usecnt) > 0);
	__ip_vs_del_service(svc);
	write_unlock_bh(&__ip_vs_svc_lock);
}
| 16,091 |
45,694 | 0 | static int crypto_rfc3686_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_instance *inst = (void *)tfm->__crt_alg;
struct crypto_skcipher_spawn *spawn = crypto_instance_ctx(inst);
struct crypto_rfc3686_ctx *ctx = crypto_tfm_ctx(tfm);
struct crypto_ablkcipher *cipher;
unsigned long align;
cipher = crypto_spawn_skcipher(spawn);
if (IS_ERR(cipher))
return PTR_ERR(cipher);
ctx->child = cipher;
align = crypto_tfm_alg_alignmask(tfm);
align &= ~(crypto_tfm_ctx_alignment() - 1);
tfm->crt_ablkcipher.reqsize = align +
sizeof(struct crypto_rfc3686_req_ctx) +
crypto_ablkcipher_reqsize(cipher);
return 0;
}
| 16,092 |
68,687 | 0 | vc4_free_hang_state(struct drm_device *dev, struct vc4_hang_state *state)
{
unsigned int i;
for (i = 0; i < state->user_state.bo_count; i++)
drm_gem_object_unreference_unlocked(state->bo[i]);
kfree(state);
}
| 16,093 |
60,459 | 0 | int wasm_asm(const char *str, unsigned char *buf, int buf_len) {
int i = 0, len = -1;
char tmp[R_ASM_BUFSIZE];
while (str[i] != ' ' && i < buf_len) {
tmp[i] = str[i];
i++;
}
tmp[i] = 0;
for (i = 0; i < 0xff; i++) {
WasmOpDef *opdef = &opcodes[i];
if (opdef->txt) {
if (!strcmp (opdef->txt, tmp)) {
buf[0] = i;
return 1;
}
}
}
return len;
}
| 16,094 |
// Scope guard exit: balance the use_count_ increment (presumably done in
// the constructor, which is outside this view — confirm) and verify no
// other checker scope is still active on this GLES2Implementation.
GLES2Implementation::SingleThreadChecker::~SingleThreadChecker() {
  --gles2_implementation_->use_count_;
  CHECK_EQ(0, gles2_implementation_->use_count_);
}
| 16,095 |
/* Print the libraries the loaded binary links against, formatted per
 * `mode` (set / rad / json / normal).  Returns false only when the bin
 * object provides no library list; true otherwise (even if empty). */
static int bin_libs(RCore *r, int mode) {
	RList *libs;
	RListIter *iter;
	char* lib;
	int i = 0; /* number of libraries emitted */
	if (!(libs = r_bin_get_libs (r->bin))) {
		return false;
	}
	/* open the JSON array / print the section header */
	if (IS_MODE_JSON (mode)) {
		r_cons_print ("[");
	} else if (IS_MODE_NORMAL (mode)) {
		r_cons_println ("[Linked libraries]");
	}
	r_list_foreach (libs, iter, lib) {
		if (IS_MODE_SET (mode)) {
			/* nothing to set for libraries; counted only */
		} else if (IS_MODE_RAD (mode)) {
			r_cons_printf ("\"CCa entry0 %s\"\n", lib);
		} else if (IS_MODE_JSON (mode)) {
			/* iter->p is the previous node: comma-separate all but the first */
			r_cons_printf ("%s\"%s\"", iter->p ? "," : "", lib);
		} else {
			r_cons_println (lib);
		}
		i++;
	}
	/* close the JSON array / print the count with correct pluralization */
	if (IS_MODE_JSON (mode)) {
		r_cons_print ("]");
	} else if (IS_MODE_NORMAL (mode)) {
		if (i == 1) {
			r_cons_printf ("\n%i library\n", i);
		} else {
			r_cons_printf ("\n%i libraries\n", i);
		}
	}
	return true;
}
| 16,096 |
/* Card-specific control dispatcher for CAC cards: routes each
 * SC_CARDCTL_* command to the matching helper.  `ptr` is interpreted
 * per command (path, serial, count, or object-info output parameter).
 * Returns the helper's result, SC_ERROR_INTERNAL when the private data
 * is missing, or SC_ERROR_NOT_SUPPORTED for unknown commands. */
static int cac_card_ctl(sc_card_t *card, unsigned long cmd, void *ptr)
{
	cac_private_data_t * priv = CAC_DATA(card);

	LOG_FUNC_CALLED(card->ctx);
	sc_log(card->ctx, "cmd=%ld ptr=%p", cmd, ptr);
	/* without private data there are no object lists to serve */
	if (priv == NULL) {
		LOG_FUNC_RETURN(card->ctx, SC_ERROR_INTERNAL);
	}
	switch(cmd) {
		case SC_CARDCTL_CAC_GET_ACA_PATH:
			return cac_get_ACA_path(card, (sc_path_t *) ptr);
		case SC_CARDCTL_GET_SERIALNR:
			return cac_get_serial_nr_from_CUID(card, (sc_serial_number_t *) ptr);
		case SC_CARDCTL_CAC_INIT_GET_GENERIC_OBJECTS:
			return cac_get_init_and_get_count(&priv->general_list, &priv->general_current, (int *)ptr);
		case SC_CARDCTL_CAC_INIT_GET_CERT_OBJECTS:
			return cac_get_init_and_get_count(&priv->pki_list, &priv->pki_current, (int *)ptr);
		case SC_CARDCTL_CAC_GET_NEXT_GENERIC_OBJECT:
			return cac_fill_object_info(&priv->general_list, &priv->general_current, (sc_pkcs15_data_info_t *)ptr);
		case SC_CARDCTL_CAC_GET_NEXT_CERT_OBJECT:
			return cac_fill_object_info(&priv->pki_list, &priv->pki_current, (sc_pkcs15_data_info_t *)ptr);
		case SC_CARDCTL_CAC_FINAL_GET_GENERIC_OBJECTS:
			return cac_final_iterator(&priv->general_list);
		case SC_CARDCTL_CAC_FINAL_GET_CERT_OBJECTS:
			return cac_final_iterator(&priv->pki_list);
	}
	LOG_FUNC_RETURN(card->ctx, SC_ERROR_NOT_SUPPORTED);
}
| 16,097 |
18,675 | 0 | static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos)
{
struct sock_iocb siocb, *x;
if (pos != 0)
return -ESPIPE;
if (iocb->ki_left == 0) /* Match SYS5 behaviour */
return 0;
x = alloc_sock_iocb(iocb, &siocb);
if (!x)
return -ENOMEM;
return do_sock_read(&x->async_msg, iocb, iocb->ki_filp, iov, nr_segs);
}
| 16,098 |
/* Fetch the card's serial number via SC_CARDCTL_GET_SERIALNR and
 * hex-dump it to stdout; on failure print a diagnostic to stderr.
 *
 * NOTE(review): the lock/unlock calls act on the global `card` while
 * the control call targets the `in_card` parameter — presumably callers
 * always pass the global card here; confirm, otherwise the lock does
 * not protect the card actually being queried. */
static void print_serial(sc_card_t *in_card)
{
	int r;
	sc_serial_number_t serial;

	r = sc_lock(card);
	if (r == SC_SUCCESS)
		r = sc_card_ctl(in_card, SC_CARDCTL_GET_SERIALNR, &serial);
	sc_unlock(card);
	if (r)
		fprintf(stderr, "sc_card_ctl(*, SC_CARDCTL_GET_SERIALNR, *) failed\n");
	else
		util_hex_dump_asc(stdout, serial.value, serial.len, -1);
}
| 16,099 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.