unique_id (int64, 13-189k) | target (int64, 0-1) | code (stringlengths 20-241k) | __index_level_0__ (int64, 0-18.9k) |
---|---|---|---|
151,886 | 0 | void RenderFrameHostImpl::BindPresentationServiceRequest(
blink::mojom::PresentationServiceRequest request) {
if (!presentation_service_)
presentation_service_ = PresentationServiceImpl::Create(this);
presentation_service_->Bind(std::move(request));
}
| 15,000 |
3,019 | 0 | uint32_t vga_mem_readb(VGACommonState *s, hwaddr addr)
{
int memory_map_mode, plane;
uint32_t ret;
/* convert to VGA memory offset */
memory_map_mode = (s->gr[VGA_GFX_MISC] >> 2) & 3;
addr &= 0x1ffff;
switch(memory_map_mode) {
case 0:
break;
case 1:
if (addr >= 0x10000)
return 0xff;
addr += s->bank_offset;
break;
case 2:
addr -= 0x10000;
if (addr >= 0x8000)
return 0xff;
break;
default:
case 3:
addr -= 0x18000;
if (addr >= 0x8000)
return 0xff;
break;
}
if (sr(s, VGA_SEQ_MEMORY_MODE) & VGA_SR04_CHN_4M) {
/* chain 4 mode : simplest access */
assert(addr < s->vram_size);
ret = s->vram_ptr[addr];
} else if (s->gr[VGA_GFX_MODE] & 0x10) {
/* odd/even mode (aka text mode mapping) */
plane = (s->gr[VGA_GFX_PLANE_READ] & 2) | (addr & 1);
addr = ((addr & ~1) << 1) | plane;
if (addr >= s->vram_size) {
return 0xff;
}
ret = s->vram_ptr[addr];
} else {
/* standard VGA latched access */
if (addr * sizeof(uint32_t) >= s->vram_size) {
return 0xff;
}
s->latch = ((uint32_t *)s->vram_ptr)[addr];
if (!(s->gr[VGA_GFX_MODE] & 0x08)) {
/* read mode 0 */
plane = s->gr[VGA_GFX_PLANE_READ];
ret = GET_PLANE(s->latch, plane);
} else {
/* read mode 1 */
ret = (s->latch ^ mask16[s->gr[VGA_GFX_COMPARE_VALUE]]) &
mask16[s->gr[VGA_GFX_COMPARE_MASK]];
ret |= ret >> 16;
ret |= ret >> 8;
ret = (~ret) & 0xff;
}
}
return ret;
}
| 15,001 |
128,709 | 0 | bool TemplateURL::HasSearchTermsReplacementKey(const GURL& url) const {
std::string params[] = {url.query(), url.ref()};
for (int i = 0; i < 2; ++i) {
url::Component query, key, value;
query.len = static_cast<int>(params[i].size());
while (url::ExtractQueryKeyValue(params[i].c_str(), &query, &key, &value)) {
if (key.is_nonempty() &&
params[i].substr(key.begin, key.len) ==
search_terms_replacement_key()) {
return true;
}
}
}
return false;
}
| 15,002 |
4,850 | 0 | IsMaster(DeviceIntPtr dev)
{
return dev->type == MASTER_POINTER || dev->type == MASTER_KEYBOARD;
}
| 15,003 |
132,203 | 0 | media::CdmFactory* RenderFrameImpl::GetCdmFactory() {
#if defined(ENABLE_BROWSER_CDMS)
if (!cdm_manager_)
cdm_manager_ = new RendererCdmManager(this);
#endif // defined(ENABLE_BROWSER_CDMS)
if (!cdm_factory_) {
DCHECK(frame_);
#if defined(ENABLE_MOJO_MEDIA)
cdm_factory_.reset(new media::MojoCdmFactory(GetMediaServiceFactory()));
#else
cdm_factory_.reset(new RenderCdmFactory(
#if defined(ENABLE_PEPPER_CDMS)
base::Bind(&PepperCdmWrapperImpl::Create, frame_)
#elif defined(ENABLE_BROWSER_CDMS)
cdm_manager_
#endif
));
#endif // defined(ENABLE_MOJO_MEDIA)
}
return cdm_factory_.get();
}
| 15,004 |
169,430 | 0 | int ChunkedUploadDataStream::ReadInternal(IOBuffer* buf, int buf_len) {
DCHECK_LT(0, buf_len);
DCHECK(!read_buffer_.get());
int result = ReadChunk(buf, buf_len);
if (result == ERR_IO_PENDING) {
read_buffer_ = buf;
read_buffer_len_ = buf_len;
}
return result;
}
| 15,005 |
174,298 | 0 | Camera3Device::PreparerThread::~PreparerThread() {
Thread::requestExitAndWait();
if (mCurrentStream != nullptr) {
mCurrentStream->cancelPrepare();
ATRACE_ASYNC_END("stream prepare", mCurrentStream->getId());
mCurrentStream.clear();
}
clear();
}
| 15,006 |
140,415 | 0 | PermissionRequestGestureType PermissionUtil::GetGestureType(bool user_gesture) {
return user_gesture ? PermissionRequestGestureType::GESTURE
: PermissionRequestGestureType::NO_GESTURE;
}
| 15,007 |
141,763 | 0 | void V8InjectedScriptHost::getInternalPropertiesCallback(const v8::FunctionCallbackInfo<v8::Value>& info)
{
if (info.Length() < 1)
return;
v8::Local<v8::Array> properties;
if (unwrapInspector(info)->debugger()->internalProperties(info.GetIsolate()->GetCurrentContext(), info[0]).ToLocal(&properties))
info.GetReturnValue().Set(properties);
}
| 15,008 |
147,317 | 0 | void V8TestObject::DOMTimeStampAttributeAttributeSetterCallback(
const v8::FunctionCallbackInfo<v8::Value>& info) {
RUNTIME_CALL_TIMER_SCOPE_DISABLED_BY_DEFAULT(info.GetIsolate(), "Blink_TestObject_domTimeStampAttribute_Setter");
v8::Local<v8::Value> v8_value = info[0];
test_object_v8_internal::DOMTimeStampAttributeAttributeSetter(v8_value, info);
}
| 15,009 |
13,010 | 0 | sshpkt_get_ec(struct ssh *ssh, EC_POINT *v, const EC_GROUP *g)
{
return sshbuf_get_ec(ssh->state->incoming_packet, v, g);
}
| 15,010 |
13,292 | 0 | pdf14_cmap_gray_direct(frac gray, gx_device_color * pdc, const gs_gstate * pgs,
gx_device * dev, gs_color_select_t select)
{
int i,ncomps;
frac cm_comps[GX_DEVICE_COLOR_MAX_COMPONENTS];
gx_color_value cv[GX_DEVICE_COLOR_MAX_COMPONENTS];
gx_color_index color;
gx_device *trans_device;
/* If trans device is set, we need to use its procs. */
if (pgs->trans_device != NULL) {
trans_device = pgs->trans_device;
} else {
trans_device = dev;
}
ncomps = trans_device->color_info.num_components;
/* map to the color model */
dev_proc(trans_device, get_color_mapping_procs)(trans_device)->map_gray(trans_device, gray, cm_comps);
/* If we are in a Gray blending color space and have spots then we have
* possibly an issue here with the transfer function */
if (pgs->trans_device != NULL) {
cv[0] = frac2cv(gx_map_color_frac(pgs, cm_comps[0], effective_transfer[0]));
for (i = 1; i < ncomps; i++)
cv[i] = gx_color_value_from_byte(cm_comps[i]);
} else {
/* Not a transparency device. Just use the transfer functions directly */
for (i = 0; i < ncomps; i++)
cv[i] = frac2cv(gx_map_color_frac(pgs, cm_comps[i], effective_transfer[i]));
}
/* if output device supports devn, we need to make sure we send it the
proper color type. We now support Gray + spots as devn colors */
if (dev_proc(trans_device, dev_spec_op)(trans_device, gxdso_supports_devn, NULL, 0)) {
for (i = 0; i < ncomps; i++)
pdc->colors.devn.values[i] = cv[i];
pdc->type = gx_dc_type_devn;
} else {
/* encode as a color index */
color = dev_proc(trans_device, encode_color)(trans_device, cv);
/* check if the encoding was successful; we presume failure is rare */
if (color != gx_no_color_index)
color_set_pure(pdc, color);
}
}
| 15,011 |
67,551 | 0 | static void __ipxitf_put(struct ipx_interface *intrfc)
{
if (atomic_dec_and_test(&intrfc->refcnt))
__ipxitf_down(intrfc);
}
| 15,012 |
4,158 | 0 | ExponentialFunction::ExponentialFunction(const ExponentialFunction *func) : Function(func) {
memcpy(c0, func->c0, funcMaxOutputs * sizeof(double));
memcpy(c1, func->c1, funcMaxOutputs * sizeof(double));
e = func->e;
isLinear = func->isLinear;
ok = func->ok;
}
| 15,013 |
118,246 | 0 | AutofillDialogViews::DetailsGroup* AutofillDialogViews::GroupForView(
views::View* view) {
DCHECK(view);
views::View* input_view = GetAncestralInputView(view);
if (!input_view)
return NULL;
for (DetailGroupMap::iterator iter = detail_groups_.begin();
iter != detail_groups_.end(); ++iter) {
DetailsGroup* group = &iter->second;
if (input_view->parent() == group->manual_input)
return group;
if (input_view == group->suggested_info->textfield()) {
return group;
}
}
return NULL;
}
| 15,014 |
185,982 | 1 | v8::Local<v8::Object> V8InjectedScriptHost::create(v8::Local<v8::Context> context, V8InspectorImpl* inspector)
{
v8::Isolate* isolate = inspector->isolate();
v8::Local<v8::Object> injectedScriptHost = v8::Object::New(isolate);
v8::Local<v8::External> debuggerExternal = v8::External::New(isolate, inspector);
setFunctionProperty(context, injectedScriptHost, "internalConstructorName", V8InjectedScriptHost::internalConstructorNameCallback, debuggerExternal);
setFunctionProperty(context, injectedScriptHost, "formatAccessorsAsProperties", V8InjectedScriptHost::formatAccessorsAsProperties, debuggerExternal);
setFunctionProperty(context, injectedScriptHost, "subtype", V8InjectedScriptHost::subtypeCallback, debuggerExternal);
setFunctionProperty(context, injectedScriptHost, "getInternalProperties", V8InjectedScriptHost::getInternalPropertiesCallback, debuggerExternal);
setFunctionProperty(context, injectedScriptHost, "objectHasOwnProperty", V8InjectedScriptHost::objectHasOwnPropertyCallback, debuggerExternal);
setFunctionProperty(context, injectedScriptHost, "bind", V8InjectedScriptHost::bindCallback, debuggerExternal);
setFunctionProperty(context, injectedScriptHost, "proxyTargetValue", V8InjectedScriptHost::proxyTargetValueCallback, debuggerExternal);
return injectedScriptHost;
}
| 15,015 |
10,399 | 0 | FcDirCacheDisposeUnlocked (FcCache *cache)
{
FcCacheRemoveUnlocked (cache);
switch (cache->magic) {
case FC_CACHE_MAGIC_ALLOC:
free (cache);
break;
case FC_CACHE_MAGIC_MMAP:
#if defined(HAVE_MMAP) || defined(__CYGWIN__)
munmap (cache, cache->size);
#elif defined(_WIN32)
UnmapViewOfFile (cache);
#endif
break;
}
}
| 15,016 |
49,699 | 0 | static void *disk_seqf_start(struct seq_file *seqf, loff_t *pos)
{
loff_t skip = *pos;
struct class_dev_iter *iter;
struct device *dev;
iter = kmalloc(sizeof(*iter), GFP_KERNEL);
if (!iter)
return ERR_PTR(-ENOMEM);
seqf->private = iter;
class_dev_iter_init(iter, &block_class, NULL, &disk_type);
do {
dev = class_dev_iter_next(iter);
if (!dev)
return NULL;
} while (skip--);
return dev_to_disk(dev);
}
| 15,017 |
59,094 | 0 | static void __mark_reg_unbounded(struct bpf_reg_state *reg)
{
reg->smin_value = S64_MIN;
reg->smax_value = S64_MAX;
reg->umin_value = 0;
reg->umax_value = U64_MAX;
}
| 15,018 |
101,337 | 0 | void SessionModelAssociator::InitializeCurrentSessionName() {
DCHECK(CalledOnValidThread());
if (setup_for_test_) {
OnSessionNameInitialized("TestSessionName");
} else {
#if defined(OS_CHROMEOS)
OnSessionNameInitialized("Chromebook");
#else
BrowserThread::PostTask(BrowserThread::FILE, FROM_HERE,
new GetSessionNameTask(MakeWeakHandle(AsWeakPtr())));
#endif
}
}
| 15,019 |
129,871 | 0 | void ValidateAllTraceMacrosCreatedData(const ListValue& trace_parsed) {
const DictionaryValue* item = NULL;
#define EXPECT_FIND_(string) \
item = FindTraceEntry(trace_parsed, string); \
EXPECT_TRUE(item);
#define EXPECT_NOT_FIND_(string) \
item = FindTraceEntry(trace_parsed, string); \
EXPECT_FALSE(item);
#define EXPECT_SUB_FIND_(string) \
if (item) \
EXPECT_TRUE(IsStringInDict(string, item));
EXPECT_FIND_("TRACE_EVENT0 call");
{
std::string ph;
std::string ph_end;
EXPECT_TRUE((item = FindTraceEntry(trace_parsed, "TRACE_EVENT0 call")));
EXPECT_TRUE((item && item->GetString("ph", &ph)));
EXPECT_EQ("X", ph);
item = FindTraceEntry(trace_parsed, "TRACE_EVENT0 call", item);
EXPECT_FALSE(item);
}
EXPECT_FIND_("TRACE_EVENT1 call");
EXPECT_SUB_FIND_("name1");
EXPECT_SUB_FIND_("value1");
EXPECT_FIND_("TRACE_EVENT2 call");
EXPECT_SUB_FIND_("name1");
EXPECT_SUB_FIND_("\"value1\"");
EXPECT_SUB_FIND_("name2");
EXPECT_SUB_FIND_("value\\2");
EXPECT_FIND_("TRACE_EVENT_INSTANT0 call");
{
std::string scope;
EXPECT_TRUE((item && item->GetString("s", &scope)));
EXPECT_EQ("g", scope);
}
EXPECT_FIND_("TRACE_EVENT_INSTANT1 call");
{
std::string scope;
EXPECT_TRUE((item && item->GetString("s", &scope)));
EXPECT_EQ("p", scope);
}
EXPECT_SUB_FIND_("name1");
EXPECT_SUB_FIND_("value1");
EXPECT_FIND_("TRACE_EVENT_INSTANT2 call");
{
std::string scope;
EXPECT_TRUE((item && item->GetString("s", &scope)));
EXPECT_EQ("t", scope);
}
EXPECT_SUB_FIND_("name1");
EXPECT_SUB_FIND_("value1");
EXPECT_SUB_FIND_("name2");
EXPECT_SUB_FIND_("value2");
EXPECT_FIND_("TRACE_EVENT_BEGIN0 call");
EXPECT_FIND_("TRACE_EVENT_BEGIN1 call");
EXPECT_SUB_FIND_("name1");
EXPECT_SUB_FIND_("value1");
EXPECT_FIND_("TRACE_EVENT_BEGIN2 call");
EXPECT_SUB_FIND_("name1");
EXPECT_SUB_FIND_("value1");
EXPECT_SUB_FIND_("name2");
EXPECT_SUB_FIND_("value2");
EXPECT_FIND_("TRACE_EVENT_END0 call");
EXPECT_FIND_("TRACE_EVENT_END1 call");
EXPECT_SUB_FIND_("name1");
EXPECT_SUB_FIND_("value1");
EXPECT_FIND_("TRACE_EVENT_END2 call");
EXPECT_SUB_FIND_("name1");
EXPECT_SUB_FIND_("value1");
EXPECT_SUB_FIND_("name2");
EXPECT_SUB_FIND_("value2");
EXPECT_FIND_("TRACE_EVENT_ASYNC_BEGIN0 call");
EXPECT_SUB_FIND_("id");
EXPECT_SUB_FIND_(kAsyncIdStr);
EXPECT_FIND_("TRACE_EVENT_ASYNC_BEGIN1 call");
EXPECT_SUB_FIND_("id");
EXPECT_SUB_FIND_(kAsyncIdStr);
EXPECT_SUB_FIND_("name1");
EXPECT_SUB_FIND_("value1");
EXPECT_FIND_("TRACE_EVENT_ASYNC_BEGIN2 call");
EXPECT_SUB_FIND_("id");
EXPECT_SUB_FIND_(kAsyncIdStr);
EXPECT_SUB_FIND_("name1");
EXPECT_SUB_FIND_("value1");
EXPECT_SUB_FIND_("name2");
EXPECT_SUB_FIND_("value2");
EXPECT_FIND_("TRACE_EVENT_ASYNC_STEP_INTO0 call");
EXPECT_SUB_FIND_("id");
EXPECT_SUB_FIND_(kAsyncIdStr);
EXPECT_SUB_FIND_("step_begin1");
EXPECT_FIND_("TRACE_EVENT_ASYNC_STEP_INTO1 call");
EXPECT_SUB_FIND_("id");
EXPECT_SUB_FIND_(kAsyncIdStr);
EXPECT_SUB_FIND_("step_begin2");
EXPECT_SUB_FIND_("name1");
EXPECT_SUB_FIND_("value1");
EXPECT_FIND_("TRACE_EVENT_ASYNC_END0 call");
EXPECT_SUB_FIND_("id");
EXPECT_SUB_FIND_(kAsyncIdStr);
EXPECT_FIND_("TRACE_EVENT_ASYNC_END1 call");
EXPECT_SUB_FIND_("id");
EXPECT_SUB_FIND_(kAsyncIdStr);
EXPECT_SUB_FIND_("name1");
EXPECT_SUB_FIND_("value1");
EXPECT_FIND_("TRACE_EVENT_ASYNC_END2 call");
EXPECT_SUB_FIND_("id");
EXPECT_SUB_FIND_(kAsyncIdStr);
EXPECT_SUB_FIND_("name1");
EXPECT_SUB_FIND_("value1");
EXPECT_SUB_FIND_("name2");
EXPECT_SUB_FIND_("value2");
EXPECT_FIND_("TRACE_EVENT_FLOW_BEGIN0 call");
EXPECT_SUB_FIND_("id");
EXPECT_SUB_FIND_(kFlowIdStr);
EXPECT_FIND_("TRACE_EVENT_FLOW_STEP0 call");
EXPECT_SUB_FIND_("id");
EXPECT_SUB_FIND_(kFlowIdStr);
EXPECT_SUB_FIND_("step1");
EXPECT_FIND_("TRACE_EVENT_FLOW_END_BIND_TO_ENCLOSING0 call");
EXPECT_SUB_FIND_("id");
EXPECT_SUB_FIND_(kFlowIdStr);
EXPECT_FIND_("TRACE_COUNTER1 call");
{
std::string ph;
EXPECT_TRUE((item && item->GetString("ph", &ph)));
EXPECT_EQ("C", ph);
int value;
EXPECT_TRUE((item && item->GetInteger("args.value", &value)));
EXPECT_EQ(31415, value);
}
EXPECT_FIND_("TRACE_COUNTER2 call");
{
std::string ph;
EXPECT_TRUE((item && item->GetString("ph", &ph)));
EXPECT_EQ("C", ph);
int value;
EXPECT_TRUE((item && item->GetInteger("args.a", &value)));
EXPECT_EQ(30000, value);
EXPECT_TRUE((item && item->GetInteger("args.b", &value)));
EXPECT_EQ(1415, value);
}
EXPECT_FIND_("TRACE_COUNTER_ID1 call");
{
std::string id;
EXPECT_TRUE((item && item->GetString("id", &id)));
EXPECT_EQ("0x319009", id);
std::string ph;
EXPECT_TRUE((item && item->GetString("ph", &ph)));
EXPECT_EQ("C", ph);
int value;
EXPECT_TRUE((item && item->GetInteger("args.value", &value)));
EXPECT_EQ(31415, value);
}
EXPECT_FIND_("TRACE_COUNTER_ID2 call");
{
std::string id;
EXPECT_TRUE((item && item->GetString("id", &id)));
EXPECT_EQ("0x319009", id);
std::string ph;
EXPECT_TRUE((item && item->GetString("ph", &ph)));
EXPECT_EQ("C", ph);
int value;
EXPECT_TRUE((item && item->GetInteger("args.a", &value)));
EXPECT_EQ(30000, value);
EXPECT_TRUE((item && item->GetInteger("args.b", &value)));
EXPECT_EQ(1415, value);
}
EXPECT_FIND_("TRACE_EVENT_COPY_BEGIN_WITH_ID_TID_AND_TIMESTAMP0 call");
{
int val;
EXPECT_TRUE((item && item->GetInteger("ts", &val)));
EXPECT_EQ(12345, val);
EXPECT_TRUE((item && item->GetInteger("tid", &val)));
EXPECT_EQ(kThreadId, val);
std::string id;
EXPECT_TRUE((item && item->GetString("id", &id)));
EXPECT_EQ(kAsyncIdStr, id);
}
EXPECT_FIND_("TRACE_EVENT_COPY_END_WITH_ID_TID_AND_TIMESTAMP0 call");
{
int val;
EXPECT_TRUE((item && item->GetInteger("ts", &val)));
EXPECT_EQ(23456, val);
EXPECT_TRUE((item && item->GetInteger("tid", &val)));
EXPECT_EQ(kThreadId, val);
std::string id;
EXPECT_TRUE((item && item->GetString("id", &id)));
EXPECT_EQ(kAsyncIdStr, id);
}
EXPECT_FIND_("TRACE_EVENT_BEGIN_WITH_ID_TID_AND_TIMESTAMP0 call");
{
int val;
EXPECT_TRUE((item && item->GetInteger("ts", &val)));
EXPECT_EQ(34567, val);
EXPECT_TRUE((item && item->GetInteger("tid", &val)));
EXPECT_EQ(kThreadId, val);
std::string id;
EXPECT_TRUE((item && item->GetString("id", &id)));
EXPECT_EQ(kAsyncId2Str, id);
}
EXPECT_FIND_("TRACE_EVENT_ASYNC_STEP_PAST0 call");
{
EXPECT_SUB_FIND_("id");
EXPECT_SUB_FIND_(kAsyncId2Str);
EXPECT_SUB_FIND_("step_end1");
EXPECT_FIND_("TRACE_EVENT_ASYNC_STEP_PAST1 call");
EXPECT_SUB_FIND_("id");
EXPECT_SUB_FIND_(kAsyncId2Str);
EXPECT_SUB_FIND_("step_end2");
EXPECT_SUB_FIND_("name1");
EXPECT_SUB_FIND_("value1");
}
EXPECT_FIND_("TRACE_EVENT_END_WITH_ID_TID_AND_TIMESTAMP0 call");
{
int val;
EXPECT_TRUE((item && item->GetInteger("ts", &val)));
EXPECT_EQ(45678, val);
EXPECT_TRUE((item && item->GetInteger("tid", &val)));
EXPECT_EQ(kThreadId, val);
std::string id;
EXPECT_TRUE((item && item->GetString("id", &id)));
EXPECT_EQ(kAsyncId2Str, id);
}
EXPECT_FIND_("tracked object 1");
{
std::string phase;
std::string id;
std::string snapshot;
EXPECT_TRUE((item && item->GetString("ph", &phase)));
EXPECT_EQ("N", phase);
EXPECT_TRUE((item && item->GetString("id", &id)));
EXPECT_EQ("0x42", id);
item = FindTraceEntry(trace_parsed, "tracked object 1", item);
EXPECT_TRUE(item);
EXPECT_TRUE(item && item->GetString("ph", &phase));
EXPECT_EQ("O", phase);
EXPECT_TRUE(item && item->GetString("id", &id));
EXPECT_EQ("0x42", id);
EXPECT_TRUE(item && item->GetString("args.snapshot", &snapshot));
EXPECT_EQ("hello", snapshot);
item = FindTraceEntry(trace_parsed, "tracked object 1", item);
EXPECT_TRUE(item);
EXPECT_TRUE(item && item->GetString("ph", &phase));
EXPECT_EQ("D", phase);
EXPECT_TRUE(item && item->GetString("id", &id));
EXPECT_EQ("0x42", id);
}
EXPECT_FIND_("tracked object 2");
{
std::string phase;
std::string id;
std::string snapshot;
EXPECT_TRUE(item && item->GetString("ph", &phase));
EXPECT_EQ("N", phase);
EXPECT_TRUE(item && item->GetString("id", &id));
EXPECT_EQ("0x2128506", id);
item = FindTraceEntry(trace_parsed, "tracked object 2", item);
EXPECT_TRUE(item);
EXPECT_TRUE(item && item->GetString("ph", &phase));
EXPECT_EQ("O", phase);
EXPECT_TRUE(item && item->GetString("id", &id));
EXPECT_EQ("0x2128506", id);
EXPECT_TRUE(item && item->GetString("args.snapshot", &snapshot));
EXPECT_EQ("world", snapshot);
item = FindTraceEntry(trace_parsed, "tracked object 2", item);
EXPECT_TRUE(item);
EXPECT_TRUE(item && item->GetString("ph", &phase));
EXPECT_EQ("D", phase);
EXPECT_TRUE(item && item->GetString("id", &id));
EXPECT_EQ("0x2128506", id);
}
EXPECT_FIND_(kControlCharacters);
EXPECT_SUB_FIND_(kControlCharacters);
}
| 15,020 |
78,167 | 0 | static int asepcos_select_file(sc_card_t *card, const sc_path_t *in_path,
sc_file_t **file)
{
int r;
sc_path_t npath = *in_path;
SC_FUNC_CALLED(card->ctx, SC_LOG_DEBUG_NORMAL);
if (in_path->type == SC_PATH_TYPE_PATH) {
/* check the current DF to avoid unnecessary re-selection of
* the MF (as this might invalidate a security status) */
sc_path_t tpath;
r = asepcos_get_current_df_path(card, &tpath);
/* workaround: as opensc can't handle paths with file id
* and application names in it let's ignore the current
* DF if the returned path contains a unsupported tag.
*/
if (r != SC_ERROR_INVALID_ASN1_OBJECT && r != SC_SUCCESS)
return r;
if (r == SC_SUCCESS && sc_compare_path_prefix(&tpath, &npath) != 0) {
/* remove the currently selected DF from the path */
if (tpath.len == npath.len) {
/* we are already in the requested DF */
if (file == NULL)
/* no file information requested =>
* nothing to do */
return SC_SUCCESS;
} else {
/* shorten path */
r = sc_path_set(&npath, 0, &in_path->value[tpath.len],
npath.len - tpath.len, 0, 0);
if (r != SC_SUCCESS)
return r;
if (npath.len == 2)
npath.type = SC_PATH_TYPE_FILE_ID;
else
npath.type = SC_PATH_TYPE_PATH;
}
}
}
r = iso_ops->select_file(card, &npath, file);
/* XXX: this doesn't look right */
if (file != NULL && *file != NULL)
if ((*file)->ef_structure == SC_FILE_EF_UNKNOWN)
(*file)->ef_structure = SC_FILE_EF_TRANSPARENT;
if (r == SC_SUCCESS && file != NULL && *file != NULL) {
r = asepcos_parse_sec_attr(card, *file, (*file)->sec_attr, (*file)->sec_attr_len);
if (r != SC_SUCCESS)
sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL, "error parsing security attributes");
}
SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_NORMAL, r);
}
| 15,021 |
187,782 | 1 | void ih264d_init_decoder(void * ps_dec_params)
{
dec_struct_t * ps_dec = (dec_struct_t *)ps_dec_params;
dec_slice_params_t *ps_cur_slice;
pocstruct_t *ps_prev_poc, *ps_cur_poc;
WORD32 size;
size = sizeof(pred_info_t) * 2 * 32;
memset(ps_dec->ps_pred, 0 , size);
size = sizeof(disp_mgr_t);
memset(ps_dec->pv_disp_buf_mgr, 0 , size);
size = sizeof(buf_mgr_t) + ithread_get_mutex_lock_size();
memset(ps_dec->pv_pic_buf_mgr, 0, size);
size = sizeof(dec_err_status_t);
memset(ps_dec->ps_dec_err_status, 0, size);
size = sizeof(sei);
memset(ps_dec->ps_sei, 0, size);
size = sizeof(dpb_commands_t);
memset(ps_dec->ps_dpb_cmds, 0, size);
size = sizeof(dec_bit_stream_t);
memset(ps_dec->ps_bitstrm, 0, size);
size = sizeof(dec_slice_params_t);
memset(ps_dec->ps_cur_slice, 0, size);
size = MAX(sizeof(dec_seq_params_t), sizeof(dec_pic_params_t));
memset(ps_dec->pv_scratch_sps_pps, 0, size);
size = sizeof(ctxt_inc_mb_info_t);
memset(ps_dec->ps_left_mb_ctxt_info, 0, size);
size = (sizeof(neighbouradd_t) << 2);
memset(ps_dec->ps_left_mvpred_addr, 0 ,size);
size = sizeof(buf_mgr_t) + ithread_get_mutex_lock_size();
memset(ps_dec->pv_mv_buf_mgr, 0, size);
/* Free any dynamic buffers that are allocated */
ih264d_free_dynamic_bufs(ps_dec);
ps_cur_slice = ps_dec->ps_cur_slice;
ps_dec->init_done = 0;
ps_dec->u4_num_cores = 1;
ps_dec->u2_pic_ht = ps_dec->u2_pic_wd = 0;
ps_dec->u1_separate_parse = DEFAULT_SEPARATE_PARSE;
ps_dec->u4_app_disable_deblk_frm = 0;
ps_dec->i4_degrade_type = 0;
ps_dec->i4_degrade_pics = 0;
ps_dec->i4_app_skip_mode = IVD_SKIP_NONE;
ps_dec->i4_dec_skip_mode = IVD_SKIP_NONE;
memset(ps_dec->ps_pps, 0,
((sizeof(dec_pic_params_t)) * MAX_NUM_PIC_PARAMS));
memset(ps_dec->ps_sps, 0,
((sizeof(dec_seq_params_t)) * MAX_NUM_SEQ_PARAMS));
/* Initialization of function pointers ih264d_deblock_picture function*/
ps_dec->p_DeblockPicture[0] = ih264d_deblock_picture_non_mbaff;
ps_dec->p_DeblockPicture[1] = ih264d_deblock_picture_mbaff;
ps_dec->s_cab_dec_env.pv_codec_handle = ps_dec;
ps_dec->u4_num_fld_in_frm = 0;
ps_dec->ps_dpb_mgr->pv_codec_handle = ps_dec;
/* Initialize the sei validity u4_flag with zero indiacting sei is not valid*/
ps_dec->ps_sei->u1_is_valid = 0;
/* decParams Initializations */
ps_dec->ps_cur_pps = NULL;
ps_dec->ps_cur_sps = NULL;
ps_dec->u1_init_dec_flag = 0;
ps_dec->u1_first_slice_in_stream = 1;
ps_dec->u1_first_pb_nal_in_pic = 1;
ps_dec->u1_last_pic_not_decoded = 0;
ps_dec->u4_app_disp_width = 0;
ps_dec->i4_header_decoded = 0;
ps_dec->u4_total_frames_decoded = 0;
ps_dec->i4_error_code = 0;
ps_dec->i4_content_type = -1;
ps_dec->ps_cur_slice->u1_mbaff_frame_flag = 0;
ps_dec->ps_dec_err_status->u1_err_flag = ACCEPT_ALL_PICS; //REJECT_PB_PICS;
ps_dec->ps_dec_err_status->u1_cur_pic_type = PIC_TYPE_UNKNOWN;
ps_dec->ps_dec_err_status->u4_frm_sei_sync = SYNC_FRM_DEFAULT;
ps_dec->ps_dec_err_status->u4_cur_frm = INIT_FRAME;
ps_dec->ps_dec_err_status->u1_pic_aud_i = PIC_TYPE_UNKNOWN;
ps_dec->u1_pr_sl_type = 0xFF;
ps_dec->u2_mbx = 0xffff;
ps_dec->u2_mby = 0;
ps_dec->u2_total_mbs_coded = 0;
/* POC initializations */
ps_prev_poc = &ps_dec->s_prev_pic_poc;
ps_cur_poc = &ps_dec->s_cur_pic_poc;
ps_prev_poc->i4_pic_order_cnt_lsb = ps_cur_poc->i4_pic_order_cnt_lsb = 0;
ps_prev_poc->i4_pic_order_cnt_msb = ps_cur_poc->i4_pic_order_cnt_msb = 0;
ps_prev_poc->i4_delta_pic_order_cnt_bottom =
ps_cur_poc->i4_delta_pic_order_cnt_bottom = 0;
ps_prev_poc->i4_delta_pic_order_cnt[0] =
ps_cur_poc->i4_delta_pic_order_cnt[0] = 0;
ps_prev_poc->i4_delta_pic_order_cnt[1] =
ps_cur_poc->i4_delta_pic_order_cnt[1] = 0;
ps_prev_poc->u1_mmco_equalto5 = ps_cur_poc->u1_mmco_equalto5 = 0;
ps_prev_poc->i4_top_field_order_count = ps_cur_poc->i4_top_field_order_count =
0;
ps_prev_poc->i4_bottom_field_order_count =
ps_cur_poc->i4_bottom_field_order_count = 0;
ps_prev_poc->u1_bot_field = ps_cur_poc->u1_bot_field = 0;
ps_prev_poc->u1_mmco_equalto5 = ps_cur_poc->u1_mmco_equalto5 = 0;
ps_prev_poc->i4_prev_frame_num_ofst = ps_cur_poc->i4_prev_frame_num_ofst = 0;
ps_cur_slice->u1_mmco_equalto5 = 0;
ps_cur_slice->u2_frame_num = 0;
ps_dec->i4_max_poc = 0;
ps_dec->i4_prev_max_display_seq = 0;
ps_dec->u1_recon_mb_grp = 4;
/* Field PIC initializations */
ps_dec->u1_second_field = 0;
ps_dec->s_prev_seq_params.u1_eoseq_pending = 0;
/* Set the cropping parameters as zero */
ps_dec->u2_crop_offset_y = 0;
ps_dec->u2_crop_offset_uv = 0;
/* The Initial Frame Rate Info is not Present */
ps_dec->i4_vui_frame_rate = -1;
ps_dec->i4_pic_type = -1;
ps_dec->i4_frametype = -1;
ps_dec->i4_content_type = -1;
ps_dec->u1_res_changed = 0;
ps_dec->u1_frame_decoded_flag = 0;
/* Set the default frame seek mask mode */
ps_dec->u4_skip_frm_mask = SKIP_NONE;
/********************************************************/
/* Initialize CAVLC residual decoding function pointers */
/********************************************************/
ps_dec->pf_cavlc_4x4res_block[0] = ih264d_cavlc_4x4res_block_totalcoeff_1;
ps_dec->pf_cavlc_4x4res_block[1] =
ih264d_cavlc_4x4res_block_totalcoeff_2to10;
ps_dec->pf_cavlc_4x4res_block[2] =
ih264d_cavlc_4x4res_block_totalcoeff_11to16;
ps_dec->pf_cavlc_parse4x4coeff[0] = ih264d_cavlc_parse4x4coeff_n0to7;
ps_dec->pf_cavlc_parse4x4coeff[1] = ih264d_cavlc_parse4x4coeff_n8;
ps_dec->pf_cavlc_parse_8x8block[0] =
ih264d_cavlc_parse_8x8block_none_available;
ps_dec->pf_cavlc_parse_8x8block[1] =
ih264d_cavlc_parse_8x8block_left_available;
ps_dec->pf_cavlc_parse_8x8block[2] =
ih264d_cavlc_parse_8x8block_top_available;
ps_dec->pf_cavlc_parse_8x8block[3] =
ih264d_cavlc_parse_8x8block_both_available;
/***************************************************************************/
/* Initialize Bs calculation function pointers for P and B, 16x16/non16x16 */
/***************************************************************************/
ps_dec->pf_fill_bs1[0][0] = ih264d_fill_bs1_16x16mb_pslice;
ps_dec->pf_fill_bs1[0][1] = ih264d_fill_bs1_non16x16mb_pslice;
ps_dec->pf_fill_bs1[1][0] = ih264d_fill_bs1_16x16mb_bslice;
ps_dec->pf_fill_bs1[1][1] = ih264d_fill_bs1_non16x16mb_bslice;
ps_dec->pf_fill_bs_xtra_left_edge[0] =
ih264d_fill_bs_xtra_left_edge_cur_frm;
ps_dec->pf_fill_bs_xtra_left_edge[1] =
ih264d_fill_bs_xtra_left_edge_cur_fld;
/* Initialize Reference Pic Buffers */
ih264d_init_ref_bufs(ps_dec->ps_dpb_mgr);
ps_dec->u2_prv_frame_num = 0;
ps_dec->u1_top_bottom_decoded = 0;
ps_dec->u1_dangling_field = 0;
ps_dec->s_cab_dec_env.cabac_table = gau4_ih264d_cabac_table;
ps_dec->pu1_left_mv_ctxt_inc = ps_dec->u1_left_mv_ctxt_inc_arr[0];
ps_dec->pi1_left_ref_idx_ctxt_inc =
&ps_dec->i1_left_ref_idx_ctx_inc_arr[0][0];
ps_dec->pu1_left_yuv_dc_csbp = &ps_dec->u1_yuv_dc_csbp_topmb;
/* ! */
/* Initializing flush frame u4_flag */
ps_dec->u1_flushfrm = 0;
{
ps_dec->s_cab_dec_env.pv_codec_handle = (void*)ps_dec;
ps_dec->ps_bitstrm->pv_codec_handle = (void*)ps_dec;
ps_dec->ps_cur_slice->pv_codec_handle = (void*)ps_dec;
ps_dec->ps_dpb_mgr->pv_codec_handle = (void*)ps_dec;
}
memset(ps_dec->disp_bufs, 0, (MAX_DISP_BUFS_NEW) * sizeof(disp_buf_t));
memset(ps_dec->u4_disp_buf_mapping, 0,
(MAX_DISP_BUFS_NEW) * sizeof(UWORD32));
memset(ps_dec->u4_disp_buf_to_be_freed, 0,
(MAX_DISP_BUFS_NEW) * sizeof(UWORD32));
ih264d_init_arch(ps_dec);
ih264d_init_function_ptr(ps_dec);
ps_dec->e_frm_out_mode = IVD_DISPLAY_FRAME_OUT;
ps_dec->init_done = 1;
}
| 15,022 |
187,404 | 1 | static vpx_codec_err_t vp8_decode(vpx_codec_alg_priv_t *ctx,
const uint8_t *data,
unsigned int data_sz,
void *user_priv,
long deadline)
{
vpx_codec_err_t res = VPX_CODEC_OK;
unsigned int resolution_change = 0;
unsigned int w, h;
if (!ctx->fragments.enabled && (data == NULL && data_sz == 0))
{
return 0;
}
/* Update the input fragment data */
if(update_fragments(ctx, data, data_sz, &res) <= 0)
return res;
/* Determine the stream parameters. Note that we rely on peek_si to
* validate that we have a buffer that does not wrap around the top
* of the heap.
*/
w = ctx->si.w;
h = ctx->si.h;
res = vp8_peek_si_internal(ctx->fragments.ptrs[0], ctx->fragments.sizes[0],
&ctx->si, ctx->decrypt_cb, ctx->decrypt_state);
if((res == VPX_CODEC_UNSUP_BITSTREAM) && !ctx->si.is_kf)
{
/* the peek function returns an error for non keyframes, however for
* this case, it is not an error */
res = VPX_CODEC_OK;
}
if(!ctx->decoder_init && !ctx->si.is_kf)
res = VPX_CODEC_UNSUP_BITSTREAM;
if ((ctx->si.h != h) || (ctx->si.w != w))
resolution_change = 1;
/* Initialize the decoder instance on the first frame*/
if (!res && !ctx->decoder_init)
{
VP8D_CONFIG oxcf;
oxcf.Width = ctx->si.w;
oxcf.Height = ctx->si.h;
oxcf.Version = 9;
oxcf.postprocess = 0;
oxcf.max_threads = ctx->cfg.threads;
oxcf.error_concealment =
(ctx->base.init_flags & VPX_CODEC_USE_ERROR_CONCEALMENT);
/* If postprocessing was enabled by the application and a
* configuration has not been provided, default it.
*/
if (!ctx->postproc_cfg_set
&& (ctx->base.init_flags & VPX_CODEC_USE_POSTPROC)) {
ctx->postproc_cfg.post_proc_flag =
VP8_DEBLOCK | VP8_DEMACROBLOCK | VP8_MFQE;
ctx->postproc_cfg.deblocking_level = 4;
ctx->postproc_cfg.noise_level = 0;
}
res = vp8_create_decoder_instances(&ctx->yv12_frame_buffers, &oxcf);
ctx->decoder_init = 1;
}
/* Set these even if already initialized. The caller may have changed the
* decrypt config between frames.
*/
if (ctx->decoder_init) {
ctx->yv12_frame_buffers.pbi[0]->decrypt_cb = ctx->decrypt_cb;
ctx->yv12_frame_buffers.pbi[0]->decrypt_state = ctx->decrypt_state;
}
if (!res)
{
VP8D_COMP *pbi = ctx->yv12_frame_buffers.pbi[0];
if (resolution_change)
{
VP8_COMMON *const pc = & pbi->common;
MACROBLOCKD *const xd = & pbi->mb;
#if CONFIG_MULTITHREAD
int i;
#endif
pc->Width = ctx->si.w;
pc->Height = ctx->si.h;
{
int prev_mb_rows = pc->mb_rows;
if (setjmp(pbi->common.error.jmp))
{
pbi->common.error.setjmp = 0;
vp8_clear_system_state();
/* same return value as used in vp8dx_receive_compressed_data */
return -1;
}
pbi->common.error.setjmp = 1;
if (pc->Width <= 0)
{
pc->Width = w;
vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
"Invalid frame width");
}
if (pc->Height <= 0)
{
pc->Height = h;
vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
"Invalid frame height");
}
if (vp8_alloc_frame_buffers(pc, pc->Width, pc->Height))
vpx_internal_error(&pc->error, VPX_CODEC_MEM_ERROR,
"Failed to allocate frame buffers");
xd->pre = pc->yv12_fb[pc->lst_fb_idx];
xd->dst = pc->yv12_fb[pc->new_fb_idx];
#if CONFIG_MULTITHREAD
for (i = 0; i < pbi->allocated_decoding_thread_count; i++)
{
pbi->mb_row_di[i].mbd.dst = pc->yv12_fb[pc->new_fb_idx];
vp8_build_block_doffsets(&pbi->mb_row_di[i].mbd);
}
#endif
vp8_build_block_doffsets(&pbi->mb);
/* allocate memory for last frame MODE_INFO array */
#if CONFIG_ERROR_CONCEALMENT
if (pbi->ec_enabled)
{
/* old prev_mip was released by vp8_de_alloc_frame_buffers()
* called in vp8_alloc_frame_buffers() */
pc->prev_mip = vpx_calloc(
(pc->mb_cols + 1) * (pc->mb_rows + 1),
sizeof(MODE_INFO));
if (!pc->prev_mip)
{
vp8_de_alloc_frame_buffers(pc);
vpx_internal_error(&pc->error, VPX_CODEC_MEM_ERROR,
"Failed to allocate"
"last frame MODE_INFO array");
}
pc->prev_mi = pc->prev_mip + pc->mode_info_stride + 1;
if (vp8_alloc_overlap_lists(pbi))
vpx_internal_error(&pc->error, VPX_CODEC_MEM_ERROR,
"Failed to allocate overlap lists "
"for error concealment");
}
#endif
#if CONFIG_MULTITHREAD
if (pbi->b_multithreaded_rd)
vp8mt_alloc_temp_buffers(pbi, pc->Width, prev_mb_rows);
#else
(void)prev_mb_rows;
#endif
}
pbi->common.error.setjmp = 0;
/* required to get past the first get_free_fb() call */
pbi->common.fb_idx_ref_cnt[0] = 0;
}
/* update the pbi fragment data */
pbi->fragments = ctx->fragments;
ctx->user_priv = user_priv;
if (vp8dx_receive_compressed_data(pbi, data_sz, data, deadline))
{
res = update_error_state(ctx, &pbi->common.error);
}
/* get ready for the next series of fragments */
ctx->fragments.count = 0;
}
return res;
}
| 15,023 |
29,841 | 0 | cifs_umount(struct cifs_sb_info *cifs_sb)
{
struct rb_root *root = &cifs_sb->tlink_tree;
struct rb_node *node;
struct tcon_link *tlink;
cancel_delayed_work_sync(&cifs_sb->prune_tlinks);
spin_lock(&cifs_sb->tlink_tree_lock);
while ((node = rb_first(root))) {
tlink = rb_entry(node, struct tcon_link, tl_rbnode);
cifs_get_tlink(tlink);
clear_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
rb_erase(node, root);
spin_unlock(&cifs_sb->tlink_tree_lock);
cifs_put_tlink(tlink);
spin_lock(&cifs_sb->tlink_tree_lock);
}
spin_unlock(&cifs_sb->tlink_tree_lock);
bdi_destroy(&cifs_sb->bdi);
kfree(cifs_sb->mountdata);
unload_nls(cifs_sb->local_nls);
kfree(cifs_sb);
}
| 15,024 |
89,285 | 0 | void qrio_set_leds(void)
{
u8 ctrlh;
void __iomem *qrio_base = (void *)CONFIG_SYS_QRIO_BASE;
/* set UNIT LED to RED and BOOT LED to ON */
ctrlh = in_8(qrio_base + CTRLH_OFF);
ctrlh |= (CTRLH_WRL_BOOT | CTRLH_WRL_UNITRUN);
out_8(qrio_base + CTRLH_OFF, ctrlh);
}
| 15,025 |
55,968 | 0 | static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
int flags)
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct skcipher_ctx *ctx = ask->private;
struct skcipher_sg_list *sgl;
struct scatterlist *sg;
struct skcipher_async_req *sreq;
struct skcipher_request *req;
struct skcipher_async_rsgl *last_rsgl = NULL;
unsigned int txbufs = 0, len = 0, tx_nents = skcipher_all_sg_nents(ctx);
unsigned int reqlen = sizeof(struct skcipher_async_req) +
GET_REQ_SIZE(ctx) + GET_IV_SIZE(ctx);
int err = -ENOMEM;
bool mark = false;
lock_sock(sk);
req = kmalloc(reqlen, GFP_KERNEL);
if (unlikely(!req))
goto unlock;
sreq = GET_SREQ(req, ctx);
sreq->iocb = msg->msg_iocb;
memset(&sreq->first_sgl, '\0', sizeof(struct skcipher_async_rsgl));
INIT_LIST_HEAD(&sreq->list);
sreq->tsg = kcalloc(tx_nents, sizeof(*sg), GFP_KERNEL);
if (unlikely(!sreq->tsg)) {
kfree(req);
goto unlock;
}
sg_init_table(sreq->tsg, tx_nents);
memcpy(sreq->iv, ctx->iv, GET_IV_SIZE(ctx));
skcipher_request_set_tfm(req, crypto_skcipher_reqtfm(&ctx->req));
skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
skcipher_async_cb, sk);
while (iov_iter_count(&msg->msg_iter)) {
struct skcipher_async_rsgl *rsgl;
int used;
if (!ctx->used) {
err = skcipher_wait_for_data(sk, flags);
if (err)
goto free;
}
sgl = list_first_entry(&ctx->tsgl,
struct skcipher_sg_list, list);
sg = sgl->sg;
while (!sg->length)
sg++;
used = min_t(unsigned long, ctx->used,
iov_iter_count(&msg->msg_iter));
used = min_t(unsigned long, used, sg->length);
if (txbufs == tx_nents) {
struct scatterlist *tmp;
int x;
/* Ran out of tx slots in async request
* need to expand */
tmp = kcalloc(tx_nents * 2, sizeof(*tmp),
GFP_KERNEL);
if (!tmp)
goto free;
sg_init_table(tmp, tx_nents * 2);
for (x = 0; x < tx_nents; x++)
sg_set_page(&tmp[x], sg_page(&sreq->tsg[x]),
sreq->tsg[x].length,
sreq->tsg[x].offset);
kfree(sreq->tsg);
sreq->tsg = tmp;
tx_nents *= 2;
mark = true;
}
/* Need to take over the tx sgl from ctx
* to the asynch req - these sgls will be freed later */
sg_set_page(sreq->tsg + txbufs++, sg_page(sg), sg->length,
sg->offset);
if (list_empty(&sreq->list)) {
rsgl = &sreq->first_sgl;
list_add_tail(&rsgl->list, &sreq->list);
} else {
rsgl = kmalloc(sizeof(*rsgl), GFP_KERNEL);
if (!rsgl) {
err = -ENOMEM;
goto free;
}
list_add_tail(&rsgl->list, &sreq->list);
}
used = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, used);
err = used;
if (used < 0)
goto free;
if (last_rsgl)
af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);
last_rsgl = rsgl;
len += used;
skcipher_pull_sgl(sk, used, 0);
iov_iter_advance(&msg->msg_iter, used);
}
if (mark)
sg_mark_end(sreq->tsg + txbufs - 1);
skcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg,
len, sreq->iv);
err = ctx->enc ? crypto_skcipher_encrypt(req) :
crypto_skcipher_decrypt(req);
if (err == -EINPROGRESS) {
atomic_inc(&ctx->inflight);
err = -EIOCBQUEUED;
goto unlock;
}
free:
skcipher_free_async_sgls(sreq);
kfree(req);
unlock:
skcipher_wmem_wakeup(sk);
release_sock(sk);
return err;
}
| 15,026 |
110,968 | 0 | gfx::Rect RootWindowHostWin::GetBounds() const {
RECT r;
GetClientRect(hwnd(), &r);
return gfx::Rect(r);
}
| 15,027 |
70,552 | 0 | static void init_once(void *foo)
{
struct ext4_inode_info *ei = (struct ext4_inode_info *) foo;
INIT_LIST_HEAD(&ei->i_orphan);
init_rwsem(&ei->xattr_sem);
init_rwsem(&ei->i_data_sem);
init_rwsem(&ei->i_mmap_sem);
inode_init_once(&ei->vfs_inode);
}
| 15,028 |
14,062 | 0 | ProcRenderCreateGlyphSet (ClientPtr client)
{
GlyphSetPtr glyphSet;
PictFormatPtr format;
int rc, f;
REQUEST(xRenderCreateGlyphSetReq);
REQUEST_SIZE_MATCH(xRenderCreateGlyphSetReq);
LEGAL_NEW_RESOURCE(stuff->gsid, client);
rc = dixLookupResourceByType((pointer *)&format, stuff->format,
PictFormatType, client, DixReadAccess);
if (rc != Success)
return rc;
switch (format->depth) {
case 1:
f = GlyphFormat1;
break;
case 4:
f = GlyphFormat4;
break;
case 8:
f = GlyphFormat8;
break;
case 16:
f = GlyphFormat16;
break;
case 32:
f = GlyphFormat32;
break;
default:
return BadMatch;
}
if (format->type != PictTypeDirect)
return BadMatch;
glyphSet = AllocateGlyphSet (f, format);
if (!glyphSet)
return BadAlloc;
/* security creation/labeling check */
rc = XaceHook(XACE_RESOURCE_ACCESS, client, stuff->gsid, GlyphSetType,
glyphSet, RT_NONE, NULL, DixCreateAccess);
if (rc != Success)
return rc;
if (!AddResource (stuff->gsid, GlyphSetType, (pointer)glyphSet))
return BadAlloc;
return Success;
}
| 15,029 |
101,919 | 0 | PrintDialogGtk::~PrintDialogGtk() {
DCHECK(BrowserThread::CurrentlyOn(BrowserThread::UI));
if (dialog_) {
gtk_widget_destroy(dialog_);
dialog_ = NULL;
}
if (gtk_settings_) {
g_object_unref(gtk_settings_);
gtk_settings_ = NULL;
}
if (page_setup_) {
g_object_unref(page_setup_);
page_setup_ = NULL;
}
if (printer_) {
g_object_unref(printer_);
printer_ = NULL;
}
}
| 15,030 |
60,208 | 0 | static void __exit cleanup_encrypted(void)
{
crypto_free_shash(hash_tfm);
unregister_key_type(&key_type_encrypted);
}
| 15,031 |
137,091 | 0 | bool InputType::RangeUnderflow(const String& value) const {
if (!IsSteppable())
return false;
const Decimal numeric_value = ParseToNumberOrNaN(value);
if (!numeric_value.IsFinite())
return false;
return numeric_value < CreateStepRange(kRejectAny).Minimum();
}
| 15,032 |
149,291 | 0 | void DatabaseImpl::Count(
int64_t transaction_id,
int64_t object_store_id,
int64_t index_id,
const IndexedDBKeyRange& key_range,
::indexed_db::mojom::CallbacksAssociatedPtrInfo callbacks_info) {
scoped_refptr<IndexedDBCallbacks> callbacks(
new IndexedDBCallbacks(dispatcher_host_->AsWeakPtr(), origin_,
std::move(callbacks_info), idb_runner_));
idb_runner_->PostTask(
FROM_HERE, base::Bind(&IDBThreadHelper::Count, base::Unretained(helper_),
transaction_id, object_store_id, index_id,
key_range, base::Passed(&callbacks)));
}
| 15,033 |
185,229 | 1 | bool ParamTraits<AudioParameters>::Read(const Message* m,
PickleIterator* iter,
AudioParameters* r) {
int format, channel_layout, sample_rate, bits_per_sample,
frames_per_buffer, channels;
if (!m->ReadInt(iter, &format) ||
!m->ReadInt(iter, &channel_layout) ||
!m->ReadInt(iter, &sample_rate) ||
!m->ReadInt(iter, &bits_per_sample) ||
!m->ReadInt(iter, &frames_per_buffer) ||
!m->ReadInt(iter, &channels))
return false;
r->Reset(static_cast<AudioParameters::Format>(format),
static_cast<ChannelLayout>(channel_layout),
sample_rate, bits_per_sample, frames_per_buffer);
return true;
}
| 15,034 |
142,016 | 0 | static void Destroy(const internal::BindStateBase* self) {
delete static_cast<const FakeBindState*>(self);
}
| 15,035 |
42,231 | 0 | int vhost_poll_start(struct vhost_poll *poll, struct file *file)
{
unsigned long mask;
int ret = 0;
if (poll->wqh)
return 0;
mask = file->f_op->poll(file, &poll->table);
if (mask)
vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask);
if (mask & POLLERR) {
if (poll->wqh)
remove_wait_queue(poll->wqh, &poll->wait);
ret = -EINVAL;
}
return ret;
}
| 15,036 |
143,768 | 0 | void set_run_called() { run_called_ = true; }
| 15,037 |
91,160 | 0 | static int jpeg_embed(Image *ifile, Image *ofile, Image *iptc)
{
unsigned int marker;
unsigned int done = 0;
unsigned int len;
int inx;
if (jpeg_transfer_1(ifile, ofile) != 0xFF)
return 0;
if (jpeg_transfer_1(ifile, ofile) != M_SOI)
return 0;
while (done == MagickFalse)
{
marker=(unsigned int) jpeg_nextmarker(ifile, ofile);
if (marker == M_EOI)
{ /* EOF */
break;
}
else
{
if (marker != M_APP13)
{
(void) WriteBlobByte(ofile,0xff);
(void) WriteBlobByte(ofile,(unsigned char) marker);
}
}
switch (marker)
{
case M_APP13:
/* we are going to write a new APP13 marker, so don't output the old one */
jpeg_skip_variable2(ifile, ofile);
break;
case M_APP0:
/* APP0 is in each and every JPEG, so when we hit APP0 we insert our new APP13! */
jpeg_skip_variable(ifile, ofile);
if (iptc != (Image *) NULL)
{
char
psheader[] = "\xFF\xED\0\0Photoshop 3.0\0" "8BIM\x04\x04\0\0\0\0";
len=(unsigned int) GetBlobSize(iptc);
if (len & 1)
len++; /* make the length even */
psheader[2]=(char) ((len+16)>>8);
psheader[3]=(char) ((len+16)&0xff);
for (inx = 0; inx < 18; inx++)
(void) WriteBlobByte(ofile,(unsigned char) psheader[inx]);
jpeg_read_remaining(iptc, ofile);
len=(unsigned int) GetBlobSize(iptc);
if (len & 1)
(void) WriteBlobByte(ofile,0);
}
break;
case M_SOS:
/* we hit data, no more marker-inserting can be done! */
jpeg_read_remaining(ifile, ofile);
done = 1;
break;
default:
jpeg_skip_variable(ifile, ofile);
break;
}
}
return 1;
}
| 15,038 |
145,892 | 0 | gfx::Rect GetTransformedBounds(aura::Window* window) {
gfx::Rect bounds_in_screen = window->layer()->bounds();
::wm::ConvertRectToScreen(window->parent(), &bounds_in_screen);
gfx::RectF bounds(bounds_in_screen);
gfx::Transform transform(gfx::TransformAboutPivot(
gfx::ToFlooredPoint(bounds.origin()), window->layer()->transform()));
transform.TransformRect(&bounds);
return gfx::ToEnclosingRect(bounds);
}
| 15,039 |
186,178 | 1 | int WebContentsImpl::DownloadImage(
const GURL& url,
bool is_favicon,
uint32_t max_bitmap_size,
bool bypass_cache,
const WebContents::ImageDownloadCallback& callback) {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
static int next_image_download_id = 0;
const image_downloader::ImageDownloaderPtr& mojo_image_downloader =
GetMainFrame()->GetMojoImageDownloader();
const int download_id = ++next_image_download_id;
if (!mojo_image_downloader) {
// If the renderer process is dead (i.e. crash, or memory pressure on
// Android), the downloader service will be invalid. Pre-Mojo, this would
// hang the callback indefinetly since the IPC would be dropped. Now,
// respond with a 400 HTTP error code to indicate that something went wrong.
BrowserThread::PostTask(
BrowserThread::UI, FROM_HERE,
base::Bind(&WebContents::ImageDownloadCallback::Run,
base::Owned(new ImageDownloadCallback(callback)),
download_id, 400, url, std::vector<SkBitmap>(),
std::vector<gfx::Size>()));
return download_id;
}
image_downloader::DownloadRequestPtr req =
image_downloader::DownloadRequest::New();
req->url = mojo::String::From(url);
req->is_favicon = is_favicon;
req->max_bitmap_size = max_bitmap_size;
req->bypass_cache = bypass_cache;
mojo_image_downloader->DownloadImage(
std::move(req),
base::Bind(&DidDownloadImage, callback, download_id, url));
return download_id;
}
| 15,040 |
132,437 | 0 | void UsbDeviceImpl::Open(const OpenCallback& callback) {
DCHECK(thread_checker_.CalledOnValidThread());
#if defined(OS_CHROMEOS)
chromeos::PermissionBrokerClient* client =
chromeos::DBusThreadManager::Get()->GetPermissionBrokerClient();
DCHECK(client) << "Could not get permission broker client.";
client->OpenPath(
device_path_,
base::Bind(&UsbDeviceImpl::OnOpenRequestComplete, this, callback));
#else
blocking_task_runner_->PostTask(
FROM_HERE,
base::Bind(&UsbDeviceImpl::OpenOnBlockingThread, this, callback));
#endif // defined(OS_CHROMEOS)
}
| 15,041 |
106,937 | 0 | bool RenderBox::sizesToIntrinsicLogicalWidth(LogicalWidthType widthType) const
{
if (isFloating() || (isInlineBlockOrInlineTable() && !isHTMLMarquee()))
return true;
Length logicalWidth = (widthType == MaxLogicalWidth) ? style()->logicalMaxWidth() : style()->logicalWidth();
if (logicalWidth.type() == Intrinsic)
return true;
if (parent()->style()->overflowX() == OMARQUEE) {
EMarqueeDirection dir = parent()->style()->marqueeDirection();
if (dir == MAUTO || dir == MFORWARD || dir == MBACKWARD || dir == MLEFT || dir == MRIGHT)
return true;
}
if (parent()->isDeprecatedFlexibleBox()
&& (parent()->style()->boxOrient() == HORIZONTAL || parent()->style()->boxAlign() != BSTRETCH))
return true;
if (logicalWidth.type() == Auto && !(parent()->isDeprecatedFlexibleBox() && parent()->style()->boxOrient() == VERTICAL && parent()->style()->boxAlign() == BSTRETCH) && node() && (node()->hasTagName(inputTag) || node()->hasTagName(selectTag) || node()->hasTagName(buttonTag) || node()->hasTagName(textareaTag) || node()->hasTagName(legendTag)))
return true;
return false;
}
| 15,042 |
99,803 | 0 | void WebPluginDelegateStub::OnDidFinishManualLoading() {
delegate_->DidFinishManualLoading();
}
| 15,043 |
28,626 | 0 | void qeth_schedule_recovery(struct qeth_card *card)
{
QETH_CARD_TEXT(card, 2, "startrec");
if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0)
schedule_work(&card->kernel_thread_starter);
}
| 15,044 |
23,274 | 0 | static int decode_attr_mode(struct xdr_stream *xdr, uint32_t *bitmap, umode_t *mode)
{
uint32_t tmp;
__be32 *p;
int ret = 0;
*mode = 0;
if (unlikely(bitmap[1] & (FATTR4_WORD1_MODE - 1U)))
return -EIO;
if (likely(bitmap[1] & FATTR4_WORD1_MODE)) {
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
goto out_overflow;
tmp = be32_to_cpup(p);
*mode = tmp & ~S_IFMT;
bitmap[1] &= ~FATTR4_WORD1_MODE;
ret = NFS_ATTR_FATTR_MODE;
}
dprintk("%s: file mode=0%o\n", __func__, (unsigned int)*mode);
return ret;
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
}
| 15,045 |
122,689 | 0 | bool Extension::LoadManagedModeSites(
const DictionaryValue* content_pack_value,
string16* error) {
if (!content_pack_value->HasKey(keys::kContentPackSites))
return true;
FilePath::StringType site_list_str;
if (!content_pack_value->GetString(keys::kContentPackSites, &site_list_str)) {
*error = ASCIIToUTF16(errors::kInvalidContentPackSites);
return false;
}
content_pack_site_list_ = FilePath(site_list_str);
return true;
}
| 15,046 |
26,265 | 0 | context_switch(struct rq *rq, struct task_struct *prev,
struct task_struct *next)
{
struct mm_struct *mm, *oldmm;
prepare_task_switch(rq, prev, next);
mm = next->mm;
oldmm = prev->active_mm;
/*
* For paravirt, this is coupled with an exit in switch_to to
* combine the page table reload and the switch backend into
* one hypercall.
*/
arch_start_context_switch(prev);
if (!mm) {
next->active_mm = oldmm;
atomic_inc(&oldmm->mm_count);
enter_lazy_tlb(oldmm, next);
} else
switch_mm(oldmm, mm, next);
if (!prev->mm) {
prev->active_mm = NULL;
rq->prev_mm = oldmm;
}
/*
* Since the runqueue lock will be released by the next
* task (which is an invalid locking op but in the case
* of the scheduler it's an obvious special-case), so we
* do an early lockdep release here:
*/
#ifndef __ARCH_WANT_UNLOCKED_CTXSW
spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
#endif
/* Here we just switch the register state and the stack. */
switch_to(prev, next, prev);
barrier();
/*
* this_rq must be evaluated again because prev may have moved
* CPUs since it called schedule(), thus the 'rq' on its stack
* frame will be invalid.
*/
finish_task_switch(this_rq(), prev);
}
| 15,047 |
123,017 | 0 | void RenderWidgetHostImpl::OnMsgUnlockMouse() {
RejectMouseLockOrUnlockIfNecessary();
}
| 15,048 |
37,051 | 0 | static void free_nested(struct vcpu_vmx *vmx)
{
if (!vmx->nested.vmxon)
return;
vmx->nested.vmxon = false;
nested_release_vmcs12(vmx);
if (enable_shadow_vmcs)
free_vmcs(vmx->nested.current_shadow_vmcs);
/* Unpin physical memory we referred to in current vmcs02 */
if (vmx->nested.apic_access_page) {
nested_release_page(vmx->nested.apic_access_page);
vmx->nested.apic_access_page = NULL;
}
if (vmx->nested.virtual_apic_page) {
nested_release_page(vmx->nested.virtual_apic_page);
vmx->nested.virtual_apic_page = NULL;
}
nested_free_all_saved_vmcss(vmx);
}
| 15,049 |
116,131 | 0 | ResourceRequestInfoImpl* ResourceDispatcherHostImpl::CreateRequestInfo(
ResourceHandler* handler,
int child_id,
int route_id,
bool download,
ResourceContext* context) {
return new ResourceRequestInfoImpl(
handler,
PROCESS_TYPE_RENDERER,
child_id,
route_id,
0,
request_id_,
false, // is_main_frame
-1, // frame_id
false, // parent_is_main_frame
-1, // parent_frame_id
ResourceType::SUB_RESOURCE,
PAGE_TRANSITION_LINK,
0, // upload_size
download, // is_download
download, // allow_download
false, // has_user_gesture
WebKit::WebReferrerPolicyDefault,
context);
}
| 15,050 |
49,776 | 0 | static int arcmsr_hbaB_polling_ccbdone(struct AdapterControlBlock *acb,
struct CommandControlBlock *poll_ccb)
{
struct MessageUnit_B *reg = acb->pmuB;
struct ARCMSR_CDB *arcmsr_cdb;
struct CommandControlBlock *ccb;
uint32_t flag_ccb, poll_ccb_done = 0, poll_count = 0;
int index, rtn;
bool error;
polling_hbb_ccb_retry:
poll_count++;
/* clear doorbell interrupt */
writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
while(1){
index = reg->doneq_index;
flag_ccb = reg->done_qbuffer[index];
if (flag_ccb == 0) {
if (poll_ccb_done){
rtn = SUCCESS;
break;
}else {
msleep(25);
if (poll_count > 100){
rtn = FAILED;
break;
}
goto polling_hbb_ccb_retry;
}
}
reg->done_qbuffer[index] = 0;
index++;
/*if last index number set it to 0 */
index %= ARCMSR_MAX_HBB_POSTQUEUE;
reg->doneq_index = index;
/* check if command done with no error*/
arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));
ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
poll_ccb_done |= (ccb == poll_ccb) ? 1 : 0;
if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) {
printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
" poll command abort successfully \n"
,acb->host->host_no
,ccb->pcmd->device->id
,(u32)ccb->pcmd->device->lun
,ccb);
ccb->pcmd->result = DID_ABORT << 16;
arcmsr_ccb_complete(ccb);
continue;
}
printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
" command done ccb = '0x%p'"
"ccboutstandingcount = %d \n"
, acb->host->host_no
, ccb
, atomic_read(&acb->ccboutstandingcount));
continue;
}
error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
arcmsr_report_ccb_state(acb, ccb, error);
}
return rtn;
}
| 15,051 |
172,690 | 0 | status_t MediaPlayer::setVideoSurfaceTexture(
const sp<IGraphicBufferProducer>& bufferProducer)
{
ALOGV("setVideoSurfaceTexture");
Mutex::Autolock _l(mLock);
if (mPlayer == 0) return NO_INIT;
return mPlayer->setVideoSurfaceTexture(bufferProducer);
}
| 15,052 |
115,674 | 0 | MATCHER_P2(EqualsKeyEvent, keycode, pressed, "") {
return arg.keycode() == keycode && arg.pressed() == pressed;
}
| 15,053 |
88,193 | 0 | XML_GetInputContext(XML_Parser parser, int *offset, int *size) {
#ifdef XML_CONTEXT_BYTES
if (parser == NULL)
return NULL;
if (parser->m_eventPtr && parser->m_buffer) {
if (offset != NULL)
*offset = (int)(parser->m_eventPtr - parser->m_buffer);
if (size != NULL)
*size = (int)(parser->m_bufferEnd - parser->m_buffer);
return parser->m_buffer;
}
#else
(void)parser;
(void)offset;
(void)size;
#endif /* defined XML_CONTEXT_BYTES */
return (char *)0;
}
| 15,054 |
19,793 | 0 | i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
{
u32 plane, flip_mask;
int ret;
/* Check for any pending flips. As we only maintain a flip queue depth
* of 1, we can simply insert a WAIT for the next display flip prior
* to executing the batch and avoid stalling the CPU.
*/
for (plane = 0; flips >> plane; plane++) {
if (((flips >> plane) & 1) == 0)
continue;
if (plane)
flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
else
flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
ret = intel_ring_begin(ring, 2);
if (ret)
return ret;
intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
}
return 0;
}
| 15,055 |
123,583 | 0 | void InspectorAgentRegistry::discardAgents()
{
for (size_t i = 0; i < m_agents.size(); i++)
m_agents[i]->discardAgent();
}
| 15,056 |
9,388 | 0 | int ssl3_connect(SSL *s)
{
BUF_MEM *buf = NULL;
unsigned long Time = (unsigned long)time(NULL);
void (*cb) (const SSL *ssl, int type, int val) = NULL;
int ret = -1;
int new_state, state, skip = 0;
RAND_add(&Time, sizeof(Time), 0);
ERR_clear_error();
clear_sys_error();
if (s->info_callback != NULL)
cb = s->info_callback;
else if (s->ctx->info_callback != NULL)
cb = s->ctx->info_callback;
s->in_handshake++;
if (!SSL_in_init(s) || SSL_in_before(s))
SSL_clear(s);
#ifndef OPENSSL_NO_HEARTBEATS
/*
* If we're awaiting a HeartbeatResponse, pretend we already got and
* don't await it anymore, because Heartbeats don't make sense during
* handshakes anyway.
*/
if (s->tlsext_hb_pending) {
s->tlsext_hb_pending = 0;
s->tlsext_hb_seq++;
}
#endif
for (;;) {
state = s->state;
switch (s->state) {
case SSL_ST_RENEGOTIATE:
s->renegotiate = 1;
s->state = SSL_ST_CONNECT;
s->ctx->stats.sess_connect_renegotiate++;
/* break */
case SSL_ST_BEFORE:
case SSL_ST_CONNECT:
case SSL_ST_BEFORE | SSL_ST_CONNECT:
case SSL_ST_OK | SSL_ST_CONNECT:
s->server = 0;
if (cb != NULL)
cb(s, SSL_CB_HANDSHAKE_START, 1);
if ((s->version & 0xff00) != 0x0300) {
SSLerr(SSL_F_SSL3_CONNECT, ERR_R_INTERNAL_ERROR);
s->state = SSL_ST_ERR;
ret = -1;
goto end;
}
/* s->version=SSL3_VERSION; */
s->type = SSL_ST_CONNECT;
if (s->init_buf == NULL) {
if ((buf = BUF_MEM_new()) == NULL) {
ret = -1;
s->state = SSL_ST_ERR;
goto end;
}
if (!BUF_MEM_grow(buf, SSL3_RT_MAX_PLAIN_LENGTH)) {
ret = -1;
s->state = SSL_ST_ERR;
goto end;
}
s->init_buf = buf;
buf = NULL;
}
if (!ssl3_setup_buffers(s)) {
ret = -1;
goto end;
}
/* setup buffing BIO */
if (!ssl_init_wbio_buffer(s, 0)) {
ret = -1;
s->state = SSL_ST_ERR;
goto end;
}
/* don't push the buffering BIO quite yet */
ssl3_init_finished_mac(s);
s->state = SSL3_ST_CW_CLNT_HELLO_A;
s->ctx->stats.sess_connect++;
s->init_num = 0;
s->s3->flags &= ~SSL3_FLAGS_CCS_OK;
/*
* Should have been reset by ssl3_get_finished, too.
*/
s->s3->change_cipher_spec = 0;
break;
case SSL3_ST_CW_CLNT_HELLO_A:
case SSL3_ST_CW_CLNT_HELLO_B:
s->shutdown = 0;
ret = ssl3_client_hello(s);
if (ret <= 0)
goto end;
s->state = SSL3_ST_CR_SRVR_HELLO_A;
s->init_num = 0;
/* turn on buffering for the next lot of output */
if (s->bbio != s->wbio)
s->wbio = BIO_push(s->bbio, s->wbio);
break;
case SSL3_ST_CR_SRVR_HELLO_A:
case SSL3_ST_CR_SRVR_HELLO_B:
ret = ssl3_get_server_hello(s);
if (ret <= 0)
goto end;
if (s->hit) {
s->state = SSL3_ST_CR_FINISHED_A;
#ifndef OPENSSL_NO_TLSEXT
if (s->tlsext_ticket_expected) {
/* receive renewed session ticket */
s->state = SSL3_ST_CR_SESSION_TICKET_A;
}
#endif
} else
s->state = SSL3_ST_CR_CERT_A;
s->init_num = 0;
break;
case SSL3_ST_CR_CERT_A:
case SSL3_ST_CR_CERT_B:
#ifndef OPENSSL_NO_TLSEXT
/* Noop (ret = 0) for everything but EAP-FAST. */
ret = ssl3_check_finished(s);
if (ret < 0)
goto end;
if (ret == 1) {
s->hit = 1;
s->state = SSL3_ST_CR_FINISHED_A;
s->init_num = 0;
break;
}
#endif
/* Check if it is anon DH/ECDH, SRP auth */
/* or PSK */
if (!
(s->s3->tmp.
new_cipher->algorithm_auth & (SSL_aNULL | SSL_aSRP))
&& !(s->s3->tmp.new_cipher->algorithm_mkey & SSL_kPSK)) {
ret = ssl3_get_server_certificate(s);
if (ret <= 0)
goto end;
#ifndef OPENSSL_NO_TLSEXT
if (s->tlsext_status_expected)
s->state = SSL3_ST_CR_CERT_STATUS_A;
else
s->state = SSL3_ST_CR_KEY_EXCH_A;
} else {
skip = 1;
s->state = SSL3_ST_CR_KEY_EXCH_A;
}
#else
} else
skip = 1;
s->state = SSL3_ST_CR_KEY_EXCH_A;
#endif
s->init_num = 0;
break;
case SSL3_ST_CR_KEY_EXCH_A:
case SSL3_ST_CR_KEY_EXCH_B:
ret = ssl3_get_key_exchange(s);
if (ret <= 0)
goto end;
s->state = SSL3_ST_CR_CERT_REQ_A;
s->init_num = 0;
/*
* at this point we check that we have the required stuff from
* the server
*/
if (!ssl3_check_cert_and_algorithm(s)) {
ret = -1;
s->state = SSL_ST_ERR;
goto end;
}
break;
case SSL3_ST_CR_CERT_REQ_A:
case SSL3_ST_CR_CERT_REQ_B:
ret = ssl3_get_certificate_request(s);
if (ret <= 0)
goto end;
s->state = SSL3_ST_CR_SRVR_DONE_A;
s->init_num = 0;
break;
case SSL3_ST_CR_SRVR_DONE_A:
case SSL3_ST_CR_SRVR_DONE_B:
ret = ssl3_get_server_done(s);
if (ret <= 0)
goto end;
#ifndef OPENSSL_NO_SRP
if (s->s3->tmp.new_cipher->algorithm_mkey & SSL_kSRP) {
if ((ret = SRP_Calc_A_param(s)) <= 0) {
SSLerr(SSL_F_SSL3_CONNECT, SSL_R_SRP_A_CALC);
ssl3_send_alert(s, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR);
s->state = SSL_ST_ERR;
goto end;
}
}
#endif
if (s->s3->tmp.cert_req)
s->state = SSL3_ST_CW_CERT_A;
else
s->state = SSL3_ST_CW_KEY_EXCH_A;
s->init_num = 0;
break;
case SSL3_ST_CW_CERT_A:
case SSL3_ST_CW_CERT_B:
case SSL3_ST_CW_CERT_C:
case SSL3_ST_CW_CERT_D:
ret = ssl3_send_client_certificate(s);
if (ret <= 0)
goto end;
s->state = SSL3_ST_CW_KEY_EXCH_A;
s->init_num = 0;
break;
case SSL3_ST_CW_KEY_EXCH_A:
case SSL3_ST_CW_KEY_EXCH_B:
ret = ssl3_send_client_key_exchange(s);
if (ret <= 0)
goto end;
/*
* EAY EAY EAY need to check for DH fix cert sent back
*/
/*
* For TLS, cert_req is set to 2, so a cert chain of nothing is
* sent, but no verify packet is sent
*/
/*
* XXX: For now, we do not support client authentication in ECDH
* cipher suites with ECDH (rather than ECDSA) certificates. We
* need to skip the certificate verify message when client's
* ECDH public key is sent inside the client certificate.
*/
if (s->s3->tmp.cert_req == 1) {
s->state = SSL3_ST_CW_CERT_VRFY_A;
} else {
s->state = SSL3_ST_CW_CHANGE_A;
}
if (s->s3->flags & TLS1_FLAGS_SKIP_CERT_VERIFY) {
s->state = SSL3_ST_CW_CHANGE_A;
}
s->init_num = 0;
break;
case SSL3_ST_CW_CERT_VRFY_A:
case SSL3_ST_CW_CERT_VRFY_B:
ret = ssl3_send_client_verify(s);
if (ret <= 0)
goto end;
s->state = SSL3_ST_CW_CHANGE_A;
s->init_num = 0;
break;
case SSL3_ST_CW_CHANGE_A:
case SSL3_ST_CW_CHANGE_B:
ret = ssl3_send_change_cipher_spec(s,
SSL3_ST_CW_CHANGE_A,
SSL3_ST_CW_CHANGE_B);
if (ret <= 0)
goto end;
#if defined(OPENSSL_NO_TLSEXT) || defined(OPENSSL_NO_NEXTPROTONEG)
s->state = SSL3_ST_CW_FINISHED_A;
#else
if (s->s3->next_proto_neg_seen)
s->state = SSL3_ST_CW_NEXT_PROTO_A;
else
s->state = SSL3_ST_CW_FINISHED_A;
#endif
s->init_num = 0;
s->session->cipher = s->s3->tmp.new_cipher;
#ifdef OPENSSL_NO_COMP
s->session->compress_meth = 0;
#else
if (s->s3->tmp.new_compression == NULL)
s->session->compress_meth = 0;
else
s->session->compress_meth = s->s3->tmp.new_compression->id;
#endif
if (!s->method->ssl3_enc->setup_key_block(s)) {
ret = -1;
s->state = SSL_ST_ERR;
goto end;
}
if (!s->method->ssl3_enc->change_cipher_state(s,
SSL3_CHANGE_CIPHER_CLIENT_WRITE))
{
ret = -1;
s->state = SSL_ST_ERR;
goto end;
}
break;
#if !defined(OPENSSL_NO_TLSEXT) && !defined(OPENSSL_NO_NEXTPROTONEG)
case SSL3_ST_CW_NEXT_PROTO_A:
case SSL3_ST_CW_NEXT_PROTO_B:
ret = ssl3_send_next_proto(s);
if (ret <= 0)
goto end;
s->state = SSL3_ST_CW_FINISHED_A;
break;
#endif
case SSL3_ST_CW_FINISHED_A:
case SSL3_ST_CW_FINISHED_B:
ret = ssl3_send_finished(s,
SSL3_ST_CW_FINISHED_A,
SSL3_ST_CW_FINISHED_B,
s->method->
ssl3_enc->client_finished_label,
s->method->
ssl3_enc->client_finished_label_len);
if (ret <= 0)
goto end;
s->state = SSL3_ST_CW_FLUSH;
/* clear flags */
s->s3->flags &= ~SSL3_FLAGS_POP_BUFFER;
if (s->hit) {
s->s3->tmp.next_state = SSL_ST_OK;
if (s->s3->flags & SSL3_FLAGS_DELAY_CLIENT_FINISHED) {
s->state = SSL_ST_OK;
s->s3->flags |= SSL3_FLAGS_POP_BUFFER;
s->s3->delay_buf_pop_ret = 0;
}
} else {
#ifndef OPENSSL_NO_TLSEXT
/*
* Allow NewSessionTicket if ticket expected
*/
if (s->tlsext_ticket_expected)
s->s3->tmp.next_state = SSL3_ST_CR_SESSION_TICKET_A;
else
#endif
s->s3->tmp.next_state = SSL3_ST_CR_FINISHED_A;
}
s->init_num = 0;
break;
#ifndef OPENSSL_NO_TLSEXT
case SSL3_ST_CR_SESSION_TICKET_A:
case SSL3_ST_CR_SESSION_TICKET_B:
ret = ssl3_get_new_session_ticket(s);
if (ret <= 0)
goto end;
s->state = SSL3_ST_CR_FINISHED_A;
s->init_num = 0;
break;
case SSL3_ST_CR_CERT_STATUS_A:
case SSL3_ST_CR_CERT_STATUS_B:
ret = ssl3_get_cert_status(s);
if (ret <= 0)
goto end;
s->state = SSL3_ST_CR_KEY_EXCH_A;
s->init_num = 0;
break;
#endif
case SSL3_ST_CR_FINISHED_A:
case SSL3_ST_CR_FINISHED_B:
if (!s->s3->change_cipher_spec)
s->s3->flags |= SSL3_FLAGS_CCS_OK;
ret = ssl3_get_finished(s, SSL3_ST_CR_FINISHED_A,
SSL3_ST_CR_FINISHED_B);
if (ret <= 0)
goto end;
if (s->hit)
s->state = SSL3_ST_CW_CHANGE_A;
else
s->state = SSL_ST_OK;
s->init_num = 0;
break;
case SSL3_ST_CW_FLUSH:
s->rwstate = SSL_WRITING;
if (BIO_flush(s->wbio) <= 0) {
ret = -1;
goto end;
}
s->rwstate = SSL_NOTHING;
s->state = s->s3->tmp.next_state;
break;
case SSL_ST_OK:
/* clean a few things up */
ssl3_cleanup_key_block(s);
if (s->init_buf != NULL) {
BUF_MEM_free(s->init_buf);
s->init_buf = NULL;
}
/*
* If we are not 'joining' the last two packets, remove the
* buffering now
*/
if (!(s->s3->flags & SSL3_FLAGS_POP_BUFFER))
ssl_free_wbio_buffer(s);
/* else do it later in ssl3_write */
s->init_num = 0;
s->renegotiate = 0;
s->new_session = 0;
ssl_update_cache(s, SSL_SESS_CACHE_CLIENT);
if (s->hit)
s->ctx->stats.sess_hit++;
ret = 1;
/* s->server=0; */
s->handshake_func = ssl3_connect;
s->ctx->stats.sess_connect_good++;
if (cb != NULL)
cb(s, SSL_CB_HANDSHAKE_DONE, 1);
goto end;
/* break; */
case SSL_ST_ERR:
default:
SSLerr(SSL_F_SSL3_CONNECT, SSL_R_UNKNOWN_STATE);
ret = -1;
goto end;
/* break; */
}
/* did we do anything */
if (!s->s3->tmp.reuse_message && !skip) {
if (s->debug) {
if ((ret = BIO_flush(s->wbio)) <= 0)
goto end;
}
if ((cb != NULL) && (s->state != state)) {
new_state = s->state;
s->state = state;
cb(s, SSL_CB_CONNECT_LOOP, 1);
s->state = new_state;
}
}
skip = 0;
}
| 15,057 |
68,401 | 0 | static void perf_pmu_output_stop(struct perf_event *event)
{
struct perf_event *iter;
int err, cpu;
restart:
rcu_read_lock();
list_for_each_entry_rcu(iter, &event->rb->event_list, rb_entry) {
/*
* For per-CPU events, we need to make sure that neither they
* nor their children are running; for cpu==-1 events it's
* sufficient to stop the event itself if it's active, since
* it can't have children.
*/
cpu = iter->cpu;
if (cpu == -1)
cpu = READ_ONCE(iter->oncpu);
if (cpu == -1)
continue;
err = cpu_function_call(cpu, __perf_pmu_output_stop, event);
if (err == -EAGAIN) {
rcu_read_unlock();
goto restart;
}
}
rcu_read_unlock();
}
| 15,058 |
58,109 | 0 | SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
{
struct sched_param lp;
struct task_struct *p;
int retval;
if (!param || pid < 0)
return -EINVAL;
rcu_read_lock();
p = find_process_by_pid(pid);
retval = -ESRCH;
if (!p)
goto out_unlock;
retval = security_task_getscheduler(p);
if (retval)
goto out_unlock;
if (task_has_dl_policy(p)) {
retval = -EINVAL;
goto out_unlock;
}
lp.sched_priority = p->rt_priority;
rcu_read_unlock();
/*
* This one might sleep, we cannot do it with a spinlock held ...
*/
retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
return retval;
out_unlock:
rcu_read_unlock();
return retval;
}
| 15,059 |
129,380 | 0 | Shader* GetShaderInfoNotProgram(
GLuint client_id, const char* function_name) {
Shader* shader = GetShader(client_id);
if (!shader) {
if (GetProgram(client_id)) {
LOCAL_SET_GL_ERROR(
GL_INVALID_OPERATION, function_name, "program passed for shader");
} else {
LOCAL_SET_GL_ERROR(
GL_INVALID_VALUE, function_name, "unknown shader");
}
}
LogClientServiceForInfo(shader, client_id, function_name);
return shader;
}
| 15,060 |
149,043 | 0 | static int cannotBeFunction(Parse *pParse, struct SrcList_item *pFrom){
if( pFrom->fg.isTabFunc ){
sqlite3ErrorMsg(pParse, "'%s' is not a function", pFrom->zName);
return 1;
}
return 0;
}
| 15,061 |
182,732 | 1 | qedi_dbg_notice(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
const char *fmt, ...)
{
va_list va;
struct va_format vaf;
char nfunc[32];
memset(nfunc, 0, sizeof(nfunc));
memcpy(nfunc, func, sizeof(nfunc) - 1);
va_start(va, fmt);
vaf.fmt = fmt;
vaf.va = &va;
if (!(qedi_dbg_log & QEDI_LOG_NOTICE))
goto ret;
if (likely(qedi) && likely(qedi->pdev))
pr_notice("[%s]:[%s:%d]:%d: %pV",
dev_name(&qedi->pdev->dev), nfunc, line,
qedi->host_no, &vaf);
else
pr_notice("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
ret:
va_end(va);
}
| 15,062 |
156,094 | 0 | RequestQuery ParseQuery(const GURL& url) {
RequestQuery queries;
for (QueryIterator it(url); !it.IsAtEnd(); it.Advance()) {
std::string unescaped_query;
UnescapeBinaryURLComponent(
it.GetKey(), UnescapeRule::REPLACE_PLUS_WITH_SPACE, &unescaped_query);
queries[unescaped_query].push_back(it.GetUnescapedValue());
}
return queries;
}
| 15,063 |
154,107 | 0 | void GLES2DecoderImpl::DoVertexAttribI4i(
GLuint index, GLint v0, GLint v1, GLint v2, GLint v3) {
GLint v[4] = { v0, v1, v2, v3 };
if (SetVertexAttribValue("glVertexAttribI4i", index, v)) {
state_.SetGenericVertexAttribBaseType(
index, SHADER_VARIABLE_INT);
api()->glVertexAttribI4iFn(index, v0, v1, v2, v3);
}
}
| 15,064 |
86,172 | 0 | static int amd_pinconf_group_get(struct pinctrl_dev *pctldev,
unsigned int group,
unsigned long *config)
{
const unsigned *pins;
unsigned npins;
int ret;
ret = amd_get_group_pins(pctldev, group, &pins, &npins);
if (ret)
return ret;
if (amd_pinconf_get(pctldev, pins[0], config))
return -ENOTSUPP;
return 0;
}
| 15,065 |
73,613 | 0 | MagickExport IndexPacket *GetAuthenticIndexQueue(const Image *image)
{
CacheInfo
*restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickSignature);
if (cache_info->methods.get_authentic_indexes_from_handler !=
(GetAuthenticIndexesFromHandler) NULL)
return(cache_info->methods.get_authentic_indexes_from_handler(image));
assert(id < (int) cache_info->number_threads);
return(cache_info->nexus_info[id]->indexes);
}
| 15,066 |
99,215 | 0 | explicit PdfUnsupportedInfoBarDelegate(Browser* browser)
: LinkInfoBarDelegate(NULL),
browser_(browser) {
}
| 15,067 |
139,796 | 0 | AshVisibilityController() {}
| 15,068 |
33,822 | 0 | static int vq_access_ok(struct vhost_dev *d, unsigned int num,
struct vring_desc __user *desc,
struct vring_avail __user *avail,
struct vring_used __user *used)
{
size_t s = vhost_has_feature(d, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
return access_ok(VERIFY_READ, desc, num * sizeof *desc) &&
access_ok(VERIFY_READ, avail,
sizeof *avail + num * sizeof *avail->ring + s) &&
access_ok(VERIFY_WRITE, used,
sizeof *used + num * sizeof *used->ring + s);
}
| 15,069 |
96,731 | 0 | MagickExport ChannelStatistics *GetImageStatistics(const Image *image,
ExceptionInfo *exception)
{
ChannelStatistics
*channel_statistics;
double
area,
*histogram,
standard_deviation;
MagickStatusType
status;
QuantumAny
range;
register ssize_t
i;
size_t
depth;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,GetPixelChannels(image)*
sizeof(*histogram));
channel_statistics=(ChannelStatistics *) AcquireQuantumMemory(
MaxPixelChannels+1,sizeof(*channel_statistics));
if ((channel_statistics == (ChannelStatistics *) NULL) ||
(histogram == (double *) NULL))
{
if (histogram != (double *) NULL)
histogram=(double *) RelinquishMagickMemory(histogram);
if (channel_statistics != (ChannelStatistics *) NULL)
channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
channel_statistics);
return(channel_statistics);
}
(void) memset(channel_statistics,0,(MaxPixelChannels+1)*
sizeof(*channel_statistics));
for (i=0; i <= (ssize_t) MaxPixelChannels; i++)
{
channel_statistics[i].depth=1;
channel_statistics[i].maxima=(-MagickMaximumValue);
channel_statistics[i].minima=MagickMaximumValue;
}
(void) memset(histogram,0,(MaxMap+1)*GetPixelChannels(image)*
sizeof(*histogram));
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
/*
Compute pixel statistics.
*/
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
if (GetPixelReadMask(image,p) <= (QuantumRange/2))
{
p+=GetPixelChannels(image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((traits & UpdatePixelTrait) == 0)
continue;
if (channel_statistics[channel].depth != MAGICKCORE_QUANTUM_DEPTH)
{
depth=channel_statistics[channel].depth;
range=GetQuantumRange(depth);
status=p[i] != ScaleAnyToQuantum(ScaleQuantumToAny(p[i],range),
range) ? MagickTrue : MagickFalse;
if (status != MagickFalse)
{
channel_statistics[channel].depth++;
i--;
continue;
}
}
if ((double) p[i] < channel_statistics[channel].minima)
channel_statistics[channel].minima=(double) p[i];
if ((double) p[i] > channel_statistics[channel].maxima)
channel_statistics[channel].maxima=(double) p[i];
channel_statistics[channel].sum+=p[i];
channel_statistics[channel].sum_squared+=(double) p[i]*p[i];
channel_statistics[channel].sum_cubed+=(double) p[i]*p[i]*p[i];
channel_statistics[channel].sum_fourth_power+=(double) p[i]*p[i]*p[i]*
p[i];
channel_statistics[channel].area++;
if ((double) p[i] < channel_statistics[CompositePixelChannel].minima)
channel_statistics[CompositePixelChannel].minima=(double) p[i];
if ((double) p[i] > channel_statistics[CompositePixelChannel].maxima)
channel_statistics[CompositePixelChannel].maxima=(double) p[i];
histogram[GetPixelChannels(image)*ScaleQuantumToMap(
ClampToQuantum((double) p[i]))+i]++;
channel_statistics[CompositePixelChannel].sum+=(double) p[i];
channel_statistics[CompositePixelChannel].sum_squared+=(double)
p[i]*p[i];
channel_statistics[CompositePixelChannel].sum_cubed+=(double)
p[i]*p[i]*p[i];
channel_statistics[CompositePixelChannel].sum_fourth_power+=(double)
p[i]*p[i]*p[i]*p[i];
channel_statistics[CompositePixelChannel].area++;
}
p+=GetPixelChannels(image);
}
}
for (i=0; i <= (ssize_t) MaxPixelChannels; i++)
{
/*
Normalize pixel statistics.
*/
area=PerceptibleReciprocal(channel_statistics[i].area);
channel_statistics[i].sum*=area;
channel_statistics[i].sum_squared*=area;
channel_statistics[i].sum_cubed*=area;
channel_statistics[i].sum_fourth_power*=area;
channel_statistics[i].mean=channel_statistics[i].sum;
channel_statistics[i].variance=channel_statistics[i].sum_squared;
standard_deviation=sqrt(channel_statistics[i].variance-
(channel_statistics[i].mean*channel_statistics[i].mean));
standard_deviation=sqrt(PerceptibleReciprocal(channel_statistics[i].area-
1.0)*channel_statistics[i].area*standard_deviation*standard_deviation);
channel_statistics[i].standard_deviation=standard_deviation;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
number_bins;
register ssize_t
j;
/*
Compute pixel entropy.
*/
PixelChannel channel = GetPixelChannelChannel(image,i);
number_bins=0.0;
for (j=0; j <= (ssize_t) MaxMap; j++)
if (histogram[GetPixelChannels(image)*j+i] > 0.0)
number_bins++;
area=PerceptibleReciprocal(channel_statistics[channel].area);
for (j=0; j <= (ssize_t) MaxMap; j++)
{
double
count;
count=area*histogram[GetPixelChannels(image)*j+i];
channel_statistics[channel].entropy+=-count*MagickLog10(count)*
PerceptibleReciprocal(MagickLog10(number_bins));
channel_statistics[CompositePixelChannel].entropy+=-count*
MagickLog10(count)*PerceptibleReciprocal(MagickLog10(number_bins))/
GetPixelChannels(image);
}
}
histogram=(double *) RelinquishMagickMemory(histogram);
for (i=0; i <= (ssize_t) MaxPixelChannels; i++)
{
/*
Compute kurtosis & skewness statistics.
*/
standard_deviation=PerceptibleReciprocal(
channel_statistics[i].standard_deviation);
channel_statistics[i].skewness=(channel_statistics[i].sum_cubed-3.0*
channel_statistics[i].mean*channel_statistics[i].sum_squared+2.0*
channel_statistics[i].mean*channel_statistics[i].mean*
channel_statistics[i].mean)*(standard_deviation*standard_deviation*
standard_deviation);
channel_statistics[i].kurtosis=(channel_statistics[i].sum_fourth_power-4.0*
channel_statistics[i].mean*channel_statistics[i].sum_cubed+6.0*
channel_statistics[i].mean*channel_statistics[i].mean*
channel_statistics[i].sum_squared-3.0*channel_statistics[i].mean*
channel_statistics[i].mean*1.0*channel_statistics[i].mean*
channel_statistics[i].mean)*(standard_deviation*standard_deviation*
standard_deviation*standard_deviation)-3.0;
}
channel_statistics[CompositePixelChannel].mean=0.0;
channel_statistics[CompositePixelChannel].standard_deviation=0.0;
channel_statistics[CompositePixelChannel].entropy=0.0;
for (i=0; i < (ssize_t) MaxPixelChannels; i++)
{
channel_statistics[CompositePixelChannel].mean+=
channel_statistics[i].mean;
channel_statistics[CompositePixelChannel].standard_deviation+=
channel_statistics[i].standard_deviation;
channel_statistics[CompositePixelChannel].entropy+=
channel_statistics[i].entropy;
}
channel_statistics[CompositePixelChannel].mean/=(double)
GetImageChannels(image);
channel_statistics[CompositePixelChannel].standard_deviation/=(double)
GetImageChannels(image);
channel_statistics[CompositePixelChannel].entropy/=(double)
GetImageChannels(image);
if (y < (ssize_t) image->rows)
channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
channel_statistics);
return(channel_statistics);
}
| 15,070 |
155,778 | 0 | void DiceResponseHandler::DeleteTokenFetcher(DiceTokenFetcher* token_fetcher) {
for (auto it = token_fetchers_.begin(); it != token_fetchers_.end(); ++it) {
if (it->get() == token_fetcher) {
token_fetchers_.erase(it);
return;
}
}
NOTREACHED();
}
| 15,071 |
55,352 | 0 | static int atl2_up(struct atl2_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
int err = 0;
u32 val;
/* hardware has been reset, we need to reload some things */
err = atl2_init_hw(&adapter->hw);
if (err) {
err = -EIO;
return err;
}
atl2_set_multi(netdev);
init_ring_ptrs(adapter);
atl2_restore_vlan(adapter);
if (atl2_configure(adapter)) {
err = -EIO;
goto err_up;
}
clear_bit(__ATL2_DOWN, &adapter->flags);
val = ATL2_READ_REG(&adapter->hw, REG_MASTER_CTRL);
ATL2_WRITE_REG(&adapter->hw, REG_MASTER_CTRL, val |
MASTER_CTRL_MANUAL_INT);
atl2_irq_enable(adapter);
err_up:
return err;
}
| 15,072 |
36,523 | 0 | void snd_card_info_read_oss(struct snd_info_buffer *buffer)
{
int idx, count;
struct snd_card *card;
for (idx = count = 0; idx < SNDRV_CARDS; idx++) {
mutex_lock(&snd_card_mutex);
if ((card = snd_cards[idx]) != NULL) {
count++;
snd_iprintf(buffer, "%s\n", card->longname);
}
mutex_unlock(&snd_card_mutex);
}
if (!count) {
snd_iprintf(buffer, "--- no soundcards ---\n");
}
}
| 15,073 |
28,988 | 0 | int ssl_close_notify( ssl_context *ssl )
{
int ret;
SSL_DEBUG_MSG( 2, ( "=> write close notify" ) );
if( ( ret = ssl_flush_output( ssl ) ) != 0 )
{
SSL_DEBUG_RET( 1, "ssl_flush_output", ret );
return( ret );
}
if( ssl->state == SSL_HANDSHAKE_OVER )
{
if( ( ret = ssl_send_alert_message( ssl,
SSL_ALERT_LEVEL_WARNING,
SSL_ALERT_MSG_CLOSE_NOTIFY ) ) != 0 )
{
return( ret );
}
}
SSL_DEBUG_MSG( 2, ( "<= write close notify" ) );
return( ret );
}
| 15,074 |
66,495 | 0 | static void fill_skb_pool(rtl8150_t *dev)
{
struct sk_buff *skb;
int i;
for (i = 0; i < RX_SKB_POOL_SIZE; i++) {
if (dev->rx_skb_pool[i])
continue;
skb = dev_alloc_skb(RTL8150_MTU + 2);
if (!skb) {
return;
}
skb_reserve(skb, 2);
dev->rx_skb_pool[i] = skb;
}
}
| 15,075 |
177,439 | 0 | long SeekHead::Parse() {
IMkvReader* const pReader = m_pSegment->m_pReader;
long long pos = m_start;
const long long stop = m_start + m_size;
int entry_count = 0;
int void_element_count = 0;
while (pos < stop) {
long long id, size;
const long status = ParseElementHeader(pReader, pos, stop, id, size);
if (status < 0) // error
return status;
if (id == 0x0DBB) // SeekEntry ID
++entry_count;
else if (id == 0x6C) // Void ID
++void_element_count;
pos += size; // consume payload
if (pos > stop)
return E_FILE_FORMAT_INVALID;
}
if (pos != stop)
return E_FILE_FORMAT_INVALID;
m_entries = new (std::nothrow) Entry[entry_count];
if (m_entries == NULL)
return -1;
m_void_elements = new (std::nothrow) VoidElement[void_element_count];
if (m_void_elements == NULL)
return -1;
Entry* pEntry = m_entries;
VoidElement* pVoidElement = m_void_elements;
pos = m_start;
while (pos < stop) {
const long long idpos = pos;
long long id, size;
const long status = ParseElementHeader(pReader, pos, stop, id, size);
if (status < 0) // error
return status;
if (id == 0x0DBB) { // SeekEntry ID
if (ParseEntry(pReader, pos, size, pEntry)) {
Entry& e = *pEntry++;
e.element_start = idpos;
e.element_size = (pos + size) - idpos;
}
} else if (id == 0x6C) { // Void ID
VoidElement& e = *pVoidElement++;
e.element_start = idpos;
e.element_size = (pos + size) - idpos;
}
pos += size; // consume payload
if (pos > stop)
return E_FILE_FORMAT_INVALID;
}
if (pos != stop)
return E_FILE_FORMAT_INVALID;
ptrdiff_t count_ = ptrdiff_t(pEntry - m_entries);
assert(count_ >= 0);
assert(count_ <= entry_count);
m_entry_count = static_cast<int>(count_);
count_ = ptrdiff_t(pVoidElement - m_void_elements);
assert(count_ >= 0);
assert(count_ <= void_element_count);
m_void_element_count = static_cast<int>(count_);
return 0;
}
| 15,076 |
123,001 | 0 | void RenderWidgetHostImpl::OnMsgDidActivateAcceleratedCompositing(
bool activated) {
TRACE_EVENT1("renderer_host",
"RenderWidgetHostImpl::OnMsgDidActivateAcceleratedCompositing",
"activated", activated);
is_accelerated_compositing_active_ = activated;
if (view_)
view_->OnAcceleratedCompositingStateChange();
}
| 15,077 |
173,952 | 0 | Block::Lacing Block::GetLacing() const {
const int value = int(m_flags & 0x06) >> 1;
return static_cast<Lacing>(value);
}
| 15,078 |
36,451 | 0 | static int snd_ctl_dev_free(struct snd_device *device)
{
struct snd_card *card = device->device_data;
struct snd_kcontrol *control;
down_write(&card->controls_rwsem);
while (!list_empty(&card->controls)) {
control = snd_kcontrol(card->controls.next);
snd_ctl_remove(card, control);
}
up_write(&card->controls_rwsem);
return 0;
}
| 15,079 |
97,226 | 0 | void WebFrameLoaderClient::provisionalLoadStarted() {
}
| 15,080 |
116,002 | 0 | string16 ExtensionGlobalError::GetBubbleViewTitle() {
return l10n_util::GetStringUTF16(IDS_EXTENSION_ALERT_TITLE);
}
| 15,081 |
135,680 | 0 | void FrameSelection::SetSelectionFromNone() {
Document* document = frame_->GetDocument();
if (!ComputeVisibleSelectionInDOMTreeDeprecated().IsNone() ||
!(blink::HasEditableStyle(*document)))
return;
Element* document_element = document->documentElement();
if (!document_element)
return;
if (HTMLBodyElement* body =
Traversal<HTMLBodyElement>::FirstChild(*document_element)) {
SetSelection(SelectionInDOMTree::Builder()
.Collapse(FirstPositionInOrBeforeNode(body))
.Build());
}
}
| 15,082 |
144,700 | 0 | void WebContentsImpl::UpdateState(RenderViewHost* rvh,
int32_t page_id,
const PageState& page_state) {
DCHECK(!SiteIsolationPolicy::UseSubframeNavigationEntries());
if (rvh->GetDelegate()->GetAsWebContents() != this)
return;
RenderViewHostImpl* rvhi = static_cast<RenderViewHostImpl*>(rvh);
NavigationEntryImpl* entry = controller_.GetEntryWithPageID(
rvhi->GetSiteInstance(), page_id);
if (!entry)
return;
if (rvhi->GetMainFrame()) {
NavigationEntryImpl* new_entry = controller_.GetEntryWithUniqueID(
static_cast<RenderFrameHostImpl*>(rvhi->GetMainFrame())
->nav_entry_id());
DCHECK_EQ(entry, new_entry);
}
if (page_state == entry->GetPageState())
return; // Nothing to update.
entry->SetPageState(page_state);
controller_.NotifyEntryChanged(entry);
}
| 15,083 |
140,924 | 0 | void PresentationConnection::send(Blob* data, ExceptionState& exceptionState) {
ASSERT(data);
if (!canSendMessage(exceptionState))
return;
m_messages.append(new Message(data->blobDataHandle()));
handleMessageQueue();
}
| 15,084 |
30,300 | 0 | stack_trace_call(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct pt_regs *pt_regs)
{
int cpu;
preempt_disable_notrace();
cpu = raw_smp_processor_id();
/* no atomic needed, we only modify this variable by this cpu */
if (per_cpu(trace_active, cpu)++ != 0)
goto out;
check_stack();
out:
per_cpu(trace_active, cpu)--;
/* prevent recursion in schedule */
preempt_enable_notrace();
}
| 15,085 |
80,450 | 0 | GF_Box *stri_New()
{
ISOM_DECL_BOX_ALLOC(GF_SubTrackInformationBox, GF_ISOM_BOX_TYPE_STRI);
return (GF_Box *)tmp;
}
| 15,086 |
125,724 | 0 | base::i18n::TextDirection WebTextDirectionToChromeTextDirection(
WebKit::WebTextDirection dir) {
switch (dir) {
case WebKit::WebTextDirectionLeftToRight:
return base::i18n::LEFT_TO_RIGHT;
case WebKit::WebTextDirectionRightToLeft:
return base::i18n::RIGHT_TO_LEFT;
default:
NOTREACHED();
return base::i18n::UNKNOWN_DIRECTION;
}
}
| 15,087 |
76,837 | 0 | decode_OFPAT_RAW_SET_DL_SRC(const struct ofp_action_dl_addr *a,
enum ofp_version ofp_version OVS_UNUSED,
struct ofpbuf *out)
{
ofpact_put_SET_ETH_SRC(out)->mac = a->dl_addr;
return 0;
}
| 15,088 |
11,385 | 0 | fbCombineMaskU (CARD32 *src, const CARD32 *mask, int width)
{
int i;
for (i = 0; i < width; ++i) {
CARD32 a = READ(mask + i) >> 24;
CARD32 s = READ(src + i);
FbByteMul(s, a);
WRITE(src + i, s);
}
}
| 15,089 |
89,984 | 0 | size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params)
{
return ZSTD_CCtxParams_init(params, ZSTD_CLEVEL_DEFAULT);
}
| 15,090 |
137,294 | 0 | void Textfield::SetStyle(gfx::TextStyle style, bool value) {
GetRenderText()->SetStyle(style, value);
SchedulePaint();
}
| 15,091 |
137,878 | 0 | void MediaControlFullscreenButtonElement::setIsFullscreen(bool isFullscreen) {
setDisplayType(isFullscreen ? MediaExitFullscreenButton
: MediaEnterFullscreenButton);
}
| 15,092 |
151,655 | 0 | bool Browser::CanCloseWithInProgressDownloads() {
if (cancel_download_confirmation_state_ != NOT_PROMPTED)
return cancel_download_confirmation_state_ != WAITING_FOR_RESPONSE;
int num_downloads_blocking;
Browser::DownloadClosePreventionType dialog_type =
OkToCloseWithInProgressDownloads(&num_downloads_blocking);
if (dialog_type == DOWNLOAD_CLOSE_OK)
return true;
cancel_download_confirmation_state_ = WAITING_FOR_RESPONSE;
window_->ConfirmBrowserCloseWithPendingDownloads(
num_downloads_blocking,
dialog_type,
false,
base::Bind(&Browser::InProgressDownloadResponse,
weak_factory_.GetWeakPtr()));
return false;
}
| 15,093 |
167,680 | 0 | void WebRuntimeFeatures::EnableAdTagging(bool enable) {
RuntimeEnabledFeatures::SetAdTaggingEnabled(enable);
}
| 15,094 |
103,147 | 0 | void Browser::OpenClearBrowsingDataDialog() {
UserMetrics::RecordAction(UserMetricsAction("ClearBrowsingData_ShowDlg"),
profile_);
ShowOptionsTab(chrome::kClearBrowserDataSubPage);
}
| 15,095 |
135,091 | 0 | MockFrontend()
: last_host_id_(-222), last_cache_id_(-222),
last_status_(APPCACHE_STATUS_OBSOLETE),
last_status_changed_(APPCACHE_STATUS_OBSOLETE),
last_event_id_(APPCACHE_OBSOLETE_EVENT),
content_blocked_(false) {
}
| 15,096 |
63,582 | 0 | xsmp_get_app_name (GsmClient *client)
{
SmProp *prop;
char *name;
prop = find_property (GSM_XSMP_CLIENT (client), SmProgram, NULL);
name = prop_to_command (prop);
return name;
}
| 15,097 |
78,766 | 0 | static int muscle_pin_cmd(sc_card_t *card, struct sc_pin_cmd_data *cmd,
int *tries_left)
{
muscle_private_t* priv = MUSCLE_DATA(card);
const int bufferLength = MSC_MAX_PIN_COMMAND_LENGTH;
u8 buffer[MSC_MAX_PIN_COMMAND_LENGTH];
switch(cmd->cmd) {
case SC_PIN_CMD_VERIFY:
switch(cmd->pin_type) {
case SC_AC_CHV: {
sc_apdu_t apdu;
int r;
msc_verify_pin_apdu(card, &apdu, buffer, bufferLength, cmd->pin_reference, cmd->pin1.data, cmd->pin1.len);
cmd->apdu = &apdu;
cmd->pin1.offset = 5;
r = iso_ops->pin_cmd(card, cmd, tries_left);
if(r >= 0)
priv->verifiedPins |= (1 << cmd->pin_reference);
return r;
}
case SC_AC_TERM:
case SC_AC_PRO:
case SC_AC_AUT:
case SC_AC_NONE:
default:
sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL, "Unsupported authentication method\n");
return SC_ERROR_NOT_SUPPORTED;
}
case SC_PIN_CMD_CHANGE:
switch(cmd->pin_type) {
case SC_AC_CHV: {
sc_apdu_t apdu;
msc_change_pin_apdu(card, &apdu, buffer, bufferLength, cmd->pin_reference, cmd->pin1.data, cmd->pin1.len, cmd->pin2.data, cmd->pin2.len);
cmd->apdu = &apdu;
return iso_ops->pin_cmd(card, cmd, tries_left);
}
case SC_AC_TERM:
case SC_AC_PRO:
case SC_AC_AUT:
case SC_AC_NONE:
default:
sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL, "Unsupported authentication method\n");
return SC_ERROR_NOT_SUPPORTED;
}
case SC_PIN_CMD_UNBLOCK:
switch(cmd->pin_type) {
case SC_AC_CHV: {
sc_apdu_t apdu;
msc_unblock_pin_apdu(card, &apdu, buffer, bufferLength, cmd->pin_reference, cmd->pin1.data, cmd->pin1.len);
cmd->apdu = &apdu;
return iso_ops->pin_cmd(card, cmd, tries_left);
}
case SC_AC_TERM:
case SC_AC_PRO:
case SC_AC_AUT:
case SC_AC_NONE:
default:
sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL, "Unsupported authentication method\n");
return SC_ERROR_NOT_SUPPORTED;
}
default:
sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL, "Unsupported command\n");
return SC_ERROR_NOT_SUPPORTED;
}
}
| 15,098 |
137,542 | 0 | bool PrintWebViewHelper::PrintPreviewContext::IsFinalPageRendered() const {
DCHECK(IsRendering());
return static_cast<size_t>(current_page_index_) == pages_to_render_.size();
}
| 15,099 |