code: string (lengths 12 – 2.05k)
label_name: string (5 classes; values seen in this split include Base, Class, Variant)
label: int64 (range 0 – 4)
static CPINLINE zend_class_entry* swoole_try_get_ce(zend_string *class_name) { //user class , do not support incomplete class now zend_class_entry *ce = zend_lookup_class(class_name); if (ce) { return ce; } // try call unserialize callback and retry lookup zval user_func, args[1], retval; /* Check for unserialize callback */ if ((PG(unserialize_callback_func) == NULL) || (PG(unserialize_callback_func)[0] == '\0')) { zend_throw_exception_ex(NULL, 0, "can not find class %s", class_name->val TSRMLS_CC); return NULL; } zend_string *fname = swoole_string_init(ZEND_STRL(PG(unserialize_callback_func))); Z_STR(user_func) = fname; Z_TYPE_INFO(user_func) = IS_STRING_EX; ZVAL_STR(&args[0], class_name); call_user_function_ex(CG(function_table), NULL, &user_func, &retval, 1, args, 0, NULL); swoole_string_release(fname); //user class , do not support incomplete class now ce = zend_lookup_class(class_name); if (!ce) { zend_throw_exception_ex(NULL, 0, "can not find class %s", class_name->val TSRMLS_CC); return NULL; } else { return ce; } }
Base
1
MONGO_EXPORT int bson_append_code_w_scope_n( bson *b, const char *name, const char *code, int len, const bson *scope ) { int sl, size; if ( !scope ) return BSON_ERROR; sl = len + 1; size = 4 + 4 + sl + bson_size( scope ); if ( bson_append_estart( b, BSON_CODEWSCOPE, name, size ) == BSON_ERROR ) return BSON_ERROR; bson_append32( b, &size ); bson_append32( b, &sl ); bson_append( b, code, sl ); bson_append( b, scope->data, bson_size( scope ) ); return BSON_OK; }
Base
1
void jas_matrix_bindsub(jas_matrix_t *mat0, jas_matrix_t *mat1, int r0, int c0, int r1, int c1) { int i; if (mat0->data_) { if (!(mat0->flags_ & JAS_MATRIX_REF)) { jas_free(mat0->data_); } mat0->data_ = 0; mat0->datasize_ = 0; } if (mat0->rows_) { jas_free(mat0->rows_); mat0->rows_ = 0; } mat0->flags_ |= JAS_MATRIX_REF; mat0->numrows_ = r1 - r0 + 1; mat0->numcols_ = c1 - c0 + 1; mat0->maxrows_ = mat0->numrows_; if (!(mat0->rows_ = jas_alloc2(mat0->maxrows_, sizeof(jas_seqent_t *)))) { /* There is no way to indicate failure to the caller. So, we have no choice but to abort. Ideally, this function should have a non-void return type. In practice, a non-void return type probably would not help much anyways as the caller would just have to terminate anyways. */ abort(); } for (i = 0; i < mat0->numrows_; ++i) { mat0->rows_[i] = mat1->rows_[r0 + i] + c0; } mat0->xstart_ = mat1->xstart_ + c0; mat0->ystart_ = mat1->ystart_ + r0; mat0->xend_ = mat0->xstart_ + mat0->numcols_; mat0->yend_ = mat0->ystart_ + mat0->numrows_; }
Base
1
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* lookup = GetInput(context, node, 0); TF_LITE_ENSURE_EQ(context, NumDimensions(lookup), 1); TF_LITE_ENSURE_EQ(context, lookup->type, kTfLiteInt32); const TfLiteTensor* value = GetInput(context, node, 1); TF_LITE_ENSURE(context, NumDimensions(value) >= 2); TfLiteTensor* output = GetOutput(context, node, 0); TfLiteIntArray* outputSize = TfLiteIntArrayCreate(NumDimensions(value)); outputSize->data[0] = SizeOfDimension(lookup, 0); outputSize->data[1] = SizeOfDimension(value, 1); for (int i = 2; i < NumDimensions(value); i++) { outputSize->data[i] = SizeOfDimension(value, i); } return context->ResizeTensor(context, output, outputSize); }
Base
1
inline typename V::VectorType FBUnserializer<V>::unserializeList() { p_ += CODE_SIZE; // the list size is written so we can reserve it in the vector // in future. Skip past it for now. unserializeInt64(); typename V::VectorType ret = V::createVector(); size_t code = nextCode(); while (code != FB_SERIALIZE_STOP) { V::vectorAppend(ret, unserializeThing()); code = nextCode(); } p_ += CODE_SIZE; return ret; }
Class
2
TEST_CASE_METHOD(TestFixture, "ECDSA AES keygen and signature test", "[ecdsa-aes-key-sig-gen]") { vector<char> errMsg(BUF_LEN, 0); int errStatus = 0; vector <uint8_t> encrPrivKey(BUF_LEN, 0); vector<char> pubKeyX(BUF_LEN, 0); vector<char> pubKeyY(BUF_LEN, 0); uint32_t encLen = 0; PRINT_SRC_LINE auto status = trustedGenerateEcdsaKeyAES(eid, &errStatus, errMsg.data(), encrPrivKey.data(), &encLen, pubKeyX.data(), pubKeyY.data()); REQUIRE(status == SGX_SUCCESS); REQUIRE(errStatus == SGX_SUCCESS); string hex = SAMPLE_HEX_HASH; vector<char> signatureR(BUF_LEN, 0); vector<char> signatureS(BUF_LEN, 0); uint8_t signatureV = 0; for (int i = 0; i < 50; i++) { PRINT_SRC_LINE status = trustedEcdsaSignAES(eid, &errStatus, errMsg.data(), encrPrivKey.data(), encLen, hex.data(), signatureR.data(), signatureS.data(), &signatureV, 16); REQUIRE(status == SGX_SUCCESS); REQUIRE(errStatus == SGX_SUCCESS); } }
Base
1
TfLiteStatus MockCustom::Invoke(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = tflite::GetInput(context, node, 0); const int32_t* input_data = input->data.i32; const TfLiteTensor* weight = tflite::GetInput(context, node, 1); const uint8_t* weight_data = weight->data.uint8; TfLiteTensor* output = GetOutput(context, node, 0); int32_t* output_data = output->data.i32; output_data[0] = 0; // Catch output tensor sharing memory with an input tensor output_data[0] = input_data[0] + weight_data[0]; return kTfLiteOk; }
Base
1
void CalculateOutputIndexRowSplit( OpKernelContext* context, const RowPartitionTensor& row_split, const vector<INDEX_TYPE>& parent_output_index, INDEX_TYPE output_index_multiplier, INDEX_TYPE output_size, vector<INDEX_TYPE>* result) { INDEX_TYPE row_split_size = row_split.size(); if (row_split_size > 0) { result->reserve(row_split(row_split_size - 1)); } for (INDEX_TYPE i = 0; i < row_split_size - 1; ++i) { INDEX_TYPE row_length = row_split(i + 1) - row_split(i); INDEX_TYPE real_length = std::min(output_size, row_length); INDEX_TYPE parent_output_index_current = parent_output_index[i]; if (parent_output_index_current == -1) { real_length = 0; } for (INDEX_TYPE j = 0; j < real_length; ++j) { result->push_back(parent_output_index_current); parent_output_index_current += output_index_multiplier; } for (INDEX_TYPE j = 0; j < row_length - real_length; ++j) { result->push_back(-1); } } if (row_split_size > 0) { OP_REQUIRES(context, result->size() == row_split(row_split_size - 1), errors::InvalidArgument("Invalid row split size.")); } }
Base
1
void BinaryParameter::setParam(const void* v, int len) { LOCK_CONFIG; if (immutable) return; vlog.debug("set %s(Binary)", getName()); delete [] value; value = 0; if (len) { value = new char[len]; length = len; memcpy(value, v, len); } }
Base
1
inline int StringData::size() const { return m_len; }
Base
1
http_error_t::make_body (int n, const str &si, const str &aux) { strbuf b; str ldesc; const str sdesc = http_status.get_desc (n, &ldesc); b << "<html>\n" << " <head>\n" << " <title>" << n << " " << sdesc << "</title>\n" << " </head>\n" << " <body>\n" << " <h1>Error " << n << " " << sdesc << "</h1><br><br>\n" ; if (n == HTTP_NOT_FOUND && aux) { b << "The file <tt>" << aux << "</tt> was not found on this server.<br><br>\n\n"; } b << " <hr>\n" << " <i>" << si << "</i>\n" << " <br>\n" << " </body>\n" << "</html>\n" ; return b; }
Base
1
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* lookup = GetInput(context, node, 0); const TfLiteTensor* value = GetInput(context, node, 1); TfLiteTensor* output = GetOutput(context, node, 0); switch (value->type) { case kTfLiteFloat32: return EvalSimple(context, node, lookup, value, output); case kTfLiteUInt8: case kTfLiteInt8: if (output->type == kTfLiteFloat32) { return EvalHybrid(context, node, lookup, value, output); } else { return EvalSimple(context, node, lookup, value, output); } default: context->ReportError(context, "Type not currently supported."); return kTfLiteError; } }
Base
1
int FdOutStream::length() { return offset + ptr - sentUpTo; }
Base
1
TfLiteStatus PrepareAny(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); const TfLiteTensor* input = GetInput(context, node, 0); TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteBool); return PrepareSimple(context, node); }
Base
1
TfLiteStatus ReverseSequenceImpl(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, kInputTensor); const TfLiteTensor* seq_lengths_tensor = GetInput(context, node, kSeqLengthsTensor); const TS* seq_lengths = GetTensorData<TS>(seq_lengths_tensor); auto* params = reinterpret_cast<TfLiteReverseSequenceParams*>(node->builtin_data); int seq_dim = params->seq_dim; int batch_dim = params->batch_dim; TF_LITE_ENSURE(context, seq_dim >= 0); TF_LITE_ENSURE(context, batch_dim >= 0); TF_LITE_ENSURE(context, seq_dim != batch_dim); TF_LITE_ENSURE(context, seq_dim < NumDimensions(input)); TF_LITE_ENSURE(context, batch_dim < NumDimensions(input)); TF_LITE_ENSURE_EQ(context, SizeOfDimension(seq_lengths_tensor, 0), SizeOfDimension(input, batch_dim)); for (int i = 0; i < NumDimensions(seq_lengths_tensor); ++i) { TF_LITE_ENSURE(context, seq_lengths[i] <= SizeOfDimension(input, seq_dim)); } TfLiteTensor* output = GetOutput(context, node, kOutputTensor); reference_ops::ReverseSequence<T, TS>( seq_lengths, seq_dim, batch_dim, GetTensorShape(input), GetTensorData<T>(input), GetTensorShape(output), GetTensorData<T>(output)); return kTfLiteOk; }
Base
1
TfLiteStatus PrepareMeanOrSum(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_OK(context, PrepareSimple(context, node)); OpData* data = reinterpret_cast<OpData*>(node->user_data); // reduce_mean requires a buffer to store intermediate sum result. OpContext op_context(context, node); if (op_context.input->type == kTfLiteInt8 || op_context.input->type == kTfLiteUInt8 || op_context.input->type == kTfLiteInt16) { const double real_multiplier = static_cast<double>(op_context.input->params.scale) / static_cast<double>(op_context.output->params.scale); int exponent; QuantizeMultiplier(real_multiplier, &data->multiplier, &exponent); data->shift = exponent; } TfLiteTensor* temp_sum = GetTemporary(context, node, /*index=*/2); if (!IsConstantTensor(op_context.axis)) { SetTensorToDynamic(temp_sum); return kTfLiteOk; } temp_sum->allocation_type = kTfLiteArenaRw; return ResizeTempSum(context, &op_context, temp_sum); }
Base
1
RawTile KakaduImage::getRegion( int seq, int ang, unsigned int res, int layers, int x, int y, unsigned int w, unsigned int h ) { // Scale up our output bit depth to the nearest factor of 8 unsigned int obpc = bpc; if( bpc <= 16 && bpc > 8 ) obpc = 16; else if( bpc <= 8 ) obpc = 8; #ifdef DEBUG Timer timer; timer.start(); #endif RawTile rawtile( 0, res, seq, ang, w, h, channels, obpc ); if( obpc == 16 ) rawtile.data = new unsigned short[w*h*channels]; else if( obpc == 8 ) rawtile.data = new unsigned char[w*h*channels]; else throw file_error( "Kakadu :: Unsupported number of bits" ); rawtile.dataLength = w*h*channels*(obpc/8); rawtile.filename = getImagePath(); rawtile.timestamp = timestamp; process( res, layers, x, y, w, h, rawtile.data ); #ifdef DEBUG logfile << "Kakadu :: getRegion() :: " << timer.getTime() << " microseconds" << endl; #endif return rawtile; }
Base
1
void CNB::DoIPHdrCSO(PVOID IpHeader, ULONG EthPayloadLength) const { ParaNdis_CheckSumVerifyFlat(IpHeader, EthPayloadLength, pcrIpChecksum | pcrFixIPChecksum, __FUNCTION__); }
Class
2
bool DNP3_Base::AddToBuffer(Endpoint* endp, int target_len, const u_char** data, int* len) { if ( ! target_len ) return true; int to_copy = min(*len, target_len - endp->buffer_len); memcpy(endp->buffer + endp->buffer_len, *data, to_copy); *data += to_copy; *len -= to_copy; endp->buffer_len += to_copy; return endp->buffer_len == target_len; }
Class
2
FdInStream::FdInStream(int fd_, int timeoutms_, int bufSize_, bool closeWhenDone_) : fd(fd_), closeWhenDone(closeWhenDone_), timeoutms(timeoutms_), blockCallback(0), timing(false), timeWaitedIn100us(5), timedKbits(0), bufSize(bufSize_ ? bufSize_ : DEFAULT_BUF_SIZE), offset(0) { ptr = end = start = new U8[bufSize]; }
Base
1
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 3); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, kInputTensor); const TfLiteTensor* begin = GetInput(context, node, kBeginTensor); const TfLiteTensor* size = GetInput(context, node, kSizeTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); // Ensure validity of input tensor and its dimension. TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); TF_LITE_ENSURE(context, begin->type == kTfLiteInt32 || begin->type == kTfLiteInt64); TF_LITE_ENSURE(context, size->type == kTfLiteInt32 || size->type == kTfLiteInt64); TF_LITE_ENSURE_EQ(context, NumDimensions(begin), 1); TF_LITE_ENSURE_EQ(context, NumDimensions(size), 1); TF_LITE_ENSURE_EQ(context, NumElements(begin), NumElements(size)); TF_LITE_ENSURE_MSG(context, NumDimensions(input) <= kMaxDim, "Slice op only supports 1D-4D input arrays."); // Postpone allocation of output if any of the indexing tensors is not // constant if (!(IsConstantTensor(begin) && IsConstantTensor(size))) { SetTensorToDynamic(output); return kTfLiteOk; } return ResizeOutputShape(context, input, begin, size, output); }
Base
1
void nego_process_negotiation_request(rdpNego* nego, wStream* s) { BYTE flags; UINT16 length; Stream_Read_UINT8(s, flags); Stream_Read_UINT16(s, length); Stream_Read_UINT32(s, nego->RequestedProtocols); WLog_DBG(TAG, "RDP_NEG_REQ: RequestedProtocol: 0x%08" PRIX32 "", nego->RequestedProtocols); nego->state = NEGO_STATE_FINAL; }
Base
1
int length() { return ptr - start; }
Base
1
TfLiteStatus ComparisonPrepareCommon(TfLiteContext* context, TfLiteNode* node, bool is_string_allowed) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); // Don't support string. if (!is_string_allowed) { TF_LITE_ENSURE(context, input1->type != kTfLiteString); } // Currently only support tensors have the same type. TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type); output->type = kTfLiteBool; bool requires_broadcast = !HaveSameShapes(input1, input2); TfLiteIntArray* output_size = nullptr; if (requires_broadcast) { TF_LITE_ENSURE_OK(context, CalculateShapeForBroadcast( context, input1, input2, &output_size)); } else { output_size = TfLiteIntArrayCopy(input1->dims); } return context->ResizeTensor(context, output, output_size); }
Base
1
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, kInput); const TfLiteTensor* axis = GetInput(context, node, kAxis); TfLiteTensor* output = GetOutput(context, node, 0); output->type = input->type; if (IsConstantTensor(axis)) { int axis_value; TF_LITE_ENSURE_OK(context, GetAxisValueFromTensor(context, *axis, &axis_value)); return ExpandTensorDim(context, *input, axis_value, output); } SetTensorToDynamic(output); return kTfLiteOk; }
Base
1
static void HeaderMapImplGetByteSize(benchmark::State& state) { HeaderMapImpl headers; addDummyHeaders(headers, state.range(0)); uint64_t size = 0; for (auto _ : state) { size += headers.byteSize(); } benchmark::DoNotOptimize(size); }
Class
2
TEST_F(GroupVerifierTest, TestRequiresAnyWithAllowMissingButOk) { TestUtility::loadFromYaml(RequiresAnyConfig, proto_config_); proto_config_.mutable_rules(0) ->mutable_requires() ->mutable_requires_any() ->add_requirements() ->mutable_allow_missing(); createAsyncMockAuthsAndVerifier(std::vector<std::string>{"example_provider", "other_provider"}); EXPECT_CALL(mock_cb_, onComplete(Status::Ok)); auto headers = Http::TestRequestHeaderMapImpl{}; context_ = Verifier::createContext(headers, parent_span_, &mock_cb_); verifier_->verify(context_); callbacks_["example_provider"](Status::JwtMissed); callbacks_["other_provider"](Status::JwtUnknownIssuer); }
Class
2
BOOL nego_read_request(rdpNego* nego, wStream* s) { BYTE li; BYTE type; UINT16 length; if (!tpkt_read_header(s, &length)) return FALSE; if (!tpdu_read_connection_request(s, &li, length)) return FALSE; if (li != Stream_GetRemainingLength(s) + 6) { WLog_ERR(TAG, "Incorrect TPDU length indicator."); return FALSE; } if (!nego_read_request_token_or_cookie(nego, s)) { WLog_ERR(TAG, "Failed to parse routing token or cookie."); return FALSE; } if (Stream_GetRemainingLength(s) >= 8) { /* rdpNegData (optional) */ Stream_Read_UINT8(s, type); /* Type */ if (type != TYPE_RDP_NEG_REQ) { WLog_ERR(TAG, "Incorrect negotiation request type %" PRIu8 "", type); return FALSE; } nego_process_negotiation_request(nego, s); } return tpkt_ensure_stream_consumed(s, length); }
Base
1
static inline bool isValid(const RemoteFsDevice::Details &d) { return d.isLocalFile() || RemoteFsDevice::constSshfsProtocol==d.url.scheme() || RemoteFsDevice::constSambaProtocol==d.url.scheme() || RemoteFsDevice::constSambaAvahiProtocol==d.url.scheme(); }
Class
2
static __forceinline void draw_line(float *output, int x0, int y0, int x1, int y1, int n) { int dy = y1 - y0; int adx = x1 - x0; int ady = abs(dy); int base; int x=x0,y=y0; int err = 0; int sy; #ifdef STB_VORBIS_DIVIDE_TABLE if (adx < DIVTAB_DENOM && ady < DIVTAB_NUMER) { if (dy < 0) { base = -integer_divide_table[ady][adx]; sy = base-1; } else { base = integer_divide_table[ady][adx]; sy = base+1; } } else { base = dy / adx; if (dy < 0) sy = base - 1; else sy = base+1; } #else base = dy / adx; if (dy < 0) sy = base - 1; else sy = base+1; #endif ady -= abs(base) * adx; if (x1 > n) x1 = n; if (x < x1) { LINE_OP(output[x], inverse_db_table[y]); for (++x; x < x1; ++x) { err += ady; if (err >= adx) { err -= adx; y += sy; } else y += base; LINE_OP(output[x], inverse_db_table[y]); } } }
Base
1
static int mptctl_do_reset(unsigned long arg) { struct mpt_ioctl_diag_reset __user *urinfo = (void __user *) arg; struct mpt_ioctl_diag_reset krinfo; MPT_ADAPTER *iocp; if (copy_from_user(&krinfo, urinfo, sizeof(struct mpt_ioctl_diag_reset))) { printk(KERN_ERR MYNAM "%s@%d::mptctl_do_reset - " "Unable to copy mpt_ioctl_diag_reset struct @ %p\n", __FILE__, __LINE__, urinfo); return -EFAULT; } if (mpt_verify_adapter(krinfo.hdr.iocnum, &iocp) < 0) { printk(KERN_DEBUG MYNAM "%s@%d::mptctl_do_reset - ioc%d not found!\n", __FILE__, __LINE__, krinfo.hdr.iocnum); return -ENODEV; /* (-6) No such device or address */ } dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "mptctl_do_reset called.\n", iocp->name)); if (mpt_HardResetHandler(iocp, CAN_SLEEP) != 0) { printk (MYIOC_s_ERR_FMT "%s@%d::mptctl_do_reset - reset failed.\n", iocp->name, __FILE__, __LINE__); return -1; } return 0; }
Class
2
String StringUtil::Implode(const Variant& items, const String& delim, const bool checkIsContainer /* = true */) { if (checkIsContainer && !isContainer(items)) { throw_param_is_not_container(); } int size = getContainerSize(items); if (size == 0) return empty_string(); req::vector<String> sitems; sitems.reserve(size); int len = 0; int lenDelim = delim.size(); for (ArrayIter iter(items); iter; ++iter) { sitems.emplace_back(iter.second().toString()); len += sitems.back().size() + lenDelim; } len -= lenDelim; // always one delimiter less than count of items assert(sitems.size() == size); String s = String(len, ReserveString); char *buffer = s.mutableData(); const char *sdelim = delim.data(); char *p = buffer; String &init_str = sitems[0]; int init_len = init_str.size(); memcpy(p, init_str.data(), init_len); p += init_len; for (int i = 1; i < size; i++) { String &item = sitems[i]; memcpy(p, sdelim, lenDelim); p += lenDelim; int lenItem = item.size(); memcpy(p, item.data(), lenItem); p += lenItem; } assert(p - buffer == len); s.setSize(len); return s; }
Base
1
Variant HHVM_FUNCTION(mcrypt_generic_init, const Resource& td, const String& key, const String& iv) { auto pm = get_valid_mcrypt_resource(td); if (!pm) { return false; } int max_key_size = mcrypt_enc_get_key_size(pm->m_td); int iv_size = mcrypt_enc_get_iv_size(pm->m_td); if (key.empty()) { raise_warning("Key size is 0"); } unsigned char *key_s = (unsigned char *)malloc(key.size()); memset(key_s, 0, key.size()); unsigned char *iv_s = (unsigned char *)malloc(iv_size + 1); memset(iv_s, 0, iv_size + 1); int key_size; if (key.size() > max_key_size) { raise_warning("Key size too large; supplied length: %d, max: %d", key.size(), max_key_size); key_size = max_key_size; } else { key_size = key.size(); } memcpy(key_s, key.data(), key.size()); if (iv.size() != iv_size) { raise_warning("Iv size incorrect; supplied length: %d, needed: %d", iv.size(), iv_size); } memcpy(iv_s, iv.data(), std::min(iv_size, iv.size())); mcrypt_generic_deinit(pm->m_td); int result = mcrypt_generic_init(pm->m_td, key_s, key_size, iv_s); /* If this function fails, close the mcrypt module to prevent crashes * when further functions want to access this resource */ if (result < 0) { pm->close(); switch (result) { case -3: raise_warning("Key length incorrect"); break; case -4: raise_warning("Memory allocation error"); break; case -1: default: raise_warning("Unknown error"); break; } } else { pm->m_init = true; } free(iv_s); free(key_s); return result; }
Base
1
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* output = GetOutput(context, node, kOutputTensor); if (output->type == kTfLiteFloat32) { EvalAddN<float>(context, node); } else if (output->type == kTfLiteInt32) { EvalAddN<int32_t>(context, node); } else { context->ReportError(context, "AddN only supports FLOAT32|INT32 now, got %s.", TfLiteTypeGetName(output->type)); return kTfLiteError; } return kTfLiteOk; }
Base
1
Result ZipFile::uncompressEntry (int index, const File& targetDirectory, bool shouldOverwriteFiles) { auto* zei = entries.getUnchecked (index); #if JUCE_WINDOWS auto entryPath = zei->entry.filename; #else auto entryPath = zei->entry.filename.replaceCharacter ('\\', '/'); #endif if (entryPath.isEmpty()) return Result::ok(); auto targetFile = targetDirectory.getChildFile (entryPath); if (entryPath.endsWithChar ('/') || entryPath.endsWithChar ('\\')) return targetFile.createDirectory(); // (entry is a directory, not a file) std::unique_ptr<InputStream> in (createStreamForEntry (index)); if (in == nullptr) return Result::fail ("Failed to open the zip file for reading"); if (targetFile.exists()) { if (! shouldOverwriteFiles) return Result::ok(); if (! targetFile.deleteFile()) return Result::fail ("Failed to write to target file: " + targetFile.getFullPathName()); } if (! targetFile.getParentDirectory().createDirectory()) return Result::fail ("Failed to create target folder: " + targetFile.getParentDirectory().getFullPathName()); if (zei->entry.isSymbolicLink) { String originalFilePath (in->readEntireStreamAsString() .replaceCharacter (L'/', File::getSeparatorChar())); if (! File::createSymbolicLink (targetFile, originalFilePath, true)) return Result::fail ("Failed to create symbolic link: " + originalFilePath); } else { FileOutputStream out (targetFile); if (out.failedToOpen()) return Result::fail ("Failed to write to target file: " + targetFile.getFullPathName()); out << *in; } targetFile.setCreationTime (zei->entry.fileTime); targetFile.setLastModificationTime (zei->entry.fileTime); targetFile.setLastAccessTime (zei->entry.fileTime); return Result::ok(); }
Base
1
TfLiteStatus EvalHashtableImport(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input_resource_id_tensor = GetInput(context, node, kInputResourceIdTensor); const int resource_id = input_resource_id_tensor->data.i32[0]; const TfLiteTensor* key_tensor = GetInput(context, node, kKeyTensor); const TfLiteTensor* value_tensor = GetInput(context, node, kValueTensor); Subgraph* subgraph = reinterpret_cast<Subgraph*>(context->impl_); auto& resources = subgraph->resources(); auto* lookup = resource::GetHashtableResource(&resources, resource_id); TF_LITE_ENSURE(context, lookup != nullptr); TF_LITE_ENSURE_STATUS( lookup->CheckKeyAndValueTypes(context, key_tensor, value_tensor)); // The hashtable resource will only be initialized once, attempting to // initialize it multiple times will be a no-op. auto result = lookup->Import(context, key_tensor, value_tensor); return result; }
Base
1
void resize (std::size_t new_size_) { _buf_size = new_size_; }
Base
1
inline bool operator ==(const MaskedIP& l, const MaskedIP& r) { auto shift = std::max((l.v6 ? 128 : 32) - l.prefix, (r.v6 ? 128 : 32) - r.prefix); ceph_assert(shift > 0); return (l.addr >> shift) == (r.addr >> shift); }
Base
1
void RequestContext::SetApiKeyHeader() { request_->AddHeaderToBackend(kDefaultApiKeyHeaderName, api_key_); }
Base
1
bool SSecurityTLS::processMsg(SConnection *sc) { rdr::InStream* is = sc->getInStream(); rdr::OutStream* os = sc->getOutStream(); vlog.debug("Process security message (session %p)", session); if (!session) { initGlobal(); if (gnutls_init(&session, GNUTLS_SERVER) != GNUTLS_E_SUCCESS) throw AuthFailureException("gnutls_init failed"); if (gnutls_set_default_priority(session) != GNUTLS_E_SUCCESS) throw AuthFailureException("gnutls_set_default_priority failed"); try { setParams(session); } catch(...) { os->writeU8(0); throw; } os->writeU8(1); os->flush(); } rdr::TLSInStream *tlsis = new rdr::TLSInStream(is, session); rdr::TLSOutStream *tlsos = new rdr::TLSOutStream(os, session); int err; err = gnutls_handshake(session); if (err != GNUTLS_E_SUCCESS) { delete tlsis; delete tlsos; if (!gnutls_error_is_fatal(err)) { vlog.debug("Deferring completion of TLS handshake: %s", gnutls_strerror(err)); return false; } vlog.error("TLS Handshake failed: %s", gnutls_strerror (err)); shutdown(); throw AuthFailureException("TLS Handshake failed"); } vlog.debug("Handshake completed"); sc->setStreams(fis = tlsis, fos = tlsos); return true; }
Class
2
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, 0); switch (input->type) { case kTfLiteFloat32: return EvalImpl<kernel_type, kTfLiteFloat32>(context, node); case kTfLiteUInt8: return EvalImpl<kernel_type, kTfLiteUInt8>(context, node); case kTfLiteInt8: return EvalImpl<kernel_type, kTfLiteInt8>(context, node); case kTfLiteInt16: return EvalImpl<kernel_type, kTfLiteInt16>(context, node); default: TF_LITE_KERNEL_LOG(context, "Type %s not currently supported.", TfLiteTypeGetName(input->type)); return kTfLiteError; } }
Base
1
__global__ void UnsortedSegmentCustomKernel(const Index input_outer_dim_size, const Index inner_dim_size, const Index output_outer_dim_size, const Index* segment_ids, const T* input, T* output) { const Index input_total_size = input_outer_dim_size * inner_dim_size; const Index output_total_size = output_outer_dim_size * inner_dim_size; for (int input_index : GpuGridRangeX(input_total_size)) { const Index input_segment_index = input_index / inner_dim_size; const Index segment_offset = input_index % inner_dim_size; const Index output_segment_index = segment_ids[input_segment_index]; if (output_segment_index < 0 || output_segment_index >= output_total_size) { continue; } const Index output_index = output_segment_index * inner_dim_size + segment_offset; KernelReductionFunctor()(output + output_index, ldg(input + input_index)); } }
Base
1
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { OpData* data = reinterpret_cast<OpData*>(node->user_data); ruy::profiler::ScopeLabel label("SquaredDifference"); const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); if (output->type == kTfLiteFloat32) { EvalSquaredDifference<float>(context, node, data, input1, input2, output); } else if (output->type == kTfLiteInt32) { EvalSquaredDifference<int32_t>(context, node, data, input1, input2, output); } else { context->ReportError( context, "SquaredDifference only supports FLOAT32 and INT32 now, got %d.", output->type); return kTfLiteError; } return kTfLiteOk; }
Base
1
inline TfLiteTensor* GetMutableInput(const TfLiteContext* context, const TfLiteNode* node, int index) { if (index >= 0 && index < node->inputs->size) { const int tensor_index = node->inputs->data[index]; if (tensor_index != kTfLiteOptionalTensor) { if (context->tensors != nullptr) { return &context->tensors[tensor_index]; } else { return context->GetTensor(context, tensor_index); } } } return nullptr; }
Base
1
QString Helper::temporaryMountDevice(const QString &device, const QString &name, bool readonly) { QString mount_point = mountPoint(device); if (!mount_point.isEmpty()) return mount_point; mount_point = "%1/.%2/mount/%3"; const QStringList &tmp_paths = QStandardPaths::standardLocations(QStandardPaths::TempLocation); mount_point = mount_point.arg(tmp_paths.isEmpty() ? "/tmp" : tmp_paths.first()).arg(qApp->applicationName()).arg(name); if (!QDir::current().mkpath(mount_point)) { dCError("mkpath \"%s\" failed", qPrintable(mount_point)); return QString(); } if (!mountDevice(device, mount_point, readonly)) { dCError("Mount the device \"%s\" to \"%s\" failed", qPrintable(device), qPrintable(mount_point)); return QString(); } return mount_point; }
Class
2
int HexInStream::overrun(int itemSize, int nItems, bool wait) { if (itemSize > bufSize) throw Exception("HexInStream overrun: max itemSize exceeded"); if (end - ptr != 0) memmove(start, ptr, end - ptr); end -= ptr - start; offset += ptr - start; ptr = start; while (end < ptr + itemSize) { int n = in_stream.check(2, 1, wait); if (n == 0) return 0; const U8* iptr = in_stream.getptr(); const U8* eptr = in_stream.getend(); int length = min((eptr - iptr)/2, start + bufSize - end); U8* optr = (U8*) end; for (int i=0; i<length; i++) { int v = 0; readHexAndShift(iptr[i*2], &v); readHexAndShift(iptr[i*2+1], &v); optr[i] = v; } in_stream.setptr(iptr + length*2); end += length; } if (itemSize * nItems > end - ptr) nItems = (end - ptr) / itemSize; return nItems; }
Base
1
void writeBytes(const void* data, int length) { check(length); memcpy(ptr, data, length); ptr += length; }
Base
1
IniSection (const IniParser *p) : IniBase (-1), ip (p), end_comment (), rewrite_by(-1), container (), ivalues (), isections () {}
Class
2
TfLiteTensor* GetOutput(TfLiteContext* context, const TfLiteNode* node, int index) { if (index >= 0 && index < node->outputs->size) { const int tensor_index = node->outputs->data[index]; if (tensor_index != kTfLiteOptionalTensor) { if (context->tensors != nullptr) { return &context->tensors[tensor_index]; } else { return context->GetTensor(context, tensor_index); } } } return nullptr; }
Base
1
bool logToUSDT(const Array& bt) { std::lock_guard<std::mutex> lock(usdt_mutex); memset(&bt_slab, 0, sizeof(bt_slab)); int i = 0; IterateVNoInc( bt.get(), [&](TypedValue tv) -> bool { if (i >= strobelight::kMaxStackframes) { return true; } assertx(isArrayLikeType(type(tv))); ArrayData* bt_frame = val(tv).parr; strobelight::backtrace_frame_t* frame = &bt_slab.frames[i]; auto const line = bt_frame->get(s_line.get()); if (line.is_init()) { assertx(isIntType(type(line))); frame->line = val(line).num; } auto const file_name = bt_frame->get(s_file.get()); if (file_name.is_init()) { assertx(isStringType(type(file_name))); strncpy(frame->file_name, val(file_name).pstr->data(), std::min(val(file_name).pstr->size(), strobelight::kFileNameMax)); frame->file_name[strobelight::kFileNameMax - 1] = '\0'; } auto const class_name = bt_frame->get(s_class.get()); if (class_name.is_init()) { assertx(isStringType(type(class_name))); strncpy(frame->class_name, val(class_name).pstr->data(), std::min(val(class_name).pstr->size(), strobelight::kClassNameMax)); frame->class_name[strobelight::kClassNameMax - 1] = '\0'; } auto const function_name = bt_frame->get(s_function.get()); if (function_name.is_init()) { assertx(isStringType(type(function_name))); strncpy(frame->function, val(function_name).pstr->data(), std::min(val(function_name).pstr->size(), strobelight::kFunctionMax)); frame->function[strobelight::kFunctionMax - 1] = '\0'; } i++; return false; } ); bt_slab.len = i; // Allow BPF to read the now-formatted stacktrace FOLLY_SDT_WITH_SEMAPHORE(hhvm, hhvm_stack, &bt_slab); return true; }
Base
1
static jas_image_cmpt_t *jas_image_cmpt_create(int_fast32_t tlx, int_fast32_t tly, int_fast32_t hstep, int_fast32_t vstep, int_fast32_t width, int_fast32_t height, uint_fast16_t depth, bool sgnd, uint_fast32_t inmem) { jas_image_cmpt_t *cmpt; size_t size; cmpt = 0; if (width < 0 || height < 0 || hstep <= 0 || vstep <= 0) { goto error; } if (!jas_safe_intfast32_add(tlx, width, 0) || !jas_safe_intfast32_add(tly, height, 0)) { goto error; } if (!(cmpt = jas_malloc(sizeof(jas_image_cmpt_t)))) { goto error; } cmpt->type_ = JAS_IMAGE_CT_UNKNOWN; cmpt->tlx_ = tlx; cmpt->tly_ = tly; cmpt->hstep_ = hstep; cmpt->vstep_ = vstep; cmpt->width_ = width; cmpt->height_ = height; cmpt->prec_ = depth; cmpt->sgnd_ = sgnd; cmpt->stream_ = 0; cmpt->cps_ = (depth + 7) / 8; // Compute the number of samples in the image component, while protecting // against overflow. // size = cmpt->width_ * cmpt->height_ * cmpt->cps_; if (!jas_safe_size_mul(cmpt->width_, cmpt->height_, &size) || !jas_safe_size_mul(size, cmpt->cps_, &size)) { goto error; } cmpt->stream_ = (inmem) ? jas_stream_memopen(0, size) : jas_stream_tmpfile(); if (!cmpt->stream_) { goto error; } /* Zero the component data. This isn't necessary, but it is convenient for debugging purposes. */ /* Note: conversion of size - 1 to long can overflow */ if (jas_stream_seek(cmpt->stream_, size - 1, SEEK_SET) < 0 || jas_stream_putc(cmpt->stream_, 0) == EOF || jas_stream_seek(cmpt->stream_, 0, SEEK_SET) < 0) { goto error; } return cmpt; error: if (cmpt) { jas_image_cmpt_destroy(cmpt); } return 0; }
Base
1
QString Helper::temporaryMountDevice(const QString &device, const QString &name, bool readonly) { QString mount_point = mountPoint(device); if (!mount_point.isEmpty()) return mount_point; mount_point = "%1/.%2/mount/%3"; const QStringList &tmp_paths = QStandardPaths::standardLocations(QStandardPaths::TempLocation); mount_point = mount_point.arg(tmp_paths.isEmpty() ? "/tmp" : tmp_paths.first()).arg(qApp->applicationName()).arg(name); if (!QDir::current().mkpath(mount_point)) { dCError("mkpath \"%s\" failed", qPrintable(mount_point)); return QString(); } if (!mountDevice(device, mount_point, readonly)) { dCError("Mount the device \"%s\" to \"%s\" failed", qPrintable(device), qPrintable(mount_point)); return QString(); } return mount_point; }
Base
1
Status FillCollectiveParams(CollectiveParams* col_params, CollectiveType collective_type, const Tensor& group_size, const Tensor& group_key, const Tensor& instance_key) { if (group_size.dims() > 0) { return errors::Internal("Unexpected dimensions on input group_size, got ", group_size.shape().DebugString()); } if (group_key.dims() > 0) { return errors::Internal("Unexpected dimensions on input group_key, got ", group_key.shape().DebugString()); } if (instance_key.dims() > 0) { return errors::Internal( "Unexpected dimensions on input instance_key, got ", instance_key.shape().DebugString()); } col_params->name = name_; col_params->group.device_type = device_type_; col_params->group.group_size = group_size.unaligned_flat<int32>()(0); if (col_params->group.group_size <= 0) { return errors::InvalidArgument( "group_size must be positive integer but got ", col_params->group.group_size); } col_params->group.group_key = group_key.unaligned_flat<int32>()(0); col_params->instance.type = collective_type; col_params->instance.instance_key = instance_key.unaligned_flat<int32>()(0); col_params->instance.data_type = data_type_; col_params->instance.impl_details.communication_hint = communication_hint_; col_params->instance.impl_details.timeout_seconds = timeout_seconds_; return Status::OK(); }
Variant
0
int32_t CxImage::GetSize() { return head.biSize + head.biSizeImage + GetPaletteSize(); }
Base
1
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteL2NormParams*>(node->builtin_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); TF_LITE_ENSURE(context, NumDimensions(input) <= 4); TF_LITE_ENSURE(context, output->type == kTfLiteFloat32 || output->type == kTfLiteUInt8 || output->type == kTfLiteInt8); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8) { TF_LITE_ENSURE_EQ(context, output->params.scale, (1. / 128.)); if (output->type == kTfLiteUInt8) { TF_LITE_ENSURE_EQ(context, output->params.zero_point, 128); } if (output->type == kTfLiteInt8) { TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); } } // TODO(ahentz): For some reason our implementations don't support // activations. TF_LITE_ENSURE_EQ(context, params->activation, kTfLiteActNone); TfLiteIntArray* output_size = TfLiteIntArrayCopy(input->dims); return context->ResizeTensor(context, output, output_size); }
Base
1
Jsi_RC Jsi_ValueInsertArray(Jsi_Interp *interp, Jsi_Value *target, int key, Jsi_Value *val, int flags) { if (target->vt != JSI_VT_OBJECT) { if (interp->strict) Jsi_LogWarn("Target is not object"); return JSI_ERROR; } Jsi_Obj *obj = target->d.obj; if (obj->isarrlist) { if (key >= 0 && key < interp->maxArrayList) { Jsi_ObjArraySet(interp, obj, val, key); return JSI_OK; } return JSI_ERROR; } char unibuf[JSI_MAX_NUMBER_STRING]; Jsi_NumberItoA10(key, unibuf, sizeof(unibuf)); Jsi_ObjInsert(interp, obj, unibuf, val, flags); return JSI_OK; }
Base
1
ssize_t enc_untrusted_recvfrom(int sockfd, void *buf, size_t len, int flags, struct sockaddr *src_addr, socklen_t *addrlen) { int klinux_flags = TokLinuxRecvSendFlag(flags); if (klinux_flags == 0 && flags != 0) { errno = EINVAL; return -1; } MessageWriter input; input.Push<int>(sockfd); input.Push<uint64_t>(len); input.Push<int>(klinux_flags); MessageReader output; const auto status = NonSystemCallDispatcher( ::asylo::host_call::kRecvFromHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_recvfrom", 4); int result = output.next<int>(); int klinux_errno = output.next<int>(); // recvfrom() returns -1 on failure, with errno set to indicate the cause // of the error. if (result == -1) { errno = FromkLinuxErrorNumber(klinux_errno); return result; } auto buffer_received = output.next(); memcpy(buf, buffer_received.data(), len); // If |src_addr| is not NULL, and the underlying protocol provides the source // address, this source address is filled in. When |src_addr| is NULL, nothing // is filled in; in this case, |addrlen| is not used, and should also be NULL. if (src_addr != nullptr && addrlen != nullptr) { auto klinux_sockaddr_buf = output.next(); const struct klinux_sockaddr *klinux_addr = klinux_sockaddr_buf.As<struct klinux_sockaddr>(); FromkLinuxSockAddr(klinux_addr, klinux_sockaddr_buf.size(), src_addr, addrlen, TrustedPrimitives::BestEffortAbort); } return result; }
Base
1
bool ZlibInStream::decompress(bool wait) { if (!underlying) throw Exception("ZlibInStream overrun: no underlying stream"); zs->next_out = (U8*)end; zs->avail_out = start + bufSize - end; int n = underlying->check(1, 1, wait); if (n == 0) return false; zs->next_in = (U8*)underlying->getptr(); zs->avail_in = underlying->getend() - underlying->getptr(); if ((int)zs->avail_in > bytesIn) zs->avail_in = bytesIn; int rc = inflate(zs, Z_SYNC_FLUSH); if (rc != Z_OK) { throw Exception("ZlibInStream: inflate failed"); } bytesIn -= zs->next_in - underlying->getptr(); end = zs->next_out; underlying->setptr(zs->next_in); return true; }
Base
1
void RequestContext::AddInstanceIdentityToken() { if (!method()) { return; } const auto &audience = method()->backend_jwt_audience(); if (!audience.empty()) { auto &token = service_context() ->global_context() ->GetInstanceIdentityToken(audience) ->GetAuthToken(); if (!token.empty()) { std::string origin_auth_header; if (request()->FindHeader(kAuthorizationHeader, &origin_auth_header)) { Status status = request()->AddHeaderToBackend( kXForwardedAuthorizationHeader, origin_auth_header); if (!status.ok()) { service_context()->env()->LogError( "Failed to set X-Forwarded-Authorization header to backend."); } } Status status = request()->AddHeaderToBackend(kAuthorizationHeader, kBearerPrefix + token); if (!status.ok()) { service_context()->env()->LogError( "Failed to set authorization header to backend."); } } } }
Base
1
CAMLprim value caml_bitvect_test(value bv, value n) { int pos = Int_val(n); return Val_int(Byte_u(bv, pos >> 3) & (1 << (pos & 7))); }
Class
2
void jas_matrix_divpow2(jas_matrix_t *matrix, int n) { int i; int j; jas_seqent_t *rowstart; int rowstep; jas_seqent_t *data; if (jas_matrix_numrows(matrix) > 0 && jas_matrix_numcols(matrix) > 0) { assert(matrix->rows_); rowstep = jas_matrix_rowstep(matrix); for (i = matrix->numrows_, rowstart = matrix->rows_[0]; i > 0; --i, rowstart += rowstep) { for (j = matrix->numcols_, data = rowstart; j > 0; --j, ++data) { *data = (*data >= 0) ? ((*data) >> n) : (-((-(*data)) >> n)); } } } }
Base
1
Variant HHVM_METHOD(XMLReader, expand, const Variant& basenode /* = null */) { auto* data = Native::data<XMLReader>(this_); req::ptr<XMLDocumentData> doc; xmlDocPtr docp = nullptr; SYNC_VM_REGS_SCOPED(); if (!basenode.isNull()) { auto dombasenode = Native::data<DOMNode>(basenode.toObject()); doc = dombasenode->doc(); docp = doc->docp(); if (docp == nullptr) { raise_warning("Invalid State Error"); return false; } } if (data->m_ptr) { xmlNodePtr node = xmlTextReaderExpand(data->m_ptr); if (node == nullptr) { raise_warning("An Error Occurred while expanding"); return false; } else { xmlNodePtr nodec = xmlDocCopyNode(node, docp, 1); if (nodec == nullptr) { raise_notice("Cannot expand this node type"); return false; } else { return php_dom_create_object(nodec, doc); } } } raise_warning("Load Data before trying to read"); return false; }
Base
1
static int lookup1_values(int entries, int dim) { int r = (int) floor(exp((float) log((float) entries) / dim)); if ((int) floor(pow((float) r+1, dim)) <= entries) // (int) cast for MinGW warning; ++r; // floor() to avoid _ftol() when non-CRT assert(pow((float) r+1, dim) > entries); assert((int) floor(pow((float) r, dim)) <= entries); // (int),floor() as above return r; }
Base
1
inline boost::system::error_code make_error_code(error_code_enum e) { return boost::system::error_code(e, get_bdecode_category()); }
Class
2
int pos() { return ptr - start; }
Base
1
int TLSOutStream::overrun(int itemSize, int nItems) { if (itemSize > bufSize) throw Exception("TLSOutStream overrun: max itemSize exceeded"); flush(); if (itemSize * nItems > end - ptr) nItems = (end - ptr) / itemSize; return nItems; }
Base
1
String HHVM_FUNCTION(ldap_escape, const String& value, const String& ignores /* = "" */, int flags /* = 0 */) { char esc[256] = {}; if (flags & k_LDAP_ESCAPE_FILTER) { // llvm.org/bugs/show_bug.cgi?id=18389 esc['*'*1u] = esc['('*1u] = esc[')'*1u] = esc['\0'*1u] = esc['\\'*1u] = 1; } if (flags & k_LDAP_ESCAPE_DN) { esc[','*1u] = esc['='*1u] = esc['+'*1u] = esc['<'*1u] = esc['\\'*1u] = 1; esc['>'*1u] = esc[';'*1u] = esc['"'*1u] = esc['#'*1u] = 1; } if (!flags) { memset(esc, 1, sizeof(esc)); } for (int i = 0; i < ignores.size(); i++) { esc[(unsigned char)ignores[i]] = 0; } char hex[] = "0123456789abcdef"; String result(3 * value.size(), ReserveString); char *rdata = result.get()->mutableData(), *r = rdata; for (int i = 0; i < value.size(); i++) { auto c = (unsigned char)value[i]; if (esc[c]) { *r++ = '\\'; *r++ = hex[c >> 4]; *r++ = hex[c & 0xf]; } else { *r++ = c; } } result.setSize(r - rdata); return result; }
Base
1
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { OpData* data = reinterpret_cast<OpData*>(node->user_data); const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); switch (output->type) { case kTfLiteInt32: { // TensorFlow does not support negative for int32. TF_LITE_ENSURE_OK(context, CheckValue(context, input2)); PowImpl<int32_t>(input1, input2, output, data->requires_broadcast); break; } case kTfLiteFloat32: { PowImpl<float>(input1, input2, output, data->requires_broadcast); break; } default: { context->ReportError(context, "Unsupported data type: %d", output->type); return kTfLiteError; } } return kTfLiteOk; }
Base
1
void BinaryParameter::getData(void** data_, int* length_) const { LOCK_CONFIG; if (length_) *length_ = length; if (data_) { *data_ = new char[length]; memcpy(*data_, value, length); } }
Base
1
int GetS8 (int nPos, bool *pbSuccess) { //*pbSuccess = true; if ( nPos < 0 || nPos >= m_nLen ) { *pbSuccess = false; return 0; } int nRes = m_sFile[ nPos ]; if ( nRes & 0x80 ) nRes |= ~0xff; return nRes; }
Base
1
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input_tensor = GetInput(context, node, 0); const TfLiteTensor* padding_matrix = GetInput(context, node, 1); TfLiteTensor* output_tensor = GetOutput(context, node, 0); TF_LITE_ENSURE_EQ(context, NumDimensions(padding_matrix), 2); TF_LITE_ENSURE_EQ(context, SizeOfDimension(padding_matrix, 0), NumDimensions(input_tensor)); if (!IsConstantTensor(padding_matrix)) { SetTensorToDynamic(output_tensor); return kTfLiteOk; } // We have constant padding, so we can infer output size. auto output_size = GetPaddedOutputShape(input_tensor, padding_matrix); if (output_size == nullptr) { return kTfLiteError; } return context->ResizeTensor(context, output_tensor, output_size.release()); }
Base
1
TfLiteStatus StoreAllDecodedSequences( TfLiteContext* context, const std::vector<std::vector<std::vector<int>>>& sequences, TfLiteNode* node, int top_paths) { const int32_t batch_size = sequences.size(); std::vector<int32_t> num_entries(top_paths, 0); // Calculate num_entries per path for (const auto& batch_s : sequences) { TF_LITE_ENSURE_EQ(context, batch_s.size(), top_paths); for (int p = 0; p < top_paths; ++p) { num_entries[p] += batch_s[p].size(); } } for (int p = 0; p < top_paths; ++p) { const int32_t p_num = num_entries[p]; // Resize the decoded outputs. TfLiteTensor* indices = GetOutput(context, node, p); TF_LITE_ENSURE_OK(context, Resize(context, {p_num, 2}, indices)); TfLiteTensor* values = GetOutput(context, node, p + top_paths); TF_LITE_ENSURE_OK(context, Resize(context, {p_num}, values)); TfLiteTensor* decoded_shape = GetOutput(context, node, p + 2 * top_paths); TF_LITE_ENSURE_OK(context, Resize(context, {2}, decoded_shape)); int32_t max_decoded = 0; int32_t offset = 0; int32_t* indices_data = GetTensorData<int32_t>(indices); int32_t* values_data = GetTensorData<int32_t>(values); int32_t* decoded_shape_data = GetTensorData<int32_t>(decoded_shape); for (int b = 0; b < batch_size; ++b) { auto& p_batch = sequences[b][p]; int32_t num_decoded = p_batch.size(); max_decoded = std::max(max_decoded, num_decoded); std::copy_n(p_batch.begin(), num_decoded, values_data + offset); for (int32_t t = 0; t < num_decoded; ++t, ++offset) { indices_data[offset * 2] = b; indices_data[offset * 2 + 1] = t; } } decoded_shape_data[0] = batch_size; decoded_shape_data[1] = max_decoded; } return kTfLiteOk; }
Base
1
TEST_F(EncryptedRecordTest, TestAllPaddingHandshake) { addToQueue("17030100050123456789"); EXPECT_CALL(*readAead_, _decrypt(_, _, 0)) .WillOnce(Invoke([](std::unique_ptr<IOBuf>& buf, const IOBuf*, uint64_t) { expectSame(buf, "0123456789"); return getBuf("16000000"); })); EXPECT_NO_THROW(read_.read(queue_)); }
Base
1
int TLSOutStream::length() { return offset + ptr - start; }
Base
1
inline void AveragePool(const uint8* input_data, const Dims<4>& input_dims, int stride_width, int stride_height, int pad_width, int pad_height, int filter_width, int filter_height, int32 output_activation_min, int32 output_activation_max, uint8* output_data, const Dims<4>& output_dims) { tflite::PoolParams params; params.stride_height = stride_height; params.stride_width = stride_width; params.filter_height = filter_height; params.filter_width = filter_width; params.padding_values.height = pad_height; params.padding_values.width = pad_width; params.quantized_activation_min = output_activation_min; params.quantized_activation_max = output_activation_max; AveragePool(params, DimsToShape(input_dims), input_data, DimsToShape(output_dims), output_data); }
Base
1
TfLiteStatus EqualEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); bool requires_broadcast = !HaveSameShapes(input1, input2); switch (input1->type) { case kTfLiteBool: Comparison<bool, reference_ops::EqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteFloat32: Comparison<float, reference_ops::EqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt32: Comparison<int32_t, reference_ops::EqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt64: Comparison<int64_t, reference_ops::EqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteUInt8: ComparisonQuantized<uint8_t, reference_ops::EqualFn>( input1, input2, output, requires_broadcast); break; case kTfLiteInt8: ComparisonQuantized<int8_t, reference_ops::EqualFn>( input1, input2, output, requires_broadcast); break; case kTfLiteString: ComparisonString(reference_ops::StringRefEqualFn, input1, input2, output, requires_broadcast); break; default: context->ReportError( context, "Does not support type %d, requires bool|float|int|uint8|string", input1->type); return kTfLiteError; } return kTfLiteOk; }
Base
1
void CSecurityTLS::shutdown(bool needbye) { if (session && needbye) if (gnutls_bye(session, GNUTLS_SHUT_RDWR) != GNUTLS_E_SUCCESS) vlog.error("gnutls_bye failed"); if (anon_cred) { gnutls_anon_free_client_credentials(anon_cred); anon_cred = 0; } if (cert_cred) { gnutls_certificate_free_credentials(cert_cred); cert_cred = 0; } if (session) { gnutls_deinit(session); session = 0; gnutls_global_deinit(); } }
Class
2
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); TF_LITE_ENSURE_TYPES_EQ(context, GetInput(context, node, 0)->type, kTfLiteString); TF_LITE_ENSURE_TYPES_EQ(context, GetOutput(context, node, 0)->type, kTfLiteString); return kTfLiteOk; }
Base
1
int jas_matrix_resize(jas_matrix_t *matrix, int numrows, int numcols) { int size; int i; size = numrows * numcols; if (size > matrix->datasize_ || numrows > matrix->maxrows_) { return -1; } matrix->numrows_ = numrows; matrix->numcols_ = numcols; for (i = 0; i < numrows; ++i) { matrix->rows_[i] = &matrix->data_[numcols * i]; } return 0; }
Class
2
void PrivateThreadPoolDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input, DatasetBase** output) { int64_t num_threads = 0; OP_REQUIRES_OK( ctx, ParseScalarArgument<int64_t>(ctx, "num_threads", &num_threads)); OP_REQUIRES(ctx, num_threads >= 0, errors::InvalidArgument("`num_threads` must be >= 0")); *output = new Dataset(ctx, input, num_threads); }
Base
1
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* value = GetInput(context, node, kValueTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); if (IsDynamicTensor(output)) { const TfLiteTensor* dims = GetInput(context, node, kDimsTensor); TF_LITE_ENSURE_OK(context, ResizeOutput(context, dims, output)); } #define TF_LITE_FILL(data_type) \ reference_ops::Fill(GetTensorShape(value), GetTensorData<data_type>(value), \ GetTensorShape(output), \ GetTensorData<data_type>(output)) switch (output->type) { case kTfLiteInt32: TF_LITE_FILL(int32_t); break; case kTfLiteInt64: TF_LITE_FILL(int64_t); break; case kTfLiteFloat32: TF_LITE_FILL(float); break; case kTfLiteBool: TF_LITE_FILL(bool); break; case kTfLiteString: FillString(value, output); break; default: context->ReportError( context, "Fill only currently supports int32, int64, float32, bool, string " "for input 1, got %d.", value->type); return kTfLiteError; } #undef TF_LITE_FILL return kTfLiteOk; }
Base
1
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, kInputTensor); const TfLiteTensor* seq_lengths = GetInput(context, node, kSeqLengthsTensor); TF_LITE_ENSURE_EQ(context, NumDimensions(seq_lengths), 1); if (input->type != kTfLiteInt32 && input->type != kTfLiteFloat32 && input->type != kTfLiteUInt8 && input->type != kTfLiteInt16 && input->type != kTfLiteInt64) { context->ReportError(context, "Type '%s' is not supported by reverse_sequence.", TfLiteTypeGetName(input->type)); return kTfLiteError; } if (seq_lengths->type != kTfLiteInt32 && seq_lengths->type != kTfLiteInt64) { context->ReportError( context, "Seq_lengths type '%s' is not supported by reverse_sequence.", TfLiteTypeGetName(seq_lengths->type)); return kTfLiteError; } TfLiteTensor* output = GetOutput(context, node, kOutputTensor); TfLiteIntArray* output_shape = TfLiteIntArrayCopy(input->dims); TF_LITE_ENSURE_TYPES_EQ(context, output->type, input->type); return context->ResizeTensor(context, output, output_shape); }
Base
1
inline typename V::SetType FBUnserializer<V>::unserializeSet() { p_ += CODE_SIZE; // the set size is written so we can reserve it in the set // in future. Skip past it for now. unserializeInt64(); typename V::SetType ret = V::createSet(); size_t code = nextCode(); while (code != FB_SERIALIZE_STOP) { V::setAppend(ret, unserializeThing()); code = nextCode(); } p_ += CODE_SIZE; return ret; }
Class
2
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); const int num_elements = NumElements(input); TF_LITE_ENSURE_EQ(context, num_elements, NumElements(output)); switch (input->type) { case kTfLiteInt64: return copyToTensor(context, input->data.i64, output, num_elements); case kTfLiteInt32: return copyToTensor(context, input->data.i32, output, num_elements); case kTfLiteUInt8: return copyToTensor(context, input->data.uint8, output, num_elements); case kTfLiteFloat32: return copyToTensor(context, GetTensorData<float>(input), output, num_elements); case kTfLiteBool: return copyToTensor(context, input->data.b, output, num_elements); case kTfLiteComplex64: return copyToTensor( context, reinterpret_cast<std::complex<float>*>(input->data.c64), output, num_elements); default: // Unsupported type. TF_LITE_UNSUPPORTED_TYPE(context, input->type, "Cast"); } return kTfLiteOk; }
Base
1
void TensorSliceReader::LoadShard(int shard) const {
  CHECK_LT(shard, sss_.size());
  if (sss_[shard] || !status_.ok()) {
    return;  // Already loaded, or invalid.
  }
  string value;
  SavedTensorSlices sts;
  const string fname = fnames_[shard];
  VLOG(1) << "Reading meta data from file " << fname << "...";
  Table* table;
  Status s = open_function_(fname, &table);
  if (!s.ok()) {
    status_ = errors::DataLoss("Unable to open table file ", fname, ": ",
                               s.ToString());
    return;
  }
  sss_[shard].reset(table);
  if (!(table->Get(kSavedTensorSlicesKey, &value) &&
        ParseProtoUnlimited(&sts, value))) {
    status_ = errors::Internal(
        "Failed to find the saved tensor slices at the beginning of the "
        "checkpoint file: ",
        fname);
    return;
  }
  status_ = CheckVersions(sts.meta().versions(), TF_CHECKPOINT_VERSION,
                          TF_CHECKPOINT_VERSION_MIN_PRODUCER, "Checkpoint",
                          "checkpoint");
  if (!status_.ok()) return;
  for (const SavedSliceMeta& ssm : sts.meta().tensor()) {
    TensorShape ssm_shape;
    status_ = TensorShape::BuildTensorShapeBase(ssm.shape(), &ssm_shape);
    if (!status_.ok()) return;
    for (const TensorSliceProto& tsp : ssm.slice()) {
      TensorSlice ss_slice(tsp);
      status_ = RegisterTensorSlice(ssm.name(), ssm_shape, ssm.type(), fname,
                                    ss_slice, &tensors_);
      if (!status_.ok()) return;
    }
  }
}
Class
2
void jas_matrix_asr(jas_matrix_t *matrix, int n)
{
	int i;
	int j;
	jas_seqent_t *rowstart;
	int rowstep;
	jas_seqent_t *data;

	assert(n >= 0);
	if (jas_matrix_numrows(matrix) > 0 && jas_matrix_numcols(matrix) > 0) {
		assert(matrix->rows_);
		rowstep = jas_matrix_rowstep(matrix);
		for (i = matrix->numrows_, rowstart = matrix->rows_[0]; i > 0;
		  --i, rowstart += rowstep) {
			for (j = matrix->numcols_, data = rowstart; j > 0;
			  --j, ++data) {
				//*data >>= n;
				*data = jas_seqent_asr(*data, n);
			}
		}
	}
}
Class
2
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
  output->type = kTfLiteInt32;

  // By design, the input shape is always known at the time of Prepare, even
  // if the preceding op that generates |input| is dynamic. Thus, we can
  // always compute the rank immediately, without waiting for Eval.
  SetTensorToPersistentRo(output);

  // Rank produces a 0-D int32 Tensor representing the rank of input.
  TfLiteIntArray* output_size = TfLiteIntArrayCreate(0);
  TF_LITE_ENSURE_STATUS(context->ResizeTensor(context, output, output_size));

  TF_LITE_ENSURE_EQ(context, NumDimensions(output), 0);

  // Immediately propagate the known rank to the output tensor. This allows
  // downstream ops that rely on the value to use it during prepare.
  if (output->type == kTfLiteInt32) {
    int32_t* output_data = GetTensorData<int32_t>(output);
    *output_data = NumDimensions(input);
  } else {
    return kTfLiteError;
  }

  return kTfLiteOk;
}
Base
1
inline int StringData::size() const { return m_len; }
Base
1
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);

  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
  const TfLiteTensor* axis = GetInput(context, node, kAxisTensor);
  TF_LITE_ENSURE_EQ(context, NumDimensions(axis), 1);
  TF_LITE_ENSURE(context, NumDimensions(input) >= NumElements(axis));

  if (input->type != kTfLiteInt32 && input->type != kTfLiteFloat32 &&
      input->type != kTfLiteUInt8 && input->type != kTfLiteInt16 &&
      input->type != kTfLiteInt64 && input->type != kTfLiteBool) {
    context->ReportError(context, "Type '%s' is not supported by reverse.",
                         TfLiteTypeGetName(input->type));
    return kTfLiteError;
  }

  if (axis->type != kTfLiteInt32) {
    context->ReportError(context, "Axis Type '%s' is not supported by reverse.",
                         TfLiteTypeGetName(axis->type));
    return kTfLiteError;
  }

  // TODO(renjieliu): support multi-axis case.
  if (NumElements(axis) > 1) {
    context->ReportError(context,
                         "Current does not support more than 1 axis.");
  }

  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
  TfLiteIntArray* output_shape = TfLiteIntArrayCopy(input->dims);
  TF_LITE_ENSURE_TYPES_EQ(context, output->type, input->type);
  return context->ResizeTensor(context, output, output_shape);
}
Base
1
ZlibOutStream::ZlibOutStream(OutStream* os, int bufSize_, int compressLevel)
  : underlying(os), compressionLevel(compressLevel), newLevel(compressLevel),
    bufSize(bufSize_ ? bufSize_ : DEFAULT_BUF_SIZE), offset(0)
{
  zs = new z_stream;
  zs->zalloc = Z_NULL;
  zs->zfree = Z_NULL;
  zs->opaque = Z_NULL;
  zs->next_in = Z_NULL;
  zs->avail_in = 0;
  if (deflateInit(zs, compressLevel) != Z_OK) {
    delete zs;
    throw Exception("ZlibOutStream: deflateInit failed");
  }
  ptr = start = new U8[bufSize];
  end = start + bufSize;
}
Base
1
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  auto* data =
      reinterpret_cast<TfLiteAudioMicrofrontendParams*>(node->user_data);

  TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);

  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);

  TF_LITE_ENSURE_EQ(context, NumDimensions(input), 1);
  TF_LITE_ENSURE_EQ(context, input->type, kTfLiteInt16);

  output->type = kTfLiteInt32;
  if (data->out_float) {
    output->type = kTfLiteFloat32;
  }

  TfLiteIntArray* output_size = TfLiteIntArrayCreate(2);
  int num_frames = 0;
  if (input->dims->data[0] >= data->state->window.size) {
    num_frames = (input->dims->data[0] - data->state->window.size) /
                     data->state->window.step / data->frame_stride +
                 1;
  }
  output_size->data[0] = num_frames;
  output_size->data[1] = data->state->filterbank.num_channels *
                         (1 + data->left_context + data->right_context);

  return context->ResizeTensor(context, output, output_size);
}
Base
1
Http::FilterTrailersStatus Context::onRequestTrailers() {
  if (!wasm_->onRequestTrailers_) {
    return Http::FilterTrailersStatus::Continue;
  }
  if (wasm_->onRequestTrailers_(this, id_).u64_ == 0) {
    return Http::FilterTrailersStatus::Continue;
  }
  return Http::FilterTrailersStatus::StopIteration;
}
Base
1
TfLiteStatus MaxEval(TfLiteContext* context, TfLiteNode* node) {
  auto* params = reinterpret_cast<TfLitePoolParams*>(node->builtin_data);
  OpData* data = reinterpret_cast<OpData*>(node->user_data);
  TfLiteTensor* output = GetOutput(context, node, 0);
  const TfLiteTensor* input = GetInput(context, node, 0);
  switch (input->type) {  // Already know in/out types are same.
    case kTfLiteFloat32:
      MaxEvalFloat<kernel_type>(context, node, params, data, input, output);
      break;
    case kTfLiteUInt8:
      MaxEvalQuantizedUInt8<kernel_type>(context, node, params, data, input,
                                         output);
      break;
    case kTfLiteInt8:
      MaxEvalQuantizedInt8<kernel_type>(context, node, params, data, input,
                                        output);
      break;
    case kTfLiteInt16:
      MaxEvalQuantizedInt16<kernel_type>(context, node, params, data, input,
                                         output);
      break;
    default:
      TF_LITE_KERNEL_LOG(context, "Type %s not currently supported.",
                         TfLiteTypeGetName(input->type));
      return kTfLiteError;
  }
  return kTfLiteOk;
}
Base
1
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
  const TfLiteTensor* fft_length = GetInput(context, node, kFftLengthTensor);
  const int32_t* fft_length_data = GetTensorData<int32_t>(fft_length);
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);

  if (output->type != kTfLiteComplex64) {
    context->ReportError(context,
                         "Type '%s' for output is not supported by rfft2d.",
                         TfLiteTypeGetName(output->type));
    return kTfLiteError;
  }

  // Resize the output tensor if the fft_length tensor is not constant.
  // Otherwise, check if the output shape is correct.
  if (!IsConstantTensor(fft_length)) {
    TF_LITE_ENSURE_STATUS(ResizeOutputandTemporaryTensors(context, node));
  } else {
    int num_dims_output = NumDimensions(output);
    const RuntimeShape output_shape = GetTensorShape(output);
    TF_LITE_ENSURE_EQ(context, num_dims_output, NumDimensions(input));
    TF_LITE_ENSURE(context, num_dims_output >= 2);
    TF_LITE_ENSURE_EQ(context, output_shape.Dims(num_dims_output - 2),
                      fft_length_data[0]);
    TF_LITE_ENSURE_EQ(context, output_shape.Dims(num_dims_output - 1),
                      fft_length_data[1] / 2 + 1);
  }

  return Rfft2dHelper(context, node);
}
Base
1
ssize_t enc_untrusted_read(int fd, void *buf, size_t count) {
  return static_cast<ssize_t>(EnsureInitializedAndDispatchSyscall(
      asylo::system_call::kSYS_read, fd, buf, count));
}
Base
1
void InferenceContext::PreInputInit(
    const OpDef& op_def, const std::vector<const Tensor*>& input_tensors,
    const std::vector<ShapeHandle>& input_tensors_as_shapes) {
  // TODO(mdan): This is also done at graph construction. Run only here instead?
  const auto ret = full_type::SpecializeType(attrs_, op_def);
  DCHECK(ret.status().ok()) << "while instantiating types: " << ret.status();
  ret_types_ = ret.ValueOrDie();

  input_tensors_ = input_tensors;
  input_tensors_as_shapes_ = input_tensors_as_shapes;

  construction_status_ =
      NameRangesForNode(attrs_, op_def, &input_name_map_, &output_name_map_);
  if (!construction_status_.ok()) return;

  int num_outputs = 0;
  for (const auto& e : output_name_map_) {
    num_outputs = std::max(num_outputs, e.second.second);
  }
  outputs_.assign(num_outputs, nullptr);
  output_handle_shapes_and_types_.resize(num_outputs);
}
Class
2
void Ogg::XiphComment::parse(const ByteVector &data)
{
  // The first thing in the comment data is the vendor ID length, followed by a
  // UTF8 string with the vendor ID.

  int pos = 0;

  int vendorLength = data.mid(0, 4).toUInt(false);
  pos += 4;

  d->vendorID = String(data.mid(pos, vendorLength), String::UTF8);
  pos += vendorLength;

  // Next the number of fields in the comment vector.

  int commentFields = data.mid(pos, 4).toUInt(false);
  pos += 4;

  for(int i = 0; i < commentFields; i++) {

    // Each comment field is in the format "KEY=value" in a UTF8 string and has
    // 4 bytes before the text starts that gives the length.

    int commentLength = data.mid(pos, 4).toUInt(false);
    pos += 4;

    String comment = String(data.mid(pos, commentLength), String::UTF8);
    pos += commentLength;

    int commentSeparatorPosition = comment.find("=");

    String key = comment.substr(0, commentSeparatorPosition);
    String value = comment.substr(commentSeparatorPosition + 1);

    addField(key, value, false);
  }
}
Class
2
uint64_t HttpIntegrationTest::waitForNextUpstreamRequest(
    const std::vector<uint64_t>& upstream_indices) {
  uint64_t upstream_with_request;
  // If there is no upstream connection, wait for it to be established.
  if (!fake_upstream_connection_) {
    AssertionResult result = AssertionFailure();
    for (auto upstream_index : upstream_indices) {
      result = fake_upstreams_[upstream_index]->waitForHttpConnection(
          *dispatcher_, fake_upstream_connection_, TestUtility::DefaultTimeout,
          max_request_headers_kb_);
      if (result) {
        upstream_with_request = upstream_index;
        break;
      }
    }
    RELEASE_ASSERT(result, result.message());
  }
  // Wait for the next stream on the upstream connection.
  AssertionResult result =
      fake_upstream_connection_->waitForNewStream(*dispatcher_,
                                                  upstream_request_);
  RELEASE_ASSERT(result, result.message());
  // Wait for the stream to be completely received.
  result = upstream_request_->waitForEndStream(*dispatcher_);
  RELEASE_ASSERT(result, result.message());

  return upstream_with_request;
}
Class
2