#ifndef _ML_VECTOR_HPP_ #define _ML_VECTOR_HPP_ #include <MemeCore/Matrix.hpp> namespace ml { /* * * * * * * * * * * * * * * * * * * * */ template < class _Elem, size_t _Size > class Vector : public Matrix<_Elem, _Size, 1> , public IComparable<Vector<_Elem, _Size>> { public: // Usings /* * * * * * * * * * * * * * * * * * * * */ using value_type = typename _Elem; using self_type = typename Vector<value_type, _Size>; using base_type = typename Matrix<value_type, _Size, 1>; using init_type = typename base_type::init_type; using array_type = typename base_type::array_type; using const_value = typename base_type::const_value; using pointer = typename base_type::pointer; using reference = typename base_type::reference; using const_pointer = typename base_type::const_pointer; using const_reference = typename base_type::const_reference; using iterator = typename base_type::iterator; using const_iterator = typename base_type::const_iterator; using reverse_iterator = typename base_type::reverse_iterator; using const_reverse_iterator= typename base_type::const_reverse_iterator; public: // Constructors /* * * * * * * * * * * * * * * * * * * * */ Vector() : base_type() { } Vector(const_reference value) : base_type(value) { } Vector(const init_type & value) : base_type(value) { } Vector(const self_type & value) : base_type(value) { } template < size_t S > Vector(const Vector<value_type, S> & copy) : base_type(copy) { } template < class T > Vector(const Vector<T, self_type::Size> & copy) : base_type(copy) { } template < class T, size_t S > Vector(const Vector<T, S> & copy, const_reference def = static_cast<value_type>(0)) : base_type(copy, def) { } virtual ~Vector() {} public: // Overrides /* * * * * * * * * * * * * * * * * * * * */ inline virtual bool equals(const self_type & value) const override { for (size_t i = 0; i < this->size(); i++) { if ((*this)[i] != value[i]) { return false; } } return true; } inline virtual bool lessThan(const self_type & value) const override { for (size_t i = 0; i < this->size(); i++) { if ((*this)[i] >= value[i]) { return false; } } return true; } public: // Member Functions /* * * * * * * * * * * * * * * * * * * * */ inline value_type distanceTo(const self_type & value) const { return self_type::Distance((*this), value); } inline value_type dot(const self_type & value) const { return self_type::Dot((*this), value); } inline self_type & normalize() { return ((*this) /= magnitude()); }; inline self_type normalized() const { return self_type(*this).normalize(); }; inline value_type magnitude() const { return sqrt(sqrMagnitude()); }; inline value_type sqrMagnitude() const { value_type value = 0; for (size_t i = 0; i < this->size(); i++) { value += (*this)[i] * (*this)[i]; } return value; }; public: // Static Functions /* * * * * * * * * * * * * * * * * * * * */ inline static value_type Dot(const self_type & a, const self_type & b) { value_type value = static_cast<value_type>(0); for (size_t i = 0; i < self_type::Size; i++) { value += static_cast<value_type>(a[i]) * static_cast<value_type>(b[i]); } return value; }; inline static self_type Direction(const self_type & from, const self_type & to) { return (to - from).normalized(); }; inline static value_type Distance(const self_type & a, const self_type & b) { return self_type(a - b).magnitude(); }; inline static self_type Lerp(const self_type & a, const self_type & b, value_type t) { return ML_LERP(a, b, t); }; inline static self_type Reflect(const self_type & direction, const self_type & normal) { return (normal - direction) * 
(static_cast<value_type>(2) * self_type::Dot(direction, normal)); }; public: // Operators /* * * * * * * * * * * * * * * * * * * * */ inline friend self_type operator-(const self_type & rhs) { return (rhs * static_cast<value_type>(-1)); } inline friend self_type operator*(const self_type & lhs, const_reference rhs) { self_type temp; for (size_t i = 0, imax = temp.size(); i < imax; i++) { temp[i] = lhs[i] * rhs; } return temp; }; inline friend self_type operator/(const self_type & lhs, const_reference rhs) { self_type temp; for (size_t i = 0, imax = temp.size(); i < imax; i++) { temp[i] = lhs[i] / rhs; } return temp; }; inline friend self_type & operator*=(self_type & lhs, const_reference rhs) { return (lhs = (lhs * rhs)); }; inline friend self_type & operator/=(self_type & lhs, const_reference rhs) { return (lhs = (lhs / rhs)); }; /* * * * * * * * * * * * * * * * * * * * */ template <class T> inline friend self_type operator+(const self_type & lhs, const Vector<T, self_type::Size> & rhs) { self_type temp; for (size_t i = 0, imax = temp.size(); i < imax; i++) { temp[i] = lhs[i] + static_cast<value_type>(rhs[i]); } return temp; }; template <class T> inline friend self_type operator-(const self_type & lhs, const Vector<T, self_type::Size> & rhs) { self_type temp; for (size_t i = 0, imax = temp.size(); i < imax; i++) { temp[i] = lhs[i] - static_cast<value_type>(rhs[i]); } return temp; }; template <class T> inline friend self_type operator*(const self_type & lhs, const Vector<T, self_type::Size> & rhs) { self_type temp; for (size_t i = 0, imax = temp.size(); i < imax; i++) { temp[i] = lhs[i] * static_cast<value_type>(rhs[i]); } return temp; }; template <class T> inline friend self_type operator/(const self_type & lhs, const Vector<T, self_type::Size> & rhs) { self_type temp; for (size_t i = 0, imax = temp.size(); i < imax; i++) { temp[i] = lhs[i] / static_cast<value_type>(rhs[i]); } return temp; }; /* * * * * * * * * * * * * * * * * * * * */ template <class T> inline friend self_type & operator+=(self_type & lhs, const Vector<T, self_type::Size> & rhs) { return (lhs = (lhs + rhs)); }; template <class T> inline friend self_type & operator-=(self_type & lhs, const Vector<T, self_type::Size> & rhs) { return (lhs = (lhs - rhs)); }; template <class T> inline friend self_type & operator*=(self_type & lhs, const Vector<T, self_type::Size> & rhs) { return (lhs = (lhs * rhs)); }; template <class T> inline friend self_type & operator/=(self_type & lhs, const Vector<T, self_type::Size> & rhs) { return (lhs = (lhs / rhs)); }; /* * * * * * * * * * * * * * * * * * * * */ }; /* * * * * * * * * * * * * * * * * * * * */ } #endif // !_ML_VECTOR_HPP_
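One detail worth flagging in the header above: Reflect returns (normal - direction) * (2 * Dot(direction, normal)), which is not the textbook reflection formula r = d - 2(d·n)n, so treat it as this library's own convention rather than the standard one. For comparison, here is a minimal sketch of the textbook version as a free function — not part of the library, include path inferred from the Matrix.hpp include above, and it assumes the normal is unit length:

#include <MemeCore/Vector.hpp>

// Textbook reflection of an incident direction d about a unit normal n:
//   r = d - 2 * (d . n) * n
// Sketch only; assumes |n| == 1 and relies on the Vector operators above.
template <class T, size_t N>
ml::Vector<T, N> reflect(const ml::Vector<T, N> & d, const ml::Vector<T, N> & n)
{
    return d - n * (static_cast<T>(2) * ml::Vector<T, N>::Dot(d, n));
}

Separately, aliases of the form using value_type = typename _Elem; compile only under MSVC's lenient parsing; standard C++ does not allow typename before a non-dependent name.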
Looks like even the Palinites are getting sick of the ‘Cuda this week. Amidst the BOOOOS one can hear “QUITTIN’ ON THE JOB!” Stopped clocks, as they say. Meanwhile, the ‘Cuda’s beloved Facebook page is getting bombarded with angry posts like this. My family (wife, 3year old, and 10 year old) and I waited for 6 hours to get a book signed by Sarah Palin tonight. She left 300 folks standing in the rain and cold without explanation or even an address at exit. That is 1800 hours of voters lives sacrificed for nothing due to lack of concern by another politician for… our time, money and effort to see a cause through. She could have invested 45 minutes and not received the boos, or sign my book chants. I know that the majority of the folks that were chanting go Palin now are returning her books tomorrow morning. What a disappointment. If this is the way the ‘Cuda manages a small-market bus tour, just think of what she’ll do for the country. (h/t TS)
<reponame>IHTSDO/snow-owl /* * Copyright 2011-2017 B2i Healthcare Pte Ltd, http://b2i.sg * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.b2international.snowowl.datastore.request; import java.util.Collections; import com.b2international.index.Hits; import com.b2international.index.query.Expression; import com.b2international.index.query.Expressions; import com.b2international.index.query.Expressions.ExpressionBuilder; import com.b2international.snowowl.core.branch.Branch; import com.b2international.snowowl.core.branch.BranchManager; import com.b2international.snowowl.core.branch.Branches; import com.b2international.snowowl.core.domain.RepositoryContext; import com.b2international.snowowl.datastore.internal.branch.BranchDocument; import com.b2international.snowowl.datastore.internal.branch.InternalBranch; import com.google.common.collect.ImmutableList; /** * @since 4.1 */ final class BranchSearchRequest extends SearchIndexResourceRequest<RepositoryContext, Branches, BranchDocument> { enum OptionKey { /** * Filter branches by their parent path */ PARENT, /** * Filter branches by their name */ NAME, } BranchSearchRequest() {} @Override protected Branches createEmptyResult(int limit) { return new Branches(Collections.emptyList(), null, null, limit, 0); } @Override protected Expression prepareQuery(RepositoryContext context) { ExpressionBuilder queryBuilder = Expressions.builder(); addIdFilter(queryBuilder, ids -> Expressions.matchAny("path", ids)); if (containsKey(OptionKey.PARENT)) { queryBuilder.filter(Expressions.matchAny("parentPath", getCollection(OptionKey.PARENT, String.class))); } if (containsKey(OptionKey.NAME)) { queryBuilder.filter(Expressions.matchAny("name", getCollection(OptionKey.NAME, String.class))); } return queryBuilder.build(); } @Override protected Class<BranchDocument> getDocumentType() { return BranchDocument.class; } @Override protected Branches toCollectionResource(RepositoryContext context, Hits<BranchDocument> hits) { final BranchManager branchManager = context.service(BranchManager.class); final ImmutableList.Builder<Branch> branches = ImmutableList.builder(); for (BranchDocument doc : hits) { final InternalBranch branch = doc.toBranch(); branch.setBranchManager(branchManager); branches.add(branch); } return new Branches(branches.build(), hits.getScrollId(), hits.getSearchAfter(), limit(), hits.getTotal()); } }
def nfs_enable(self): return self.request( "nfs-enable", { }, { } )
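For context, this reads like a generated wrapper from the NetApp ONTAPI Python bindings. A self-contained sketch of how such a method is driven, with a stand-in transport — everything here except the nfs_enable body is hypothetical:

class FakeTransport:
    """Stand-in for the real API transport; the real request() would
    serialize the named ONTAPI call over the wire."""
    def request(self, name, inputs, outputs):
        return {"api": name, "in": inputs, "out": outputs}


class NfsClient(FakeTransport):
    def nfs_enable(self):
        return self.request("nfs-enable", {}, {})


print(NfsClient().nfs_enable())  # {'api': 'nfs-enable', 'in': {}, 'out': {}}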
export class Message {
    userId: string;
    text: string;
    jobId: string;
    userType: string;
    read: boolean;
}
/*! \file bcmfp_strings.c
 *
 * APIs to print meaningful names to different
 * enumerations in BCMFP. These APIs can be used
 * in LOG messages.
 */
/*
 * This license is set out in https://raw.githubusercontent.com/Broadcom-Network-Switching-Software/OpenBCM/master/Legal/LICENSE file.
 *
 * Copyright 2007-2020 Broadcom Inc. All rights reserved.
 */

#include <shr/shr_error.h>
#include <shr/shr_debug.h>
#include <bcmfp/bcmfp_internal.h>
#include <bcmfp/bcmfp_strings_internal.h>

#define BSL_LOG_MODULE BSL_LS_BCMFP_DEV

char *
bcmfp_stage_string(bcmfp_stage_id_t stage_id)
{
    static char *stage_strings[] = BCMFP_STAGE_STRINGS;
    if (stage_id < BCMFP_STAGE_ID_COUNT && stage_id >= 0) {
        return stage_strings[stage_id];
    }
    return "STAGE_UNKNOWN";
}

char *
bcmfp_group_mode_string(bcmfp_group_mode_t group_mode)
{
    static char *group_mode_strings[] = BCMFP_GROUP_MODE_STRINGS;
    if (group_mode < BCMFP_GROUP_MODE_COUNT && group_mode >= 0) {
        /* coverity[overrun-local] */
        return group_mode_strings[group_mode];
    }
    return "GROUP_MODE_UNKNOWN";
}
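A hedged illustration of how these helpers are meant to appear in diagnostics (printf stands in for the SDK's BSL logging macros, and the enum values are assumed to be valid):

#include <stdio.h>
#include <bcmfp/bcmfp_internal.h>
#include <bcmfp/bcmfp_strings_internal.h>

/* Sketch: readable stage/mode names in a log line. */
static void bcmfp_debug_print(bcmfp_stage_id_t stage_id, bcmfp_group_mode_t group_mode)
{
    printf("stage=%s mode=%s\n",
           bcmfp_stage_string(stage_id),
           bcmfp_group_mode_string(group_mode));
}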
import demistomock as demisto
from CommonServerPython import *


def get_query(cre_name_null):
    if cre_name_null == "False":
        query = "SELECT *,\"CRE Name\",\"CRE Description\",CATEGORYNAME(highlevelcategory) " \
                "FROM events WHERE \"CRE NAME\" <> NULL AND INOFFENSE({0}) START '{1}'"
    else:
        query = "SELECT *,\"CRE Name\",\"CRE Description\",CATEGORYNAME(highlevelcategory) " \
                "FROM events WHERE \"CRE NAME\" IS NULL AND INOFFENSE({0}) START '{1}'"
    return query


def main():
    d_args = demisto.args()
    is_cre_name_null = demisto.args().get("is_cre_name_null", "True")
    QUERY = get_query(is_cre_name_null)

    offense_id = demisto.get(d_args, 'offenseID')
    start_time = demisto.get(d_args, 'startTime')

    # Try converting from date string to timestamp
    try:
        start_time = date_to_timestamp(str(start_time), '%Y-%m-%dT%H:%M:%S.%f000Z')
    except Exception:
        pass

    d_args["query_expression"] = QUERY.format(offense_id, start_time)

    resp = demisto.executeCommand('QRadarFullSearch', d_args)
    if isError(resp[0]):
        demisto.results(resp)
    else:
        data = demisto.get(resp[0], 'Contents.events')
        if not data:
            resp[0]['HumanReadable'] = "No Correlations were found for offense id {0}".format(offense_id)
        else:
            data = data if isinstance(data, list) else [data]

            QRadar = {
                'Correlation': []
            }  # type: Dict

            for corr in data:
                # list() so the keys can be mutated below
                # (dict.keys() has no remove() in Python 3)
                keys = list(corr.keys())
                correlation = {
                    "SourceIP": demisto.get(corr, "sourceip")
                }  # type: Dict

                # Standardized known keys
                if "sourceip" in keys:
                    keys.remove("sourceip")
                correlation["CREDescription"] = demisto.get(corr, "CRE Description")
                if "CRE Description" in keys:
                    keys.remove("CRE Description")
                correlation["CREName"] = demisto.get(corr, "CRE Name")
                if "CRE Name" in keys:
                    keys.remove("CRE Name")
                correlation["QID"] = demisto.get(corr, "qid")
                if "qid" in keys:
                    keys.remove("qid")
                correlation["DestinationIP"] = demisto.get(corr, "destinationip")
                if "destinationip" in keys:
                    keys.remove("destinationip")
                correlation["Category"] = demisto.get(corr, "categoryname_highlevelcategory")
                if "categoryname_highlevelcategory" in keys:
                    keys.remove("categoryname_highlevelcategory")
                correlation["CategoryID"] = demisto.get(corr, "category")
                if "category" in keys:
                    keys.remove("category")
                correlation["Username"] = demisto.get(corr, "username")
                if "username" in keys:
                    keys.remove("username")

                # Push to context rest of the keys (won't be shown in 'outputs')
                for key in keys:
                    correlation[''.join(x for x in key.title() if not x.isspace())] = demisto.get(corr, key)

                QRadar['Correlation'].append(correlation)

            context = {"QRadar": QRadar}
            resp[0]['EntryContext'] = context

        demisto.results(resp)


# python2 uses __builtin__ python3 uses builtins
if __name__ in ('__builtin__', 'builtins'):
    main()
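For reference, this is what the query looks like once main() fills in the placeholders; the offense id and millisecond start time below are illustrative:

q = get_query("False").format(123, 1600000000000)
# q is now:
# SELECT *,"CRE Name","CRE Description",CATEGORYNAME(highlevelcategory)
# FROM events WHERE "CRE NAME" <> NULL AND INOFFENSE(123) START '1600000000000'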
{-# LANGUAGE OverloadedStrings #-}
module Caurakarman.Query where

import Caurakarman.Persistence (withBH', index, postMappingName)
import Caurakarman.Type
import Data.Aeson
import qualified Data.Text as T
import Database.Bloodhound
import Network.HTTP.Conduit (responseBody)

searchPostBody :: T.Text -> IO (Either String [Hit Post])
searchPostBody phrase = do
  let query = QueryMatchQuery $ mkMatchQuery (FieldName "body") (QueryString phrase)
      search = mkSearch (Just query) Nothing
  reply <- withBH' $ searchByType index postMappingName search
  let result = eitherDecode (responseBody reply) :: Either String (SearchResult Post)
  return $ fmap (hits . searchHits) result
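A small usage sketch in the same module: run the body search and report the number of hits (assumes the Bloodhound endpoint configured by withBH' is reachable):

-- Report how many posts match a phrase in their body.
reportMatches :: T.Text -> IO ()
reportMatches phrase = do
  result <- searchPostBody phrase
  case result of
    Left err   -> putStrLn ("search failed: " ++ err)
    Right hits -> putStrLn ("matched " ++ show (length hits) ++ " posts")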
<filename>src/unidim-lipm-dcm-estimator.cpp #include <state-observation/dynamics-estimators/unidim-lipm-dcm-estimator.hpp> #include <state-observation/tools/miscellaneous-algorithms.hpp> namespace stateObservation { constexpr double UnidimLipmDcmEstimator::defaultDt_; constexpr double UnidimLipmDcmEstimator::defaultOmega_; /// default expected drift of the bias every second constexpr double UnidimLipmDcmEstimator::defaultBiasDriftSecond; /// default error in the estimation of the sensors constexpr double UnidimLipmDcmEstimator::defaultZmpErrorStd; constexpr double UnidimLipmDcmEstimator::defaultDcmErrorStd; /// default uncertainty in the initial values of DCM and Bias constexpr double UnidimLipmDcmEstimator::defaultDCMUncertainty; constexpr double UnidimLipmDcmEstimator::defaultBiasUncertainty; using namespace tools; UnidimLipmDcmEstimator::UnidimLipmDcmEstimator(double dt, double omega_0, double biasDriftStd, double initDcm, double initZMP, double initBias, double dcmMeasureErrorStd, double zmpMeasureErrorStd, double initDcmUncertainty, double initBiasUncertainty) : omega0_(omega_0), dt_(dt), biasDriftStd_(biasDriftStd), zmpErrorStd_(zmpMeasureErrorStd), previousZmp_(initZMP), filter_(2, 1, 1) { updateMatricesABQ_(); C_ << 1., 1.; R_ << square(dcmMeasureErrorStd); filter_.setC(C_.transpose()); filter_.setMeasurementCovariance(R_); Vector2 x; x << initDcm, initBias; filter_.setState(x, 0); Matrix2 P; // clang-format off P<< square(initDcmUncertainty), 0, 0, square(initBiasUncertainty); // clang-format on filter_.setStateCovariance(P); } void UnidimLipmDcmEstimator::resetWithInputs(double measuredDcm, double measuredZMP, bool measurementIsWithBias, double biasDriftStd, double dcmMeasureErrorStd, double zmpMeasureErrorStd, double initBias, double initBiasuncertainty) { filter_.reset(); biasDriftStd_ = biasDriftStd; zmpErrorStd_ = zmpMeasureErrorStd; previousZmp_ = measuredZMP; updateMatricesABQ_(); R_ << square(dcmMeasureErrorStd); filter_.setMeasurementCovariance(R_); Vector2 x; /// initialize the state using the measurement if(measurementIsWithBias) { x << measuredDcm - initBias, initBias; } else { x << measuredDcm, initBias; } filter_.setState(x, 0); Matrix2 P; if(measurementIsWithBias) { /// The state and the // clang-format off P<< square(dcmMeasureErrorStd) + square(initBiasuncertainty), -square(initBiasuncertainty), -square(initBiasuncertainty), square(initBiasuncertainty); // clang-format on } else { // clang-format off P<< square(dcmMeasureErrorStd), 0, 0, square(initBiasuncertainty); // clang-format on } filter_.setStateCovariance(P); } void UnidimLipmDcmEstimator::setLipmNaturalFrequency(double omega_0) { omega0_ = omega_0; updateMatricesABQ_(); } void UnidimLipmDcmEstimator::setSamplingTime(double dt) { dt_ = dt; updateMatricesABQ_(); } void UnidimLipmDcmEstimator::setBias(double bias) { Vector2 x = filter_.getCurrentEstimatedState(); /// update the bias x(1) = bias; filter_.setCurrentState(x); } void UnidimLipmDcmEstimator::setBias(double bias, double uncertainty) { setBias(bias); Matrix2 P = filter_.getStateCovariance(); /// resetting the non diagonal parts P(0, 1) = P(1, 0) = 0; P(1, 1) = square(uncertainty); filter_.setStateCovariance(P); } void UnidimLipmDcmEstimator::setBiasDriftPerSecond(double driftPerSecond) { Matrix2 Q = filter_.getProcessCovariance(); /// update the corresponding part in the process noise matrix Q(1, 1) = square(driftPerSecond); filter_.setProcessCovariance(Q); } void UnidimLipmDcmEstimator::setUnbiasedDCM(double dcm) { Vector2 x = 
filter_.getCurrentEstimatedState(); /// update the bias x(0) = dcm; filter_.setCurrentState(x); } void UnidimLipmDcmEstimator::setUnbiasedDCM(double dcm, double uncertainty) { setUnbiasedDCM(dcm); Matrix2 P = filter_.getStateCovariance(); /// resetting the non diagonal parts P(0, 1) = P(1, 0) = 0; P(0, 0) = square(uncertainty); filter_.setStateCovariance(P); } void UnidimLipmDcmEstimator::setZmpMeasureErrorStd(double std) { zmpErrorStd_ = std; updateMatricesABQ_(); } void UnidimLipmDcmEstimator::setDcmMeasureErrorStd(double std) { Matrix1 R; R(0, 0) = square(std); } void UnidimLipmDcmEstimator::setInputs(double dcm, double zmp) { Vector1 u; Vector1 y; y(0) = dcm; /// The prediction of the state depends on the previous value of the ZMP u(0) = previousZmp_; previousZmp_ = zmp; filter_.pushMeasurement(y); filter_.pushInput(u); } double UnidimLipmDcmEstimator::getUnbiasedDCM() const { return filter_.getCurrentEstimatedState()(0); } double UnidimLipmDcmEstimator::getBias() const { return filter_.getCurrentEstimatedState()(1); } void UnidimLipmDcmEstimator::updateMatricesABQ_() { // clang-format off A_ << 1 + omega0_ * dt_, 0, 0, 1; B_ << -omega0_ * dt_, 0; Q_ << square(omega0_* dt_ * zmpErrorStd_), 0, 0, square(biasDriftStd_*dt_); // clang-format on filter_.setA(A_); filter_.setB(B_); filter_.setProcessCovariance(Q_); } } // namespace stateObservation
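One apparent slip in the file above: setDcmMeasureErrorStd fills a local R but never passes it to the filter, so the call currently has no observable effect. A sketch of what the body presumably intends, mirroring how the constructor applies its measurement covariance (this is an inference, not the upstream fix):

void UnidimLipmDcmEstimator::setDcmMeasureErrorStd(double std)
{
  Matrix1 R;
  R(0, 0) = square(std);
  filter_.setMeasurementCovariance(R); // the line the original omits
}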
/** Visitor that deletes an object tree. */ public class DeleteVisitor implements ObjectVisitor { private final MetadataOperations metadataOps; private final Function<ObjectHandle, Boolean> callback; public DeleteVisitor(MetadataOperations metadataOps, Function<ObjectHandle, Boolean> callback) { this.metadataOps = Preconditions.checkNotNull(metadataOps); this.callback = Preconditions.checkNotNull(callback); } /** * No operation needed. Always returns true. * * @param directory * @return */ @Override public boolean preVisitDirectoryObject(VersionedObject directory) { return true; } /** * Deletes the directory. * * @param directory * @return */ @Override public boolean postVisitDirectoryObject(VersionedObject directory) { return metadataOps.delete(directory) && callback.apply(directory); } /** * Deletes a single file. * * @param file * @return */ @Override public boolean visitFileObject(VersionedObject file) { return metadataOps.delete(file) && callback.apply(file); } }
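A hedged usage sketch: wire the visitor with a callback that logs each deletion and keeps the traversal going. The driver call at the end is hypothetical and stands in for whatever walks trees with an ObjectVisitor in this codebase:

DeleteVisitor visitor = new DeleteVisitor(metadataOps, handle -> {
    System.out.println("deleted: " + handle); // handle is the deleted ObjectHandle
    return true;                              // true = let the traversal continue
});
// hypothetical driver:
// tree.accept(visitor);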
The metazoan origin recognition complex. Regulated initiation of DNA replication relies on the firing of initiator proteins that bind specifically to origin DNA. The discovery of the first eukaryotic initiator, the Saccharomyces cerevisiae Origin Recognition Complex (ORC) has allowed us to discern some aspects of how the onset of replication is regulated. However, understanding the specifics of replication in metazoan organisms can only be achieved by directly addressing these questions in animal cells. This review deals with the current state of knowledge on the metazoan Origin Recognition Complex, its composition and regulation in higher eukaryotes, its role in the initiation of replication and beyond replication, and its possible connection with human pathology.
The Sipapu is the opening (gateway) through the Kiva to the “nether” lands of the ancient Anasazi Indians and their descendants in the south-west. This sensitive love-story of the “end-times” brings together a re-turning tribe of the Ancients from the past, a Pleiadian Space expedition from the “future”, and some “awakening volunteers” from Earth’s pre-sent. The plot climaxes with a “Gathering” at which many “Masters” from the higher realms speak about the Prophecies and Revelations in these end-times. This manuscript was written in early 1987. It was THE first work by “dharma”, thought to be a Fantasy but now appreciated as the beginning of an en-Light-ening Reality Journey. With help from Little Crow of the Lakota, Dharma’s public mission of translation for Higher Teachers begins here. Sipapu Odyssey was at one time in the early stage of being made into a motion picture utilizing the Pleiadian advanced technology of Futuronics. 5. Sipapu Odyssey depicts a first wave of ascension happening at the same time as a big disclosure gathering, which happens right before the big event! 6. You may now know that the big event may be related to a Nibiru-triggered pole shift event that will WAKE HUMANITY UP, LIKE NEVER BEFORE. Nibiru/Red Kachina Visible In South African Sky 05/26/2015? Review Red Kachina Hopi Prophecy 7. The name “Sipapu” refers to a hole in a kiva through which spiritual energy can pass… does that sound a bit connected to “they will flee to dens of the earth”… the dens of the earth = pithouses, kivas… holes in the earth… caves… that sorta thing. Are you able to perceive that one day soon all this could be playing out, with you, the reader, fleeing to a den of the earth? Some subjects you may wish to brush up on are:
#coding:utf-8
import os
from folderReader import traverseDIR
from bs4 import BeautifulSoup
import codecs

src = traverseDIR()
rootPageList = src[0]


def rootPageBuilder():
    for item in rootPageList:
        pageContent = ''''''
        srcPath = "./src/" + item
        rootFilesDir = os.walk(srcPath)
        rootFiles = []
        for files in rootFilesDir:
            # print(files)
            for file in files[2]:
                Is_mp4 = file.endswith(".mp4")
                Is_flv = file.endswith(".flv")
                Is_webm = file.endswith(".webm")
                Is_ogv = file.endswith(".ogv")
                Is_mp3 = file.endswith(".mp3")
                Is_jpg = file.endswith(".jpg")
                Is_png = file.endswith(".png")
                Is_txt = file.endswith(".txt")
                if Is_mp4:
                    pageContent = pageContent + '''<h4>''' + file.split(".")[0] + '''</h4><video id="movie" preload controls width="90%" height="auto"><source src=".''' + files[0] + '''/''' + file + '''" type="video/mp4" />Your browser doesn't support this video.</video>'''
                    continue
                if Is_flv:
                    pageContent = pageContent + '''<h4>''' + file.split(".")[0] + '''</h4><video id="movie" preload controls width="90%" height="auto"><source src=".''' + files[0] + '''/''' + file + '''" type="video/mp4" />Your browser doesn't support this video.</video>'''
                    continue
                if Is_webm:
                    pageContent = pageContent + '''<h4>''' + file.split(".")[0] + '''</h4><video id="movie" preload controls width="90%" height="auto"><source src=".''' + files[0] + '''/''' + file + '''" type="video/webm" />Your browser doesn't support this video.</video>'''
                    continue
                if Is_ogv:
                    pageContent = pageContent + '''<h4>''' + file.split(".")[0] + '''</h4><video id="movie" preload controls width="90%" height="auto"><source src=".''' + files[0] + '''/''' + file + '''" type="video/ogg" />Your browser doesn't support this video.</video>'''
                    continue
                if Is_mp3:
                    pageContent = pageContent + '''<h4>''' + file.split(".")[0] + '''</h4><audio src=".''' + files[0] + '''/''' + file + '''" autoplay controls></audio>'''
                    continue
                if Is_jpg:
                    pageContent = pageContent + '''<li>''' + file + '''</li> <img src=".''' + files[0] + '''/''' + file + '''" width="90%" height="auto" alt="''' + file + '''"></li>'''
                    continue
                if Is_png:
                    pageContent = pageContent + '''<li>''' + file + '''</li> <img src=".''' + files[0] + '''/''' + file + '''" width="90%" height="auto" alt="''' + file + '''"></li>'''
                    continue
                if Is_txt:
                    pageContent = pageContent + '''<h3>''' + file.split(".")[0] + '''</h3><br>'''
                    fileLink = files[0] + "/" + file
                    fi = open(fileLink, "r")
                    text = ''''''
                    for line in fi:
                        text = text + line + '''<br>'''
                    fi.close()
                    # print(text)
                    pageContent = pageContent + text
                    continue
        pageName = item.split("/")[-1]
        pageNameMenu = item.split("/")[-2]
        pageNameURL = ''.join(char for char in pageName if char.isalnum())
        pageNameHTML = "./templates/posts/" + pageNameURL + ".html"
        PagePath = "./templates/posts/"
        isExist = os.path.exists(PagePath)
        if not isExist:
            os.makedirs(PagePath)
        temp = open(pageNameHTML, 'w')
        htmlTemp = '''{% extends "template_posts.html" %}{% block title %}Links{% endblock %}{% block content %}<div class="jumbo"><h2>''' + pageName + '''</h2><br/>''' + pageContent + '''</div>{% endblock %}'''
        soup = BeautifulSoup(htmlTemp, 'html.parser')
        temp.write(soup.prettify())
        temp.close()
        # print(pageNameMenu)
        # menuPath = "./templates/menu"
        # menuList = os.walk(menuPath)
        # for root, dirs, files in menuList:
        #     if files != []:
        #         for file in files:
        #             file_name = os.path.join(root, file)
        #             # print(file_name)
        #             file_name_menu = file_name.split("/")[-1].split(".")[0]
        #             if pageNameMenu == file_name_menu:
        #                 print(pageNameMenu, file_name_menu, "Matched!", pageName)
        #                 # print("Match!")
        #                 # print(pageNameURL)
        #                 Tempp = '''<div class="jumbo">''' + '''<a href="../../posts/''' + pageNameURL + '''.html">''' + pageName + '''</a>''' + '''</div>'''
        #                 # print(Tempp)
        #                 f = open(file_name, 'r')
        #                 # print(f)
        #                 html_before_soup = f.read()
        #                 # print(html_before_soup)
        #                 f.close()
        #                 soup = BeautifulSoup(html_before_soup, 'html.parser')
        #                 soup.append(BeautifulSoup(Tempp, 'html.parser'))
        #                 f = open(file_name, 'w')
        #                 f.write(soup.prettify())
        #                 f.close()
        #     # print("------")


if __name__ == '__main__':
    rootPageBuilder()
/** * a class that represents login data. */ public final class LoginData { /** * the chain data. */ @NotNull private final ChainDataEvent.ChainData chainData; /** * the player. */ @NotNull private final PlayerConnection connection; /** * the profile. */ @NotNull private final GameProfile profile; /** * the should login. */ private final AtomicBoolean shouldLogin = new AtomicBoolean(); /** * the task. */ @NotNull private final AtomicReference<Task> task = new AtomicReference<>(); /** * the async login event. */ @Nullable private PlayerAsyncLoginEvent asyncLogin; /** * ctor. * * @param chainData the chain data. * @param connection the connection. * @param profile the profile. */ public LoginData(@NotNull final ChainDataEvent.ChainData chainData, @NotNull final PlayerConnection connection, @NotNull final GameProfile profile) { this.chainData = chainData; this.profile = profile; this.connection = connection; } /** * obtains the chain data. * * @return chain data. */ @NotNull public ChainDataEvent.ChainData chainData() { return this.chainData; } /** * obtains the task. * * @return task. */ @Nullable public Task getTask() { return this.task.get(); } /** * sets the task. * * @param task task to set. */ public void setTask(@NotNull final Task task) { this.task.set(task); } /** * initializes the player. */ public void initialize() { if (this.asyncLogin == null) { return; } if (this.connection.getConnection().isDisconnected()) { return; } if (this.asyncLogin.getLoginResult() != LoginResultEvent.LoginResult.ALLOWED) { this.connection.disconnect(this.asyncLogin.getKickMessage().orElse(null)); return; } final var player = new ShirukaPlayer(this.connection, this, this.profile); this.connection.initialize(player); this.asyncLogin.getActions().forEach(action -> action.accept(player)); } /** * sets the async login event. * * @param asyncLogin async login event to set. */ public void setAsyncLogin(@NotNull final PlayerAsyncLoginEvent asyncLogin) { this.asyncLogin = asyncLogin; } /** * sets the should login. * * @param shouldLogin the should login to set. */ public synchronized void setShouldLogin(final boolean shouldLogin) { this.shouldLogin.set(shouldLogin); } /** * obtains the should login. * * @return should login. */ public synchronized boolean shouldLogin() { return this.shouldLogin.get(); } }
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.tika.example;

import java.io.BufferedInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Locale;

import org.xml.sax.SAXException;
import org.xml.sax.helpers.DefaultHandler;

import org.apache.tika.Tika;
import org.apache.tika.exception.TikaException;
import org.apache.tika.metadata.Metadata;
import org.apache.tika.parser.ParseContext;
import org.apache.tika.parser.Parser;

/**
 * This example demonstrates how to interrupt document parsing if
 * some condition is met.
 * <p>
 * {@link InterruptableParsingExample.InterruptingContentHandler} throws a special exception as soon as
 * it finds the {@code query} string in the parsed file.
 * <p>
 * See also http://stackoverflow.com/questions/31939851
 */
public class InterruptableParsingExample {
    private Tika tika = new Tika(); // for default autodetect parser

    public boolean findInFile(String query, Path path) {
        InterruptingContentHandler handler = new InterruptingContentHandler(query);
        Metadata metadata = new Metadata();
        ParseContext context = new ParseContext();
        context.set(Parser.class, tika.getParser());

        try (InputStream is = new BufferedInputStream(Files.newInputStream(path))) {
            tika.getParser().parse(is, handler, metadata, context);
        } catch (QueryMatchedException e) {
            return true;
        } catch (SAXException | TikaException | IOException e) {
            // something went wrong with parsing...
            e.printStackTrace();
        }
        return false;
    }

    class QueryMatchedException extends SAXException {
    }

    /**
     * Trivial content handler that searches for {@code query} in the characters sent to it.
     * <p>
     * Throws {@link QueryMatchedException} when the query string is found.
     */
    class InterruptingContentHandler extends DefaultHandler {
        private String query;
        private StringBuilder sb = new StringBuilder();

        InterruptingContentHandler(String query) {
            this.query = query;
        }

        @Override
        public void characters(char[] ch, int start, int length) throws SAXException {
            sb.append(new String(ch, start, length).toLowerCase(Locale.getDefault()));

            if (sb.toString().contains(query)) {
                throw new QueryMatchedException();
            }

            if (sb.length() > 2 * query.length()) {
                sb.delete(0, sb.length() - query.length()); // keep tail with query.length() chars
            }
        }
    }
}
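Usage is a single call. One caveat visible in the handler: it lower-cases the parsed text but not the query, so pass the query in lower case. The file path below is illustrative:

InterruptableParsingExample example = new InterruptableParsingExample();
boolean found = example.findInFile("confidential",
        java.nio.file.Paths.get("/tmp/report.pdf"));
System.out.println(found ? "query found; parsing stopped early" : "no match");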
<gh_stars>0 /* ** ** Copyright 2015, The Android Open Source Project ** ** Licensed under the Apache License, Version 2.0 (the "License"); ** you may not use this file except in compliance with the License. ** You may obtain a copy of the License at ** ** http://www.apache.org/licenses/LICENSE-2.0 ** ** Unless required by applicable law or agreed to in writing, software ** distributed under the License is distributed on an "AS IS" BASIS, ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ** See the License for the specific language governing permissions and ** limitations under the License. */ //#define LOG_NDEBUG 0 #define LOG_TAG "ResourceManagerService" #include <utils/Log.h> #include <binder/IMediaResourceMonitor.h> #include <binder/IServiceManager.h> #include <dirent.h> #include <media/stagefright/ProcessInfo.h> #include <string.h> #include <sys/types.h> #include <sys/stat.h> #include <sys/time.h> #include <unistd.h> #include "ResourceManagerService.h" #include "ServiceLog.h" #include "mediautils/SchedulingPolicyService.h" #include <cutils/sched_policy.h> namespace android { namespace { class DeathNotifier : public IBinder::DeathRecipient { public: DeathNotifier(const wp<ResourceManagerService> &service, int pid, int64_t clientId) : mService(service), mPid(pid), mClientId(clientId) {} virtual void binderDied(const wp<IBinder> & /* who */) override { // Don't check for pid validity since we know it's already dead. sp<ResourceManagerService> service = mService.promote(); if (service == nullptr) { ALOGW("ResourceManagerService is dead as well."); return; } service->removeResource(mPid, mClientId, false); } private: wp<ResourceManagerService> mService; int mPid; int64_t mClientId; }; } // namespace template <typename T> static String8 getString(const Vector<T> &items) { String8 itemsStr; for (size_t i = 0; i < items.size(); ++i) { itemsStr.appendFormat("%s ", items[i].toString().string()); } return itemsStr; } static bool hasResourceType(MediaResource::Type type, const Vector<MediaResource>& resources) { for (size_t i = 0; i < resources.size(); ++i) { if (resources[i].mType == type) { return true; } } return false; } static bool hasResourceType(MediaResource::Type type, const ResourceInfos& infos) { for (size_t i = 0; i < infos.size(); ++i) { if (hasResourceType(type, infos[i].resources)) { return true; } } return false; } static ResourceInfos& getResourceInfosForEdit( int pid, PidResourceInfosMap& map) { ssize_t index = map.indexOfKey(pid); if (index < 0) { // new pid ResourceInfos infosForPid; map.add(pid, infosForPid); } return map.editValueFor(pid); } static ResourceInfo& getResourceInfoForEdit( int64_t clientId, const sp<IResourceManagerClient>& client, ResourceInfos& infos) { for (size_t i = 0; i < infos.size(); ++i) { if (infos[i].clientId == clientId) { return infos.editItemAt(i); } } ResourceInfo info; info.clientId = clientId; info.client = client; info.cpuBoost = false; infos.push_back(info); return infos.editItemAt(infos.size() - 1); } static void notifyResourceGranted(int pid, const Vector<MediaResource> &resources) { static const char* const kServiceName = "media_resource_monitor"; sp<IBinder> binder = defaultServiceManager()->checkService(String16(kServiceName)); if (binder != NULL) { sp<IMediaResourceMonitor> service = interface_cast<IMediaResourceMonitor>(binder); for (size_t i = 0; i < resources.size(); ++i) { if (resources[i].mSubType == MediaResource::kAudioCodec) { service->notifyResourceGranted(pid, IMediaResourceMonitor::TYPE_AUDIO_CODEC); } 
else if (resources[i].mSubType == MediaResource::kVideoCodec) { service->notifyResourceGranted(pid, IMediaResourceMonitor::TYPE_VIDEO_CODEC); } } } } status_t ResourceManagerService::dump(int fd, const Vector<String16>& /* args */) { String8 result; if (checkCallingPermission(String16("android.permission.DUMP")) == false) { result.format("Permission Denial: " "can't dump ResourceManagerService from pid=%d, uid=%d\n", IPCThreadState::self()->getCallingPid(), IPCThreadState::self()->getCallingUid()); write(fd, result.string(), result.size()); return PERMISSION_DENIED; } PidResourceInfosMap mapCopy; bool supportsMultipleSecureCodecs; bool supportsSecureWithNonSecureCodec; String8 serviceLog; { Mutex::Autolock lock(mLock); mapCopy = mMap; // Shadow copy, real copy will happen on write. supportsMultipleSecureCodecs = mSupportsMultipleSecureCodecs; supportsSecureWithNonSecureCodec = mSupportsSecureWithNonSecureCodec; serviceLog = mServiceLog->toString(" " /* linePrefix */); } const size_t SIZE = 256; char buffer[SIZE]; snprintf(buffer, SIZE, "ResourceManagerService: %p\n", this); result.append(buffer); result.append(" Policies:\n"); snprintf(buffer, SIZE, " SupportsMultipleSecureCodecs: %d\n", supportsMultipleSecureCodecs); result.append(buffer); snprintf(buffer, SIZE, " SupportsSecureWithNonSecureCodec: %d\n", supportsSecureWithNonSecureCodec); result.append(buffer); result.append(" Processes:\n"); for (size_t i = 0; i < mapCopy.size(); ++i) { snprintf(buffer, SIZE, " Pid: %d\n", mapCopy.keyAt(i)); result.append(buffer); const ResourceInfos &infos = mapCopy.valueAt(i); for (size_t j = 0; j < infos.size(); ++j) { result.append(" Client:\n"); snprintf(buffer, SIZE, " Id: %lld\n", (long long)infos[j].clientId); result.append(buffer); snprintf(buffer, SIZE, " Name: %s\n", infos[j].client->getName().string()); result.append(buffer); Vector<MediaResource> resources = infos[j].resources; result.append(" Resources:\n"); for (size_t k = 0; k < resources.size(); ++k) { snprintf(buffer, SIZE, " %s\n", resources[k].toString().string()); result.append(buffer); } } } result.append(" Events logs (most recent at top):\n"); result.append(serviceLog); write(fd, result.string(), result.size()); return OK; } ResourceManagerService::ResourceManagerService() : ResourceManagerService(new ProcessInfo()) {} ResourceManagerService::ResourceManagerService(sp<ProcessInfoInterface> processInfo) : mProcessInfo(processInfo), mServiceLog(new ServiceLog()), mSupportsMultipleSecureCodecs(true), mSupportsSecureWithNonSecureCodec(true), mCpuBoostCount(0) {} ResourceManagerService::~ResourceManagerService() {} void ResourceManagerService::config(const Vector<MediaResourcePolicy> &policies) { String8 log = String8::format("config(%s)", getString(policies).string()); mServiceLog->add(log); Mutex::Autolock lock(mLock); for (size_t i = 0; i < policies.size(); ++i) { String8 type = policies[i].mType; String8 value = policies[i].mValue; if (type == kPolicySupportsMultipleSecureCodecs) { mSupportsMultipleSecureCodecs = (value == "true"); } else if (type == kPolicySupportsSecureWithNonSecureCodec) { mSupportsSecureWithNonSecureCodec = (value == "true"); } } } void ResourceManagerService::addResource( int pid, int64_t clientId, const sp<IResourceManagerClient> client, const Vector<MediaResource> &resources) { String8 log = String8::format("addResource(pid %d, clientId %lld, resources %s)", pid, (long long) clientId, getString(resources).string()); mServiceLog->add(log); Mutex::Autolock lock(mLock); if (!mProcessInfo->isValidPid(pid)) { 
ALOGE("Rejected addResource call with invalid pid."); return; } ResourceInfos& infos = getResourceInfosForEdit(pid, mMap); ResourceInfo& info = getResourceInfoForEdit(clientId, client, infos); // TODO: do the merge instead of append. info.resources.appendVector(resources); for (size_t i = 0; i < resources.size(); ++i) { if (resources[i].mType == MediaResource::kCpuBoost && !info.cpuBoost) { info.cpuBoost = true; // Request it on every new instance of kCpuBoost, as the media.codec // could have died, if we only do it the first time subsequent instances // never gets the boost. if (requestCpusetBoost(true, this) != OK) { ALOGW("couldn't request cpuset boost"); } mCpuBoostCount++; } } if (info.deathNotifier == nullptr) { info.deathNotifier = new DeathNotifier(this, pid, clientId); IInterface::asBinder(client)->linkToDeath(info.deathNotifier); } notifyResourceGranted(pid, resources); } void ResourceManagerService::removeResource(int pid, int64_t clientId) { removeResource(pid, clientId, true); } void ResourceManagerService::removeResource(int pid, int64_t clientId, bool checkValid) { String8 log = String8::format( "removeResource(pid %d, clientId %lld)", pid, (long long) clientId); mServiceLog->add(log); Mutex::Autolock lock(mLock); if (checkValid && !mProcessInfo->isValidPid(pid)) { ALOGE("Rejected removeResource call with invalid pid."); return; } ssize_t index = mMap.indexOfKey(pid); if (index < 0) { ALOGV("removeResource: didn't find pid %d for clientId %lld", pid, (long long) clientId); return; } bool found = false; ResourceInfos &infos = mMap.editValueAt(index); for (size_t j = 0; j < infos.size(); ++j) { if (infos[j].clientId == clientId) { if (infos[j].cpuBoost && mCpuBoostCount > 0) { if (--mCpuBoostCount == 0) { requestCpusetBoost(false, this); } } IInterface::asBinder(infos[j].client)->unlinkToDeath(infos[j].deathNotifier); j = infos.removeAt(j); found = true; break; } } if (!found) { ALOGV("didn't find client"); } } void ResourceManagerService::getClientForResource_l( int callingPid, const MediaResource *res, Vector<sp<IResourceManagerClient>> *clients) { if (res == NULL) { return; } sp<IResourceManagerClient> client; if (getLowestPriorityBiggestClient_l(callingPid, res->mType, &client)) { clients->push_back(client); } } bool ResourceManagerService::reclaimResource( int callingPid, const Vector<MediaResource> &resources) { String8 log = String8::format("reclaimResource(callingPid %d, resources %s)", callingPid, getString(resources).string()); mServiceLog->add(log); Vector<sp<IResourceManagerClient>> clients; { Mutex::Autolock lock(mLock); if (!mProcessInfo->isValidPid(callingPid)) { ALOGE("Rejected reclaimResource call with invalid callingPid."); return false; } const MediaResource *secureCodec = NULL; const MediaResource *nonSecureCodec = NULL; const MediaResource *graphicMemory = NULL; for (size_t i = 0; i < resources.size(); ++i) { MediaResource::Type type = resources[i].mType; if (resources[i].mType == MediaResource::kSecureCodec) { secureCodec = &resources[i]; } else if (type == MediaResource::kNonSecureCodec) { nonSecureCodec = &resources[i]; } else if (type == MediaResource::kGraphicMemory) { graphicMemory = &resources[i]; } } // first pass to handle secure/non-secure codec conflict if (secureCodec != NULL) { if (!mSupportsMultipleSecureCodecs) { if (!getAllClients_l(callingPid, MediaResource::kSecureCodec, &clients)) { return false; } } if (!mSupportsSecureWithNonSecureCodec) { if (!getAllClients_l(callingPid, MediaResource::kNonSecureCodec, &clients)) { return false; } } } 
if (nonSecureCodec != NULL) { if (!mSupportsSecureWithNonSecureCodec) { if (!getAllClients_l(callingPid, MediaResource::kSecureCodec, &clients)) { return false; } } } if (clients.size() == 0) { // if no secure/non-secure codec conflict, run second pass to handle other resources. getClientForResource_l(callingPid, graphicMemory, &clients); } if (clients.size() == 0) { // if we are here, run the third pass to free one codec with the same type. getClientForResource_l(callingPid, secureCodec, &clients); getClientForResource_l(callingPid, nonSecureCodec, &clients); } if (clients.size() == 0) { // if we are here, run the fourth pass to free one codec with the different type. if (secureCodec != NULL) { MediaResource temp(MediaResource::kNonSecureCodec, 1); getClientForResource_l(callingPid, &temp, &clients); } if (nonSecureCodec != NULL) { MediaResource temp(MediaResource::kSecureCodec, 1); getClientForResource_l(callingPid, &temp, &clients); } } } if (clients.size() == 0) { return false; } sp<IResourceManagerClient> failedClient; for (size_t i = 0; i < clients.size(); ++i) { log = String8::format("reclaimResource from client %p", clients[i].get()); mServiceLog->add(log); if (!clients[i]->reclaimResource()) { failedClient = clients[i]; break; } } if (failedClient == NULL) { return true; } { Mutex::Autolock lock(mLock); bool found = false; for (size_t i = 0; i < mMap.size(); ++i) { ResourceInfos &infos = mMap.editValueAt(i); for (size_t j = 0; j < infos.size();) { if (infos[j].client == failedClient) { j = infos.removeAt(j); found = true; } else { ++j; } } if (found) { break; } } if (!found) { ALOGV("didn't find failed client"); } } return false; } bool ResourceManagerService::getAllClients_l( int callingPid, MediaResource::Type type, Vector<sp<IResourceManagerClient>> *clients) { Vector<sp<IResourceManagerClient>> temp; for (size_t i = 0; i < mMap.size(); ++i) { ResourceInfos &infos = mMap.editValueAt(i); for (size_t j = 0; j < infos.size(); ++j) { if (hasResourceType(type, infos[j].resources)) { if (!isCallingPriorityHigher_l(callingPid, mMap.keyAt(i))) { // some higher/equal priority process owns the resource, // this request can't be fulfilled. ALOGE("getAllClients_l: can't reclaim resource %s from pid %d", asString(type), mMap.keyAt(i)); return false; } temp.push_back(infos[j].client); } } } if (temp.size() == 0) { ALOGV("getAllClients_l: didn't find any resource %s", asString(type)); return true; } clients->appendVector(temp); return true; } bool ResourceManagerService::getLowestPriorityBiggestClient_l( int callingPid, MediaResource::Type type, sp<IResourceManagerClient> *client) { int lowestPriorityPid; int lowestPriority; int callingPriority; if (!mProcessInfo->getPriority(callingPid, &callingPriority)) { ALOGE("getLowestPriorityBiggestClient_l: can't get process priority for pid %d", callingPid); return false; } if (!getLowestPriorityPid_l(type, &lowestPriorityPid, &lowestPriority)) { return false; } if (lowestPriority <= callingPriority) { ALOGE("getLowestPriorityBiggestClient_l: lowest priority %d vs caller priority %d", lowestPriority, callingPriority); return false; } if (!getBiggestClient_l(lowestPriorityPid, type, client)) { return false; } return true; } bool ResourceManagerService::getLowestPriorityPid_l( MediaResource::Type type, int *lowestPriorityPid, int *lowestPriority) { int pid = -1; int priority = -1; for (size_t i = 0; i < mMap.size(); ++i) { if (mMap.valueAt(i).size() == 0) { // no client on this process. 
continue; } if (!hasResourceType(type, mMap.valueAt(i))) { // doesn't have the requested resource type continue; } int tempPid = mMap.keyAt(i); int tempPriority; if (!mProcessInfo->getPriority(tempPid, &tempPriority)) { ALOGV("getLowestPriorityPid_l: can't get priority of pid %d, skipped", tempPid); // TODO: remove this pid from mMap? continue; } if (pid == -1 || tempPriority > priority) { // initial the value pid = tempPid; priority = tempPriority; } } if (pid != -1) { *lowestPriorityPid = pid; *lowestPriority = priority; } return (pid != -1); } bool ResourceManagerService::isCallingPriorityHigher_l(int callingPid, int pid) { int callingPidPriority; if (!mProcessInfo->getPriority(callingPid, &callingPidPriority)) { return false; } int priority; if (!mProcessInfo->getPriority(pid, &priority)) { return false; } return (callingPidPriority < priority); } bool ResourceManagerService::getBiggestClient_l( int pid, MediaResource::Type type, sp<IResourceManagerClient> *client) { ssize_t index = mMap.indexOfKey(pid); if (index < 0) { ALOGE("getBiggestClient_l: can't find resource info for pid %d", pid); return false; } sp<IResourceManagerClient> clientTemp; uint64_t largestValue = 0; const ResourceInfos &infos = mMap.valueAt(index); for (size_t i = 0; i < infos.size(); ++i) { Vector<MediaResource> resources = infos[i].resources; for (size_t j = 0; j < resources.size(); ++j) { if (resources[j].mType == type) { if (resources[j].mValue > largestValue) { largestValue = resources[j].mValue; clientTemp = infos[i].client; } } } } if (clientTemp == NULL) { ALOGE("getBiggestClient_l: can't find resource type %s for pid %d", asString(type), pid); return false; } *client = clientTemp; return true; } } // namespace android
/**
 * @license
 * Copyright 2017 JBoss Inc
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import {OnDestroy, OnInit} from "@angular/core";
import {ActivatedRoute} from "@angular/router";
import {Title} from "@angular/platform-browser";

export class DataMap {
    [key: string]: boolean;
}

export abstract class AbstractPageComponent implements OnInit, OnDestroy {

    public dataLoaded: DataMap = new DataMap();
    public pageError: any;

    protected _params: any;
    protected _queryParams: any;

    /**
     * C'tor.
     * @param route
     * @param titleService
     */
    protected constructor(protected route: ActivatedRoute, protected titleService: Title) {}

    /**
     * Called when the page is initialized. Triggers the loading of asynchronous
     * page data.
     */
    public ngOnInit(): void {
        // Extract route params and query params and pass them to "loadAsyncPageData"
        this._params = null;
        this._queryParams = null;
        this.route.params.subscribe(params => {
            this._params = params;
            if (this._queryParams !== null) {
                this.loadAsyncPageData(this._params, this._queryParams);
            }
        });
        this.route.queryParams.subscribe(params => {
            this._queryParams = params;
            if (this._params !== null) {
                this.loadAsyncPageData(this._params, this._queryParams);
            }
        });
        this.updatePageTitle();
    }

    /**
     * Called to update the page title.
     */
    protected updatePageTitle(): void {
        this.titleService.setTitle(this.pageTitle());
    }

    /**
     * Returns the appropriate page title for this page.
     */
    protected abstract pageTitle(): string;

    /**
     * Called when the page is destroyed.
     */
    public ngOnDestroy(): void {
    }

    /**
     * Called to kick off loading the page's async data. Subclasses should
     * override to provide page-specific data loading.
     * @param pathParams
     * @param queryParams
     */
    public loadAsyncPageData(pathParams: any, queryParams: any): void {
    }

    /**
     * Called by a subclass (page) to report an error during loading of data.
     * @param error
     */
    public error(error: any): void {
        console.error(" Error: %o", error);
        this.pageError = error;
    }

    /**
     * Called when page data has been loaded.
     * @param key
     */
    public loaded(key: string): void {
        this.dataLoaded[key] = true;
    }

    /**
     * Called to determine whether some page data has been loaded yet.
     * @param key
     */
    public isLoaded(key: string): boolean {
        if (this.dataLoaded[key]) {
            return true;
        } else {
            return false;
        }
    }
}
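A minimal concrete page built on this base class; the selector, template, and loaded-data key are illustrative:

import {Component} from "@angular/core";
import {ActivatedRoute} from "@angular/router";
import {Title} from "@angular/platform-browser";
import {AbstractPageComponent} from "./page-base.component";

@Component({
    selector: "example-page",
    template: "<div *ngIf=\"isLoaded('data')\">loaded!</div>"
})
export class ExamplePageComponent extends AbstractPageComponent {

    constructor(route: ActivatedRoute, titleService: Title) {
        super(route, titleService);
    }

    protected pageTitle(): string {
        return "Example Page";
    }

    public loadAsyncPageData(pathParams: any, queryParams: any): void {
        // fetch page data here, then flip the flag the template watches
        this.loaded("data");
    }
}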
def changeFitMode(self, mode): self.fit_mode = mode
PIPEGLADE
Graphical User Interfaces, The UNIX Way

Pipeglade is a helper program that displays graphical user interfaces for other programs. It renders the GUI definition found in a GtkBuilder file (created using the Glade Interface Designer), and communicates with the main program solely via pipes or fifos. To have its GUI rendered by pipeglade, a program must be able to send plain text commands to standard output or a named pipe and/or receive and parse simple plain text messages from standard input or a named pipe. Pipeglade strives for simplicity; it provides access to a subset of the features available in GTK+ v3.2. See below for the set of widgets accessible via pipeglade. (Widgets of any kind can grab focus and change visibility, sensitivity, style, size, and tooltip.)

DOCUMENTATION
Pipeglade has a manual page: pipeglade(1) [PDF] [HTML]

EXAMPLE
The shell script clock.sh uses pipeglade to display an analog clock on the interface defined in the GtkBuilder file clock.ui. The script was also used to generate the image below.

SOURCE CODE
Download pipeglade v4.8.0: pipeglade-4.8.0.tar.gz
GitHub
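A hedged sketch of driving pipeglade from a client, in Python for concreteness: the fifo path and widget id are illustrative, and the exact command vocabulary should be checked against pipeglade(1).

# Assumes pipeglade was started with its command input bound to /tmp/pg.in
# (see the manual page for the actual invocation flags). Commands are plain
# text lines naming a widget from the GtkBuilder file and an action.
with open("/tmp/pg.in", "w") as fifo:
    fifo.write("label1:set_text Hello from the other side of the pipe\n")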
/** * 3D Graph scene. * @author elezeta */ public final class Scene implements IModel { Definition[] definitions; @Prefix("scene") Sentence sceneContent; public void draw() { ColorData black = new ColorData(0.5,0.5,0.5,0.5); TextureData grass = Resources.getTextures()[0]; black.draw(); RunData rd = new RunData(black,grass); sceneContent.run(rd,1); } }
The Role of Co-Enzyme Q10 in the Respiratory Chain and Some of Its Clinical Indications: A Review Background: The use of supplements has become an important part of personal health care, since many of these materials have produced real improvements in general health. Such materials are not approved as drugs by bodies like the Food and Drug Administration in the US or the European Food Safety Authority, but their use as supplements is accepted globally. The supplement discussed in this review is co-enzyme Q10. It is a potent antioxidant and an essential component of the respiratory chain, where it acts as a mobile electron carrier between respiratory complexes. As co-enzyme Q10 transfers electrons in the oxidative system, it also transports protons out of the mitochondria, producing a concentration gradient across the membrane. Protons return inside through the enzymatic machinery of ATP synthesis. Co-enzyme Q10 is the third most widely used supplement worldwide. Many studies confirm its benefit in a range of clinical conditions, including cardiovascular disease, protection against statin-induced myopathy, diabetes, and neurodegenerative disease, in addition to improvement in liver function. These benefits arise mainly from its antioxidant effect and electron-scavenging ability, which reduce oxidative stress and the associated cell and tissue damage. Objective: This study aims to show the important role of coenzyme Q10 in energy production and to highlight the main clinical applications of this supplement. Coenzyme Q10 is one of the main elements of the respiratory chain and is synthesized endogenously, since its presence is essential for life. Its availability may fall in many disease states, so dietary supplementation may become important in the management of various disorders. In truth, non-communicable diseases are not only the leading causes of death, killing more people than all other causes combined; they are also replacing malnutrition and communicable diseases as causes of disability and early death. Although non-infectious diseases have reached epidemic magnitudes, they can be prevented, and their burden can be reduced to a significant extent by early detection, minimization of risk factors, and suitable management (2). In this spirit, governments are working to support healthy lifestyles, both to improve morbidity and mortality and to reduce the costs of health care. Furthermore, knowledge about health, including applications related to food and fitness, makes people more alert to their own health (3). Consumers are now more aware than ever of the constituents of foods and the characteristic properties of these constituents, and with the growing number of consumers who treat food as medicine, food supplementation has become one of the most rapidly developing health-related markets (4). These trends generally have positive implications for wellbeing; still, rising health-related costs and growing consumption create a critical need for physicians, specialized dietitians, and other regulators to obtain systematic data on the safety and efficacy of the wide range of active constituents found in food supplements. Many consumers think that supplements are naturally occurring healthy foodstuffs with no possible drug interactions or contraindications. In fact, a great number of these health-related products pose a hazard for specific users (1).
For this reason, besides receiving correct nutritional information from healthcare workers, users should be educated about the extent of the scientific evidence for a supplement's particular indication, as well as the interactions, precautions, and safe doses of its ingredients. To this end, authorities such as the European Food Safety Authority and the US Food and Drug Administration continually perform efficacy and safety evaluations of supplement constituents in order to approve or dismiss them (3,5). At the same time, more studies on pharmacokinetics, physical and chemical characteristics, efficacy, and stability are required to expand the data on these constituents and to enhance formulation through appropriate techniques. Suitable formulations can improve a supplement's bioavailability, which can lead to greater efficacy, and can also improve stability (6-8). Coenzyme Q10, which is sold as a nutritional supplement and not as an FDA-approved drug, is the third most used food supplement after omega-3 fish oil and multivitamin preparations (1,9). It has strong antioxidant activity and is an essential part of physiological mitochondrial bioenergetics. In addition, it has been considered an important option for the management of various disorders associated with oxidative stress, such as neurodegenerative disorders, cardiovascular diseases and diabetes, which are among the top ten causes of mortality (10,11). Coenzyme Q10 has an important role in physiological mitochondrial bioenergetics. Co-enzyme Q10 is an essential element of the cellular machinery that generates ATP, which provides energy for almost all cellular functions. Co-enzyme Q10 exists in three redox states, and this nature is essential for its function (12). ATP production occurs at the inner mitochondrial membrane, where a high concentration of co-enzyme Q10 can be detected. Coenzyme Q10 has a distinctive role since it not only transfers electrons from primary substrates to the oxidative system but, at the same time, transfers protons to the outside of the mitochondrion. This transfer generates a proton gradient across the mitochondrial membrane, and when the protons return inside through the enzymatic machinery of ATP synthesis, they drive ATP formation. Coenzyme Q10 binds to the oriented enzymatic complexes: it is oxidized and releases protons on the outside of the mitochondrial membrane, and picks up protons and electrons on the inside (12,13). There are five types of protein complexes in the mitochondrial membrane, two of which are involved in the proton and electron transfer mediated by coenzyme Q10. The first, known as the primary reductase (complex I), is where co-enzyme Q10 is reduced by the action of NADH. The reduction process involves the transport of four protons across the membrane for every co-enzyme Q10 reduced (12,13). The exact details of electron transport in complex I are still not fully understood; however, it has been suggested that coenzyme Q10 is reduced and then reoxidized twice within complex I, after which electrons transfer to another, lightly bound co-enzyme Q10 to produce ubiquinol, which travels across the membrane lipid to complex III, where ubiquinol is oxidized again, with protons transferred against their concentration gradient across the membrane (12,13).
Unlike the details of proton transport in complex I, the details of ubiquinol oxidation and binding at the binding site of complex III are well known. As in complex I, there is oxidation, reduction, and re-oxidation, and the oxidation step is accompanied by proton release to the outside, so that the protons are released in the right direction. The oxidation-reduction cycle again allows four protons to cross the membrane for each ubiquinol oxidation cycle (12). In the normal forward electron transfer chain, co-enzyme Q10 accepts electrons from both complexes I and II and transports them to complex III. In complex III the Q cycle takes place, with protons pumped from the matrix to the intermembrane space. Complex III contains two distinct binding sites for co-enzyme Q10. Reduced ubiquinol (UQH2) binds at the Qo site, transferring one electron to cytochrome c (cyt c) while the other electron passes down to the Qi site, where it attaches either to co-enzyme Q10 (ubiquinone), forming the ubisemiquinone intermediate (UQH•-), or to an existing UQH•-, regenerating ubiquinol (UQH2). Oxidized co-enzyme Q10 is thus formed at the Qo site, while reduced ubiquinol (UQH2) is formed at the Qi site. As electrons are transported, some may leak out and attach to oxygen, forming the superoxide anion (O2•-) (figure 1) (12).

Co-enzyme Q10 may offer a potential advantage to subjects with cardiovascular disorders. Impaired mitochondrial function and the resulting oxidative stress have a fundamental effect on the pathophysiology of cardiovascular diseases (14). For this reason, in addition to its vital role in energy generation and its antioxidant effects, co-enzyme Q10 can be considered a promising candidate for the prevention or management of cardiovascular disease. The review by Flowers et al. of the current data on co-enzyme Q10 supplementation for the management of cardiovascular disorders indicates that, although co-enzyme Q10 has a possible role in managing existing cardiovascular disorders, additional research is required to determine whether it can help prevent such conditions in people with no current disorder (15). It has been noted that low concentrations of co-enzyme Q10 are linked with many disorders, such as heart failure and cardiomyopathy (16). Because co-enzyme Q10 has a role in energy generation, its administration may have valuable effects in patients with low cardiac contractility, which stems from the low-energy state linked with co-enzyme Q10 deficiency and results in heart failure (11). In addition, the protective effect of co-enzyme Q10 may be attributed to its beneficial effects on the heart and circulation, such as its desirable effects on atherosclerosis and hypertension (15). The proposed anti-atherosclerotic and antihypertensive mechanisms may relate to its antioxidant activity. In atherosclerosis, the antioxidant effect of co-enzyme Q10 may diminish low-density lipoprotein (LDL) peroxidation and improve endothelial function; since endothelial dysfunction is the main marker of atherosclerosis, co-enzyme Q10 may act to improve this condition (17). In oxidative-stress conditions there is reduced availability of nitric oxide, which leads to vasoconstriction and increased blood pressure; co-enzyme Q10 supplementation may help preserve nitric oxide (18).
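Returning to the Q cycle at complex III described above, its two-turnover bookkeeping can be written out as a short sketch. The stoichiometry follows the standard textbook account of the Q cycle and is assumed here for illustration, not taken from this review:

# Tally for the complex III Q cycle (standard two-turnover description).
def q_cycle(turnovers_at_Qo=2):
    """One net cycle = two Qo-site turnovers: 2 UQH2 are oxidized at Qo and
    1 UQ is re-reduced to UQH2 at Qi, so net 1 UQH2 is consumed."""
    uqh2_oxidized_at_Qo = turnovers_at_Qo
    uqh2_regenerated_at_Qi = turnovers_at_Qo // 2
    return {
        "net UQH2 oxidized": uqh2_oxidized_at_Qo - uqh2_regenerated_at_Qi,
        "cytochrome c reduced": turnovers_at_Qo,  # one electron per turnover
        "H+ released to intermembrane space": 2 * turnovers_at_Qo,
        "H+ taken up from matrix": 2 * uqh2_regenerated_at_Qi,
    }

print(q_cycle())
# {'net UQH2 oxidized': 1, 'cytochrome c reduced': 2,
#  'H+ released to intermembrane space': 4, 'H+ taken up from matrix': 2}

This reproduces the figure quoted in the text: four protons cross the membrane per net ubiquinol oxidation cycle.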
Although a clinical trial has been carried out, further studies with larger sample sizes and longer follow-up durations are required to determine whether co-enzyme Q10 has a protective effect with regard to atherosclerosis and hypertension (19).

❖ Does coenzyme Q10 have a role in the prevention of statin-induced muscle problems/myopathy?

Statins are a vital group of drugs applied in the management of dyslipidemia, so they have a role in the prevention of cardiovascular disorders. Statins are in general safe drugs, although a variety of muscle problems/myopathies have been reported after their administration (20). In fact, about 10.5% of statin users may experience myalgia, which is the main side effect of statin therapy (21). Even though this is a minor side effect, it may affect patient compliance. Surveys among statin users suggest that about 30% discontinue their treatment because of weakness, muscle pain, cramps, fatigue, and stiffness (22). In addition to these minor adverse effects, rhabdomyolysis, a severe life-threatening condition, may occur, but it is extremely rare (23). The precise mechanism of this muscle problem/myopathy has not been fully identified, but among the suggested mechanisms is an effect on de novo co-enzyme Q10 synthesis (24). The management of hypercholesterolemia with these drugs results in low concentrations of co-enzyme Q10 (24, 25). Statins act by inhibiting HMG-CoA reductase, the enzyme that participates in regulation of the mevalonate pathway, the main precursor pathway in the synthesis of many important substances, including cholesterol and other agents such as co-enzyme Q10 (26). The consequence of inhibited co-enzyme Q10 synthesis is a compromised mitochondrial respiratory chain and impaired energy production, which may result in the induction of myopathy (27). Additionally, serum co-enzyme Q10 is carried by lipoproteins, and the LDL reduction produced by statins is another probable mechanism of co-enzyme Q10 depletion (28). Statin therapy is discontinued, according to the patient's complaints, if the possibility of rhabdomyolysis is present. Supplementation with co-enzyme Q10 has been recommended to treat and prevent statin-induced myopathy; in fact, one study recommends the use of supplemental co-enzyme Q10 in all users of HMG-CoA reductase inhibitors (29). Other studies report that co-enzyme Q10 supplementation at doses of 30 to 200 mg/day may reduce statin-related muscle symptoms, which may serve as a solution instead of discontinuing treatment (30,31). On the other hand, a meta-analysis and systematic review could not detect a statistically significant beneficial effect of co-enzyme Q10 administration on the improvement of statin-induced myopathy (24, 28).

❖ The role of coenzyme Q10 in diabetic patients

Hyperglycemia is the main characteristic sign of diabetes, and it has been shown that the resulting high level of free-radical generation, with the consequent oxidative stress, can play a significant role in the pathophysiology of diabetes and diabetes-related complications (32). Indeed, increased generation of free radicals combined with exhaustion of antioxidant defenses can result in increased lipid peroxidation and cellular damage, which may lead to insulin resistance and diabetic complications (33). Co-enzyme Q10 is one of the most potent antioxidants and can act as a free-radical scavenger, so assessment of its level can be considered an indicator of the oxidative-stress status of the body.
High blood concentrations of co-enzyme Q10 have been reported not only in diabetic rats but also in pediatric patients with type 1 diabetes when their co-enzyme Q10 levels are compared with those of healthy individuals (34, 35). Despite these high blood levels, mitochondrial levels of co-enzyme Q10 in the liver and heart of diabetic rats are low. Accordingly, co-enzyme Q10 can be considered one of the mechanisms that provide protection against oxidative stress. Moreover, many studies have been performed to assess the promising positive effect of co-enzyme Q10 administration in diabetic patients. Diabetic complications are linked to persistently high glucose levels, which result in overproduction of superoxide anion in the mitochondria; this produces cellular and tissue damage by stimulating cell death and apoptosis (36). Cardiovascular complications are the major drawback of type 2 diabetes, and they are associated with endothelial malfunction. Where oxidative stress inhibits normal endothelial function, many studies indicate that co-enzyme Q10 administration may enable the endothelium to function properly again by triggering mitochondrial oxidative phosphorylation and endothelial nitric oxide synthase (37).

❖ The effect of co-enzyme Q10 in neurodegenerative disorders

Mitochondrial dysfunction producing abnormal energy metabolism, together with oxidative stress, its associated increase in oxidative damage, and inflammation, is considered an important etiology of various neurodegenerative disorders. The study of antioxidant effects in the treatment of such conditions is therefore an area of interest (38). Co-enzyme Q10 is a mitochondrial-function enhancer and a strong antioxidant, so it could be a hopeful neuroprotectant for slowing the progression of Parkinson's, Alzheimer's, and Huntington's diseases, along with other neurodegenerative disorders such as Friedreich's ataxia and amyotrophic lateral sclerosis (10, 39). In actual fact, co-enzyme Q10 has shown a neuroprotective role in vitro, owing to its stabilizing effect on the mitochondrial membrane of neuronal cells exposed to oxidative stress. This results in reduced cell death and/or damage, which are among the causes of the above-mentioned neurodegenerative disorders (40). In animal studies, the observed results confirm the neuroprotective effect of co-enzyme Q10 (41). Indeed, co-enzyme Q10 has the ability to defend neurons from oxidative injury in models of Parkinson's (42) and Alzheimer's disease, it decreases the generation of β-amyloid plaque in vivo in a mouse model of Alzheimer's disease (43), and it leads to survival and behavioral enhancement in treated mice with frontotemporal dementia (44). Neurodegenerative diseases are associated with low serum levels of co-enzyme Q10, which indicates poor antioxidant status and higher oxidative damage to cells and tissues. Consequently, there is a rationale for assessing co-enzyme Q10 levels to predict and follow up the development of neurodegenerative diseases, including dementia (45). Regardless of the findings in animal models, which have confirmed a relationship between co-enzyme Q10 and neuroprotection, there is as yet insufficient evidence to support the routine use of co-enzyme Q10 in humans with neuronal problems.
In their study, Chang et al. established that, since no clinical trials have confirmed that co-enzyme Q10 prevents the progression of neurodegenerative disease or decreases its risk, further clinical trials need to be carried out in order to improve therapeutic strategies for these diseases (46).

❖ Liver Disease

Although endogenous co-enzyme Q10 production takes place throughout the body, the liver is the main organ responsible for it, because of the liver's high metabolic capacity and large size. In liver disease, the metabolic capacity of the liver is decreased, and this produces lower levels of co-enzyme Q10. Low levels of co-enzyme Q10 have potentially harmful consequences for the heart, so subjects with liver diseases such as nonalcoholic fatty liver disease (NAFLD) carry a risk factor for cardiovascular disease. Cardiovascular diseases have been described as a main cause of death in subjects with NAFLD (47). Many cardiovascular problems are associated with NAFLD, including arrhythmias, heart failure, atherosclerosis, and valve dysfunction. In alcohol-related liver disease there is a similar association with cardiovascular disorders and an increased risk of conditions such as atrial fibrillation, alcoholic cardiomyopathy, and arterial hypertension (48). In individuals with fatty liver disease who take statins, reduced concentrations of co-enzyme Q10 may be a particular problem, in part because inhibition of the mevalonate pathway suppresses the synthesis of both cholesterol and co-enzyme Q10. Co-enzyme Q10 supplementation can act to reduce cardiovascular risk in subjects with liver disease; in addition, it can benefit the liver itself by decreasing oxidative stress and inflammation. One of the main mechanisms by which alcohol produces liver injury is thought to be the generation of free radicals, and through the antioxidant action of co-enzyme Q10 the liver cells can be protected from such oxidative damage (48,49). Free radicals can also induce oxidative stress, which has been associated with the pathogenesis of NAFLD (50). Animal studies have confirmed the ability of co-enzyme Q10 to prevent or reduce the progression of liver cirrhosis after exposure to a wide range of toxins, including toxic chemicals, drugs, and even parasitic microorganisms (48,51). In one study assessing the effect of co-enzyme Q10 supplementation on liver toxicity, liver damage was induced in mice by administration of paracetamol, and subsequent management with co-enzyme Q10 reduced the formation of cirrhotic tissue through its anti-inflammatory and antioxidant effects (50). In a similar study in rats at high risk of developing NAFLD, supplementation with co-enzyme Q10 inhibited progression to cirrhosis by reducing oxidative stress and inflammation (52). These studies established the ability of co-enzyme Q10 to protect the liver against free-radical-induced oxidative damage.

Conclusion

This review set out to establish the ability of co-enzyme Q10 to protect against a range of diseases, such as cardiovascular disease, diabetes mellitus, neurological disorders, and liver disease, in which damage is driven by free-radical-induced oxidative injury.

References
4. […] on how people react to food-related communication messages. 2016.
5. EFSA Panel on Dietetic Products, Nutrition and Allergies (NDA). Scientific Opinion on the substantiation of health claims related to coenzyme Q10 and contribution to normal energy-yielding metabolism (ID 1508, 1512, 1720, 1912, 4668), maintenance of normal blood pressure (ID 1509, 1721, 1911), protection of DNA, proteins and lipids from oxidative damage (ID 1510), contribution to normal cognitive function (ID 1511), maintenance of normal blood cholesterol concentrations (ID 1721) and increase in endurance capacity and/or endurance performance (ID 1913).
Langsjoen PH, Langsjoen AM. The clinical use of HMG CoA-reductase inhibitors and the associated depletion of coenzyme Q10. A review of animal and human publications.
/// Sets the motor speed in radians per second. public void SetMotorSpeed(float speed) { m_bodyA.wakeUp(); m_bodyB.wakeUp(); m_motorSpeed = speed; }
/** * Contestants do not need to worry about anything in this file. This is just * helper code that does the boring stuff for you, so you can focus on the * interesting stuff. That being said, you're welcome to change anything in * this file if you know what you're doing. */ public class DuelPlanetWars { // Store all the planets and fleets. private ArrayList<Planet> planets; private ArrayList<Fleet> myFleets; private ArrayList<Fleet> enemyFleets; // Caching private int[][] distances; private ArrayList<Planet> myPlanets; private ArrayList<Planet> enemyPlanets; private ArrayList<Planet> neutralPlanets; private Planet[][] closestPlanets; // Temporary variable while updating private int updatePlanetID; // Debug stuff private boolean enableLogging = false; private PrintWriter log; /** * Constructs a PlanetWars object instance, given a string containing a * description of a game state. * @param gameStateString */ public DuelPlanetWars(String gameStateString) { planets = new ArrayList<Planet>(); myPlanets = new ArrayList<Planet>(); enemyPlanets = new ArrayList<Planet>(); neutralPlanets = new ArrayList<Planet>(); myFleets = new ArrayList<Fleet>(); enemyFleets = new ArrayList<Fleet>(); parseGameState(gameStateString); } /** * Enables logging to the given PrintWriter * @param log */ public void setLogger(PrintWriter log) { this.log = log; enableLogging = true; } /** * Returns the number of planets. Planets are numbered starting with 0. * @return */ public int getNumPlanets() { return planets.size(); } /** * Returns the planet with the given planet_id. There are NumPlanets() * planets. They are numbered starting at 0. * @param planetID * @return */ public Planet getPlanet(int planetID) { return planets.get(planetID); } /** * Returns the number of fleets. * @return */ public int getNumFleets() { return myFleets.size() + enemyFleets.size(); } /** * Returns the fleet with the given fleet_id. Fleets are numbered starting * with 0. There are NumFleets() fleets. fleet_id's are not consistent from * one turn to the next. * @param fleetID * @return */ public Fleet getFleet(int fleetID) { if (fleetID < myFleets.size()) { return myFleets.get(fleetID); } else { return enemyFleets.get(fleetID - myFleets.size()); } } /** * Return a list of all the fleets. * NOTE: using getMyFleets and getEnemyFleets is quicker * @return */ public List<Fleet> getFleets() { List<Fleet> fleets = new ArrayList<Fleet>(myFleets.size() + enemyFleets.size()); fleets.addAll(myFleets); fleets.addAll(enemyFleets); return fleets; } /** * Return a list of all the fleets owned by the current player. * NOTE: returns a pointer, not a copy, so be careful. * @return */ public List<Fleet> getMyFleets() { return myFleets; } /** * Return a list of all the fleets owned by enemy players. * NOTE: returns a pointer, not a copy, so be careful. * @return */ public List<Fleet> getEnemyFleets() { return enemyFleets; } /** * Returns a list of all fleets with the given planet as destination. * @param dest * @return */ public List<Fleet> getIncomingFleets(Planet dest) { List<Fleet> inc = new ArrayList<Fleet>(); for (Fleet f : myFleets) { if (f.getDestination() == dest.getPlanetID()) { inc.add(f); } } for (Fleet f : enemyFleets) { if (f.getDestination() == dest.getPlanetID()) { inc.add(f); } } return inc; } /** * Returns a list of all fleets with the given planet as destination, sorted by the number of turns left before arrival. 
* @param dest * @return */ public List<Fleet> getIncomingFleetsSorted(Planet dest) { List<Fleet> inc = getIncomingFleets(dest); Collections.sort(inc, Fleet.arrivalTime); return inc; } /** * Returns a list of all the planets. * @return */ public List<Planet> getPlanets() { return planets; } /** * Return a list of all the planets owned by the current player. By * convention, the current player is always player number 1. * @return */ public List<Planet> getMyPlanets() { if (myPlanets == null) { myPlanets = new ArrayList<Planet>(); for (Planet p : planets) { if (p.getOwner() == 1) { myPlanets.add(p); } } } return myPlanets; } /** * Return a list of all neutral planets. * @return */ public List<Planet> getNeutralPlanets() { if (neutralPlanets == null) { neutralPlanets = new ArrayList<Planet>(); for (Planet p : planets) { if (p.getOwner() == 0) { neutralPlanets.add(p); } } } return neutralPlanets; } /** * Return a list of all the planets owned by rival players. This excludes * planets owned by the current player, as well as neutral planets. * @return */ public List<Planet> getEnemyPlanets() { if (enemyPlanets == null) { enemyPlanets = new ArrayList<Planet>(); for (Planet p : planets) { if (p.getOwner() == 2) { enemyPlanets.add(p); } } } return enemyPlanets; } /** * Return a list of all the planets that are not owned by the current * player. This includes all enemy planets and neutral planets. * NOTE: this method is not being cached (yet), so calling it often will be less efficient than the others. * @return */ public List<Planet> getNotMyPlanets() { List<Planet> r = new ArrayList<Planet>(); r.addAll(getEnemyPlanets()); r.addAll(getNeutralPlanets()); return r; } /** * Returns a list of all planets other than p, sorted according to their distance to p. * @return */ public Planet[] getClosestPlanets(Planet p) { if (closestPlanets[p.getPlanetID()][0] == null) { // Add all planets to the array int n = 0; for (int i = 0; i < planets.size(); i++) { Planet planet = planets.get(i); if (p != planet) { closestPlanets[p.getPlanetID()][n] = planet; n++; } } // Sort this array Arrays.sort(closestPlanets[p.getPlanetID()], new DistanceTo(p)); } return closestPlanets[p.getPlanetID()]; } /** * Returns the distance between two planets, rounded up to the next highest * integer. This is the number of discrete time steps it takes to get * between the two planets. * @param sourcePlanet * @param destinationPlanet * @return */ public int getDistance(int sourcePlanet, int destinationPlanet) { if (sourcePlanet == destinationPlanet) { return 0; } if (distances[sourcePlanet][destinationPlanet] < 0) { Planet source = planets.get(sourcePlanet); Planet destination = planets.get(destinationPlanet); double dx = source.getX() - destination.getX(); double dy = source.getY() - destination.getY(); distances[sourcePlanet][destinationPlanet] = (int) Math.ceil(Math.sqrt(dx * dx + dy * dy)); } return distances[sourcePlanet][destinationPlanet]; } /** * Returns the distance between two planets, rounded up to the next highest * integer. This is the number of discrete time steps it takes to get * between the two planets. 
* @param source * @param destination * @return */ public int getDistance(Planet source, Planet destination) { if (source == destination) { return 0; } if (distances[source.getPlanetID()][destination.getPlanetID()] < 0) { double dx = source.getX() - destination.getX(); double dy = source.getY() - destination.getY(); int dist = (int) Math.ceil(Math.sqrt(dx * dx + dy * dy)); distances[source.getPlanetID()][destination.getPlanetID()] = dist; distances[destination.getPlanetID()][source.getPlanetID()] = dist; } return distances[source.getPlanetID()][destination.getPlanetID()]; } /** * Sends an order to the game engine. An order is composed of a source * planet number, a destination planet number, and a number of ships. A * few things to keep in mind: * * you can issue many orders per turn if you like. * * the planets are numbered starting at zero, not one. * * you must own the source planet. If you break this rule, the game * engine kicks your bot out of the game instantly. * * you can't move more ships than are currently on the source planet. * * the ships will take a few turns to reach their destination. Travel * is not instant. See the Distance() function for more info. * @param sourcePlanet * @param destinationPlanet * @param numShips */ public void issueOrder(int sourcePlanet, int destinationPlanet, int numShips) { System.out.println(sourcePlanet + " " + destinationPlanet + " " + numShips); System.out.flush(); } /** * Sends an order to the game engine. An order is composed of a source * planet number, a destination planet number, and a number of ships. A * few things to keep in mind: * * you can issue many orders per turn if you like. * * the planets are numbered starting at zero, not one. * * you must own the source planet. If you break this rule, the game * engine kicks your bot out of the game instantly. * * you can't move more ships than are currently on the source planet. * * the ships will take a few turns to reach their destination. Travel * is not instant. See the Distance() function for more info. * @param source * @param dest * @param numShips */ public void issueOrder(Planet source, Planet dest, int numShips) { System.out.println(source.getPlanetID() + " " + dest.getPlanetID() + " " + numShips); System.out.flush(); } /** * Sends the game engine a message to let it know that we're done sending * orders. This signifies the end of our turn. */ public void finishTurn() { System.out.println("go"); System.out.flush(); } /** * Returns true if the named player owns at least one planet or fleet. * Otherwise, the player is deemed to be dead and false is returned. * @param playerID * @return */ public boolean isAlive(int playerID) { if ((playerID == 1 && !myFleets.isEmpty()) || (playerID == 2 && !enemyFleets.isEmpty())) { return true; } for (Planet p : planets) { if (p.getOwner() == playerID) { return true; } } return false; } /** * If the game is not yet over (ie: at least two players have planets or * fleets remaining), returns -1. If the game is over (ie: only one player * is left) then that player's number is returned. If there are no * remaining players, then the game is a draw and 0 is returned. 
* @return */ public int getWinner() { boolean player1Lives = !myFleets.isEmpty(); boolean player2Lives = !enemyFleets.isEmpty(); // If both players have fleets, there is no winner yet, otherwise we need to check the planets if (player1Lives && player2Lives) { return -1; } for (Planet p : planets) { player1Lives = player1Lives || p.getOwner() == 1; player2Lives = player2Lives || p.getOwner() == 2; } if (player1Lives && player2Lives) { return -1; } else if (player1Lives) { return 1; } else if (player2Lives) { return 2; } else { return 0; } } /** * Returns the number of ships that the current player has, either located * on planets or in flight. * @param playerID * @return */ public int getNumShips(int playerID) { int numShips = 0; for (Planet p : planets) { if (p.getOwner() == playerID) { numShips += p.getNumShips(); } } if (playerID == 1) { for (Fleet f : myFleets) { numShips += f.getNumShips(); } } else if (playerID == 2) { for (Fleet f : enemyFleets) { numShips += f.getNumShips(); } } return numShips; } /** * Returns the total production of the given player, i.e. the sum of all growth rates of his planets. * @param playerID * @return */ public int getProduction(int playerID) { int production = 0; if (playerID == 1) { for (Planet p : myPlanets) { production += p.getGrowthRate(); } } else if (playerID == 2) { for (Planet p : enemyPlanets) { production += p.getGrowthRate(); } } return production; } /** * Updates the game state to reflect the one in the message. Assumes that the map (planets) doesn't change, although other planet attributes are updated. * @param s */ public void update(String s) { initializeUpdate(); String[] lines = s.split("\n"); for (int i = 0; i < lines.length; ++i) { updateLine(lines[i]); } finalizeUpdate(); } public void initializeUpdate() { // Clear the fleets, we'll be building them again myFleets.clear(); enemyFleets.clear(); updatePlanetID = 0; } public void updateLine(String line) { if (enableLogging) { log.println(line); } String s = line; // Remove comments if necessary int commentBegin = s.indexOf('#'); if (commentBegin >= 0) { s = s.substring(0, commentBegin); } if (s.trim().length() == 0) { return; } // Parse each line String[] tokens = s.split(" "); if (tokens.length == 0) { return; } if (tokens[0].equals("P")) { if (tokens.length != 6) { System.err.println("Wrong number of tokens for a planet."); return; } int owner = Integer.parseInt(tokens[3]); int numShips = Integer.parseInt(tokens[4]); Planet p = planets.get(updatePlanetID); if (owner != p.getOwner()) { // Update the per-owner planet lists switch (p.getOwner()) { case 0: neutralPlanets.remove(p); break; case 1: myPlanets.remove(p); break; case 2: enemyPlanets.remove(p); break; } p.setOwner(owner); switch (owner) { case 0: neutralPlanets.add(p); break; case 1: myPlanets.add(p); break; case 2: enemyPlanets.add(p); break; } } p.setNumShips(numShips); updatePlanetID++; } else if (tokens[0].equals("F")) { if (tokens.length != 7) { System.err.println("Wrong number of tokens for a fleet."); return; } int owner = Integer.parseInt(tokens[1]); int numShips = Integer.parseInt(tokens[2]); int source = Integer.parseInt(tokens[3]); int destination = Integer.parseInt(tokens[4]); int totalTripLength = Integer.parseInt(tokens[5]); int turnsRemaining = Integer.parseInt(tokens[6]); Fleet f = new Fleet(owner, numShips, source, destination, totalTripLength, turnsRemaining); if (owner == 1) { myFleets.add(f); } else if (owner == 2) { enemyFleets.add(f); } } else { System.err.println("Unexpected token: " + tokens[0]); return; } }
public void finalizeUpdate() { // Nothing, for now } /** * Parses a game state from a string. On success, returns 1. On failure, * returns 0. * @param s * @return */ private int parseGameState(String s) { int planetID = 0; String[] lines = s.split("\n"); for (int i = 0; i < lines.length; ++i) { String line = lines[i]; int commentBegin = line.indexOf('#'); if (commentBegin >= 0) { line = line.substring(0, commentBegin); } if (line.trim().length() == 0) { continue; } String[] tokens = line.split(" "); if (tokens.length == 0) { continue; } if (tokens[0].equals("P")) { if (tokens.length != 6) { return 0; } double x = Double.parseDouble(tokens[1]); double y = Double.parseDouble(tokens[2]); int owner = Integer.parseInt(tokens[3]); int numShips = Integer.parseInt(tokens[4]); int growthRate = Integer.parseInt(tokens[5]); Planet p = new Planet(planetID++, owner, numShips, growthRate, x, y); planets.add(p); switch (p.getOwner()) { case 0: neutralPlanets.add(p); break; case 1: myPlanets.add(p); break; case 2: enemyPlanets.add(p); break; } } else if (tokens[0].equals("F")) { if (tokens.length != 7) { return 0; } int owner = Integer.parseInt(tokens[1]); int numShips = Integer.parseInt(tokens[2]); int source = Integer.parseInt(tokens[3]); int destination = Integer.parseInt(tokens[4]); int totalTripLength = Integer.parseInt(tokens[5]); int turnsRemaining = Integer.parseInt(tokens[6]); Fleet f = new Fleet(owner, numShips, source, destination, totalTripLength, turnsRemaining); if (owner == 1) { myFleets.add(f); } else if (owner == 2) { enemyFleets.add(f); } } else { return 0; } } distances = new int[planets.size()][planets.size()]; for (int i = 0; i < planets.size(); i++) { Arrays.fill(distances[i], -1); } if (planets.size() > 0) { closestPlanets = new Planet[planets.size()][planets.size() - 1]; } else { closestPlanets = new Planet[0][0]; } return 1; } /** * Loads a map from a text file. The text file contains a description of * the starting state of a game. See the project wiki for a description of * the file format. It should be called the Planet Wars Point-in-Time * format. On success, return 1. On failure, returns 0. * @param mapFilename * @return */ private int loadMapFromFile(String mapFilename) { String s = ""; BufferedReader in = null; try { in = new BufferedReader(new FileReader(mapFilename)); int c; while ((c = in.read()) >= 0) { s += (char) c; } } catch (Exception e) { return 0; } finally { try { if (in != null) { in.close(); } } catch (Exception e) { // Nothing we can do if closing the reader fails. } } return parseGameState(s); } private class DistanceTo implements Comparator<Planet> { Planet origin; DistanceTo(Planet origin) { this.origin = origin; } public int compare(Planet p1, Planet p2) { return getDistance(p1, origin) - getDistance(p2, origin); } } }
// Struct creates a struct{} expression. The arguments are a series // of name/type/tag tuples. Name must be of type *ast.Ident, type // must be of type ast.Expr, and tag must be of type *ast.BasicLit. // The number of arguments must be a multiple of 3, or a run-time // panic will occur. func Struct(args ...ast.Expr) *ast.StructType { fields := new(ast.FieldList) if len(args)%3 != 0 { panic("Number of args to Struct must be a multiple of 3, got " + strconv.Itoa(len(args))) } for i := 0; i < len(args); i += 3 { var field ast.Field name, typ, tag := args[i], args[i+1], args[i+2] if name != nil { field.Names = []*ast.Ident{name.(*ast.Ident)} } if typ != nil { field.Type = typ } if tag != nil { field.Tag = tag.(*ast.BasicLit) } fields.List = append(fields.List, &field) } return &ast.StructType{Fields: fields} }
package solace.io.xml; /** * Exception thrown in response to a parse error while parsing an area XML file. * @author <NAME> */ public class AreaParseException extends Exception { /** * Creates a new exception. * @param fileName Name of the file being parsed. * @param msg Message detailing the exact nature of the exception. */ public AreaParseException(String fileName, String msg) { super("Parse error in '" + fileName + "': " + msg); } }
/* * When decompressing, we typically obtain more than one page * per reference. We inject the additional pages into the page * cache as a form of readahead. */ static int zisofs_readpage(struct file *file, struct page *page) { struct inode *inode = file_inode(file); struct address_space *mapping = inode->i_mapping; int err; int i, pcount, full_page; unsigned int zisofs_block_shift = ISOFS_I(inode)->i_format_parm[1]; unsigned int zisofs_pages_per_cblock = PAGE_CACHE_SHIFT <= zisofs_block_shift ? (1 << (zisofs_block_shift - PAGE_CACHE_SHIFT)) : 0; struct page *pages[max_t(unsigned, zisofs_pages_per_cblock, 1)]; pgoff_t index = page->index, end_index; end_index = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; if (index >= end_index) { SetPageUptodate(page); unlock_page(page); return 0; } if (PAGE_CACHE_SHIFT <= zisofs_block_shift) { full_page = index & (zisofs_pages_per_cblock - 1); pcount = min_t(int, zisofs_pages_per_cblock, end_index - (index & ~(zisofs_pages_per_cblock - 1))); index -= full_page; } else { full_page = 0; pcount = 1; } pages[full_page] = page; for (i = 0; i < pcount; i++, index++) { if (i != full_page) pages[i] = grab_cache_page_nowait(mapping, index); if (pages[i]) { ClearPageError(pages[i]); kmap(pages[i]); } } err = zisofs_fill_pages(inode, full_page, pcount, pages); for (i = 0; i < pcount; i++) { if (pages[i]) { flush_dcache_page(pages[i]); if (i == full_page && err) SetPageError(pages[i]); kunmap(pages[i]); unlock_page(pages[i]); if (i != full_page) page_cache_release(pages[i]); } } return err; }
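The subtle part of zisofs_readpage() above is the index arithmetic that locates the requested page within its compressed block and collects the block's sibling pages for readahead. A small Python sketch of that arithmetic, using illustrative values (4 KiB pages and a 32 KiB zisofs block; the real shifts come from the inode's format parameters):

# Page-grouping arithmetic from zisofs_readpage(), with assumed example shifts.
PAGE_SHIFT = 12    # 4 KiB pages (illustrative)
BLOCK_SHIFT = 15   # 32 KiB compressed block (illustrative)
PAGES_PER_BLOCK = 1 << (BLOCK_SHIFT - PAGE_SHIFT)  # 8 pages per block

def group(index):
    full_page = index & (PAGES_PER_BLOCK - 1)   # offset of this page inside its block
    first = index & ~(PAGES_PER_BLOCK - 1)      # index of the block's first page
    return first, full_page

print(group(13))  # (8, 5): page 13 sits at offset 5 in the block starting at page 8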
<filename>GuildEdit/cogs/settings.py from discord.ext import commands import discord class Settings(commands.Cog): def __init__(self, bot): self.bot = bot self.database = self.bot.cogs["Database"] def locked_guilds(self): request = self.database.select_request("guildedit", "locked_guilds", "guild_id", (), ()) if type(request) == list or type(request) == tuple: return [int(x["guild_id"]) for x in request] if type(request) == dict: return [int(request["guild_id"])] return list() def ignored_bots(self): request = self.database.select_request("guildedit", "ignored_bots", "bot_id", (), ()) if type(request) == list or type(request) == tuple: return [int(x["bot_id"]) for x in request] if type(request) == dict: return [int(request["bot_id"])] return list() def add_entry(self, id: int, identified: bool = False, password: str = None, likesys: bool = False, banraidbots: bool = True, heuristic: bool = False, perms_lock: bool = True): if password is None: password = "" return self.database.insert_request("guildedit", "guilds_settings", ("guild_id", "identified", "password", "likesys", "banraidbots", "heuristic", "perms_lock"), (id, identified, password, likesys, banraidbots, heuristic, perms_lock)) def remove_entry(self, id: int): return self.database.delete_request("guildedit", "guilds_settings", ("guild_id",), (id,)) def get_entry(self, id: int): request = self.database.select_request("guildedit", "guilds_settings", '*', ("guild_id",), (id,)) result = None if type(request) != str and request is not None: result = { "ID": id, "identified": bool(request["identified"]), "password": request["password"], "likesys": bool(request["likesys"]), "banraidbots": bool(request["banraidbots"]), "heuristic": bool(request["heuristic"]), "perms_lock": bool(request["perms_lock"]) } else: result = request return result def edit_identified(self, id: int): entry = self.get_entry(id) if entry is None: self.add_entry(id, identified=True) return ":white_check_mark: Votre serveur est désormais visible !" else: if type(entry) == str: return ":x: **L'erreur suivante s'est produite :** `{}`".format(entry) request = self.database.update_request("guildedit", "guilds_settings", ("identified",), (not entry["identified"],), ("guild_id",), (id,)) if type(request) == str: return ":x: **L'erreur suivante s'est produite :** `{}`".format(request) return ":white_check_mark: Votre serveur n'est plus visible !" if entry["identified"] else ":white_check_mark: Votre serveur est de nouveau visible !" def edit_password(self, id: int, password: str): return self.database.update_request("guildedit", "guilds_settings", ("password",), (password,), ("guild_id",), (id,)) def like_system_enabled(self, id: int): entry = self.get_entry(id) if type(entry) == str or entry is None: return False else: return entry["likesys"] def edit_likesys(self, id: int): entry = self.get_entry(id) if entry is None: self.add_entry(id, likesys=True) return ":white_check_mark: Système de like activé !" else: if type(entry) == str: return ":x: **L'erreur suivante s'est produite :** `{}`".format(entry) request = self.database.update_request("guildedit", "guilds_settings", ("likesys",), (not entry["likesys"],), ("guild_id",), (id,)) if type(request) == str: return ":x: **L'erreur suivante s'est produite :** `{}`".format(request) return ":white_check_mark: Système de like désactivé !" if entry["likesys"] else ":white_check_mark: Système de like activé !"
def ban_raidbots(self, id: int): entry = self.get_entry(id) if type(entry) == str or entry is None: return True else: return entry["banraidbots"] def edit_ban_raidbots(self, id: int): entry = self.get_entry(id) if entry is None: self.add_entry(id, banraidbots=False) return ":x: **Les bots détectés comme bots de raid ne seront plus bannis !**" else: if type(entry) == str: return ":x: **L'erreur suivante s'est produite :** `{}`".format(entry) if entry["heuristic"]: return ":warning: **Le mode heuristique est activé ! Veuillez d'abord le désactiver avec `>hmode`.**" request = self.database.update_request("guildedit", "guilds_settings", ("banraidbots",), (not entry["banraidbots"],), ("guild_id",), (id,)) if type(request) == str: return ":x: **L'erreur suivante s'est produite :** `{}`".format(request) return ":x: **Les bots détectés comme bots de raid ne seront plus bannis.**" if entry[ "banraidbots"] else ":white_check_mark: Les bots détectés comme bots de raid seront désormais bannis." def heuristic(self, id: int): entry = self.get_entry(id) if type(entry) == str or entry is None: return False else: return entry["heuristic"] def edit_heuristic(self, id: int): entry = self.get_entry(id) if entry is None: self.add_entry(id, heuristic=True, banraidbots=False) return ":white_check_mark: **Mode heuristique activé !**" else: if type(entry) == str: return ":x: **L'erreur suivante s'est produite :** `{}`".format(entry) request = self.database.update_request("guildedit", "guilds_settings", ("heuristic", "banraidbots"), (not entry["heuristic"], not entry["banraidbots"]), ("guild_id",), (id,)) if type(request) == str: return ":x: **L'erreur suivante s'est produite :** `{}`".format(request) return ":white_check_mark: **Mode heuristique désactivé !**" if entry[ "heuristic"] else ":white_check_mark: **Mode heuristique activé !**" def perms_lock(self, id: int): entry = self.get_entry(id) if type(entry) == str or entry is None: return True else: return entry["perms_lock"] def edit_perms_lock(self, id: int): entry = self.get_entry(id) if entry is None: self.add_entry(id, perms_lock=False) return ":warning: **Les administrateurs ont désormais accès aux commandes sensibles !**" else: if type(entry) == str: return ":x: **L'erreur suivante s'est produite :** `{}`".format(entry) request = self.database.update_request("guildedit", "guilds_settings", ("perms_lock",), (not entry["perms_lock"],), ("guild_id",), (id,)) if type(request) == str: return ":x: **L'erreur suivante s'est produite :** `{}`".format(request) return ":warning: **Les administrateurs ont désormais accès aux commandes sensibles !**" if entry[ "perms_lock"] else ":white_check_mark: Seul le propriétaire du serveur a désormais accès aux commandes sensibles !" async def get_like_channel(self, guild: discord.Guild): for c in guild.text_channels: if c.name == "fil-des-likes": return c try: perms = {guild.default_role: discord.PermissionOverwrite(send_messages=False, add_reactions=False)} channel = await guild.create_text_channel(name="fil-des-likes", overwrites=perms) await channel.send( ":white_check_mark: Les messages avec plus de 15 réactions :heart: s'afficheront ici ! :smile:\nMerci de ne pas changer le nom de ce salon, sinon un autre sera créé avec ce nom.") return channel except Exception as e: return ":x: Impossible de créer le salon : `{}`".format(str(e)) def setup(bot): bot.add_cog(Settings(bot))
<gh_stars>1-10 package com.hermant.graphics.cameras; import com.hermant.graphics.scene.GameObject; import org.joml.Matrix4f; import org.joml.Vector3f; public class ThirdPersonCamera implements Camera { private GameObject thirdPerson; private float distance; private Vector3f up = new Vector3f(0, 1, 0); public ThirdPersonCamera(GameObject thirdPerson, float distance){ this.distance = distance; this.thirdPerson = thirdPerson; } @Override public Matrix4f setupViewMatrix(Matrix4f viewMatrix) { Vector3f cameraPosition = new Vector3f(thirdPerson.getPosition()); cameraPosition.sub( (float)Math.cos(Math.toRadians(thirdPerson.getRotation().x)) * (float)Math.cos(Math.toRadians(thirdPerson.getRotation().y)) * distance, (float)Math.sin(Math.toRadians(thirdPerson.getRotation().x)) * distance - distance, (float)Math.cos(Math.toRadians(thirdPerson.getRotation().x)) * (float)Math.sin(Math.toRadians(thirdPerson.getRotation().y)) * distance ); viewMatrix.identity(); viewMatrix.lookAt(cameraPosition, thirdPerson.getPosition(), up); return viewMatrix; } @Override public void rotate(float dx, float dy, float dz) { } @Override public void move(float dx, float dy, float dz) { } }
Breathing Pulses in an Excitatory Neural Network In this paper we show how a local inhomogeneous input can stabilize a stationary-pulse solution in an excitatory neural network. A subsequent reduction of the input amplitude can then induce a Hopf instability of the stationary solution resulting in the formation of a breather. The breather can itself undergo a secondary instability leading to the periodic emission of traveling waves. In one dimension such waves consist of pairs of counterpropagating pulses, whereas in two dimensions the waves are circular target patterns.
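For context, work of this kind is usually set in a scalar neural field model. A representative form, assumed here from standard formulations since the abstract does not reproduce the equations, is

\[
\frac{\partial u(x,t)}{\partial t} = -u(x,t) + \int_{-\infty}^{\infty} w(x - x')\, f\bigl(u(x',t)\bigr)\, dx' + I(x),
\]

where u(x,t) is the local activity, w >= 0 is an excitatory weight kernel, f is a firing-rate nonlinearity, and I(x) is the local inhomogeneous input; the input amplitude is then the natural bifurcation parameter for the Hopf instability that produces the breather.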
import argparse import os import sys from cryptography.exceptions import InvalidTag from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import hashes from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC SALT_LEN = 16 TAG_LEN = 16 IV_LEN = 12 def aes_decrypt_file(f_name, key, mode=modes.GCM): """ Decrypts a given file's contents from the given key. It reads the salt, IV, and GCM tag from the file, and then attempts to decipher the cipher text :param f_name: File whose contents will be decrypted :param key: Key to decrypt the contents with :param mode: AES mode, defaults to GCM :return: The original plain text if decryption is correct. """ with open(f_name, 'rb') as f: text = f.read() salt = text[0:SALT_LEN] tag = text[SALT_LEN:SALT_LEN + TAG_LEN] iv = text[SALT_LEN + TAG_LEN:SALT_LEN + IV_LEN + TAG_LEN] cipher_text = text[SALT_LEN + IV_LEN + TAG_LEN:] key = derive_key(key.encode('utf-8'), salt) cipher = Cipher(algorithm=algorithms.AES(key), mode=mode(iv), backend=default_backend()) decryptor = cipher.decryptor() try: return decryptor.update(cipher_text) + decryptor.finalize_with_tag(tag) except InvalidTag: raise Exception("Unable to decrypt text.") def aes_encrypt_file(f_name, key, iv=None, mode=modes.GCM): """ Encrypts the contents of a file, and saves it to "f_name.aes" The file is laid out with the first 16 bytes as salt, the next 16 as the GCM tag, and the next 12 as the IV, with the remaining bytes the cipher text to decrypt. :param f_name: File to read and encrypt :param key: The key to encrypt the file with :param iv: The initialization vector; if not supplied, a fresh 12 byte random value is generated per call. It is generated inside the function because a random default argument would be evaluated only once, at definition time, and the same IV would then be silently reused for every file. 12 bytes has been shown to be the best if it's random, since it doesn't require additional computations to encrypt it, but is still computationally secure. :param mode: The encryption mode, defaults to GCM, the method only uses AES to create cipher text """ if iv is None: iv = os.urandom(IV_LEN) salt = os.urandom(SALT_LEN) key = derive_key(key.encode('utf-8'), salt) encryptor = Cipher(algorithm=algorithms.AES(key), mode=mode(iv), backend=default_backend()).encryptor() with open(f_name, 'rb') as f: f_text = f.read() cipher_text = encryptor.update(f_text) + encryptor.finalize() with open(f_name + '.aes', 'wb') as o: # salt 16 bytes # tag 16 bytes # iv 12 bytes o.write(salt) o.write(encryptor.tag) o.write(iv) o.write(cipher_text) def derive_key(key, salt): """ Given a key and a salt, derives a cryptographically secure key to be used in following computations.
This is to allow any size key as input to the program, as we can extend it to the required multiple of 16,24,32 that AES requires :return: Derived key from python's cryptography library """ backend = default_backend() kdf = PBKDF2HMAC( algorithm=hashes.SHA256(), length=32, salt=salt, iterations=2 ** 20, backend=backend ) return kdf.derive(key) def main(): args = parse_args(sys.argv[1:]) if args.encrypt: aes_encrypt_file(f_name=args.input, key=args.key) else: with open("decrypted", "wb") as f: f.write(aes_decrypt_file(f_name=args.input, key=args.key)) def parse_args(args): parser = argparse.ArgumentParser(description='Encrypt a file with AES encryption.') group = parser.add_mutually_exclusive_group(required=True) group.add_argument('-e', '--encrypt', help='Flag that we encrypt the file.', action='store_true') group.add_argument('-d', '--decrypt', help='Flag decrypt the file.', action='store_true') parser.add_argument('-k', '--key', help='The key to encrypt the file with', required=True) parser.add_argument('-i', '--input', help='The data file you want hidden', required=True) return parser.parse_args(args) if __name__ == '__main__': main()
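A quick usage sketch for the two functions above; the file name and key are illustrative, and note that the 2**20 PBKDF2 iterations in derive_key() make each call deliberately slow:

# Round trip: encrypt a file, then decrypt the ".aes" output it produces.
with open("secret.txt", "wb") as f:
    f.write(b"attack at dawn")

aes_encrypt_file("secret.txt", key="correct horse battery staple")   # writes secret.txt.aes
plaintext = aes_decrypt_file("secret.txt.aes", key="correct horse battery staple")
assert plaintext == b"attack at dawn"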
/** * Find a root of a logical tree containing this file, if any. * @param file a file on disk * @return an ancestor directory which is the root of a logical tree, * if any (else null) * @since 1.27 */ public static URI findRoot(URI file) { if (!file.equals(BaseUtilities.normalizeURI(file))) { throw new IllegalArgumentException("Parameter file was not "+ "normalized. Was "+file+" instead of "+BaseUtilities.normalizeURI(file)); } for (CollocationQueryImplementation2 cqi : implementations2.allInstances()) { URI root = cqi.findRoot(file); if (root != null) { return root; } } if ("file".equals(file.getScheme())) { File f = FileUtil.normalizeFile(BaseUtilities.toFile(file)); for (org.netbeans.spi.queries.CollocationQueryImplementation cqi : implementations.allInstances()) { File root = cqi.findRoot(f); if (root != null) { return BaseUtilities.toURI(root); } } } return null; }
A Silver Line train approaches the Spring Hill Station in Tysons. (Photo by Yue Wu/The Washington Post) Northern Virginia’s economy is undergoing a rocky transition, with ongoing losses of federal jobs, empty office buildings lined up along Route 28, the state’s biggest private building empty for more than a year and Tysons Corner trying to remake itself. On his blog, George Mason University professor, economist and writer Tyler Cowen regularly offers insight into issues local, national and international. He sees something larger happening in the commonwealth, and this week he wrote a post suggesting that Northern Virginia is gradually splitting into two different places. Cowen posits that an identity is forming around north Arlington and Northwest D.C. that resembles Santa Monica, while much of Fairfax County is failing. Here’s his post in full: When I visited Santa Monica in January it struck me how much it reminded me of…Arlington. Arlington is now essentially a part of Northwest, at least Arlington above Route 50 or so. Arlington and Santa Monica have never been more alike, or less distinctive. Parts of east Falls Church will meld into Arlington, and south Arlington will become more like north Arlington. Real estate prices east/north of a particular line are rising and west of that line are falling. Fairfax is definitely west of that line. The Tysons Corner remake will fail, Vienna is not the new Clarendon, and the Silver Line and the monstrously wide Rt.7 will form a new dividing line between parts of Virginia which resemble Santa Monica and parts which do not. Incumbents aside, no one lives in Fairfax any more to commute into D.C. Why would you? The alternatives are getting better and Metro parking became too difficult some time ago. Fairfax is not being transformed, although some parts are morphing into “the new Shirlington.” Most of it will stay dumpy on the retail side. Annandale will stay with Fairfax, whether it likes it or not. For ten years now I have been predicting various Fairfax restaurants will close — casualties of too-high rents — and mostly I have been wrong. The good Annandale restaurants are running strong too. Annandale won’t look much better anytime soon, thank goodness for that. “Northern Virginia” is becoming two different places, albeit slowly. Planners and commercial real estate types have begun saying something similar about Northern Virginia and the suburbs in general, though with some differences. Since the recession, a premium has been placed on walkable, urban (or at least urban-feeling) places. In the suburbs, these places are often built around public transit hubs, as Arlington has done around Orange Line stations. When Arlington was referred to recently as “the suburb of the future,” one imagines it was north Arlington the writer had in mind. On the flip side, places built to serve a single use — shopping malls, office parks, subdivisions — and which are accessible only by car have sometimes ended up losers. Suburban office parks that are not near transit, for instance, are suffering high vacancy, to the point where experts believe some of them will have to be torn down rather than re-leased. Mall owners are trying to re-invent their properties as mixed-use town centers. A major question for Fairfax County stakeholders is whether Tysons, outfitted with four Silver Line stations, will join the first group or the latter.
Will the growing companies that make up the private sector as government declines choose to relocate in Tysons, or will they shun it the way they do Sterling, because of young workers who ride transit or do not own cars? Cowen is firmly in the camp betting against Tysons. He wrote in 2009 that Tysons is “like a large box with distant extremities protruding, all laid on top of some multi-level and impassable thick bones.” Harder to classify perhaps is Reston Town Center, where rents are already higher than they are in the Rosslyn-Ballston corridor even though Metro has not yet arrived.
/// Sets up the highlight groups for the Lsp icons, linking them to /// pre-existing defaults. pub fn hlgroups(lua: &Lua) -> LuaResult<()> { let opts = lua.create_table_with_capacity(0, 2)?; // Don't override existing definitions. opts.set("default", true)?; opts.set("link", "TSNone")?; api::set_hl(lua, 0, hlgroup::TEXT, opts.clone())?; opts.set("link", "TSFunction")?; api::set_hl(lua, 0, hlgroup::METHOD, opts.clone())?; opts.set("link", "TSFunction")?; api::set_hl(lua, 0, hlgroup::FUNCTION, opts.clone())?; opts.set("link", "TSFunction")?; api::set_hl(lua, 0, hlgroup::CONSTRUCTOR, opts.clone())?; opts.set("link", "TSConstructor")?; api::set_hl(lua, 0, hlgroup::FIELD, opts.clone())?; opts.set("link", "TSConstructor")?; api::set_hl(lua, 0, hlgroup::VARIABLE, opts.clone())?; opts.set("link", "TSParameter")?; api::set_hl(lua, 0, hlgroup::CLASS, opts.clone())?; opts.set("link", "TSConstructor")?; api::set_hl(lua, 0, hlgroup::INTERFACE, opts.clone())?; opts.set("link", "TSNone")?; api::set_hl(lua, 0, hlgroup::MODULE, opts.clone())?; opts.set("link", "TSNone")?; api::set_hl(lua, 0, hlgroup::PROPERTY, opts.clone())?; opts.set("link", "TSNone")?; api::set_hl(lua, 0, hlgroup::UNIT, opts.clone())?; opts.set("link", "TSParameter")?; api::set_hl(lua, 0, hlgroup::VALUE, opts.clone())?; opts.set("link", "TSParameter")?; api::set_hl(lua, 0, hlgroup::ENUM, opts.clone())?; opts.set("link", "TSNone")?; api::set_hl(lua, 0, hlgroup::KEYWORD, opts.clone())?; opts.set("link", "TSNone")?; api::set_hl(lua, 0, hlgroup::SNIPPET, opts.clone())?; opts.set("link", "TSNone")?; api::set_hl(lua, 0, hlgroup::COLOR, opts.clone())?; opts.set("link", "TSNone")?; api::set_hl(lua, 0, hlgroup::FILE, opts.clone())?; opts.set("link", "TSNone")?; api::set_hl(lua, 0, hlgroup::REFERENCE, opts.clone())?; opts.set("link", "TSNone")?; api::set_hl(lua, 0, hlgroup::FOLDER, opts.clone())?; opts.set("link", "TSNone")?; api::set_hl(lua, 0, hlgroup::ENUM_MEMBER, opts.clone())?; opts.set("link", "TSNone")?; api::set_hl(lua, 0, hlgroup::CONSTANT, opts.clone())?; opts.set("link", "TSNone")?; api::set_hl(lua, 0, hlgroup::STRUCT, opts.clone())?; opts.set("link", "TSParameter")?; api::set_hl(lua, 0, hlgroup::EVENT, opts.clone())?; opts.set("link", "TSNone")?; api::set_hl(lua, 0, hlgroup::OPERATOR, opts.clone())?; opts.set("link", "TSNone")?; api::set_hl(lua, 0, hlgroup::TYPE_PARAMETER, opts.clone())?; Ok(()) }
import { TsCode } from '../abstruct/tsCode'; import { Identifier, NumericLiteral, StringLiteral } from './expressions'; export class EnumMember extends TsCode { constructor(private readonly identifier: Identifier, private readonly value?: StringLiteral | NumericLiteral) { super(); this.mergeImport(identifier); if (value) this.mergeImport(value); } protected toTsString(): string { if (!this.value) return this.identifier.toString(); return this.identifier + '=' + this.value; } }
// Copyright (C) Rishabh Iyer, John T. Halloran, and Kai Wei // Licensed under the Open Software License version 3.0 // See COPYING or http://opensource.org/licenses/OSL-3.0 #ifndef SET_H #define SET_H #include <unordered_set> class Set { protected: std::unordered_set<int> uset; public: Set(); Set(int max_elements); Set(int max_elements, bool); Set(const Set& other); Set& operator=(const Set& other); void insert(int i); void remove(int i); bool contains(int i) const; void clear(); int size() const; typedef std::unordered_set<int>::iterator iterator; typedef std::unordered_set<int>::const_iterator const_iterator; iterator begin(); iterator end(); const_iterator begin() const; const_iterator end() const; }; #endif
#include "Gripper.hpp" #include <rw/rw.hpp> #include <rwsim/rwsim.hpp> #include <rw/models/RigidObject.hpp> using namespace std; USE_ROBWORK_NAMESPACE; using namespace robwork; using namespace rwsim; Gripper::Gripper(const std::string& name) : _name(name), _baseGeometry(NULL), _leftGeometry(NULL), _rightGeometry(NULL), _tcp(Transform3D<>(Vector3D<>(0, 0, 0.075))), _jawdist(0), _opening(0.05), _force(50) { setBaseGeometry(Q(3, 0.15, 0.1, 0.05)); setJawGeometry(Q(10, 0, 0.1, 0.025, 0.02, 0, 0, 0.05, 0, 90*Deg2Rad, 0)); } void Gripper::updateGripper(rw::models::WorkCell::Ptr wc, rwsim::dynamics::DynamicWorkCell::Ptr dwc, rw::models::TreeDevice::Ptr dev, rwsim::dynamics::RigidDevice::Ptr ddev, rw::kinematics::State& state, TaskDescription::Ptr td) { Geometry::Ptr baseGeometry = getBaseGeometry(); Geometry::Ptr leftGeometry = getJawGeometry(); Geometry::Ptr rightGeometry = getJawGeometry(); // remove existing objects cout << "- Removing objects..." << endl; wc->removeObject(wc->findObject("gripper.Base").get()); wc->removeObject(wc->findObject("gripper.LeftFinger").get()); wc->removeObject(wc->findObject("gripper.RightFinger").get()); cout << "- Objects removed." << endl; // create and add new objects cout << "- Adding new objects..." << endl; // if base is parametrized, the box has to be moved from origin by half its height Transform3D<> baseT; if (_isBaseParametrized) { baseT = Transform3D<>(-0.5*_baseParameters(2)*Vector3D<>::z()); } RigidObject* baseobj = new RigidObject(wc->findFrame("gripper.Base")); Model3D* basemodel = new Model3D("BaseModel"); basemodel->addTriMesh(Model3D::Material("stlmat",0.4f,0.4f,0.4f), *baseGeometry->getGeometryData()->getTriMesh() ); basemodel->setTransform(baseT); baseGeometry->setTransform(baseT); baseobj->addModel(basemodel); baseobj->addGeometry(baseGeometry); wc->add(baseobj); dwc->findBody("gripper.Base")->setObject(baseobj); RigidObject* leftobj = new RigidObject(wc->findFrame("gripper.LeftFinger")); Model3D* leftmodel = new Model3D("LeftModel"); leftmodel->addTriMesh(Model3D::Material("stlmat",0.4f,0.4f,0.4f), *leftGeometry->getGeometryData()->getTriMesh() ); leftmodel->setTransform(Transform3D<>()); leftGeometry->setTransform(Transform3D<>()); leftobj->addModel(leftmodel); leftobj->addGeometry(leftGeometry); wc->add(leftobj); dwc->findBody("gripper.LeftFinger")->setObject(leftobj); RigidObject* rightobj = new RigidObject(wc->findFrame("gripper.RightFinger")); Model3D* rightmodel = new Model3D("RightModel"); rightmodel->addTriMesh(Model3D::Material("stlmat",0.4f,0.4f,0.4f), *rightGeometry->getGeometryData()->getTriMesh() ); rightmodel->setTransform(Transform3D<>(Vector3D<>(), Rotation3D<>(1, 0, 0, 0, 1, 0, 0, 0, -1))); rightGeometry->setTransform(Transform3D<>(Vector3D<>(), Rotation3D<>(1, 0, 0, 0, 1, 0, 0, 0, -1))); rightobj->addModel(rightmodel); rightobj->addGeometry(rightGeometry); wc->add(rightobj); dwc->findBody("gripper.RightFinger")->setObject(rightobj); cout << "Objects added." << endl; // set tcp //string tcpFrameName = wc->getPropertyMap().get<string>("gripperTCP"); MovableFrame* tcp = (MovableFrame*)td->getGripperTCP(); //wc->findFrame<MovableFrame>(tcpFrameName); tcp->setTransform(_tcp, state); //cout << "LOL" << tcp->getName() << endl; // set bounds double minOpening = _jawdist; if (minOpening < 0.0) minOpening = 0.0; dev->setBounds(make_pair(Q(1, minOpening), Q(1, _opening))); dev->setQ(Q(1, minOpening), state); // set force ddev->setMotorForceLimits(Q(2, _force, _force)); cout << "Gripper updated!" 
<< endl; } double Gripper::getCrossHeight(double x) const { if (_isJawParametrized) { // jaw is parametrized -- easy double length = _jawParameters(1); if (x > length) return 0.0; // far beyond the gripper double depth = _jawParameters(3); double width = _jawParameters(2); double lwidth = width; // check if to subtract from the width due to the chamfering double chfDepth = _jawParameters(4); double chfAngle = _jawParameters(5); double d = length - chfDepth * width * tan(chfAngle); if (x > d) { lwidth = width - (x - d) * 1.0/tan(chfAngle); } // check if subtract from the width due to the cut double cutPos = _jawParameters(6); double cutDepth = _jawParameters(7); double cutAngle = _jawParameters(8); double cutDist = abs(x - cutPos); if (cutDist < cutDepth * tan(cutAngle/2.0)) { lwidth -= cutDepth - cutDist * tan(1.57 - cutAngle/2.0); } if (lwidth < 0.0) lwidth = 0.0; return lwidth; } else { // TODO: calculate stl's crossection somehow return 0.0; } } double Gripper::getMaxStress() const { if (!_isJawParametrized) return 0.0; // TODO: add calculations for STL double length = _jawParameters(1); double sigmaMax = 0.0; for (double x = 0.0; x < length; x += 0.001) { double h = 100 * getCrossHeight(x); double b = 100 * _jawParameters(3); double M = x > length ? 0.0 : (length - x) * _force; double sigma = 6 * M / (b * h * h); if (isinf(sigma)) sigma = 1000000.0; if (isnan(sigma)) sigma = 0.0; if (sigma > sigmaMax) sigmaMax = sigma; //cout << x << ' ' << h << ' ' << M << ' ' << sigma << endl; } return sigmaMax; } double Gripper::getVolume() const { return _jawParameters(1) * _jawParameters(2) * _jawParameters(3); } /*void Gripper::loadTasks(std::string filename) { if (filename.empty()) return; //_dataFilename = filename; cout << "Loading tasks from: " << filename << "\n"; _tasks = GraspTask::load(filename); cout << "Tasks loaded!" << endl; } void Gripper::saveTasks(std::string filename) { if (filename.empty()) return; if (!_tasks) return; //_dataFilename = filename; try { cout << "Saving tasks to: " << filename << "\n"; GraspTask::saveRWTask(_tasks, filename); cout << "Tasks saved!" << endl; } catch (...) { } }*/
import React from 'react';
import {
  Example,
  Props,
  code,
  md,
  AtlassianInternalWarning,
  DevPreviewWarning,
} from '@atlaskit/docs';

const BasicExample = require('../examples/00-usage').default;

export default md`
${(
  <>
    <div style={{ marginBottom: '0.5rem' }}>
      <AtlassianInternalWarning />
    </div>
    <div style={{ marginTop: '0.5rem' }}>
      <DevPreviewWarning />
    </div>
  </>
)}

This component is used to ask for feedback from the user, without affecting their usage of the page. It is styled similarly to a \`flag\`.

The user flow for this component is:

#### Phase 1: Feedback

- Choosing a feedback score
- **(Optional)** Writing extra feedback in a \`textarea\`
- **(Optional)** User selects if they can be contacted about their feedback. This is automatically set to \`true\` on the first change event for the \`textarea\`. It is set to \`false\` by default.

#### Phase 2 (Optional): Atlassian Research Signup

This phase will be entered when:

1. The user has selected they want to be contacted about their feedback
2. \`getUserHasAnsweredMailingList()\` has resolved to \`false\`

If this phase is not entered then a thank you message is displayed.

In this phase a prompt is opened which asks the user if they want to join the **Atlassian Research Group**. After \`onMailingListAnswer()\` has resolved:

- If the user selected **yes**, a thank you message is displayed
- If the user selected **no**, the survey is closed.

#### Dismissing

\`onDismiss\` will be called when the survey is finished. This can happen when:

- The user explicitly closes the survey
- The user finishes the survey. The survey will be auto dismissed after a small delay
- The \`<SurveyComponent/>\` is unmounted

\`onDismiss\` will only ever be called once.

\`onDismiss\` is called with arguments: \`{ trigger: DismissTrigger }\`. This can be used to distinguish between different rationales for dismissing.

${code`
// Types
export enum DismissTrigger {
  AutoDismiss = 'AUTO_DISMISS',
  Manual = 'MANUAL',
  Finished = 'FINISHED',
  Unmount = 'UNMOUNT',
}
export type OnDismissArgs = { trigger: DismissTrigger };
onDismiss: (args: OnDismissArgs) => void;

// These types are exported publicly for you to use
import { DismissTrigger, OnDismissArgs } from '@atlaskit/contextual-survey';
`}

#### Responsibilities

- \`<SurveyMarshal/>\`: Responsible for placement and animation for the survey.
- \`<ContextualSurvey/>\`: Renders the survey questions

## Usage

${code`import { ContextualSurvey, SurveyMarshal } from '@atlaskit/contextual-survey';`}

${(
  <Example
    packageName="@atlaskit/contextual-survey"
    Component={() => <BasicExample height="500px" />}
    title="Basic example"
    source={require('!!raw-loader!../examples/00-usage')}
  />
)}

${(
  <Props
    heading="Contextual Survey Props"
    props={require('!!extract-react-types-loader!../src/components/ContextualSurvey')}
  />
)}

${(
  <Props
    heading="Survey Marshal Props"
    props={require('!!extract-react-types-loader!../src/components/SurveyMarshal')}
  />
)}
`;
/**
 * Called to indicate that the last write has been performed.
 * It updates the state and performs cleanup operations.
 */
void closed()
{
    while (true)
    {
        OutputState state = _state.get();
        switch (state)
        {
            case CLOSED:
            {
                return;
            }
            case UNREADY:
            {
                if (_state.compareAndSet(state, OutputState.ERROR))
                    _writeListener.onError(_onError == null ? new EofException("Async closed") : _onError);
                break;
            }
            default:
            {
                if (!_state.compareAndSet(state, OutputState.CLOSED))
                    break;
                try
                {
                    _channel.getResponse().closeOutput();
                }
                catch (Throwable x)
                {
                    if (LOG.isDebugEnabled())
                        LOG.debug(x);
                    abort(x);
                }
                finally
                {
                    releaseBuffer();
                }
                return;
            }
        }
    }
}
class SonosActionException(Exception):
    """An exception occurred with the Sonos Action."""


class ServiceException(SonosActionException):
    """An exception occurred within a service"""


# Request Objects

class RequestObjectException(SonosActionException):
    """An exception occurred with a request object"""


class RequestObjectInitializationException(RequestObjectException):
    """An exception occurred while initializing a request object"""

    def __init__(self, invalid_request_object):
        self.invalid_request_object = invalid_request_object


# Device Discovery Service

class DeviceDiscoveryException(ServiceException):
    """An exception occurred with the device discovery service"""


class DeviceParsingException(DeviceDiscoveryException):
    """An error occurred while trying to parse a device"""


class NoReachableDeviceException(DeviceDiscoveryException):
    """No connected devices were found by the DeviceDiscovery service"""


class ExternalDeviceDiscoveryUnreachable(DeviceDiscoveryException):
    """An error occurred with the device discovery driver"""


# Music Search Service

class MusicSearchService(ServiceException):
    """An error occurred within the Music Search Service"""


class MusicSearchCredentialsError(MusicSearchService):
    """An error occurred with the credentials given to the Music Search Service"""


class MusicSearchProviderConnectionError(MusicSearchService):
    """A connection error occurred with the provider of the Music Search Service"""


class APIRequestError(Exception):
    """An exception occurred when interacting with the Sonos API."""


class APIRequestWrongParams(APIRequestError):
    """The API was called with wrong parameters."""


# Entities injection service

class EntityInjectionService(ServiceException):
    """An error occurred within the Entity Injection Service"""


class InvalidEntitySlotName(EntityInjectionService):
    """An unknown entity slot name has been used to build the payload"""


# Client

class ClientException(Exception):
    """An exception occurred with the web client"""


# Spotify client

class SpotifyClientException(ClientException):
    """An error occurred within the Spotify Client"""


class DeezerClientException(ClientException):
    """An error occurred within the Deezer Client"""


class SpotifyClientAuthException(SpotifyClientException):
    """An error occurred when trying to authenticate the user with Spotify"""


class SpotifyClientAuthorizationException(SpotifyClientAuthException):
    """An error occurred when retrieving the authorization code from Spotify"""


class SpotifyClientAuthRefreshAccessTokenException(SpotifyClientAuthException):
    """An error occurred when refreshing the access token from Spotify"""


# Deezer client

class DeezerClientAuthorizationException(DeezerClientException):
    """An error occurred when retrieving the authorization code from Deezer"""


class DeezerClientAuthRefreshAccessTokenException(DeezerClientException):
    """An error occurred when refreshing the access token from Deezer"""


class QueryBuilderException(Exception):
    """An error occurred while building a query"""


# Spotify query builder

class SpotifyQueryBuilderException(QueryBuilderException):
    """An error occurred within the Spotify Query Builder"""


class SpotifyQueryBuilderNonExistentTimeRange(SpotifyQueryBuilderException):
    """The time range used does not exist, please use valid time ranges:
    'long_term', 'medium_term', 'short_term'"""


# Deezer query builder

class DeezerQueryBuilderException(QueryBuilderException):
    """An error occurred when building a query for the Deezer API."""


# Adapters exceptions

class AdapterException(SonosActionException):
    """Something wrong happened in the Interface Adapter layer"""


# Node query builder

class NodeQueryBuilderException(Exception):
    """An error occurred within the Node Query Builder"""


class NodeQueryBuilderUnavailableMusicService(NodeQueryBuilderException):
    """The music service is not implemented"""


class NodeQueryBuilderMissingQueryData(NodeQueryBuilderException):
    """Result type and/or field filters have not been provided"""


# Deezer search and play

class DeezerSearchServiceException(ServiceException):
    """An error occurred within the Deezer Search service"""


# Device Discovery Service

class DeviceDiscoveryServiceException(ServiceException):
    """An error occurred within the Device Discovery Service"""


# Configuration File Parsing

class ConfigurationFileValidationException(Exception):
    pass
package com.oneppd.memento;

import java.util.Stack;
import java.util.concurrent.locks.ReentrantLock;

import com.oneppd.domain.ShoppingCart;

public class Memento {

  private static Memento INSTANCE;
  private static ReentrantLock lock = new ReentrantLock();

  private Stack<ShoppingCart> stack = new Stack<ShoppingCart>();

  private Memento() {}

  public static Memento getInstance() {
    lock.lock();
    try {
      if (INSTANCE == null) {
        INSTANCE = new Memento();
      }
    } finally {
      // release the lock even if construction throws
      lock.unlock();
    }
    return INSTANCE;
  }

  public void saveState(ShoppingCart e) {
    try {
      // push a defensive copy so later mutations don't affect the saved state
      this.stack.push(e.clone());
    } catch (CloneNotSupportedException e1) {
      // ShoppingCart is expected to support clone(); ignore otherwise
    }
  }

  public ShoppingCart restore() {
    return stack.pop();
  }
}
package info.vehicle.actions;

import info.vehicle.methods.BaseTest;
import info.vehicle.methods.TestCaseFailed;
import info.vehicle.pages.StartPage;

public final class StartPageAction implements BaseTest {

    public static void navigateToDvla() throws TestCaseFailed {
        navigationObj.navigateTo("https://www.gov.uk/get-vehicle-information-from-dvla");
        assertionObj.checkTitle("Get vehicle information from DVLA - GOV.UK", true);
    }

    public static void startButtonClick() {
        StartPage.startButton.click();
    }
}
/**
 * Section with an output.
 */
public static class MockOutputSection {

    @Next("doOutput")
    public void doInput() {
        // Testing type
    }
}
Rabbi Lazarus and the Rich Man: A Talmudic Parody of the Late Roman Hell (Yerushalmi Hagigah 2.2, 77d and Sanhedrin 6.9, 23c) The Talmud Yerushalmi absorbs Christian ethical and narrative paradigms at the same time as parodying many aspects of Christian gospel stories that were popular in the fifth century. The rabbis create an entertaining story about a tour of hell and about a lewd disciple that shows their acculturation to early Byzantium as well as their resistance to cultural colonization.
import java.nio.charset.StandardCharsets;
import java.security.AlgorithmParameters;
import java.security.InvalidAlgorithmParameterException;
import java.security.InvalidKeyException;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.security.SecureRandom;
import java.security.spec.InvalidKeySpecException;
import java.security.spec.InvalidParameterSpecException;

import javax.crypto.BadPaddingException;
import javax.crypto.Cipher;
import javax.crypto.IllegalBlockSizeException;
import javax.crypto.NoSuchPaddingException;
import javax.crypto.SecretKey;
import javax.crypto.SecretKeyFactory;
import javax.crypto.spec.IvParameterSpec;
import javax.crypto.spec.PBEKeySpec;
import javax.crypto.spec.SecretKeySpec;

import org.apache.commons.codec.DecoderException;
import org.apache.commons.codec.binary.Hex;

/**
 *
 * @author Ahmed Sakr
 * @since February 28, 2016.
 */
public class AESWorker {

    // the default value the worker will assume will be 256 bits for the key length.
    private int keyLength = 256;

    /**
     * Returns the running key length of this worker instance.
     *
     * @return an integer key length value
     */
    public int getKeyLength() {
        return this.keyLength;
    }

    /**
     * Sets the running key length of this worker instance. Any value bigger than
     * the maximum policy will be automatically reduced to the maximum policy.
     * IMPORTANT: Value must be a power of two.
     *
     * @param keyLength the new key length in bits
     */
    public void setKeyLength(int keyLength) {
        this.keyLength = keyLength;
    }

    /**
     * Through the power of the advanced encryption standard, a plaintext will be encrypted with a parameter-specified
     * password, an extra protective layer (salt), and a specified key length. Make sure to acquire the salt and ivBytes
     * as they are necessary for decrypting the encrypted result.
     *
     * Firstly, the password is obtained and instantly overridden with the hashed version of the password, allowing
     * for stronger security as the plaintext password will not be used. Secondly, an arbitrary salt is securely
     * generated. Finally, the encryption standard is carried out and the encrypted text is obtained.
     *
     * @param password the password as a char array.
     * @param text     the plaintext bytes to be encrypted.
     *
     * @return the encrypted text in hexadecimal format.
     */
    public char[] encrypt(char[] password, byte[] text)
            throws NoSuchAlgorithmException, InvalidKeySpecException, NoSuchPaddingException,
            InvalidKeyException, InvalidParameterSpecException, BadPaddingException,
            IllegalBlockSizeException {
        Cipher cipher = Cipher.getInstance("AES/CBC/PKCS5Padding");
        if (Cipher.getMaxAllowedKeyLength("AES") < this.keyLength) {
            this.keyLength = Cipher.getMaxAllowedKeyLength("AES");
            System.err.printf("WARNING: YOUR MAXIMUM AES KEY LENGTH POLICY IS %d BITS. KEY LENGTH LIMITED TO %d BITS.\n",
                    this.keyLength, this.keyLength);
        }

        // hash the password and acquire a securely and randomly generated salt
        password = hash(new String(password).getBytes(StandardCharsets.UTF_8));
        byte[] salt = new byte[20];
        new SecureRandom().nextBytes(salt);

        // acquire the key
        SecretKeyFactory factory = SecretKeyFactory.getInstance("PBKDF2WithHmacSHA1");
        PBEKeySpec spec = new PBEKeySpec(password, salt, 16384, this.keyLength);
        SecretKey key = factory.generateSecret(spec);
        SecretKeySpec keySpec = new SecretKeySpec(key.getEncoded(), "AES");

        // init the cipher and process the encryption
        cipher.init(Cipher.ENCRYPT_MODE, keySpec);
        AlgorithmParameters ap = cipher.getParameters();
        byte[] ivBytes = ap.getParameterSpec(IvParameterSpec.class).getIV();

        byte[] result = cipher.doFinal(text);
        return Hex.encodeHex(mergeByteArrays(ivBytes, result, salt));
    }

    /**
     * An override of the encrypt method with a default keyLength value of 256 bits.
     *
     * @param password the password used to encrypt the text.
     * @param text     the text to be encrypted using the advanced encryption standard.
     *
     * @return the bytes of encrypted text.
     */
    public char[] encrypt(String password, String text)
            throws NoSuchAlgorithmException, InvalidKeySpecException, NoSuchPaddingException,
            InvalidKeyException, InvalidParameterSpecException, BadPaddingException,
            IllegalBlockSizeException {
        return encrypt(password.toCharArray(), text.getBytes(StandardCharsets.UTF_8));
    }

    /**
     * Decrypting text that is encrypted by the advanced encryption standard.
     *
     * @param password       the char array containing the plaintext password
     * @param encryptedBlock the encrypted text to be targeted and decrypted.
     *
     * @return the decrypted byte array of the encrypted text.
     */
    public byte[] decrypt(char[] password, char[] encryptedBlock)
            throws NoSuchAlgorithmException, NoSuchPaddingException, InvalidKeySpecException,
            InvalidKeyException, BadPaddingException, IllegalBlockSizeException,
            InvalidAlgorithmParameterException, DecoderException {
        Cipher cipher = Cipher.getInstance("AES/CBC/PKCS5Padding");
        if (Cipher.getMaxAllowedKeyLength("AES") < this.keyLength) {
            this.keyLength = Cipher.getMaxAllowedKeyLength("AES");
            System.err.printf("WARNING: YOUR MAXIMUM AES KEY LENGTH POLICY IS %d BITS. KEY LENGTH LIMITED TO %d BITS.\n",
                    this.keyLength, this.keyLength);
        }

        // hash the password with the MD5 function and decode the encryptedBlock
        password = hash(new String(password).getBytes(StandardCharsets.UTF_8));
        byte[] decoded = Hex.decodeHex(encryptedBlock);

        // The decoded byte array has the IV, encryptedText, and salt bytes stored in that order.
        // The IV bytes are of length 16 and the salt is of length 20.
        byte[] encryptedText = new byte[decoded.length - 36], ivBytes = new byte[16], salt = new byte[20];

        // The decoded bytes are ordered in the following form: ivBytes + encryptedText + saltBytes.
        // Extract the bytes into their corresponding arrays.
        System.arraycopy(decoded, 0, ivBytes, 0, ivBytes.length);
        System.arraycopy(decoded, ivBytes.length, encryptedText, 0, encryptedText.length);
        System.arraycopy(decoded, decoded.length - salt.length, salt, 0, salt.length);

        // generate the key from the acquired data
        SecretKeyFactory factory = SecretKeyFactory.getInstance("PBKDF2WithHmacSHA1");
        PBEKeySpec spec = new PBEKeySpec(password, salt, 16384, this.keyLength);
        SecretKey key = factory.generateSecret(spec);
        SecretKeySpec keySpec = new SecretKeySpec(key.getEncoded(), "AES");

        // finally, attempt to decrypt the encryptedText
        cipher.init(Cipher.DECRYPT_MODE, keySpec, new IvParameterSpec(ivBytes));
        return cipher.doFinal(encryptedText);
    }

    /**
     * An override of the decrypt method with the ability to provide the password and encryptedText as String
     * objects, and a default value of 256 bits for the AES key length value.
     *
     * @param password      the plaintext password as a String.
     * @param encryptedText the encrypted text as a String.
     *
     * @return the decrypted byte array of the encrypted text.
     */
    public byte[] decrypt(String password, String encryptedText)
            throws NoSuchPaddingException, DecoderException, InvalidAlgorithmParameterException,
            NoSuchAlgorithmException, IllegalBlockSizeException, BadPaddingException,
            InvalidKeyException, InvalidKeySpecException {
        return decrypt(password.toCharArray(), encryptedText.toCharArray());
    }

    /**
     * Hashes the plain password before it is fed into the PBKDF2 key derivation.
     * Note: MD5 is used here only to derive a fixed-length hex passphrase; it is
     * not a modern password hash on its own.
     *
     * @param password the bytes of the plaintext password.
     *
     * @return the hashed password's characters in an array.
     */
    private char[] hash(byte[] password) throws NoSuchAlgorithmException {
        MessageDigest md = MessageDigest.getInstance("MD5");
        md.reset();
        md.update(password);
        return Hex.encodeHex(md.digest());
    }

    /**
     * Merges all the byte[] varargs.
     *
     * @param arrays the byte[] varargs
     *
     * @return the master byte[] containing all the byte arrays.
     */
    private byte[] mergeByteArrays(byte[]... arrays) {
        int capacity = 0;
        for (byte[] arr : arrays) {
            capacity += arr.length;
        }

        byte[] result = new byte[capacity];
        int index = 0;
        for (byte[] array : arrays) {
            System.arraycopy(array, 0, result, index, array.length);
            index += array.length;
        }

        return result;
    }
}
Quality Assurance for Cerebrospinal Fluid Protein Analysis: International Consensus by an Internet-Based Group Discussion

Abstract

A group of neurologists and clinical neurochemists representing twelve countries worked towards a consensus on laboratory techniques to improve the quality of analysis and interpretation of cerebrospinal fluid (CSF) proteins. Consensus was approached via a virtual Lotus Notes-based TeamRoom. This new approach, respecting multicultural differences, common views, and minority opinions, is available at http://www.teamspace.net/CSF, presenting the implicit, complementary version of this explicit, printed consensus. Three key recommendations were made: CSF and (appropriately diluted) serum samples should be analyzed together in one analytical run, i.e., with reference to the same calibration curve. Results are evaluated as CSF/serum quotients, taking into account the non-linear, hyperbolic relation between immunoglobulin (Ig)- and albumin-quotients, rather than using the linear IgG index or IgG synthesis rate. Controls should include materials with values within the reference ranges (IgM: 0.5-1.5 mg/l; IgA: 1-3 mg/l; IgG: 10-30 mg/l and albumin: 100-300 mg/l). The physiological, methodological and clinical significance of CSF/serum quotients is reviewed. We confirmed the previous consensus on oligoclonal IgG, in particular the usefulness of the five typical interpretation patterns. The group compared current external and internal quality assurance schemes and encouraged all members to maintain national or local traditions. Values for acceptable imprecision in the CSF quality assurance are proposed.
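The quotient arithmetic the consensus refers to can be made concrete with a short sketch. The following minimal Python example is illustrative only: the quotient and IgG-index calculations follow their standard definitions, while the hyperbolic upper reference limit uses Reiber's commonly cited constants, which are an assumption here rather than part of this consensus text.

def quotient(csf_mg_l, serum_g_l):
    # CSF/serum quotient; serum proteins are measured in g/l, CSF in mg/l,
    # so convert serum to mg/l before dividing.
    return csf_mg_l / (serum_g_l * 1000.0)

def igg_index(q_igg, q_alb):
    # The linear IgG index the consensus advises against relying on.
    return q_igg / q_alb

def q_lim_igg(q_alb):
    # Hyperbolic upper reference limit for QIgG as a function of QAlb
    # (Reiber's formulation; the constants are an assumption here).
    return 0.93 * (q_alb**2 + 6e-6) ** 0.5 - 1.7e-3

q_alb = quotient(csf_mg_l=250.0, serum_g_l=40.0)   # ~6.3e-3
q_igg = quotient(csf_mg_l=30.0, serum_g_l=10.0)    # ~3.0e-3
print(igg_index(q_igg, q_alb))                     # linear index, ~0.48
print(q_igg > q_lim_igg(q_alb))                    # intrathecal synthesis? False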
package com.dashradar.dashdhttpconnector.dto;

public class MempoolInfoDTO {

    long size; // number of transactions in mempool
    long bytes;
    long usage;
    long maxmempool;
    double mempoolminfee;

    public long getSize() {
        return size;
    }

    public void setSize(long size) {
        this.size = size;
    }

    public long getBytes() {
        return bytes;
    }

    public void setBytes(long bytes) {
        this.bytes = bytes;
    }

    public long getUsage() {
        return usage;
    }

    public void setUsage(long usage) {
        this.usage = usage;
    }

    public long getMaxmempool() {
        return maxmempool;
    }

    public void setMaxmempool(long maxmempool) {
        this.maxmempool = maxmempool;
    }

    public double getMempoolminfee() {
        return mempoolminfee;
    }

    public void setMempoolminfee(double mempoolminfee) {
        this.mempoolminfee = mempoolminfee;
    }
}
export enum Role {
  USER = "User",
  ADMIN = "Admin",
  VOLUNTEER = "Volunteer",
  DONOR = "Donor",
}

export enum Status {
  APPROVED = "Approved",
  PENDING = "Pending",
  REJECTED = "Rejected",
}

export enum DayPart {
  EARLY_MORNING = "Early Morning (12am - 6am)",
  MORNING = "Morning (6am - 11am)",
  AFTERNOON = "Afternoon (11am - 4pm)",
  EVENING = "Evening (4pm - 9pm)",
  NIGHT = "Night (9pm - 12am)",
}

export enum Frequency {
  ONE_TIME = "One time",
  DAILY = "Daily",
  WEEKLY = "Weekly",
  MONTHLY = "Monthly",
}

export const Categories = new Set([
  "Dry packaged goods",
  "Non-perishables",
  "Fresh produce",
  "Bread and baked goods",
  "Oil, spreads, and seasoning",
  "Tea and coffee",
  "Frozen meals",
  "Prepared meals",
  "Non-alcoholic drinks and juices",
  "Essential items (masks, hand sanitizer, bags)",
  "Hygiene products (tampons, pads, soap, etc.)",
]);

export const donationSizeDescriptions = new Map<string, string>([
  ["Small", "Fills less than a shelf of the fridge/pantry"],
  ["Medium", "Approximately fills one shelf of the fridge/pantry"],
  ["Large", "Approximately fills two shelves of the fridge/pantry"],
  [
    "Extra-large",
    "Approximately fills four shelves of the fridge/pantry (full capacity)",
  ],
]);

export type Token = {
  accessToken: string;
  refreshToken: string;
};

export type UserDTO = {
  id: string;
  firstName: string;
  lastName: string;
  email: string;
  role: Role;
  phoneNumber: string;
};

export type DonorDTO = {
  id: string;
  userId: string;
  businessName: string;
  facebookLink?: string;
  instagramLink?: string;
};

export type VolunteerDTO = {
  id: string;
  userId: string;
  status: Status;
};

export type ContentDTO = {
  id: string;
  foodRescueDescription: string;
  foodRescueUrl: string;
  checkinDescription: string;
  checkinUrl: string;
};

export type UserDonorDTO = UserDTO & DonorDTO;

export type CreateDonorDTO = Omit<DonorDTO, "id">;

export type CreateUserDTO = Omit<UserDTO, "id"> & { password: string };

export type UpdateUserDTO = Omit<UserDTO, "id">;

export type RegisterUserDTO = Omit<CreateUserDTO, "role">;

export type AuthDTO = Token & UserDTO;

export type UserVolunteerDTO = UserDTO & VolunteerDTO;

export type UpdateVolunteerDTO = Omit<VolunteerDTO, "id" | "userId">;

export type CreateContentDTO = Omit<ContentDTO, "id">;

export type UpdateContentDTO = CreateContentDTO;

export type SchedulingDTO = {
  id: string;
  donorId: string;
  categories: string[];
  size?: string;
  isPickup: boolean;
  pickupLocation?: string | null;
  dayPart: DayPart;
  startTime: Date;
  endTime: Date;
  status: Status;
  frequency: Frequency;
  recurringDonationId?: string | null;
  recurringDonationEndDate?: Date | null;
  volunteerNeeded: boolean;
  volunteerTime?: string | null;
  notes?: string;
  volunteerId?: string | null;
};

export type CreateSchedulingDTO = Omit<SchedulingDTO, "id">;

export type UpdateSchedulingDTO = Partial<
  Omit<SchedulingDTO, "id" | "donorId">
>;

export type UpdateDonorDTO = Omit<DonorDTO, "id" | "userId">;

export type Letters = "A" | "B" | "C" | "D";

export type NodemailerConfig = {
  service: "gmail";
  auth: {
    type: "OAuth2";
    user?: string;
    clientId?: string;
    clientSecret?: string;
    refreshToken?: string;
  };
};

export type SignUpMethod = "PASSWORD" | "GOOGLE";

export type CheckInDTO = {
  id: string;
  startDate: Date;
  endDate: Date;
  notes?: string;
  volunteerId?: string | null;
  isAdmin?: boolean;
};

export type CreateCheckInDTO = Omit<CheckInDTO, "id">;

export type UpdateCheckInDTO = Partial<Omit<CheckInDTO, "id">>;

export enum ShiftType {
  CHECKIN = "checkIn",
  SCHEDULING = "scheduling",
}

export type SchedulingDTOWithShiftType = SchedulingDTO & {
  type: ShiftType.SCHEDULING;
};

export type CheckInDTOWithShiftType = CheckInDTO & { type: ShiftType.CHECKIN };

export type DTOTypes = Record<
  string,
  Date | string | string[] | boolean | number | null | undefined
>;
import numpy as np

def prv_axis(dcm):
    # Principal rotation vector (axis + angle) from a direction cosine matrix.
    # prv_angle() is assumed to be defined alongside this function; note the
    # factor is singular when phi is 0 or pi (sin(phi) == 0).
    phi = prv_angle(dcm)
    factor = 1. / (2. * np.sin(phi))
    e1 = factor * (dcm[1][2] - dcm[2][1])
    e2 = factor * (dcm[2][0] - dcm[0][2])
    e3 = factor * (dcm[0][1] - dcm[1][0])
    evec = np.array([e1, e2, e3])
    return evec, phi
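# A minimal sketch of the companion `prv_angle` this function assumes, plus a
# usage example. The trace-based angle formula and the DCM sign convention
# below are assumptions consistent with the off-diagonal differences above.
def prv_angle(dcm):
    # Principal rotation angle from the DCM trace: cos(phi) = (trace - 1) / 2.
    return np.arccos((np.trace(np.asarray(dcm)) - 1.0) / 2.0)

# Usage: a single-axis DCM for a 30-degree rotation about the third axis.
theta = np.radians(30.0)
c, s = np.cos(theta), np.sin(theta)
dcm = np.array([[c, s, 0.0],
                [-s, c, 0.0],
                [0.0, 0.0, 1.0]])
evec, phi = prv_axis(dcm)
print(evec)             # ~ [0. 0. 1.]
print(np.degrees(phi))  # ~ 30.0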
module Graphics.Proc.Lib.Typography.Attributes(
  -- textSize
) where

{-
import Graphics.Rendering.FTGL

import Graphics.Proc.Core
import Graphics.Proc.Lib.Typography.Display

textSize :: Int -> Pio ()
textSize size = onFont $ \font -> setFontSize font size

-- consider using these types for alignment

data TextAlignX = AlignLeft | AlignRight | AlignCenterX
data TextAlignY = AlignTop | AlignBottom | AlignCenterY | AlignBaseline

textAlignX :: TextAlignX -> Draw
textAlignX tx = undefined

textAlign :: TextAlignX -> TextAlignY -> Draw
textAlign tx ty = undefined
-}
package comm

import (
	"bytes"
	"crypto/md5"
	"fmt"

	"github.com/google/go-github/v30/github"
	"issue-man/config"
	"issue-man/global"
	"issue-man/tools"
)

type File struct {
	PrNumber       int
	MergedAt       string
	MergeCommitSHA string
	CommitFile     *github.CommitFile
}

// Sync handles a file that needs to be synchronized.
func (f File) Sync(include config.Include, existIssue, preIssue *github.Issue) {
	// The operations here refer to file operations; the values come from GitHub.
	// Whether an issue exists, and which method to call, requires additional checks.
	const (
		ADD    = "added"
		MODIFY = "modified"
		RENAME = "renamed"
		REMOVE = "removed"
	)
	switch *f.CommitFile.Status {
	// Update the issue; create it if it does not exist.
	case ADD, MODIFY:
		if existIssue != nil {
			// update the issue
			f.update(existIssue)
		} else {
			// create the issue
			f.create(include)
		}
	// File renamed/moved.
	case RENAME:
		f.rename(include, existIssue, preIssue)
	// File removed.
	case REMOVE:
		f.remove(existIssue)
	default:
		global.Sugar.Warnw("unknown status",
			"file", f,
			"status", *f.CommitFile.Status)
	}
}

// create creates an issue, without commenting.
func (f File) create(include config.Include) {
	// Create a generic issue, initialized and classified according to the create configuration.
	_, _ = tools.Issue.Create(tools.Generate.NewIssue(include, *f.CommitFile.Filename))
	// No comment is needed.
}

// update updates an issue and comments on it.
func (f File) update(existIssue *github.Issue) (*github.Issue, error) {
	// update
	issue := tools.Generate.UpdateIssue(false, f.CommitFile.GetFilename(), *existIssue)

	// For issues with assignees, add and remove some labels;
	// otherwise, leave the issue labels unchanged.
	if len(existIssue.Assignees) > 0 {
		issue.Labels = tools.Convert.SliceAdd(issue.Labels, global.Conf.Repository.Spec.Workspace.Detection.AddLabel...)
		issue.Labels = tools.Convert.SliceRemove(issue.Labels, global.Conf.Repository.Spec.Workspace.Detection.RemoveLabel...)
	}

	updatedIssue, err := tools.Issue.EditByIssueRequest(existIssue.GetNumber(), issue)
	if err != nil {
		return nil, err
	}

	// comment
	_ = f.comment(updatedIssue)
	return updatedIssue, nil
}

func (f File) commentVerify(issue *github.Issue) bool {
	if issue == nil || issue.Labels == nil {
		return false
	}
	return tools.Verify.HasAnyLabel(*tools.Convert.Label(issue.Labels), global.Conf.Repository.Spec.Workspace.Detection.NeedLabel...) &&
		len(issue.Assignees) > 0
}

// comment calls the API with the issue's number and assignees to post a comment.
// The comment body is a notice about the related file changes.
func (f File) comment(issue *github.Issue) error {
	// Skip commenting on issues that do not meet the requirements.
	if !f.commentVerify(issue) {
		return nil
	}
	bf := bytes.Buffer{}
	bf.WriteString(fmt.Sprintf("Pull Request: https://github.com/%s/%s/pull/%d",
		global.Conf.Repository.Spec.Source.Owner,
		global.Conf.Repository.Spec.Source.Repository,
		f.PrNumber))
	bf.WriteString(fmt.Sprintf("\n\nDiff: https://github.com/%s/%s/pull/%d/files#diff-%s",
		global.Conf.Repository.Spec.Source.Owner,
		global.Conf.Repository.Spec.Source.Repository,
		f.PrNumber,
		fmt.Sprintf("%x", md5.Sum([]byte(f.CommitFile.GetFilename())))))
	bf.WriteString(fmt.Sprintf("\n\nCommit SHA: [%s](https://github.com/%s/%s/blob/%s/%s)",
		f.MergeCommitSHA,
		global.Conf.Repository.Spec.Source.Owner,
		global.Conf.Repository.Spec.Source.Repository,
		f.MergeCommitSHA,
		f.CommitFile.GetFilename(),
	))
	bf.WriteString(fmt.Sprintf("\n\nMerged At: %s", f.MergedAt))
	bf.WriteString(fmt.Sprintf("\n\nFilename: %s", f.CommitFile.GetFilename()))
	if f.CommitFile.GetPreviousFilename() != "" {
		bf.WriteString(fmt.Sprintf("\n\nPrevious Filename: %s", f.CommitFile.GetPreviousFilename()))
	}
	bf.WriteString(fmt.Sprintf("\n\nStatus: %s", f.CommitFile.GetStatus()))
	bf.WriteString("\n\nAssignees: ")
	for _, v := range issue.Assignees {
		bf.WriteString(fmt.Sprintf("@%s ", v.GetLogin()))
	}
	tools.Issue.Comment(issue.GetNumber(), bf.String())
	return nil
}

// remove removes a file from an issue.
func (f File) remove(issue *github.Issue) {
	if issue == nil {
		global.Sugar.Warnw("remove exist file issue",
			"status", "has no match issue",
			"file", f)
		return
	}
	updatedIssue, err := tools.Issue.EditByIssueRequest(issue.GetNumber(),
		tools.Generate.UpdateIssue(true, f.CommitFile.GetPreviousFilename(), *issue))
	if err != nil {
		return
	}
	// comment
	_ = f.comment(updatedIssue)
}

// rename handles renamed files, which requires:
// 1. updating/creating the new issue;
// 2. removing the corresponding file from the old issue.
func (f File) rename(include config.Include, existIssue, preIssue *github.Issue) {
	if existIssue != nil {
		// If preIssue is nil, only update existIssue.
		// This edge case rarely occurs.
		if preIssue == nil {
			f.update(existIssue)
			global.Sugar.Warnw("renamed file issue",
				"status", "has no match previous issue",
				"filename", f.CommitFile.GetFilename(),
				"previous filename", f.CommitFile.GetPreviousFilename(),
			)
			return
		}
		if existIssue.GetNumber() == preIssue.GetNumber() {
			// existIssue and preIssue are the same issue, so the update and
			// the removal can be done in a single request.
			updatedIssue, err := tools.Issue.EditByIssueRequest(existIssue.GetNumber(),
				tools.Generate.UpdateIssueRequest(true, f.CommitFile.GetPreviousFilename(),
					tools.Generate.UpdateIssue(false, *f.CommitFile.Filename, *existIssue)))
			if err != nil {
				return
			}
			// comment
			_ = f.comment(updatedIssue)
		} else {
			// existIssue and preIssue are different issues, so the update
			// and the removal are done separately.
			f.update(existIssue)
			f.remove(preIssue)
		}
	} else {
		// existIssue == nil: create the issue and remove the old filename from preIssue.
		f.create(include)
		// try to remove
		if preIssue != nil {
			f.remove(preIssue)
		}
	}
}
from decimal import Decimal, InvalidOperation

def _decode_icu_plurals(data):
    # Parse an ICU plural pattern like "one {# item} other {# items}" into a
    # {keyword: message} dict. ICU_KEYWORDS and _icu_decode are module-level
    # helpers assumed to exist.
    result = {}
    OUTER, TRANS, TRANS_VAR = 0, 1, 2

    def _msg_generator(chars):
        brace_level = 0
        buf = []
        for x in chars:  # iterate over the parameter, not the enclosing `data`
            buf.append(x)
            if x == '{':
                brace_level += 1
                if brace_level > TRANS_VAR:
                    raise ValueError('Too many curly brace levels')
                if brace_level == TRANS:
                    buf.pop()
                    yield True, ''.join(buf).strip()
                    buf = []
            if x == '}':
                brace_level -= 1
                if brace_level < OUTER:
                    raise ValueError('Unexpected %s' % x)
                if brace_level == OUTER:
                    buf.pop()
                    yield False, ''.join(buf)
                    buf = []
        if brace_level != OUTER:
            raise ValueError('Mismatched { } braces')

    last_keyword = None
    for is_keyword, token in _msg_generator(data):
        if not is_keyword:
            if last_keyword is None:
                raise ValueError('Expected a keyword')
            result[last_keyword] = _icu_decode(token)
            last_keyword = None
        else:
            if token[0] == '=':
                try:
                    last_keyword = "=%s" % Decimal(token[1:])
                except InvalidOperation:
                    raise ValueError('Expected keyword: "=<number>", got: %s' % token)
            else:
                if token not in ICU_KEYWORDS:
                    raise ValueError('Expected %s or "=<number>", got: "%s"'
                                     % (', '.join(ICU_KEYWORDS), token))
                last_keyword = ICU_KEYWORDS[token]
    return result
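# A minimal sketch of the module context `_decode_icu_plurals` assumes:
# `ICU_KEYWORDS` mapping CLDR plural keywords to canonical keys, and
# `_icu_decode` unescaping a single message. Both are assumptions modeled on
# how they are used above, followed by a usage example.
ICU_KEYWORDS = {k: k for k in ("zero", "one", "two", "few", "many", "other")}

def _icu_decode(message):
    # Minimal stand-in: ICU escapes apostrophes by doubling them.
    return message.replace("''", "'")

print(_decode_icu_plurals("one {# item} =2 {a pair} other {# items}"))
# -> {'one': '# item', '=2': 'a pair', 'other': '# items'}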
/**
 * Go to the next handler if the given next is non-null. The reason for this is that
 * middleware can provide its own next instance if it exists; if it does exist,
 * the server hasn't been able to find the handler.yml.
 *
 * @param httpServerExchange
 *            The current request's server exchange.
 * @param next
 *            The next HttpHandler to go to if it's not null.
 * @throws Exception exception
 */
public static void next(HttpServerExchange httpServerExchange, HttpHandler next) throws Exception {
    if (next != null) {
        next.handleRequest(httpServerExchange);
    } else {
        next(httpServerExchange);
    }
}
def handle_post(self, request, user, *args, **kwargs):
    try:
        self.log.info('Search Traffic Return by name')

        if not has_perm(user, AdminPermission.OPTION_VIP, AdminPermission.READ_OPERATION):
            self.log.error(
                u'User does not have permission to perform the operation.')
            raise UserNotAuthorizedError(None)

        xml_map, attrs_map = loads(request.raw_post_data)

        networkapi_map = xml_map.get('networkapi')
        if networkapi_map is None:
            return self.response_error(3, u'There is no value to the networkapi tag of XML request.')

        trafficreturn_map = networkapi_map.get('trafficreturn_opt')
        if trafficreturn_map is None:
            return self.response_error(3, u'There is no value to the trafficreturn_opt tag of XML request.')

        name_trafficreturn = trafficreturn_map.get('trafficreturn')
        if name_trafficreturn is None:
            self.log.error(u'Missing traffic return option name')
            return self.response_error(287)

        queryset = OptionVip.objects.filter()

        if name_trafficreturn is not None:
            tipo_opcao = 'Retorno de trafego'
            queryset = queryset.filter(tipo_opcao=tipo_opcao)
            queryset = queryset.filter(nome_opcao_txt=name_trafficreturn)

        if len(queryset) == 0:
            raise OptionVipError(None)

        evips = []
        for evip in queryset:
            request_evip_map = {}
            request_evip_map['id'] = evip.id
            request_evip_map['tipo_opcao'] = evip.tipo_opcao
            request_evip_map['nome_opcao'] = evip.nome_opcao_txt
            evips.append(request_evip_map)

        self.log.info(str(evips))

        return self.response(dumps_networkapi({'trafficreturn': evips}))

    except UserNotAuthorizedError:
        return self.not_authorized()
    except XMLError, x:
        self.log.error(u'Error reading the XML request.')
        return self.response_error(1, x)
    except InvalidValueError, e:
        return self.response_error(269, e.param, e.value)
    except OptionVipError:
        return self.response_error(1)
    except Exception, e:
        return self.response_error(1)
# For each of t test cases, print YES if n can be written as 7*i + 3*j
# with non-negative integers i and j, otherwise NO.
t = int(input())
for _ in range(t):
    n = int(input())
    # Try every possible count of sevens; the remainder must be divisible by 3.
    possible = any((n - 7 * i) % 3 == 0 for i in range(n // 7 + 1))
    print("YES" if possible else "NO")
// Copyright 2022 <NAME>
// SPDX-License-Identifier: Apache-2.0

/// Module containing the Block document model.
mod block;
/// Module containing the LedgerUpdate model.
mod ledger_update;
/// Module containing the Milestone document model.
mod milestone;
/// Module containing the Output document model.
mod output;
/// Module containing information about the network and state of the node.
mod status;

pub use self::milestone::SyncData;
class Product:
    """
    Represents a single grocery product
    """

    def __init__(self, id, upc, brand, description, image, size, price):
        self.id = id
        self.upc = upc
        self.brand = brand
        self.description = description
        self.image = image
        self.size = size
        self.price = price

    def __str__(self):
        verbose = False
        description = f"({self.brand}) {self.description}"
        if self.size:
            description += f" - {self.size}: ${self.price}"
        if verbose:
            description += f"\nProduct ID: {self.id}"
            description += f"\nUPC: {self.upc}"
            description += f"\nImage: {self.image}"
        return description

    def __repr__(self):
        return self.__str__()

    @classmethod
    def from_json(cls, obj):
        id = obj.get("productId")
        upc = obj.get("upc")
        brand = obj.get("brand")
        description = obj.get("description")
        image = _get_image_from_images(obj.get("images"))
        size = _get_product_size(obj.get("items"))
        price = _get_product_price(obj.get("items"))
        return Product(id, upc, brand, description, image, size, price)
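# A runnable sketch of the module-level helpers `from_json` relies on, plus a
# usage example. The helper bodies and the JSON payload shape below are
# assumptions modeled on the keys read in `from_json`, not the real API schema.
def _get_image_from_images(images):
    # Return the first image URL found, if any (structure assumed).
    if not images:
        return None
    sizes = images[0].get("sizes", [])
    return sizes[0].get("url") if sizes else None

def _get_product_size(items):
    return items[0].get("size") if items else None

def _get_product_price(items):
    price = items[0].get("price", {}) if items else {}
    return price.get("regular")

sample = {
    "productId": "0001111041700",
    "upc": "0001111041700",
    "brand": "Kroger",
    "description": "2% Reduced Fat Milk",
    "images": [{"sizes": [{"url": "https://example.com/milk.jpg"}]}],
    "items": [{"size": "1 gal", "price": {"regular": 2.99}}],
}
print(Product.from_json(sample))  # (Kroger) 2% Reduced Fat Milk - 1 gal: $2.99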
Regulated and Non-Regulated Mycotoxin Detection in Cereal Matrices Using an Ultra-High-Performance Liquid Chromatography High-Resolution Mass Spectrometry (UHPLC-HRMS) Method

Cereals represent a widely consumed food commodity that might be contaminated by mycotoxins, resulting not only in potential consumer health risks upon dietary exposure but also in significant financial losses due to contaminated batch disposal. Thus, continuous improvement of the performance characteristics of methods to enable effective monitoring of such contaminants in the food supply is highly needed. In this study, an ultra-high-performance liquid chromatography coupled to a hybrid quadrupole orbitrap mass analyzer (UHPLC-q-Orbitrap MS) method was optimized and validated in wheat, maize and rye flour matrices. Nineteen analytes were monitored, including both regulated mycotoxins, e.g., ochratoxin A (OTA) or deoxynivalenol (DON), and non-regulated mycotoxins, such as ergot alkaloids (EAs), which are analytes that are expected to be regulated soon in the EU. Low limits of quantification (LOQ) at the part per trillion level were achieved as well as wide linear ranges (four orders of magnitude) and recovery rates within the 68-104% range. Overall, the developed method attained fit-for-purpose results and highlights the applicability of high-resolution mass spectrometry (HRMS) detection in mycotoxin food analysis.

Introduction

Cereals represent a food commodity with a huge impact on the human and livestock diet, providing a significant amount of protein globally; indeed, it is expected that their production will expand by up to 13% until 2027. Nevertheless, cereal matrices (in combination with environmental conditions) provide an excellent substrate for fungal growth, which, in turn, can result in contamination by toxic secondary fungal metabolites, the so-called mycotoxins. Unfortunately, mycotoxin-contaminated foodstuffs are commonly detected in the food chain, both impacting consumer health, as in the recent intoxication cases due to deoxynivalenol (DON) in China, and jeopardizing market integrity, as in the case of the aflatoxin M1 scandal in some Balkan states. Therefore, the development of analytical methods for accurate and specific mycotoxin detection in cereals is very important. A large number of analytical methods for mycotoxin determination have been developed, with immunoassays and chromatographic analysis being the most common analytical choices. In the first case, immunoassays are based on antibody recognition of a selected mycotoxin and represent an affordable and simple approach that can be applied even at the point-of-need (PON). Nevertheless, most mycotoxin immunoassays are singleplex, meaning that only one analyte can be detected per run; they also face specificity problems due to cross-reactivity with compounds structurally similar to the analyte, and their results are commonly (semi-)quantitative. Consequently, they are mostly preferred to deliver rapid results that need to be confirmed by instrumental analysis. In terms of chromatographic methods, liquid chromatography tandem mass spectrometry (LC-MS/MS) is the gold standard in mycotoxin analysis, providing excellent performance characteristics. This approach is widely preferred in the regulatory control of such contaminants as it fulfills all the requirements of the available legislation, such as Decision 2002/657/EC on the performance of analytical methods and Regulation EC 1881/2006 on mycotoxin maximum levels (MLs).
However, a trend towards using high-resolution MS (HRMS) methods, such as time-of-flight (ToF) MS or hybrid quadrupole orbitrap MS (q-Orbitrap), has been noticed. These MS analyzers, besides achieving satisfactory targeted analyte screening (fulfilling regulatory requirements), also permit analyte detection without extensive method tuning and allow retrospective data mining, features of utmost importance considering the occurrence of new or emerging mycotoxins (or some of their transformation products), i.e., analytes for which analytical standards are commonly not available. In this study, an ultra-high-performance liquid chromatography coupled to a hybrid quadrupole orbitrap mass analyzer (UHPLC-q-Orbitrap MS) method was optimized and validated in wheat, maize and rye matrices. The analyte list contained 19 mycotoxins (Figure 1), namely, 3 regulated mycotoxins (ochratoxin A, deoxynivalenol and zearalenone) and 16 non-regulated mycotoxins, including 11 ergot alkaloids (EAs). In contrast to our recent study that focused on mycotoxin determination using ambient MS, in which the EA concentration was reported as a sum, in this case the EA epimers can be effectively identified and quantified. In addition, all the detected mycotoxins are considered compounds with significant toxicity, resulting in potential health effects upon certain dietary exposure. In detail, ochratoxin A (OTA) is related to hepatotoxic, teratogenic and immunotoxic effects, and the European Food Safety Authority (EFSA) Panel on Contaminants in the Food Chain (CONTAM Panel) recently compiled a risk assessment concluding that more exposure data are needed to better understand the in vivo impact of OTA on humans. Regarding mycotoxins produced by Fusarium species, deoxynivalenol (DON) and nivalenol (NIV), belonging to the type-B trichothecenes, induce ribotoxic stress, including inhibition of protein, DNA and RNA synthesis. Besides DON, its acetylated metabolites, namely, 3- and 15-acetyldeoxynivalenol (3-ADON, 15-ADON), are also analytes of high interest, as they can be absorbed more rapidly than DON and be converted to the parental form during digestion. In terms of zearalenone (ZEA), it has shown strong estrogenic and anabolic effects, whilst the T-2 and HT-2 toxins, the most prevalent type-A trichothecenes, inhibit protein synthesis and target liver and spleen functions (mostly the T-2 toxin). Last but not least, EAs produced by Claviceps species can cause ergotism, one of the oldest known human diseases caused by mycotoxins. All in all, the described analyte toxic potential and their occurrence in the food chain (see Section 2) indicate the need to monitor these analytes, and the present study provides an efficient and reliable analytical strategy to achieve it.
Results and Discussion

The development and validation of a fit-for-purpose method for the determination of 19 mycotoxins was achieved in the current study. Among them, three analytes were regulated, namely, DON, OTA and ZEA (Regulation EC 1881/2006), whilst only indicative levels for cereals and cereal products are available for the HT-2 and T-2 toxins (Recommendation 2013/165/EU).
Importantly, although MLs were set for DON, OTA and ZEA, several exceedances were reported in the Rapid Alert System for Food and Feed (RASFF) EU portal (https://webgate.ec.europa.eu/rasff-window/screen/search, last accessed 11 October 2021) for all three analytes around Europe, including some in the Czech Republic. In terms of EAs, these are common rye contaminants, produced by Claviceps purpurea, but other cereals, such as wheat, can also be contaminated by them. Despite their being non-regulated in the EU, the German Federal Institute for Risk Assessment (BfR) has issued "guidance levels" on EAs in cereal flours, and the Standing Committee on Plants, Animals, Food and Feed of the European Commission recently discussed (February 2021) the enforcement of MLs for ergot alkaloids (https://ec.europa.eu/food/system/files/2021-04/reg-com_toxic_20210226_sum.pdf, last accessed 11 October 2021). Furthermore, EFSA recently launched (February 2021) a call for data collection of chemical contaminant occurrence in the food chain, including ergot alkaloids (https://www.efsa.europa.eu/en/call/call-continuous-collection-chemical-contaminants-occurrence-data-0, last accessed 11 October 2021). It is worth noting that although LC-HRMS methods for mycotoxin analysis in cereals were published earlier (see Introduction), they either did not target all the ergot alkaloids considered for EU regulation or their detectability was worse in comparison to the presented study. In fact, excellent analytical performance was achieved for all the analytes (see Section 2.1), and the method trueness was further demonstrated by analyzing the proficiency testing (PT) samples, attaining successful results. In the last part of this section (see Section 2.2), a critical comparison towards already established LC-based methods is presented to highlight the merits and challenges of the proposed in-house method.

UHPLC-q-Orbitrap MS Method Optimization and Validation

One of our objectives was to develop a high-throughput method aiming to deliver a highly effective analytical tool intensifying mycotoxin testing. All 19 mycotoxins targeted in our study were eluted in less than 7 min in both polarity modes using a UHPLC-q-Orbitrap MS system. Mycotoxins were detected after fragmentation (parallel reaction monitoring, PRM mode), and normalized collision energies (NCEs) were optimized for each analyte in the range of NCE 10-100%, with a step of 10%. The optimal NCE was selected to provide the highest possible signal for at least two fragment ions (Table 1). Importantly, all analytes were confirmed following the criteria stated in the updated Directorate-General for Health and Food Safety (SANTE) guidelines (SANTE/12682/2019) on method validation for pesticide residue analysis in food and feed, as there are no such guidelines for mycotoxin analysis. The illustrative chromatogram of the wheat matrix-matched standard (Figure 2) depicts the efficient separation and sharp peak shape in most of the cases.
The possibility to use isotopically labeled internal standards (ISTDs) was not adopted since the cost of the method would have grown significantly, considering that this is a multi-mycotoxin method. Finally, to further demonstrate method trueness, we analyzed PT samples obtained within the FAPAS (FERA, York, UK) and RomerLabs (Romer Labs, Tulln, Austria) schemes. Seven The multi-mycotoxin method was validated in wheat (Table 2), rye (Table 3) and maize flour (Table 4) matrices. Significantly, the attained LOQs were below the MLs set by the current EU legislation in cereal flours (Regulation EC 1881/2006). Satisfactory trueness expressed as recovery rate was achieved for all the analytes. In detail, the recoveries of the 19 analyzed mycotoxins at two spiking levels were in the range of 72-104% (L1) and 80-99% (L2) for wheat, 68-98% (L1) and 75-99% (L2) for maize and 69-102% (L1) and 75-104% (L2) for rye, respectively. Method repeatability expressed as RSD% fluctuated in the following range per case: 1-10% (L1) and 1-10% (L2) for wheat, 2-6% (L1) and 1-8% (L2) for maize and 1-9% (L1) and 1-7% (L2) for rye. In terms of method detectability, an extremely low LOQ was attained for OTA, ZEA and the 11 ergot alkaloids, specifically 0.5 µg kg −1 , while in the case of trichothecenes, the LOQs were between 1 and 50 µg kg −1 . Linear responses were acquired in all cases in the range LOQ-1000 µg kg −1 , with a correlation coefficient (r 2 ) of >0.999. The highest matrix effects % (MEs%) were noticed in rye extracts followed by maize and wheat extracts for all the studied analytes (Table 5). Specifically, considerable signal suppression was observed especially in the ESI (−), highlighting the need for utilizing matrix-matched calibration curves to compensate for the matrix effects. Such differences were expected as a generic sample preparation protocol was used and apparently the different cereals tested have different composition. Nevertheless, the already discussed satisfactory performance characteristics of the method indicate that such a generic sample preparation is fit for purpose. The possibility to use isotopically labeled internal standards (ISTDs) was not adopted since the cost of the method would have grown significantly, considering that this is a multi-mycotoxin method. Finally, to further demonstrate method trueness, we analyzed PT samples obtained within the FAPAS (FERA, York, UK) and RomerLabs (Romer Labs, Tulln, Austria) schemes. Seven different PT cereal samples were measured (Table 6), including 5 wheat and 2 maize flour samples, achieving acceptable results (z-score within the ±2 range in all cases). Critical Comparison towards LC-Based Methods for Mycotoxin Detection To compare the results attained by the in-house UHPLC-q-Orbitrap MS method towards already published studies, a critical discussion on important method characteristics for mycotoxin detection is presented. Given this context, it is needed to emphasize that the sample processing prior to instrumental analysis plays an important role. Focusing on studies published during the last four years, Quick, Easy, Cheap, Effective, Rugged, and Safe (QuEChERS) extraction has been commonly used, proving its wide acceptance in the field (see Table 7). Nevertheless, cereal matrices need further clean-up due to their high starch content and high amount of unsaponifiable lipophilic compounds, compounds that can decrease the analytical signal. 
In the reviewed literature, dispersive solid-phase extraction (dSPE) was applied as a clean-up step utilizing various sorbents. In detail, both conventional sorbents, such as primary secondary amine (PSA) or zirconia-based (z-sep) ones, and newly introduced sorbents, such as MDN@Fe₃O₄ (a magnetic sorbent adsorbing hydrophobic and hydrophilic interferences), were used, achieving great analytical performance in every case (Table 7). Alternatively, immunoaffinity column (IAC) clean-up was also used, acquiring selective analyte recognition due to the use of antibodies, for example in the case of DON. However, it needs to be stated that IAC commonly reduces significantly the portfolio of analytes that can be detected (due to its selectivity) in a single run, and thus such an approach is not preferable for multi-mycotoxin methods. In contrast to the aforementioned cases, in our study a freezing-out approach was used to eliminate the co-extracted matrix components such as lipids and other lipophilic compounds. In this way, a simple and cost-effective sample preparation protocol was applied. Another important aspect impacting analytical performance is the method detector. Although studies using conventional detectors, for example the fluorescence detector (FLD), are still being reported, MS detectors have been the most popular option, featuring unequivocal analyte identification and quantification. On the downside, MS detectors are costly, restricting their utilization in cases of limited resources, a fact that can pose a potential health threat to the population of such areas due to limited food testing (e.g., in African states). The application of both low-resolution MS (LRMS) and high-resolution MS (HRMS) was reported for the determination of both regulated and emerging mycotoxins. In both cases, low LOQs, wide linear ranges and accurate results were acquired, characteristics of utmost importance in the food safety field. Although using LRMS detectors, such as the triple quadrupole (QqQ), has been the gold standard, this preference is related to certain limitations. Considering that strong MEs (depending on the food matrix) are commonly faced when using ESI, the lack of isotopically labelled mycotoxin ISTDs poses a challenge for accurate quantification, especially in the case of ESI-QqQ. Apparently, the use of matrix-matched calibration curves can partially solve this problem, but better results can be attained by using nano-LC systems or HRMS detection. Nano-LC permits high dilution of extracts, significantly decreasing the amount of ionizable matrix components; for example, a dilution factor of 40 was applied in a recent study to detect mycotoxins in various cereals. In the case of HRMS, the accurate mass measurement (<5 ppm) and high resolution (>20,000 full width at half maximum (FWHM)) allow mycotoxin identification/quantification without (necessarily) the need for isotopically labelled ISTDs. This is clearly demonstrated in our study, as excellent analytical performance was achieved, including LOQs at the part per trillion (ppt) level and a wide linear range (four orders of magnitude), without using an isotopically labelled ISTD. In addition, HRMS enables retrospective data analysis, a feature that can be useful for conjugated mycotoxin detection. Conjugated mycotoxins are mycotoxin metabolites, usually connected to hydrophilic groups, formed during metabolism in order to reduce the parent compound toxicity.
However, such attached functional groups, e.g., glycosylic or sulfate moieties, are likely to be enzymatically cleaved during digestion upon consumption, resulting in additional dietary exposure to the precursor toxic mycotoxin. Clearly, the use of HRMS methods for conjugated mycotoxin detection, for example by accurately screening such an analyte's mass, is the only available option considering the lack of such analytical standards. In conclusion, the developed UHPLC-q-Orbitrap MS method attained satisfactory results, comparable to or even better than those of published studies, while its scope can be expanded to non-targeted screening.

Conclusions

The development and validation of a UHPLC-q-Orbitrap MS method for the detection of 19 mycotoxins in cereal matrices were presented. QuEChERS extract clean-up was performed by freezing-out, a simple and cost-efficient approach that was able to reduce co-extracted lipid matrix components. Importantly, the method provided rapid results (7 min in both polarity modes) and the attained LOQs were lower than the regulatory limits for all three regulated mycotoxins (OTA, DON and ZEA), indicating the method's potential to be implemented in official food-control schemes. In terms of the non-regulated mycotoxins, excellent detectability was also achieved, a characteristic that can be useful in the effort to gather more occurrence data for them. Considering that there is discussion (in the EU) on setting MLs for some currently non-regulated mycotoxins, such as EAs, the current study acts proactively and delivers a method for their potential future regulatory control. In terms of MEs, it was possible to quantify the analyte content accurately and precisely without employing isotopically labelled ISTDs, due to the use of matrix-matched calibration curves. In conclusion, the presented study highlights the merits of HRMS in mycotoxin analysis and provides a comprehensive approach for the detection of high-interest analytes in cereals.

Cereal Flour Samples

Wheat, rye and maize flour samples were bought from supermarkets and outdoor markets around Prague. The absence of mycotoxins in the purchased matrices was confirmed, using the conditions described previously, prior to method development and validation. To externally evaluate the trueness of the UHPLC-q-Orbitrap MS method, samples from the following PT schemes were analyzed: 17161, 22146, 22166 FAPAS wheat flour samples; 22134, 04384 maize flour samples (FERA, York, UK) and CSSMY018-M20161DZO, CSSMY020-M21161DZO wheat flour samples (Romer Labs, Tulln, Austria).

Sample Preparation

To extract the analytes, an optimized QuEChERS-based approach was used. Two grams of a cereal sample were weighed in a 50 mL centrifuge tube and 10 mL of acidified water (0.2% formic acid, v/v) were added, mixed and left to soak into the matrix for at least 30 min. For the extraction, 10 mL of acetonitrile were dispensed, and samples were shaken for 30 min using a horizontal laboratory shaker (IKA Labortechnik, Staufen, Germany). To initiate phase separation, 4 g of magnesium sulphate (MgSO₄; Fluka, Buchs, Germany) and 1 g of sodium chloride (NaCl; Penta, Chrudim, Czech Republic) were added and the tube was vigorously hand-shaken for 1 min. Phase separation was fully achieved by centrifugation at 10,000 revolutions per minute (rpm) (Rotina 380R, Hettich, Tuttlingen, Germany) for 5 min.
In total, 5 mL of the supernatant were transferred into a 15 mL centrifuge tube and put into a freezer for 2 h to remove co-extracted matrix components, such as lipids. Finally, the cleaned-up top layer of the extract was transferred into a vial and was ready to be injected into the chromatographic system.

Ultra-High-Performance Liquid Chromatography Coupled to a Hybrid Quadrupole Orbitrap Mass Analyzer

An UltiMate™ 3000 ultra-high-performance liquid chromatograph (Thermo Scientific, Waltham, MA, USA) equipped with an Acquity UPLC® HSS T3 analytical column (100 × 2.1 mm, 1.8 µm; Waters, Milford, MA, USA) was used. Chromatographic conditions were adopted from our previous publication and slightly modified, as described below. Briefly, the column was held at 40 °C and the autosampler temperature was set at 10 °C. The mobile phases consisted of 5 mM ammonium formate and 0.2% formic acid, both in Milli-Q water (A) and in methanol (B), for positive electrospray ionization (ESI (+)), and of 5 mM ammonium acetate in Milli-Q water (C) and in methanol (D) for negative electrospray ionization (ESI (-)). Importantly, a minimal sample volume was needed in both polarity modes; in detail, 2 µL of the sample were injected into the system. Regarding ESI (+), the gradient started with 10% of B at 0.3 mL min⁻¹, followed by a linear change to 50% of B, and was finally set to 100% of B in 8 min. Before injecting the next sample, it was necessary to wash the column with 100% of B for 2 min and to recondition it for 2 min applying the initial conditions. In terms of ESI (-), the gradient conditions were (i) 10% of D with a flow of 0.3 mL min⁻¹; (ii) an increase to 50% of D after 1 min; and (iii) setting 100% of D to complete the chromatographic run. After completing the run, the chromatographic column was cleaned with 100% of D for 2 min and reconditioned for 2 min with the initial mobile phase composition. Detection of mycotoxins was carried out using a Q-Exactive Plus™ high-resolution tandem mass spectrometer (Thermo Scientific, Waltham, MA, USA) equipped with a quadrupole mass filter and an Orbitrap mass analyzer. An overview of the applied mass spectrometric settings, based on our previous study, is summarized in Table 8. The detection of ions was performed in PRM mode in both polarity modes. The exact masses of the target analyte fragments were calculated in the Xcalibur 4.2 software (Thermo Scientific, Waltham, MA, USA), together with retention times and normalized collision energies (NCEs). Regarding the detection conditions, the resolution was set at 17,500 FWHM (mass range m/z 50–1000), the maximum injection time (maxIT) was 50 ms and the automatic gain control target (AGC target) was equal to 1 × 10⁵. Lastly, the Xcalibur 4.2 software was utilized to control the instrument and evaluate the attained data.

UHPLC-q-Orbitrap MS Validation

The UHPLC-q-Orbitrap MS method performance characteristics were investigated for three cereal flour matrices. Wheat, rye and maize flour samples containing non-detectable concentrations of mycotoxins were used. Matrix-matched calibration standards in the range 0.1–200 ng mL⁻¹ (corresponding to 0.5–1000 µg kg⁻¹) were prepared by evaporation of a composite analytical standard (at 5 µg mL⁻¹) under a gentle nitrogen stream. Then, a blank matrix extract prepared according to the procedure described in Section 4.3 was used for analyte reconstitution. Solvent standards in acetonitrile were prepared in the same concentration range to express the degree of MEs. The following formula was used to calculate the ME%: ME% = (slope of matrix-matched calibration curve/slope of solvent calibration curve) × 100.
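As a cross-check on the quoted ranges (an illustrative back-calculation, not an additional formula from the study): the protocol extracts 2 g of sample with 10 mL of acetonitrile, i.e., 5 mL g⁻¹, so the extract and sample concentrations are related by

\[
c_{\text{sample}} = c_{\text{extract}} \times \frac{V_{\text{extract}}}{m_{\text{sample}}}
= 0.1\ \text{ng mL}^{-1} \times \frac{10\ \text{mL}}{2\ \text{g}}
= 0.5\ \mu\text{g kg}^{-1},
\]

which reproduces the stated equivalence of the 0.1–200 ng mL⁻¹ calibration range with 0.5–1000 µg kg⁻¹.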
For the determination of trueness and repeatability, spiking was conducted at two levels, 250 µg kg⁻¹ (level 1, L1) and 25 µg kg⁻¹ (level 2, L2), each in six replicates. Trueness, expressed as the recovery rate (R%), was calculated using the formula: R% = (peak area of spiked sample/peak area of matrix-matched standard) × 100. Repeatability was expressed as the relative standard deviation (RSD%) of these six replicates. Limits of quantification (LOQs) were determined as the lowest calibration points at which a peak was constructed from at least four scan points (no noise was present, owing to the high mass-resolving power). The required volume of the composite stock solution (at 5 µg mL⁻¹) was pipetted onto 2 g of a blank sample (in a 50 mL centrifuge tube). Then, samples were vigorously hand-shaken, left for 2 h to allow solvent evaporation, and further processed as described in Section 4.3.
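A minimal computational sketch of the trueness/repeatability arithmetic described above (the replicate values and function names below are hypothetical, for illustration only — not data from the study):

```python
import statistics

def recovery_percent(spiked_area: float, matrix_matched_area: float) -> float:
    """R% = (peak area of spiked sample / peak area of matrix-matched standard) * 100."""
    return spiked_area / matrix_matched_area * 100

def rsd_percent(values: list) -> float:
    """Repeatability as the relative standard deviation of replicate results."""
    return statistics.stdev(values) / statistics.mean(values) * 100

# Six hypothetical replicate recoveries (R%) at spiking level L1 (250 ug/kg)
replicates = [98.2, 101.5, 95.7, 99.8, 103.1, 97.4]
print(f"mean R% = {statistics.mean(replicates):.1f}, RSD% = {rsd_percent(replicates):.1f}")
```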
def cmd_catalog(navigation_target=None, detailed_help=False):
    # catalog_s3_url, catalog_package_url, _test_url, _launch_local_catalog,
    # open_url, QuiltException, catalog_cmd_detailed_help and the `time`
    # module are assumed to be available at module scope.
    from .registry import app

    if detailed_help:
        print(catalog_cmd_detailed_help)
        return

    local_catalog_url = "http://localhost:3000"
    if navigation_target is None:
        catalog_url = local_catalog_url
    elif navigation_target.startswith("s3://"):
        catalog_url = catalog_s3_url(local_catalog_url, navigation_target)
    else:
        num_colons = navigation_target.count(":")
        assert num_colons == 1, f"To go to Package view, the input should follow the pattern BUCKET:USER/PKG. " \
                                f"However the input {navigation_target} has {num_colons} colons when it should have exactly one."
        num_slashes = navigation_target.count("/")
        assert num_slashes == 1, f"To go to Package view, the input should follow the pattern BUCKET:USER/PKG. " \
                                 f"However the input {navigation_target} has {num_slashes} slashes when it should have exactly one."
        bucket, package_name = navigation_target.split(":")
        catalog_url = catalog_package_url(local_catalog_url, bucket, package_name)

    if not _test_url(local_catalog_url):
        # Catalog is not yet running: launch it and poll until it responds.
        _launch_local_catalog()
        print("Waiting for containers to launch...")
        failure_timeout_secs = 15
        poll_interval_secs = 0.5
        start_time = time.time()

        while True:
            if time.time() - start_time > failure_timeout_secs:
                # One final probe after the timeout window; raise only if the
                # catalog is still unreachable.
                catalog_failed = not _test_url(local_catalog_url)
                if not catalog_failed:
                    break
                raise QuiltException(f"The backend containers needed to run the catalog did not both successfully launch. "
                                     f"Status:\n"
                                     f"\tCATALOG: {'FAILED' if catalog_failed else 'SUCCEEDED'}")

            if _test_url(local_catalog_url):
                break
            else:
                time.sleep(poll_interval_secs)

    open_url(catalog_url)
    app.run()
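For orientation, a plausible invocation of the command above — the argument formats are inferred from the function's own parsing, so these exact calls are illustrative rather than an official CLI reference:

```python
# Open the local catalog landing page (blocks while the catalog app runs)
cmd_catalog()

# Jump straight to a bucket view
cmd_catalog("s3://my-bucket")

# Jump to a package view; the argument must match BUCKET:USER/PKG
cmd_catalog("my-bucket:alice/my-package")
```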
And how are your five fingers doing? I’m happy to say I feel like I’m in the black. The one thing that’s gone into the debit is me being a good friend. Career, marriage, fatherhood—the others are doing good. Probably your most famous friendship has been with Lance Armstrong. Do you maintain contact with him? Yeah. Not as much. We e-mail here and there. How is he doing? He seems to be doing well. I don’t know anyone who can shake hands—meaning look at their situation realistically and deal with it—as quickly as him. He honestly has looked it in the eye and is on the road to recovery. So many actors just hang out with other actors. What was your relationship with Lance based on? I like people who are great at what they do. At that time, we were two single men who were doing good in their careers and came from different places. We would get together and have a steak dinner and philosophize—talk about life, talk about what it meant to be a man, what our responsibilities were, what it meant to be a father. You try to gain a little wisdom from each other. That’s mainly what I do with my male friends. We get together and talk about life. Did you feel like you needed an apology from him for lying to you? To put my own emotions in front of this and go "You didn’t tell me the truth" would be arrogant. And you know what? This is a friend. Who I know to be a good man. If there was an apology, it was said and I heard it. Are you ever called Matt? Never. Never, ever, ever. Some people do it online just to fuck with me. If anybody’s like, "Hey, Matt!" I don’t even turn around. Is there some foundational experience that made you hate it? Yeah, the foundational experience of being on the playground in kindergarten. My buddy John says, "Hey, Matt, you want to go play on the monkey bars?" I’m like, "Sure," and I’m going out there, and all of a sudden—wham—I’m on the ground. I look up and my mom says, "What’s your name?" "Matthew." "Don’t you ever answer to ’Matt’ again." From that day on, it’s always been, "Call me Matthew, please." Whoa. I didn’t expect a full-on scene of violence. What was her problem? She named me Matthew for a reason. Last night at dinner, someone said, "Things have been going so well for McConaughey, it’s probably time for him to step in shit." That’s funny.... "Step in shit." See, I love to see other people’s success. I never wait for anyone to step in shit. But do you feel the pressure of increased expectations? There hasn’t been any pressure. I know what hand I had in all this. It didn’t just fall off and happen. Did I have the whole hand? No. But I know what choices I made. And I sure didn’t make them for the guy who says, "It’s time for him to step in shit." I’ll tell him. Let him know: I step in shit all the time. It’s just that I don’t get that pissed off when I step in it. I just scrape off my boots. I’ve stepped in plenty of shit and will continue to. And by the way: Sometimes stepping in shit is good luck. Brett Martin (@brettmartin) is a GQ correspondent.
/** * Test nb train on iris. * * @throws IOException * Signals that an I/O exception has occurred. */ @Test public void testNBTrainOnIris() throws IOException { Pipeline<File, OnlineEvaluation> pipeline; Map<String, String> labelMap = Maps.newHashMap(); labelMap.put("Iris-setosa", 1 + ""); labelMap.put("Iris-versicolor", 0 + ""); labelMap.put("Iris-virginica", 0 + ""); NaiveBayes model = new NaiveBayes(bins, true); pipeline = Pipeline .newPipeline(new FileSource(file)) .addPipe(new FileToLinesPipe()) .addPipe(new NumericCSVtoLabeledVectorPipe(-1, 4, labelMap)) .addPipe( new BinaryInstancesFromVectorPipe( new BinaryTargetNumericParser())) .addPipe( new SequentialClassifierTrainingPipe<NaiveBayes>(model)); Iterator<Context<OnlineEvaluation>> out = pipeline.process(); assertTrue(out.hasNext()); double tot = 0, ct = 0; while (out.hasNext()) { assertTrue(out.hasNext()); double perf = out.next().getData().computeAUC(); if (!Double.isNaN(perf)) { tot += perf; ct++; } } assertTrue(tot / ct > 0.5); }
def _get_seeds(data_set, tag_set, seeds_category, seeds_size=1):
    """Randomly draw `seeds_size` samples of class `seeds_category`.

    Returns (seeds_data, seeds_target); empty lists if the category is absent
    or the data/tag shapes disagree. Assumes numpy is imported as `np`.
    """
    if (seeds_category not in tag_set) \
            or (data_set.shape[0] != tag_set.shape[0]):
        return [], []
    if seeds_size <= 0 or seeds_size >= len(data_set):
        seeds_size = 1  # fall back to a single seed on an out-of-range request
    # Boolean indexing copies the rows of the requested class, so the shuffle
    # below does not disturb the caller's data.
    category_set = data_set[(tag_set == seeds_category), :]
    np.random.shuffle(category_set)
    seeds_data = category_set[0:seeds_size, :]
    seeds_target = np.ones(shape=(seeds_size, 1), dtype=int) * seeds_category
    return seeds_data, seeds_target
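A short usage sketch for the helper above (synthetic data; numpy is assumed to be imported as `np`, consistent with the function body):

```python
import numpy as np

data_set = np.random.rand(100, 4)             # 100 samples, 4 features
tag_set = np.random.randint(0, 3, size=100)   # labels drawn from {0, 1, 2}

# Draw (up to) five seed samples of class 1, with a matching target column.
seeds_data, seeds_target = _get_seeds(data_set, tag_set, seeds_category=1, seeds_size=5)
print(seeds_data.shape, seeds_target.shape)   # (5, 4) (5, 1), if class 1 has >= 5 samples
```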
package ru.job4j.h2mapping.t1carcatalog.model.impl;

import org.hibernate.SessionFactory;
import org.hibernate.cfg.Configuration;
import ru.job4j.h2mapping.t1carcatalog.entity.Car;
import ru.job4j.h2mapping.t1carcatalog.entity.CarBody;
import ru.job4j.h2mapping.t1carcatalog.entity.Engine;
import ru.job4j.h2mapping.t1carcatalog.entity.Transmission;
import ru.job4j.h2mapping.t1carcatalog.model.Store;
import ru.job4j.h1config.t2todolist.model.Wrapper;

/**
 * @author <NAME>, date: 30.11.2019, e-mail: <EMAIL>
 * @version 1.0
 */
public class DbStore implements Store<Car> {
    /**
     * Session factory.
     */
    private final SessionFactory factory = new Configuration()
            .configure("ru/job4j/h2mapping/t1carcatalog/carcatalog.cfg.xml")
            .buildSessionFactory();

    /**
     * @param name      the car name.
     * @param exstBody  the id of an existing car body.
     * @param exstEng   the id of an existing engine.
     * @param exstTrans the id of an existing transmission.
     * @return the object that was added.
     */
    @Override
    public Car add(String name, int exstBody, int exstEng, int exstTrans) {
        return new Wrapper(factory).perform(session -> {
            Car car = new Car();
            car.setName(name);
            car.setCarBody(session.get(CarBody.class, exstBody));
            car.setEngine(session.get(Engine.class, exstEng));
            car.setTransmission(session.get(Transmission.class, exstTrans));
            session.save(car);
            return car;
        });
    }

    /**
     * @param id       the record id.
     * @param newName  the new name.
     * @param newBody  the id of the new car body.
     * @param newEng   the id of the new engine.
     * @param newTrans the id of the new transmission (gearbox).
     * @return the object that was updated.
     */
    @Override
    public Car update(int id, String newName, int newBody, int newEng, int newTrans) {
        return new Wrapper(factory).perform(session -> {
            Car car = session.get(Car.class, id);
            car.setName(newName);
            car.setCarBody(session.get(CarBody.class, newBody));
            car.setEngine(session.get(Engine.class, newEng));
            car.setTransmission(session.get(Transmission.class, newTrans));
            session.update(car);
            return car;
        });
    }

    /**
     * @param id the record id.
     */
    @Override
    public void delete(int id) {
        new Wrapper(factory).perform(session -> {
            Car car = session.get(Car.class, id);
            session.delete(car);
            return car;
        });
    }

    /**
     * @throws Exception if closing the session factory fails.
     */
    @Override
    public void close() throws Exception {
        if (factory != null) {
            factory.close();
        }
    }
}
/** * @return - a new Administrator account */ public static final GDataAccount createAdminAccount(){ GDataAccount retVal = new GDataAccount(); retVal.setName("administrator"); retVal.setPassword("password"); retVal.setRole(AccountRole.USERADMINISTRATOR); retVal.setRole(AccountRole.FEEDAMINISTRATOR); retVal.setRole(AccountRole.ENTRYAMINISTRATOR); return retVal; }
/**
 * @version 0.1
 *
 * @author Hefei Li
 *
 * @since May 9, 2015
 */
public class MissingTemperatureFields extends Configured implements Tool {

    private static final Logger log = LoggerFactory.getLogger(MissingTemperatureFields.class);

    @Override
    public int run(String[] args) throws Exception {
        if (args.length != 1) {
            JobBuilder.printUsage(this, "<job ID>");
            return -1;
        }
        String jobID = args[0];
        JobClient jobClient = new JobClient(new JobConf(super.getConf()));
        RunningJob job = jobClient.getJob(JobID.forName(jobID));
        if (job == null) {
            log.warn("No job with ID [{}] found.", jobID);
            return -1;
        }
        if (!job.isComplete()) {
            log.warn("Job [{}] is not complete.", jobID);
            return -1;
        }

        Counters counters = job.getCounters();
        long missing = counters
                .getCounter(MaxTemperatureWithCounters.Temperature.MISSING);
        long total = counters.getCounter(TaskCounter.MAP_INPUT_RECORDS);

        // SLF4J uses {} placeholders rather than printf-style formatting, so
        // the percentage is pre-formatted before logging.
        log.info("Records with missing temperature fields: {}%",
                String.format("%.2f", 100.0 * missing / total));
        return 0;
    }

    public static void main(String[] args) throws Exception {
        if (args.length != 1) {
            args = new String[]{""};
        }
        int exitCode = ToolRunner.run(new MissingTemperatureFields(), args);
        System.exit(exitCode);
    }
}
/**
 * @file merge_unittest.cpp
 * @author lipingan (<EMAIL>)
 * @brief
 * @version 0.1
 * @date 2022-01-24
 *
 * @copyright Copyright (c) 2022
 *
 */

#include "merge.h"

#include <iostream>
#include <vector>

#include "gmock/gmock.h"
#include "gtest/gtest.h"

namespace leetcode {

// Per the LeetCode contract, nums1 must be sized m + n, with the last n
// slots reserved as placeholders for the merged-in elements of nums2.
TEST(merge, case_0) {
  std::vector<int> nums1{1, 3, 5, 7, 9, 0, 0, 0, 0, 0};
  std::vector<int> nums2{2, 4, 6, 8, 10};
  merge(nums1, 5, nums2, 5);
  EXPECT_THAT(nums1, testing::ElementsAre(1, 2, 3, 4, 5, 6, 7, 8, 9, 10));
}

TEST(merge, case_1) {
  std::vector<int> nums1{1, 3, 5, 7, 9};
  std::vector<int> nums2{};
  merge(nums1, 5, nums2, 0);
  EXPECT_THAT(nums1, testing::ElementsAre(1, 3, 5, 7, 9));
}

TEST(merge, case_2) {
  std::vector<int> nums1{1, 3, 5, 7, 9};
  std::vector<int> nums2{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
  nums1.resize(nums1.size() + nums2.size(), 0);  // make room for nums2
  merge(nums1, 5, nums2, nums2.size());
  for (auto iter = nums1.begin(); iter != nums1.end(); ++iter) {
    std::cout << *iter << ' ';
  }
  std::cout << std::endl;
}

TEST(merge, case_3) {
  std::vector<int> nums1{1, 2, 3, 0, 0, 0};
  std::vector<int> nums2{2, 5, 6};
  merge(nums1, 3, nums2, 3);
  EXPECT_THAT(nums1, testing::ElementsAre(1, 2, 2, 3, 5, 6));
}

}  // namespace leetcode
def select_paths_set(self, calc, pathset, comm, memLimit):
    """Adopt a previously found path set and pre-compute, for each circuit,
    the polynomial representation of its outcome probabilities (pruned at the
    circuit's magnitude threshold), caching merged compact polynomials for
    later evaluation. Assumes numpy is imported as `_np` at module scope.
    """
    self.pathset = pathset
    self.percircuit_p_polys = {}
    repcache = self.pathset.highmag_termrep_cache
    circuitsetup_cache = self.pathset.circuitsetup_cache
    thresholds = self.pathset.thresholds

    all_compact_polys = []  # one compact polynomial per computed element
    for i in self.get_evaluation_order():
        circuit = self[i]
        threshold = thresholds[circuit]
        rholabel = circuit[0]
        opstr = circuit[1:]
        elabels = self.simplified_circuit_elabels[i]

        raw_polyreps = calc.prs_as_pruned_polyreps(threshold, rholabel, elabels, opstr,
                                                   repcache, calc.sos.opcache,
                                                   circuitsetup_cache, comm, memLimit)
        compact_polys = [polyrep.compact_complex() for polyrep in raw_polyreps]
        self.percircuit_p_polys[circuit] = (threshold, compact_polys)
        all_compact_polys.extend(compact_polys)

    # Concatenate the variable/coefficient tapes of all compact polynomials
    # into two flat arrays for fast bulk evaluation.
    tapes = all_compact_polys
    vtape = _np.concatenate([t[0] for t in tapes])
    ctape = _np.concatenate([t[1] for t in tapes])
    self.merged_compact_polys = (vtape, ctape)
    return
import * as React from 'react' import { Row, Column, Field, Section, InputOnChangeData } from 'decentraland-ui' import { t } from 'decentraland-dapps/dist/modules/translation/utils' import { isValid } from 'lib/address' import ItemImage from 'components/ItemImage' import Icon from 'components/Icon' import { getMaxSupply } from 'modules/item/utils' import { Props } from './MintableItem.types' import './MintableItem.css' export default class MintableItem extends React.PureComponent<Props> { handleAddNewMint = () => { const { item, mints, onChange } = this.props onChange(item, [...mints, { item }]) } getChangeAddressHandler(index: number) { const { item, mints, onChange } = this.props return (_event: React.ChangeEvent<HTMLInputElement>, data: InputOnChangeData) => { const mint = { ...mints[index], address: data.value ? data.value : undefined } const newMints = [...mints.slice(0, index), mint, ...mints.slice(index + 1)] onChange(item, newMints) } } getChangeAmountHandler(index: number) { const { item, mints, onChange } = this.props return (_event: React.ChangeEvent<HTMLInputElement>, data: InputOnChangeData) => { const mint = { ...mints[index], amount: data.value ? Number(data.value) : undefined } const newMints = [...mints.slice(0, index), mint, ...mints.slice(index + 1)] const currentSupply = this.getSupply(newMints) if (this.isValidSupply(currentSupply)) { onChange(item, newMints) } } } getRemoveMintHandler(index: number) { const { item, mints, onChange } = this.props return () => { if (mints.length > 1) { onChange(item, [...mints.slice(0, index), ...mints.slice(index + 1)]) } } } isValidAddress(address?: string) { return address === undefined || isValid(address) } isValidAmount(amount?: number) { return amount === undefined || amount >= 0 } isValidSupply(supply: number) { const { item } = this.props return supply >= 0 && supply <= getMaxSupply(item) } getSupply(mints: Props['mints']) { const { item } = this.props const totalSupply = item.totalSupply || 0 let currentSupply = 0 for (const mint of mints) { currentSupply += mint.amount || 0 } return totalSupply + currentSupply } render() { const { item, mints } = this.props return ( <div className="MintableItem"> <Row> <Column grow={true}> <div className="item-header"> <ItemImage item={item} /> <span>{item.name}</span> </div> </Column> <Column align="right"> <div className="item-header"> <span className="stock"> {t('item.supply')} {this.getSupply(mints)}/{getMaxSupply(item)} </span> <Icon name="plus" className="item-action" onClick={this.handleAddNewMint} /> </div> </Column> </Row> {mints.map(({ address, amount }, index) => ( <Section key={index} className="mint" size="tiny"> <Field className="rounded" type="address" placeholder={t('global.address')} value={address || ''} message={undefined} error={!this.isValidAddress(address)} onChange={this.getChangeAddressHandler(index)} /> <Field className="rounded" type="number" placeholder={t('global.amount')} value={amount || ''} message={undefined} error={!this.isValidAmount(amount)} onChange={this.getChangeAmountHandler(index)} /> {mints.length > 1 ? <Icon name="minus" className="item-action" onClick={this.getRemoveMintHandler(index)} /> : null} </Section> ))} </div> ) } }
/** * Monitors the asynchronous progress of the JSR-88 operation. */ private static final class Monitor implements ProgressListener { public static ProgressObject join(ProgressObject po, String errorMessage) throws Exception { Monitor m = new Monitor(); po.addProgressListener(m); m.join(errorMessage); return po; } private DeploymentStatus completionEvent; public synchronized void handleProgressEvent(ProgressEvent event) { DeploymentStatus s = event.getDeploymentStatus(); if(s.isFailed() || s.isCompleted()) { completionEvent = s; notifyAll(); } } /** * Wait till the asynchronous operation completes. */ public synchronized void join(String errorMessage) throws Exception { while(completionEvent==null) wait(); if(completionEvent.isFailed()) throw new Exception(errorMessage+" : "+completionEvent.getMessage()); } }
def mock_query_generator(suds_objects): for obj in suds_objects: yield obj
/** * Sends scores using the built-in email client. * @param v The view context. */ public void sendScoresViaEmail(View v) { String winner = "Winning Team: "; if (scoreTeamA > scoreTeamB) winner = winner + getTeamAName(); else if (scoreTeamB > scoreTeamA) winner = winner + getTeamBName(); else if (scoreTeamB == scoreTeamA) winner = winner + "Tie!"; String teamAScore = "Team A: " + getTeamAName() + " has: " + scoreTeamA; String teamBScore = "Team B: " + getTeamBName() + " has: " + scoreTeamB; String message = winner + "\n" + teamAScore + "\n" + teamBScore; Intent i = new Intent(Intent.ACTION_SEND); i.setType("message/rfc822"); i.putExtra(Intent.EXTRA_TEXT, message); i.putExtra(Intent.EXTRA_SUBJECT, "CourtCounter Score Report"); i.addFlags(Intent.FLAG_ACTIVITY_FORWARD_RESULT); try { startActivity(i); } catch (android.content.ActivityNotFoundException ex) { Context context = getApplicationContext(); CharSequence text = getString(R.string.toast_email_error); int duration = Toast.LENGTH_SHORT; Toast toast = Toast.makeText(context, text, duration); toast.show(); } }
import { Renderer2 } from '@angular/core'; import { createComponentFactory, createSpyObject, Spectator } from '@ngneat/spectator'; import { PageComponent } from '../page.component'; import { PageFooterComponent } from './page-footer.component'; describe('PageFooterComponent', () => { let spectator: Spectator<PageFooterComponent>; const pageComponent = createSpyObject(PageComponent, { tabBarBottomHidden: true }); const createComponent = createComponentFactory({ component: PageFooterComponent, providers: [ { provide: PageComponent, useValue: pageComponent, }, Renderer2, ], }); beforeEach(() => (spectator = createComponent())); describe('close', () => { it('should show tabs', () => { pageComponent.tabBarBottomHidden = true; spectator.component.close(); expect(pageComponent.tabBarBottomHidden).toBe(false); }); it('should remove host element', () => { expect(spectator.element.parentElement).not.toBeNull(); spectator.component.close(); spectator.detectChanges(); expect(spectator.element.parentElement).toBeNull(); expect(spectator.element.isConnected).toBe(false); }); }); describe('ngOnDestroy', () => { it('should show tabs', () => { pageComponent.tabBarBottomHidden = true; spectator.component.ngOnDestroy(); expect(pageComponent.tabBarBottomHidden).toBe(false); }); }); });
export interface AgentRequirements {
  TokensAmount: string;
}
# You have a list of points in the plane. Return the area of the largest
# triangle that can be formed by any 3 of the points.
# Example:
# Input: points = [[0,0],[0,1],[1,0],[0,2],[2,0]]
# Output: 2
# Explanation: The five points are shown in the figure below. The red triangle
# is the largest.
# Notes:
# 3 <= points.length <= 50.
# No points will be duplicated.
# -50 <= points[i][j] <= 50.
# Answers within 10^-6 of the true value will be accepted as correct.


class Solution:
    def largestTriangleArea(self, points):
        """
        :type points: List[List[int]]
        :rtype: float
        """
        from math import sqrt

        def dist(p1, p2):
            return sqrt((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2)

        def area(d1, d2, d3):
            # Heron's formula; abs() guards against tiny negative values
            # caused by floating-point rounding on degenerate triangles.
            h = (d1 + d2 + d3) / 2.0
            return sqrt(abs(h * (h - d1) * (h - d2) * (h - d3)))

        max_area = 0
        n = len(points)
        for i in range(n):
            p1 = points[i]
            for j in range(i + 1, n):
                p2 = points[j]
                for k in range(j + 1, n):
                    p3 = points[k]
                    d1 = dist(p1, p2)
                    d2 = dist(p2, p3)
                    d3 = dist(p3, p1)
                    a = area(d1, d2, d3)
                    if a > max_area:
                        max_area = a
        return max_area


sol = Solution().largestTriangleArea
print(sol([[0, 0], [0, 1], [1, 0], [0, 2], [2, 0]]), 2)
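As an aside on the geometry: the Heron's-formula route above can suffer floating-point cancellation for thin triangles, whereas the cross-product (shoelace) form computes the same area directly from coordinates without square roots. A minimal sketch of that alternative (not part of the original solution):

```python
def triangle_area(p1, p2, p3):
    # |(x2-x1)*(y3-y1) - (x3-x1)*(y2-y1)| is twice the signed area
    return abs((p2[0] - p1[0]) * (p3[1] - p1[1])
               - (p3[0] - p1[0]) * (p2[1] - p1[1])) / 2.0

assert triangle_area([0, 0], [0, 2], [2, 0]) == 2.0  # the example's answer
```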
/** * @author Tom * * Controller for Neo4j movie API */ public class Controller { private static final String ADDRESS = "bolt://localhost"; private static final String USERNAME = "neo4j"; private static final String PASSWORD = "1234"; private static Driver driver = null; private static Session session = null; /** * Simple variable used to force the connection to stay open. * This is to prevent redundant overhead from repeatedly opening and closing * Neo4j connections by sub-methods if the upper method knows it will need * the connection once more afterwards. */ private static boolean keepOpen = false; /** * Creates a connection to the local Neo4j database if not already connected. */ public static void establishConnection(){ if (isConnected()) return; else closeConnection(); driver = GraphDatabase.driver(ADDRESS, AuthTokens.basic(USERNAME, PASSWORD)); session = driver.session(); } /** * Closes the connection unless the keepOpen variable is set. */ public static void closeConnection(){ if (session!=null && (!keepOpen || !session.isOpen())){ session.close(); session = null; driver.close(); driver = null; } } /** * Performs the given query and results the StatementResult. * This method will automatically connect to the database if required. * @param q Query to perform. * @return */ public static StatementResult query(Statement q){ establishConnection(); StatementResult result = session.run(q); return result; } /** * Tells this controller to not close the Neo4j connection even if told to do so. */ public static void keepOpen(){ establishConnection(); keepOpen = true; } /** * Overrides the keepOpen variable and closes the session. */ public static void forceClose(){ keepOpen = false; closeConnection(); } /** * Quick check to ensure the connection is set and open. * @return */ public static boolean isConnected(){ return session!=null && session.isOpen(); } }
#ifndef UserClass_hpp
#define UserClass_hpp

#include <iostream>
#include <vector>
#include <map>
#include <stdlib.h>
#include <fstream>
#include "Library.hpp"

using namespace std;

class UserClass {
private:
    std::map<char, int> data;

public:
    UserClass();
    void Operation();
    void Display();
    void Setdata(char key, int Data);
    void WriteInFile();
    void AddInformation();
    void MathOperation();
};

#endif /* UserClass_hpp */
import { MINode } from "./mi_parse"; import { DebugProtocol } from "vscode-debugprotocol/lib/debugProtocol"; export interface BackendBreakpoint { file?: string; line?: number; raw?: string; condition: string; countCondition?: string; } export interface Variable { name: string; valueStr?: string; type?: string; raw?: any; } export interface Stack { level: number; address: string; function: string; fileName: string; file: string; line: number; args: Variable[]; } export class VariableObject { name: string; exp: string; numchild: number; type: string; value: string; threadId: string; frozen: boolean; dynamic: boolean; displayhint: string; has_more: boolean; id: number; constructor(node: any) { this.name = MINode.valueOf(node, "name"); this.exp = MINode.valueOf(node, "exp"); this.numchild = parseInt(MINode.valueOf(node, "numchild")); this.type = MINode.valueOf(node, "type"); this.value = MINode.valueOf(node, "value"); this.threadId = MINode.valueOf(node, "thread-id"); this.frozen = !!MINode.valueOf(node, "frozen"); this.dynamic = !!MINode.valueOf(node, "dynamic"); this.displayhint = MINode.valueOf(node, "displayhint"); // TODO: use has_more when it's > 0 this.has_more = !!MINode.valueOf(node, "has_more"); } public applyChanges(node: MINode) { this.value = MINode.valueOf(node, "value"); if (!!MINode.valueOf(node, "type_changed")) { this.type = MINode.valueOf(node, "new_type"); } this.dynamic = !!MINode.valueOf(node, "dynamic"); this.displayhint = MINode.valueOf(node, "displayhint"); this.has_more = !!MINode.valueOf(node, "has_more"); } public isCompound(): boolean { return this.numchild > 0 || this.value === "{...}" || (this.dynamic && (this.displayhint === "array" || this.displayhint === "map")); } public toProtocolVariable(): DebugProtocol.Variable { let res: DebugProtocol.Variable = { name: this.exp, evaluateName: this.exp, value: (this.value === void 0) ? "<unknown>" : this.value, type: this.type, namedVariables: this.numchild, //presentationHint: { kind: this.displayhint, attributes: ["readOnly"]}, variablesReference: this.id }; return res; } } // from https://gist.github.com/justmoon/15511f92e5216fa2624b#gistcomment-1928632 export interface MIError extends Error { readonly name: string; readonly message: string; readonly source: string; }; export interface MIErrorConstructor { new (message: string, source: string): MIError; readonly prototype: MIError; } export const MIError: MIErrorConstructor = <any>class MIError { readonly name: string; readonly message: string; readonly source: string; public constructor(message: string, source: string) { Object.defineProperty(this, 'name', { get: () => (this.constructor as any).name, }); Object.defineProperty(this, 'message', { get: () => message, }); Object.defineProperty(this, 'source', { get: () => source, }); Error.captureStackTrace(this, this.constructor); } public toString() { return `${this.message} (from ${this.source})`; } }; Object.setPrototypeOf(MIError as any, Object.create(Error.prototype)); MIError.prototype.constructor = MIError;
from solutions.FIZ import fizz_buzz_solution class TestHlo2(): def test_fiz(self): assert fizz_buzz_solution.fizz_buzz(3) == 'fizz fake deluxe' assert fizz_buzz_solution.fizz_buzz(12) == 'fizz' assert fizz_buzz_solution.fizz_buzz(10) == 'buzz' assert fizz_buzz_solution.fizz_buzz(15) == 'fizz buzz fake deluxe' assert fizz_buzz_solution.fizz_buzz(31) == 'fizz' assert fizz_buzz_solution.fizz_buzz(51) == 'fizz buzz' assert fizz_buzz_solution.fizz_buzz(53) == 'fizz buzz' assert fizz_buzz_solution.fizz_buzz(55) == 'buzz fake deluxe' assert fizz_buzz_solution.fizz_buzz(33) == 'fizz fake deluxe' assert fizz_buzz_solution.fizz_buzz(8) == 8
import { User } from './user' import { Post } from './post' import { Tag } from './tag' import { TagPost } from './tagPost' import { UserDetail } from './userDetails' // 1:n User.hasMany(Post, { constraints: false, as: 'posts', foreignKey: 'authorId' }) Post.belongsTo(User, { constraints: false, as: 'author', foreignKey: 'authorId' }) // n:m Post.belongsToMany(Tag, { constraints: false, through: TagPost }) Tag.belongsToMany(Post, { constraints: false, through: TagPost }) // 1:1 User.hasOne(UserDetail, { constraints: false, as: 'detail', foreignKey: 'userId' }) export { User, Post, Tag, UserDetail }
/** * Uses fitted/trained model to make predictions on features. * * @param features The features to make predictions on. * @return The models predicted labels. * @throws IllegalArgumentException Thrown if the features are not correctly sized per * the specification when the model was compiled. */ @Override public double[][] predict(double[][] features) { double[][] results = new double[features.length][layers.get(layers.size()-1).getOutDim()]; for(int i=0; i<features.length; i++) { results[i] = feedForward(new Vector(features[i])).T().getValuesAsDouble()[0]; } return results; }
/// Transforms a parser's combination of top terms into a term, if possible. pub fn to_term(&self) -> Res<Option<Term>> { let mut stack = Vec::with_capacity(17); let mut ptterm = self; 'go_down: loop { let mut term = match *ptterm { PTTerms::And(ref args) => { let mut args = args.iter(); if let Some(head) = args.next() { stack.push((Op::And, args, vec![])); ptterm = head; continue 'go_down; } else { bail!("illegal nullary conjunction") } } PTTerms::Or(ref args) => { let mut args = args.iter(); if let Some(head) = args.next() { stack.push((Op::Or, args, vec![])); ptterm = head; continue 'go_down; } else { bail!("illegal nullary conjunction") } } PTTerms::TTerm(ref tterm) => { if let Some(term) = tterm.term() { term.clone() } else { return Ok(None); } } PTTerms::NTTerm(ref tterm) => { if let Some(term) = tterm.term() { term::not(term.clone()) } else { return Ok(None); } } }; 'go_up: loop { if let Some((op, mut to_do, mut done)) = stack.pop() { done.push(term); if let Some(next) = to_do.next() { stack.push((op, to_do, done)); ptterm = next; continue 'go_down; } else { term = term::app(op, done); continue 'go_up; } } else { break 'go_down Ok(Some(term)); } } } }
//------------------------------------------------------------------------------
// GB_serialize_array: serialize an array, with optional compression
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2023, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// JIT: not needed. Only one variant possible.

// Parallel compression method for an array. The array is compressed into
// a sequence of independently allocated blocks, or returned as-is if not
// compressed. Currently, only LZ4, LZ4HC, and ZSTD are supported.

#include "GB.h"
#include "GB_serialize.h"
#include "GB_lz4.h"
#include "GB_zstd.h"

#define GB_FREE_ALL                                             \
{                                                               \
    GB_FREE (&Sblocks, Sblocks_size) ;                          \
    GB_serialize_free_blocks (&Blocks, Blocks_size, nblocks) ;  \
}

GrB_Info GB_serialize_array
(
    // output:
    GB_blocks **Blocks_handle,          // Blocks: array of size nblocks+1
    size_t *Blocks_size_handle,         // size of Blocks
    int64_t **Sblocks_handle,           // Sblocks: array of size nblocks+1
    size_t *Sblocks_size_handle,        // size of Sblocks
    int32_t *nblocks_handle,            // # of blocks
    int32_t *method_used,               // method used
    size_t *compressed_size,            // size of compressed block, or upper
                                        // bound if dryrun is true
    // input:
    bool dryrun,                        // if true, just estimate the size
    GB_void *X,                         // input array of size len
    int64_t len,                        // size of X, in bytes
    int32_t method,                     // compression method requested
    int32_t algo,                       // compression algorithm
    int32_t level,                      // compression level
    GB_Werk Werk
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    ASSERT (Blocks_handle != NULL) ;
    ASSERT (Blocks_size_handle != NULL) ;
    ASSERT (Sblocks_handle != NULL) ;
    ASSERT (Sblocks_size_handle != NULL) ;
    ASSERT (nblocks_handle != NULL) ;
    ASSERT (method_used != NULL) ;
    ASSERT (compressed_size != NULL) ;
    GB_blocks *Blocks = NULL ;
    size_t Blocks_size = 0, Sblocks_size = 0 ;
    int32_t nblocks = 0 ;
    int64_t *Sblocks = NULL ;

    //--------------------------------------------------------------------------
    // check for quick return
    //--------------------------------------------------------------------------

    (*Blocks_handle) = NULL ;
    (*Blocks_size_handle) = 0 ;
    (*Sblocks_handle) = NULL ;
    (*Sblocks_size_handle) = 0 ;
    (*nblocks_handle) = 0 ;
    (*method_used) = GxB_COMPRESSION_NONE ;
    (*compressed_size) = 0 ;
    if (X == NULL || len == 0)
    {
        // input array is empty
        return (GrB_SUCCESS) ;
    }

    //--------------------------------------------------------------------------
    // check for no compression
    //--------------------------------------------------------------------------

    if (method <= GxB_COMPRESSION_NONE || len < 256)
    {
        // no compression, return result as a single block (plus the sentinel)
        if (!dryrun)
        {
            Blocks = GB_MALLOC (2, GB_blocks, &Blocks_size) ;
            Sblocks = GB_MALLOC (2, int64_t, &Sblocks_size) ;
            if (Blocks == NULL || Sblocks == NULL)
            {
                // out of memory
                GB_FREE_ALL ;
                return (GrB_OUT_OF_MEMORY) ;
            }
            Blocks [0].p = X ;                 // first block is all of the array X
            Blocks [0].p_size_allocated = 0 ;  // p is shallow
            Sblocks [0] = 0 ;                  // start of first block
            Blocks [1].p = NULL ;              // 2nd block is the final sentinel
            Blocks [1].p_size_allocated = 0 ;  // p is shallow
            Sblocks [1] = len ;                // first block ends at len-1
            (*Blocks_handle) = Blocks ;
            (*Blocks_size_handle) = Blocks_size ;
            (*Sblocks_handle) = Sblocks ;
            (*Sblocks_size_handle) = Sblocks_size ;
        }
(*compressed_size) = len ; (*nblocks_handle) = 1 ; return (GrB_SUCCESS) ; } (*method_used) = method ; //-------------------------------------------------------------------------- // determine # of threads to use //-------------------------------------------------------------------------- int nthreads_max = GB_Context_nthreads_max ( ) ; double chunk = GB_Context_chunk ( ) ; int nthreads = GB_nthreads (len, chunk, nthreads_max) ; //-------------------------------------------------------------------------- // determine # of blocks and allocate them //-------------------------------------------------------------------------- // divide the array into blocks, 4 per thread, or a single block if 1 thread int64_t blocksize = (nthreads == 1) ? len : GB_ICEIL (len, 4*nthreads) ; // ensure the blocksize does not exceed the LZ4 maximum // ... this is also fine for ZSTD ASSERT (LZ4_MAX_INPUT_SIZE < INT32_MAX) ; blocksize = GB_IMIN (blocksize, LZ4_MAX_INPUT_SIZE/2) ; // ensure the blocksize is not too small blocksize = GB_IMAX (blocksize, (64*1024)) ; // determine the final # of blocks nblocks = GB_ICEIL (len, blocksize) ; nthreads = GB_IMIN (nthreads, nblocks) ; (*nblocks_handle) = nblocks ; // allocate the output Blocks: one per block plus the sentinel block if (!dryrun) { Blocks = GB_CALLOC (nblocks+1, GB_blocks, &Blocks_size) ; Sblocks = GB_CALLOC (nblocks+1, int64_t, &Sblocks_size) ; if (Blocks == NULL || Sblocks == NULL) { // out of memory GB_FREE_ALL ; return (GrB_OUT_OF_MEMORY) ; } } // allocate the blocks, one at a time int32_t blockid ; bool ok = true ; for (blockid = 0 ; blockid < nblocks && ok ; blockid++) { // allocate a single block for the compression of X [kstart:kend-1] int64_t kstart, kend ; GB_PARTITION (kstart, kend, len, blockid, nblocks) ; size_t uncompressed = kend - kstart ; ASSERT (uncompressed < INT32_MAX) ; ASSERT (uncompressed > 0) ; size_t s ; switch (algo) { case GxB_COMPRESSION_LZ4 : case GxB_COMPRESSION_LZ4HC : s = (size_t) LZ4_compressBound ((int) uncompressed) ; break ; default : case GxB_COMPRESSION_ZSTD : s = ZSTD_compressBound (uncompressed) ; break ; } ASSERT (s < INT32_MAX) ; if (dryrun) { // do not allocate the block; just sum up the upper bound sizes (*compressed_size) += s ; } else { // allocate the block size_t size_allocated = 0 ; GB_void *p = GB_MALLOC (s, GB_void, &size_allocated) ; ok = (p != NULL) ; Blocks [blockid].p = p ; Blocks [blockid].p_size_allocated = size_allocated ; } } if (dryrun) { // GrB_Matrix_serializeSize: no more work to do. (*compressed_size) is // an upper bound of the blob_size required when the matrix is // compressed, and (*nblocks_handle) is the number of blocks to be used. // No space has been allocated. 
return (GrB_SUCCESS) ; } if (!ok) { // out of memory GB_FREE_ALL ; return (GrB_OUT_OF_MEMORY) ; } //-------------------------------------------------------------------------- // compress the blocks in parallel //-------------------------------------------------------------------------- #pragma omp parallel for num_threads(nthreads) schedule(dynamic) \ reduction(&&:ok) for (blockid = 0 ; blockid < nblocks ; blockid++) { // compress X [kstart:kend-1] into Blocks [blockid].p int64_t kstart, kend ; GB_PARTITION (kstart, kend, len, blockid, nblocks) ; const char *src = (const char *) (X + kstart) ; // source char *dst = (char *) Blocks [blockid].p ; // destination int srcSize = (int) (kend - kstart) ; // size of source size_t dsize = Blocks [blockid].p_size_allocated ; // size of dest int dstCapacity = (int) GB_IMIN (dsize, INT32_MAX) ; int s ; size_t s64 ; switch (algo) { case GxB_COMPRESSION_LZ4 : s = LZ4_compress_default (src, dst, srcSize, dstCapacity) ; ok = ok && (s > 0) ; // compressed block is now in dst [0:s-1], of size s Sblocks [blockid] = (int64_t) s ; break ; case GxB_COMPRESSION_LZ4HC : s = LZ4_compress_HC (src, dst, srcSize, dstCapacity, level) ; ok = ok && (s > 0) ; // compressed block is now in dst [0:s-1], of size s Sblocks [blockid] = (int64_t) s ; break ; default : case GxB_COMPRESSION_ZSTD : s64 = ZSTD_compress (dst, dstCapacity, src, srcSize, level) ; ok = ok && (s64 <= dstCapacity) ; // compressed block is now in dst [0:s64-1], of size s64 Sblocks [blockid] = (int64_t) s64 ; break ; } } if (!ok) { // compression failure: this can "never" occur GB_FREE_ALL ; return (GrB_INVALID_OBJECT) ; } //-------------------------------------------------------------------------- // compute cumulative sum of the compressed blocks //-------------------------------------------------------------------------- GB_cumsum (Sblocks, nblocks, NULL, 1, Werk) ; //-------------------------------------------------------------------------- // free workspace return result //-------------------------------------------------------------------------- (*Blocks_handle) = Blocks ; (*Blocks_size_handle) = Blocks_size ; (*Sblocks_handle) = Sblocks ; (*Sblocks_size_handle) = Sblocks_size ; (*compressed_size) = Sblocks [nblocks] ; // actual size of the blob return (GrB_SUCCESS) ; }
#!/usr/bin/env python
# coding: utf-8

# Loading Libraries and data
import sys
import unittest

from SvmModel import Model
from Hyperoptsvm import controller


# BaseClass
class TestSVMHyperopt(unittest.TestCase):
    """
    This class defines the test methods used to check the optimized SVM
    hyperparameters against known values.
    """

    def test_test_svm_degree(self):
        """
        Tests the optimized polynomial degree against the known value.
        """
        train_path = "./data_transformed_10.csv"
        test_path = "./data_transformed_10.csv"
        obj = controller(Model(train_path, test_path, CV=5, label='Class'))
        expected_best_params = {
            'C': 1.7718619582441852,
            'coef0': 1.6216340381955197,
            'degree': 8.0,
            'kernel': 'poly',
            'gamma': 'scale'}
        self.assertEqual(
            expected_best_params['degree'],
            obj.optimize_hyperparam()['degree'])

    def test_test_svm_C(self):
        """
        Tests the optimized regularization parameter C against the known value.
        """
        train_path = "./data_transformed_10.csv"
        test_path = "./data_transformed_10.csv"
        obj = controller(Model(train_path, test_path, CV=5, label='Class'))
        expected_best_params = {
            'C': 1.7718619582441852,
            'coef0': 1.6216340381955197,
            'degree': 8.0,
            'kernel': 'poly',
            'gamma': 'scale'}
        self.assertEqual(
            expected_best_params['C'],
            obj.optimize_hyperparam()['C'])

    def test_test_svm_kernel(self):
        """
        Tests the optimized kernel choice against the known value.
        """
        train_path = "./data_transformed_10.csv"
        test_path = "./data_transformed_10.csv"
        obj = controller(Model(train_path, test_path, CV=5, label='Class'))
        expected_best_params = {
            'C': 1.7718619582441852,
            'coef0': 1.6216340381955197,
            'degree': 8.0,
            'kernel': 'poly',
            'gamma': 'scale'}
        self.assertEqual(
            expected_best_params['kernel'],
            obj.optimize_hyperparam()['kernel'])

    def test_test_svm_coef(self):
        """
        Tests the optimized independent term coef0 against the known value.
        """
        train_path = "./data_transformed_10.csv"
        test_path = "./data_transformed_10.csv"
        obj = controller(Model(train_path, test_path, CV=5, label='Class'))
        expected_best_params = {
            'C': 1.7718619582441852,
            'coef0': 1.6216340381955197,
            'degree': 8.0,
            'kernel': 'poly',
            'gamma': 'scale'}
        self.assertEqual(
            expected_best_params['coef0'],
            obj.optimize_hyperparam()['coef0'])

    def test_test_svm_gamma(self):
        """
        Tests the optimized gamma setting against the known value.
        """
        train_path = "./data_transformed_10.csv"
        test_path = "./data_transformed_10.csv"
        obj = controller(Model(train_path, test_path, CV=5, label='Class'))
        expected_best_params = {
            'C': 1.7718619582441852,
            'coef0': 1.6216340381955197,
            'degree': 8.0,
            'kernel': 'poly',
            'gamma': 'scale'}
        self.assertEqual(
            expected_best_params['gamma'],
            obj.optimize_hyperparam()['gamma'])


# A test suite aggregates the tests that should be executed together.
suite = unittest.TestLoader().loadTestsFromTestCase(TestSVMHyperopt)

# The test runner orchestrates the execution of tests and provides the
# outcome to the user.
# sys.stderr: file object used by the interpreter for standard errors.
unittest.TextTestRunner(verbosity=1, stream=sys.stderr).run(suite)
def build_causal_graph(train_data: pd.DataFrame, column_names: List[str], inputs: jnp.ndarray): make_multinomial = functools.partial( causal_network.MLPMultinomial.from_frame, hidden_shape=(100,)) make_gaussian = functools.partial( causal_network.Gaussian, hidden_shape=(100,)) node_a = Node(MLPMultinomial.from_frame(train_data, 'sex')) node_c1 = Node(MLPMultinomial.from_frame(train_data, 'native-country')) node_c2 = Node(Gaussian('age', column_names.index('age'))) node_hm = Node(causal_network.GaussianMixture('hm', 10, dim=2), hidden=True) node_hl = Node(causal_network.GaussianMixture('hl', 10, dim=2), hidden=True) node_hr1 = Node( causal_network.GaussianMixture('hr1', 10, dim=2), hidden=True) node_hr2 = Node( causal_network.GaussianMixture('hr2', 10, dim=2), hidden=True) node_hr3 = Node( causal_network.GaussianMixture('hr3', 10, dim=2), hidden=True) node_m = Node( make_multinomial(train_data, 'marital-status'), [node_a, node_hm, node_c1, node_c2]) node_l = Node( make_gaussian('education-num', column_names.index('education-num')), [node_a, node_hl, node_c1, node_c2, node_m]) node_r1 = Node( make_multinomial(train_data, 'occupation'), [node_a, node_c1, node_c2, node_m, node_l]) node_r2 = Node( make_gaussian('hours-per-week', column_names.index('hours-per-week')), [node_a, node_c1, node_c2, node_m, node_l]) node_r3 = Node( make_multinomial(train_data, 'workclass'), [node_a, node_c1, node_c2, node_m, node_l]) node_y = Node( MLPMultinomial.from_frame(train_data, 'income'), [node_a, node_c1, node_c2, node_m, node_l, node_r1, node_r2, node_r3]) observable_nodes = (node_a, node_c1, node_c2, node_l, node_m, node_r1, node_r2, node_r3, node_y) nodes_on_which_hm_depends = (node_a, node_c1, node_c2, node_m) nodes_on_which_hl_depends = (node_a, node_c1, node_c2, node_m, node_l) nodes_on_which_hr1_depends = (node_a, node_c1, node_c2, node_m, node_l, node_r1) nodes_on_which_hr2_depends = (node_a, node_c1, node_c2, node_m, node_l, node_r2) nodes_on_which_hr3_depends = (node_a, node_c1, node_c2, node_m, node_l, node_r3) hidden_nodes = (node_hm, node_hl, node_hr1, node_hr2, node_hr3) def make_q_x_obs_module(node): assert node.hidden return variational.Variational( common_layer_sizes=(20, 20), output_dim=node.dim) q_hm_obs_module = make_q_x_obs_module(node_hm) q_hl_obs_module = make_q_x_obs_module(node_hl) q_hr1_obs_module = make_q_x_obs_module(node_hr1) q_hr2_obs_module = make_q_x_obs_module(node_hr2) q_hr3_obs_module = make_q_x_obs_module(node_hr3) causal_network.populate(observable_nodes, inputs) q_hm_obs = q_hm_obs_module( *(node.observed_value for node in nodes_on_which_hm_depends)) q_hl_obs = q_hl_obs_module( *(node.observed_value for node in nodes_on_which_hl_depends)) q_hr1_obs = q_hr1_obs_module( *(node.observed_value for node in nodes_on_which_hr1_depends)) q_hr2_obs = q_hr2_obs_module( *(node.observed_value for node in nodes_on_which_hr2_depends)) q_hr3_obs = q_hr3_obs_module( *(node.observed_value for node in nodes_on_which_hr3_depends)) q_hidden_obs = (q_hm_obs, q_hl_obs, q_hr1_obs, q_hr2_obs, q_hr3_obs) return observable_nodes, hidden_nodes, q_hidden_obs
def schedule_next_event(self, p_delay=0): l_delay, l_list = self._find_next_scheduled_events() if p_delay != 0: l_delay = p_delay lightingUtilitySch(self.m_pyhouse_obj).run_after_delay(l_delay, l_list)
def exercise_key_with_algorithm( cls, key_type: psa_storage.Expr, bits: int, alg: psa_storage.Expr ) -> bool: if key_type.string == 'PSA_KEY_TYPE_RAW_DATA': return False m = cls.RSA_OAEP_RE.match(alg.string) if m: hash_alg = m.group(1) hash_length = crypto_knowledge.Algorithm.hash_length(hash_alg) key_length = (bits + 7) // 8 return key_length > 2 * hash_length + 2 m = cls.BRAINPOOL_RE.match(key_type.string) if m and alg.string != 'PSA_ALG_ECDSA_ANY': return False return True
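To make the RSA-OAEP branch above concrete: per RFC 8017, OAEP can only encrypt a non-empty message when the modulus length in bytes exceeds 2·hLen + 2, which is exactly the `key_length > 2 * hash_length + 2` test. A worked instance (the 1024-bit/SHA-256 pairing is illustrative, not taken from the source):

```python
bits, hash_length = 1024, 32        # RSA-1024 with SHA-256 (hLen = 32 bytes)
key_length = (bits + 7) // 8        # 128 bytes, as computed in the method
assert key_length > 2 * hash_length + 2   # 128 > 66: the pair is exercisable
```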
/** * @author Dylan Cai */ public class MultipleHeaderActivity extends AppCompatActivity implements SearchHeaderViewDelegate.OnSearchListener { private LoadingStateView loadingStateView; @Override protected void onCreate(@Nullable Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.layout_content); loadingStateView = new LoadingStateView(this); loadingStateView.register(new NothingViewDelegate()); loadingStateView.setHeaders( new ToolbarViewDelegate("MultipleHeader(search)", NavIconType.BACK), new SearchHeaderViewDelegate(this) ); loadingStateView.showEmptyView(); } @Override public void onSearch(String keyword) { Toast.makeText(this, "search: " + keyword, Toast.LENGTH_SHORT).show(); loadingStateView.showLoadingView(); HttpUtils.requestSuccess(new HttpUtils.Callback() { @Override public void onSuccess() { loadingStateView.showContentView(); } @Override public void onFailure() { loadingStateView.showErrorView(); } }); } }
/** * gst_adapter_available: * @adapter: a #GstAdapter * * Gets the maximum amount of bytes available, that is it returns the maximum * value that can be supplied to gst_adapter_map() without that function * returning %NULL. * * Returns: number of bytes available in @adapter */ gsize gst_adapter_available (GstAdapter * adapter) { g_return_val_if_fail (GST_IS_ADAPTER (adapter), 0); return adapter->size; }
// Copyright 2021 The Chromium OS Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "media_capabilities/v4l2.h" #include <fcntl.h> #include <linux/videodev2.h> #include <string.h> #include <sys/stat.h> #include <sys/types.h> #include <algorithm> #include <map> #include <string> #include <utility> #include <vector> #include <base/containers/contains.h> #include <base/files/file_path.h> #include <base/files/scoped_file.h> #include <base/logging.h> #include <base/posix/eintr_wrapper.h> #include "media_capabilities/common.h" namespace { enum class Codec { kH264 = 0, kVP8, kVP9, kJPEG, kUnknown, }; const char* CodecToString(Codec codec) { switch (codec) { case Codec::kH264: return "H264"; case Codec::kVP8: return "VP8"; case Codec::kVP9: return "VP9"; case Codec::kJPEG: return "JPEG"; default: LOG(FATAL) << "Unknown codec: " << static_cast<int>(codec); return ""; } } Codec GetCodec(uint32_t format) { switch (format) { case V4L2_PIX_FMT_H264: case V4L2_PIX_FMT_H264_SLICE: return Codec::kH264; case V4L2_PIX_FMT_VP8: case V4L2_PIX_FMT_VP8_FRAME: return Codec::kVP8; case V4L2_PIX_FMT_VP9: case V4L2_PIX_FMT_VP9_FRAME: return Codec::kVP9; case V4L2_PIX_FMT_JPEG: case V4L2_PIX_FMT_JPEG_RAW: return Codec::kJPEG; default: return Codec::kUnknown; } } Profile V4L2ProfileToProfile(Codec codec, uint32_t profile) { switch (codec) { case Codec::kH264: switch (profile) { case V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE: case V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE: return Profile::kH264Baseline; case V4L2_MPEG_VIDEO_H264_PROFILE_MAIN: return Profile::kH264Main; case V4L2_MPEG_VIDEO_H264_PROFILE_HIGH: return Profile::kH264High; case V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED: case V4L2_MPEG_VIDEO_H264_PROFILE_STEREO_HIGH: case V4L2_MPEG_VIDEO_H264_PROFILE_MULTIVIEW_HIGH: break; } break; case Codec::kVP8: switch (profile) { case V4L2_MPEG_VIDEO_VP8_PROFILE_0: case V4L2_MPEG_VIDEO_VP8_PROFILE_1: case V4L2_MPEG_VIDEO_VP8_PROFILE_2: case V4L2_MPEG_VIDEO_VP8_PROFILE_3: return Profile::kVP8; } break; case Codec::kVP9: switch (profile) { case V4L2_MPEG_VIDEO_VP9_PROFILE_0: return Profile::kVP9Profile0; case V4L2_MPEG_VIDEO_VP9_PROFILE_2: return Profile::kVP9Profile2; case V4L2_MPEG_VIDEO_VP9_PROFILE_1: case V4L2_MPEG_VIDEO_VP9_PROFILE_3: break; } break; default: break; } return Profile::kNone; } // Return supported profiles for |codec|. If this function is called, a driver // must support at least one profile because the codec is enumerated by // VIDIOC_ENUM_FMT. std::vector<Profile> GetSupportedProfiles(int device_fd, const Codec codec) { // Since there is only one JPEG profile, there is no API to acquire the // supported JPEG profile. Returns the only JPEG profile. if (codec == Codec::kJPEG) return {Profile::kJPEG}; // TODO(b/189169588): Once drivers support V4L2_CID_MPEG_VIDEO_VP8_PROFILE, // call VIDIOC_QUERYMENU with it. 
if (codec == Codec::kVP8) return {Profile::kVP8}; uint32_t query_id = 0; switch (codec) { case Codec::kH264: query_id = V4L2_CID_MPEG_VIDEO_H264_PROFILE; break; case Codec::kVP9: query_id = V4L2_CID_MPEG_VIDEO_VP9_PROFILE; break; case Codec::kVP8: case Codec::kJPEG: default: LOG(FATAL) << "Unknown codec: " << static_cast<uint32_t>(codec); return {}; } v4l2_queryctrl query_ctrl; memset(&query_ctrl, 0, sizeof(query_ctrl)); query_ctrl.id = query_id; if (Ioctl(device_fd, VIDIOC_QUERYCTRL, &query_ctrl) != 0) { PLOG(FATAL) << "VIDIOC_QUERYCTRL failed: "; return {}; } std::vector<Profile> profiles; v4l2_querymenu query_menu; memset(&query_menu, 0, sizeof(query_menu)); query_menu.id = query_ctrl.id; for (query_menu.index = query_ctrl.minimum; static_cast<int>(query_menu.index) <= query_ctrl.maximum; query_menu.index++) { if (Ioctl(device_fd, VIDIOC_QUERYMENU, &query_menu) == 0) { const Profile profile = V4L2ProfileToProfile(codec, query_menu.index); if (profile != Profile::kNone && !base::Contains(profiles, profile)) profiles.push_back(profile); } } LOG_IF(FATAL, profiles.empty()) << "No profile is supported even though the " << "codec is enumerated by VIDIOC_ENUM_FMT"; return profiles; } std::pair<int, int> GetMaxResolution(int device_fd, const uint32_t format) { std::pair<int, int> max_resolution(0, 0); v4l2_frmsizeenum frame_size; memset(&frame_size, 0, sizeof(frame_size)); frame_size.pixel_format = format; for (; Ioctl(device_fd, VIDIOC_ENUM_FRAMESIZES, &frame_size) == 0; ++frame_size.index) { if (frame_size.type == V4L2_FRMSIZE_TYPE_DISCRETE) { if (frame_size.discrete.width >= static_cast<uint32_t>(max_resolution.first) && frame_size.discrete.height >= static_cast<uint32_t>(max_resolution.second)) { max_resolution.first = frame_size.discrete.width; max_resolution.second = frame_size.discrete.height; } } else if (frame_size.type == V4L2_FRMSIZE_TYPE_STEPWISE || frame_size.type == V4L2_FRMSIZE_TYPE_CONTINUOUS) { max_resolution.first = frame_size.stepwise.max_width; max_resolution.second = frame_size.stepwise.max_height; break; } } return max_resolution; } std::vector<Capability> GetCapabilitiesInPath(const base::FilePath& path, bool decode) { base::ScopedFD device_fd( HANDLE_EINTR(open(path.value().c_str(), O_RDWR | O_CLOEXEC))); if (!device_fd.is_valid()) return {}; std::vector<uint32_t> formats; v4l2_fmtdesc fmtdesc; memset(&fmtdesc, 0, sizeof(fmtdesc)); fmtdesc.type = decode ? V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE : V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; for (; Ioctl(device_fd.get(), VIDIOC_ENUM_FMT, &fmtdesc) == 0; ++fmtdesc.index) { formats.push_back(fmtdesc.pixelformat); } std::vector<Capability> capabilities; for (uint32_t format : formats) { const Codec codec = GetCodec(format); if (codec == Codec::kUnknown) continue; const std::vector<Profile> profiles = GetSupportedProfiles(device_fd.get(), codec); LOG_ASSERT(!profiles.empty()); const std::pair<int, int> max_resolution = GetMaxResolution(device_fd.get(), format); const std::vector<Resolution> resolutions = GetInterestingResolutionsUpTo(max_resolution); LOG_IF(FATAL, resolutions.empty()) << "The maximum supported resolution for " << CodecToString(codec) << " is too small: " << max_resolution.first << "x" << max_resolution.second; // V4L2 API doesn't have a way of querying supported subsamplings and color // depth. for (const Profile profile : profiles) { // TODO(b/172229001, b/188598699): For JPEG profiles, actually, supported // subsamplings can be queried by V4L2_CID_JPEG_CHROMA_SUBSAMPLING. But it // has never been used in Chrome OS. 
      // Call it once we confirm that it works
      // on all V4L2 devices. We temporarily behave as if all subsamplings are
      // supported to avoid false negatives.
      // TODO(b/172229001): For other profiles, we should guess them from
      // supported YUV formats of CAPTURE queue for decoding and of OUTPUT queue
      // for encoding.
      std::vector<Subsampling> subsamplings = {Subsampling::kYUV420};
      if (profile == Profile::kJPEG) {
        subsamplings = {Subsampling::kYUV420, Subsampling::kYUV422,
                        Subsampling::kYUV444};
      }
      for (const Subsampling subsampling : subsamplings) {
        const ColorDepth color_depth = profile == Profile::kVP9Profile2
                                           ? ColorDepth::k10bit
                                           : ColorDepth::k8bit;
        for (const Resolution resolution : resolutions) {
          capabilities.push_back(Capability(profile, decode, resolution,
                                            subsampling, color_depth));
        }
      }
    }
  }
  return capabilities;
}

std::vector<Capability> GetCapabilitiesInPaths(
    const std::vector<base::FilePath>& paths, bool decode) {
  std::vector<Capability> capabilities;
  for (const base::FilePath& path : paths) {
    for (auto&& c : GetCapabilitiesInPath(path, decode)) {
      if (!base::Contains(capabilities, c))
        capabilities.push_back(std::move(c));
    }
  }
  return capabilities;
}

std::vector<Capability> GetDecodeCapabilities() {
  const base::FilePath kVideoDecoderDevicePath("/dev/video-dec");
  const base::FilePath kJpegDecoderDevicePath("/dev/jpeg-dec");
  std::vector<base::FilePath> device_paths;
  auto video_decoder_device_paths =
      GetAllFilesWithPrefix(kVideoDecoderDevicePath);
  auto jpeg_decoder_device_paths =
      GetAllFilesWithPrefix(kJpegDecoderDevicePath);
  device_paths.insert(device_paths.end(), video_decoder_device_paths.begin(),
                      video_decoder_device_paths.end());
  device_paths.insert(device_paths.end(), jpeg_decoder_device_paths.begin(),
                      jpeg_decoder_device_paths.end());
  return GetCapabilitiesInPaths(device_paths, /*decode=*/true);
}

std::vector<Capability> GetEncodeCapabilities() {
  const base::FilePath kVideoEncoderDevicePath("/dev/video-enc");
  const base::FilePath kJpegEncoderDevicePath("/dev/jpeg-enc");
  std::vector<base::FilePath> device_paths;
  auto video_encoder_device_paths =
      GetAllFilesWithPrefix(kVideoEncoderDevicePath);
  auto jpeg_encoder_device_paths =
      GetAllFilesWithPrefix(kJpegEncoderDevicePath);
  device_paths.insert(device_paths.end(), video_encoder_device_paths.begin(),
                      video_encoder_device_paths.end());
  device_paths.insert(device_paths.end(), jpeg_encoder_device_paths.begin(),
                      jpeg_encoder_device_paths.end());
  return GetCapabilitiesInPaths(device_paths, /*decode=*/false);
}

}  // namespace

std::vector<Capability> DetectV4L2Capabilities() {
  auto decode_capabilities = GetDecodeCapabilities();
  auto encode_capabilities = GetEncodeCapabilities();
  auto& capabilities = decode_capabilities;
  capabilities.insert(capabilities.end(), encode_capabilities.begin(),
                      encode_capabilities.end());
  return capabilities;
}
// CompressedClone creates an LZ4-compressed clone if the record is not
// already compressed; otherwise it returns the receiver unchanged.
func (r *RecordMessage) CompressedClone() (*RecordMessage, error) {
	if r.RecordHeader.Compression() != Compression_None || len(r.Data) == 0 {
		return r, nil
	}
	if len(r.Data) > MaxDataSizeUncompressed {
		return nil, ErrOverflow
	}
	if len(r.Data) < MinCompressSize {
		// Too small to be worth compressing.
		return r, nil
	}

	helper := lz4Pool.Get().(*lz4Helper)
	// Keep the helper until its buffer has been copied out below; returning
	// it to the pool before the copy would let another goroutine overwrite
	// the buffer.
	defer lz4Pool.Put(helper)

	n, err := lz4.CompressBlock(r.Data, helper.buf[0:], helper.ht[0:])
	if err != nil {
		return nil, err
	}
	if n >= len(r.Data) {
		// Compression did not help; keep the original.
		return r, nil
	}
	if n > MaxDataSize {
		return nil, ErrOverflow
	}

	m := GetRecord(n)
	m.RecordHeader = r.RecordHeader
	copy(m.Data, helper.buf[0:n])
	m.RecordHeader.Mut().SetCompression(Compression_LZ4).
		SetSizeX(uint16(n)).
		SetSize(m.RecordHeader.SizeX())
	return m, nil
}
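A hedged usage sketch for CompressedClone, not part of the original file; the function name is invented, and it relies only on the types and constants referenced above. The point illustrated is distinguishing the shared-receiver result from a freshly allocated clone.

func exampleCompressedClone(r *RecordMessage) (*RecordMessage, error) {
	clone, err := r.CompressedClone()
	if err != nil {
		// ErrOverflow: the payload exceeds MaxDataSize(Uncompressed).
		return nil, err
	}
	if clone == r {
		// Already compressed, empty, tiny, or incompressible: the receiver
		// itself was returned and no new record was allocated.
		return r, nil
	}
	// A fresh LZ4-compressed record; the original is left untouched.
	return clone, nil
}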
The new series of The X-Files has been partly prompted by recent exposés about government snooping, according to the show’s creator, Chris Carter.

“It’s a perfect time to come back with The X-Files considering global politics,” said Carter, who was speaking in Cannes shortly after the first episode of the new series had premiered. “We’re trying to be honest with the changes dealing with digital technology: the capability of spying. Clearly we’re being spied on in the US – or at least spying on you – and there seems to be no shame in it.”

The new six-episode series reunites agents Mulder and Scully – reprised by David Duchovny and Gillian Anderson – with its first episode setting up a series arc promising “the most evil conspiracy the world has ever known”. Fans can expect some tearing up of history – “Roswell? That was a smokescreen” – and some sharp observations on developments in national security since the 9/11 attacks. There is also fresh insight into Mulder and Scully’s relationship.

Following the screening at the Mipcom conference, Carter said he had never lost his appetite for the show. “Every day I look at the newspaper and I see a possible X-Files episode,” he said. “I did it for a long time and you never quite lose the eye for what would be good X-Files storytelling.”

He described the show as “a story about the modern pursuit of science by scientists”, and while admitting that he was part Mulder and part Scully in his views on all things extra-terrestrial, he said: “Believe it or not, I’m a sceptic. I’ve never been visited by aliens.”

Carter praised Duchovny and Anderson, saying their chemistry was the reason The X-Files was so successful first time round and has been so fondly remembered by fans since. “If not for them, the anchor wouldn’t be there. We’ve told good stories along the way, but without those two characters and their involvement, I think you’d have had less of a show,” he said.

Carter said he was optimistic about making more new series after the six-episode run, but warned that they would not be as long as the original seasons. “For the future of The X-Files, if we are to come back, I think you’ll see us come back with these mini-series, if you will. Special events. We used to tell a story over a long saga, 22 to 25 episodes. Now the arc is much sharper, from one to six. So I think we pack a lot more into these six episodes.”

Carter was careful to keep spoilers to a minimum during his interview, although he hinted at surprises for fans hoping for more characters from the original series to reappear. But possibly also some shocks. “Modern television asks that you kill off a main character very quickly,” he said. “I can tell you that … well, I’m not going to tell you!”

One new character, Tad O’Malley, a conspiracy theorist who makes online videos, is based on Carter’s own web surfing in the years since The X-Files was last broadcast. “Right now on the internet there are 500 conspiracy sites, and there are people like Tad O’Malley out there who have got the public’s attention. And I’m interested in these people,” he said.

Carter said the new series would include at least one “monster-of-the-week” episode – a standalone story featuring a single alien – but that they would also explore the mythology of the X-Files characters.

He is already planning for the future. “I love telling X-Files stories. The truth is out there! If we satisfy audience expectations, certainly there’s an opportunity,” he said.
“Mulder and Scully will be in wheelchairs before they are wheeled off stage!”
/**
 * Container class for TF potions.
 *
 * @since 0.3.6.4
 * @version 0.3.6.4
 * @author Stadler76
 */
public class TransformativePotion extends AbstractPotion {
	private final Body body;

	public TransformativePotion(AbstractItemType itemType, List<PossibleItemEffect> effects, Body body) {
		super(itemType, effects);
		this.body = body;
	}

	public TransformativePotion(AbstractItemType itemType, List<PossibleItemEffect> effects) {
		this(itemType, effects, null);
	}

	public Body getBody() {
		return body;
	}
}
import Camera from "./camera"
import Calc from "../calc"

const HALF = 0.5
const HALF_PI = Math.PI * HALF
const ALMOST_HALF_PI = 1.570796326794896

/**
 * Abstract camera for 3D space.
 */
export default abstract class Space extends Camera {
    protected cameraMatrix = new Float32Array(16)

    /**
     * The camera looks at (targetX, targetY, targetZ)
     * and it is at a distance of `distance`.
     * That defines a sphere. We use `latitude` and `longitude` to know
     * where the camera lies on the sphere.
     * @param latitude - Expressed in radians.
     * @param longitude - Expressed in radians.
     */
    orbit(
        targetX: number,
        targetY: number,
        targetZ: number,
        distance: number,
        latitude: number,
        longitude: number
    ) {
        const lat = Calc.clamp(latitude, -ALMOST_HALF_PI, ALMOST_HALF_PI)
        const lng = longitude - HALF_PI
        const cosLat = Math.cos(lat)
        const sinLat = Math.sin(lat)
        const cosLng = Math.cos(lng)
        const sinLng = Math.sin(lng)
        // Z axis of the camera.
        const Zx = cosLng * cosLat
        const Zy = sinLat
        const Zz = sinLng * cosLat
        // The X axis comes from the cross product of (0,1,0) with Z.
        let Xx = -Zz
        let Xy = 0
        let Xz = Zx
        // Since (0,1,0) is not orthogonal to Z, X must be normalized.
        const len = Math.sqrt(Xx * Xx + Xy * Xy + Xz * Xz)
        Xx /= len
        Xy /= len
        Xz /= len
        // Y can then be obtained as the cross product of X with Z.
        // It does not need to be normalized.
        const Yx = Zz * Xy - Zy * Xz
        const Yy = Xz * Zx - Xx * Zz
        const Yz = Zy * Xx - Zx * Xy
        // Translation.
        const Tx = -(Zx * distance + targetX)
        const Ty = -(Zy * distance + targetY)
        const Tz = -(Zz * distance + targetZ)
        // The result is the product of the rotation with the translation.
        const result = this.cameraMatrix
        result[Calc.M4_00] = Xx
        result[Calc.M4_01] = Xy
        result[Calc.M4_02] = Xz
        result[Calc.M4_03] = Tx * Xx + Ty * Xy + Tz * Xz
        result[Calc.M4_10] = Yx
        result[Calc.M4_11] = Yy
        result[Calc.M4_12] = Yz
        result[Calc.M4_13] = Tx * Yx + Ty * Yy + Tz * Yz
        result[Calc.M4_20] = Zx
        result[Calc.M4_21] = Zy
        result[Calc.M4_22] = Zz
        result[Calc.M4_23] = Tx * Zx + Ty * Zy + Tz * Zz
        result[Calc.M4_30] = 0
        result[Calc.M4_31] = 0
        result[Calc.M4_32] = 0
        result[Calc.M4_33] = 1
    }
}
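A usage sketch, not from the original source: Space is abstract, so a trivial subclass is assumed here; if the Camera base class declares further abstract members, they would need stubs as well.

// Hypothetical concrete subclass, only for illustration.
class OrbitingCamera extends Space {}

const camera = new OrbitingCamera()
// Orbit the origin at distance 10, 30° above the sphere's equator.
camera.orbit(0, 0, 0, 10, Math.PI / 6, 0)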
/** Create a Bundle to simulate an incoming notification message. */
static Bundle createNotificationMessage() {
    Bundle bundle = new Bundle();
    bundle.putString(KEY_BODY, "Hello World");
    return bundle;
}
#! /usr/bin/env python3.5
# Copyright 2017 <NAME>
# vim:ts=4:sw=4:ai:et:si:sts=4

import logging

from peewee import Model, Proxy, OperationalError, IntegrityError
from playhouse.pool import PooledSqliteDatabase

logger = logging.getLogger(__name__)

db_proxy = Proxy()


def exceptionDetails(e):
    # Minimal stand-in for the helper the logging calls below expect;
    # returns a short "ExceptionType: message" summary.
    return "%s: %s" % (type(e).__name__, e)


class BaseModel(Model):
    fields = []

    class Meta:
        database = db_proxy

    def __str__(self):
        attrs = {attr: str(getattr(self, attr, None)) for attr in self.fields}
        return str(attrs)


class Database(object):
    def __init__(self, filename, tables, foreign_keys=None):
        self.filename = filename
        maxConn = 50
        timeout = 600
        self.db = PooledSqliteDatabase(self.filename,
                                       max_connections=maxConn,
                                       stale_timeout=timeout)
        db_proxy.initialize(self.db)

        logger.info("Connecting to database %s" % self.filename)
        self.db.connect()
        self.db.create_tables(tables, safe=True)

        if foreign_keys:
            for (klass, key) in foreign_keys.items():
                try:
                    self.db.create_foreign_key(klass, key)
                except OperationalError as e:
                    (code, message) = e.args
                    if code != 1022:
                        logger.exception(exceptionDetails(e))
                except IntegrityError as e:
                    (code, message) = e.args
                    if code != 1215:
                        logger.exception(exceptionDetails(e))
                except Exception as e:
                    logger.exception(exceptionDetails(e))

        self.db.close()

    def execution_context(self):
        return self.db.execution_context()

    def bulkSave(self, objList, ignoreDupes=False):
        with self.db.execution_context():
            for obj in objList:
                try:
                    obj.save()
                except IntegrityError as e:
                    if not ignoreDupes:
                        logger.exception(exceptionDetails(e))
                except Exception as e:
                    logger.exception(exceptionDetails(e))

    def get_or_create_save(self, klass, item):
        with self.db.execution_context():
            (dbitem, created) = klass.get_or_create(**item)
            if created:
                dbitem.save()
            return dbitem
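A short, hedged usage sketch for the Database wrapper above; the User model and its field are invented for illustration and are not part of the original module.

from peewee import CharField


class User(BaseModel):
    fields = ["name"]          # consumed by BaseModel.__str__
    name = CharField(unique=True)


if __name__ == "__main__":
    # Creates example.db (pooled SQLite) and the users table if missing.
    db = Database("example.db", tables=[User])
    db.bulkSave([User(name="alice"), User(name="bob")], ignoreDupes=True)
    carol = db.get_or_create_save(User, {"name": "carol"})
    print(carol)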
summan = input()

# Collect the digits 1, 2 and 3 from the expression; the input consists only
# of these digits separated by '+' signs.
num = []
for ch in summan:
    if '0' < ch < '4':
        num.append(int(ch))

num.sort()

# Print the summands in non-decreasing order, joined by '+'.
print('+'.join(str(n) for n in num))
{-# LANGUAGE ConstraintKinds   #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE GADTs             #-}

module Data.Functor.Constraint.Wrap where

import Data.Proxy (Proxy)

-- | A wrapper type where the first argument is a given constraint on the value.
data WrapC c a where
  WrapC :: c a => a -> WrapC c a

instance Show a => Show (WrapC Show a) where
  show (WrapC x) = show x

-- | No `Functor` instance for `WrapC`; the constraints won't allow it.
wmap :: c b => (a -> b) -> WrapC c a -> WrapC c b
wmap f (WrapC x) = WrapC (f x)

-- | Variant of @c a => a -> _ c a@; the constraint is applied differently.
--
-- @
-- resolveE :: P c (E c) -> P c (E ())
-- @
data WrapE c a where
  WrapE :: c (WrapE c) => a -> WrapE c a

instance Functor (WrapE c) where
  fmap f (WrapE x) = WrapE (f x)

-- | Variant of @c a => a -> _ c a@ with no value.
--
-- @
-- resolveE :: P c (E c) -> P c (E ())
--
-- -- • Could not deduce: c b arising from a use of ‘P’
-- --   from the context: c a
-- --     bound by a pattern with constructor:
-- --       P :: forall (c :: * -> Constraint) a. c a => Proxy a -> P c a,
-- --     in an equation for ‘fmap’
-- --     at /Users/michaelklein/Desktop/flock/pieces/through/src/Scratch/Hs4.hs:1458:11-13
-- -- • In the expression: P (fmap f x)
-- --   In an equation for ‘fmap’: fmap f (P x) = P (fmap f x)
-- --   In the instance declaration for ‘Functor (P c)’
--
-- instance Functor (P c) where
--   fmap f (P x) = P (fmap f x)
-- @
data WrapP c a where
  WrapP :: c a => Proxy a -> WrapP c a

-- | No `Functor` instance for `WrapP`; the constraints won't allow it.
pmap :: c b => (a -> b) -> WrapP c a -> WrapP c b
pmap f (WrapP x) = WrapP (fmap f x)
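A small usage sketch, not part of the original module: mapping goes through `wmap`, which is where the `Show` constraint on the result type is discharged.

exampleWmap :: WrapC Show Int
exampleWmap = wmap (+ 1) (WrapC 41)
-- show exampleWmap evaluates to "42", via the Show (WrapC Show a) instance above.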
import settingsSchemaJSON from '../../../schema/settings.schema.json'

import { mergeSettingsSchemas } from './configuration'

describe('mergeSettingsSchemas', () => {
    it('handles empty', () =>
        expect(mergeSettingsSchemas([])).toEqual({
            allOf: [{ $ref: settingsSchemaJSON.$id }],
        }))

    it('overwrites additionalProperties and required', () =>
        expect(
            mergeSettingsSchemas([
                {
                    manifest: {
                        url: '',
                        activationEvents: [],
                        contributes: {
                            configuration: { additionalProperties: false, properties: { a: { type: 'string' } } },
                        },
                    },
                },
                {
                    manifest: {
                        url: '',
                        activationEvents: [],
                        contributes: {
                            configuration: { required: ['b'], properties: { b: { type: 'string' } } },
                        },
                    },
                },
            ])
        ).toEqual({
            allOf: [
                { $ref: settingsSchemaJSON.$id },
                { additionalProperties: true, required: [], properties: { a: { type: 'string' } } },
                { additionalProperties: true, required: [], properties: { b: { type: 'string' } } },
            ],
        }))

    it('handles error and null configuration', () =>
        expect(
            mergeSettingsSchemas([
                {
                    manifest: {
                        url: '',
                        activationEvents: [],
                        contributes: {
                            configuration: { additionalProperties: false, properties: { a: { type: 'string' } } },
                        },
                    },
                },
                {
                    manifest: new Error('x'),
                },
                {
                    manifest: null,
                },
                {
                    manifest: { url: '', activationEvents: [] },
                },
                {
                    manifest: { url: '', activationEvents: [], contributes: {} },
                },
            ])
        ).toEqual({
            allOf: [
                { $ref: settingsSchemaJSON.$id },
                { additionalProperties: true, required: [], properties: { a: { type: 'string' } } },
            ],
        }))
})
from math import gcd


def mlt():
    return map(int, input().split())


def arp():
    return [*mlt()]


def solv():
    x, y, z = mlt()
    if z == min(x, y):
        # gcd(10^(x-1), 10^(y-1)) = 10^(min(x, y) - 1), which has exactly
        # z digits when z == min(x, y).
        print(10**(x - 1), 10**(y - 1))
        return
    # Build an x-digit multiple of 10^(z-1) from a power of 2 and a y-digit
    # one from a power of 3. The two powers are coprime, so the gcd is
    # exactly 10^(z-1): a z-digit number.
    p = x - z + 1
    q = y - z + 1
    a = 1
    while a < 10**p:
        a *= 2
    a //= 2
    b = 1
    while b < 10**q:
        b *= 3
    b //= 3
    x1 = a * 10**(x - p)
    x2 = b * 10**(y - q)
    print(x1, x2)


for _ in range(int(input())):
    solv()
import { ViewEncapsulation, Component, OnInit, ViewChild } from '@angular/core';
import { ActivatedRoute, Router } from '@angular/router';
import { OTextInputComponent, OTranslateService } from 'ontimize-web-ngx';
import { MatSnackBar } from '@angular/material';

@Component({
  selector: 'branches-detail',
  templateUrl: './branches-detail.component.html',
  styleUrls: ['./branches-detail.component.scss'],
  encapsulation: ViewEncapsulation.None,
  host: {
    '[class.branches-detail]': 'true'
  }
})
export class BranchesDetailComponent implements OnInit {

  @ViewChild('officeId') officeId: OTextInputComponent;

  public longitude;
  public latitude;
  public name;

  constructor(
    public snackBar: MatSnackBar,
    private translateService: OTranslateService
  ) { }

  ngOnInit() {
  }

  getFileData() {
    return {
      'OFFICEID': this.officeId.getValue()
    };
  }

  onUploadFile(e: Event) {
    this.snackBar.open(this.translateService.get('BRANCH_PLAN_UPLOADED'),
      this.translateService.get('ACCEPT'), {
        duration: 2000,
      });
  }

  onFormDataLoaded(data: any) {
    if (data.MINLATITUDE) {
      this.latitude = data.MINLATITUDE;
    }
    if (data.MINLONGITUDE) {
      this.longitude = data.MINLONGITUDE;
    }
  }

  hasGPSPosition() {
    return !!(this.latitude && this.longitude);
  }

  getPositionGPS() {
    return this.latitude + ',' + this.longitude;
  }
}
import Vue from "vue";
import App from "./App.vue";
import { createRouter } from "./router";
import VueTruncate from "vue-truncate-filter";
import "./assets/custom.scss";
import Meta from "vue-meta";
import * as Sentry from "@sentry/browser";
import "@/assets/global.css";
import VueScrollTo from "vue-scrollto";
import Store from "@/store/Store";
import VueClipboard from "vue-clipboard2";
import "core-js/stable";
import "regenerator-runtime/runtime";
import { Vue as VueIntegration } from "@sentry/integrations";
import { ResizeObserver as ResizeObserverPolyfill } from "@juggle/resize-observer";

const isProd = process.env.NODE_ENV === "production";

Vue.config.productionTip = false;

if (isProd) {
  Sentry.init({
    dsn: process.env.VUE_APP_SENTRY_DSN,
    integrations: [new VueIntegration({ Vue, attachProps: true })],
  });
} else {
  //makeServer();
}

if (typeof window !== "undefined") {
  // Needed for chartjs 3.x compatibility with older browsers.
  window.ResizeObserver = window.ResizeObserver || ResizeObserverPolyfill;
}

//Vue.use(AsyncComputedPlugin);
VueClipboard.config.autoSetContainer = true;
Vue.use(VueClipboard);
Vue.use(VueTruncate);
Vue.use(Meta);
Vue.use(VueScrollTo);

const router = createRouter();

const createApp = () => {
  const store = new Store();
  const app = new Vue({
    router,
    data() {
      return {
        store: store,
      };
    },
    render: (h) => h(App),
    mounted() {
      document.dispatchEvent(new Event("x-app-rendered"));
    },
  });
  return { app, router, store };
};

export default createApp;
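A plausible client entry point for the factory above, offered as a sketch; the "./main" module path and the "#app" mount selector are assumptions, not something the file specifies.

import createApp from "./main"; // assumed path of the factory module above

const { app } = createApp();
// Mount into the page; "#app" must match the element App.vue renders into.
app.$mount("#app");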