code
stringlengths 3
1.05M
| repo_name
stringlengths 4
116
| path
stringlengths 3
942
| language
stringclasses 30
values | license
stringclasses 15
values | size
int32 3
1.05M
|
---|---|---|---|---|---|
// filesystem path.hpp ---------------------------------------------------------------//
// Copyright Beman Dawes 2002-2005, 2009
// Copyright Vladimir Prus 2002
// Distributed under the Boost Software License, Version 1.0.
// See http://www.boost.org/LICENSE_1_0.txt
// Library home page: http://www.boost.org/libs/filesystem
// path::stem(), extension(), and replace_extension() are based on
// basename(), extension(), and change_extension() from the original
// filesystem/convenience.hpp header by Vladimir Prus.
#ifndef BOOST_FILESYSTEM_PATH_HPP
#define BOOST_FILESYSTEM_PATH_HPP
#include <boost/config.hpp>
# if defined( BOOST_NO_STD_WSTRING )
# error Configuration not supported: Boost.Filesystem V3 and later requires std::wstring support
# endif
#include <boost/assert.hpp>
#include <boost/filesystem/config.hpp>
#include <boost/filesystem/path_traits.hpp> // includes <cwchar>
#include <boost/system/error_code.hpp>
#include <boost/system/system_error.hpp>
#include <boost/iterator/iterator_facade.hpp>
#include <boost/core/enable_if.hpp>
#include <boost/io/detail/quoted_manip.hpp>
#include <boost/functional/hash_fwd.hpp>
#include <boost/type_traits/is_integral.hpp>
#include <string>
#include <iterator>
#include <cstring>
#include <iosfwd>
#include <stdexcept>
#include <cassert>
#include <locale>
#include <algorithm>
#include <boost/config/abi_prefix.hpp> // must be the last #include
namespace boost
{
namespace filesystem
{
namespace path_detail // intentionally don't use filesystem::detail to not bring internal Boost.Filesystem functions into ADL via path_constants
{
// Compile-time character constants (separator, preferred separator, dot) for a
// given character type. class path derives from an instantiation of this
// template, selecting wchar_t/backslash on Windows and char/slash on POSIX.
template< typename Char, Char Separator, Char PreferredSeparator, Char Dot >
struct path_constants
{
typedef path_constants< Char, Separator, PreferredSeparator, Dot > path_constants_base;
typedef Char value_type;
static BOOST_CONSTEXPR_OR_CONST value_type separator = Separator;
static BOOST_CONSTEXPR_OR_CONST value_type preferred_separator = PreferredSeparator;
static BOOST_CONSTEXPR_OR_CONST value_type dot = Dot;
};
// Out-of-class definitions of the static data members so the constants can be
// ODR-used (e.g. bound to a const reference) prior to C++17 inline variables.
template< typename Char, Char Separator, Char PreferredSeparator, Char Dot >
BOOST_CONSTEXPR_OR_CONST typename path_constants< Char, Separator, PreferredSeparator, Dot >::value_type
path_constants< Char, Separator, PreferredSeparator, Dot >::separator;
template< typename Char, Char Separator, Char PreferredSeparator, Char Dot >
BOOST_CONSTEXPR_OR_CONST typename path_constants< Char, Separator, PreferredSeparator, Dot >::value_type
path_constants< Char, Separator, PreferredSeparator, Dot >::preferred_separator;
template< typename Char, Char Separator, Char PreferredSeparator, Char Dot >
BOOST_CONSTEXPR_OR_CONST typename path_constants< Char, Separator, PreferredSeparator, Dot >::value_type
path_constants< Char, Separator, PreferredSeparator, Dot >::dot;
} // namespace path_detail
//------------------------------------------------------------------------------------//
// //
// class path //
// //
//------------------------------------------------------------------------------------//
class path :
public filesystem::path_detail::path_constants<
#ifdef BOOST_WINDOWS_API
wchar_t, L'/', L'\\', L'.'
#else
char, '/', '/', '.'
#endif
>
{
public:
// value_type is the character type used by the operating system API to
// represent paths.
typedef path_constants_base::value_type value_type;
typedef std::basic_string<value_type> string_type;
typedef std::codecvt<wchar_t, char,
std::mbstate_t> codecvt_type;
// ----- character encoding conversions -----
// Following the principle of least astonishment, path input arguments
// passed to or obtained from the operating system via objects of
// class path behave as if they were directly passed to or
// obtained from the O/S API, unless conversion is explicitly requested.
//
// POSIX specfies that path strings are passed unchanged to and from the
// API. Note that this is different from the POSIX command line utilities,
// which convert according to a locale.
//
// Thus for POSIX, char strings do not undergo conversion. wchar_t strings
// are converted to/from char using the path locale or, if a conversion
// argument is given, using a conversion object modeled on
// std::wstring_convert.
//
// The path locale, which is global to the thread, can be changed by the
// imbue() function. It is initialized to an implementation defined locale.
//
// For Windows, wchar_t strings do not undergo conversion. char strings
// are converted using the "ANSI" or "OEM" code pages, as determined by
// the AreFileApisANSI() function, or, if a conversion argument is given,
// using a conversion object modeled on std::wstring_convert.
//
// See m_pathname comments for further important rationale.
// TODO: rules needed for operating systems that use / or .
// differently, or format directory paths differently from file paths.
//
// **********************************************************************************
//
// More work needed: How to handle an operating system that may have
// slash characters or dot characters in valid filenames, either because
// it doesn't follow the POSIX standard, or because it allows MBCS
// filename encodings that may contain slash or dot characters. For
// example, ISO/IEC 2022 (JIS) encoding which allows switching to
// JIS x0208-1983 encoding. A valid filename in this set of encodings is
// 0x1B 0x24 0x42 [switch to X0208-1983] 0x24 0x2F [U+304F Kiragana letter KU]
// ^^^^
// Note that 0x2F is the ASCII slash character
//
// **********************************************************************************
// Supported source arguments: half-open iterator range, container, c-array,
// and single pointer to null terminated string.
// All source arguments except pointers to null terminated byte strings support
// multi-byte character strings which may have embedded nulls. Embedded null
// support is required for some Asian languages on Windows.
// "const codecvt_type& cvt=codecvt()" default arguments are not used because this
// limits the impact of locale("") initialization failures on POSIX systems to programs
// that actually depend on locale(""). It further ensures that exceptions thrown
// as a result of such failues occur after main() has started, so can be caught.
// ----- constructors -----
path() BOOST_NOEXCEPT {}
path(const path& p) : m_pathname(p.m_pathname) {}
// Accepts any "pathable" source (per path_traits::is_pathable);
// path_traits::dispatch performs any needed encoding conversion.
template <class Source>
path(Source const& source,
typename boost::enable_if<path_traits::is_pathable<
typename boost::decay<Source>::type> >::type* =0)
{
path_traits::dispatch(source, m_pathname);
}
// Overloads for the native character/string type: stored directly, no conversion.
path(const value_type* s) : m_pathname(s) {}
path(value_type* s) : m_pathname(s) {}
path(const string_type& s) : m_pathname(s) {}
path(string_type& s) : m_pathname(s) {}
// As of October 2015 the interaction between noexcept and =default is so troublesome
// for VC++, GCC, and probably other compilers, that =default is not used with noexcept
// functions. GCC is not even consistent for the same release on different platforms.
# if !defined(BOOST_NO_CXX11_RVALUE_REFERENCES)
path(path&& p) BOOST_NOEXCEPT : m_pathname(std::move(p.m_pathname)) {}
path& operator=(path&& p) BOOST_NOEXCEPT
{ m_pathname = std::move(p.m_pathname); return *this; }
# endif
// Constructs from a pathable source using an explicitly supplied codecvt facet.
template <class Source>
path(Source const& source, const codecvt_type& cvt)
{
path_traits::dispatch(source, m_pathname, cvt);
}
template <class InputIterator>
path(InputIterator begin, InputIterator end)
{
if (begin != end)
{
// convert requires contiguous string, so copy
std::basic_string<typename std::iterator_traits<InputIterator>::value_type>
seq(begin, end);
path_traits::convert(seq.c_str(), seq.c_str()+seq.size(), m_pathname);
}
}
template <class InputIterator>
path(InputIterator begin, InputIterator end, const codecvt_type& cvt)
{
if (begin != end)
{
// convert requires contiguous string, so copy
std::basic_string<typename std::iterator_traits<InputIterator>::value_type>
seq(begin, end);
path_traits::convert(seq.c_str(), seq.c_str()+seq.size(), m_pathname, cvt);
}
}
// ----- assignments -----
path& operator=(const path& p)
{
m_pathname = p.m_pathname;
return *this;
}
template <class Source>
typename boost::enable_if<path_traits::is_pathable<
typename boost::decay<Source>::type>, path&>::type
operator=(Source const& source)
{
m_pathname.clear();
path_traits::dispatch(source, m_pathname);
return *this;
}
// value_type overloads
path& operator=(const value_type* ptr) // required in case ptr overlaps *this
{m_pathname = ptr; return *this;}
path& operator=(value_type* ptr) // required in case ptr overlaps *this
{m_pathname = ptr; return *this;}
path& operator=(const string_type& s) {m_pathname = s; return *this;}
path& operator=(string_type& s) {m_pathname = s; return *this;}
path& assign(const value_type* ptr, const codecvt_type&) // required in case ptr overlaps *this
{m_pathname = ptr; return *this;}
template <class Source>
path& assign(Source const& source, const codecvt_type& cvt)
{
m_pathname.clear();
path_traits::dispatch(source, m_pathname, cvt);
return *this;
}
template <class InputIterator>
path& assign(InputIterator begin, InputIterator end)
{
m_pathname.clear();
if (begin != end)
{
std::basic_string<typename std::iterator_traits<InputIterator>::value_type>
seq(begin, end);
path_traits::convert(seq.c_str(), seq.c_str()+seq.size(), m_pathname);
}
return *this;
}
template <class InputIterator>
path& assign(InputIterator begin, InputIterator end, const codecvt_type& cvt)
{
m_pathname.clear();
if (begin != end)
{
std::basic_string<typename std::iterator_traits<InputIterator>::value_type>
seq(begin, end);
path_traits::convert(seq.c_str(), seq.c_str()+seq.size(), m_pathname, cvt);
}
return *this;
}
// ----- concatenation -----
// Unlike operator/=, concatenation never inserts a separator.
template <class Source>
typename boost::enable_if<path_traits::is_pathable<
typename boost::decay<Source>::type>, path&>::type
operator+=(Source const& source)
{
return concat(source);
}
// value_type overloads. Same rationale as for constructors above
path& operator+=(const path& p) { m_pathname += p.m_pathname; return *this; }
path& operator+=(const value_type* ptr) { m_pathname += ptr; return *this; }
path& operator+=(value_type* ptr) { m_pathname += ptr; return *this; }
path& operator+=(const string_type& s) { m_pathname += s; return *this; }
path& operator+=(string_type& s) { m_pathname += s; return *this; }
path& operator+=(value_type c) { m_pathname += c; return *this; }
// Concatenates a single character of any integral character type by
// converting it as a one-character null-terminated string.
template <class CharT>
typename boost::enable_if<boost::is_integral<CharT>, path&>::type
operator+=(CharT c)
{
CharT tmp[2];
tmp[0] = c;
tmp[1] = 0;
return concat(tmp);
}
template <class Source>
path& concat(Source const& source)
{
path_traits::dispatch(source, m_pathname);
return *this;
}
template <class Source>
path& concat(Source const& source, const codecvt_type& cvt)
{
path_traits::dispatch(source, m_pathname, cvt);
return *this;
}
template <class InputIterator>
path& concat(InputIterator begin, InputIterator end)
{
if (begin == end)
return *this;
std::basic_string<typename std::iterator_traits<InputIterator>::value_type>
seq(begin, end);
path_traits::convert(seq.c_str(), seq.c_str()+seq.size(), m_pathname);
return *this;
}
template <class InputIterator>
path& concat(InputIterator begin, InputIterator end, const codecvt_type& cvt)
{
if (begin == end)
return *this;
std::basic_string<typename std::iterator_traits<InputIterator>::value_type>
seq(begin, end);
path_traits::convert(seq.c_str(), seq.c_str()+seq.size(), m_pathname, cvt);
return *this;
}
// ----- appends -----
// if a separator is added, it is the preferred separator for the platform;
// slash for POSIX, backslash for Windows
BOOST_FILESYSTEM_DECL path& operator/=(const path& p);
template <class Source>
typename boost::enable_if<path_traits::is_pathable<
typename boost::decay<Source>::type>, path&>::type
operator/=(Source const& source)
{
return append(source);
}
BOOST_FILESYSTEM_DECL path& operator/=(const value_type* ptr);
path& operator/=(value_type* ptr)
{
return this->operator/=(const_cast<const value_type*>(ptr));
}
path& operator/=(const string_type& s) { return this->operator/=(path(s)); }
path& operator/=(string_type& s) { return this->operator/=(path(s)); }
path& append(const value_type* ptr) // required in case ptr overlaps *this
{
this->operator/=(ptr);
return *this;
}
path& append(const value_type* ptr, const codecvt_type&) // required in case ptr overlaps *this
{
this->operator/=(ptr);
return *this;
}
// Template append overloads; implementations follow the class definition.
template <class Source>
path& append(Source const& source);
template <class Source>
path& append(Source const& source, const codecvt_type& cvt);
template <class InputIterator>
path& append(InputIterator begin, InputIterator end);
template <class InputIterator>
path& append(InputIterator begin, InputIterator end, const codecvt_type& cvt);
// ----- modifiers -----
void clear() BOOST_NOEXCEPT { m_pathname.clear(); }
# ifdef BOOST_POSIX_API
path& make_preferred() { return *this; } // POSIX no effect
# else // BOOST_WINDOWS_API
BOOST_FILESYSTEM_DECL path& make_preferred(); // change slashes to backslashes
# endif
BOOST_FILESYSTEM_DECL path& remove_filename();
BOOST_FILESYSTEM_DECL path& remove_trailing_separator();
BOOST_FILESYSTEM_DECL path& replace_extension(const path& new_extension = path());
void swap(path& rhs) BOOST_NOEXCEPT { m_pathname.swap(rhs.m_pathname); }
// ----- observers -----
// For operating systems that format file paths differently than directory
// paths, return values from observers are formatted as file names unless there
// is a trailing separator, in which case returns are formatted as directory
// paths. POSIX and Windows make no such distinction.
// Implementations are permitted to return const values or const references.
// The string or path returned by an observer are specified as being formatted
// as "native" or "generic".
//
// For POSIX, these are all the same format; slashes and backslashes are as input and
// are not modified.
//
// For Windows, native: as input; slashes and backslashes are not modified;
// this is the format of the internally stored string.
// generic: backslashes are converted to slashes
// ----- native format observers -----
const string_type& native() const BOOST_NOEXCEPT { return m_pathname; }
const value_type* c_str() const BOOST_NOEXCEPT { return m_pathname.c_str(); }
string_type::size_type size() const BOOST_NOEXCEPT { return m_pathname.size(); }
// Generic accessors; specialized for std::string and std::wstring after the
// class definition so callers can write p.string<std::wstring>().
template <class String>
String string() const;
template <class String>
String string(const codecvt_type& cvt) const;
# ifdef BOOST_WINDOWS_API
const std::string string() const
{
std::string tmp;
if (!m_pathname.empty())
path_traits::convert(m_pathname.c_str(), m_pathname.c_str()+m_pathname.size(),
tmp);
return tmp;
}
const std::string string(const codecvt_type& cvt) const
{
std::string tmp;
if (!m_pathname.empty())
path_traits::convert(m_pathname.c_str(), m_pathname.c_str()+m_pathname.size(),
tmp, cvt);
return tmp;
}
// string_type is std::wstring, so there is no conversion
const std::wstring& wstring() const { return m_pathname; }
const std::wstring& wstring(const codecvt_type&) const { return m_pathname; }
# else // BOOST_POSIX_API
// string_type is std::string, so there is no conversion
const std::string& string() const { return m_pathname; }
const std::string& string(const codecvt_type&) const { return m_pathname; }
const std::wstring wstring() const
{
std::wstring tmp;
if (!m_pathname.empty())
path_traits::convert(m_pathname.c_str(), m_pathname.c_str()+m_pathname.size(),
tmp);
return tmp;
}
const std::wstring wstring(const codecvt_type& cvt) const
{
std::wstring tmp;
if (!m_pathname.empty())
path_traits::convert(m_pathname.c_str(), m_pathname.c_str()+m_pathname.size(),
tmp, cvt);
return tmp;
}
# endif
// ----- generic format observers -----
// Experimental generic function returning generic formatted path (i.e. separators
// are forward slashes). Motivation: simpler than a family of generic_*string
// functions.
# ifdef BOOST_WINDOWS_API
BOOST_FILESYSTEM_DECL path generic_path() const;
# else
path generic_path() const { return path(*this); }
# endif
template <class String>
String generic_string() const;
template <class String>
String generic_string(const codecvt_type& cvt) const;
# ifdef BOOST_WINDOWS_API
const std::string generic_string() const { return generic_path().string(); }
const std::string generic_string(const codecvt_type& cvt) const { return generic_path().string(cvt); }
const std::wstring generic_wstring() const { return generic_path().wstring(); }
const std::wstring generic_wstring(const codecvt_type&) const { return generic_wstring(); }
# else // BOOST_POSIX_API
// On POSIX-like systems, the generic format is the same as the native format
const std::string& generic_string() const { return m_pathname; }
const std::string& generic_string(const codecvt_type&) const { return m_pathname; }
const std::wstring generic_wstring() const { return this->wstring(); }
const std::wstring generic_wstring(const codecvt_type& cvt) const { return this->wstring(cvt); }
# endif
// ----- compare -----
BOOST_FILESYSTEM_DECL int compare(const path& p) const BOOST_NOEXCEPT; // generic, lexicographical
// Convenience overloads; the argument is converted to a path before comparing.
int compare(const std::string& s) const { return compare(path(s)); }
int compare(const value_type* s) const { return compare(path(s)); }
// ----- decomposition -----
BOOST_FILESYSTEM_DECL path root_path() const;
BOOST_FILESYSTEM_DECL path root_name() const; // returns 0 or 1 element path
// even on POSIX, root_name() is non-empty() for network paths
BOOST_FILESYSTEM_DECL path root_directory() const; // returns 0 or 1 element path
BOOST_FILESYSTEM_DECL path relative_path() const;
BOOST_FILESYSTEM_DECL path parent_path() const;
BOOST_FILESYSTEM_DECL path filename() const; // returns 0 or 1 element path
BOOST_FILESYSTEM_DECL path stem() const; // returns 0 or 1 element path
BOOST_FILESYSTEM_DECL path extension() const; // returns 0 or 1 element path
// ----- query -----
bool empty() const BOOST_NOEXCEPT { return m_pathname.empty(); }
bool filename_is_dot() const;
bool filename_is_dot_dot() const;
bool has_root_path() const { return has_root_directory() || has_root_name(); }
bool has_root_name() const { return !root_name().empty(); }
bool has_root_directory() const { return !root_directory().empty(); }
bool has_relative_path() const { return !relative_path().empty(); }
bool has_parent_path() const { return !parent_path().empty(); }
bool has_filename() const { return !m_pathname.empty(); }
bool has_stem() const { return !stem().empty(); }
bool has_extension() const { return !extension().empty(); }
bool is_relative() const { return !is_absolute(); }
bool is_absolute() const
{
// Windows CE has no root name (aka drive letters)
# if defined(BOOST_WINDOWS_API) && !defined(UNDER_CE)
return has_root_name() && has_root_directory();
# else
return has_root_directory();
# endif
}
// ----- lexical operations -----
BOOST_FILESYSTEM_DECL path lexically_normal() const;
BOOST_FILESYSTEM_DECL path lexically_relative(const path& base) const;
// Returns *this when no relative form exists (lexically_relative() is empty).
path lexically_proximate(const path& base) const
{
path tmp(lexically_relative(base));
return tmp.empty() ? *this : tmp;
}
// ----- iterators -----
class iterator;
typedef iterator const_iterator;
class reverse_iterator;
typedef reverse_iterator const_reverse_iterator;
BOOST_FILESYSTEM_DECL iterator begin() const;
BOOST_FILESYSTEM_DECL iterator end() const;
reverse_iterator rbegin() const;
reverse_iterator rend() const;
// ----- static member functions -----
static BOOST_FILESYSTEM_DECL std::locale imbue(const std::locale& loc);
static BOOST_FILESYSTEM_DECL const codecvt_type& codecvt();
// ----- deprecated functions -----
# if defined(BOOST_FILESYSTEM_DEPRECATED) && defined(BOOST_FILESYSTEM_NO_DEPRECATED)
# error both BOOST_FILESYSTEM_DEPRECATED and BOOST_FILESYSTEM_NO_DEPRECATED are defined
# endif
# if !defined(BOOST_FILESYSTEM_NO_DEPRECATED)
// recently deprecated functions supplied by default
path& normalize() {
path tmp(lexically_normal());
m_pathname.swap(tmp.m_pathname);
return *this;
}
path& remove_leaf() { return remove_filename(); }
path leaf() const { return filename(); }
path branch_path() const { return parent_path(); }
path generic() const { return generic_path(); }
bool has_leaf() const { return !m_pathname.empty(); }
bool has_branch_path() const { return !parent_path().empty(); }
bool is_complete() const { return is_absolute(); }
# endif
# if defined(BOOST_FILESYSTEM_DEPRECATED)
// deprecated functions with enough signature or semantic changes that they are
// not supplied by default
const std::string file_string() const { return string(); }
const std::string directory_string() const { return string(); }
const std::string native_file_string() const { return string(); }
const std::string native_directory_string() const { return string(); }
const string_type external_file_string() const { return native(); }
const string_type external_directory_string() const { return native(); }
// older functions no longer supported
//typedef bool (*name_check)(const std::string & name);
//basic_path(const string_type& str, name_check) { operator/=(str); }
//basic_path(const typename string_type::value_type* s, name_check)
// { operator/=(s);}
//static bool default_name_check_writable() { return false; }
//static void default_name_check(name_check) {}
//static name_check default_name_check() { return 0; }
//basic_path& canonize();
# endif
//--------------------------------------------------------------------------------------//
// class path private members //
//--------------------------------------------------------------------------------------//
private:
# if defined(_MSC_VER)
# pragma warning(push) // Save warning settings
# pragma warning(disable : 4251) // disable warning: class 'std::basic_string<_Elem,_Traits,_Ax>'
# endif // needs to have dll-interface...
/*
m_pathname has the type, encoding, and format required by the native
operating system. Thus for POSIX and Windows there is no conversion for
passing m_pathname.c_str() to the O/S API or when obtaining a path from the
O/S API. POSIX encoding is unspecified other than for dot and slash
characters; POSIX just treats paths as a sequence of bytes. Windows
encoding is UCS-2 or UTF-16 depending on the version.
*/
string_type m_pathname; // Windows: as input; backslashes NOT converted to slashes,
// slashes NOT converted to backslashes
# if defined(_MSC_VER)
# pragma warning(pop) // restore warning settings.
# endif
// Returns: If separator is to be appended, m_pathname.size() before append. Otherwise 0.
// Note: An append is never performed if size()==0, so a returned 0 is unambiguous.
BOOST_FILESYSTEM_DECL string_type::size_type m_append_separator_if_needed();
BOOST_FILESYSTEM_DECL void m_erase_redundant_separator(string_type::size_type sep_pos);
BOOST_FILESYSTEM_DECL string_type::size_type m_parent_path_end() const;
// Was qualified; como433beta8 reports:
// warning #427-D: qualified name is not allowed in member declaration
friend class iterator;
friend bool operator<(const path& lhs, const path& rhs);
// see path::iterator::increment/decrement comment below
static BOOST_FILESYSTEM_DECL void m_path_iterator_increment(path::iterator& it);
static BOOST_FILESYSTEM_DECL void m_path_iterator_decrement(path::iterator& it);
}; // class path
namespace detail
{
// Lexicographically compares two element ranges; result is interpreted via
// its sign (used by the path-aware lexicographical_compare() below).
BOOST_FILESYSTEM_DECL
int lex_compare(path::iterator first1, path::iterator last1,
path::iterator first2, path::iterator last2);
// Library-provided singleton paths for "." and "..".
BOOST_FILESYSTEM_DECL
const path& dot_path();
BOOST_FILESYSTEM_DECL
const path& dot_dot_path();
}
# ifndef BOOST_FILESYSTEM_NO_DEPRECATED
typedef path wpath; // deprecated alias retained for backward compatibility
# endif
//------------------------------------------------------------------------------------//
// class path::iterator //
//------------------------------------------------------------------------------------//
class path::iterator
: public boost::iterator_facade<
path::iterator,
path const,
boost::bidirectional_traversal_tag >
{
private:
friend class boost::iterator_core_access;
friend class boost::filesystem::path;
friend class boost::filesystem::path::reverse_iterator;
friend void m_path_iterator_increment(path::iterator & it);
friend void m_path_iterator_decrement(path::iterator & it);
const path& dereference() const { return m_element; }
// Iterators are equal when they refer to the same path object and position.
bool equal(const iterator & rhs) const
{
return m_path_ptr == rhs.m_path_ptr && m_pos == rhs.m_pos;
}
// iterator_facade derived classes don't seem to like implementations in
// separate translation unit dll's, so forward to class path static members
void increment() { m_path_iterator_increment(*this); }
void decrement() { m_path_iterator_decrement(*this); }
path m_element; // current element
const path* m_path_ptr; // path being iterated over
string_type::size_type m_pos; // position of m_element in
// m_path_ptr->m_pathname.
// if m_element is implicit dot, m_pos is the
// position of the last separator in the path.
// end() iterator is indicated by
// m_pos == m_path_ptr->m_pathname.size()
}; // path::iterator
//------------------------------------------------------------------------------------//
// class path::reverse_iterator //
//------------------------------------------------------------------------------------//
class path::reverse_iterator
: public boost::iterator_facade<
path::reverse_iterator,
path const,
boost::bidirectional_traversal_tag >
{
public:
// A reverse iterator wraps a forward iterator and caches the element that
// precedes it; there is nothing to cache when the wrapped iterator is begin().
explicit reverse_iterator(iterator itr) : m_itr(itr)
{
if (itr != itr.m_path_ptr->begin())
{
iterator prev(itr);
--prev;
m_element = *prev;
}
}
private:
friend class boost::iterator_core_access;
friend class boost::filesystem::path;
const path& dereference() const { return m_element; }
bool equal(const reverse_iterator& rhs) const { return m_itr == rhs.m_itr; }
// Step the wrapped forward iterator backwards, then refresh the cache
// unless we have reached begin().
void increment()
{
--m_itr;
if (m_itr != m_itr.m_path_ptr->begin())
{
iterator prev(m_itr);
--prev;
m_element = *prev;
}
}
// Moving a reverse iterator backwards advances the wrapped forward
// iterator; the cached element becomes the one it previously referred to.
void decrement()
{
m_element = *m_itr;
++m_itr;
}
iterator m_itr; // forward iterator one past the element referred to
path m_element; // cached current element
}; // path::reverse_iterator
//------------------------------------------------------------------------------------//
// //
// non-member functions //
// //
//------------------------------------------------------------------------------------//
// std::lexicographical_compare would infinitely recurse because path iterators
// yield paths, so provide a path aware version
// Forwards to detail::lex_compare and tests its result for "less than".
inline bool lexicographical_compare(path::iterator first1, path::iterator last1,
path::iterator first2, path::iterator last2)
{ return detail::lex_compare(first1, last1, first2, last2) < 0; }
// All relational operators are defined in terms of path::compare(), so they
// use the same generic-format lexicographical ordering.
inline bool operator==(const path& lhs, const path& rhs) {return lhs.compare(rhs) == 0;}
inline bool operator==(const path& lhs, const path::string_type& rhs) {return lhs.compare(rhs) == 0;}
inline bool operator==(const path::string_type& lhs, const path& rhs) {return rhs.compare(lhs) == 0;}
inline bool operator==(const path& lhs, const path::value_type* rhs) {return lhs.compare(rhs) == 0;}
inline bool operator==(const path::value_type* lhs, const path& rhs) {return rhs.compare(lhs) == 0;}
inline bool operator!=(const path& lhs, const path& rhs) {return lhs.compare(rhs) != 0;}
inline bool operator!=(const path& lhs, const path::string_type& rhs) {return lhs.compare(rhs) != 0;}
inline bool operator!=(const path::string_type& lhs, const path& rhs) {return rhs.compare(lhs) != 0;}
inline bool operator!=(const path& lhs, const path::value_type* rhs) {return lhs.compare(rhs) != 0;}
inline bool operator!=(const path::value_type* lhs, const path& rhs) {return rhs.compare(lhs) != 0;}
// TODO: why do == and != have additional overloads, but the others don't?
inline bool operator<(const path& lhs, const path& rhs) {return lhs.compare(rhs) < 0;}
inline bool operator<=(const path& lhs, const path& rhs) {return !(rhs < lhs);}
inline bool operator> (const path& lhs, const path& rhs) {return rhs < lhs;}
inline bool operator>=(const path& lhs, const path& rhs) {return !(lhs < rhs);}
// Hash over the native character sequence. On Windows, slashes are folded to
// backslashes character-by-character before hashing.
inline std::size_t hash_value(const path& x) BOOST_NOEXCEPT
{
# ifdef BOOST_WINDOWS_API
std::size_t seed = 0;
for (const path::value_type* p = x.c_str(); *p; ++p)
{
path::value_type ch = *p;
if (ch == L'/')
ch = L'\\';
hash_combine(seed, ch);
}
return seed;
# else // BOOST_POSIX_API
return hash_range(x.native().begin(), x.native().end());
# endif
}
// Non-member swap: delegates to the noexcept member swap.
inline void swap(path& lhs, path& rhs) BOOST_NOEXCEPT { lhs.swap(rhs); }
// Joins two paths with operator/=; a copy of lhs is made.
inline path operator/(const path& lhs, const path& rhs)
{
path p = lhs;
p /= rhs;
return p;
}
# if !defined(BOOST_NO_CXX11_RVALUE_REFERENCES)
// Rvalue overload appends in place, avoiding the copy.
inline path operator/(path&& lhs, const path& rhs)
{
lhs /= rhs;
return std::move(lhs);
}
# endif
// inserters and extractors
// use boost::io::quoted() to handle spaces in paths
// use '&' as escape character to ease use for Windows paths
// Inserter: writes the path via boost::io::quoted so that paths containing
// spaces round-trip; '&' is the escape character.
template <class Char, class Traits>
inline std::basic_ostream<Char, Traits>&
operator<<(std::basic_ostream<Char, Traits>& os, const path& p)
{
return os
<< boost::io::quoted(p.template string<std::basic_string<Char> >(), static_cast<Char>('&'));
}
// Extractor: reads a quoted string (same escape character) and assigns it to p.
template <class Char, class Traits>
inline std::basic_istream<Char, Traits>&
operator>>(std::basic_istream<Char, Traits>& is, path& p)
{
std::basic_string<Char> str;
is >> boost::io::quoted(str, static_cast<Char>('&'));
p = str;
return is;
}
// name_checks
// These functions are holdovers from version 1. It isn't clear they have much
// usefulness, or how to generalize them for later versions.
// Declarations only; the predicates are implemented in the compiled library.
BOOST_FILESYSTEM_DECL bool portable_posix_name(const std::string & name);
BOOST_FILESYSTEM_DECL bool windows_name(const std::string & name);
BOOST_FILESYSTEM_DECL bool portable_name(const std::string & name);
BOOST_FILESYSTEM_DECL bool portable_directory_name(const std::string & name);
BOOST_FILESYSTEM_DECL bool portable_file_name(const std::string & name);
BOOST_FILESYSTEM_DECL bool native(const std::string & name);
namespace detail
{
// A forward slash is always both a directory separator and an element
// separator. On Windows a backslash is an additional directory separator,
// and a colon (as in "c:foo") is an additional element separator; on POSIX
// the two predicates are therefore identical.
inline bool is_directory_separator(path::value_type c) BOOST_NOEXCEPT
{
# ifdef BOOST_WINDOWS_API
return c == path::separator || c == path::preferred_separator;
# else
return c == path::separator;
# endif
}
inline bool is_element_separator(path::value_type c) BOOST_NOEXCEPT
{
# ifdef BOOST_WINDOWS_API
return c == path::separator || c == path::preferred_separator || c == L':';
# else
return c == path::separator;
# endif
}
} // namespace detail
//------------------------------------------------------------------------------------//
// class path miscellaneous function implementations //
//------------------------------------------------------------------------------------//
// Reverse iteration is defined in terms of the forward iterators:
// rbegin() wraps end(), rend() wraps begin().
inline path::reverse_iterator path::rbegin() const { return reverse_iterator(end()); }
inline path::reverse_iterator path::rend() const { return reverse_iterator(begin()); }
inline bool path::filename_is_dot() const
{
// implicit dot is tricky, so actually call filename(); see path::filename() example
// in reference.html
path p(filename());
return p.size() == 1 && *p.c_str() == dot;
}
inline bool path::filename_is_dot_dot() const
{
return size() >= 2 && m_pathname[size()-1] == dot && m_pathname[size()-2] == dot
&& (m_pathname.size() == 2 || detail::is_element_separator(m_pathname[size()-3]));
// use detail::is_element_separator() rather than detail::is_directory_separator
// to deal with "c:.." edge case on Windows when ':' acts as a separator
}
//--------------------------------------------------------------------------------------//
// class path member template implementation //
//--------------------------------------------------------------------------------------//
// Appends [begin, end), inserting a preferred separator when needed.
template <class InputIterator>
path& path::append(InputIterator begin, InputIterator end)
{
if (begin == end)
return *this;
// Remember where a separator was inserted so a redundant one can be erased.
const string_type::size_type sep_pos = m_append_separator_if_needed();
// path_traits::convert() requires contiguous storage, so buffer the range.
std::basic_string<typename std::iterator_traits<InputIterator>::value_type>
buf(begin, end);
path_traits::convert(buf.c_str(), buf.c_str() + buf.size(), m_pathname);
if (sep_pos != 0)
m_erase_redundant_separator(sep_pos);
return *this;
}
// Append a range of characters, converting with an explicit codecvt facet.
template <class InputIterator>
path& path::append(InputIterator first, InputIterator last, const codecvt_type& cvt)
{
  if (first == last)
    return *this;  // appending an empty range is a no-op
  string_type::size_type inserted_sep(m_append_separator_if_needed());
  // Materialize the range so convert() can work on a contiguous buffer.
  typedef typename std::iterator_traits<InputIterator>::value_type char_type;
  std::basic_string<char_type> buf(first, last);
  path_traits::convert(buf.c_str(), buf.c_str() + buf.size(), m_pathname, cvt);
  if (inserted_sep)
    m_erase_redundant_separator(inserted_sep);
  return *this;
}
// Append from any supported Source type (string, char array, ...).
template <class Source>
path& path::append(Source const& source)
{
  // Nothing to do for an empty source; avoids adding a stray separator.
  if (path_traits::empty(source))
    return *this;
  string_type::size_type inserted_sep = m_append_separator_if_needed();
  path_traits::dispatch(source, m_pathname);
  if (inserted_sep)
    m_erase_redundant_separator(inserted_sep);
  return *this;
}
// Append from any supported Source type using an explicit codecvt facet.
template <class Source>
path& path::append(Source const& source, const codecvt_type& cvt)
{
  // Nothing to do for an empty source; avoids adding a stray separator.
  if (path_traits::empty(source))
    return *this;
  string_type::size_type inserted_sep = m_append_separator_if_needed();
  path_traits::dispatch(source, m_pathname, cvt);
  if (inserted_sep)
    m_erase_redundant_separator(inserted_sep);
  return *this;
}
//--------------------------------------------------------------------------------------//
// class path member template specializations //
//--------------------------------------------------------------------------------------//
// Each specialization simply forwards to the non-template accessor of the
// matching character width.
template <> inline std::string path::string<std::string>() const { return string(); }
template <> inline std::wstring path::string<std::wstring>() const { return wstring(); }
template <> inline std::string path::string<std::string>(const codecvt_type& cvt) const { return string(cvt); }
template <> inline std::wstring path::string<std::wstring>(const codecvt_type& cvt) const { return wstring(cvt); }
template <> inline std::string path::generic_string<std::string>() const { return generic_string(); }
template <> inline std::wstring path::generic_string<std::wstring>() const { return generic_wstring(); }
template <> inline std::string path::generic_string<std::string>(const codecvt_type& cvt) const { return generic_string(cvt); }
template <> inline std::wstring path::generic_string<std::wstring>(const codecvt_type& cvt) const { return generic_wstring(cvt); }
//--------------------------------------------------------------------------------------//
// path_traits convert function implementations //
//                       requiring path::codecvt() be visible                           //
//--------------------------------------------------------------------------------------//
namespace path_traits
{ // without codecvt: these overloads use the path's imbued conversion facet

// Narrow to wide; from_end of 0 means a null terminated MBCS string.
inline void convert(const char* from, const char* from_end, std::wstring& to)
{
  convert(from, from_end, to, path::codecvt());
}

// Wide to narrow; from_end of 0 means a null terminated string.
inline void convert(const wchar_t* from, const wchar_t* from_end, std::string& to)
{
  convert(from, from_end, to, path::codecvt());
}

// Null terminated narrow to wide.
inline void convert(const char* from, std::wstring& to)
{
  BOOST_ASSERT(!!from);
  convert(from, 0, to, path::codecvt());
}

// Null terminated wide to narrow.
inline void convert(const wchar_t* from, std::string& to)
{
  BOOST_ASSERT(!!from);
  convert(from, 0, to, path::codecvt());
}

} // namespace path_traits
} // namespace filesystem
} // namespace boost
//----------------------------------------------------------------------------//
#include <boost/config/abi_suffix.hpp> // pops abi_prefix.hpp pragmas
#endif // BOOST_FILESYSTEM_PATH_HPP
| CauldronDevelopmentLLC/cbang | src/boost/boost/filesystem/path.hpp | C++ | lgpl-2.1 | 40,644 |
package org.intermine.task.project;
/*
* Copyright (C) 2002-2017 FlyMine
*
* This code may be freely distributed and modified under the
* terms of the GNU Lesser General Public Licence. This should
* be distributed with the code. See the LICENSE file for more
* information or http://www.gnu.org/copyleft/lesser.html.
*
*/
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import javax.xml.parsers.ParserConfigurationException;
import javax.xml.parsers.SAXParserFactory;
import org.xml.sax.Attributes;
import org.xml.sax.InputSource;
import org.xml.sax.SAXException;
import org.xml.sax.helpers.DefaultHandler;
/**
* Code for reading project.xml files.
*
* @author Kim Rutherford
*/
public final class ProjectXmlBinding
{
    private ProjectXmlBinding() {
        // utility class: no instances
    }

    /**
     * Create a Project object from a project.xml file.
     * @param file the File
     * @return the Project
     * @throws RuntimeException if the file cannot be read, the parser cannot
     *         be configured, or the XML is malformed
     */
    public static Project unmarshall(File file) {
        FileReader reader = null;
        try {
            reader = new FileReader(file);
            ProjectXmlHandler handler = new ProjectXmlHandler();
            SAXParserFactory factory = SAXParserFactory.newInstance();
            factory.setValidating(true);
            factory.newSAXParser().parse(new InputSource(reader), handler);
            Project project = handler.project;
            project.validate(file);
            return project;
        } catch (IOException e) {
            throw new RuntimeException (e);
        } catch (ParserConfigurationException e) {
            throw new RuntimeException("The underlying parser does not support "
                                       + " the requested features", e);
        } catch (SAXException e) {
            throw new RuntimeException("Error parsing the project.xml file, "
                                       + "please check the format.", e);
        } finally {
            // FIX: the FileReader was previously never closed, leaking a file
            // handle on every call (both on success and on parse failure).
            if (reader != null) {
                try {
                    reader.close();
                } catch (IOException e) {
                    // best-effort close; the parse outcome is already decided
                }
            }
        }
    }

    /**
     * SAX handler that builds a Project from project.xml elements.
     */
    private static class ProjectXmlHandler extends DefaultHandler
    {
        // Matches any element name ending in "project" - presumably to
        // tolerate prefixed names; TODO confirm against real project.xml files.
        private final Pattern projectPattern = Pattern.compile(".*project$");
        private final Matcher projectMatcher = projectPattern.matcher("");

        Project project;        // root object being built
        Action action;          // current source/post-process, or null at top level

        /**
         * @see DefaultHandler#startElement
         */
        @Override
        public void startElement(String uri, String localName, String qName, Attributes attrs) {
            if (qName == null) {
                return;
            }
            projectMatcher.reset(qName);
            if (projectMatcher.matches()) {
                project = new Project();
                if (attrs.getValue("type") == null) {
                    throw new IllegalArgumentException("project type must be set in project.xml");
                } else {
                    project.setType(attrs.getValue("type"));
                }
            } else if ("post-process".equals(qName)) {
                PostProcess postProcess = new PostProcess();
                action = postProcess;
                project.addPostProcess(attrs.getValue("name"), postProcess);
            } else if ("source".equals(qName)) {
                Source source = new Source();
                source.setType(attrs.getValue("type"));
                source.setName(attrs.getValue("name"));
                project.addSource(attrs.getValue("name"), source);
                action = source;
            } else if ("property".equals(qName)) {
                UserProperty property = new UserProperty();
                property.setName(attrs.getValue("name"));
                property.setValue(attrs.getValue("value"));
                property.setLocation(attrs.getValue("location"));
                if (action == null) {
                    // global property
                    project.addProperty(property);
                } else {
                    // property for a source or post-process
                    action.addUserProperty(property);
                }
            }
        }

        /**
         * @see DefaultHandler#endElement
         */
        @Override
        public void endElement(String uri, String localName, String qName) {
            if (qName == null) {
                return;
            }
            if ("source".equals(qName) || "post-process".equals(qName)) {
                // leaving a source/post-process scope: properties become global again
                action = null;
            }
        }
    }
}
| elsiklab/intermine | imbuild/im-ant-tasks/src/org/intermine/task/project/ProjectXmlBinding.java | Java | lgpl-2.1 | 4,505 |
//
// Copyright (C) 2004-2006 SIPfoundry Inc.
// Licensed by SIPfoundry under the LGPL license.
//
// Copyright (C) 2004-2006 Pingtel Corp. All rights reserved.
// Licensed to SIPfoundry under a Contributor Agreement.
//
// $$
///////////////////////////////////////////////////////////////////////////////
#ifndef _SipDialogEvent_h_
#define _SipDialogEvent_h_
// SYSTEM INCLUDES
// APPLICATION INCLUDES
#include <utl/UtlSList.h>
#include <utl/UtlSListIterator.h>
#include <net/HttpBody.h>
#include <net/Url.h>
#include <os/OsDateTime.h>
#include <os/OsBSem.h>
// DEFINES
// MACROS
// EXTERNAL FUNCTIONS
// EXTERNAL VARIABLES
// CONSTANTS
#define DIALOG_EVENT_TYPE "dialog"
#define BEGIN_DIALOG_INFO "<dialog-info xmlns=\"urn:ietf:params:xml:ns:dialog-info\""
#define END_DIALOG_INFO "</dialog-info>\n"
#define VERSION_EQUAL " version="
#define STATE_EQUAL " state="
#define ENTITY_EQUAL " entity="
#define DOUBLE_QUOTE "\""
#define END_BRACKET ">"
#define END_LINE ">\n"
#define BEGIN_DIALOG "<dialog id="
#define CALL_ID_EQUAL " call-id="
#define LOCAL_TAG_EQUAL " local-tag="
#define REMOTE_TAG_EQUAL " remote-tag="
#define DIRECTION_EQUAL " direction="
#define END_DIALOG "</dialog>\n"
#define BEGIN_STATE "<state"
#define EVENT_EQUAL " event="
#define CODE_EQUAL " code="
#define END_STATE "</state>\n"
#define BEGIN_DURATION "<duration>"
#define END_DURATION "</duration>\n"
#define BEGIN_LOCAL "<local>\n"
#define END_LOCAL "</local>\n"
#define BEGIN_REMOTE "<remote>\n"
#define END_REMOTE "</remote>\n"
#define BEGIN_IDENTITY "<identity"
#define DISPLAY_EQUAL " display="
#define END_IDENTITY "</identity>\n"
#define BEGIN_TARTGET "<target uri=\""
#define END_TARGET "\"/>\n"
#define STATE_TRYING "trying"
#define STATE_PROCEEDING "proceeding"
#define STATE_EARLY "early"
#define STATE_CONFIRMED "confirmed"
#define STATE_TERMINATED "terminated"
// STRUCTS
// TYPEDEFS
// FORWARD DECLARATIONS
//! Container for dialog element in the dialog event package
/**
* This class contains all the contents presented in a dialog element of the
* dialog event package described in draft-ietf-sipping-dialog-package-06.txt
* (An INVITE Initiated Dialog Event Package for SIP). This class has the
* methods to construct and manipulate the dialog and its sub-elements.
*/
class Dialog : public UtlContainable
{
/* //////////////////////////// PUBLIC //////////////////////////////////// */
public:
/**
* @name ====================== Constructors and Destructors
* @{
*/
/// Constructor
Dialog(const char* dialogId,
const char* callId,
const char* localTag,
const char* remoteTag,
const char* direction);
/// Destructor
~Dialog();
// UtlContainable interface. NOTE(review): hashing/comparison presumably use
// the unique identifier built by setIdentifier() - confirm in Dialog.cpp.
virtual UtlContainableType getContainableType() const;
static const UtlContainableType TYPE;
virtual unsigned int hash() const;
int compareTo(const UtlContainable *b) const;
///@}
/**
* @name ====================== Dialog Setting Interfaces
*
* These methods set/get the dialog element and sub-elements.
*
* @{
*/
void getDialog(UtlString& dialogId,
UtlString& callId,
UtlString& localTag,
UtlString& remoteTag,
UtlString& direction) const;
void getCallId(UtlString& callId) const;
void setDialogId(const char* dialogId);
void getDialogId(UtlString& dialogId) const;
void setState(const char* state, const char* event, const char* code);
void setTags(const char* local, const char* remote);
void getState(UtlString& state, UtlString& event, UtlString& code) const;
// NOTE(review): setDuration/getDuration use unsigned long but the backing
// member mDuration is a signed long - confirm the intended range.
void setDuration(const unsigned long duration);
unsigned long getDuration() const;
void setReplaces(const char* callId,
const char* localTag,
const char* remoteTag);
void getReplaces(UtlString& callId,
UtlString& localTag,
UtlString& remoteTag) const;
void setReferredBy(const char* url,
const char* display);
void getReferredBy(UtlString& url,
UtlString& display) const;
void setLocalIdentity(const char* identity,
const char* display);
void getLocalIdentity(UtlString& identity,
UtlString& display) const;
void setRemoteIdentity(const char* identity,
const char* display);
void getRemoteIdentity(UtlString& identity,
UtlString& display) const;
void setLocalTarget(const char* url);
void getLocalTarget(UtlString& url) const;
void setRemoteTarget(const char* url);
void getRemoteTarget(UtlString& url) const;
///@}
/* //////////////////////////// PROTECTED ///////////////////////////////// */
protected:
// Set the unique identifier member by concatenating the call-id,
// to-tag, and from-tag.
void setIdentifier();
/* //////////////////////////// PRIVATE /////////////////////////////////// */
private:
// Variables for dialog element
UtlString mId;
UtlString mCallId;
UtlString mLocalTag;
UtlString mRemoteTag;
UtlString mDirection;
// Unique identifier of the dialog (built by setIdentifier())
UtlString mIdentifier;
// Variables for state element
UtlString mState;
UtlString mEvent;
UtlString mCode;
// Variables for duration element (signed here; accessors use unsigned long)
long mDuration;
// Variables for replaces element
UtlString mNewCallId;
UtlString mNewLocalTag;
UtlString mNewRemoteTag;
// Variables for referred-by element
UtlString mReferredBy;
UtlString mDisplay;
// Variables for local element
UtlString mLocalIdentity;
UtlString mLocalDisplay;
UtlString mLocalTarget;
UtlString mLocalSessionDescription;
// Variables for remote element
UtlString mRemoteIdentity;
UtlString mRemoteDisplay;
UtlString mRemoteTarget;
UtlString mRemoteSessionDescription;
// Disabled copy constructor
Dialog(const Dialog& rDialog);
// Disabled assignment operator
Dialog& operator=(const Dialog& rhs);
};
//! Container for MIME type application/dialog-info+xml.
/**
* This class contains all the contents presented in a dialog event package
* described in draft-ietf-sipping-dialog-package-06.txt (An INVITE Initiated
* Dialog Event Package for SIP). This class has the methods to construct and
* manipulate the dialog events in a dialog event package.
*/
class SipDialogEvent : public HttpBody
{
/* //////////////////////////// PUBLIC //////////////////////////////////// */
public:
/**
* @name ====================== Constructors and Destructors
* @{
*/
//! Construct an empty body of a dialog event package
SipDialogEvent(const char* state,
const char* entity);
//! Construct from an existing dialog event package in XML format
SipDialogEvent(const char* bodyBytes);
//! Destructor that will free up the memory allocated for dialog contents if it is not being deleted
virtual
~SipDialogEvent();
///@}
/**
* @name ====================== Dialog Event Serialization Interfaces
*
* @{
*/
//! Build the body of this object (serialize the dialogs to XML)
void buildBody() const;
//! Get the string length of this object
virtual int getLength() const;
//! Get the serialized char representation of this dialog event.
/*! \param bytes - pointer to the body text of the dialog event will
* be placed here.
* \param length - the number of bytes written (not including the
* null terminator).
*/
virtual void getBytes(const char** bytes,
int* length) const;
//! Get the serialized string representation of this dialog event.
/*! \param bytes - UtlString into which the body text will be copied.
* \param length - the number of bytes written (not including the
* null terminator).
*/
virtual void getBytes(UtlString* bytes,
int* length) const;
// Import HttpBody's getBytes methods, except as overridden here.
using HttpBody::getBytes;
// Accessors for the dialog-info 'entity' and 'state' attributes.
void setEntity(const char* entity);
void getEntity(UtlString& entity) const;
void setState(const char* state);
void getState(UtlString& state) const;
///@}
/**
* @name ====================== Dialog Setting Interfaces
*
* These methods set/get the dialog element.
*
* @{
*/
//! Insert a Dialog object
void insertDialog(Dialog* dialog);
//! Get the Dialog object from the hash table based on the callId
//and tags. If the mRemoteTag of a Dialog object in the hash table
//is empty, then testing for match is only done on callId and
//localTag. Otherwise, all three fields are used.
Dialog* getDialog(UtlString& callId,
UtlString& localTag,
UtlString& remoteTag);
//! In the case where a empty SipDialog object is retrieved from the
//DialogEventPublisher in handling a DISCONNECTED or FAILED message
//the publisher still needs to find the dialog, even if it is just
//by the callId. Work-around for XCL-98.
Dialog* getDialogByCallId(UtlString& callId);
//! Remove a Dialog object (returns the removed object, not deleted)
Dialog* removeDialog(Dialog* dialog);
//! Check whether there is are any dialogs or not
UtlBoolean isEmpty();
//! Return an iterator that will retrieve all dialogs in the event.
// This iterator is only valid as long as the SipDialogEvent is not
// modified, and must be deleted by the caller before the SipDialogEvent
// is deleted.
UtlSListIterator* getDialogIterator();
///@}
/* //////////////////////////// PROTECTED ///////////////////////////////// */
protected:
/// Parse an existing dialog event package from xml format into the internal representation.
void parseBody(const char* bytes);
/* //////////////////////////// PRIVATE /////////////////////////////////// */
private:
//! Variables for dialog-info
// NOTE(review): mVersion presumably carries the dialog-info 'version'
// attribute - confirm in the implementation.
int mVersion;
UtlString mDialogState;
UtlString mEntity;
//! Variables for dialog element
UtlSList mDialogs;
//! binary semaphore serializing access to the dialog list
OsBSem mLock;
//! Disabled copy constructor
SipDialogEvent(const SipDialogEvent& rSipDialogEvent);
//! Disabled assignment operator
SipDialogEvent& operator=(const SipDialogEvent& rhs);
};
/* ============================ INLINE METHODS ============================ */
#endif // _SipDialogEvent_h_
| litalidev/sipxtapi | sipXtackLib/include/net/SipDialogEvent.h | C | lgpl-2.1 | 10,430 |
//
// Copyright (C) 2004-2006 SIPfoundry Inc.
// Licensed by SIPfoundry under the LGPL license.
//
// Copyright (C) 2004-2006 Pingtel Corp. All rights reserved.
// Licensed to SIPfoundry under a Contributor Agreement.
//
// $$
///////////////////////////////////////////////////////////////////////////////
#ifndef __sipXTabbedDlg_H__
#define __sipXTabbedDlg_H__
#ifndef WX_PRECOMP
#include "wx/wx.h"
#endif
#include <utl/UtlString.h>
#include "sipXezPhone_wdr.h"
#define ID_TAB_NOTEBOOK_CTRL 2999
#define ID_TAB_IDENTITY_CTRL 3000
#define ID_TAB_REALM_CTRL 3001
#define ID_TAB_USERNAME_CTRL 3002
#define ID_TAB_PASSWORD_CTRL 3003
#define ID_TAB_LOCATION_CTRL 3004
#define ID_TAB_PROXY_CTRL 3005
#define ID_TAB_STUNSERVER_CTRL 3006
#define ID_TAB_STUNPORT_CTRL 3007
#define ID_TAB_TURNSERVER_CTRL 3008
#define ID_TAB_TURNPORT_CTRL 3009
#define ID_TAB_SIPPORT_CTRL 3010
#define ID_TAB_RTPPORT_CTRL 3011
#define ID_TAB_ICE_CTRL 3012
#define ID_TAB_RPORT_CTRL 3013
#define ID_TAB_ANSWER_CTRL 3014
#define ID_TAB_REGISTER_CTRL 3015
#define ID_TAB_BANDWIDTH_CHOICE 3016
#define ID_TAB_CODEC_LIST 3017
#define ID_TAB_CODEC_SELECT 3018
#define ID_TAB_ECHO_CTRL 3019
#define ID_TAB_DTMF_CTRL 3020
#define ID_TAB_IDLE_CTRL 3021
#define ID_TAB_SHORT_CTRL 3022
//----------------------------------------------------------------------------
// sipXTabbedDlg
//----------------------------------------------------------------------------
/***************************************************
* Base class for all pages in the notebook
*/
// Abstract base for one page of the settings notebook: concrete pages
// implement the setData()/getData()/restoreData()/validateData() contract.
class sipXTabbedPage : public wxPanel
{
public:
sipXTabbedPage(wxWindow *parent, const UtlString& title, wxWindowID id = -1,
const wxPoint& pos = wxDefaultPosition,
const wxSize& size = wxDefaultSize);
// Tab label shown in the notebook (backed by mTitle).
virtual const char* GetTitle()
{return mTitle.data();}
virtual void getData() = 0; // Get page data
virtual void restoreData() = 0; // Some pages have to remember original settings
// and restore them on Cancel
virtual bool validateData() = 0; // Some pages have to validate data on page change
protected:
bool mbInitialized; // NOTE(review): presumably set once controls exist - confirm in .cpp
private:
virtual void setData() = 0; // populate controls from stored settings
UtlString mTitle; // tab label returned by GetTitle()
};
/***************************************************
* Base class for all codec pages in the notebook
*
* The sipXTabbedCodecPage handles the basic three
* controls relating to codecs - the bandwidth selection,
* the select button for explicit codec selection, and
* the codec list box.
*/
// Base page adding the three codec controls (bandwidth choice, codec list,
// explicit-selection button) shared by the audio and video pages.
class sipXTabbedCodecPage : public sipXTabbedPage
{
public:
sipXTabbedCodecPage(wxWindow* parent, const UtlString& title);
protected:
// Refill mpCodecList and select entry 'sel'.
void rebuildCodecList(int sel);
// Event handlers for the bandwidth choice, list double-click, and Select button.
void OnBandwidthChange(wxCommandEvent &event);
void OnDblClick(wxCommandEvent &event);
void OnSelect(wxCommandEvent &event);
int mCodecPref; // current bandwidth/codec preference
int mOrigCodecPref; // preference on entry, for restore on Cancel
UtlString mCodecName; // explicitly selected codec name
UtlString mOrigCodecName; // codec name on entry, for restore on Cancel
bool mbCodecByName; // true when a codec was chosen by name
wxChoice* mpCodecPref;
wxListBox* mpCodecList;
wxButton* mpSelectBtn;
DECLARE_EVENT_TABLE()
};
/**************************************************
* Specialized tabbed pages
*/
// Settings page for the user's SIP identity and credentials.
class sipXIdentityPage : public sipXTabbedPage
{
public:
sipXIdentityPage(wxWindow *parent, const UtlString& title);
void getData();
void restoreData();
bool validateData();
private:
void setData();
wxTextCtrl* mpIdentity;
wxTextCtrl* mpRealm;
wxTextCtrl* mpUser;
wxTextCtrl* mpPassword;
wxTextCtrl* mpLocation;
wxCheckBox* mpAnswer; // auto-answer option
DECLARE_EVENT_TABLE()
};
// Settings page for network parameters (proxy, STUN/TURN, ports, ICE, rport).
class sipXNetworkPage : public sipXTabbedPage
{
public:
sipXNetworkPage(wxWindow *parent, const UtlString& title);
void getData();
void restoreData();
bool validateData();
private:
void setData();
wxTextCtrl* mpProxy;
wxTextCtrl* mpTurnServer;
wxTextCtrl* mpTurnPort;
wxTextCtrl* mpStunServer;
wxTextCtrl* mpStunPort;
wxTextCtrl* mpSipPort;
wxTextCtrl* mpRtpPort;
wxTextCtrl* mpIdle; // idle timeout value
wxCheckBox* mpIce;
wxCheckBox* mpRport;
wxCheckBox* mpShortNames; // use short SIP header names
};
// Audio settings page: codec controls plus echo cancellation and DTMF options.
class sipXAudioPage : public sipXTabbedCodecPage
{
public:
sipXAudioPage(wxWindow *parent, const UtlString& title);
void getData();
void restoreData();
bool validateData();
private:
void setData();
wxCheckBox* mpEcho; // echo cancellation on/off
wxCheckBox* mpDtmf; // out-of-band DTMF - presumably; confirm in .cpp
};
// Video settings page: inherits all behavior from the codec base page.
class sipXVideoPage : public sipXTabbedCodecPage
{
public:
sipXVideoPage(wxWindow *parent, const UtlString& title);
void getData();
void restoreData();
bool validateData();
private:
void setData();
};
// Security settings page; currently declares only the base page contract.
class sipXSecurityPage : public sipXTabbedPage
{
public:
sipXSecurityPage(wxWindow *parent, const UtlString& title);
void getData();
void restoreData();
bool validateData();
private:
void setData();
};
// Dialog hosting the settings notebook and the OK/Cancel/Register buttons.
class sipXTabbedDlg : public wxDialog
{
public:
sipXTabbedDlg( wxWindow *parent, wxWindowID id, const wxString &title,
const wxPoint& pos = wxDefaultPosition,
const wxSize& size = wxDefaultSize,
long style = wxDEFAULT_DIALOG_STYLE,
long options = 0);
virtual ~sipXTabbedDlg();
void InitDialog();
private:
// Button handlers.
void OnOk( wxCommandEvent &event );
void OnCancel( wxCommandEvent &event );
void OnRegister( wxCommandEvent &event );
// Notebook page-change handlers (validate old page / refresh new page).
void OnChanging( wxNotebookEvent &event );
void OnChanged( wxNotebookEvent &event );
int mXpos, mYpos; // Position of dialog
int mWidth, mHeight;
long mOptions; // Notebook options
int mPages; // Number of pages in notebook
int mLastPage; // Index of last selected page
wxNotebook* mpNotebook;
sipXTabbedPage* mpPage[9]; // owned pages; mPages gives the live count
wxButton* mpOkBtn;
wxButton* mpCancelBtn;
wxButton* mpRegisterBtn;
DECLARE_EVENT_TABLE()
};
#endif
| litalidev/sipxtapi | sipXcallLib/examples/sipXezPhone/sipXTabbedDlg.h | C | lgpl-2.1 | 6,037 |
/*
winFLTK.h:
Copyright (C) 2006 Istvan Varga
This file is part of Csound.
The Csound Library is free software; you can redistribute it
and/or modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
Csound is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with Csound; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
02111-1307 USA
*/
#ifndef CSOUND_WINFLTK_H
#define CSOUND_WINFLTK_H
#include "csdl.h"
#ifdef __cplusplus
#include <FL/Fl.H>
#endif
/**
* FLTK flags is the sum of any of the following values:
* 1 (input): disable widget opcodes by setting up dummy opcodes instead
* 2 (input): disable FLTK graphs
* 4 (input): disable the use of a separate thread for widget opcodes
* 8 (input): disable the use of Fl::lock() and Fl::unlock()
* 16 (input): disable the use of Fl::awake()
* 32 (output): widget opcodes are used
* 64 (output): FLTK graphs are used
* 128 (input): disable widget opcodes by not registering any opcodes
* 256 (input): disable the use of Fl::wait() (implies no widget thread)
*/
// Read the FLTK flags word stored as the Csound global "FLTK_Flags"
// (bit meanings are documented in the comment above).
static inline int getFLTKFlags(CSOUND *csound)
{
    int *flags = (int*) csound->QueryGlobalVariableNoCheck(csound, "FLTK_Flags");
    return *flags;
}
// Writable access to the same "FLTK_Flags" global word.
static inline int *getFLTKFlagsPtr(CSOUND *csound)
{
    return (int*) csound->QueryGlobalVariableNoCheck(csound, "FLTK_Flags");
}
#ifdef __cplusplus
// Acquire the FLTK lock unless disabled (flag bit 8) or compiled out.
static inline void Fl_lock(CSOUND *csound)
{
#ifdef NO_FLTK_THREADS
    (void) csound;
#else
    if ((getFLTKFlags(csound) & 8) == 0)
      Fl::lock();
#endif
}
// Release the FLTK lock unless disabled (flag bit 8) or compiled out.
static inline void Fl_unlock(CSOUND *csound)
{
#ifdef NO_FLTK_THREADS
    (void) csound;
#else
    if ((getFLTKFlags(csound) & 8) == 0)
      Fl::unlock();
#endif
}
// Wake the FLTK event loop unless disabled (flag bit 16) or compiled out.
static inline void Fl_awake(CSOUND *csound)
{
#ifdef NO_FLTK_THREADS
    (void) csound;
#else
    if ((getFLTKFlags(csound) & 16) == 0)
      Fl::awake();
#endif
}
// Run Fl::wait(seconds) unless waiting is disabled (flag bit 256).
static inline void Fl_wait(CSOUND *csound, double seconds)
{
    if ((getFLTKFlags(csound) & 256) == 0)
      Fl::wait(seconds);
}
// Like Fl_wait(), but brackets the wait with Fl::lock()/Fl::unlock()
// when locking is enabled (bit 8 clear) and not compiled out.
static inline void Fl_wait_locked(CSOUND *csound, double seconds)
{
    int fltkFlags;
    fltkFlags = getFLTKFlags(csound);
    // Bit 256 disables calling Fl::wait() entirely.
    if (!(fltkFlags & 256)) {
#ifndef NO_FLTK_THREADS
      if (!(fltkFlags & 8))
        Fl::lock();
#endif
      Fl::wait(seconds);
#ifndef NO_FLTK_THREADS
      // Must mirror the lock above exactly, using the same flags snapshot.
      if (!(fltkFlags & 8))
        Fl::unlock();
#endif
    }
}
#endif /* __cplusplus */
#ifdef __cplusplus
extern "C" {
#endif
// Entry points implemented in the FLTK graph/widget translation units;
// declared with C linkage so the C core can register them as callbacks.
extern int CsoundYield_FLTK(CSOUND *);
extern void DrawGraph_FLTK(CSOUND *, WINDAT *);
extern int ExitGraph_FLTK(CSOUND *);
extern void kill_graph(CSOUND *, uintptr_t);
extern void KillXYin_FLTK(CSOUND *, XYINDAT *);
extern uintptr_t MakeWindow_FLTK(CSOUND *, char *);
extern void MakeXYin_FLTK(CSOUND *, XYINDAT *, MYFLT, MYFLT);
extern int myFLwait(void);
extern void ReadXYin_FLTK(CSOUND *, XYINDAT *);
// Module init/reset hooks and the widget opcode table.
extern void flgraph_init(CSOUND *csound);
extern void widget_init(CSOUND *);
extern int widget_reset(CSOUND *, void *);
extern const OENTRY widgetOpcodes_[];
#ifdef __cplusplus
} /* extern "C" */
#endif
#endif /* CSOUND_WINFLTK_H */
| Angeldude/csound | InOut/winFLTK.h | C | lgpl-2.1 | 3,600 |
// @(#)root/fitpanel:$Id$
// Author: Ilka Antcheva, Lorenzo Moneta, David Gonzalez Maline 10/08/2006
/*************************************************************************
* Copyright (C) 1995-2006, Rene Brun and Fons Rademakers. *
* All rights reserved. *
* *
* For the licensing terms see $ROOTSYS/LICENSE. *
* For the list of contributors see $ROOTSYS/README/CREDITS. *
*************************************************************************/
#ifndef ROOT_TFitEditor
#define ROOT_TFitEditor
//////////////////////////////////////////////////////////////////////////
// //
// TFitEditor //
// //
// Allows to explore and compare various fits. //
// //
//////////////////////////////////////////////////////////////////////////
#include "TGFrame.h"
#include "TGButton.h"
#include "Foption.h"
#include "Math/MinimizerOptions.h"
#include "Fit/DataRange.h"
#include <vector>
#include <map>
//--- Object types
// Kind of TObject selected for fitting (stored in TFitEditor::fType).
// Enumerator order fixes the integer values; do not reorder.
enum EObjectType {
kObjectHisto,
kObjectGraph,
kObjectGraph2D,
kObjectHStack,
kObjectTree,
kObjectMultiGraph
};
class TGTab;
class TVirtualPad;
class TCanvas;
class TGLabel;
class TGComboBox;
class TGTextEntry;
class TGNumberEntry;
class TGDoubleHSlider;
class TGNumberEntry;
class TGNumberEntryField;
class TGStatusBar;
class TAxis;
class TF1;
class TF1NormSum;
class TF1Convolution;
class TFitEditor : public TGMainFrame {
protected:
TGTab *fTab; // tab widget holding the editor
TGCompositeFrame *fTabContainer; // main tab container
TGCompositeFrame *fGeneral; // general tab
TGCompositeFrame *fMinimization; // minimization tab
TGTextButton *fUpdateButton; // updates data from gROOT and gDirectory
TGTextButton *fFitButton; // performs fitting
TGTextButton *fResetButton; // resets fit parameters
TGTextButton *fCloseButton; // close the fit panel
TGLabel *fSelLabel; // contains selected fit function
TGComboBox *fDataSet; // contains list of data set to be fitted
TGComboBox *fTypeFit; // contains the types of functions to be selected
TGComboBox *fFuncList; // contains function list
TGTextEntry *fEnteredFunc; // contains user function file name
TGTextButton *fUserButton; // opens a dialog for user-defined fit method
TGRadioButton *fNone; // set no operation mode
TGRadioButton *fAdd; // set addition mode
TGRadioButton *fNormAdd; // set normalized addition mode
TGRadioButton *fConv; // set convolution mode
TGLayoutHints *fLayoutNone; // layout hints of fNone radio button
TGLayoutHints *fLayoutAdd; // layout hints of fAdd radio button
TGLayoutHints *fLayoutNormAdd; // layout hints of fNOrmAdd radio button
TGLayoutHints *fLayoutConv; // layout hints of fConv radio button
TGTextButton *fSetParam; // open set parameters dialog
TGCheckButton *fIntegral; // switch on/off option 'integral'
TGCheckButton *fBestErrors; // switch on/off option 'improve errors'
TGCheckButton *fUseRange; // switch on/off option 'use function range'
TGCheckButton *fAdd2FuncList; // switch on/off option 'add to list'
TGCheckButton *fUseGradient ; // switch on/off option 'use gradient'
TGCheckButton *fAllWeights1; // switch on/off option 'all weights=1'
TGCheckButton *fImproveResults; // switch on/off option 'improve fit results'
TGCheckButton *fEmptyBinsWghts1; // switch on/off option 'include empry bins'
TGComboBox *fMethodList; // contains method list
TGCheckButton *fLinearFit; // switch on/off linear fit option
TGCheckButton *fNoChi2; // switch on/off option 'No Chi-square'
TGCheckButton *fNoStoreDrawing; // switch on/off 'no store/drwing' option
TGCheckButton *fNoDrawing; // switch on/off 'no drawing' option
TGCheckButton *fDrawSame; // switch on/off fit function drawing
TGTextButton *fDrawAdvanced; // opens a dialog for advanced draw options
TGDoubleHSlider *fSliderX; // slider to set fit range along x-axis
TGNumberEntry *fSliderXMax; // entry to set the maximum in the range
TGNumberEntry *fSliderXMin; // entry to set the minumum in the range
TGDoubleHSlider *fSliderY; // slider to set fit range along y-axis
TGNumberEntry *fSliderYMax; // entry to set the maximum in the range
TGNumberEntry *fSliderYMin; // entry to set the minumum in the range
TGDoubleHSlider *fSliderZ; // slider to set fit range along z-axis
TGHorizontalFrame *fSliderXParent; // parent of fSliderX
TGHorizontalFrame *fSliderYParent; // parent of fSliderY
TGHorizontalFrame *fSliderZParent; // parent of fSliderZ
TGCheckButton *fEnableRobust; // switch on/off robust option
TGNumberEntry *fRobustValue; // contains robust value for linear fit
TGRadioButton *fOptDefault; // set default printing mode
TGRadioButton *fOptVerbose; // set printing mode to 'Verbose'
TGRadioButton *fOptQuiet; // set printing mode to 'Quiet'
TVirtualPad *fParentPad; // pad containing the object
TObject *fFitObject; // selected object to fit
EObjectType fType; // object type info
Int_t fDim; // object dimension
TAxis *fXaxis; // x-axis
TAxis *fYaxis; // y-axis
TAxis *fZaxis; // z-axis
TF1NormSum *fSumFunc; //! TF1NormSum object
TF1Convolution *fConvFunc; //! TF1Convolution object
// structure holding parameter value and limits
struct FuncParamData_t {
FuncParamData_t() {
fP[0] = 0; fP[1] = 0; fP[2] = 0;
}
Double_t & operator[](UInt_t i) { return fP[i];}
Double_t fP[3];
};
std::vector<FuncParamData_t> fFuncPars; // function parameters (value + limits)
std::multimap<TObject*, TF1*> fPrevFit; // Previous successful fits.
std::vector<TF1*> fSystemFuncs; // functions managed by the fitpanel
TGRadioButton *fLibMinuit; // set default minimization library (Minuit)
TGRadioButton *fLibMinuit2; // set Minuit2 as minimization library
TGRadioButton *fLibFumili; // set Fumili as minimization library
TGRadioButton *fLibGSL; // set GSL as minimization library
TGRadioButton *fLibGenetics; // set Genetic/GALib as minimization library
TGComboBox *fMinMethodList; // set the minimization method
TGNumberEntryField *fErrorScale; // contains error scale set for minimization
TGNumberEntryField *fTolerance; // contains tolerance set for minimization
TGNumberEntryField *fIterations; // contains maximum number of iterations
TGStatusBar *fStatusBar; // statusbar widget
Bool_t fChangedParams; // flag to indicate if the parameters have been set in the ParameterDialog GUI
static TFitEditor *fgFitDialog; // singleton fit panel
protected:
void GetFunctionsFromSystem();
void ProcessTreeInput(TObject* objSelected, Int_t selected,
TString variables, TString cuts);
TF1* FindFunction();
void FillDataSetList();
TGComboBox* BuildMethodList(TGFrame *parent, Int_t id);
void GetRanges(ROOT::Fit::DataRange&);
TF1* GetFitFunction();
TList* GetFitObjectListOfFunctions();
void DrawSelection(bool restore = false);
Int_t CheckFunctionString(const char* str);
void CreateFunctionGroup();
void CreateGeneralTab();
void CreateMinimizationTab();
void MakeTitle(TGCompositeFrame *parent, const char *title);
TF1* HasFitFunction();
void SetEditable(Bool_t);
private:
TFitEditor(const TFitEditor&); // not implemented
TFitEditor& operator=(const TFitEditor&); // not implemented
void RetrieveOptions(Foption_t&, TString&, ROOT::Math::MinimizerOptions&, Int_t);
public:
TFitEditor(TVirtualPad* pad, TObject *obj);
virtual ~TFitEditor();
TList* GetListOfFittingFunctions(TObject* obj = 0);
static TFitEditor *GetInstance(TVirtualPad* pad = 0, TObject *obj = 0);
virtual Option_t *GetDrawOption() const;
virtual void Hide();
virtual void Show(TVirtualPad* pad, TObject *obj);
void ShowObjectName(TObject* obj);
Bool_t SetObjectType(TObject* obj);
virtual void Terminate();
void UpdateGUI();
virtual void CloseWindow();
virtual void ConnectSlots();
virtual void DisconnectSlots();
virtual void RecursiveRemove(TObject* obj);
protected:
virtual void SetCanvas(TCanvas *c);
public:
virtual void SetFitObject(TVirtualPad *pad, TObject *obj, Int_t event);
virtual void SetFunction(const char *function);
// slot methods 'General' tab
void FillFunctionList(Int_t selected = -1);
void FillMinMethodList(Int_t selected = -1);
virtual void DoAddition(Bool_t on);
virtual void DoNormAddition(Bool_t on);
virtual void DoConvolution(Bool_t on);
virtual void DoAdvancedOptions();
virtual void DoAllWeights1();
virtual void DoClose();
virtual void DoEmptyBinsAllWeights1();
virtual void DoEnteredFunction();
virtual void DoUpdate();
virtual void DoFit();
virtual void DoMaxIterations();
virtual void DoDataSet(Int_t sel);
virtual void DoFunction(Int_t sel);
virtual void DoLinearFit();
virtual void DoNoChi2();
virtual void DoNoSelection();
virtual void DoNoStoreDrawing();
virtual void DoReset();
virtual void DoRobustFit();
virtual void DoSetParameters();
virtual void DoSliderXMoved();
virtual void DoNumericSliderXChanged();
virtual void DoSliderYMoved();
virtual void DoNumericSliderYChanged();
virtual void DoSliderZMoved();
virtual void DoUserDialog();
virtual void DoUseFuncRange();
// slot methods 'Minimization' tab
virtual void DoLibrary(Bool_t on);
virtual void DoMinMethod(Int_t );
virtual void DoPrintOpt(Bool_t on);
public:
typedef std::vector<FuncParamData_t > FuncParams_t;
friend class FitEditorUnitTesting;
ClassDef(TFitEditor,0) //Fit Panel interface
};
#endif
| karies/root | gui/fitpanel/inc/TFitEditor.h | C | lgpl-2.1 | 11,353 |
/*
* #%L
* Alfresco Repository
* %%
* Copyright (C) 2005 - 2016 Alfresco Software Limited
* %%
* This file is part of the Alfresco software.
* If the software was purchased under a paid Alfresco license, the terms of
* the paid license agreement will prevail. Otherwise, the software is
* provided under the following open source license terms:
*
* Alfresco is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Alfresco is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
* #L%
*/
package org.alfresco.repo.security.authentication;
import net.sf.acegisecurity.Authentication;
import net.sf.acegisecurity.UserDetails;
/**
 * Low-level interface allowing control and retrieval of the authentication information held for the current thread.
 *
 * @author dward
 */
public interface AuthenticationContext
{
    /**
     * Remove the current security information from the current thread.
     */
    public void clearCurrentSecurityContext();

    /**
     * Explicitly set the current authentication. If the authentication is <tt>null</tt> then the current
     * authentication is {@link #clearCurrentSecurityContext() cleared}.
     *
     * @param authentication
     *            the current authentication (may be <tt>null</tt>).
     * @return Returns the modified authentication instance or <tt>null</tt> if it was cleared.
     */
    public Authentication setCurrentAuthentication(Authentication authentication);

    /**
     * Explicitly set the given validated user details to be authenticated.
     *
     * @param ud
     *            the validated User Details
     * @return the resulting authentication
     */
    public Authentication setUserDetails(UserDetails ud);

    /**
     * Get the authentication held for the current thread.
     *
     * @return the current authentication
     * @throws AuthenticationException
     */
    public Authentication getCurrentAuthentication() throws AuthenticationException;

    /**
     * Set the system user as the current user.
     *
     * @return the resulting authentication
     */
    public Authentication setSystemUserAsCurrentUser();

    /**
     * Set the system user as the current user for the given tenant domain.
     *
     * @param tenantDomain
     *            the tenant domain
     * @return the resulting authentication
     */
    public Authentication setSystemUserAsCurrentUser(String tenantDomain);

    /**
     * Get the name of the system user. Note: for MT, will get system for default domain only.
     *
     * @return the system user name
     */
    public String getSystemUserName();

    /**
     * Get the name of the system user for the given tenant domain.
     *
     * @param tenantDomain
     *            the tenant domain
     * @return the system user name for that domain
     */
    public String getSystemUserName(String tenantDomain);

    /**
     * Checks whether the given user name is the name of the System user.
     *
     * @param userName
     *            the user name to test
     * @return <tt>true</tt> if this is the System user's name
     */
    public boolean isSystemUserName(String userName);

    /**
     * Is the current user the system user?
     *
     * @return <tt>true</tt> if the current user is the system user
     */
    public boolean isCurrentUserTheSystemUser();

    /**
     * Get the name of the Guest User. Note: for MT, will get guest for default domain only.
     *
     * @return the guest user name
     */
    public String getGuestUserName();

    /**
     * Get the name of the guest user for the given tenant domain.
     *
     * @param tenantDomain
     *            the tenant domain
     * @return the guest user name for that domain
     */
    public String getGuestUserName(String tenantDomain);

    /**
     * Checks whether the given user name is a guest user name.
     *
     * @param userName
     *            the user name to test
     * @return <tt>true</tt> if this is a guest user name
     */
    public boolean isGuestUserName(String userName);

    /**
     * Get the current user name.
     *
     * @return the name of the currently authenticated user
     * @throws AuthenticationException
     */
    public String getCurrentUserName() throws AuthenticationException;

    /**
     * Extracts the tenant domain name from a user name.
     *
     * @param userName
     *            a user name
     * @return a tenant domain name
     */
    public String getUserDomain(String userName);
}
| Alfresco/alfresco-repository | src/main/java/org/alfresco/repo/security/authentication/AuthenticationContext.java | Java | lgpl-3.0 | 3,939 |
//
// windows/basic_stream_handle.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2011 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef BOOST_ASIO_WINDOWS_BASIC_STREAM_HANDLE_HPP
#define BOOST_ASIO_WINDOWS_BASIC_STREAM_HANDLE_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include <boost/asio/detail/config.hpp>
#if defined(BOOST_ASIO_HAS_WINDOWS_STREAM_HANDLE) \
|| defined(GENERATING_DOCUMENTATION)
#include <cstddef>
#include <boost/asio/error.hpp>
#include <boost/asio/windows/basic_handle.hpp>
#include <boost/asio/windows/stream_handle_service.hpp>
#include <boost/asio/detail/throw_error.hpp>
#include <boost/asio/detail/push_options.hpp>
namespace boost {
namespace asio {
namespace windows {
/// Provides stream-oriented handle functionality.
/**
 * The windows::basic_stream_handle class template wraps a Windows handle and
 * provides both blocking and asynchronous stream-oriented read and write
 * operations on it.
 *
 * @par Thread Safety
 * @e Distinct @e objects: Safe.@n
 * @e Shared @e objects: Unsafe.
 *
 * @par Concepts:
 * AsyncReadStream, AsyncWriteStream, Stream, SyncReadStream, SyncWriteStream.
 */
template <typename StreamHandleService = stream_handle_service>
class basic_stream_handle
  : public basic_handle<StreamHandleService>
{
public:
  /// The native representation of a handle.
  typedef typename StreamHandleService::native_type native_type;

  /// Construct a basic_stream_handle without opening it.
  /**
   * The stream handle is created unopened; it must be opened and then
   * connected or accepted before any data can be transferred through it.
   *
   * @param io_service The io_service object used to dispatch handlers for
   * any asynchronous operations performed on the handle.
   */
  explicit basic_stream_handle(boost::asio::io_service& io_service)
    : basic_handle<StreamHandleService>(io_service)
  {
  }

  /// Construct a basic_stream_handle on an existing native handle.
  /**
   * Adopts an already-created native handle.
   *
   * @param io_service The io_service object used to dispatch handlers for
   * any asynchronous operations performed on the handle.
   *
   * @param native_handle The existing native handle to adopt.
   *
   * @throws boost::system::system_error Thrown on failure.
   */
  basic_stream_handle(boost::asio::io_service& io_service,
      const native_type& native_handle)
    : basic_handle<StreamHandleService>(io_service, native_handle)
  {
  }

  /// Write some data to the handle (throwing overload).
  /**
   * Blocks until at least one byte of the data has been written, or an error
   * occurs.
   *
   * @param buffers One or more data buffers to be written to the handle.
   *
   * @returns The number of bytes actually written.
   *
   * @throws boost::system::system_error Thrown on failure. An error code of
   * boost::asio::error::eof indicates that the connection was closed by the
   * peer.
   *
   * @note This call may transmit only part of the data. Use the @ref write
   * free function to guarantee the complete buffer sequence is sent. A
   * single buffer may be supplied via the @ref buffer function:
   * @code handle.write_some(boost::asio::buffer(data, size)); @endcode
   * See the @ref buffer documentation for multi-buffer usage with arrays,
   * boost::array or std::vector.
   */
  template <typename ConstBufferSequence>
  std::size_t write_some(const ConstBufferSequence& buffers)
  {
    boost::system::error_code error;
    const std::size_t bytes_written
      = this->service.write_some(this->implementation, buffers, error);
    boost::asio::detail::throw_error(error);
    return bytes_written;
  }

  /// Write some data to the handle (non-throwing overload).
  /**
   * Blocks until at least one byte of the data has been written, or an error
   * occurs.
   *
   * @param buffers One or more data buffers to be written to the handle.
   *
   * @param ec Set to indicate what error occurred, if any.
   *
   * @returns The number of bytes written. Returns 0 if an error occurred.
   *
   * @note This call may transmit only part of the data. Use the @ref write
   * free function to guarantee the complete buffer sequence is sent.
   */
  template <typename ConstBufferSequence>
  std::size_t write_some(const ConstBufferSequence& buffers,
      boost::system::error_code& ec)
  {
    return this->service.write_some(this->implementation, buffers, ec);
  }

  /// Start an asynchronous write.
  /**
   * Initiates an asynchronous write; the call always returns immediately.
   *
   * @param buffers One or more data buffers to be written to the handle.
   * Although the buffers object may be copied as necessary, ownership of the
   * underlying memory blocks is retained by the caller, which must keep them
   * valid until the handler is invoked.
   *
   * @param handler The handler to be called when the write operation
   * completes. Copies will be made of the handler as required. Its function
   * signature must be:
   * @code void handler(
   *   const boost::system::error_code& error, // Result of operation.
   *   std::size_t bytes_transferred           // Number of bytes written.
   * ); @endcode
   * The handler is never invoked from within this function; invocation is
   * performed in a manner equivalent to using
   * boost::asio::io_service::post().
   *
   * @note The operation may transmit only part of the data. Use the
   * @ref async_write free function to guarantee the complete buffer sequence
   * is sent.
   *
   * @par Example
   * @code
   * handle.async_write_some(boost::asio::buffer(data, size), handler);
   * @endcode
   */
  template <typename ConstBufferSequence, typename WriteHandler>
  void async_write_some(const ConstBufferSequence& buffers,
      WriteHandler handler)
  {
    this->service.async_write_some(this->implementation, buffers, handler);
  }

  /// Read some data from the handle (throwing overload).
  /**
   * Blocks until at least one byte of data has been read, or an error
   * occurs.
   *
   * @param buffers One or more buffers into which the data will be read.
   *
   * @returns The number of bytes read.
   *
   * @throws boost::system::system_error Thrown on failure. An error code of
   * boost::asio::error::eof indicates that the connection was closed by the
   * peer.
   *
   * @note This call may read fewer bytes than requested. Use the @ref read
   * free function to guarantee the requested amount of data is read. A
   * single buffer may be supplied via the @ref buffer function:
   * @code handle.read_some(boost::asio::buffer(data, size)); @endcode
   * See the @ref buffer documentation for multi-buffer usage with arrays,
   * boost::array or std::vector.
   */
  template <typename MutableBufferSequence>
  std::size_t read_some(const MutableBufferSequence& buffers)
  {
    boost::system::error_code error;
    const std::size_t bytes_read
      = this->service.read_some(this->implementation, buffers, error);
    boost::asio::detail::throw_error(error);
    return bytes_read;
  }

  /// Read some data from the handle (non-throwing overload).
  /**
   * Blocks until at least one byte of data has been read, or an error
   * occurs.
   *
   * @param buffers One or more buffers into which the data will be read.
   *
   * @param ec Set to indicate what error occurred, if any.
   *
   * @returns The number of bytes read. Returns 0 if an error occurred.
   *
   * @note This call may read fewer bytes than requested. Use the @ref read
   * free function to guarantee the requested amount of data is read.
   */
  template <typename MutableBufferSequence>
  std::size_t read_some(const MutableBufferSequence& buffers,
      boost::system::error_code& ec)
  {
    return this->service.read_some(this->implementation, buffers, ec);
  }

  /// Start an asynchronous read.
  /**
   * Initiates an asynchronous read; the call always returns immediately.
   *
   * @param buffers One or more buffers into which the data will be read.
   * Although the buffers object may be copied as necessary, ownership of the
   * underlying memory blocks is retained by the caller, which must keep them
   * valid until the handler is invoked.
   *
   * @param handler The handler to be called when the read operation
   * completes. Copies will be made of the handler as required. Its function
   * signature must be:
   * @code void handler(
   *   const boost::system::error_code& error, // Result of operation.
   *   std::size_t bytes_transferred           // Number of bytes read.
   * ); @endcode
   * The handler is never invoked from within this function; invocation is
   * performed in a manner equivalent to using
   * boost::asio::io_service::post().
   *
   * @note The operation may read fewer bytes than requested. Use the
   * @ref async_read free function to guarantee the requested amount of data
   * is read.
   *
   * @par Example
   * @code
   * handle.async_read_some(boost::asio::buffer(data, size), handler);
   * @endcode
   */
  template <typename MutableBufferSequence, typename ReadHandler>
  void async_read_some(const MutableBufferSequence& buffers,
      ReadHandler handler)
  {
    this->service.async_read_some(this->implementation, buffers, handler);
  }
};
} // namespace windows
} // namespace asio
} // namespace boost
#include <boost/asio/detail/pop_options.hpp>
#endif // defined(BOOST_ASIO_HAS_WINDOWS_STREAM_HANDLE)
// || defined(GENERATING_DOCUMENTATION)
#endif // BOOST_ASIO_WINDOWS_BASIC_STREAM_HANDLE_HPP
| kerautret/DGtal-forIPOL | src/boost-1.48/boost/asio/windows/basic_stream_handle.hpp | C++ | lgpl-3.0 | 11,635 |
package new_model;
import java.util.Date;
/**
 * Model object describing an item published by a user: either something the
 * user proposes (offers) or something the user wishes to receive.
 *
 * @author CHTIWI El Mehdi
 */
public class new_objet {

    private new_User proprietaire;   // owner of the object
    private String titre;            // title of the announcement
    private Categorie categorie;     // category of the object
    private String description;      // free-text description
    private String image;            // image reference
    private Zone zone;               // geographic zone
    private String contact;          // owner's contact information
    private long date;               // publication date (numeric timestamp -- confirm unit)
    private long vie;                // lifetime of the announcement -- confirm unit
    private boolean proposition;     // true when the object is being offered
    private boolean souhaite;        // true when the object is being wished for

    /**
     * Builds a fully initialised object.
     */
    public new_objet(new_User proprietaire, String titre,
            Categorie categorie, String description, String image,
            Zone zone, String contact, long date, long vie,
            boolean proposition, boolean souhaite) {
        this.proprietaire = proprietaire;
        this.titre = titre;
        this.categorie = categorie;
        this.description = description;
        this.image = image;
        this.zone = zone;
        this.contact = contact;
        this.date = date;
        this.vie = vie;
        this.proposition = proposition;
        this.souhaite = souhaite;
    }

    /**
     * No-argument constructor; all fields keep their default values
     * (presumably required by a serialisation framework -- confirm).
     */
    public new_objet() {
    }

    public new_User getProprietaire() { return proprietaire; }

    public void setProprietaire(new_User proprietaire) { this.proprietaire = proprietaire; }

    public String getTitre() { return titre; }

    public void setTitre(String titre) { this.titre = titre; }

    public Categorie getCategorie() { return categorie; }

    public void setCategorie(Categorie categorie) { this.categorie = categorie; }

    public String getDescription() { return description; }

    public void setDescription(String description) { this.description = description; }

    public String getImage() { return image; }

    public void setImage(String image) { this.image = image; }

    public Zone getZone() { return zone; }

    public void setZone(Zone zone) { this.zone = zone; }

    public String getContact() { return contact; }

    public void setContact(String contact) { this.contact = contact; }

    public long getDate() { return date; }

    public void setDate(long date) { this.date = date; }

    public long getVie() { return vie; }

    public void setVie(long vie) { this.vie = vie; }

    public boolean isProposition() { return proposition; }

    public void setProposition(boolean proposition) { this.proposition = proposition; }

    public boolean isSouhaite() { return souhaite; }

    public void setSouhaite(boolean souhaite) { this.souhaite = souhaite; }
}
| chafca/p2pEngine | old/src/new_model/new_objet.java | Java | lgpl-3.0 | 2,347 |
/**
* This class was created by <Vazkii>. It's distributed as
* part of the Botania Mod. Get the Source Code in github:
* https://github.com/Vazkii/Botania
*
* Botania is Open Source and distributed under a
* Creative Commons Attribution-NonCommercial-ShareAlike 3.0 License
* (http://creativecommons.org/licenses/by-nc-sa/3.0/deed.en_GB)
*
* File Created @ [May 25, 2014, 7:34:00 PM (GMT)]
*/
package vazkii.botania.api.mana;
import net.minecraft.item.ItemStack;
/**
 * Have an item implement this to flag it as an infinite
 * mana source for the purposes of the HUD rendered when
 * an IManaUserItem implementing item is present.
 */
public interface ICreativeManaProvider {

	/**
	 * Returns whether the given stack should be treated as an infinite
	 * ("creative") mana source.
	 *
	 * @param stack the item stack being queried
	 * @return <tt>true</tt> to flag the stack as a creative mana provider
	 */
	public boolean isCreative(ItemStack stack);
}
| LordSaad44/Metallurgy4 | src/api/java/vazkii/botania/api/mana/ICreativeManaProvider.java | Java | lgpl-3.0 | 741 |
package lab.s2jh.sys.dao;
import lab.s2jh.core.dao.BaseDao;
import lab.s2jh.sys.entity.LoggingEvent;
import org.springframework.stereotype.Repository;
/**
 * Data-access interface for {@link LoggingEvent} entities, keyed by a
 * {@link Long} identifier. All operations are inherited from
 * {@link BaseDao}; no custom queries are declared here.
 */
@Repository
public interface LoggingEventDao extends BaseDao<LoggingEvent, Long> {
} | xautlx/s2jh | common-service/src/main/java/lab/s2jh/sys/dao/LoggingEventDao.java | Java | lgpl-3.0 | 239 |
/*
* @BEGIN LICENSE
*
* Psi4: an open-source quantum chemistry software package
*
* Copyright (c) 2007-2021 The Psi4 Developers.
*
* The copyrights for code used from other parties are included in
* the corresponding files.
*
* This file is part of Psi4.
*
* Psi4 is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, version 3.
*
* Psi4 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License along
* with Psi4; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
* @END LICENSE
*/
/*! \defgroup PSIO libpsio: The PSI I/O Library */
/*!
** \file
** \ingroup PSIO
*/
#ifdef _MSC_VER
#include <io.h>
#define SYSTEM_CLOSE ::_close
#define SYSTEM_UNLINK ::_unlink
#else
#include <unistd.h>
#define SYSTEM_CLOSE ::close
#define SYSTEM_UNLINK ::unlink
#endif
#include <cstring>
#include <cstdlib>
#include "psi4/libpsio/psio.h"
#include "psi4/libpsio/psio.hpp"
#include "psi4/psi4-dec.h"
namespace psi {
void PSIO::close(size_t unit, int keep) {
size_t i;
psio_ud *this_unit;
psio_tocentry *this_entry, *next_entry;
this_unit = &(psio_unit[unit]);
/* First check to see if this unit is already closed */
if (this_unit->vol[0].stream == -1) psio_error(unit, PSIO_ERROR_RECLOSE);
/* Dump the current TOC back out to disk */
tocwrite(unit);
/* Free the TOC */
this_entry = this_unit->toc;
for (i = 0; i < this_unit->toclen; i++) {
next_entry = this_entry->next;
free(this_entry);
this_entry = next_entry;
}
/* Close each volume (remove if necessary) and free the path */
for (i = 0; i < this_unit->numvols; i++) {
int errcod;
errcod = SYSTEM_CLOSE(this_unit->vol[i].stream);
if (errcod == -1) psio_error(unit, PSIO_ERROR_CLOSE);
/* Delete the file completely if requested */
if (!keep) SYSTEM_UNLINK(this_unit->vol[i].path);
PSIOManager::shared_object()->close_file(std::string(this_unit->vol[i].path), unit, (keep ? true : false));
free(this_unit->vol[i].path);
this_unit->vol[i].path = nullptr;
this_unit->vol[i].stream = -1;
}
/* Reset the global page stats to zero */
this_unit->numvols = 0;
this_unit->toclen = 0;
this_unit->toc = nullptr;
}
int psio_close(size_t unit, int keep) {
_default_psio_lib_->close(unit, keep);
return 0;
}
} // namespace psi
| ashutoshvt/psi4 | psi4/src/psi4/libpsio/close.cc | C++ | lgpl-3.0 | 2,806 |
//
// Copyright (c) 2013, 2014 CNRS
// Author: Florent Lamiraux
//
//
// This file is part of hpp-model
// hpp-model is free software: you can redistribute it
// and/or modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation, either version
// 3 of the License, or (at your option) any later version.
//
// hpp-model is distributed in the hope that it will be
// useful, but WITHOUT ANY WARRANTY; without even the implied warranty
// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// General Lesser Public License for more details. You should have
// received a copy of the GNU Lesser General Public License along with
// hpp-model If not, see
// <http://www.gnu.org/licenses/>.
#include <hpp/fcl/distance.h>
#include <hpp/fcl/collision.h>
#include <hpp/util/debug.hh>
#include <hpp/model/body.hh>
#include <hpp/model/joint.hh>
#include <hpp/model/collision-object.hh>
#include <hpp/model/object-factory.hh>
namespace fcl {
HPP_PREDEF_CLASS (CollisionGeometry);
} // namespace fcl
namespace hpp {
namespace model {
//-----------------------------------------------------------------------
static ObjectVector_t::iterator
findObject (ObjectVector_t& vector,
const fcl::CollisionObjectConstPtr_t& object)
{
ObjectVector_t::iterator it;
for (it = vector.begin (); it != vector.end (); ++it) {
const CollisionObjectPtr_t& local = *it;
if (local->fcl ()->collisionGeometry () ==
object->collisionGeometry ()) return it;
}
return it;
}
//-----------------------------------------------------------------------
    // Default constructor: empty inner/outer object lists, no joint
    // attached, zero mass and zero radius.
    Body:: Body () : collisionInnerObjects_ (), collisionOuterObjects_ (),
		     distanceInnerObjects_ (), distanceOuterObjects_ (),
		     joint_ (0x0), name_ (), localCom_ (), inertiaMatrix_ (),
		     mass_ (0), radius_ (0)
    {
    }
//-----------------------------------------------------------------------
    // Copy constructor: copies name, local center of mass, inertia matrix,
    // mass and radius, but deliberately leaves the joint pointer null and
    // all four object lists empty -- clone () below repopulates the inner
    // lists for the destination joint.
    Body::Body (const Body& body) :
      collisionInnerObjects_ (), collisionOuterObjects_ (),
      distanceInnerObjects_ (), distanceOuterObjects_ (),
      joint_ (0x0), name_ (body.name_), localCom_ (body.localCom_),
      inertiaMatrix_ (body.inertiaMatrix_), mass_ (body.mass_),
      radius_ (body.radius_)
    {
    }
//-----------------------------------------------------------------------
BodyPtr_t Body::clone (const JointPtr_t& joint) const
{
BodyPtr_t newBody = new Body (*this);
joint->setLinkedBody (newBody);
// Copy collision object lists
for (ObjectVector_t::const_iterator itObj =
collisionInnerObjects_.begin ();
itObj != collisionInnerObjects_.end (); ++itObj) {
newBody->collisionInnerObjects_.push_back ((*itObj)->clone (joint));
}
for (ObjectVector_t::const_iterator itObj =
distanceInnerObjects_.begin ();
itObj != distanceInnerObjects_.end (); ++itObj) {
newBody->distanceInnerObjects_.push_back ((*itObj)->clone (joint));
}
return newBody;
}
//-----------------------------------------------------------------------
void Body::updateRadius (const CollisionObjectPtr_t& object)
{
fcl::CollisionGeometryConstPtr_t geom =
object->fcl ()->collisionGeometry();
const Transform3f& positionInJoint = object->positionInJointFrame ();
fcl::Vec3f p;
p [0] = geom->aabb_local.min_ [0];
p [1] = geom->aabb_local.min_ [1];
p [2] = geom->aabb_local.min_ [2];
value_type newLength = positionInJoint.transform (p).length ();
if (newLength > radius_) radius_ = newLength;
p [0] = geom->aabb_local.max_ [0];
p [1] = geom->aabb_local.min_ [1];
p [2] = geom->aabb_local.min_ [2];
newLength = positionInJoint.transform (p).length ();
if (newLength > radius_) radius_ = newLength;
p [0] = geom->aabb_local.min_ [0];
p [1] = geom->aabb_local.max_ [1];
p [2] = geom->aabb_local.min_ [2];
newLength = positionInJoint.transform (p).length ();
if (newLength > radius_) radius_ = newLength;
p [0] = geom->aabb_local.max_ [0];
p [1] = geom->aabb_local.max_ [1];
p [2] = geom->aabb_local.min_ [2];
newLength = positionInJoint.transform (p).length ();
if (newLength > radius_) radius_ = newLength;
p [0] = geom->aabb_local.min_ [0];
p [1] = geom->aabb_local.min_ [1];
p [2] = geom->aabb_local.max_ [2];
newLength = positionInJoint.transform (p).length ();
if (newLength > radius_) radius_ = newLength;
p [0] = geom->aabb_local.max_ [0];
p [1] = geom->aabb_local.min_ [1];
p [2] = geom->aabb_local.max_ [2];
newLength = positionInJoint.transform (p).length ();
if (newLength > radius_) radius_ = newLength;
p [0] = geom->aabb_local.min_ [0];
p [1] = geom->aabb_local.max_ [1];
p [2] = geom->aabb_local.max_ [2];
newLength = positionInJoint.transform (p).length ();
if (newLength > radius_) radius_ = newLength;
p [0] = geom->aabb_local.max_ [0];
p [1] = geom->aabb_local.max_ [1];
p [2] = geom->aabb_local.max_ [2];
newLength = positionInJoint.transform (p).length ();
if (newLength > radius_) radius_ = newLength;
hppDout (info, "joint " << joint_->name () << ", radius " << radius_);
}
//-----------------------------------------------------------------------
void Body::addInnerObject (const CollisionObjectPtr_t& object,
bool collision, bool distance)
{
if (collision) {
if (findObject (collisionInnerObjects_, object->fcl ()) ==
collisionInnerObjects_.end ()) {
if (joint () == 0) {
throw std::runtime_error ("Body should be connected to a joint "
"before inserting objects.");
}
object->joint (joint ());
updateRadius (object);
collisionInnerObjects_.push_back (object);
}
}
if (distance) {
if (findObject (distanceInnerObjects_, object->fcl ()) ==
distanceInnerObjects_.end ()) {
if (joint () == 0) {
throw std::runtime_error ("Body should be connected to a joint "
"before inserting objects.");
}
object->joint (joint ());
distanceInnerObjects_.push_back (object);
if (!joint ()->robot ()) {
throw std::runtime_error ("Body should be connected to a robot "
"before inserting inner objects.");
}
joint ()->robot ()->updateDistances ();
}
}
}
//-----------------------------------------------------------------------
void Body::addOuterObject (const CollisionObjectPtr_t& object,
bool collision, bool distance)
{
if (collision) {
if (findObject (collisionOuterObjects_, object->fcl ()) ==
collisionOuterObjects_.end ()) {
hppDout (info, "adding " << object->name () << " to body "
<< this->name_ << " for collision");
collisionOuterObjects_.push_back (object);
}
}
if (distance) {
if (findObject (distanceOuterObjects_, object->fcl ()) ==
distanceOuterObjects_.end ()) {
hppDout (info, "adding " << object->name () << " to body "
<< this->name_ << " for distance");
distanceOuterObjects_.push_back (object);
if (!joint ()->robot ()) {
throw std::runtime_error ("Body should be connected to a robot "
"before inserting outer objects.");
}
joint ()->robot ()->updateDistances ();
}
}
}
//-----------------------------------------------------------------------
void Body::removeInnerObject (const CollisionObjectPtr_t& object,
bool collision, bool distance)
{
if (collision) {
ObjectVector_t::iterator it =
findObject (collisionInnerObjects_, object->fcl ());
if (it != collisionInnerObjects_.end ())
collisionInnerObjects_.erase (it);
}
if (distance) {
ObjectVector_t::iterator it =
findObject (distanceInnerObjects_, object->fcl ());
if (it != distanceInnerObjects_.end ())
distanceInnerObjects_.erase (it);
if (!joint ()->robot ()) {
throw std::runtime_error ("Body should be connected to a robot "
"before inserting outer objects.");
}
joint ()->robot ()->updateDistances ();
}
}
//-----------------------------------------------------------------------
void Body::removeOuterObject (const CollisionObjectPtr_t& object,
bool collision, bool distance)
{
if (collision) {
ObjectVector_t::iterator it =
findObject (collisionOuterObjects_, object->fcl ());
if (it != collisionOuterObjects_.end ()) {
collisionOuterObjects_.erase (it);
}
}
if (distance) {
ObjectVector_t::iterator it =
findObject (distanceOuterObjects_, object->fcl ());
if (it != distanceOuterObjects_.end ()) {
distanceOuterObjects_.erase (it);
}
if (!joint ()->robot ()) {
throw std::runtime_error ("Body should be connected to a robot "
"before inserting outer objects.");
}
joint ()->robot ()->updateDistances ();
}
}
//-----------------------------------------------------------------------
const ObjectVector_t& Body::innerObjects (Request_t type) const
{
switch (type) {
case COLLISION:
return collisionInnerObjects_;
case DISTANCE:
return distanceInnerObjects_;
default:
throw std::runtime_error
("Please choose between COLLISION and DISTANCE.");
}
}
//-----------------------------------------------------------------------
const ObjectVector_t& Body::outerObjects (Request_t type) const
{
switch (type) {
case COLLISION:
return collisionOuterObjects_;
case DISTANCE:
return distanceOuterObjects_;
default:
throw std::runtime_error
("Please choose between COLLISION and DISTANCE.");
}
}
    // Pairwise collision test: returns true as soon as any inner object of
    // this body collides with any outer object. Assumes object transforms
    // are already up to date -- confirm against callers.
    bool Body::collisionTest () const
    {
      // Constructor arguments follow fcl::CollisionRequest (at most one
      // contact, no contact details, no cost sources -- confirm against the
      // fcl version in use); GST_INDEP selects fcl's independent GJK solver.
      fcl::CollisionRequest collisionRequest (1, false, false, 1, false, true,
					      fcl::GST_INDEP);
      fcl::CollisionResult collisionResult;
      for (ObjectVector_t::const_iterator itInner =
	     collisionInnerObjects_.begin ();
	   itInner != collisionInnerObjects_.end (); ++itInner) {
	for (ObjectVector_t::const_iterator itOuter =
	       collisionOuterObjects_.begin ();
	     itOuter != collisionOuterObjects_.end (); ++itOuter) {
	  // fcl::collide returns the number of contacts found; non-zero
	  // means the pair is in collision.
	  if (fcl::collide ((*itInner)->fcl ().get (),
			    (*itOuter)->fcl ().get (),
			    collisionRequest, collisionResult) != 0) {
	    hppDout (info, "Collision between " << (*itInner)->name ()
		     << " and " << (*itOuter)->name ());
	    return true;
	  }
	}
      }
      return false;
    }
    /// Compute distances between each inner object and each outer object.
    /// Results are written sequentially into \a results starting at index
    /// \a offset; \a offset is advanced past the last written entry so that
    /// several bodies can share one result vector.
    /// NOTE(review): only the inner objects' transforms are refreshed here;
    /// the outer objects are assumed to be already positioned -- confirm.
    void Body::computeDistances (DistanceResults_t& results,
                                 DistanceResults_t::size_type& offset)
    {
      // Request nearest points, independent GJK solver.
      fcl::DistanceRequest distanceRequest (true, 0, 0, fcl::GST_INDEP);
      for (ObjectVector_t::iterator itInner = distanceInnerObjects_.begin ();
           itInner != distanceInnerObjects_.end (); ++itInner) {
        // Compute global position of inner object and push it to fcl.
        fcl::Transform3f globalPosition = joint ()->currentTransformation ()*
          (*itInner)->positionInJointFrame ();
        (*itInner)->fcl ()->setTransform (globalPosition);
        for (ObjectVector_t::iterator itOuter = distanceOuterObjects_.begin ();
             itOuter != distanceOuterObjects_.end (); ++itOuter) {
          // Clear the previous fcl result before recomputing this pair.
          results [offset].fcl.clear ();
          fcl::distance ((*itInner)->fcl ().get (), (*itOuter)->fcl ().get (),
                         distanceRequest, results [offset].fcl);
          results [offset].innerObject = *itInner;
          results [offset].outerObject = *itOuter;
          offset++;
        }
      }
    }
} // namespace model
} // namespace hpp
| jmirabel/hpp-model | src/body.cc | C++ | lgpl-3.0 | 11,551 |
/* Raven 2 Control - Control software for the Raven II robot
* Copyright (C) 2005-2012 H. Hawkeye King, Blake Hannaford, and the University of Washington BioRobotics Laboratory
*
* This file is part of Raven 2 Control.
*
* Raven 2 Control is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Raven 2 Control is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with Raven 2 Control. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* hmatrix.h
*
* Created on: Jan 22, 2010
* Author: glozmand
*/
#ifndef HMATRIX_H_
#define HMATRIX_H_
#include "struct.h" /*Includes DS0, DS1, DOF_type, defines, etc*/
#include "utils.h"
/* Homogeneous transformation matrices are 4x4. */
#define HMATRIX_SIZE 4
/* Square of x.  The argument is evaluated twice: do not pass
 * expressions with side effects. */
#define SQR(x) ( (x) * (x) )
/* Absolute value of x.  The argument is evaluated twice: do not pass
 * expressions with side effects. */
#define ABS(x) ( (x) > (0) ? (x) : (-(x)) )
/* C = A * B for 4x4 float matrices. */
void mulMatrix(float A[HMATRIX_SIZE][HMATRIX_SIZE], float B[HMATRIX_SIZE][HMATRIX_SIZE], float C[HMATRIX_SIZE][HMATRIX_SIZE]);
/* C = A * B for 3x3 integer matrices. */
void mulMatrix3x3i(int A[3][3], int B[3][3], int C[3][3]);
/* C = A * B for 3x3 float matrices. */
void mulMatrix3x3f(float A[3][3], float B[3][3], float C[3][3]);
/* R_transpose = R^T for a 3x3 float matrix. */
void transpose3x3f(float R[3][3], float R_transpose[3][3]);
/* tip_inv = tip^-1, assuming the rotation part of tip is orthonormal. */
void invOrthMatrix(float tip[HMATRIX_SIZE][HMATRIX_SIZE], float tip_inv[HMATRIX_SIZE][HMATRIX_SIZE]);
/* Debug helpers: print a labeled matrix to the log/console. */
void printMatrix4x4(double rot_matrix[4][4], char *str);
void printMatrix3x3(double rot_matrix[3][3], char *str);
void printMatrix3x3i(int rot_matrix[3][3], char *str);
/* Extract an orientation struct from a homogeneous transform. */
void orientation_from_hmatrix(float tip[HMATRIX_SIZE][HMATRIX_SIZE], struct orientation *ori);
/* Build a homogeneous transform from an orientation struct. */
void hmatrix_from_orientation(struct orientation *ori, double tip[HMATRIX_SIZE][HMATRIX_SIZE]);
/* Rotation matrix mr for a rotation of angle phi about axis n
 * (presumably a unit axis / Rodrigues formula -- confirm in hmatrix.c). */
void create_rotation_matrix(float n[3], float phi, float mr[3][3]);
/* Inverse operation: recover axis rvec and angle from rotation matrix R. */
void create_rotation_from_rmatrix(float R[3][3], float rvec[3], float *angle);
#endif /* HMATRIX_H_ */
| CSLDepend/raven2_sim | include/raven/hmatrix.h | C | lgpl-3.0 | 2,196 |
#ifndef KEYTOKEY_HPP
#define KEYTOKEY_HPP
#include "RemapFuncBase.hpp"
#include "TimerWrapper.hpp"
namespace org_pqrs_Karabiner {
namespace RemapFunc {
// Remapping rule that translates a single "from" key event (with required
// modifier flags) into one or more "to" events.  Also holds optional
// before/after event sequences, delayed-action sequences, and key-repeat
// configuration.
// NOTE(review): the exact semantics of the individual event vectors are
// implemented in add()/remap() (KeyToKey.cpp) -- confirm details there.
class KeyToKey final : public RemapFuncBase {
public:
  // One-time setup/teardown of the class-wide fire timer on the work loop.
  static void static_initialize(IOWorkLoop& workloop);
  static void static_terminate(void);
  KeyToKey(AutogenId autogenId) : RemapFuncBase(BRIDGE_REMAPTYPE_KEYTOKEY, autogenId),
                                  index_(0),
                                  currentToEvent_(CurrentToEvent::TO_KEYS),
                                  keyboardRepeatID_(-1),
                                  isRepeatEnabled_(true),
                                  lastPhysicalEventType_(PhysicalEventType::DOWN),
                                  delayUntilRepeat_(-1),
                                  keyRepeat_(-1) {}
  virtual ~KeyToKey(void) {
    // If this instance currently owns the pending timer, cancel it so the
    // callback cannot fire against a destroyed object.
    if (target_ == this) {
      fire_timer_.cancelTimeout();
      target_ = nullptr;
    }
  }
  void prepare(RemapParams& remapParams) override;
  bool remap(RemapParams& remapParams) override;
  // ----------------------------------------
  // Configuration values arrive one at a time via add(); index_ tracks the
  // position:
  // [0] => fromEvent_
  // [1] => toKeys_[0]
  // [2] => toKeys_[1]
  // [3] => ...
  void add(AddDataType datatype, AddValue newval) override;
  // ----------------------------------------
  // utility functions
  void add(KeyCode newval) { add(AddDataType(BRIDGE_DATATYPE_KEYCODE), AddValue(newval.get())); }
  void add(Option newval) { add(AddDataType(BRIDGE_DATATYPE_OPTION), AddValue(newval.get())); }
  bool call_remap_with_VK_PSEUDO_KEY(EventType eventType, PhysicalEventType physicalEventType);
  // Number of "to" events configured for this rule.
  size_t toKeysSize(void) const { return toKeys_.size(); }
  void clearToKeys(void);
  // True if the final "to" event is a modifier key, or behaves like one.
  bool isLastToEventModifierKeyOrLikeModifier(void) const {
    if (toKeys_.empty()) return false;
    auto& lastToEvent = toKeys_[toKeys_.size() - 1];
    if (lastToEvent.getModifierFlag() != ModifierFlag::ZERO) {
      return true;
    }
    if (lastToEvent.isEventLikeModifier()) {
      return true;
    }
    return false;
  }
  // True while the "from" key is held down.
  bool isPressing(void) const { return fromEvent_.isPressing(); }
private:
  static void fire_timer_callback(OSObject* owner, IOTimerEventSource* sender);
  void doDelayedAction(const Vector_ToEvent& keys, bool delayedActionCanceledBy);
  int getDelayUntilRepeat(void);
  int getKeyRepeat(void);
  // Tag selecting which event vector subsequent add() calls append to.
  class CurrentToEvent final {
  public:
    enum Value {
      TO_KEYS,
      BEFORE_KEYS,
      AFTER_KEYS,
      DELAYED_ACTION_KEYS,
      DELAYED_ACTION_CANCELED_DEFAULT_KEYS,
      DELAYED_ACTION_CANCELED_BY_KEYS,
      INCREASE_MODIFIER_FLAGS,
    };
  };
  // Map currentToEvent_ to the vector currently being filled.  Every enum
  // value is covered, so no default branch (the compiler will warn if a new
  // value is added without a case).
  Vector_ToEvent& getCurrentToEvent(void) {
    switch (currentToEvent_) {
    case CurrentToEvent::TO_KEYS:
      return toKeys_;
    case CurrentToEvent::BEFORE_KEYS:
      return beforeKeys_;
    case CurrentToEvent::AFTER_KEYS:
      return afterKeys_.back();
    case CurrentToEvent::DELAYED_ACTION_KEYS:
      return delayedActionKeys_;
    case CurrentToEvent::DELAYED_ACTION_CANCELED_DEFAULT_KEYS:
      return delayedActionCanceledDefaultKeys_;
    case CurrentToEvent::DELAYED_ACTION_CANCELED_BY_KEYS:
      return delayedActionCanceledByKeys_.back();
    case CurrentToEvent::INCREASE_MODIFIER_FLAGS:
      return increaseModifierFlags_;
    }
  }
  size_t index_;                               // next add() slot (see add() above)
  FromEvent fromEvent_;                        // the event being remapped
  Vector_ModifierFlag fromModifierFlags_;      // modifiers required for the match
  Vector_ModifierFlag pureFromModifierFlags_;  // fromModifierFlags_ - fromEvent_.getModifierFlag()
  Vector_ToEvent toKeys_;                      // events emitted in place of fromEvent_
  Vector_ToEvent beforeKeys_;
  Vector_Vector_ToEvent afterKeys_;
  Vector_ToEvent delayedActionKeys_;
  Vector_ToEvent delayedActionCanceledDefaultKeys_;
  Vector_Vector_ToEvent delayedActionCanceledByKeys_;
  Vector_ToEvent increaseModifierFlags_;
  CurrentToEvent::Value currentToEvent_;       // vector add() is currently filling
  static TimerWrapper fire_timer_;             // shared timer for delayed actions
  static KeyToKey* target_;                    // instance owning the pending timer
  static FlagStatus flagStatusForDelayedActionKeys_;
  int keyboardRepeatID_;
  bool isRepeatEnabled_;
  PhysicalEventType lastPhysicalEventType_;
  int delayUntilRepeat_;                       // ms; -1 means "use system value"
  int keyRepeat_;                              // ms; -1 means "use system value"
};
}
}
#endif
| astachurski/Karabiner | src/core/kext/RemapFunc/KeyToKey.hpp | C++ | unlicense | 4,013 |
/*
* Copyright 2000-2018 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.roots;
import com.intellij.openapi.application.ApplicationManager;
import com.intellij.openapi.module.Module;
import com.intellij.openapi.module.StdModuleTypes;
import com.intellij.openapi.module.impl.ModuleEx;
import com.intellij.openapi.module.impl.scopes.LibraryScope;
import com.intellij.openapi.roots.DependencyScope;
import com.intellij.openapi.roots.ModuleRootManager;
import com.intellij.openapi.roots.ModuleRootModificationUtil;
import com.intellij.openapi.roots.OrderEnumerator;
import com.intellij.openapi.roots.libraries.Library;
import com.intellij.openapi.vfs.VirtualFile;
import com.intellij.psi.impl.ResolveScopeManager;
import com.intellij.psi.search.GlobalSearchScope;
import com.intellij.testFramework.JavaModuleTestCase;
import com.intellij.testFramework.PsiTestUtil;
import com.intellij.testFramework.fixtures.impl.LightTempDirTestFixtureImpl;
import com.intellij.util.PathsList;
import java.io.IOException;
import java.util.Collections;
/**
 * Tests visibility rules of module {@link GlobalSearchScope}s and classpath
 * enumeration via {@link OrderEnumerator} for the different
 * {@link DependencyScope} kinds (COMPILE, TEST, RUNTIME, PROVIDED).
 *
 * @author yole
 */
public class ModuleScopesTest extends JavaModuleTestCase {
  /** Temp-directory fixture used to create module sources and library files. */
  private LightTempDirTestFixtureImpl myFixture;
  @Override
  protected void setUp() throws Exception {
    super.setUp();
    myFixture = new LightTempDirTestFixtureImpl();
    myFixture.setUp();
  }
  @Override
  protected void tearDown() throws Exception {
    try {
      myFixture.tearDown();
    }
    catch (Throwable e) {
      addSuppressedException(e);
    }
    finally {
      myFixture = null;
      super.tearDown();
    }
  }
  /** COMPILE-scope module and library dependencies: checks every basic scope kind. */
  public void testBasics() throws Exception {
    Module moduleA = createModule("a.iml", StdModuleTypes.JAVA);
    addDependentModule(moduleA, DependencyScope.COMPILE);
    addLibrary(moduleA, DependencyScope.COMPILE);
    VirtualFile classB = myFixture.createFile("b/Test.java", "public class Test { }");
    VirtualFile libraryClass = myFixture.createFile("lib/Test.class");
    // Module-only scope sees neither the dependent module nor the library.
    assertFalse(moduleA.getModuleScope().contains(classB));
    assertFalse(moduleA.getModuleScope().contains(libraryClass));
    // With-libraries scope adds the library but not the dependent module.
    assertFalse(moduleA.getModuleWithLibrariesScope().contains(classB));
    assertTrue(moduleA.getModuleWithLibrariesScope().contains(libraryClass));
    // With-dependencies scope adds the module but not the library.
    assertTrue(moduleA.getModuleWithDependenciesScope().contains(classB));
    assertFalse(moduleA.getModuleWithDependenciesScope().contains(libraryClass));
    // The combined and runtime scopes see both.
    assertTrue(moduleA.getModuleWithDependenciesAndLibrariesScope(true).contains(classB));
    assertTrue(moduleA.getModuleWithDependenciesAndLibrariesScope(true).contains(libraryClass));
    assertTrue(moduleA.getModuleRuntimeScope(true).contains(classB));
    assertTrue(moduleA.getModuleRuntimeScope(true).contains(libraryClass));
  }
  /** LibraryScope covers both the classes root and the sources root. */
  public void testLibraryScope() throws IOException {
    VirtualFile libraryClass = myFixture.createFile("lib/classes/Test.class");
    VirtualFile librarySrc = myFixture.createFile("lib/src/Test.java", "public class Test { }");
    Library library = PsiTestUtil.addProjectLibrary(myModule, "my-lib", Collections.singletonList(libraryClass.getParent()),
                                                    Collections.singletonList(librarySrc.getParent()));
    LibraryScope scope = new LibraryScope(myProject, library);
    assertTrue(scope.contains(libraryClass));
    assertTrue(scope.contains(librarySrc));
  }
  /** TEST-scope module dependency: visible in test scope only; excluded from production classpath. */
  public void testTestOnlyModuleDependency() throws Exception {
    Module moduleA = createModule("a.iml", StdModuleTypes.JAVA);
    Module moduleB = addDependentModule(moduleA, DependencyScope.TEST);
    VirtualFile classB = myFixture.createFile("b/Test.java", "public class Test { }");
    assertTrue(moduleA.getModuleWithDependenciesAndLibrariesScope(true).contains(classB));
    assertFalse(moduleA.getModuleWithDependenciesAndLibrariesScope(false).contains(classB));
    assertFalse(moduleA.getModuleWithDependenciesAndLibrariesScope(false).isSearchInModuleContent(moduleB));
    final VirtualFile[] compilationClasspath = getCompilationClasspath(moduleA);
    assertEquals(1, compilationClasspath.length);
    final VirtualFile[] productionCompilationClasspath = getProductionCompileClasspath(moduleA);
    assertEmpty(productionCompilationClasspath);
    final PathsList pathsList = OrderEnumerator.orderEntries(moduleA).recursively().getPathsList();
    assertEquals(1, pathsList.getPathList().size());
    final PathsList pathsListWithoutTests = OrderEnumerator.orderEntries(moduleA).productionOnly().recursively().getPathsList();
    assertEquals(0, pathsListWithoutTests.getPathList().size());
  }
  /** Adds a module "b" with a source root and output path, and makes {@code moduleA} depend on it. */
  private Module addDependentModule(final Module moduleA, final DependencyScope scope) {
    return addDependentModule("b", moduleA, scope, false);
  }
  /** Adds a module named {@code name} and a dependency from {@code moduleA} on it. */
  private Module addDependentModule(final String name, final Module moduleA,
                                    final DependencyScope scope,
                                    final boolean exported) {
    final Module moduleB = createModule(name + ".iml", StdModuleTypes.JAVA);
    ApplicationManager.getApplication().runWriteAction(() -> {
      VirtualFile rootB = myFixture.findOrCreateDir(name);
      VirtualFile outB = myFixture.findOrCreateDir("out");
      ModuleRootModificationUtil.addDependency(moduleA, moduleB, scope, exported);
      PsiTestUtil.addSourceRoot(moduleB, rootB);
      PsiTestUtil.setCompilerOutputPath(moduleB, outB.getUrl(), false);
    });
    return moduleB;
  }
  /** A module reachable through two dependency paths still appears once in the dependents scope. */
  public void testModuleTwiceInDependents() {
    Module m = createModule("m.iml", StdModuleTypes.JAVA);
    Module a = createModule("a.iml", StdModuleTypes.JAVA);
    Module b = createModule("b.iml", StdModuleTypes.JAVA);
    Module c = createModule("c.iml", StdModuleTypes.JAVA);
    ModuleRootModificationUtil.addDependency(a, m, DependencyScope.COMPILE, false);
    ModuleRootModificationUtil.addDependency(b, m, DependencyScope.COMPILE, true);
    ModuleRootModificationUtil.addDependency(a, b, DependencyScope.COMPILE, true);
    ModuleRootModificationUtil.addDependency(c, a, DependencyScope.COMPILE, true);
    VirtualFile root = myFixture.findOrCreateDir("c");
    PsiTestUtil.addSourceContentToRoots(c, root);
    VirtualFile file = createChildData(root, "x.txt");
    GlobalSearchScope deps = m.getModuleWithDependentsScope();
    assertTrue(deps.contains(file));
  }
  /** Roots of deeper dependencies compare as "earlier" in the content-with-dependencies scope. */
  public void testModuleContentWithDependenciesScopeRootOrdering() {
    Module m = createModule("m.iml", StdModuleTypes.JAVA);
    Module a = createModule("a.iml", StdModuleTypes.JAVA);
    Module b = createModule("b.iml", StdModuleTypes.JAVA);
    Module c = createModule("c.iml", StdModuleTypes.JAVA);
    ModuleRootModificationUtil.addDependency(b, m, DependencyScope.COMPILE, true);
    ModuleRootModificationUtil.addDependency(a, b, DependencyScope.COMPILE, true);
    ModuleRootModificationUtil.addDependency(a, m, DependencyScope.COMPILE, true);
    ModuleRootModificationUtil.addDependency(c, a, DependencyScope.COMPILE, true);
    VirtualFile mRoot = myFixture.findOrCreateDir("m");
    PsiTestUtil.addSourceContentToRoots(m, mRoot);
    VirtualFile aRoot = myFixture.findOrCreateDir("a");
    PsiTestUtil.addSourceContentToRoots(a, aRoot);
    VirtualFile bRoot = myFixture.findOrCreateDir("b");
    PsiTestUtil.addSourceContentToRoots(b, bRoot);
    VirtualFile cRoot = myFixture.findOrCreateDir("c");
    PsiTestUtil.addSourceContentToRoots(c, cRoot);
    VirtualFile file = createChildData(cRoot, "x.txt");
    GlobalSearchScope deps = c.getModuleContentWithDependenciesScope();
    assertTrue(deps.contains(file));
    // Dependency chain is c -> a -> b -> m, so roots order as m < b < a < c.
    assertTrue(deps.compare(mRoot, aRoot) < 0);
    assertTrue(deps.compare(mRoot, bRoot) < 0);
    assertTrue(deps.compare(mRoot, cRoot) < 0);
    assertTrue(deps.compare(bRoot, aRoot) < 0);
    assertTrue(deps.compare(bRoot, cRoot) < 0);
    assertTrue(deps.compare(aRoot, cRoot) < 0);
    assertTrue(deps.compare(cRoot, mRoot) > 0);
    assertTrue(deps.compare(cRoot, aRoot) > 0);
    assertTrue(deps.compare(cRoot, bRoot) > 0);
    assertEquals(0, deps.compare(cRoot, cRoot));
  }
  /** TEST-scope library dependency: visible in test scope only; excluded from production classpath. */
  public void testTestOnlyLibraryDependency() {
    Module m = createModule("a.iml", StdModuleTypes.JAVA);
    addLibrary(m, DependencyScope.TEST);
    VirtualFile libraryClass = myFixture.createFile("lib/Test.class");
    assertTrue(m.getModuleWithDependenciesAndLibrariesScope(true).contains(libraryClass));
    assertFalse(m.getModuleWithDependenciesAndLibrariesScope(false).contains(libraryClass));
    final VirtualFile[] compilationClasspath = getCompilationClasspath(m);
    assertEquals(1, compilationClasspath.length);
    final VirtualFile[] productionCompilationClasspath = getProductionCompileClasspath(m);
    assertEmpty(productionCompilationClasspath);
  }
  /** RUNTIME-scope module dependency: on runtime classpath, off the exported compile classpath. */
  public void testRuntimeModuleDependency() {
    Module moduleA = createModule("a.iml", StdModuleTypes.JAVA);
    addDependentModule(moduleA, DependencyScope.RUNTIME);
    final VirtualFile[] runtimeClasspath = getRuntimeClasspath(moduleA);
    assertEquals(1, runtimeClasspath.length);
    final VirtualFile[] compilationClasspath = getCompilationClasspath(moduleA);
    assertEquals(1, compilationClasspath.length);
    VirtualFile[] production = getProductionCompileClasspath(moduleA);
    assertEmpty(production);
  }
  /** RUNTIME-scope library dependency: runtime classpath only; invisible in compile-time scopes. */
  public void testRuntimeLibraryDependency() {
    Module m = createModule("a.iml", StdModuleTypes.JAVA);
    VirtualFile libraryRoot = addLibrary(m, DependencyScope.RUNTIME);
    final VirtualFile[] runtimeClasspath = getRuntimeClasspath(m);
    assertOrderedEquals(runtimeClasspath, libraryRoot);
    final VirtualFile[] compilationClasspath = getCompilationClasspath(m);
    assertEquals(1, compilationClasspath.length);
    VirtualFile[] production = getProductionCompileClasspath(m);
    assertEmpty(production);
    VirtualFile libraryClass = myFixture.createFile("lib/Test.class");
    assertFalse(m.getModuleWithDependenciesAndLibrariesScope(true).contains(libraryClass));
    assertFalse(m.getModuleWithDependenciesAndLibrariesScope(false).contains(libraryClass));
    assertTrue(m.getModuleRuntimeScope(true).contains(libraryClass));
    assertTrue(m.getModuleRuntimeScope(false).contains(libraryClass));
  }
  /** PROVIDED-scope module dependency: compile classpath only, not runtime. */
  public void testProvidedModuleDependency() {
    Module moduleA = createModule("a.iml", StdModuleTypes.JAVA);
    addDependentModule(moduleA, DependencyScope.PROVIDED);
    VirtualFile[] runtimeClasspath = getRuntimeClasspath(moduleA);
    assertEmpty(runtimeClasspath);
    final VirtualFile[] compilationClasspath = getCompilationClasspath(moduleA);
    assertEquals(1, compilationClasspath.length);
  }
  /** PROVIDED-scope library dependency: compile classpath and all search scopes, not runtime. */
  public void testProvidedLibraryDependency() {
    Module m = createModule("a.iml", StdModuleTypes.JAVA);
    VirtualFile libraryRoot = addLibrary(m, DependencyScope.PROVIDED);
    final VirtualFile[] runtimeClasspath = getRuntimeClasspath(m);
    assertEmpty(runtimeClasspath);
    final VirtualFile[] compilationClasspath = getCompilationClasspath(m);
    assertOrderedEquals(compilationClasspath, libraryRoot);
    VirtualFile libraryClass = myFixture.createFile("lib/Test.class");
    assertTrue(m.getModuleWithDependenciesAndLibrariesScope(true).contains(libraryClass));
    assertTrue(m.getModuleWithDependenciesAndLibrariesScope(false).contains(libraryClass));
    assertTrue(m.getModuleRuntimeScope(true).contains(libraryClass));
    assertTrue(m.getModuleRuntimeScope(false).contains(libraryClass));
  }
  /** Production runtime classpath roots of {@code m} and its transitive dependencies. */
  private static VirtualFile[] getRuntimeClasspath(Module m) {
    return ModuleRootManager.getInstance(m).orderEntries().productionOnly().runtimeOnly().recursively().getClassesRoots();
  }
  /** Production compile classpath limited to exported entries. */
  private static VirtualFile[] getProductionCompileClasspath(Module moduleA) {
    return ModuleRootManager.getInstance(moduleA).orderEntries().productionOnly().compileOnly().recursively().exportedOnly()
      .getClassesRoots();
  }
  /** Full (test-inclusive) classpath limited to exported entries. */
  private static VirtualFile[] getCompilationClasspath(Module m) {
    return ModuleRootManager.getInstance(m).orderEntries().recursively().exportedOnly().getClassesRoots();
  }
  /** Adds a module-level library rooted at "lib" with the given dependency scope. */
  private VirtualFile addLibrary(final Module m, final DependencyScope scope) {
    final VirtualFile libraryRoot = myFixture.findOrCreateDir("lib");
    ModuleRootModificationUtil.addModuleLibrary(m, "l", Collections.singletonList(libraryRoot.getUrl()),
                                                Collections.emptyList(), scope);
    return libraryRoot;
  }
  /** A library root that is also under the module's content root stays in the module scope. */
  public void testLibUnderModuleContent() {
    VirtualFile lib = myFixture.findOrCreateDir("lib");
    PsiTestUtil.addContentRoot(myModule, lib);
    VirtualFile file = createChildData(lib, "a.txt");
    addLibrary(myModule, DependencyScope.COMPILE);
    assertTrue(myModule.getModuleWithDependenciesAndLibrariesScope(false).contains(file));
  }
  /** Scope caching: rebuilt scopes are new instances but equal to the originals. */
  public void testScopeEquality() {
    Module module = createModule("a.iml", StdModuleTypes.JAVA);
    addDependentModule(module, DependencyScope.COMPILE);
    addLibrary(module, DependencyScope.COMPILE);
    GlobalSearchScope deps = module.getModuleWithDependentsScope();
    GlobalSearchScope depsTests = module.getModuleTestsWithDependentsScope();
    assertFalse(deps.equals(depsTests));
    assertFalse(depsTests.equals(deps));
    ((ModuleEx)module).clearScopesCache();
    GlobalSearchScope deps2 = module.getModuleWithDependentsScope();
    GlobalSearchScope depsTests2 = module.getModuleTestsWithDependentsScope();
    assertFalse(deps2.equals(depsTests2));
    assertFalse(depsTests2.equals(deps2));
    assertNotSame(deps, deps2);
    assertNotSame(depsTests, depsTests2);
    assertEquals(deps, deps2);
    assertEquals(depsTests, depsTests2);
  }
  /** Exported dependencies widen the resolve scope of library files transitively. */
  public void testHonorExportsWhenCalculatingLibraryScope() throws IOException {
    Module a = createModule("a.iml", StdModuleTypes.JAVA);
    Module b = createModule("b.iml", StdModuleTypes.JAVA);
    Module c = createModule("c.iml", StdModuleTypes.JAVA);
    ModuleRootModificationUtil.addDependency(a, b, DependencyScope.COMPILE, true);
    ModuleRootModificationUtil.addDependency(b, c, DependencyScope.COMPILE, true);
    final VirtualFile libFile1 = myFixture.createFile("lib1/a.txt", "");
    final VirtualFile libFile2 = myFixture.createFile("lib2/a.txt", "");
    ModuleRootModificationUtil.addModuleLibrary(a, "l", Collections.singletonList(libFile1.getParent().getUrl()),
                                                Collections.emptyList(), Collections.emptyList(), DependencyScope.COMPILE, true);
    ModuleRootModificationUtil.addModuleLibrary(c, "l", Collections.singletonList(libFile2.getParent().getUrl()),
                                                Collections.emptyList(), Collections.emptyList(), DependencyScope.COMPILE, true);
    assertTrue(ResolveScopeManager.getElementResolveScope(getPsiManager().findFile(libFile1)).contains(libFile2));
    assertTrue(ResolveScopeManager.getElementResolveScope(getPsiManager().findFile(libFile2)).contains(libFile1));
  }
}
| ingokegel/intellij-community | java/java-tests/testSrc/com/intellij/roots/ModuleScopesTest.java | Java | apache-2.0 | 15,396 |
package containerimage
import (
"context"
"encoding/json"
"fmt"
"strings"
distref "github.com/docker/distribution/reference"
"github.com/docker/docker/image"
"github.com/docker/docker/layer"
"github.com/docker/docker/reference"
"github.com/moby/buildkit/exporter"
"github.com/moby/buildkit/exporter/containerimage/exptypes"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
const (
	// keyImageName is the exporter option that carries a comma-separated
	// list of image names to tag the exported image with.
	keyImageName = "name"
)

// Differ can make a moby layer from a snapshot
type Differ interface {
	// EnsureLayer materializes the layer chain for the given snapshot key
	// in the layer store and returns its diff IDs.
	EnsureLayer(ctx context.Context, key string) ([]layer.DiffID, error)
}

// Opt defines a struct for creating new exporter
type Opt struct {
	ImageStore     image.Store     // destination image store
	ReferenceStore reference.Store // tag store; may be nil (no tagging performed)
	Differ         Differ          // converts snapshots into moby layers
}

// imageExporter exports build results into the moby image store.
type imageExporter struct {
	opt Opt
}
// New creates a new moby imagestore exporter
func New(opt Opt) (exporter.Exporter, error) {
im := &imageExporter{opt: opt}
return im, nil
}
// Resolve parses the exporter options into a configured export instance.
// Recognized options: "name" (comma-separated image names to tag) and the
// image-config metadata key; anything else is logged and ignored.
func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exporter.ExporterInstance, error) {
	inst := &imageExporterInstance{imageExporter: e}
	for key, value := range opt {
		switch key {
		case keyImageName:
			// Accept several image names separated by commas.
			for _, rawName := range strings.Split(value, ",") {
				named, err := distref.ParseNormalizedNamed(rawName)
				if err != nil {
					return nil, err
				}
				inst.targetNames = append(inst.targetNames, named)
			}
		case exptypes.ExporterImageConfigKey:
			// Lazily allocate the metadata map on first use.
			if inst.meta == nil {
				inst.meta = make(map[string][]byte)
			}
			inst.meta[key] = []byte(value)
		default:
			logrus.Warnf("image exporter: unknown option %s", key)
		}
	}
	return inst, nil
}
// imageExporterInstance is one configured export operation: it carries the
// target image names and pass-through metadata parsed by Resolve.
type imageExporterInstance struct {
	*imageExporter
	targetNames []distref.Named   // names to tag the exported image with
	meta        map[string][]byte // option-supplied metadata (e.g. image config)
}
// Name returns the progress description shown while this exporter runs.
func (e *imageExporterInstance) Name() string {
	const description = "exporting to image"
	return description
}
// Export writes the (single) build result into the moby image store,
// optionally tags it with the configured names, and returns the new image
// digest under the "containerimage.digest" key.
func (e *imageExporterInstance) Export(ctx context.Context, inp exporter.Source) (map[string]string, error) {
	if len(inp.Refs) > 1 {
		return nil, fmt.Errorf("exporting multiple references to image store is currently unsupported")
	}
	ref := inp.Ref
	// Ref and Refs are alternative ways to pass the result; reject both at once.
	if ref != nil && len(inp.Refs) == 1 {
		return nil, fmt.Errorf("invalid exporter input: Ref and Refs are mutually exclusive")
	}
	// only one loop (Refs has at most one entry here)
	for _, v := range inp.Refs {
		ref = v
	}
	var config []byte
	switch len(inp.Refs) {
	case 0:
		// Single-ref form: the image config (if any) is passed directly.
		config = inp.Metadata[exptypes.ExporterImageConfigKey]
	case 1:
		// Refs form: configs are keyed by platform ID, so the platforms
		// mapping is required to locate the right one.
		platformsBytes, ok := inp.Metadata[exptypes.ExporterPlatformsKey]
		if !ok {
			return nil, fmt.Errorf("cannot export image, missing platforms mapping")
		}
		var p exptypes.Platforms
		if err := json.Unmarshal(platformsBytes, &p); err != nil {
			return nil, errors.Wrapf(err, "failed to parse platforms passed to exporter")
		}
		if len(p.Platforms) != len(inp.Refs) {
			return nil, errors.Errorf("number of platforms does not match references %d %d", len(p.Platforms), len(inp.Refs))
		}
		config = inp.Metadata[fmt.Sprintf("%s/%s", exptypes.ExporterImageConfigKey, p.Platforms[0].ID)]
	}
	var diffs []digest.Digest
	if ref != nil {
		// Materialize the snapshot as moby layers and collect their diff IDs.
		layersDone := oneOffProgress(ctx, "exporting layers")
		if err := ref.Finalize(ctx, true); err != nil {
			return nil, layersDone(err)
		}
		diffIDs, err := e.opt.Differ.EnsureLayer(ctx, ref.ID())
		if err != nil {
			return nil, layersDone(err)
		}
		diffs = make([]digest.Digest, len(diffIDs))
		for i := range diffIDs {
			diffs[i] = digest.Digest(diffIDs[i])
		}
		_ = layersDone(nil)
	}
	// A scratch/empty result still needs a valid image config.
	if len(config) == 0 {
		var err error
		config, err = emptyImageConfig()
		if err != nil {
			return nil, err
		}
	}
	history, err := parseHistoryFromConfig(config)
	if err != nil {
		return nil, err
	}
	// Reconcile the layer list with the history entries, then patch rootfs,
	// history, and the optional inline cache into the config.
	diffs, history = normalizeLayersAndHistory(diffs, history, ref)
	config, err = patchImageConfig(config, diffs, history, inp.Metadata[exptypes.ExporterInlineCache])
	if err != nil {
		return nil, err
	}
	configDigest := digest.FromBytes(config)
	configDone := oneOffProgress(ctx, fmt.Sprintf("writing image %s", configDigest))
	id, err := e.opt.ImageStore.Create(config)
	if err != nil {
		return nil, configDone(err)
	}
	_ = configDone(nil)
	// Tag the image under each requested name, when a reference store exists.
	if e.opt.ReferenceStore != nil {
		for _, targetName := range e.targetNames {
			tagDone := oneOffProgress(ctx, "naming to "+targetName.String())
			if err := e.opt.ReferenceStore.AddTag(targetName, digest.Digest(id), true); err != nil {
				return nil, tagDone(err)
			}
			_ = tagDone(nil)
		}
	}
	return map[string]string{
		"containerimage.digest": id.String(),
	}, nil
}
| SvenDowideit/docker | builder/builder-next/exporter/export.go | GO | apache-2.0 | 4,418 |
/* $Id$ */
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.manifoldcf.connectorcommon.throttler;
import org.apache.manifoldcf.core.interfaces.*;
import org.apache.manifoldcf.connectorcommon.interfaces.*;
import org.apache.manifoldcf.core.system.ManifoldCF;
import java.util.concurrent.atomic.*;
import java.util.*;
/** Connection tracking for a bin.
*
* This class keeps track of information needed to figure out throttling for connections,
* on a bin-by-bin basis. It is *not*, however, a connection pool. Actually establishing
* connections, and pooling established connections, is functionality that must reside in the
* caller.
*
* The 'connections' each connection bin tracks are connections outstanding that share this bin name.
* Not all such connections are identical; some may in fact have entirely different sets of
* bins associated with them, but they all have the specific bin in common. Since each bin has its
* own unique limit, this effectively means that in order to get a connection, you need to find an
* available slot in ALL of its constituent connection bins. If the connections are pooled, it makes
* the most sense to divide the pool up by characteristics such that identical connections are all
* handled together - and it is reasonable to presume that an identical connection has identical
* connection bins.
*
* NOTE WELL: This is entirely local in operation
*/
public class ConnectionBin
{
/** True if this bin is alive still */
protected boolean isAlive = true;
/** This is the bin name which this connection pool belongs to */
protected final String binName;
/** Service type name */
protected final String serviceTypeName;
/** The (anonymous) service name */
protected final String serviceName;
/** The target calculation lock name */
protected final String targetCalcLockName;
/** This is the maximum number of active connections allowed for this bin */
protected int maxActiveConnections = 0;
/** This is the local maximum number of active connections allowed for this bin */
protected int localMax = 0;
/** This is the number of connections in this bin that have been reserved - that is, they
* are promised to various callers, but those callers have not yet committed to obtaining them. */
protected int reservedConnections = 0;
/** This is the number of connections in this bin that are connected; immaterial whether they are
* in use or in a pool somewhere. */
protected int inUseConnections = 0;
/** This is the number of active referring connection pools. We increment this number
* whenever a poolCount goes from zero to 1, and we decrement it whenever a poolCount
* goes from one to zero. */
protected int referencingPools = 0;
/** The service type prefix for connection bins */
protected final static String serviceTypePrefix = "_CONNECTIONBIN_";
/** The target calculation lock prefix */
protected final static String targetCalcLockPrefix = "_CONNECTIONBINTARGET_";
/** Random number */
protected final static Random randomNumberGenerator = new Random();
  /** Constructor.
  *@param threadContext is the thread context.
  *@param throttlingGroupName is the name of the throttling group this bin belongs to.
  *@param binName is the name of this bin within the group.
  * Registers this process anonymously as an active service of the bin-specific
  * service type, so that the cross-process connection target calculation can
  * count how many participants share the bin.
  */
  public ConnectionBin(IThreadContext threadContext, String throttlingGroupName, String binName)
    throws ManifoldCFException
  {
    this.binName = binName;
    this.serviceTypeName = buildServiceTypeName(throttlingGroupName, binName);
    this.targetCalcLockName = buildTargetCalcLockName(throttlingGroupName, binName);
    // Now, register and activate service anonymously, and record the service name we get.
    ILockManager lockManager = LockManagerFactory.make(threadContext);
    this.serviceName = lockManager.registerServiceBeginServiceActivity(serviceTypeName, null, null);
  }
protected static String buildServiceTypeName(String throttlingGroupName, String binName)
{
return serviceTypePrefix + throttlingGroupName + "_" + binName;
}
protected static String buildTargetCalcLockName(String throttlingGroupName, String binName)
{
return targetCalcLockPrefix + throttlingGroupName + "_" + binName;
}
/** Get the bin name. */
public String getBinName()
{
return binName;
}
/** Update the maximum number of active connections.
*/
public synchronized void updateMaxActiveConnections(int maxActiveConnections)
{
// Update the number; the poller will wake up any waiting threads.
this.maxActiveConnections = maxActiveConnections;
}
  /** Wait for a connection to become available, in the context of an existing connection pool.
  *@param poolCount is the number of connections in the pool times the number of bins per connection.
  * This parameter is only ever changed in this class!!
  *@param breakCheck if non-null, is consulted for an abort/timeout while waiting; if null, the
  * wait is unbounded (until notified).
  *@return a recommendation as to how to proceed, using the IConnectionThrottler values.  If the
  * recommendation is to create a connection, a slot will be reserved for that purpose.  A
  * subsequent call to noteConnectionCreation() will be needed to confirm the reservation, or clearReservation() to
  * release the reservation.
  *@throws InterruptedException if the waiting thread is interrupted.
  *@throws BreakException if breakCheck decides to abort the wait.
  */
  public synchronized int waitConnectionAvailable(AtomicInteger poolCount, IBreakCheck breakCheck)
    throws InterruptedException, BreakException
  {
    // Reserved connections keep a slot available which can't be used by anyone else.
    // Connection bins are always sorted so that deadlocks can't occur.
    // Once all slots are reserved, the caller will go ahead and create the necessary connection
    // and convert the reservation to a new connection.
    while (true)
    {
      if (!isAlive)
        return IConnectionThrottler.CONNECTION_FROM_NOWHERE;
      int currentPoolCount = poolCount.get();
      if (currentPoolCount > 0)
      {
        // Recommendation is to pull the connection from the pool.
        poolCount.set(currentPoolCount - 1);
        // Pool went from non-empty to empty: one fewer pool references this bin.
        if (currentPoolCount == 1)
          referencingPools--;
        return IConnectionThrottler.CONNECTION_FROM_POOL;
      }
      // NOTE(review): localMax (the local share of maxActiveConnections) is
      // computed elsewhere (presumably by the poller) -- confirm it is kept
      // up to date before this comparison.
      if (inUseConnections + reservedConnections < localMax)
      {
        reservedConnections++;
        return IConnectionThrottler.CONNECTION_FROM_CREATION;
      }
      // Wait for a connection to free up.  Note that it is up to the caller to free stuff up.
      if (breakCheck == null)
      {
        wait();
      }
      else
      {
        long amt = breakCheck.abortCheck();
        wait(amt);
      }
      // Back around
    }
  }
/** Undo what we had decided to do before.
*@param recommendation is the decision returned by waitConnectionAvailable() above.
*@param poolCount is the pool counter that a CONNECTION_FROM_POOL decision decremented.
*/
public synchronized void undoReservation(int recommendation, AtomicInteger poolCount)
{
  if (recommendation == IConnectionThrottler.CONNECTION_FROM_CREATION)
  {
    if (reservedConnections == 0)
      throw new IllegalStateException("Can't clear a reservation we don't have");
    // Give back the reserved slot and wake any waiters that can now use it.
    reservedConnections--;
    notifyAll();
  }
  else if (recommendation == IConnectionThrottler.CONNECTION_FROM_POOL)
  {
    // Return the connection we had taken out of the pool.
    int currentCount = poolCount.get();
    poolCount.set(currentCount + 1);
    // Pool went from empty to non-empty: it references this bin again.
    if (currentCount == 0)
      referencingPools++;
    notifyAll();
  }
}
/** Note the creation of an active connection that belongs to this bin.  The connection MUST
* have been reserved prior to the connection being created.
*/
public synchronized void noteConnectionCreation()
{
  if (reservedConnections == 0)
    throw new IllegalStateException("Creating a connection when no connection slot reserved!");
  // Convert the reservation into an in-use connection.
  reservedConnections--;
  inUseConnections++;
  // No notification needed because the total number of reserved+active connections did not change.
}
/** Figure out whether we are currently over target or not for this bin.
*@return true if a connection being returned should be destroyed rather than pooled,
* because the bin is over its local quota.
*/
public synchronized boolean shouldReturnedConnectionBeDestroyed()
{
  // We don't count reserved connections here because those are not yet committed
  return inUseConnections > localMax;
}
// Return values for shouldPooledConnectionBeDestroyed(), below.
public static final int CONNECTION_DESTROY = 0;
public static final int CONNECTION_POOLEMPTY = 1;
public static final int CONNECTION_WITHINBOUNDS = 2;

/** Figure out whether we are currently over target or not for this bin, and whether a
* connection should be pulled from the pool and destroyed.
* Note that this is tricky in conjunction with other bins, because those other bins
* may conclude that we can't destroy a connection.  If so, we just return the stolen
* connection back to the pool.
*@param poolCount is the pool counter for the pool under consideration.
*@return CONNECTION_DESTROY, CONNECTION_POOLEMPTY, or CONNECTION_WITHINBOUNDS.
*/
public synchronized int shouldPooledConnectionBeDestroyed(AtomicInteger poolCount)
{
  int currentPoolCount = poolCount.get();
  if (currentPoolCount > 0)
  {
    // Each pool referencing this bin gets an equal share of the bin's quota.
    // (Assumes referencingPools >= 1 whenever the pool is non-empty; the
    // increment/decrement bookkeeping elsewhere appears to maintain this
    // invariant -- otherwise this division would throw.  TODO confirm.)
    int individualPoolAllocation = localMax / referencingPools;
    // Consider it removed from the pool for the purposes of consideration.  If we change our minds, we'll
    // return it, and no harm done.
    poolCount.set(currentPoolCount-1);
    if (currentPoolCount == 1)
      referencingPools--;
    // We don't count reserved connections here because those are not yet committed.
    if (inUseConnections > individualPoolAllocation)
    {
      return CONNECTION_DESTROY;
    }
    return CONNECTION_WITHINBOUNDS;
  }
  return CONNECTION_POOLEMPTY;
}
/** Check whether a pooled connection is available, and if so, claim it by
* decrementing the pool counter (adjusting the referencing-pools count when the
* pool becomes empty).
*@param poolCount is the pool counter to check and decrement.
*@return true if a pooled connection was claimed; false if the pool was empty.
*/
public synchronized boolean hasPooledConnection(AtomicInteger poolCount)
{
  final int available = poolCount.get();
  if (available <= 0)
    return false;
  // Claim one connection out of the pool.
  poolCount.set(available - 1);
  // The pool just became empty, so it no longer references this bin.
  if (available == 1)
    referencingPools--;
  return true;
}
/** Undo the decision to destroy a pooled connection.
* The stolen connection is simply handed back to the pool, which is exactly the
* same bookkeeping as a normal pool return (increment the counter, bump
* referencingPools if the pool was empty, notifyAll()), so we delegate to
* noteConnectionReturnedToPool() rather than duplicating that logic.
*@param poolCount is the pool counter to restore.
*/
public synchronized void undoPooledConnectionDecision(AtomicInteger poolCount)
{
  // synchronized is reentrant, so the nested synchronized call is safe.
  noteConnectionReturnedToPool(poolCount);
}
/** Note a connection returned to the pool.
*@param poolCount is the pool counter to increment.
*/
public synchronized void noteConnectionReturnedToPool(AtomicInteger poolCount)
{
  int currentPoolCount = poolCount.get();
  poolCount.set(currentPoolCount + 1);
  // Pool went from empty to non-empty: it references this bin again.
  if (currentPoolCount == 0)
    referencingPools++;
  // Wake up threads possibly waiting on a pool return.
  notifyAll();
}
/** Note the destruction of an active connection that belongs to this bin.
*/
public synchronized void noteConnectionDestroyed()
{
  // A slot opened up; wake any threads blocked in waitConnectionAvailable().
  inUseConnections--;
  notifyAll();
}
/** Poll this bin: recompute and publish this service's connection target.
*@param threadContext is the thread context used to obtain the lock manager.
*/
public synchronized void poll(IThreadContext threadContext)
  throws ManifoldCFException
{
  // The meat of the cross-cluster apportionment algorithm goes here!
  // Two global numbers each service posts: "in-use" and "target".  At no time does a service *ever* post either a "target"
  // that, together with all other active service targets, is in excess of the max.  Also, at no time a service post
  // a target that, when added to the other "in-use" values, exceeds the max.  If the "in-use" values everywhere else
  // already equal or exceed the max, then the target will be zero.
  // The target quota is calculated as follows:
  // (1) Target is summed, excluding ours.  This is GlobalTarget.
  // (2) In-use is summed, excluding ours.  This is GlobalInUse.
  // (3) Our MaximumTarget is computed, which is Maximum - GlobalTarget or Maximum - GlobalInUse, whichever is
  //     smaller, but never less than zero.
  // (4) Our FairTarget is computed.  The FairTarget divides the Maximum by the number of services, and adds
  //     1 randomly based on the remainder.
  // (5) We compute OptimalTarget as follows: We start with current local target.  If current local target
  //     exceeds current local in-use count, we adjust OptimalTarget downward by one.  Otherwise we increase it
  //     (proportionally to the maximum, for a fast ramp-up).
  // (6) Finally, we compute Target by taking the minimum of MaximumTarget, FairTarget, and OptimalTarget.
  ILockManager lockManager = LockManagerFactory.make(threadContext);
  // The write lock makes the scan/compute/publish sequence below atomic across the cluster.
  lockManager.enterWriteLock(targetCalcLockName);
  try
  {
    // Compute MaximumTarget
    SumClass sumClass = new SumClass(serviceName);
    lockManager.scanServiceData(serviceTypeName, sumClass);
    //System.out.println("numServices = "+sumClass.getNumServices()+"; globalTarget = "+sumClass.getGlobalTarget()+"; globalInUse = "+sumClass.getGlobalInUse());
    int numServices = sumClass.getNumServices();
    if (numServices == 0)
      return;
    int globalTarget = sumClass.getGlobalTarget();
    int globalInUse = sumClass.getGlobalInUse();
    int maximumTarget = maxActiveConnections - globalTarget;
    if (maximumTarget > maxActiveConnections - globalInUse)
      maximumTarget = maxActiveConnections - globalInUse;
    if (maximumTarget < 0)
      maximumTarget = 0;
    // Compute FairTarget
    int fairTarget = maxActiveConnections / numServices;
    int remainder = maxActiveConnections % numServices;
    // Randomly choose whether we get an addition to the FairTarget
    if (randomNumberGenerator.nextInt(numServices) < remainder)
      fairTarget++;
    // Compute OptimalTarget
    int localInUse = inUseConnections;
    int optimalTarget = localMax;
    if (localMax > localInUse)
      optimalTarget--;
    else
    {
      // We want a fast ramp up, so make this proportional to maxActiveConnections
      int increment = maxActiveConnections >> 2;
      if (increment == 0)
        increment = 1;
      optimalTarget += increment;
    }
    //System.out.println("maxTarget = "+maximumTarget+"; fairTarget = "+fairTarget+"; optimalTarget = "+optimalTarget);
    // Now compute actual target: the minimum of the three candidates.
    int target = maximumTarget;
    if (target > fairTarget)
      target = fairTarget;
    if (target > optimalTarget)
      target = optimalTarget;
    // Write these values to the service data variables.
    // NOTE that there is a race condition here; the target value depends on all the calculations above being accurate, and not changing out from under us.
    // So, that's why we have a write lock around the pool calculations.
    lockManager.updateServiceData(serviceTypeName, serviceName, pack(target, localInUse));
    // Now, update our localMax, if it needs it.
    if (target == localMax)
      return;
    // The quota changed; waiters may now be able to proceed (or must re-check).
    localMax = target;
    notifyAll();
  }
  finally
  {
    lockManager.leaveWriteLock(targetCalcLockName);
  }
}
/** Shut down the bin, and release everything that is waiting on it.
*@param threadContext is the thread context used to obtain the lock manager.
*/
public synchronized void shutDown(IThreadContext threadContext)
  throws ManifoldCFException
{
  // Mark dead first so awakened waiters observe CONNECTION_FROM_NOWHERE.
  isAlive = false;
  notifyAll();
  // Withdraw this service instance from the cross-cluster accounting.
  ILockManager lockManager = LockManagerFactory.make(threadContext);
  lockManager.endServiceActivity(serviceTypeName, serviceName);
}
// Protected classes and methods

/** Accumulator passed to ILockManager.scanServiceData().  Counts all services of
* our type, and sums the posted "target" and "in-use" values of every service
* EXCEPT the local one (whose contribution the caller accounts for itself).
*/
protected static class SumClass implements IServiceDataAcceptor
{
  // Name of the local service; its data is excluded from the tallies below.
  protected final String serviceName;
  // Total number of services seen (including the local one).
  protected int numServices = 0;
  // Sum of "target" values posted by all OTHER services.
  protected int globalTargetTally = 0;
  // Sum of "in-use" values posted by all OTHER services.
  protected int globalInUseTally = 0;

  public SumClass(String serviceName)
  {
    this.serviceName = serviceName;
  }

  @Override
  public boolean acceptServiceData(String serviceName, byte[] serviceData)
    throws ManifoldCFException
  {
    numServices++;
    // Skip our own data; only tally the other services.
    if (!serviceName.equals(this.serviceName))
    {
      globalTargetTally += unpackTarget(serviceData);
      globalInUseTally += unpackInUse(serviceData);
    }
    // Presumably false means "continue scanning" -- verify against the
    // IServiceDataAcceptor contract.
    return false;
  }

  public int getNumServices()
  {
    return numServices;
  }

  public int getGlobalTarget()
  {
    return globalTargetTally;
  }

  public int getGlobalInUse()
  {
    return globalInUseTally;
  }
}
/** Decode the "target" value from an 8-byte service-data record.
* The target occupies bytes 0-3, little-endian.
*@param data is the packed record produced by pack(), or null.
*@return the decoded target, or 0 if the record is absent or malformed.
*/
protected static int unpackTarget(byte[] data)
{
  if (data == null || data.length != 8)
    return 0;
  int value = 0;
  for (int i = 0; i < 4; i++)
    value |= (data[i] & 0xff) << (8 * i);
  return value;
}
/** Decode the "in-use" value from an 8-byte service-data record.
* The in-use count occupies bytes 4-7, little-endian.
*@param data is the packed record produced by pack(), or null.
*@return the decoded in-use count, or 0 if the record is absent or malformed.
*/
protected static int unpackInUse(byte[] data)
{
  if (data == null || data.length != 8)
    return 0;
  int value = 0;
  for (int i = 0; i < 4; i++)
    value |= (data[i + 4] & 0xff) << (8 * i);
  return value;
}
/** Encode a (target, in-use) pair into the 8-byte service-data record format
* read back by unpackTarget() / unpackInUse(): target in bytes 0-3 and in-use
* in bytes 4-7, both little-endian.
*@param target is the target value to encode.
*@param inUse is the in-use count to encode.
*@return the 8-byte packed record.
*/
protected static byte[] pack(int target, int inUse)
{
  byte[] packed = new byte[8];
  for (int i = 0; i < 4; i++)
  {
    packed[i] = (byte)((target >>> (8 * i)) & 0xff);
    packed[i + 4] = (byte)((inUse >>> (8 * i)) & 0xff);
  }
  return packed;
}
}
| gladyscarrizales/manifoldcf | framework/connector-common/src/main/java/org/apache/manifoldcf/connectorcommon/throttler/ConnectionBin.java | Java | apache-2.0 | 17,653 |
/*
* JBoss, Home of Professional Open Source
* Copyright 2009, Red Hat Middleware LLC, and individual contributors
* by the @authors tag. See the copyright.txt in the distribution for a
* full listing of individual contributors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jboss.arquillian.container.test.spi.client.deployment;
import org.jboss.shrinkwrap.api.Archive;
/**
 * Extension point to alter system defined (auxiliary) deployment archives
 * before they are deployed.
 *
 * Example:
 *  - Add beans.xml to EE modules
 *
 * @author <a href="mailto:[email protected]">Aslak Knutsen</a>
 * @version $Revision: $
 */
public interface AuxiliaryArchiveProcessor
{
   /**
    * Called once for each found ArchiveAppender, with the archive the
    * appender produced.  Implementations may mutate the archive in place.
    *
    * @param auxiliaryArchive The system defined deployment archive
    */
   void process(Archive<?> auxiliaryArchive);
}
| topicusonderwijs/arquillian-core | container/test-spi/src/main/java/org/jboss/arquillian/container/test/spi/client/deployment/AuxiliaryArchiveProcessor.java | Java | apache-2.0 | 1,330 |
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!--NewPage-->
<HTML>
<HEAD>
<!-- Generated by javadoc (build 1.5.0_22) on Mon Dec 20 13:46:21 EST 2010 -->
<TITLE>
Mapper.MapperType (Apache Ant API)
</TITLE>
<META NAME="keywords" CONTENT="org.apache.tools.ant.types.Mapper.MapperType class">
<LINK REL ="stylesheet" TYPE="text/css" HREF="../../../../../stylesheet.css" TITLE="Style">
<SCRIPT type="text/javascript">
function windowTitle()
{
parent.document.title="Mapper.MapperType (Apache Ant API)";
}
</SCRIPT>
<NOSCRIPT>
</NOSCRIPT>
</HEAD>
<BODY BGCOLOR="white" onload="windowTitle();">
<!-- ========= START OF TOP NAVBAR ======= -->
<A NAME="navbar_top"><!-- --></A>
<A HREF="#skip-navbar_top" title="Skip navigation links"></A>
<TABLE BORDER="0" WIDTH="100%" CELLPADDING="1" CELLSPACING="0" SUMMARY="">
<TR>
<TD COLSPAN=2 BGCOLOR="#EEEEFF" CLASS="NavBarCell1">
<A NAME="navbar_top_firstrow"><!-- --></A>
<TABLE BORDER="0" CELLPADDING="0" CELLSPACING="3" SUMMARY="">
<TR ALIGN="center" VALIGN="top">
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../overview-summary.html"><FONT CLASS="NavBarFont1"><B>Overview</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="package-summary.html"><FONT CLASS="NavBarFont1"><B>Package</B></FONT></A> </TD>
<TD BGCOLOR="#FFFFFF" CLASS="NavBarCell1Rev"> <FONT CLASS="NavBarFont1Rev"><B>Class</B></FONT> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="package-tree.html"><FONT CLASS="NavBarFont1"><B>Tree</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../deprecated-list.html"><FONT CLASS="NavBarFont1"><B>Deprecated</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../index-all.html"><FONT CLASS="NavBarFont1"><B>Index</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../help-doc.html"><FONT CLASS="NavBarFont1"><B>Help</B></FONT></A> </TD>
</TR>
</TABLE>
</TD>
<TD ALIGN="right" VALIGN="top" ROWSPAN=3><EM>
</EM>
</TD>
</TR>
<TR>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
<A HREF="../../../../../org/apache/tools/ant/types/Mapper.html" title="class in org.apache.tools.ant.types"><B>PREV CLASS</B></A>
<A HREF="../../../../../org/apache/tools/ant/types/Parameter.html" title="class in org.apache.tools.ant.types"><B>NEXT CLASS</B></A></FONT></TD>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
<A HREF="../../../../../index.html?org/apache/tools/ant/types/Mapper.MapperType.html" target="_top"><B>FRAMES</B></A>
<A HREF="Mapper.MapperType.html" target="_top"><B>NO FRAMES</B></A>
<SCRIPT type="text/javascript">
<!--
if(window==top) {
document.writeln('<A HREF="../../../../../allclasses-noframe.html"><B>All Classes</B></A>');
}
//-->
</SCRIPT>
<NOSCRIPT>
<A HREF="../../../../../allclasses-noframe.html"><B>All Classes</B></A>
</NOSCRIPT>
</FONT></TD>
</TR>
<TR>
<TD VALIGN="top" CLASS="NavBarCell3"><FONT SIZE="-2">
SUMMARY: NESTED | <A HREF="#fields_inherited_from_class_org.apache.tools.ant.types.EnumeratedAttribute">FIELD</A> | <A HREF="#constructor_summary">CONSTR</A> | <A HREF="#method_summary">METHOD</A></FONT></TD>
<TD VALIGN="top" CLASS="NavBarCell3"><FONT SIZE="-2">
DETAIL: FIELD | <A HREF="#constructor_detail">CONSTR</A> | <A HREF="#method_detail">METHOD</A></FONT></TD>
</TR>
</TABLE>
<A NAME="skip-navbar_top"></A>
<!-- ========= END OF TOP NAVBAR ========= -->
<HR>
<!-- ======== START OF CLASS DATA ======== -->
<H2>
<FONT SIZE="-1">
org.apache.tools.ant.types</FONT>
<BR>
Class Mapper.MapperType</H2>
<PRE>
java.lang.Object
<IMG SRC="../../../../../resources/inherit.gif" ALT="extended by "><A HREF="../../../../../org/apache/tools/ant/types/EnumeratedAttribute.html" title="class in org.apache.tools.ant.types">org.apache.tools.ant.types.EnumeratedAttribute</A>
<IMG SRC="../../../../../resources/inherit.gif" ALT="extended by "><B>org.apache.tools.ant.types.Mapper.MapperType</B>
</PRE>
<DL>
<DT><B>Enclosing class:</B><DD><A HREF="../../../../../org/apache/tools/ant/types/Mapper.html" title="class in org.apache.tools.ant.types">Mapper</A></DD>
</DL>
<HR>
<DL>
<DT><PRE>public static class <B>Mapper.MapperType</B><DT>extends <A HREF="../../../../../org/apache/tools/ant/types/EnumeratedAttribute.html" title="class in org.apache.tools.ant.types">EnumeratedAttribute</A></DL>
</PRE>
<P>
Class as Argument to FileNameMapper.setType.
<P>
<P>
<HR>
<P>
<!-- =========== FIELD SUMMARY =========== -->
<A NAME="field_summary"><!-- --></A>
<TABLE BORDER="1" WIDTH="100%" CELLPADDING="3" CELLSPACING="0" SUMMARY="">
<TR BGCOLOR="#CCCCFF" CLASS="TableHeadingColor">
<TH ALIGN="left" COLSPAN="2"><FONT SIZE="+2">
<B>Field Summary</B></FONT></TH>
</TR>
</TABLE>
<A NAME="fields_inherited_from_class_org.apache.tools.ant.types.EnumeratedAttribute"><!-- --></A>
<TABLE BORDER="1" WIDTH="100%" CELLPADDING="3" CELLSPACING="0" SUMMARY="">
<TR BGCOLOR="#EEEEFF" CLASS="TableSubHeadingColor">
<TH ALIGN="left"><B>Fields inherited from class org.apache.tools.ant.types.<A HREF="../../../../../org/apache/tools/ant/types/EnumeratedAttribute.html" title="class in org.apache.tools.ant.types">EnumeratedAttribute</A></B></TH>
</TR>
<TR BGCOLOR="white" CLASS="TableRowColor">
<TD><CODE><A HREF="../../../../../org/apache/tools/ant/types/EnumeratedAttribute.html#value">value</A></CODE></TD>
</TR>
</TABLE>
<!-- ======== CONSTRUCTOR SUMMARY ======== -->
<A NAME="constructor_summary"><!-- --></A>
<TABLE BORDER="1" WIDTH="100%" CELLPADDING="3" CELLSPACING="0" SUMMARY="">
<TR BGCOLOR="#CCCCFF" CLASS="TableHeadingColor">
<TH ALIGN="left" COLSPAN="2"><FONT SIZE="+2">
<B>Constructor Summary</B></FONT></TH>
</TR>
<TR BGCOLOR="white" CLASS="TableRowColor">
<TD><CODE><B><A HREF="../../../../../org/apache/tools/ant/types/Mapper.MapperType.html#Mapper.MapperType()">Mapper.MapperType</A></B>()</CODE>
<BR>
Constructor for the MapperType enumeration</TD>
</TR>
</TABLE>
<!-- ========== METHOD SUMMARY =========== -->
<A NAME="method_summary"><!-- --></A>
<TABLE BORDER="1" WIDTH="100%" CELLPADDING="3" CELLSPACING="0" SUMMARY="">
<TR BGCOLOR="#CCCCFF" CLASS="TableHeadingColor">
<TH ALIGN="left" COLSPAN="2"><FONT SIZE="+2">
<B>Method Summary</B></FONT></TH>
</TR>
<TR BGCOLOR="white" CLASS="TableRowColor">
<TD ALIGN="right" VALIGN="top" WIDTH="1%"><FONT SIZE="-1">
<CODE> java.lang.String</CODE></FONT></TD>
<TD><CODE><B><A HREF="../../../../../org/apache/tools/ant/types/Mapper.MapperType.html#getImplementation()">getImplementation</A></B>()</CODE>
<BR>
</TD>
</TR>
<TR BGCOLOR="white" CLASS="TableRowColor">
<TD ALIGN="right" VALIGN="top" WIDTH="1%"><FONT SIZE="-1">
<CODE> java.lang.String[]</CODE></FONT></TD>
<TD><CODE><B><A HREF="../../../../../org/apache/tools/ant/types/Mapper.MapperType.html#getValues()">getValues</A></B>()</CODE>
<BR>
This is the only method a subclass needs to implement.</TD>
</TR>
</TABLE>
<A NAME="methods_inherited_from_class_org.apache.tools.ant.types.EnumeratedAttribute"><!-- --></A>
<TABLE BORDER="1" WIDTH="100%" CELLPADDING="3" CELLSPACING="0" SUMMARY="">
<TR BGCOLOR="#EEEEFF" CLASS="TableSubHeadingColor">
<TH ALIGN="left"><B>Methods inherited from class org.apache.tools.ant.types.<A HREF="../../../../../org/apache/tools/ant/types/EnumeratedAttribute.html" title="class in org.apache.tools.ant.types">EnumeratedAttribute</A></B></TH>
</TR>
<TR BGCOLOR="white" CLASS="TableRowColor">
<TD><CODE><A HREF="../../../../../org/apache/tools/ant/types/EnumeratedAttribute.html#containsValue(java.lang.String)">containsValue</A>, <A HREF="../../../../../org/apache/tools/ant/types/EnumeratedAttribute.html#getIndex()">getIndex</A>, <A HREF="../../../../../org/apache/tools/ant/types/EnumeratedAttribute.html#getInstance(java.lang.Class, java.lang.String)">getInstance</A>, <A HREF="../../../../../org/apache/tools/ant/types/EnumeratedAttribute.html#getValue()">getValue</A>, <A HREF="../../../../../org/apache/tools/ant/types/EnumeratedAttribute.html#indexOfValue(java.lang.String)">indexOfValue</A>, <A HREF="../../../../../org/apache/tools/ant/types/EnumeratedAttribute.html#setValue(java.lang.String)">setValue</A>, <A HREF="../../../../../org/apache/tools/ant/types/EnumeratedAttribute.html#toString()">toString</A></CODE></TD>
</TR>
</TABLE>
<A NAME="methods_inherited_from_class_java.lang.Object"><!-- --></A>
<TABLE BORDER="1" WIDTH="100%" CELLPADDING="3" CELLSPACING="0" SUMMARY="">
<TR BGCOLOR="#EEEEFF" CLASS="TableSubHeadingColor">
<TH ALIGN="left"><B>Methods inherited from class java.lang.Object</B></TH>
</TR>
<TR BGCOLOR="white" CLASS="TableRowColor">
<TD><CODE>clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait</CODE></TD>
</TR>
</TABLE>
<P>
<!-- ========= CONSTRUCTOR DETAIL ======== -->
<A NAME="constructor_detail"><!-- --></A>
<TABLE BORDER="1" WIDTH="100%" CELLPADDING="3" CELLSPACING="0" SUMMARY="">
<TR BGCOLOR="#CCCCFF" CLASS="TableHeadingColor">
<TH ALIGN="left" COLSPAN="1"><FONT SIZE="+2">
<B>Constructor Detail</B></FONT></TH>
</TR>
</TABLE>
<A NAME="Mapper.MapperType()"><!-- --></A><H3>
Mapper.MapperType</H3>
<PRE>
public <B>Mapper.MapperType</B>()</PRE>
<DL>
<DD>Constructor for the MapperType enumeration
<P>
</DL>
<!-- ============ METHOD DETAIL ========== -->
<A NAME="method_detail"><!-- --></A>
<TABLE BORDER="1" WIDTH="100%" CELLPADDING="3" CELLSPACING="0" SUMMARY="">
<TR BGCOLOR="#CCCCFF" CLASS="TableHeadingColor">
<TH ALIGN="left" COLSPAN="1"><FONT SIZE="+2">
<B>Method Detail</B></FONT></TH>
</TR>
</TABLE>
<A NAME="getValues()"><!-- --></A><H3>
getValues</H3>
<PRE>
public java.lang.String[] <B>getValues</B>()</PRE>
<DL>
<DD><B>Description copied from class: <CODE><A HREF="../../../../../org/apache/tools/ant/types/EnumeratedAttribute.html#getValues()">EnumeratedAttribute</A></CODE></B></DD>
<DD>This is the only method a subclass needs to implement.
<P>
<DD><DL>
<DT><B>Specified by:</B><DD><CODE><A HREF="../../../../../org/apache/tools/ant/types/EnumeratedAttribute.html#getValues()">getValues</A></CODE> in class <CODE><A HREF="../../../../../org/apache/tools/ant/types/EnumeratedAttribute.html" title="class in org.apache.tools.ant.types">EnumeratedAttribute</A></CODE></DL>
</DD>
<DD><DL>
<DT><B>Returns:</B><DD>the filenamemapper names</DL>
</DD>
</DL>
<HR>
<A NAME="getImplementation()"><!-- --></A><H3>
getImplementation</H3>
<PRE>
public java.lang.String <B>getImplementation</B>()</PRE>
<DL>
<DD><DL>
<DT><B>Returns:</B><DD>the classname for the filenamemapper name</DL>
</DD>
</DL>
<!-- ========= END OF CLASS DATA ========= -->
<HR>
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<A NAME="navbar_bottom"><!-- --></A>
<A HREF="#skip-navbar_bottom" title="Skip navigation links"></A>
<TABLE BORDER="0" WIDTH="100%" CELLPADDING="1" CELLSPACING="0" SUMMARY="">
<TR>
<TD COLSPAN=2 BGCOLOR="#EEEEFF" CLASS="NavBarCell1">
<A NAME="navbar_bottom_firstrow"><!-- --></A>
<TABLE BORDER="0" CELLPADDING="0" CELLSPACING="3" SUMMARY="">
<TR ALIGN="center" VALIGN="top">
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../overview-summary.html"><FONT CLASS="NavBarFont1"><B>Overview</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="package-summary.html"><FONT CLASS="NavBarFont1"><B>Package</B></FONT></A> </TD>
<TD BGCOLOR="#FFFFFF" CLASS="NavBarCell1Rev"> <FONT CLASS="NavBarFont1Rev"><B>Class</B></FONT> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="package-tree.html"><FONT CLASS="NavBarFont1"><B>Tree</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../deprecated-list.html"><FONT CLASS="NavBarFont1"><B>Deprecated</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../index-all.html"><FONT CLASS="NavBarFont1"><B>Index</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../help-doc.html"><FONT CLASS="NavBarFont1"><B>Help</B></FONT></A> </TD>
</TR>
</TABLE>
</TD>
<TD ALIGN="right" VALIGN="top" ROWSPAN=3><EM>
</EM>
</TD>
</TR>
<TR>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
<A HREF="../../../../../org/apache/tools/ant/types/Mapper.html" title="class in org.apache.tools.ant.types"><B>PREV CLASS</B></A>
<A HREF="../../../../../org/apache/tools/ant/types/Parameter.html" title="class in org.apache.tools.ant.types"><B>NEXT CLASS</B></A></FONT></TD>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
<A HREF="../../../../../index.html?org/apache/tools/ant/types/Mapper.MapperType.html" target="_top"><B>FRAMES</B></A>
<A HREF="Mapper.MapperType.html" target="_top"><B>NO FRAMES</B></A>
<SCRIPT type="text/javascript">
<!--
if(window==top) {
document.writeln('<A HREF="../../../../../allclasses-noframe.html"><B>All Classes</B></A>');
}
//-->
</SCRIPT>
<NOSCRIPT>
<A HREF="../../../../../allclasses-noframe.html"><B>All Classes</B></A>
</NOSCRIPT>
</FONT></TD>
</TR>
<TR>
<TD VALIGN="top" CLASS="NavBarCell3"><FONT SIZE="-2">
SUMMARY: NESTED | <A HREF="#fields_inherited_from_class_org.apache.tools.ant.types.EnumeratedAttribute">FIELD</A> | <A HREF="#constructor_summary">CONSTR</A> | <A HREF="#method_summary">METHOD</A></FONT></TD>
<TD VALIGN="top" CLASS="NavBarCell3"><FONT SIZE="-2">
DETAIL: FIELD | <A HREF="#constructor_detail">CONSTR</A> | <A HREF="#method_detail">METHOD</A></FONT></TD>
</TR>
</TABLE>
<A NAME="skip-navbar_bottom"></A>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
<HR>
</BODY>
</HTML>
| splunk/splunk-shuttl | contrib/apache-ant-1.8.2/docs/manual/api/org/apache/tools/ant/types/Mapper.MapperType.html | HTML | apache-2.0 | 14,384 |
//---------------------------------------------------------- -*- Mode: C++ -*-
// $Id$
//
// Created 2009/05/20
// Author: Mike Ovsiannikov
//
// Copyright 2009-2012 Quantcast Corp.
//
// This file is part of Kosmos File System (KFS).
//
// Licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.
//
//
//----------------------------------------------------------------------------
#ifndef KFS_NET_CLIENT_H
#define KFS_NET_CLIENT_H
#include "common/kfstypes.h"
#include <cerrno>
#include <string>
class QCThread;
namespace KFS
{
class IOBuffer;
class NetManager;
struct ServerLocation;
class ClientAuthContext;
namespace client
{
struct KfsOp;
using std::ostream;
using std::string;
// Generic KFS request / response protocol state machine.
//
// Queues ops destined for a single server and, per its configuration,
// handles connection establishment, retries, timeouts, and optional
// authentication.  The implementation lives behind the pimpl (Impl);
// this class is a forwarding facade.
class KfsNetClient
{
private:
    class Impl;
public:
    // Callback interface implemented by whoever enqueues an op.
    // OpDone() is invoked when the op completes or is canceled.
    class OpOwner
    {
    // protected:
    public:
        virtual void OpDone(
            KfsOp*    inOpPtr,
            bool      inCanceledFlag,
            IOBuffer* inBufferPtr) = 0;
        virtual ~OpOwner() {}
        friend class Impl;
    };
    // Cumulative client counters; obtained via GetStats().
    struct Stats
    {
        typedef int64_t Counter;
        Stats()
            : mConnectCount(0),
              mConnectFailureCount(0),
              mNetErrorCount(0),
              mConnectionIdleTimeoutCount(0),
              mResponseTimeoutCount(0),
              mOpsQueuedCount(0),
              mOpsTimeoutCount(0),
              mOpsRetriedCount(0),
              mOpsCancelledCount(0),
              mSleepTimeSec(0),
              mBytesReceivedCount(0),
              mBytesSentCount(0)
            {}
        // Reset every counter to zero.
        void Clear()
            { *this = Stats(); }
        // Element-wise accumulation; returns *this for chaining.
        Stats& Add(
            const Stats& inStats)
        {
            mConnectCount               += inStats.mConnectCount;
            mConnectFailureCount        += inStats.mConnectFailureCount;
            mNetErrorCount              += inStats.mNetErrorCount;
            mConnectionIdleTimeoutCount += inStats.mConnectionIdleTimeoutCount;
            mResponseTimeoutCount       += inStats.mResponseTimeoutCount;
            mOpsQueuedCount             += inStats.mOpsQueuedCount;
            mOpsTimeoutCount            += inStats.mOpsTimeoutCount;
            mOpsRetriedCount            += inStats.mOpsRetriedCount;
            mOpsCancelledCount          += inStats.mOpsCancelledCount;
            mSleepTimeSec               += inStats.mSleepTimeSec;
            mBytesReceivedCount         += inStats.mBytesReceivedCount;
            mBytesSentCount             += inStats.mBytesSentCount;
            return *this;
        }
        // Invoke inFunctor(name, value) for each counter; lets callers
        // format or export the stats without hard-coding the member list.
        template<typename T>
        void Enumerate(
            T& inFunctor) const
        {
            inFunctor("Connect",               mConnectCount);
            inFunctor("ConnectFailure",        mConnectFailureCount);
            inFunctor("NetError",              mNetErrorCount);
            inFunctor("ConnectionIdleTimeout", mConnectionIdleTimeoutCount);
            inFunctor("ResponseTimeout",       mResponseTimeoutCount);
            inFunctor("OpsQueued",             mOpsQueuedCount);
            inFunctor("OpsTimeout",            mOpsTimeoutCount);
            inFunctor("OpsRetried",            mOpsRetriedCount);
            inFunctor("OpsCancelled",          mOpsCancelledCount);
            inFunctor("SleepTimeSec",          mSleepTimeSec);
            inFunctor("BytesReceived",         mBytesReceivedCount);
            inFunctor("BytesSent",             mBytesSentCount);
        }
        Counter mConnectCount;
        Counter mConnectFailureCount;
        Counter mNetErrorCount;
        Counter mConnectionIdleTimeoutCount;
        Counter mResponseTimeoutCount;
        Counter mOpsQueuedCount;
        Counter mOpsTimeoutCount;
        Counter mOpsRetriedCount;
        Counter mOpsCancelledCount;
        Counter mSleepTimeSec;
        Counter mBytesReceivedCount;
        Counter mBytesSentCount;
    };
    // Synthetic error codes, offset past the errno range so that they
    // cannot collide with ordinary system error values.
    enum {
        kErrorMaxRetryReached = -(10000 + ETIMEDOUT),
        kErrorRequeueRequired = -(10000 + ETIMEDOUT + 1)
    };
    // Debug hook: observes internal events (see SetEventObserver()).
    class EventObserver
    {
    public:
        virtual bool Event(
            int&   ioCode,
            void*& ioDataPtr) = 0;
    protected:
        EventObserver() {}
        virtual ~EventObserver() {}
    };
    KfsNetClient(
        NetManager&        inNetManager,
        string             inHost                           = string(),
        int                inPort                           = 0,
        int                inMaxRetryCount                  = 0,
        int                inTimeSecBetweenRetries          = 10,
        int                inOpTimeoutSec                   = 5 * 60,
        int                inIdleTimeoutSec                 = 30 * 60,
        int64_t            inInitialSeqNum                  = 1,
        const char*        inLogPrefixPtr                   = 0,
        bool               inResetConnectionOnOpTimeoutFlag = true,
        int                inMaxContentLength               = MAX_RPC_HEADER_LEN,
        bool               inFailAllOpsOnOpTimeoutFlag      = false,
        bool               inMaxOneOutstandingOpFlag        = false,
        ClientAuthContext* inAuthContextPtr                 = 0,
        const QCThread*    inThreadPtr                      = 0);
    virtual ~KfsNetClient();
    bool IsConnected() const;
    int64_t GetDisconnectCount() const; // Used to detect disconnects
    // Start communicating with the given server, optionally retrying any
    // pending ops and installing an authentication context.
    bool Start(
        string             inServerName,
        int                inServerPort,
        string*            inErrMsgPtr,
        bool               inRetryPendingOpsFlag,
        int                inMaxRetryCount,
        int                inTimeSecBetweenRetries,
        bool               inRetryConnectOnlyFlag,
        ClientAuthContext* inAuthContextPtr);
    // Point the client at a (possibly different) server.
    bool SetServer(
        const ServerLocation& inLocation,
        bool                  inCancelPendingOpsFlag = true,
        string*               inErrMsgPtr            = 0,
        bool                  inForceConnectFlag     = true);
    // Set the authentication key; the two-argument-id overload below takes
    // an explicit key id length instead of a nul-terminated id.
    void SetKey(
        const char* inKeyIdPtr,
        const char* inKeyDataPtr,
        int         inKeyDataSize);
    void SetKey(
        const char* inKeyIdPtr,
        int         inKeyIdLen,
        const char* inKeyDataPtr,
        int         inKeyDataSize);
    const string& GetKey() const;          // Key presently set
    const string& GetKeyId() const;
    const string& GetSessionKey() const;   // Key used with connection.
    const string& GetSessionKeyId() const;
    void SetShutdownSsl(
        bool inFlag);
    bool IsShutdownSsl() const;
    void SetAuthContext(
        ClientAuthContext* inAuthContextPtr);
    // Convenience: set auth context and key, then switch servers, in one call.
    bool SetServer(
        const ServerLocation& inLocation,
        ClientAuthContext*    inAuthContextPtr,
        const char*           inKeyIdPtr,
        const char*           inKeyDataPtr,
        int                   inKeyDataSize,
        bool                  inCancelPendingOpsFlag = true)
    {
        SetAuthContext(inAuthContextPtr);
        SetKey(inKeyIdPtr, inKeyDataPtr, inKeyDataSize);
        return SetServer(inLocation, inCancelPendingOpsFlag);
    }
    ClientAuthContext* GetAuthContext() const;
    void Stop();
    // Retry / timeout configuration accessors.
    int GetMaxRetryCount() const;
    void SetMaxRetryCount(
        int inMaxRetryCount);
    int GetOpTimeoutSec() const;
    void SetOpTimeoutSec(
        int inTimeout);
    int GetOpTimeout() const;
    int GetIdleTimeoutSec() const;
    void SetIdleTimeoutSec(
        int inTimeout);
    int GetTimeSecBetweenRetries();
    void SetTimeSecBetweenRetries(
        int inTimeSec);
    // Connection / traffic status queries.
    bool IsAllDataSent() const;
    bool IsDataReceived() const;
    bool IsDataSent() const;
    bool IsRetryConnectOnly() const;
    bool WasDisconnected() const;
    void SetRetryConnectOnly(
        bool inFlag);
    void SetOpTimeout(
        int inOpTimeoutSec);
    void GetStats(
        Stats& outStats) const;
    // Queue an op for execution; the owner's OpDone() is invoked on
    // completion or cancellation.
    bool Enqueue(
        KfsOp*    inOpPtr,
        OpOwner*  inOwnerPtr,
        IOBuffer* inBufferPtr = 0);
    // Cancel a single op, all ops, or all ops with a given owner.
    bool Cancel(
        KfsOp*   inOpPtr,
        OpOwner* inOwnerPtr);
    bool Cancel();
    void CancelAllWithOwner(
        OpOwner* inOwnerPtr);
    const ServerLocation& GetServerLocation() const;
    NetManager& GetNetManager() const;
    void SetEventObserver(
        EventObserver* inEventObserverPtr); // Debug hook
    void SetMaxContentLength(
        int inMax);
    void ClearMaxOneOutstandingOpFlag();
    void SetFailAllOpsOnOpTimeoutFlag(
        bool inFlag);
    void SetMaxRpcHeaderLength(
        int inMaxRpcHeaderLength);
    // Debug
    void SetThread(
        const QCThread* inThreadPtr);
private:
    Impl& mImpl;
private:
    // Not implemented: copying is intentionally disallowed (pre-C++11 idiom).
    KfsNetClient(
        const KfsNetClient& inClient);
    KfsNetClient& operator=(
        const KfsNetClient& inClient);
};
}}
#endif /* KFS_NET_CLIENT_H */
| chanwit/qfs | src/cc/libclient/KfsNetClient.h | C | apache-2.0 | 9,338 |
# Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: [email protected]
# Maintained By: [email protected]
from ggrc import db
from sqlalchemy.orm import validates
from .mixins import deferred, BusinessObject, Timeboxed, CustomAttributable
from .object_document import Documentable
from .object_owner import Ownable
from .object_person import Personable
from .option import Option
from .relationship import Relatable
from .utils import validate_option
from .track_object_state import HasObjectState, track_state_for_class
class Product(HasObjectState, CustomAttributable, Documentable, Personable,
              Relatable, Timeboxed, Ownable, BusinessObject, db.Model):
  """SQLAlchemy model for a GGRC Product business object.

  The mixins supply the shared behavior: ownership, person/document/
  relationship mappings, time-boxing, custom attributes and object-state
  tracking.
  """
  __tablename__ = 'products'
  # FK into the options table; deferred so the column loads lazily on access.
  kind_id = deferred(db.Column(db.Integer), 'Product')
  # Free-form version string; listed in _sanitize_html below.
  version = deferred(db.Column(db.String), 'Product')
  # Option row describing the product type. The primaryjoin restricts the
  # join to Option rows whose role is "product_type", so unrelated options
  # never match.
  kind = db.relationship(
      'Option',
      primaryjoin='and_(foreign(Product.kind_id) == Option.id, '\
          'Option.role == "product_type")',
      uselist=False,
      )
  # Attributes exposed via the JSON publishing layer.
  _publish_attrs = [
      'kind',
      'version',
  ]
  # Attributes whose values are run through the HTML sanitizer on input.
  _sanitize_html = ['version',]
  # Import/export column aliases; "kind" is matched via _filter_by_kind.
  _aliases = {
      "url": "Product URL",
      "kind": {
          "display_name": "Kind/Type",
          "filter_by": "_filter_by_kind",
      },
  }
  @validates('kind')
  def validate_product_options(self, key, option):
    # Delegate to the shared validator: only options with role
    # 'product_type' are accepted for the 'kind' attribute.
    return validate_option(
        self.__class__.__name__, key, option, 'product_type')
  @classmethod
  def _filter_by_kind(cls, predicate):
    # Build an EXISTS subquery applying the import/export filter predicate
    # to the related Option's title.
    return Option.query.filter(
        (Option.id == cls.kind_id) & predicate(Option.title)
    ).exists()
  @classmethod
  def eager_query(cls):
    # Extend the base eager query so 'kind' is joined-loaded in the same
    # round trip (avoids an N+1 query when listing products).
    from sqlalchemy import orm
    query = super(Product, cls).eager_query()
    return query.options(orm.joinedload('kind'))
# Register Product with the object-state tracking machinery.
track_state_for_class(Product)
| hyperNURb/ggrc-core | src/ggrc/models/product.py | Python | apache-2.0 | 1,890 |
/* Copyright 2019 Google LLC. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// This test covers non-basic specs.
#include "tensorflow/lite/experimental/ruy/test.h"
namespace ruy {
// Spec that forces a specific loop structure (kSimple / kGeneral) instead of
// letting ruy choose one automatically (kAuto).
template <typename AccumScalar, typename DstScalar,
          LoopStructure tLoopStructure>
struct LoopStructureSpec : BasicSpec<AccumScalar, DstScalar> {
  static constexpr LoopStructure kLoopStructure = tLoopStructure;
};

// Spec that restricts which zero-point values are supported (general
// asymmetric quantization vs. symmetric-only).
template <typename AccumScalar, typename DstScalar,
          ZeroPointSupport tZeroPointSupport>
struct ZeroPointSupportSpec : BasicSpec<AccumScalar, DstScalar> {
  static constexpr ZeroPointSupport kZeroPointSupport = tZeroPointSupport;
};

// Spec that only supports the RCC layout combination.
template <typename AccumScalar, typename DstScalar>
struct RCCSpec : BasicSpec<AccumScalar, DstScalar> {
  static constexpr LayoutSupport kLayoutSupport = LayoutSupport::kRCC;
};

// Spec that overrides the kernel layouts used by the standard C++ fallback
// path and disables the cache-friendly traversal threshold (always 0).
template <typename AccumScalar, typename DstScalar, typename LhsKernelLayout,
          typename RhsKernelLayout>
struct StandardCppKernelLayoutSpec : BasicSpec<AccumScalar, DstScalar> {
  using StandardCppKernelLhsLayout = LhsKernelLayout;
  using StandardCppKernelRhsLayout = RhsKernelLayout;
  static int cache_friendly_traversal_threshold() { return 0; }
};
using LhsScalar = RUY_TEST_LHSSCALAR;
using RhsScalar = RUY_TEST_RHSSCALAR;
using AccumScalar = RUY_TEST_ACCUMSCALAR;
using DstScalar = RUY_TEST_DSTSCALAR;
// Runs linear matmul tests in all storage orders with the given
// LoopStructure forced via LoopStructureSpec.
template <LoopStructure tLoopStructure>
void TestLoopStructure() {
  using SpecType = LoopStructureSpec<AccumScalar, DstScalar, tLoopStructure>;
  using TestSetType = TestSet<LhsScalar, RhsScalar, SpecType>;
  // Small square shapes, sizes 1..9, exercise edge cases around kernel
  // block boundaries.
  for (int size = 1; size < 10; size++) {
    TestLinearAllOrders<TestSetType>(size, size, size);
  }
  // A few larger rectangular shapes.
  TestLinearAllOrders<TestSetType>(3, 5, 78);
  TestLinearAllOrders<TestSetType>(19, 91, 7);
  TestLinearAllOrders<TestSetType>(71, 26, 44);
  TestLinearAllOrders<TestSetType>(81, 93, 72);
}
// Checks that the default spec auto-selects the loop structure, then runs
// the suite with each structure forced explicitly.
TEST(TestSpecialSpecs, LoopStructure) {
  static_assert(BasicSpec<std::uint8_t, std::int32_t>::kLoopStructure ==
                    LoopStructure::kAuto,
                "");
  static_assert(BasicSpec<float, float>::kLoopStructure == LoopStructure::kAuto,
                "");
  TestLoopStructure<LoopStructure::kSimple>();
  TestLoopStructure<LoopStructure::kGeneral>();
}
// Runs one 11x12x13 multiplication with explicitly specified zero points
// under the given ZeroPointSupport policy, expecting either success or
// failure (death test) per expected_outcome.
template <ZeroPointSupport tZeroPointSupport>
void TestZeroPointSupport(LhsScalar lhs_zero_point, RhsScalar rhs_zero_point,
                          DstScalar dst_zero_point,
                          ExpectedOutcome expected_outcome) {
  using SpecType =
      ZeroPointSupportSpec<AccumScalar, DstScalar, tZeroPointSupport>;
  using TestSetType = TestSet<LhsScalar, RhsScalar, SpecType>;
  TestSetType test_set;
  // Arbitrary small shape; the zero-point handling is what is under test.
  test_set.rows = 11;
  test_set.depth = 12;
  test_set.cols = 13;
  test_set.lhs_order = Order::kRowMajor;
  test_set.rhs_order = Order::kColMajor;
  test_set.dst_order = Order::kColMajor;
  test_set.layout_style = LayoutStyle::kPackedLinear;
  test_set.expected_outcome = expected_outcome;
  // Force the exact zero points instead of letting the harness pick them.
  test_set.lhs_zero_point = lhs_zero_point;
  test_set.rhs_zero_point = rhs_zero_point;
  test_set.dst_zero_point = dst_zero_point;
  test_set.use_specified_zero_points = true;
  test_set.Run();
}
// kGeneral must accept any zero points; kSymmetric must die as soon as any
// of the three zero points deviates from the symmetric value.
TEST(TestSpecialSpecs, ZeroPointSupport) {
  // Sanity check
  RUY_CHECK_EQ(SymmetricZeroPoint<std::uint8_t>(), 128);
  RUY_CHECK_EQ(SymmetricZeroPoint<std::int8_t>(), 0);
  // Zero points are a quantization concept; skip for float paths.
  if (std::is_floating_point<LhsScalar>::value) {
    return;
  }
  TestZeroPointSupport<ZeroPointSupport::kGeneral>(
      SymmetricZeroPoint<LhsScalar>(), SymmetricZeroPoint<RhsScalar>(),
      SymmetricZeroPoint<DstScalar>(), ExpectedOutcome::kSuccess);
  // kGeneral tolerates an asymmetric LHS zero point.
  TestZeroPointSupport<ZeroPointSupport::kGeneral>(
      SymmetricZeroPoint<LhsScalar>() - 1, SymmetricZeroPoint<RhsScalar>(),
      SymmetricZeroPoint<DstScalar>(), ExpectedOutcome::kSuccess);
  TestZeroPointSupport<ZeroPointSupport::kSymmetric>(
      SymmetricZeroPoint<LhsScalar>(), SymmetricZeroPoint<RhsScalar>(),
      SymmetricZeroPoint<DstScalar>(), ExpectedOutcome::kSuccess);
  // Each of the three zero points deviating by one must be rejected.
  TestZeroPointSupport<ZeroPointSupport::kSymmetric>(
      SymmetricZeroPoint<LhsScalar>() + 1, SymmetricZeroPoint<RhsScalar>(),
      SymmetricZeroPoint<DstScalar>(), ExpectedOutcome::kDeath);
  TestZeroPointSupport<ZeroPointSupport::kSymmetric>(
      SymmetricZeroPoint<LhsScalar>(), SymmetricZeroPoint<RhsScalar>() + 1,
      SymmetricZeroPoint<DstScalar>(), ExpectedOutcome::kDeath);
  TestZeroPointSupport<ZeroPointSupport::kSymmetric>(
      SymmetricZeroPoint<LhsScalar>(), SymmetricZeroPoint<RhsScalar>(),
      SymmetricZeroPoint<DstScalar>() - 1, ExpectedOutcome::kDeath);
}
// An RCC-only spec must succeed on the RCC layout combination and die on
// any other layout combination.
TEST(TestSpecialSpecs, RCC) {
  using RCCSpec = RCCSpec<AccumScalar, DstScalar>;
  using RCCTestSet = TestSet<LhsScalar, RhsScalar, RCCSpec>;
  TestRCC<RCCTestSet>(81, 93, 72);
  TestNonRCC<RCCTestSet>(81, 93, 72, ExpectedOutcome::kDeath);
}
// Runs linear tests in all storage orders with the standard C++ kernel
// layouts overridden by the given LHS/RHS kernel layouts.
template <typename LhsKernelLayout, typename RhsKernelLayout>
void TestStandardCppKernelLayout() {
  using SpecType =
      StandardCppKernelLayoutSpec<AccumScalar, DstScalar, LhsKernelLayout,
                                  RhsKernelLayout>;
  using TestSetType = TestSet<LhsScalar, RhsScalar, SpecType>;
  // Small square shapes plus two larger rectangular shapes.
  for (int size = 1; size < 10; size++) {
    TestLinearAllOrders<TestSetType>(size, size, size);
  }
  TestLinearAllOrders<TestSetType>(87, 34, 56);
  TestLinearAllOrders<TestSetType>(123, 234, 78);
}
// Degenerate 1x1 kernel layout.
TEST(TestSpecialSpecs, StandardCppKernelLayoutTrivial1x1) {
  TestStandardCppKernelLayout<FixedKernelLayout<Order::kColMajor, 1, 1>,
                              FixedKernelLayout<Order::kColMajor, 1, 1>>();
}
// Square row-major 4x4 kernel layout.
TEST(TestSpecialSpecs, StandardCppKernelLayoutSquare4x4) {
  TestStandardCppKernelLayout<FixedKernelLayout<Order::kRowMajor, 4, 4>,
                              FixedKernelLayout<Order::kRowMajor, 4, 4>>();
}
// Mismatched LHS/RHS widths (4 vs 8).
TEST(TestSpecialSpecs, StandardCppKernelLayoutRectangular4x8) {
  TestStandardCppKernelLayout<FixedKernelLayout<Order::kColMajor, 1, 4>,
                              FixedKernelLayout<Order::kColMajor, 1, 8>>();
}
} // namespace ruy
| adit-chandra/tensorflow | tensorflow/lite/experimental/ruy/test_special_specs.cc | C++ | apache-2.0 | 6,536 |
// Copyright (c) Microsoft. All Rights Reserved. Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
using System.Collections.Immutable;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.CodeAnalysis.Host;
using Microsoft.CodeAnalysis.Text;
namespace Microsoft.CodeAnalysis.EditAndContinue
{
    /// <summary>
    /// Per-language service providing Edit and Continue analysis of a document.
    /// </summary>
    internal interface IEditAndContinueAnalyzer : ILanguageService
    {
        /// <summary>
        /// Analyzes <paramref name="document"/> against the baseline project
        /// (<paramref name="baseProjectOpt"/>; presumably may be null — confirm
        /// with callers) and the given active statements, producing
        /// <see cref="DocumentAnalysisResults"/>.
        /// </summary>
        Task<DocumentAnalysisResults> AnalyzeDocumentAsync(Project baseProjectOpt, ImmutableArray<ActiveStatement> activeStatements, Document document, IActiveStatementTrackingService trackingService, CancellationToken cancellationToken);

        /// <summary>
        /// Returns the exception-region spans associated with the active statement at
        /// <paramref name="activeStatementSpan"/> in the given syntax tree;
        /// <paramref name="isCovered"/> is an out-flag computed by the implementation
        /// (semantics defined by implementers).
        /// </summary>
        ImmutableArray<LinePositionSpan> GetExceptionRegions(SourceText text, SyntaxNode syntaxRoot, LinePositionSpan activeStatementSpan, bool isLeaf, out bool isCovered);
    }
| nguerrera/roslyn | src/Features/Core/Portable/EditAndContinue/IEditAndContinueAnalyzer.cs | C# | apache-2.0 | 870 |
/* Copyright 2011 Jukka Jylänki
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
/** @file float4.cpp
@author Jukka Jylänki
@brief */
#include "Math/float4.h"
#ifdef MATH_ENABLE_STL_SUPPORT
#include "myassert.h"
#include <utility>
#include <iostream>
#endif
#include <stdio.h>
#include <stdlib.h>
#include "Math/float2.h"
#include "Math/float3.h"
#include "Geometry/Sphere.h"
#include "Algorithm/Random/LCG.h"
#include "Math/float4x4.h"
#include "Math/MathFunc.h"
#include "SSEMath.h"
MATH_BEGIN_NAMESPACE
using namespace std;
float4::float4(float x_, float y_, float z_, float w_)
:x(x_), y(y_), z(z_), w(w_)
{
}
float4::float4(const float3 &xyz, float w_)
:x(xyz.x), y(xyz.y), z(xyz.z), w(w_)
{
}
float4::float4(const float2 &xy, float z_, float w_)
:x(xy.x), y(xy.y), z(z_), w(w_)
{
}
float4::float4(const float *data)
{
assume(data);
#ifndef MATH_ENABLE_INSECURE_OPTIMIZATIONS
if (!data)
return;
#endif
x = data[0];
y = data[1];
z = data[2];
w = data[3];
}
float *float4::ptr()
{
return &x;
}
const float *float4::ptr() const
{
return &x;
}
CONST_WIN32 float float4::At(int index) const
{
assume(index >= 0);
assume(index < Size);
#ifndef MATH_ENABLE_INSECURE_OPTIMIZATIONS
if (index < 0 || index >= Size)
return FLOAT_NAN;
#endif
return ptr()[index];
}
float &float4::At(int index)
{
assume(index >= 0);
assume(index < Size);
#ifndef MATH_ENABLE_INSECURE_OPTIMIZATIONS
if (index < 0 || index >= Size)
return ptr()[0];
#endif
return ptr()[index];
}
float2 float4::xy() const
{
return float2(x, y);
}
float3 float4::xyz() const
{
return float3(x, y, z);
}
float2 float4::Swizzled(int i, int j) const
{
return float2(At(i), At(j));
}
float3 float4::Swizzled(int i, int j, int k) const
{
return float3(At(i), At(j), At(k));
}
float4 float4::Swizzled(int i, int j, int k, int l) const
{
#ifdef MATH_SSE
return float4(Swizzled_SSE(i,j,k,l));
#else
return float4(At(i), At(j), At(k), At(l));
#endif
}
#ifdef MATH_SSE
__m128 float4::Swizzled_SSE(int i, int j, int k, int l) const
{
#ifdef MATH_AVX
__m128i permute = _mm_set_epi32(l, k, j, i);
return _mm_permutevar_ps(v, permute);
#else
///\todo How to perform an efficient swizzle if AVX is not available?
/// We need a dynamic runtime shuffle operation, so _mm_shuffle_ps
/// cannot be used. The following does a slow SSE->memory->SSE shuffle.
float4 v(At(i), At(j), At(k), At(l));
return v.v;
#endif
}
/// The returned vector contains the squared length of the float3 part in the lowest channel of the vector.
__m128 float4::LengthSq3_SSE() const
{
return _mm_dot3_ps(v, v);
}
/// The returned vector contains the length of the float3 part in the lowest channel of the vector.
__m128 float4::Length3_SSE() const
{
return _mm_sqrt_ss(_mm_dot3_ps(v, v));
}
/// The returned vector contains the squared length of the float4 in each channel of the vector.
__m128 float4::LengthSq4_SSE() const
{
#ifdef MATH_SSE41 // If we have SSE 4.1, we can use the dpps (dot product) instruction, _mm_dp_ps intrinsic.
__m128 v2 = _mm_dp_ps(v, v, 0xF0 | 0x0F); // Choose to multiply x, y, z and w (0xF0 = 1111 0000), and store the output to all indices (0x0F == 0000 1111).
return v2;
#else // Otherwise, use SSE3 haddps or SSE1 with individual shuffling.
__m128 v2 = _mm_mul_ps(v, v);
return _mm_sum_xyzw_ps(v2);
#endif
}
/// The returned vector contains the length of the float4 in the lowest channel of the vector.
__m128 float4::Length4_SSE() const
{
return _mm_sqrt_ss(_mm_dot4_ps(v, v));
}
__m128 float4::Normalize3_SSE()
{
__m128 len = Length3_SSE();
// Broadcast the length from the lowest index to all indices.
len = _mm_shuffle1_ps(len, _MM_SHUFFLE(0,0,0,0));
__m128 isZero = _mm_cmplt_ps(len, epsilonFloat); // Was the length zero?
__m128 normalized = _mm_div_ps(v, len); // Normalize.
normalized = _mm_cmov_ps(normalized, float4::unitX.v, isZero); // If length == 0, output the vector (1,0,0).
v = _mm_cmov_ps(v, normalized, SSEMaskXYZ()); // Return the original .w component to the vector (this function is supposed to preserve original .w).
return len;
}
void float4::Normalize3_Fast_SSE()
{
__m128 len = Length3_SSE();
// Broadcast the length from the lowest index to all indices.
len = _mm_shuffle1_ps(len, _MM_SHUFFLE(0,0,0,0));
__m128 normalized = _mm_div_ps(v, len); // Normalize.
v = _mm_cmov_ps(v, normalized, SSEMaskXYZ()); // Return the original .w component to the vector (this function is supposed to preserve original .w).
}
__m128 float4::Normalize4_SSE()
{
__m128 len = Length4_SSE();
// Broadcast the length from the lowest index to all indices.
len = _mm_shuffle1_ps(len, _MM_SHUFFLE(0,0,0,0));
__m128 isZero = _mm_cmplt_ps(len, epsilonFloat); // Was the length zero?
__m128 normalized = _mm_div_ps(v, len); // Normalize.
v = _mm_cmov_ps(normalized, float4::unitX.v, isZero); // If length == 0, output the vector (1,0,0,0).
return len;
}
void float4::Normalize4_Fast_SSE()
{
__m128 len = Length4_SSE();
// Broadcast the length from the lowest index to all indices.
len = _mm_shuffle1_ps(len, _MM_SHUFFLE(0,0,0,0));
v = _mm_div_ps(v, len); // Normalize.
}
void float4::NormalizeW_SSE()
{
__m128 div = _mm_shuffle1_ps(v, _MM_SHUFFLE(3,3,3,3));
v = _mm_div_ps(v, div);
}
#endif
float float4::LengthSq3() const
{
#ifdef MATH_SSE
return M128_TO_FLOAT(LengthSq3_SSE());
#else
return x*x + y*y + z*z;
#endif
}
float float4::Length3() const
{
#ifdef MATH_SSE
return M128_TO_FLOAT(Length3_SSE());
#else
return sqrtf(x*x + y*y + z*z);
#endif
}
float float4::LengthSq4() const
{
#ifdef MATH_SSE
return M128_TO_FLOAT(LengthSq4_SSE());
#else
return x*x + y*y + z*z + w*w;
#endif
}
float float4::Length4() const
{
#ifdef MATH_SSE
return M128_TO_FLOAT(Length4_SSE());
#else
return sqrtf(x*x + y*y + z*z + w*w);
#endif
}
float float4::Normalize3()
{
#ifdef MATH_SSE
__m128 len = Normalize3_SSE();
return M128_TO_FLOAT(len);
#else
assume(IsFinite());
float lengthSq = LengthSq3();
if (lengthSq > 1e-6f)
{
float length = sqrtf(lengthSq);
float invLength = 1.f / length;
x *= invLength;
y *= invLength;
z *= invLength;
return length;
}
else
{
Set(1.f, 0.f, 0.f, w); // We will always produce a normalized vector.
return 0; // But signal failure, so user knows we have generated an arbitrary normalization.
}
#endif
}
float4 float4::Normalized3() const
{
float4 copy = *this;
float length = copy.Normalize3();
assume(length > 0);
MARK_UNUSED(length);
return copy;
}
float float4::Normalize4()
{
#ifdef MATH_SSE
__m128 len = Normalize4_SSE();
return M128_TO_FLOAT(len);
#else
assume(IsFinite());
float lengthSq = LengthSq4();
if (lengthSq > 1e-6f)
{
float length = sqrtf(lengthSq);
*this *= 1.f / length;
return length;
}
else
{
Set(1.f, 0.f, 0.f, 0.f); // We will always produce a normalized vector.
return 0; // But signal failure, so user knows we have generated an arbitrary normalization.
}
#endif
}
float4 float4::Normalized4() const
{
float4 copy = *this;
float length = copy.Normalize4();
assume(length > 0);
MARK_UNUSED(length);
return copy;
}
void float4::NormalizeW()
{
#ifdef MATH_SSE
NormalizeW_SSE();
#else
if (fabs(w) > 1e-6f)
{
float invW = 1.f / w;
x *= invW;
y *= invW;
z *= invW;
w = 1.f;
}
#endif
}
bool float4::IsWZeroOrOne(float epsilon) const
{
return EqualAbs(w, 0.f, epsilon) || EqualAbs(w, 1.f, epsilon);
}
bool float4::IsZero4(float epsilonSq) const
{
return LengthSq4() <= epsilonSq;
}
bool float4::IsZero3(float epsilonSq) const
{
return LengthSq3() <= epsilonSq;
}
bool float4::IsNormalized4(float epsilonSq) const
{
return fabs(LengthSq4()-1.f) <= epsilonSq;
}
bool float4::IsNormalized3(float epsilonSq) const
{
return fabs(LengthSq3()-1.f) <= epsilonSq;
}
void float4::Scale3(float scalar)
{
#ifdef MATH_SSE
__m128 scale = _mm_load_ss(&scalar);
__m128 one = _mm_set_ss(1.f);
scale = _mm_shuffle_ps(scale, one, _MM_SHUFFLE(0,0,0,0)); // scale = (1 1 s s)
scale = _mm_shuffle1_ps(scale, _MM_SHUFFLE(3,0,0,0)); // scale = (1 s s s)
v = _mm_mul_ps(v, scale);
#else
x *= scalar;
y *= scalar;
z *= scalar;
#endif
}
float float4::ScaleToLength3(float newLength)
{
///\todo Add SSE-enabled version.
///\todo Add ClampToLength3.
float length = LengthSq3();
if (length < 1e-6f)
return 0.f;
length = sqrtf(length);
float scalar = newLength / length;
x *= scalar;
y *= scalar;
z *= scalar;
return length;
}
float4 float4::ScaledToLength3(float newLength) const
{
assume(!IsZero3());
float4 v = *this;
v.ScaleToLength3(newLength);
return v;
}
bool float4::IsFinite() const
{
return MATH_NS::IsFinite(x) && MATH_NS::IsFinite(y) && MATH_NS::IsFinite(z) && MATH_NS::IsFinite(w);
}
bool float4::IsPerpendicular3(const float4 &other, float epsilon) const
{
return fabs(this->Dot3(other)) < epsilon;
}
#ifdef MATH_ENABLE_STL_SUPPORT
/// Returns a human-readable "(x, y, z, w)" string with three decimals per
/// component.
std::string float4::ToString() const
{
	char str[256];
	// snprintf instead of sprintf: bounded write guarantees no buffer
	// overrun even for unexpectedly wide output; format is unchanged.
	snprintf(str, sizeof(str), "(%.3f, %.3f, %.3f, %.3f)", x, y, z, w);
	return std::string(str);
}
std::string float4::SerializeToString() const
{
char str[256];
sprintf(str, "%f %f %f %f", x, y, z, w);
return std::string(str);
}
#endif
/// Parses a float4 from a string such as "(x, y, z, w)". A leading '(' is
/// skipped; components may be separated by spaces/tabs and an optional
/// single ',' or ';'. Returns float4::nan on a null pointer.
float4 float4::FromString(const char *str)
{
	assume(str);
	if (!str)
		return float4::nan;
	if (*str == '(')
		++str;
	float4 parsed;
	float *elem = parsed.ptr();
	for (int i = 0; i < 4; ++i)
	{
		if (i > 0)
		{
			// Between components: skip whitespace, then one optional separator.
			while(*str == ' ' || *str == '\t')
				++str;
			if (*str == ',' || *str == ';')
				++str;
		}
		// strtod advances str past the parsed number.
		elem[i] = (float)strtod(str, const_cast<char**>(&str));
	}
	return parsed;
}
float float4::SumOfElements() const
{
#ifdef MATH_SSE
return M128_TO_FLOAT(_mm_sum_xyzw_ps(v));
#else
return x + y + z + w;
#endif
}
float float4::ProductOfElements() const
{
#ifdef MATH_SSE
return M128_TO_FLOAT(_mm_mul_xyzw_ps(v));
#else
return x * y * z * w;
#endif
}
float float4::AverageOfElements() const
{
///\todo SSE.
return (x + y + z + w) / 4.f;
}
float float4::MinElement() const
{
return MATH_NS::Min(MATH_NS::Min(x, y), MATH_NS::Min(z, w));
}
/// Returns the index (0..3) of the smallest component. All comparisons are
/// strict '<', so on exact ties the later index wins, exactly as before.
int float4::MinElementIndex() const
{
	if (x < y)
		return (z < w) ? ((x < z) ? 0 : 2) : ((x < w) ? 0 : 3);
	else
		return (z < w) ? ((y < z) ? 1 : 2) : ((y < w) ? 1 : 3);
}
/// Returns the largest of the four components.
float float4::MaxElement() const
{
	// Bug fix: the inner reduction previously used MATH_NS::Min(z, w),
	// which returned a wrong result whenever the true maximum was in z or w
	// but min(z, w) was below max(x, y). All four reductions must be Max.
	return MATH_NS::Max(MATH_NS::Max(x, y), MATH_NS::Max(z, w));
}
/// Returns the index (0..3) of the largest component. All comparisons are
/// strict '>', so on exact ties the later index wins, exactly as before.
int float4::MaxElementIndex() const
{
	if (x > y)
		return (z > w) ? ((x > z) ? 0 : 2) : ((x > w) ? 0 : 3);
	else
		return (z > w) ? ((y > z) ? 1 : 2) : ((y > w) ? 1 : 3);
}
float4 float4::Abs() const
{
#ifdef MATH_SSE
return float4(_mm_abs_ps(v));
#else
return float4(fabs(x), fabs(y), fabs(z), fabs(w));
#endif
}
float4 float4::Neg3() const
{
///\todo SSE.
return float4(-x, -y, -z, w);
}
float4 float4::Neg4() const
{
#ifdef MATH_SSE
const __m128 zero = _mm_setzero_ps();
return float4(_mm_sub_ps(zero, v));
#else
return float4(-x, -y, -z, -w);
#endif
}
float4 float4::Recip3() const
{
///\todo SSE.
return float4(1.f/x, 1.f/y, 1.f/z, w);
}
float4 float4::Recip4() const
{
#ifdef MATH_SSE
const __m128 one = _mm_set1_ps(1.f);
return float4(_mm_div_ps(one, v));
#else
return float4(1.f/x, 1.f/y, 1.f/z, 1.f/w);
#endif
}
float4 float4::RecipFast4() const
{
#ifdef MATH_SSE
return float4(_mm_rcp_ps(v));
#else
return float4(1.f/x, 1.f/y, 1.f/z, 1.f/w);
#endif
}
float4 float4::Min(float ceil) const
{
#ifdef MATH_SSE
__m128 v2 = _mm_set1_ps(ceil);
return float4(_mm_min_ps(v, v2));
#else
return float4(MATH_NS::Min(x, ceil), MATH_NS::Min(y, ceil), MATH_NS::Min(z, ceil), MATH_NS::Min(w, ceil));
#endif
}
float4 float4::Min(const float4 &ceil) const
{
#ifdef MATH_SSE
return float4(_mm_min_ps(v, ceil.v));
#else
return float4(MATH_NS::Min(x, ceil.x), MATH_NS::Min(y, ceil.y), MATH_NS::Min(z, ceil.z), MATH_NS::Min(w, ceil.w));
#endif
}
float4 float4::Max(float floor) const
{
#ifdef MATH_SSE
__m128 v2 = _mm_set1_ps(floor);
return float4(_mm_max_ps(v, v2));
#else
return float4(MATH_NS::Max(x, floor), MATH_NS::Max(y, floor), MATH_NS::Max(z, floor), MATH_NS::Max(w, floor));
#endif
}
float4 float4::Max(const float4 &floor) const
{
#ifdef MATH_SSE
return float4(_mm_max_ps(v, floor.v));
#else
return float4(MATH_NS::Max(x, floor.x), MATH_NS::Max(y, floor.y), MATH_NS::Max(z, floor.z), MATH_NS::Max(w, floor.w));
#endif
}
float4 float4::Clamp(const float4 &floor, const float4 &ceil) const
{
#ifdef MATH_SSE
return float4(_mm_max_ps(_mm_min_ps(v, ceil.v), floor.v));
#else
return float4(MATH_NS::Clamp(x, floor.x, ceil.x),
MATH_NS::Clamp(y, floor.y, ceil.y),
MATH_NS::Clamp(z, floor.z, ceil.z),
MATH_NS::Clamp(w, floor.w, ceil.w));
#endif
}
float4 float4::Clamp01() const
{
#ifdef MATH_SSE
__m128 floor = _mm_setzero_ps();
__m128 ceil = _mm_set1_ps(1.f);
return float4(_mm_max_ps(_mm_min_ps(v, ceil), floor));
#else
return float4(MATH_NS::Clamp(x, 0.f, 1.f),
MATH_NS::Clamp(y, 0.f, 1.f),
MATH_NS::Clamp(z, 0.f, 1.f),
MATH_NS::Clamp(w, 0.f, 1.f));
#endif
}
float4 float4::Clamp(float floor, float ceil) const
{
#ifdef MATH_SSE
__m128 vfloor = _mm_set1_ps(floor);
__m128 vceil = _mm_set1_ps(ceil);
return float4(_mm_max_ps(_mm_min_ps(v, vceil), vfloor));
#else
return float4(MATH_NS::Clamp(x, floor, ceil),
MATH_NS::Clamp(y, floor, ceil),
MATH_NS::Clamp(z, floor, ceil),
MATH_NS::Clamp(w, floor, ceil));
#endif
}
float float4::Distance3Sq(const float4 &rhs) const
{
#ifdef MATH_SSE
__m128 v2 = _mm_sub_ps(v, rhs.v);
return M128_TO_FLOAT(float4(v2).LengthSq3_SSE());
#else
float dx = x - rhs.x;
float dy = y - rhs.y;
float dz = z - rhs.z;
return dx*dx + dy*dy + dz*dz;
#endif
}
float float4::Distance3(const float4 &rhs) const
{
#ifdef MATH_SSE
__m128 v2 = _mm_sub_ps(v, rhs.v);
return M128_TO_FLOAT(float4(v2).Length3_SSE());
#else
return sqrtf(Distance3Sq(rhs));
#endif
}
float float4::Distance4Sq(const float4 &rhs) const
{
#ifdef MATH_SSE
__m128 v2 = _mm_sub_ps(v, rhs.v);
return M128_TO_FLOAT(float4(v2).LengthSq4_SSE());
#else
float dx = x - rhs.x;
float dy = y - rhs.y;
float dz = z - rhs.z;
float dw = w - rhs.w;
return dx*dx + dy*dy + dz*dz + dw*dw;
#endif
}
float float4::Distance4(const float4 &rhs) const
{
#ifdef MATH_SSE
__m128 v2 = _mm_sub_ps(v, rhs.v);
return M128_TO_FLOAT(float4(v2).Length4_SSE());
#else
return sqrtf(Distance4Sq(rhs));
#endif
}
float float4::Dot3(const float3 &rhs) const
{
#ifdef MATH_SSE
return M128_TO_FLOAT(_mm_dot3_ps(v, float4(rhs, 0.f).v));
#else
return x * rhs.x + y * rhs.y + z * rhs.z;
#endif
}
float float4::Dot3(const float4 &rhs) const
{
#ifdef MATH_SSE
return M128_TO_FLOAT(_mm_dot3_ps(v, rhs.v));
#else
return x * rhs.x + y * rhs.y + z * rhs.z;
#endif
}
float float4::Dot4(const float4 &rhs) const
{
#ifdef MATH_SSE
return M128_TO_FLOAT(_mm_dot4_ps(v, rhs.v));
#else
return x * rhs.x + y * rhs.y + z * rhs.z + w * rhs.w;
#endif
}
#ifdef MATH_SSE
__m128 _mm_cross_ps(__m128 a, __m128 b)
{
__m128 a_xzy = _mm_shuffle1_ps(a, _MM_SHUFFLE(3, 0, 2, 1)); // a_xzy = [a.w, a.x, a.z, a.y]
__m128 b_yxz = _mm_shuffle1_ps(b, _MM_SHUFFLE(3, 1, 0, 2)); // b_yxz = [b.w, b.y, b.x, b.z]
__m128 a_yxz = _mm_shuffle1_ps(a, _MM_SHUFFLE(3, 1, 0, 2)); // a_yxz = [a.w, a.y, a.x, a.z]
__m128 b_xzy = _mm_shuffle1_ps(b, _MM_SHUFFLE(3, 0, 2, 1)); // b_xzy = [b.w, b.x, b.z, b.y]
__m128 x = _mm_mul_ps(a_xzy, b_yxz); // [a.w*b.w, a.x*b.y, a.z*b.x, a.y*b.z]
__m128 y = _mm_mul_ps(a_yxz, b_xzy); // [a.w*b.w, a.y*b.x, a.x*b.z, a.z*b.y]
return _mm_sub_ps(x, y); // [0, a.x*b.y - a.y*b.x, a.z*b.x - a.x*b.z, a.y*b.z - a.z*b.y]
}
#endif
/** dst = A x B - Apply the diagonal rule to derive the standard cross product formula:
\code
|a cross b| = |a||b|sin(alpha)
i j k i j k units (correspond to x,y,z)
a.x a.y a.z a.x a.y a.z vector a (this)
b.x b.y b.z b.x b.y b.z vector b
-a.z*b.y*i -a.x*b.z*j -a.y*b.x*k a.y*b.z*i a.z*b.x*j a.x*b.y*k result
Add up the results:
x = a.y*b.z - a.z*b.y
y = a.z*b.x - a.x*b.z
z = a.x*b.y - a.y*b.x
\endcode
Cross product is anti-commutative, i.e. a x b == -b x a.
It distributes over addition, meaning that a x (b + c) == a x b + a x c,
and combines with scalar multiplication: (sa) x b == a x (sb).
i x j == -(j x i) == k,
(j x k) == -(k x j) == i,
(k x i) == -(i x k) == j. */
float4 float4::Cross3(const float3 &rhs) const
{
#ifdef MATH_SSE
return float4(_mm_cross_ps(v, float4(rhs, 0.f).v));
#else
float4 dst;
dst.x = y * rhs.z - z * rhs.y;
dst.y = z * rhs.x - x * rhs.z;
dst.z = x * rhs.y - y * rhs.x;
dst.w = 0.f;
return dst;
#endif
}
float4 float4::Cross3(const float4 &rhs) const
{
#ifdef MATH_SSE
return float4(_mm_cross_ps(v, rhs.v));
#else
return Cross3(rhs.xyz());
#endif
}
float4x4 float4::OuterProduct(const float4 &rhs) const
{
const float4 &u = *this;
const float4 &v = rhs;
return float4x4(u[0]*v[0], u[0]*v[1], u[0]*v[2], u[0]*v[3],
u[1]*v[0], u[1]*v[1], u[1]*v[2], u[1]*v[3],
u[2]*v[0], u[2]*v[1], u[2]*v[2], u[2]*v[3],
u[3]*v[0], u[3]*v[1], u[3]*v[2], u[3]*v[3]);
}
/// Returns a normalized direction (w == 0) perpendicular to the xyz part of
/// this vector: the normalized cross product with 'hint', or 'hint2' when
/// that cross product degenerates to zero (this vector parallel to 'hint').
/// Preconditions (asserted): xyz nonzero, w == 0, both hints normalized.
float4 float4::Perpendicular3(const float3 &hint, const float3 &hint2) const
{
	assume(!this->IsZero3());
	assume(EqualAbs(w, 0));
	assume(hint.IsNormalized());
	assume(hint2.IsNormalized());
	float3 v = this->Cross3(hint).xyz();
	// Normalize() returns the pre-normalization length; zero means the
	// cross product was degenerate (this was parallel to hint).
	float len = v.Normalize();
	if (len == 0)
		return float4(hint2, 0);
	else
		return float4(v, 0);
}
float4 float4::AnotherPerpendicular3(const float3 &hint, const float3 &hint2) const
{
float4 firstPerpendicular = Perpendicular3(hint, hint2);
float4 v = this->Cross3(firstPerpendicular);
return v.Normalized3();
}
float4 float4::Reflect3(const float3 &normal) const
{
assume(normal.IsNormalized());
assume(EqualAbs(w, 0));
return 2.f * this->ProjectToNorm3(normal) - *this;
}
float float4::AngleBetween3(const float4 &other) const
{
float cosa = Dot3(other) / sqrt(LengthSq3() * other.LengthSq3());
if (cosa >= 1.f)
return 0.f;
else if (cosa <= -1.f)
return pi;
else
return acos(cosa);
}
float float4::AngleBetweenNorm3(const float4 &other) const
{
assume(this->IsNormalized3());
assume(other.IsNormalized3());
return acos(Dot3(other));
}
float float4::AngleBetween4(const float4 &other) const
{
float cosa = Dot4(other) / sqrt(LengthSq4() * other.LengthSq4());
if (cosa >= 1.f)
return 0.f;
else if (cosa <= -1.f)
return pi;
else
return acos(cosa);
}
float float4::AngleBetweenNorm4(const float4 &other) const
{
assume(this->IsNormalized4());
assume(other.IsNormalized4());
return acos(Dot4(other));
}
float4 float4::ProjectTo3(const float3 &target) const
{
assume(!target.IsZero());
assume(this->IsWZeroOrOne());
return float4(target * Dot(xyz(), target) / target.LengthSq(), w);
}
float4 float4::ProjectToNorm3(const float3 &target) const
{
assume(target.IsNormalized());
assume(this->IsWZeroOrOne());
return float4(target * Dot(xyz(), target), w);
}
float4 float4::Lerp(const float4 &b, float t) const
{
assume(EqualAbs(this->w, b.w));
assume(0.f <= t && t <= 1.f);
return (1.f - t) * *this + t * b;
}
float4 float4::Lerp(const float4 &a, const float4 &b, float t)
{
return a.Lerp(b, t);
}
float4 float4::FromScalar(float scalar)
{
return float4(scalar, scalar, scalar, scalar);
}
float4 float4::FromScalar(float scalar, float w)
{
return float4(scalar, scalar, scalar, w);
}
void float4::SetFromScalar(float scalar)
{
#ifdef MATH_SSE
v = _mm_set1_ps(scalar);
#else
x = scalar;
y = scalar;
z = scalar;
w = scalar;
#endif
}
void float4::Set(float x_, float y_, float z_, float w_)
{
#ifdef MATH_SSE
v = _mm_set_ps(w_, z_, y_, x_);
#else
x = x_;
y = y_;
z = z_;
w = w_;
#endif
}
void float4::SetFromScalar(float scalar, float w_)
{
#ifdef MATH_SSE
v = _mm_set_ps(w_, scalar, scalar, scalar);
#else
x = scalar;
y = scalar;
z = scalar;
w = w_;
#endif
}
bool float4::Equals(const float4 &other, float epsilon) const
{
return fabs(x - other.x) < epsilon &&
fabs(y - other.y) < epsilon &&
fabs(z - other.z) < epsilon &&
fabs(w - other.w) < epsilon;
}
bool float4::Equals(float x_, float y_, float z_, float w_, float epsilon) const
{
return fabs(x - x_) < epsilon &&
fabs(y - y_) < epsilon &&
fabs(z - z_) < epsilon &&
fabs(w - w_) < epsilon;
}
float4 float4::RandomDir(LCG &lcg, float length)
{
return float4(Sphere(float3(0,0,0), length).RandomPointOnSurface(lcg), 0.f);
}
float4 float4::operator +(const float4 &rhs) const
{
#ifdef MATH_SSE
return float4(_mm_add_ps(v, rhs.v));
#else
return float4(x + rhs.x, y + rhs.y, z + rhs.z, w + rhs.w);
#endif
}
float4 float4::operator -(const float4 &rhs) const
{
#ifdef MATH_SSE
return float4(_mm_sub_ps(v, rhs.v));
#else
return float4(x - rhs.x, y - rhs.y, z - rhs.z, w - rhs.w);
#endif
}
float4 float4::operator -() const
{
#ifdef MATH_SSE
__m128 zero = _mm_setzero_ps();
return float4(_mm_sub_ps(zero, v));
#else
return float4(-x, -y, -z, -w);
#endif
}
float4 float4::operator *(float scalar) const
{
#ifdef MATH_SSE
__m128 scale = _mm_set1_ps(scalar);
return float4(_mm_mul_ps(v, scale));
#else
return float4(x * scalar, y * scalar, z * scalar, w * scalar);
#endif
}
float4 operator *(float scalar, const float4 &rhs)
{
#ifdef MATH_SSE
__m128 scale = _mm_set1_ps(scalar);
return float4(_mm_mul_ps(scale, rhs.v));
#else
return float4(scalar * rhs.x, scalar * rhs.y, scalar * rhs.z, scalar * rhs.w);
#endif
}
float4 float4::operator /(float scalar) const
{
#ifdef MATH_SSE
__m128 scale = _mm_set1_ps(scalar);
return float4(_mm_div_ps(v, scale));
#else
float invScalar = 1.f / scalar;
return float4(x * invScalar, y * invScalar, z * invScalar, w * invScalar);
#endif
}
float4 &float4::operator +=(const float4 &rhs)
{
#ifdef MATH_SSE
v = _mm_add_ps(v, rhs.v);
#else
x += rhs.x;
y += rhs.y;
z += rhs.z;
w += rhs.w;
#endif
return *this;
}
float4 &float4::operator -=(const float4 &rhs)
{
#ifdef MATH_SSE
v = _mm_sub_ps(v, rhs.v);
#else
x -= rhs.x;
y -= rhs.y;
z -= rhs.z;
w -= rhs.w;
#endif
return *this;
}
float4 &float4::operator *=(float scalar)
{
#ifdef MATH_SSE
__m128 scale = _mm_set1_ps(scalar);
v = _mm_mul_ps(v, scale);
#else
x *= scalar;
y *= scalar;
z *= scalar;
w *= scalar;
#endif
return *this;
}
float4 &float4::operator /=(float scalar)
{
#ifdef MATH_SSE
__m128 v2 = _mm_set1_ps(scalar);
v = _mm_div_ps(v, v2);
#else
float invScalar = 1.f / scalar;
x *= invScalar;
y *= invScalar;
z *= invScalar;
w *= invScalar;
#endif
return *this;
}
float4 float4::Add(float s) const
{
#ifdef MATH_SSE
__m128 v2 = _mm_set1_ps(s);
return float4(_mm_add_ps(v, v2));
#else
return float4(x + s, y + s, z + s, w + s);
#endif
}
float4 float4::Sub(float s) const
{
#ifdef MATH_SSE
__m128 v2 = _mm_set1_ps(s);
return float4(_mm_sub_ps(v, v2));
#else
return float4(x - s, y - s, z - s, w - s);
#endif
}
float4 float4::SubLeft(float s) const
{
#ifdef MATH_SSE
__m128 v2 = _mm_set1_ps(s);
return float4(_mm_sub_ps(v2, v));
#else
return float4(s - x, s - y, s - z, s - w);
#endif
}
float4 float4::DivLeft(float s) const
{
#ifdef MATH_SSE
__m128 v2 = _mm_set1_ps(s);
return float4(_mm_div_ps(v2, v));
#else
return float4(s / x, s / y, s / z, s / w);
#endif
}
float4 float4::Mul(const float4 &rhs) const
{
#ifdef MATH_SSE
return float4(_mm_mul_ps(v, rhs.v));
#else
return float4(x * rhs.x, y * rhs.y, z * rhs.z, w * rhs.w);
#endif
}
float4 float4::Div(const float4 &rhs) const
{
#ifdef MATH_SSE
return float4(_mm_div_ps(v, rhs.v));
#else
return float4(x / rhs.x, y / rhs.y, z / rhs.z, w / rhs.w);
#endif
}
#ifdef MATH_ENABLE_STL_SUPPORT
std::ostream &operator <<(std::ostream &out, const float4 &rhs)
{
std::string str = rhs.ToString();
out << str;
return out;
}
#endif
// Commonly used constant vectors, defined once per program.
const float4 float4::zero = float4(0, 0, 0, 0);
const float4 float4::one = float4(1, 1, 1, 1);
// Cardinal axis vectors; only unitW has a nonzero w component.
const float4 float4::unitX = float4(1, 0, 0, 0);
const float4 float4::unitY = float4(0, 1, 0, 0);
const float4 float4::unitZ = float4(0, 0, 1, 0);
const float4 float4::unitW = float4(0, 0, 0, 1);
// Vectors with every component set to NaN / +infinity (macros defined
// elsewhere in the library).
const float4 float4::nan = float4(FLOAT_NAN, FLOAT_NAN, FLOAT_NAN, FLOAT_NAN);
const float4 float4::inf = float4(FLOAT_INF, FLOAT_INF, FLOAT_INF, FLOAT_INF);
MATH_END_NAMESPACE
| jesterKing/naali | src/Core/Math/Math/float4.cpp | C++ | apache-2.0 | 25,128 |
<h2> Execution '-1' of flow 'mail-creator-test' has succeeded on unit-tests</h2><table><tr><td>Start Time</td><td>2016/07/17 11:54:11 EEST</td></tr><tr><td>End Time</td><td>2016/07/17 11:54:16 EEST</td></tr><tr><td>Duration</td><td>5 sec</td></tr><tr><td>Status</td><td>SUCCEEDED</td></tr></table><a href="http://localhost:8081/executor?execid=-1">mail-creator-test Execution Link</a>
| mariacioffi/azkaban | azkaban-common/src/test/resources/azkaban/executor/mail/successEmail.html | HTML | apache-2.0 | 385 |
// Copyright (c) Microsoft. All Rights Reserved. Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
using System.Collections.Immutable;
using Microsoft.CodeAnalysis.CSharp.CodeStyle;
using Microsoft.CodeAnalysis.CSharp.Syntax;
using Microsoft.CodeAnalysis.Diagnostics;
namespace Microsoft.CodeAnalysis.CSharp.UseExpressionBody
{
    /// <summary>
    /// Analyzer that reports constructors which could be converted between
    /// block bodies and expression bodies, driven by the
    /// PreferExpressionBodiedConstructors code-style option.
    /// </summary>
    [DiagnosticAnalyzer(LanguageNames.CSharp)]
    internal class UseExpressionBodyForConstructorsDiagnosticAnalyzer :
        AbstractUseExpressionBodyDiagnosticAnalyzer<ConstructorDeclarationSyntax>
    {
        public UseExpressionBodyForConstructorsDiagnosticAnalyzer()
            : base(IDEDiagnosticIds.UseExpressionBodyForConstructorsDiagnosticId,
                   new LocalizableResourceString(nameof(FeaturesResources.Use_expression_body_for_constructors), FeaturesResources.ResourceManager, typeof(FeaturesResources)),
                   new LocalizableResourceString(nameof(FeaturesResources.Use_block_body_for_constructors), FeaturesResources.ResourceManager, typeof(FeaturesResources)),
                   ImmutableArray.Create(SyntaxKind.ConstructorDeclaration),
                   CSharpCodeStyleOptions.PreferExpressionBodiedConstructors)
        {
        }
        /// <summary>Returns the block body of the constructor, if any.</summary>
        protected override BlockSyntax GetBody(ConstructorDeclarationSyntax declaration)
            => declaration.Body;
        /// <summary>Returns the expression (arrow) body of the constructor, if any.</summary>
        protected override ArrowExpressionClauseSyntax GetExpressionBody(ConstructorDeclarationSyntax declaration)
            => declaration.ExpressionBody;
    }
} | bbarry/roslyn | src/Features/CSharp/Portable/UseExpressionBody/Constructors/UseExpressionBodyForConstructorsDiagnosticAnalyzer.cs | C# | apache-2.0 | 1,563 |
'''
@author: frank
'''
import unittest
import time
import threading
from ..utils.thread import ThreadFacade
from ..utils.thread import AsyncThread
class TestThreadFacade(unittest.TestCase):
    """Unit tests for ThreadFacade.run_in_thread and the @AsyncThread decorator."""

    def _do(self, name, value=None):
        """Target callable run in a worker thread; records its arguments."""
        self.ok = name
        self.value = value

    def test_run_in_thread(self):
        """run_in_thread passes positional and keyword args to the target."""
        t = ThreadFacade.run_in_thread(self._do, ["ok"], {"value": "world"})
        t.join()
        self.assertEqual("ok", self.ok)
        self.assertEqual("world", self.value)

    @AsyncThread
    def _do_async(self, ok, value=None):
        """Decorated method; records its arguments and the worker thread name."""
        self.async_thread_name = threading.current_thread().getName()
        self.async_ok = ok
        self.async_value = value

    def test_async_thread(self):
        """@AsyncThread runs the method in a thread named after the method."""
        t = self._do_async("ok", value="world")
        t.join()
        self.assertEqual('_do_async', self.async_thread_name)
        self.assertEqual("ok", self.async_ok)
        self.assertEqual("world", self.async_value)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    #import sys;sys.argv = ['', 'Test.testName']
    unittest.main() | zstackorg/zstack-utility | zstacklib/zstacklib/test/test_thread.py | Python | apache-2.0 | 1,064
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.s4.deploy;
import org.I0Itec.zkclient.ZkClient;
import org.apache.s4.base.Event;
import org.apache.s4.base.KeyFinder;
import org.apache.s4.core.App;
import org.apache.s4.core.Stream;
import org.apache.zookeeper.CreateMode;
import com.google.common.collect.ImmutableList;
/**
 * Minimal S4 application used by deployment tests: on initialization it
 * creates a PE/stream pipeline and publishes a ZooKeeper znode that the
 * test harness watches to detect that the app came up.
 */
public class TestApp extends App {
    private ZkClient zkClient;
    @Override
    protected void onClose() {
        // TODO Auto-generated method stub
    }
    @Override
    protected void onInit() {
        try {
            SimplePE prototype = createPE(SimplePE.class);
            // NOTE(review): 'stream' is never used after creation; presumably
            // createInputStream registers it internally — confirm.
            Stream<Event> stream = createInputStream("inputStream", new KeyFinder<Event>() {
                // Key every event on its "line" attribute.
                public java.util.List<String> get(Event event) {
                    return ImmutableList.of("line");
                }
            }, prototype);
            // Connect to a local ZooKeeper and signal readiness via an
            // ephemeral znode (removed automatically when this process dies).
            zkClient = new ZkClient("localhost:" + 2181);
            if (!zkClient.exists("/s4-test")) {
                zkClient.create("/s4-test", null, CreateMode.PERSISTENT);
            }
            zkClient.createEphemeral(AppConstants.INITIALIZED_ZNODE_1, null);
        } catch (Exception e) {
            // Any failure aborts the whole test process (no logging here).
            System.exit(-1);
        }
    }
    @Override
    protected void onStart() {
        try {
            // Reflectively instantiate the helper class "A", handing it the
            // already-open ZooKeeper client.
            Class.forName("org.apache.s4.deploy.A").getConstructor(ZkClient.class).newInstance(zkClient);
        } catch (Exception e) {
            // Any failure aborts the whole test process (no logging here).
            System.exit(-1);
        }
    }
}
| coolwuxing/DynamicS4 | test-apps/simple-deployable-app-1/src/main/java/org/apache/s4/deploy/TestApp.java | Java | apache-2.0 | 2,232 |
/**
* Copyright 2010 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.waveprotocol.wave.model.id;
/**
* Checked exception indicating that a serialised wave or wavelet id is
* invalid.
*
* @author [email protected] (Alex North)
*/
public class InvalidIdException extends Exception {
  // The offending serialised id, retained for programmatic inspection.
  private final String id;
  /**
   * @param id the invalid serialised id
   * @param message description of why the id is invalid
   */
  public InvalidIdException(String id, String message) {
    super(message);
    this.id = id;
  }
  /** Returns the invalid serialised id that caused this exception. */
  public String getId() {
    return id;
  }
  /** Prefixes the underlying message with the offending id for context. */
  @Override
  public String getMessage() {
    return "Invalid id '" + id + "': " + super.getMessage();
  }
} | JaredMiller/Wave | src/org/waveprotocol/wave/model/id/InvalidIdException.java | Java | apache-2.0 | 1,113
//---------------------------------------------------------- -*- Mode: C++ -*-
// $Id$
//
// Created 2013/9/9
// Author: Mike Ovsiannikov
//
// Copyright 2013 Quantcast Corp.
//
// This file is part of Kosmos File System (KFS).
//
// Licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.
//
//
//----------------------------------------------------------------------------
#include "Base64.h"
#include <openssl/evp.h>
namespace KFS
{
// Base64-encodes inBufLength bytes from inBufPtr into inEncodedBufPtr.
// Thin wrapper over OpenSSL's EVP_EncodeBlock; the caller must ensure
// inEncodedBufPtr is large enough for the encoded output (see
// Base64::GetEncodedMaxBufSize elsewhere in this library — TODO confirm).
// Returns the number of characters written, as reported by OpenSSL.
int
Base64::Encode(
    const char* inBufPtr,
    int inBufLength,
    char* inEncodedBufPtr)
{
    return EVP_EncodeBlock(
        reinterpret_cast<unsigned char*>(inEncodedBufPtr),
        reinterpret_cast<const unsigned char*>(inBufPtr),
        inBufLength
    );
}
// Decodes inBufLength base64 characters from inBufPtr into inDecodedBufPtr.
// EVP_DecodeBlock always decodes in whole 3-byte groups and therefore counts
// the '=' padding bytes as data; this wrapper subtracts the padding from the
// returned length. Returns OpenSSL's (possibly negative) result unchanged
// when it is too small for padding adjustment to apply.
int
Base64::Decode(
    const char* inBufPtr,
    int inBufLength,
    char* inDecodedBufPtr)
{
    // Count trailing '=' characters (at most two in valid base64 input).
    // The two checks are intentionally independent, matching the historical
    // behaviour for malformed inputs.
    int theEqualsCount = 0;
    if (3 < inBufLength) {
        if (inBufPtr[inBufLength - 1] == '=') {
            theEqualsCount++;
        }
        if (inBufPtr[inBufLength - 2] == '=') {
            theEqualsCount++;
        }
    }
    const int theDecodedLen = EVP_DecodeBlock(
        reinterpret_cast<unsigned char*>(inDecodedBufPtr),
        reinterpret_cast<const unsigned char*>(inBufPtr),
        inBufLength
    );
    return (theDecodedLen > 2 ? theDecodedLen - theEqualsCount : theDecodedLen);
}
}
| thebigbrain/qfs | src/cc/kfsio/Base64.cc | C++ | apache-2.0 | 1,802 |
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!--NewPage-->
<HTML>
<HEAD>
<!-- Generated by javadoc (build 1.6.0_13) on Wed Aug 05 08:53:07 ICT 2009 -->
<META http-equiv="Content-Type" content="text/html; charset=utf8">
<TITLE>
BeanInstantiationInterceptor
</TITLE>
<META NAME="date" CONTENT="2009-08-05">
<LINK REL ="stylesheet" TYPE="text/css" HREF="../../../../stylesheet.css" TITLE="Style">
<SCRIPT type="text/javascript">
function windowTitle()
{
if (location.href.indexOf('is-external=true') == -1) {
parent.document.title="BeanInstantiationInterceptor";
}
}
</SCRIPT>
<NOSCRIPT>
</NOSCRIPT>
</HEAD>
<BODY BGCOLOR="white" onload="windowTitle();">
<HR>
<!-- ========= START OF TOP NAVBAR ======= -->
<A NAME="navbar_top"><!-- --></A>
<A HREF="#skip-navbar_top" title="Skip navigation links"></A>
<TABLE BORDER="0" WIDTH="100%" CELLPADDING="1" CELLSPACING="0" SUMMARY="">
<TR>
<TD COLSPAN=2 BGCOLOR="#EEEEFF" CLASS="NavBarCell1">
<A NAME="navbar_top_firstrow"><!-- --></A>
<TABLE BORDER="0" CELLPADDING="0" CELLSPACING="3" SUMMARY="">
<TR ALIGN="center" VALIGN="top">
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../overview-summary.html"><FONT CLASS="NavBarFont1"><B>Overview</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="package-summary.html"><FONT CLASS="NavBarFont1"><B>Package</B></FONT></A> </TD>
<TD BGCOLOR="#FFFFFF" CLASS="NavBarCell1Rev"> <FONT CLASS="NavBarFont1Rev"><B>Class</B></FONT> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="class-use/BeanInstantiationInterceptor.html"><FONT CLASS="NavBarFont1"><B>Use</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="package-tree.html"><FONT CLASS="NavBarFont1"><B>Tree</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../deprecated-list.html"><FONT CLASS="NavBarFont1"><B>Deprecated</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../index-files/index-1.html"><FONT CLASS="NavBarFont1"><B>Index</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../help-doc.html"><FONT CLASS="NavBarFont1"><B>Help</B></FONT></A> </TD>
</TR>
</TABLE>
</TD>
<TD ALIGN="right" VALIGN="top" ROWSPAN=3><EM>
</EM>
</TD>
</TR>
<TR>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
<A HREF="../../../../org/jgentleframework/core/intercept/BasicMethodInvocation.html" title="class in org.jgentleframework.core.intercept"><B>PREV CLASS</B></A>
<A HREF="../../../../org/jgentleframework/core/intercept/InstantiationInterceptor.html" title="interface in org.jgentleframework.core.intercept"><B>NEXT CLASS</B></A></FONT></TD>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
<A HREF="../../../../index.html?org/jgentleframework/core/intercept/BeanInstantiationInterceptor.html" target="_top"><B>FRAMES</B></A>
<A HREF="BeanInstantiationInterceptor.html" target="_top"><B>NO FRAMES</B></A>
<SCRIPT type="text/javascript">
<!--
if(window==top) {
document.writeln('<A HREF="../../../../allclasses-noframe.html"><B>All Classes</B></A>');
}
//-->
</SCRIPT>
<NOSCRIPT>
<A HREF="../../../../allclasses-noframe.html"><B>All Classes</B></A>
</NOSCRIPT>
</FONT></TD>
</TR>
<TR>
<TD VALIGN="top" CLASS="NavBarCell3"><FONT SIZE="-2">
SUMMARY: NESTED | FIELD | CONSTR | <A HREF="#method_summary">METHOD</A></FONT></TD>
<TD VALIGN="top" CLASS="NavBarCell3"><FONT SIZE="-2">
DETAIL: FIELD | CONSTR | <A HREF="#method_detail">METHOD</A></FONT></TD>
</TR>
</TABLE>
<A NAME="skip-navbar_top"></A>
<!-- ========= END OF TOP NAVBAR ========= -->
<HR>
<!-- ======== START OF CLASS DATA ======== -->
<H2>
<FONT SIZE="-1">
org.jgentleframework.core.intercept</FONT>
<BR>
Interface BeanInstantiationInterceptor</H2>
<DL>
<DT><B>All Superinterfaces:</B> <DD><A HREF="../../../../org/aopalliance/aop/Advice.html" title="interface in org.aopalliance.aop">Advice</A>, <A HREF="../../../../org/jgentleframework/core/intercept/InstantiationInterceptor.html" title="interface in org.jgentleframework.core.intercept">InstantiationInterceptor</A>, <A HREF="../../../../org/aopalliance/intercept/Interceptor.html" title="interface in org.aopalliance.intercept">Interceptor</A></DD>
</DL>
<DL>
<DT><B>All Known Implementing Classes:</B> <DD><A HREF="../../../../org/jgentleframework/integration/remoting/rmi/context/RmiBindingInstantiationInterceptor.html" title="class in org.jgentleframework.integration.remoting.rmi.context">RmiBindingInstantiationInterceptor</A>, <A HREF="../../../../org/jgentleframework/integration/scripting/ScriptingInstantiationInterceptor.html" title="class in org.jgentleframework.integration.scripting">ScriptingInstantiationInterceptor</A></DD>
</DL>
<HR>
<DL>
<DT><PRE>public interface <B>BeanInstantiationInterceptor</B><DT>extends <A HREF="../../../../org/jgentleframework/core/intercept/InstantiationInterceptor.html" title="interface in org.jgentleframework.core.intercept">InstantiationInterceptor</A></DL>
</PRE>
<P>
BeanInstantiationInterceptor is an extension of
<A HREF="../../../../org/jgentleframework/core/intercept/InstantiationInterceptor.html" title="interface in org.jgentleframework.core.intercept"><CODE>InstantiationInterceptor</CODE></A>.
<P>
<P>
<DL>
<DT><B>Author:</B></DT>
<DD>LE QUOC CHUNG - mailto: <a
href="mailto:[email protected]">[email protected]</a></DD>
</DL>
<HR>
<P>
<!-- ========== METHOD SUMMARY =========== -->
<A NAME="method_summary"><!-- --></A>
<TABLE BORDER="1" WIDTH="100%" CELLPADDING="3" CELLSPACING="0" SUMMARY="">
<TR BGCOLOR="#CCCCFF" CLASS="TableHeadingColor">
<TH ALIGN="left" COLSPAN="2"><FONT SIZE="+2">
<B>Method Summary</B></FONT></TH>
</TR>
<TR BGCOLOR="white" CLASS="TableRowColor">
<TD ALIGN="right" VALIGN="top" WIDTH="1%"><FONT SIZE="-1">
<CODE> boolean</CODE></FONT></TD>
<TD><CODE><B><A HREF="../../../../org/jgentleframework/core/intercept/BeanInstantiationInterceptor.html#isSupportedByCore()">isSupportedByCore</A></B>()</CODE>
<BR>
Checks if is supported by core.</TD>
</TR>
</TABLE>
<A NAME="methods_inherited_from_class_org.jgentleframework.core.intercept.InstantiationInterceptor"><!-- --></A>
<TABLE BORDER="1" WIDTH="100%" CELLPADDING="3" CELLSPACING="0" SUMMARY="">
<TR BGCOLOR="#EEEEFF" CLASS="TableSubHeadingColor">
<TH ALIGN="left"><B>Methods inherited from interface org.jgentleframework.core.intercept.<A HREF="../../../../org/jgentleframework/core/intercept/InstantiationInterceptor.html" title="interface in org.jgentleframework.core.intercept">InstantiationInterceptor</A></B></TH>
</TR>
<TR BGCOLOR="white" CLASS="TableRowColor">
<TD><CODE><A HREF="../../../../org/jgentleframework/core/intercept/InstantiationInterceptor.html#instantiate(org.jgentleframework.core.intercept.ObjectInstantiation)">instantiate</A></CODE></TD>
</TR>
</TABLE>
<P>
<!-- ============ METHOD DETAIL ========== -->
<A NAME="method_detail"><!-- --></A>
<TABLE BORDER="1" WIDTH="100%" CELLPADDING="3" CELLSPACING="0" SUMMARY="">
<TR BGCOLOR="#CCCCFF" CLASS="TableHeadingColor">
<TH ALIGN="left" COLSPAN="1"><FONT SIZE="+2">
<B>Method Detail</B></FONT></TH>
</TR>
</TABLE>
<A NAME="isSupportedByCore()"><!-- --></A><H3>
isSupportedByCore</H3>
<PRE>
boolean <B>isSupportedByCore</B>()</PRE>
<DL>
<DD>Checks if is supported by core.
<P>
<DD><DL>
</DL>
</DD>
<DD><DL>
<DT><B>Returns:</B><DD><b>true</b>, if is supported by core</DL>
</DD>
</DL>
<!-- ========= END OF CLASS DATA ========= -->
<HR>
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<A NAME="navbar_bottom"><!-- --></A>
<A HREF="#skip-navbar_bottom" title="Skip navigation links"></A>
<TABLE BORDER="0" WIDTH="100%" CELLPADDING="1" CELLSPACING="0" SUMMARY="">
<TR>
<TD COLSPAN=2 BGCOLOR="#EEEEFF" CLASS="NavBarCell1">
<A NAME="navbar_bottom_firstrow"><!-- --></A>
<TABLE BORDER="0" CELLPADDING="0" CELLSPACING="3" SUMMARY="">
<TR ALIGN="center" VALIGN="top">
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../overview-summary.html"><FONT CLASS="NavBarFont1"><B>Overview</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="package-summary.html"><FONT CLASS="NavBarFont1"><B>Package</B></FONT></A> </TD>
<TD BGCOLOR="#FFFFFF" CLASS="NavBarCell1Rev"> <FONT CLASS="NavBarFont1Rev"><B>Class</B></FONT> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="class-use/BeanInstantiationInterceptor.html"><FONT CLASS="NavBarFont1"><B>Use</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="package-tree.html"><FONT CLASS="NavBarFont1"><B>Tree</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../deprecated-list.html"><FONT CLASS="NavBarFont1"><B>Deprecated</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../index-files/index-1.html"><FONT CLASS="NavBarFont1"><B>Index</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../help-doc.html"><FONT CLASS="NavBarFont1"><B>Help</B></FONT></A> </TD>
</TR>
</TABLE>
</TD>
<TD ALIGN="right" VALIGN="top" ROWSPAN=3><EM>
</EM>
</TD>
</TR>
<TR>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
<A HREF="../../../../org/jgentleframework/core/intercept/BasicMethodInvocation.html" title="class in org.jgentleframework.core.intercept"><B>PREV CLASS</B></A>
<A HREF="../../../../org/jgentleframework/core/intercept/InstantiationInterceptor.html" title="interface in org.jgentleframework.core.intercept"><B>NEXT CLASS</B></A></FONT></TD>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
<A HREF="../../../../index.html?org/jgentleframework/core/intercept/BeanInstantiationInterceptor.html" target="_top"><B>FRAMES</B></A>
<A HREF="BeanInstantiationInterceptor.html" target="_top"><B>NO FRAMES</B></A>
<SCRIPT type="text/javascript">
<!--
if(window==top) {
document.writeln('<A HREF="../../../../allclasses-noframe.html"><B>All Classes</B></A>');
}
//-->
</SCRIPT>
<NOSCRIPT>
<A HREF="../../../../allclasses-noframe.html"><B>All Classes</B></A>
</NOSCRIPT>
</FONT></TD>
</TR>
<TR>
<TD VALIGN="top" CLASS="NavBarCell3"><FONT SIZE="-2">
SUMMARY: NESTED | FIELD | CONSTR | <A HREF="#method_summary">METHOD</A></FONT></TD>
<TD VALIGN="top" CLASS="NavBarCell3"><FONT SIZE="-2">
DETAIL: FIELD | CONSTR | <A HREF="#method_detail">METHOD</A></FONT></TD>
</TR>
</TABLE>
<A NAME="skip-navbar_bottom"></A>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
<HR>
</BODY>
</HTML>
| haint/jgentle | doc/org/jgentleframework/core/intercept/BeanInstantiationInterceptor.html | HTML | apache-2.0 | 11,146 |
package fr.free.nrw.commons.contributions;
import android.view.View;
import android.widget.ProgressBar;
import android.widget.TextView;
import fr.free.nrw.commons.MediaWikiImageView;
import fr.free.nrw.commons.R;
/**
 * View-holder caching the child views of a single contribution list row,
 * so repeated {@code findViewById} lookups are avoided during list scrolling.
 */
class ContributionViewHolder {
    final MediaWikiImageView imageView;
    final TextView titleView;
    final TextView stateView;
    final TextView seqNumView;
    final ProgressBar progressView;

    /** Looks up and caches all child views of the given row view. */
    ContributionViewHolder(View parent) {
        this.imageView = (MediaWikiImageView) parent.findViewById(R.id.contributionImage);
        this.titleView = (TextView) parent.findViewById(R.id.contributionTitle);
        this.stateView = (TextView) parent.findViewById(R.id.contributionState);
        this.seqNumView = (TextView) parent.findViewById(R.id.contributionSequenceNumber);
        this.progressView = (ProgressBar) parent.findViewById(R.id.contributionProgress);
    }
}
| RSBat/apps-android-commons | app/src/main/java/fr/free/nrw/commons/contributions/ContributionViewHolder.java | Java | apache-2.0 | 870 |
/*
* Copyright 2012 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.channel;
import ch.qos.logback.classic.Logger;
import ch.qos.logback.classic.spi.ILoggingEvent;
import ch.qos.logback.core.Appender;
import io.netty.channel.local.LocalChannel;
import io.netty.util.concurrent.EventExecutor;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Queue;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executors;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import static org.hamcrest.Matchers.*;
import static org.junit.Assert.*;
/**
 * Tests task execution, scheduling and (graceful) shutdown behaviour of
 * {@link SingleThreadEventLoop}, using two minimal concrete subclasses:
 * one driven by the blocking task queue (A) and one woken up via thread
 * interruption (B).
 */
public class SingleThreadEventLoopTest {
    // A no-op task, used where only submission/rejection matters.
    private static final Runnable NOOP = new Runnable() {
        @Override
        public void run() { }
    };
    private SingleThreadEventLoopA loopA;
    private SingleThreadEventLoopB loopB;
    @Before
    public void newEventLoop() {
        loopA = new SingleThreadEventLoopA();
        loopB = new SingleThreadEventLoopB();
    }
    // Shuts both loops down and blocks until they terminate, verifying that
    // loop A's cleanup() hook ran exactly once.
    @After
    public void stopEventLoop() {
        if (!loopA.isShuttingDown()) {
            loopA.shutdownGracefully(0, 0, TimeUnit.MILLISECONDS);
        }
        if (!loopB.isShuttingDown()) {
            loopB.shutdownGracefully(0, 0, TimeUnit.MILLISECONDS);
        }
        while (!loopA.isTerminated()) {
            try {
                loopA.awaitTermination(1, TimeUnit.DAYS);
            } catch (InterruptedException e) {
                // Ignore
            }
        }
        assertEquals(1, loopA.cleanedUp.get());
        while (!loopB.isTerminated()) {
            try {
                loopB.awaitTermination(1, TimeUnit.DAYS);
            } catch (InterruptedException e) {
                // Ignore
            }
        }
    }
    @Test
    @SuppressWarnings("deprecation")
    public void shutdownBeforeStart() throws Exception {
        loopA.shutdown();
        assertRejection(loopA);
    }
    @Test
    @SuppressWarnings("deprecation")
    public void shutdownAfterStart() throws Exception {
        final CountDownLatch latch = new CountDownLatch(1);
        loopA.execute(new Runnable() {
            @Override
            public void run() {
                latch.countDown();
            }
        });
        // Wait for the event loop thread to start.
        latch.await();
        // Request the event loop thread to stop.
        loopA.shutdown();
        assertRejection(loopA);
        assertTrue(loopA.isShutdown());
        // Wait until the event loop is terminated.
        while (!loopA.isTerminated()) {
            loopA.awaitTermination(1, TimeUnit.DAYS);
        }
    }
    // Asserts that submitting a task to a shut-down executor is rejected.
    private static void assertRejection(EventExecutor loop) {
        try {
            loop.execute(NOOP);
            fail("A task must be rejected after shutdown() is called.");
        } catch (RejectedExecutionException e) {
            // Expected
        }
    }
    @Test
    public void scheduleTaskA() throws Exception {
        testScheduleTask(loopA);
    }
    @Test
    public void scheduleTaskB() throws Exception {
        testScheduleTask(loopB);
    }
    // A one-shot scheduled task must not run before its 500 ms delay elapses.
    private static void testScheduleTask(EventLoop loopA) throws InterruptedException, ExecutionException {
        long startTime = System.nanoTime();
        final AtomicLong endTime = new AtomicLong();
        loopA.schedule(new Runnable() {
            @Override
            public void run() {
                endTime.set(System.nanoTime());
            }
        }, 500, TimeUnit.MILLISECONDS).get();
        assertThat(endTime.get() - startTime,
                   is(greaterThanOrEqualTo(TimeUnit.MILLISECONDS.toNanos(500))));
    }
    @Test
    public void scheduleTaskAtFixedRateA() throws Exception {
        testScheduleTaskAtFixedRate(loopA);
    }
    @Test
    public void scheduleTaskAtFixedRateB() throws Exception {
        testScheduleTaskAtFixedRate(loopB);
    }
    // A fixed-rate task that finishes faster than its period must fire once
    // per period, without accumulating lag.
    private static void testScheduleTaskAtFixedRate(EventLoop loopA) throws InterruptedException {
        final Queue<Long> timestamps = new LinkedBlockingQueue<Long>();
        ScheduledFuture<?> f = loopA.scheduleAtFixedRate(new Runnable() {
            @Override
            public void run() {
                timestamps.add(System.nanoTime());
                try {
                    Thread.sleep(50);
                } catch (InterruptedException e) {
                    // Ignore
                }
            }
        }, 100, 100, TimeUnit.MILLISECONDS);
        Thread.sleep(550);
        assertTrue(f.cancel(true));
        assertEquals(5, timestamps.size());
        // Check if the task was run without a lag.
        Long firstTimestamp = null;
        int cnt = 0;
        for (Long t: timestamps) {
            if (firstTimestamp == null) {
                firstTimestamp = t;
                continue;
            }
            long timepoint = t - firstTimestamp;
            assertThat(timepoint, is(greaterThanOrEqualTo(TimeUnit.MILLISECONDS.toNanos(100 * cnt + 80))));
            assertThat(timepoint, is(lessThan(TimeUnit.MILLISECONDS.toNanos(100 * (cnt + 1) + 20))));
            cnt ++;
        }
    }
    @Test
    public void scheduleLaggyTaskAtFixedRateA() throws Exception {
        testScheduleLaggyTaskAtFixedRate(loopA);
    }
    @Test
    public void scheduleLaggyTaskAtFixedRateB() throws Exception {
        testScheduleLaggyTaskAtFixedRate(loopB);
    }
    // A fixed-rate task whose first run overruns several periods must be
    // fired back-to-back afterwards to catch up on the missed runs.
    private static void testScheduleLaggyTaskAtFixedRate(EventLoop loopA) throws InterruptedException {
        final Queue<Long> timestamps = new LinkedBlockingQueue<Long>();
        ScheduledFuture<?> f = loopA.scheduleAtFixedRate(new Runnable() {
            @Override
            public void run() {
                boolean empty = timestamps.isEmpty();
                timestamps.add(System.nanoTime());
                if (empty) {
                    try {
                        Thread.sleep(401);
                    } catch (InterruptedException e) {
                        // Ignore
                    }
                }
            }
        }, 100, 100, TimeUnit.MILLISECONDS);
        Thread.sleep(550);
        assertTrue(f.cancel(true));
        assertEquals(5, timestamps.size());
        // Check if the task was run with lag.
        int i = 0;
        Long previousTimestamp = null;
        for (Long t: timestamps) {
            if (previousTimestamp == null) {
                previousTimestamp = t;
                continue;
            }
            long diff = t.longValue() - previousTimestamp.longValue();
            if (i == 0) {
                assertThat(diff, is(greaterThanOrEqualTo(TimeUnit.MILLISECONDS.toNanos(400))));
            } else {
                assertThat(diff, is(lessThanOrEqualTo(TimeUnit.MILLISECONDS.toNanos(10))));
            }
            previousTimestamp = t;
            i ++;
        }
    }
    @Test
    public void scheduleTaskWithFixedDelayA() throws Exception {
        testScheduleTaskWithFixedDelay(loopA);
    }
    @Test
    public void scheduleTaskWithFixedDelayB() throws Exception {
        testScheduleTaskWithFixedDelay(loopB);
    }
    // A fixed-delay task must wait the full delay AFTER each run completes,
    // so period = task duration + delay.
    private static void testScheduleTaskWithFixedDelay(EventLoop loopA) throws InterruptedException {
        final Queue<Long> timestamps = new LinkedBlockingQueue<Long>();
        ScheduledFuture<?> f = loopA.scheduleWithFixedDelay(new Runnable() {
            @Override
            public void run() {
                timestamps.add(System.nanoTime());
                try {
                    Thread.sleep(51);
                } catch (InterruptedException e) {
                    // Ignore
                }
            }
        }, 100, 100, TimeUnit.MILLISECONDS);
        Thread.sleep(500);
        assertTrue(f.cancel(true));
        assertEquals(3, timestamps.size());
        // Check if the task was run without a lag.
        Long previousTimestamp = null;
        for (Long t: timestamps) {
            if (previousTimestamp == null) {
                previousTimestamp = t;
                continue;
            }
            assertThat(t.longValue() - previousTimestamp.longValue(),
                       is(greaterThanOrEqualTo(TimeUnit.MILLISECONDS.toNanos(150))));
            previousTimestamp = t;
        }
    }
    // Tasks already queued when shutdown() is called must still be executed
    // before the loop terminates.
    @Test
    @SuppressWarnings("deprecation")
    public void shutdownWithPendingTasks() throws Exception {
        final int NUM_TASKS = 3;
        final AtomicInteger ranTasks = new AtomicInteger();
        final CountDownLatch latch = new CountDownLatch(1);
        final Runnable task = new Runnable() {
            @Override
            public void run() {
                ranTasks.incrementAndGet();
                while (latch.getCount() > 0) {
                    try {
                        latch.await();
                    } catch (InterruptedException e) {
                        // Ignored
                    }
                }
            }
        };
        for (int i = 0; i < NUM_TASKS; i ++) {
            loopA.execute(task);
        }
        // At this point, the first task should be running and stuck at latch.await().
        while (ranTasks.get() == 0) {
            Thread.yield();
        }
        assertEquals(1, ranTasks.get());
        // Shut down the event loop to test if the other tasks are run before termination.
        loopA.shutdown();
        // Let the other tasks run.
        latch.countDown();
        // Wait until the event loop is terminated.
        while (!loopA.isTerminated()) {
            loopA.awaitTermination(1, TimeUnit.DAYS);
        }
        // Make sure loop.shutdown() above triggered wakeup().
        assertEquals(NUM_TASKS, ranTasks.get());
    }
    // Registering a channel after shutdown must fail the returned future with
    // RejectedExecutionException and close the channel.
    @Test(timeout = 10000)
    @SuppressWarnings("deprecation")
    public void testRegistrationAfterShutdown() throws Exception {
        loopA.shutdown();
        // Disable logging temporarily.
        Logger root = (Logger) LoggerFactory.getLogger(org.slf4j.Logger.ROOT_LOGGER_NAME);
        List<Appender<ILoggingEvent>> appenders = new ArrayList<Appender<ILoggingEvent>>();
        for (Iterator<Appender<ILoggingEvent>> i = root.iteratorForAppenders(); i.hasNext();) {
            Appender<ILoggingEvent> a = i.next();
            appenders.add(a);
            root.detachAppender(a);
        }
        try {
            ChannelFuture f = loopA.register(new LocalChannel());
            f.awaitUninterruptibly();
            assertFalse(f.isSuccess());
            assertThat(f.cause(), is(instanceOf(RejectedExecutionException.class)));
            assertFalse(f.channel().isOpen());
        } finally {
            for (Appender<ILoggingEvent> a: appenders) {
                root.addAppender(a);
            }
        }
    }
    // Same as above, but with a caller-supplied promise; the promise's
    // listener must still be notified of the failure.
    @Test(timeout = 10000)
    @SuppressWarnings("deprecation")
    public void testRegistrationAfterShutdown2() throws Exception {
        loopA.shutdown();
        final CountDownLatch latch = new CountDownLatch(1);
        Channel ch = new LocalChannel();
        ChannelPromise promise = ch.newPromise();
        promise.addListener(new ChannelFutureListener() {
            @Override
            public void operationComplete(ChannelFuture future) throws Exception {
                latch.countDown();
            }
        });
        // Disable logging temporarily.
        Logger root = (Logger) LoggerFactory.getLogger(org.slf4j.Logger.ROOT_LOGGER_NAME);
        List<Appender<ILoggingEvent>> appenders = new ArrayList<Appender<ILoggingEvent>>();
        for (Iterator<Appender<ILoggingEvent>> i = root.iteratorForAppenders(); i.hasNext();) {
            Appender<ILoggingEvent> a = i.next();
            appenders.add(a);
            root.detachAppender(a);
        }
        try {
            ChannelFuture f = loopA.register(ch, promise);
            f.awaitUninterruptibly();
            assertFalse(f.isSuccess());
            assertThat(f.cause(), is(instanceOf(RejectedExecutionException.class)));
            // Ensure the listener was notified.
            assertFalse(latch.await(1, TimeUnit.SECONDS));
            assertFalse(ch.isOpen());
        } finally {
            for (Appender<ILoggingEvent> a: appenders) {
                root.addAppender(a);
            }
        }
    }
    // The quiet period must keep extending while tasks keep arriving; the
    // loop may only terminate once at least the 1 s quiet period has passed.
    @Test(timeout = 5000)
    public void testGracefulShutdownQuietPeriod() throws Exception {
        loopA.shutdownGracefully(1, Integer.MAX_VALUE, TimeUnit.SECONDS);
        // Keep Scheduling tasks for another 2 seconds.
        for (int i = 0; i < 20; i ++) {
            Thread.sleep(100);
            loopA.execute(NOOP);
        }
        long startTime = System.nanoTime();
        assertThat(loopA.isShuttingDown(), is(true));
        assertThat(loopA.isShutdown(), is(false));
        while (!loopA.isTerminated()) {
            loopA.awaitTermination(Integer.MAX_VALUE, TimeUnit.SECONDS);
        }
        assertThat(System.nanoTime() - startTime,
                   is(greaterThanOrEqualTo(TimeUnit.SECONDS.toNanos(1))));
    }
    // After the shutdown timeout expires, further task submissions must be
    // rejected even if the quiet period was being extended.
    @Test(timeout = 5000)
    public void testGracefulShutdownTimeout() throws Exception {
        loopA.shutdownGracefully(2, 2, TimeUnit.SECONDS);
        // Keep Scheduling tasks for another 3 seconds.
        // Submitted tasks must be rejected after 2 second timeout.
        for (int i = 0; i < 10; i ++) {
            Thread.sleep(100);
            loopA.execute(NOOP);
        }
        try {
            for (int i = 0; i < 20; i ++) {
                Thread.sleep(100);
                loopA.execute(NOOP);
            }
            fail("shutdownGracefully() must reject a task after timeout.");
        } catch (RejectedExecutionException e) {
            // Expected
        }
        assertThat(loopA.isShuttingDown(), is(true));
        assertThat(loopA.isShutdown(), is(true));
    }
    // Event loop driven by the blocking task queue (addTaskWakesUp = true);
    // counts cleanup() invocations so the @After hook can verify them.
    private static class SingleThreadEventLoopA extends SingleThreadEventLoop {
        final AtomicInteger cleanedUp = new AtomicInteger();
        SingleThreadEventLoopA() {
            super(null, Executors.defaultThreadFactory(), true);
        }
        @Override
        protected void run() {
            for (;;) {
                Runnable task = takeTask();
                if (task != null) {
                    task.run();
                    updateLastExecutionTime();
                }
                if (confirmShutdown()) {
                    break;
                }
            }
        }
        @Override
        protected void cleanup() {
            cleanedUp.incrementAndGet();
        }
    }
    // Event loop that sleeps until the next scheduled task and relies on
    // thread interruption for wake-up (addTaskWakesUp = false).
    private static class SingleThreadEventLoopB extends SingleThreadEventLoop {
        SingleThreadEventLoopB() {
            super(null, Executors.defaultThreadFactory(), false);
        }
        @Override
        protected void run() {
            for (;;) {
                try {
                    Thread.sleep(TimeUnit.NANOSECONDS.toMillis(delayNanos(System.nanoTime())));
                } catch (InterruptedException e) {
                    // Waken up by interruptThread()
                }
                runAllTasks();
                if (confirmShutdown()) {
                    break;
                }
            }
        }
        @Override
        protected void wakeup(boolean inEventLoop) {
            interruptThread();
        }
    }
}
| mx657649013/netty | transport/src/test/java/io/netty/channel/SingleThreadEventLoopTest.java | Java | apache-2.0 | 16,237 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
#include <stdarg.h>
#include <stddef.h>
#include <setjmp.h>
#include "cmockery.h"
#include "c.h"
#include "../ha_config.c"
#include "ha_config_mock.c"
#include "lib/stringinfo.h"
#include "utils/builtins.h"
/* Helper functions signature */
#define format_string_release(p) (pfree(p))
static char *format_string_create(char *f,...);
static void handle_pg_excp(char *msg, int errcode);
/*
* Unitest for GPHD_HA_load_nodes() in ../access/external/ha_config.c
* GPHD_HA_load_nodes() discovers the active Namnode from an HA Namenodes pair.
* It does this by interacting with the API exposed by hdfs.h, from which it uses
* 2 functions:
* a. Namenode * hdfsGetHANamenodes(const char * nameservice, int * size);
* b. void hdfsFreeNamenodeInformation(Namenode * namenodes, int size);
* This unitest verifies the correct interaction between GPHD_HA_load_nodes() implementation
* and the 2 hdfs.h APIs. It looks at the standard flows with expected input configuration
* and also at limit cases with corrupted input configuration.
* The mock functions for the two(2) hdfs.h APIs are in ha_config_mock.c.
*/
/*
* SUT function: NNHAConf* GPHD_HA_load_nodes(const char *nameservice);
* Mock function: Namenode * hdfsGetHANamenodes(const char * nameservice, int * size)
* Negative test: GPHD_HA_load_nodes() receives an unexistent namesrvice
*/
void
test__GPHD_HA_load_nodes__UnknownNameservice(void **state)
{
	/*
	 * In case it receives an unknown nameservice string, the real function hdfsGetHANamenodes()
	 * will return NULL. We instruct our mock function to return NULL. In this way we simulate
	 * an unknown_service scenario and verify that our SUT function GPHD_HA_load_nodes() handles
	 * correctly the NULL returned by hdfsGetHANamenodes.
	 */
	will_return(hdfsGetHANamenodes, NULL);

	PG_TRY();
	{
		NNHAConf *hac = GPHD_HA_load_nodes("UNKNOWN_SERVICE");
	}
	PG_CATCH();
	{
		/* Expected path: verify the raised ereport's message text and SQL error code. */
		char *msg = "nameservice UNKNOWN_SERVICE not found in client configuration. No HA namenodes provided";
		handle_pg_excp(msg, ERRCODE_SYNTAX_ERROR);

		return;
	}
	PG_END_TRY();

	/* Reached only if GPHD_HA_load_nodes() failed to raise an error - fail the test. */
	assert_true(false);
}
/*
* SUT function: NNHAConf* GPHD_HA_load_nodes(const char *nameservice);
* Mock function: Namenode * hdfsGetHANamenodes(const char * nameservice, int * size)
* Negative test: hdfsGetHANamenodes() returns just one namenode.
* This is not an HA sustainable.
*/
void
test__GPHD_HA_load_nodes__OneNN(void **state)
{
	/* One namenode only: an HA pair requires at least two. */
	unsigned int numn = 1;
	Namenode nns[1];

	will_return(hdfsGetHANamenodes, nns);
	will_assign_value(hdfsGetHANamenodes, size, numn);

	PG_TRY();
	{
		NNHAConf *hac = GPHD_HA_load_nodes("NAMESERVICE");
	}
	PG_CATCH();
	{
		/* The expected error message embeds the nameservice name, so build it dynamically. */
		char *msg = format_string_create("High availability for nameservice %s was configured with only one node. A high availability scheme requires at least two nodes ",
						 "NAMESERVICE");
		handle_pg_excp(msg, ERRCODE_INTERNAL_ERROR);
		format_string_release(msg); /* if we trip on assert_string_equal we don't free, but it doesn't matter because the process stops */

		return;
	}
	PG_END_TRY();

	/* Reached only if no error was raised - fail the test. */
	assert_true(false);
}
/*
* SUT function: NNHAConf* GPHD_HA_load_nodes(const char *nameservice);
* Mock function: Namenode * hdfsGetHANamenodes(const char * nameservice, int * size)
* Negative test: hdfsGetHANamenodes() returns a Namenode.rpc_address field without ":"
* the host:port delimiter.
*/
void
test__GPHD_HA_load_nodes__RpcDelimMissing(void **state)
{
	unsigned int numn = 2;
	/* First namenode's rpc_address ("mdw2080") lacks the ':' host:port delimiter. */
	Namenode nns[] = { {"mdw2080", "mdw:50070"}, {"smdw:2080", "smdw:50070"}};

	will_return(hdfsGetHANamenodes, nns);
	will_assign_value(hdfsGetHANamenodes, size, numn);

	PG_TRY();
	{
		NNHAConf *hac = GPHD_HA_load_nodes("NAMESERVICE");
	}
	PG_CATCH();
	{
		/* Expected path: verify message and SQL error code of the raised ereport. */
		char *msg = "dfs.namenode.rpc-address was set incorrectly in the configuration. ':' missing";
		handle_pg_excp(msg, ERRCODE_SYNTAX_ERROR);

		return;
	}
	PG_END_TRY();

	/* Reached only if no error was raised - fail the test. */
	assert_true(false);
}
/*
* SUT function: NNHAConf* GPHD_HA_load_nodes(const char *nameservice)
* Mock functions: Namenode * hdfsGetHANamenodes(const char * nameservice, int * size)
* void port_to_str(char **port, int new_port)
* Positive test: port_to_str() assigns pxf_service_port correctly
*/
void
test__GPHD_HA_load_nodes__PxfServicePortIsAssigned(void **state)
{
	unsigned int numn = 2;
	Namenode nns[] = { {"mdw:2080", "mdw:50070"}, {"smdw:2080", "smdw:50070"}};

	/* Render the configured pxf_service_port as a string for the mock to hand back. */
	char strPort[30] = {0};
	pg_ltoa(pxf_service_port, strPort);

	will_return(hdfsGetHANamenodes, nns);
	will_assign_value(hdfsGetHANamenodes, size, numn);

	/*
	 * port_to_str() is expected once per namenode; each call must receive a
	 * non-NULL port pointer and the configured pxf_service_port, and writes
	 * the stringified port back through the out parameter.
	 */
	will_be_called(port_to_str);
	expect_not_value(port_to_str, port, NULL);
	expect_value(port_to_str, new_port, pxf_service_port);
	will_assign_string(port_to_str, port, strPort);

	will_be_called(port_to_str);
	expect_not_value(port_to_str, port, NULL);
	expect_value(port_to_str, new_port, pxf_service_port);
	will_assign_string(port_to_str, port, strPort);

	will_be_called(hdfsFreeNamenodeInformation);

	/* Positive test: must complete without raising an error. */
	NNHAConf *hac = GPHD_HA_load_nodes("NAMESERVICE");
}
/*
* SUT function: NNHAConf* GPHD_HA_load_nodes(const char *nameservice)
* Mock functions: Namenode * hdfsGetHANamenodes(const char * nameservice, int * size)
* void port_to_str(char **port, int new_port)
* Negative test: hdfsGetHANamenodes() returns a Namenode.http_address field without
* the host - ":port".
*/
void
test__GPHD_HA_load_nodes__HostMissing(void **state)
{
unsigned int numn = 2;
Namenode nns[] = { {":2080", "mdw:50070"}, {"smdw:2080", "smdw:50070"}};
char strPort[30] = {0};
pg_ltoa(pxf_service_port, strPort);
will_return(hdfsGetHANamenodes, nns);
will_assign_value(hdfsGetHANamenodes, size, numn);
will_be_called(port_to_str);
expect_not_value(port_to_str, port, NULL);
expect_value(port_to_str, new_port, pxf_service_port);
will_assign_string(port_to_str, port, strPort);
will_be_called(port_to_str);
expect_not_value(port_to_str, port, NULL);
expect_value(port_to_str, new_port, pxf_service_port);
will_assign_string(port_to_str, port, strPort);
will_be_called(hdfsFreeNamenodeInformation);
PG_TRY();
{
NNHAConf *hac = GPHD_HA_load_nodes("NAMESERVICE");
}
PG_CATCH();
{
char *msg = "HA Namenode host number 1 is NULL value";
handle_pg_excp(msg, ERRCODE_SYNTAX_ERROR);
return;
}
PG_END_TRY();
}
/*
* SUT function: NNHAConf* GPHD_HA_load_nodes(const char *nameservice)
* Mock functions: Namenode * hdfsGetHANamenodes(const char * nameservice, int * size)
* void port_to_str(char **port, int new_port)
* Negative test: port_to_str() does not set the port
* the port - "host:".
*/
void
test__GPHD_HA_load_nodes__PortMissing(void **state)
{
unsigned int numn = 2;
Namenode nns[] = { {"mdw:", "mdw:50070"}, {"smdw:2080", "smdw:50070"}};
will_return(hdfsGetHANamenodes, nns);
will_assign_value(hdfsGetHANamenodes, size, numn);
will_be_called(port_to_str);
expect_not_value(port_to_str, port, NULL);
expect_value(port_to_str, new_port, pxf_service_port);
will_be_called(port_to_str);
expect_not_value(port_to_str, port, NULL);
expect_value(port_to_str, new_port, pxf_service_port);
will_be_called(hdfsFreeNamenodeInformation);
PG_TRY();
{
NNHAConf *hac = GPHD_HA_load_nodes("NAMESERVICE");
}
PG_CATCH();
{
char *msg = "HA Namenode RPC port number 1 is NULL value";
handle_pg_excp(msg, ERRCODE_SYNTAX_ERROR);
return;
}
PG_END_TRY();
}
/*
* SUT function: NNHAConf* GPHD_HA_load_nodes(const char *nameservice)
* Mock functions: Namenode * hdfsGetHANamenodes(const char * nameservice, int * size)
* void port_to_str(char **port, int new_port)
* Negative test: port_to_str() returns a port outside the valid range
* - a number higher than 65535
*/
void
test__GPHD_HA_load_nodes__PortIsInvalidNumber(void **state)
{
unsigned int numn = 2;
Namenode nns[] = { {"mdw:2080", "mdw:65550"}, {"smdw:2080", "smdw:50070"}};
will_return(hdfsGetHANamenodes, nns);
will_assign_value(hdfsGetHANamenodes, size, numn);
will_be_called(port_to_str);
expect_not_value(port_to_str, port, NULL);
expect_value(port_to_str, new_port, pxf_service_port);
will_assign_string(port_to_str, port, "65550");
will_be_called(port_to_str);
expect_not_value(port_to_str, port, NULL);
expect_value(port_to_str, new_port, pxf_service_port);
will_assign_string(port_to_str, port, "65550");
will_be_called(hdfsFreeNamenodeInformation);
PG_TRY();
{
NNHAConf *hac = GPHD_HA_load_nodes("NAMESERVICE");
}
PG_CATCH();
{
char *msg = "Invalid port <65550> detected in nameservice configuration";
handle_pg_excp(msg, ERRCODE_SYNTAX_ERROR);
return;
}
PG_END_TRY();
}
/*
* SUT function: NNHAConf* GPHD_HA_load_nodes(const char *nameservice)
* Mock functions: Namenode * hdfsGetHANamenodes(const char * nameservice, int * size)
* void port_to_str(char **port, int new_port)
* Negative test: port_to_str() returns a port that is not a number
*/
void
test__GPHD_HA_load_nodes__PortIsNotNumber_TakeOne(void **state)
{
NNHAConf *hac;
unsigned int numn = 2;
Namenode nns[] = { {"mdw:2080", "mdw:50070"}, {"smdw:2080", "smdw:50070"}};
will_return(hdfsGetHANamenodes, nns);
will_assign_value(hdfsGetHANamenodes, size, numn);
will_be_called(port_to_str);
expect_not_value(port_to_str, port, NULL);
expect_value(port_to_str, new_port, pxf_service_port);
will_assign_string(port_to_str, port, "melon");
will_be_called(port_to_str);
expect_not_value(port_to_str, port, NULL);
expect_value(port_to_str, new_port, pxf_service_port);
will_be_called(hdfsFreeNamenodeInformation);
PG_TRY();
{
hac = GPHD_HA_load_nodes("NAMESERVICE");
}
PG_CATCH();
{
char *msg = "Invalid port <melon> detected in nameservice configuration";
handle_pg_excp(msg, ERRCODE_SYNTAX_ERROR);
return;
}
PG_END_TRY();
}
/*
* SUT function: NNHAConf* GPHD_HA_load_nodes(const char *nameservice)
* Mock functions: Namenode * hdfsGetHANamenodes(const char * nameservice, int * size)
* void port_to_str(char **port, int new_port)
* Negative test: port_to_str() returns a port that is not a number
*/
void
test__GPHD_HA_load_nodes__PortIsNotNumber_TakeTwo(void **state)
{
NNHAConf *hac;
unsigned int numn = 2;
Namenode nns[] = { {"mdw:2080", "mdw:50070"}, {"smdw:2080", "smdw:50070"}};
will_return(hdfsGetHANamenodes, nns);
will_assign_value(hdfsGetHANamenodes, size, numn);
will_be_called(port_to_str);
expect_not_value(port_to_str, port, NULL);
expect_value(port_to_str, new_port, pxf_service_port);
will_assign_string(port_to_str, port, "100ab");
will_be_called(port_to_str);
expect_not_value(port_to_str, port, NULL);
expect_value(port_to_str, new_port, pxf_service_port);
will_be_called(hdfsFreeNamenodeInformation);
PG_TRY();
{
hac = GPHD_HA_load_nodes("NAMESERVICE");
}
PG_CATCH();
{
char *msg = "Invalid port <100ab> detected in nameservice configuration";
handle_pg_excp(msg, ERRCODE_SYNTAX_ERROR);
return;
}
PG_END_TRY();
}
/*
 * Test driver: registers every unit test in this file and runs them
 * sequentially under cmockery.
 */
int
main(int argc, char *argv[])
{
	cmockery_parse_arguments(argc, argv);

	const UnitTest tests[] = {
		unit_test(test__GPHD_HA_load_nodes__UnknownNameservice),
		unit_test(test__GPHD_HA_load_nodes__OneNN),
		unit_test(test__GPHD_HA_load_nodes__RpcDelimMissing),
		unit_test(test__GPHD_HA_load_nodes__PxfServicePortIsAssigned),
		unit_test(test__GPHD_HA_load_nodes__HostMissing),
		unit_test(test__GPHD_HA_load_nodes__PortMissing),
		unit_test(test__GPHD_HA_load_nodes__PortIsInvalidNumber),
		unit_test(test__GPHD_HA_load_nodes__PortIsNotNumber_TakeOne),
		unit_test(test__GPHD_HA_load_nodes__PortIsNotNumber_TakeTwo)
	};

	return run_tests(tests);
}
/*
* Helper function to format strings that need to be passed to assert macros
*/
static char*
format_string_create(char *f,...)
{
	StringInfoData s;
	va_list vl;

	initStringInfo(&s);
	va_start(vl,f);
	appendStringInfoVA(&s, f, vl);
	va_end(vl);

	/* Caller owns s.data and releases it via format_string_release() (pfree). */
	return s.data;
}
/*
* Encapsulates exception unpackaging
*/
static void
handle_pg_excp(char *msg, int errcode)
{
	/*
	 * NOTE(review): CurrentMemoryContext is a MemoryContext pointer; assigning
	 * the literal 1 presumably only works because the memory allocator is mocked
	 * in this unit-test harness and CopyErrorData() merely requires a non-NULL
	 * context - confirm against the mocked elog/palloc implementation.
	 */
	CurrentMemoryContext = 1;
	ErrorData *edata = CopyErrorData();

	/* Verify the ereport raised by the SUT: SQL error code, severity, message text. */
	assert_true(edata->sqlerrcode == errcode);
	assert_true(edata->elevel == ERROR);
	assert_string_equal(edata->message, msg);

	/* Clean the internal error data stack. Otherwise errordata_stack_depth in elog.c,
	 * keeps growing from test to test with each ereport we issue in our SUT function
	 * until we reach errordata_stack_depth >= ERRORDATA_STACK_SIZE and our tests
	 * start failing
	 */
	elog_dismiss(INFO);
}
| cwelton/incubator-hawq | src/backend/access/external/test/ha_config_test.c | C | apache-2.0 | 13,343 |
package com.frameworkset.common.hibernate;
/**
* <p>Title: HibernateManager</p>
*
* <p>Description: </p>
*
* <p>
* bboss workgroup
* </p>
* <p>
* Copyright (c) 2007
* </p>
*
* @Date 2009-6-1 下午08:58:51
* @author biaoping.yin
* @version 1.0
*/
import java.io.Serializable;

import org.apache.log4j.Logger;
import org.hibernate.HibernateException;
import org.hibernate.Session;
import org.hibernate.SessionFactory;
import org.hibernate.Transaction;
import org.hibernate.cfg.Configuration;

import com.frameworkset.common.Manager;
/**
 * Central access point to the shared Hibernate {@link SessionFactory}.
 *
 * The factory is looked up lazily: preferably from the application context
 * bean named {@link #DEFAULT_SESSION_FACTORY_BEAN_NAME}, falling back to a
 * plain Hibernate {@code Configuration().configure()} bootstrap when no
 * context (or no such bean) is available.
 */
public final class HibernateManager implements Serializable
{
    private static Logger log = Logger.getLogger(HibernateManager.class);

    // Lazily initialized shared factory; guarded by the class lock (see getSessionFactory()).
    private static SessionFactory sessionFactory;

    /** Name of the application-context bean that supplies the session factory. */
    public static final String DEFAULT_SESSION_FACTORY_BEAN_NAME = "sessionFactory";

    public HibernateManager()
    {
    }

    /**
     * Returns the shared SessionFactory, creating it on first use.
     *
     * Fix: declared {@code synchronized} so that concurrent first calls cannot
     * race the null check and build two factories (the original lazy
     * initialization was not thread safe).
     *
     * @throws HibernateException if the factory cannot be built
     */
    public static synchronized SessionFactory getSessionFactory()
        throws HibernateException
    {
        if (sessionFactory == null)
        {
            log.debug("sessionFactory == null,Get session factory with name[" + DEFAULT_SESSION_FACTORY_BEAN_NAME + "]");
            if (Manager.getApplicationContext() == null || Manager.getApplicationContext().getBean(DEFAULT_SESSION_FACTORY_BEAN_NAME) == null)
            {
                // No application context (or no bean): bootstrap Hibernate directly.
                log.debug("Manager.getApplicationContext() == null,Get session factory from hibernate directly");
                sessionFactory = (new Configuration()).configure().buildSessionFactory();
                return sessionFactory;
            }
            log.debug("Get session factory through Manager");
            sessionFactory = (SessionFactory) Manager.getApplicationContext().getBean(DEFAULT_SESSION_FACTORY_BEAN_NAME);
            log.debug("Get session factory success!");
        }
        return sessionFactory;
    }

    /**
     * Opens a new Session from the shared factory.
     *
     * @throws HibernateException if the factory cannot be obtained or the session cannot be opened
     */
    public static Session openSession()
        throws HibernateException
    {
        return getSessionFactory().openSession();
    }

    /**
     * Closes the given session, ignoring {@code null}. Close failures are
     * logged (fix: previously printed to stderr via printStackTrace) and
     * never propagated.
     */
    public static void closeSession(Session session)
    {
        if (session == null)
        {
            return;
        }
        try
        {
            session.close();
        }
        catch (HibernateException e)
        {
            log.error("Failed to close Hibernate session", e);
        }
    }

    /**
     * Rolls back the given transaction, ignoring {@code null}. Rollback
     * failures are logged (fix: previously printed to stderr) and never
     * propagated.
     */
    public static void rollbackTransaction(Transaction transaction)
    {
        if (transaction == null)
        {
            return;
        }
        try
        {
            transaction.rollback();
        }
        catch (HibernateException e)
        {
            log.error("Failed to roll back Hibernate transaction", e);
        }
    }
}
| WilliamRen/bbossgroups-3.5 | bboss-persistent/hibernate/com/frameworkset/common/hibernate/HibernateManager.java | Java | apache-2.0 | 2,503 |
Before running the tests, do the following.

Prerequisites
-------------
1. confd is installed and confdrc is sourced
2. cd confd
3. make all
4. make start

Now run `make test` in the CMake build directory.
To stop confd, run `make stop`.
| psykokwak4/ydk-gen | sdk/cpp/core/tests/README.md | Markdown | apache-2.0 | 231 |
package resources
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"context"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"net/http"
)
// ProvidersClient is the client for the Providers methods of the Resources service.
// It embeds BaseClient, which supplies the autorest plumbing, the base URI and
// the subscription ID used by every request preparer below.
type ProvidersClient struct {
	BaseClient
}

// NewProvidersClient creates an instance of the ProvidersClient client.
func NewProvidersClient(subscriptionID string) ProvidersClient {
	return NewProvidersClientWithBaseURI(DefaultBaseURI, subscriptionID)
}

// NewProvidersClientWithBaseURI creates an instance of the ProvidersClient client.
func NewProvidersClientWithBaseURI(baseURI string, subscriptionID string) ProvidersClient {
	return ProvidersClient{NewWithBaseURI(baseURI, subscriptionID)}
}
// Get gets a resource provider.
//
// resourceProviderNamespace is namespace of the resource provider. expand is the $expand query parameter. e.g. To
// include property aliases in response, use $expand=resourceTypes/aliases.
//
// AutoRest-generated flow: GetPreparer builds the request, GetSender executes it
// (with Azure retry/registration handling), GetResponder unmarshals the JSON body.
func (client ProvidersClient) Get(ctx context.Context, resourceProviderNamespace string, expand string) (result Provider, err error) {
	req, err := client.GetPreparer(ctx, resourceProviderNamespace, expand)
	if err != nil {
		err = autorest.NewErrorWithError(err, "resources.ProvidersClient", "Get", nil, "Failure preparing request")
		return
	}

	resp, err := client.GetSender(req)
	if err != nil {
		result.Response = autorest.Response{Response: resp}
		err = autorest.NewErrorWithError(err, "resources.ProvidersClient", "Get", resp, "Failure sending request")
		return
	}

	result, err = client.GetResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "resources.ProvidersClient", "Get", resp, "Failure responding to request")
	}

	return
}

// GetPreparer prepares the Get request.
func (client ProvidersClient) GetPreparer(ctx context.Context, resourceProviderNamespace string, expand string) (*http.Request, error) {
	pathParameters := map[string]interface{}{
		"resourceProviderNamespace": autorest.Encode("path", resourceProviderNamespace),
		"subscriptionId":            autorest.Encode("path", client.SubscriptionID),
	}

	const APIVersion = "2016-07-01"
	queryParameters := map[string]interface{}{
		"api-version": APIVersion,
	}
	// $expand is optional and only added to the query string when supplied.
	if len(expand) > 0 {
		queryParameters["$expand"] = autorest.Encode("query", expand)
	}

	preparer := autorest.CreatePreparer(
		autorest.AsGet(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}", pathParameters),
		autorest.WithQueryParameters(queryParameters))
	return preparer.Prepare((&http.Request{}).WithContext(ctx))
}

// GetSender sends the Get request. The method will close the
// http.Response Body if it receives an error.
func (client ProvidersClient) GetSender(req *http.Request) (*http.Response, error) {
	return autorest.SendWithSender(client, req,
		azure.DoRetryWithRegistration(client.Client))
}

// GetResponder handles the response to the Get request. The method always
// closes the http.Response Body.
func (client ProvidersClient) GetResponder(resp *http.Response) (result Provider, err error) {
	err = autorest.Respond(
		resp,
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing())
	result.Response = autorest.Response{Response: resp}
	return
}
// List gets a list of resource providers.
//
// top is query parameters. If null is passed returns all deployments. expand is the $expand query parameter. e.g. To
// include property aliases in response, use $expand=resourceTypes/aliases.
//
// Returns the first page of results; subsequent pages are fetched lazily via
// listNextResults (see also ListComplete for an iterator over all pages).
func (client ProvidersClient) List(ctx context.Context, top *int32, expand string) (result ProviderListResultPage, err error) {
	// Wire up the paging callback before issuing the first request.
	result.fn = client.listNextResults
	req, err := client.ListPreparer(ctx, top, expand)
	if err != nil {
		err = autorest.NewErrorWithError(err, "resources.ProvidersClient", "List", nil, "Failure preparing request")
		return
	}

	resp, err := client.ListSender(req)
	if err != nil {
		result.plr.Response = autorest.Response{Response: resp}
		err = autorest.NewErrorWithError(err, "resources.ProvidersClient", "List", resp, "Failure sending request")
		return
	}

	result.plr, err = client.ListResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "resources.ProvidersClient", "List", resp, "Failure responding to request")
	}

	return
}

// ListPreparer prepares the List request.
func (client ProvidersClient) ListPreparer(ctx context.Context, top *int32, expand string) (*http.Request, error) {
	pathParameters := map[string]interface{}{
		"subscriptionId": autorest.Encode("path", client.SubscriptionID),
	}

	const APIVersion = "2016-07-01"
	queryParameters := map[string]interface{}{
		"api-version": APIVersion,
	}
	// $top and $expand are both optional query parameters.
	if top != nil {
		queryParameters["$top"] = autorest.Encode("query", *top)
	}
	if len(expand) > 0 {
		queryParameters["$expand"] = autorest.Encode("query", expand)
	}

	preparer := autorest.CreatePreparer(
		autorest.AsGet(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers", pathParameters),
		autorest.WithQueryParameters(queryParameters))
	return preparer.Prepare((&http.Request{}).WithContext(ctx))
}

// ListSender sends the List request. The method will close the
// http.Response Body if it receives an error.
func (client ProvidersClient) ListSender(req *http.Request) (*http.Response, error) {
	return autorest.SendWithSender(client, req,
		azure.DoRetryWithRegistration(client.Client))
}

// ListResponder handles the response to the List request. The method always
// closes the http.Response Body.
func (client ProvidersClient) ListResponder(resp *http.Response) (result ProviderListResult, err error) {
	err = autorest.Respond(
		resp,
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing())
	result.Response = autorest.Response{Response: resp}
	return
}

// listNextResults retrieves the next set of results, if any.
func (client ProvidersClient) listNextResults(lastResults ProviderListResult) (result ProviderListResult, err error) {
	req, err := lastResults.providerListResultPreparer()
	if err != nil {
		return result, autorest.NewErrorWithError(err, "resources.ProvidersClient", "listNextResults", nil, "Failure preparing next results request")
	}
	// A nil request means the previous page carried no nextLink: iteration is done.
	if req == nil {
		return
	}
	resp, err := client.ListSender(req)
	if err != nil {
		result.Response = autorest.Response{Response: resp}
		return result, autorest.NewErrorWithError(err, "resources.ProvidersClient", "listNextResults", resp, "Failure sending next results request")
	}
	result, err = client.ListResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "resources.ProvidersClient", "listNextResults", resp, "Failure responding to next results request")
	}
	return
}

// ListComplete enumerates all values, automatically crossing page boundaries as required.
func (client ProvidersClient) ListComplete(ctx context.Context, top *int32, expand string) (result ProviderListResultIterator, err error) {
	result.page, err = client.List(ctx, top, expand)
	return
}
// Register registers provider to be used with a subscription.
//
// resourceProviderNamespace is namespace of the resource provider.
//
// AutoRest-generated flow: RegisterPreparer builds the POST request,
// RegisterSender executes it, RegisterResponder unmarshals the JSON body.
func (client ProvidersClient) Register(ctx context.Context, resourceProviderNamespace string) (result Provider, err error) {
	req, err := client.RegisterPreparer(ctx, resourceProviderNamespace)
	if err != nil {
		err = autorest.NewErrorWithError(err, "resources.ProvidersClient", "Register", nil, "Failure preparing request")
		return
	}

	resp, err := client.RegisterSender(req)
	if err != nil {
		result.Response = autorest.Response{Response: resp}
		err = autorest.NewErrorWithError(err, "resources.ProvidersClient", "Register", resp, "Failure sending request")
		return
	}

	result, err = client.RegisterResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "resources.ProvidersClient", "Register", resp, "Failure responding to request")
	}

	return
}

// RegisterPreparer prepares the Register request.
func (client ProvidersClient) RegisterPreparer(ctx context.Context, resourceProviderNamespace string) (*http.Request, error) {
	pathParameters := map[string]interface{}{
		"resourceProviderNamespace": autorest.Encode("path", resourceProviderNamespace),
		"subscriptionId":            autorest.Encode("path", client.SubscriptionID),
	}

	const APIVersion = "2016-07-01"
	queryParameters := map[string]interface{}{
		"api-version": APIVersion,
	}

	preparer := autorest.CreatePreparer(
		autorest.AsPost(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/register", pathParameters),
		autorest.WithQueryParameters(queryParameters))
	return preparer.Prepare((&http.Request{}).WithContext(ctx))
}

// RegisterSender sends the Register request. The method will close the
// http.Response Body if it receives an error.
func (client ProvidersClient) RegisterSender(req *http.Request) (*http.Response, error) {
	return autorest.SendWithSender(client, req,
		azure.DoRetryWithRegistration(client.Client))
}

// RegisterResponder handles the response to the Register request. The method always
// closes the http.Response Body.
func (client ProvidersClient) RegisterResponder(resp *http.Response) (result Provider, err error) {
	err = autorest.Respond(
		resp,
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing())
	result.Response = autorest.Response{Response: resp}
	return
}
// Unregister unregisters provider from a subscription.
//
// resourceProviderNamespace is namespace of the resource provider.
//
// AutoRest-generated flow mirroring Register, but targeting the /unregister endpoint.
func (client ProvidersClient) Unregister(ctx context.Context, resourceProviderNamespace string) (result Provider, err error) {
	req, err := client.UnregisterPreparer(ctx, resourceProviderNamespace)
	if err != nil {
		err = autorest.NewErrorWithError(err, "resources.ProvidersClient", "Unregister", nil, "Failure preparing request")
		return
	}

	resp, err := client.UnregisterSender(req)
	if err != nil {
		result.Response = autorest.Response{Response: resp}
		err = autorest.NewErrorWithError(err, "resources.ProvidersClient", "Unregister", resp, "Failure sending request")
		return
	}

	result, err = client.UnregisterResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "resources.ProvidersClient", "Unregister", resp, "Failure responding to request")
	}

	return
}

// UnregisterPreparer prepares the Unregister request.
func (client ProvidersClient) UnregisterPreparer(ctx context.Context, resourceProviderNamespace string) (*http.Request, error) {
	pathParameters := map[string]interface{}{
		"resourceProviderNamespace": autorest.Encode("path", resourceProviderNamespace),
		"subscriptionId":            autorest.Encode("path", client.SubscriptionID),
	}

	const APIVersion = "2016-07-01"
	queryParameters := map[string]interface{}{
		"api-version": APIVersion,
	}

	preparer := autorest.CreatePreparer(
		autorest.AsPost(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/unregister", pathParameters),
		autorest.WithQueryParameters(queryParameters))
	return preparer.Prepare((&http.Request{}).WithContext(ctx))
}

// UnregisterSender sends the Unregister request. The method will close the
// http.Response Body if it receives an error.
func (client ProvidersClient) UnregisterSender(req *http.Request) (*http.Response, error) {
	return autorest.SendWithSender(client, req,
		azure.DoRetryWithRegistration(client.Client))
}

// UnregisterResponder handles the response to the Unregister request. The method always
// closes the http.Response Body.
func (client ProvidersClient) UnregisterResponder(resp *http.Response) (result Provider, err error) {
	err = autorest.Respond(
		resp,
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing())
	result.Response = autorest.Response{Response: resp}
	return
}
| maxamillion/origin | vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2016-07-01/resources/providers.go | GO | apache-2.0 | 12,994 |
/*
* Copyright 2015 Anton Tananaev ([email protected])
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.traccar.protocol;
import io.netty.handler.codec.http.HttpObjectAggregator;
import io.netty.handler.codec.http.HttpRequestDecoder;
import io.netty.handler.codec.http.HttpResponseEncoder;
import org.traccar.BaseProtocol;
import org.traccar.PipelineBuilder;
import org.traccar.TrackerServer;
/**
 * Protocol registration for OsmAnd-style HTTP position reports.
 * Registers a single server whose Netty pipeline speaks HTTP and hands the
 * aggregated request to {@link OsmAndProtocolDecoder}.
 */
public class OsmAndProtocol extends BaseProtocol {

    public OsmAndProtocol() {
        // NOTE(review): the first TrackerServer constructor argument presumably
        // selects datagram (UDP) vs stream (TCP) transport; false here looks
        // like TCP - confirm against the TrackerServer constructor.
        addServer(new TrackerServer(false, getName()) {
            @Override
            protected void addProtocolHandlers(PipelineBuilder pipeline) {
                // Outbound: encode HTTP responses.
                pipeline.addLast(new HttpResponseEncoder());
                // Inbound: decode raw bytes into HTTP request parts,
                // aggregate them into a full message (up to 16384 bytes),
                // then extract the position from the request.
                pipeline.addLast(new HttpRequestDecoder());
                pipeline.addLast(new HttpObjectAggregator(16384));
                pipeline.addLast(new OsmAndProtocolDecoder(OsmAndProtocol.this));
            }
        });
    }
}
| tananaev/traccar | src/main/java/org/traccar/protocol/OsmAndProtocol.java | Java | apache-2.0 | 1,461 |
/*! jQuery UI - v1.10.3 - 2013-10-08
* http://jqueryui.com
* Copyright 2013 jQuery Foundation and other contributors; Licensed MIT */
/* Minified, vendored datepicker locale bundle: registers the French (Switzerland,
 * "fr-CH") regional strings/formats and sets them as the datepicker defaults.
 * Do not hand-edit; regenerate from the jQuery UI i18n sources. */
jQuery(function(t){t.datepicker.regional["fr-CH"]={closeText:"Fermer",prevText:"<Préc",nextText:"Suiv>",currentText:"Courant",monthNames:["Janvier","Février","Mars","Avril","Mai","Juin","Juillet","Août","Septembre","Octobre","Novembre","Décembre"],monthNamesShort:["Jan","Fév","Mar","Avr","Mai","Jun","Jul","Aoû","Sep","Oct","Nov","Déc"],dayNames:["Dimanche","Lundi","Mardi","Mercredi","Jeudi","Vendredi","Samedi"],dayNamesShort:["Dim","Lun","Mar","Mer","Jeu","Ven","Sam"],dayNamesMin:["Di","Lu","Ma","Me","Je","Ve","Sa"],weekHeader:"Sm",dateFormat:"dd.mm.yy",firstDay:1,isRTL:!1,showMonthAfterYear:!1,yearSuffix:""},t.datepicker.setDefaults(t.datepicker.regional["fr-CH"])});
define([
    "hr/hr"
], function(hr) {
    // Model describing a single glossary entry; all attributes default to null
    // until populated (id, display name and free-form description).
    return hr.Model.extend({
        defaults: {
            id: null,
            name: null,
            description: null
        }
    });
});
/**
 * Renders the admin-form <input> for a "number" field type and wires up its
 * client-side validation.
 *
 * @param string $field     Field name; also used as the input's id.
 * @param mixed  $value     Current value; falls back to the field's default.
 * @param array  $fieldinfo Field configuration (minnumber, maxnumber, size,
 *                          defaultvalue, formattribute, css, ... - extracted below).
 * @return string HTML for the text input.
 */
function number($field, $value, $fieldinfo) {
	// NOTE(review): extract() imports every $fieldinfo key as a local variable
	// ($minnumber, $maxnumber, $size, $defaultvalue, $formattribute, $css, ...),
	// which hides their origin and is risky if $fieldinfo ever carries
	// untrusted keys - confirm the config source is trusted.
	extract($fieldinfo);
	// Any falsy value (including 0 and "") is replaced by the configured default.
	if(!$value) $value = $defaultvalue;
	$errortips = $this->fields[$field]['errortips'];
	// Accumulate a jQuery formValidator rule: numeric, within [minnumber, maxnumber].
	if($errortips) $this->formValidator .= '$("#'.$field.'").formValidator({onshow:"'.$errortips.'",onfocus:"'.$errortips.'"}).inputValidator({min:'.$minnumber.',max:'.$maxnumber.',onerror:"'.$errortips.'"}).regexValidator({regexp:"num",datatype:"enum",onerror:"'.$errortips.'"});';
	// NOTE(review): $value/$formattribute/$css are interpolated into HTML without
	// escaping - possible XSS if any of them can contain user-controlled text.
	return "<input type='text' name='info[$field]' id='$field' value='$value' size='$size' $this->no_allowed class='input-text' {$formattribute} {$css}>";
}
| shopscor/interface | phpcms/modules/formguide/fields/number/form.inc.php | PHP | apache-2.0 | 596 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.component.aws.ses;
import com.amazonaws.AmazonClientException;
import com.amazonaws.AmazonServiceException;
import com.amazonaws.services.simpleemail.AbstractAmazonSimpleEmailService;
import com.amazonaws.services.simpleemail.model.SendEmailRequest;
import com.amazonaws.services.simpleemail.model.SendEmailResult;
import com.amazonaws.services.simpleemail.model.SendRawEmailRequest;
import com.amazonaws.services.simpleemail.model.SendRawEmailResult;
/**
 * Test double for the Amazon SES client. It records the most recent request
 * handed to {@code sendEmail} / {@code sendRawEmail} so tests can inspect what
 * the producer built, and always answers with message id "1".
 */
public class AmazonSESClientMock extends AbstractAmazonSimpleEmailService {

    private SendEmailRequest sendEmailRequest;
    private SendRawEmailRequest sendRawEmailRequest;

    public AmazonSESClientMock() {
    }

    @Override
    public SendEmailResult sendEmail(SendEmailRequest request) throws AmazonServiceException, AmazonClientException {
        // Capture the request for later inspection by the test.
        sendEmailRequest = request;
        SendEmailResult reply = new SendEmailResult();
        reply.setMessageId("1");
        return reply;
    }

    @Override
    public SendRawEmailResult sendRawEmail(SendRawEmailRequest request) throws AmazonServiceException, AmazonClientException {
        // Capture the raw request for later inspection by the test.
        sendRawEmailRequest = request;
        SendRawEmailResult reply = new SendRawEmailResult();
        reply.setMessageId("1");
        return reply;
    }

    /** @return the last request passed to {@link #sendEmail}, or null. */
    public SendEmailRequest getSendEmailRequest() {
        return sendEmailRequest;
    }

    /** @return the last request passed to {@link #sendRawEmail}, or null. */
    public SendRawEmailRequest getSendRawEmailRequest() {
        return sendRawEmailRequest;
    }
}
| DariusX/camel | components/camel-aws-ses/src/test/java/org/apache/camel/component/aws/ses/AmazonSESClientMock.java | Java | apache-2.0 | 2,367 |
/*
* Copyright 2014 Cask Data, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package co.cask.cdap.api.dataset.lib.cube;
import co.cask.cdap.api.annotation.Beta;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import javax.annotation.Nullable;
/**
* Defines a query to perform on {@link Cube} data.
* </p>
* Another way to think about the query is to map it to the following statement::
* <pre><code>
* SELECT count('read.ops') {@literal <<} measure name and aggregation function
* FROM aggregation1.1min_resolution {@literal <<} aggregation and resolution
* GROUP BY dataset, {@literal <<} groupByDimensions
* WHERE namespace='ns1' AND app='myApp' AND program='myFlow' AND {@literal <<} dimensionValues
* ts>=1423370200 AND ts{@literal <}1423398198 {@literal <<} startTs and endTs
* LIMIT 100 {@literal <<} limit
*
* </code>
* </pre>
* See also {@link Cube#query(CubeQuery)}.
*/
@Beta
public final class CubeQuery {
  // null value means auto-choose aggregation based on query todo: auto-choosing may be error prone, remove it?
  @Nullable
  private final String aggregation;
  private final long startTs;
  private final long endTs;
  // resolution of the aggregation to query, in seconds
  private final int resolution;
  private final int limit;
  private final Map<String, AggregationFunction> measurements;
  private final Map<String, String> dimensionValues;
  private final List<String> groupByDimensions;
  private final Interpolator interpolator;
  /**
   * Creates {@link CubeQuery} with given parameters.
   * @param aggregation (optional) aggregation name to query in; if {@code null}, the aggregation will be auto-selected
   *                    based on rest of query parameters
   * @param startTs start (inclusive) of the time range to query
   * @param endTs end (exclusive) of the time range to query
   * @param resolution resolution of the aggregation to query in
   * @param limit max number of returned data points
   * @param measurements map of measure name, measure type to query for, empty map means "all measures"
   * @param dimensionValues dimension values to filter by
   * @param groupByDimensions dimensions to group by
   * @param interpolator {@link Interpolator} to use
   */
  public CubeQuery(@Nullable String aggregation,
                   long startTs, long endTs, int resolution, int limit,
                   Map<String, AggregationFunction> measurements,
                   Map<String, String> dimensionValues, List<String> groupByDimensions,
                   @Nullable Interpolator interpolator) {
    this.aggregation = aggregation;
    this.startTs = startTs;
    this.endTs = endTs;
    this.resolution = resolution;
    this.limit = limit;
    this.measurements = measurements;
    // defensive copies so later mutation of the caller's collections cannot change this query
    this.dimensionValues = Collections.unmodifiableMap(new HashMap<>(dimensionValues));
    this.groupByDimensions = Collections.unmodifiableList(new ArrayList<>(groupByDimensions));
    this.interpolator = interpolator;
  }
  @Nullable
  public String getAggregation() {
    return aggregation;
  }
  public long getStartTs() {
    return startTs;
  }
  public long getEndTs() {
    return endTs;
  }
  public int getResolution() {
    return resolution;
  }
  public Map<String, AggregationFunction> getMeasurements() {
    return measurements;
  }
  public Map<String, String> getDimensionValues() {
    return dimensionValues;
  }
  public List<String> getGroupByDimensions() {
    return groupByDimensions;
  }
  // todo: push down limit support to Cube
  public int getLimit() {
    return limit;
  }
  public Interpolator getInterpolator() {
    return interpolator;
  }
  @Override
  public String toString() {
    final StringBuilder sb = new StringBuilder();
    sb.append("CubeQuery");
    sb.append("{aggregation=").append(aggregation);
    sb.append(", startTs=").append(startTs);
    sb.append(", endTs=").append(endTs);
    sb.append(", resolution=").append(resolution);
    sb.append(", limit=").append(limit);
    sb.append(", measurements=").append(measurements);
    sb.append(", dimensionValues=").append(dimensionValues);
    sb.append(", groupByDimensions=").append(groupByDimensions);
    sb.append(", interpolator=").append(interpolator);
    sb.append('}');
    return sb.toString();
  }
  /**
   * @return {@link Builder} to build {@link CubeQuery}.
   */
  public static Builder builder() {
    return new Builder();
  }
  /**
   * Builds {@link CubeQuery}.
   */
  public static final class Builder {
    private String aggregation;
    private long startTs;
    private long endTs;
    private int resolution;
    private int limit;
    private Map<String, AggregationFunction> measurements = new HashMap<>();
    private Map<String, String> dimensionValues = new HashMap<>();
    private List<String> groupByDimensions = new ArrayList<>();
    private Interpolator interpolator;
    /**
     * @return builder for configuring {@link CubeQuery}
     */
    public Select select() {
      return new Select();
    }
    /**
     * @return instance of {@link CubeQuery}
     */
    private CubeQuery build() {
      return new CubeQuery(aggregation, startTs, endTs, resolution, limit,
                           measurements, dimensionValues, groupByDimensions, interpolator);
    }
    /**
     * Builder for configuring {@link CubeQuery}.
     */
    public final class Select {
      private Select() {}
      /**
       * Adds measurement to be included in selection of {@link CubeQuery}.
       * @param name name of the measurement
       * @param aggFunc function to be used if aggregation of measurement value is needed
       * @return builder for configuring {@link CubeQuery}
       */
      public Measurement measurement(String name, AggregationFunction aggFunc) {
        Builder.this.measurements.put(name, aggFunc);
        return new Measurement();
      }
      /**
       * Adds measurements to be included in selection of {@link CubeQuery}.
       * @param measurements map of measurement name, agg function to include
       * @return builder for configuring {@link CubeQuery}
       */
      public Measurement measurements(Map<String, AggregationFunction> measurements) {
        Builder.this.measurements.putAll(measurements);
        return new Measurement();
      }
    }
    /**
     * Builder for configuring {@link CubeQuery}.
     */
    public final class Measurement {
      private Measurement() {}
      /**
       * Adds measurement to be included in selection of {@link CubeQuery}.
       * @param name name of the measurement
       * @param aggFunc function to be used if aggregation of measurement value is needed
       * @return builder for configuring {@link CubeQuery}
       */
      public Measurement measurement(String name, AggregationFunction aggFunc) {
        Builder.this.measurements.put(name, aggFunc);
        return this;
      }
      /**
       * Adds measurements to be included in selection of {@link CubeQuery}.
       * @param measurements map of measurement name, agg function to include
       * @return builder for configuring {@link CubeQuery}
       */
      public Measurement measurements(Map<String, AggregationFunction> measurements) {
        Builder.this.measurements.putAll(measurements);
        return new Measurement();
      }
      /**
       * Defines aggregation view to query from.
       * @param aggregation name of the aggregation view
       * @return builder for configuring {@link CubeQuery}
       */
      public From from(String aggregation) {
        Builder.this.aggregation = aggregation;
        return new From();
      }
      /**
       * Sets aggregation view to query from to be auto-selected based on other parameters of the query.
       * @return builder for configuring {@link CubeQuery}
       */
      public From from() {
        Builder.this.aggregation = null;
        return new From();
      }
    }
    /**
     * Builder for configuring {@link CubeQuery}.
     */
    public final class From {
      private From() {}
      /**
       * Sets resolution for {@link CubeQuery}.
       * @param amount amount of units
       * @param timeUnit unit type
       * @return builder for configuring {@link CubeQuery}
       */
      public Where resolution(long amount, TimeUnit timeUnit) {
        // Resolution is stored in seconds, so convert `amount` of `timeUnit`
        // *to* seconds, e.g. resolution(1, MINUTES) -> 60.
        // BUG FIX: this previously used timeUnit.convert(amount, TimeUnit.SECONDS),
        // which converts in the opposite direction (treats `amount` as seconds),
        // truncating e.g. resolution(1, MINUTES) to 0.
        Builder.this.resolution = (int) timeUnit.toSeconds(amount);
        return new Where();
      }
    }
    /**
     * Builder for configuring {@link CubeQuery}.
     */
    public final class Where {
      private Where() {}
      /**
       * @return builder for configuring {@link CubeQuery}
       */
      public Dimension where() {
        return new Dimension();
      }
    }
    /**
     * Builder for configuring {@link CubeQuery}.
     */
    public final class Dimension {
      private Dimension() {}
      /**
       * Adds dimension value to filter by.
       * @param name name of dimension
       * @param value value of dimension
       * @return builder for configuring {@link CubeQuery}
       */
      public Dimension dimension(String name, String value) {
        Builder.this.dimensionValues.put(name, value);
        return this;
      }
      /**
       * Adds dimension values to filter by.
       * @param dimValues dimension name, dimension value pairs to filter by
       * @return builder for configuring {@link CubeQuery}
       */
      public Dimension dimensions(Map<String, String> dimValues) {
        Builder.this.dimensionValues.putAll(dimValues);
        return this;
      }
      /**
       * Defines time range for {@link CubeQuery}.
       * @param startTsInSec start time inclusive (epoch in seconds)
       * @param endTsInSec end time exclusive (epoch in seconds)
       * @return builder for configuring {@link CubeQuery}
       */
      public GroupBy timeRange(long startTsInSec, long endTsInSec) {
        Builder.this.startTs = startTsInSec;
        Builder.this.endTs = endTsInSec;
        return new GroupBy();
      }
    }
    /**
     * Builder for configuring {@link CubeQuery}.
     */
    public final class GroupBy {
      private GroupBy() {}
      /**
       * @return builder for configuring {@link CubeQuery}
       */
      public GroupByDimension groupBy() {
        return new GroupByDimension();
      }
      /**
       * Sets a limit on returned data points per time series
       * @param limit limit value
       * @return builder for configuring {@link CubeQuery}
       */
      public Limit limit(int limit) {
        Builder.this.limit = limit;
        return new Limit();
      }
    }
    /**
     * Builder for configuring {@link CubeQuery}.
     */
    public final class GroupByDimension {
      private GroupByDimension() {}
      /**
       * Adds dimension to use for grouping results into time series.
       * @param name name of the dimension
       * @return builder for configuring {@link CubeQuery}
       */
      public GroupByDimension dimension(String name) {
        Builder.this.groupByDimensions.add(name);
        return this;
      }
      /**
       * Adds dimensions to use for grouping results into time series.
       * @param names names of the dimensions
       * @return builder for configuring {@link CubeQuery}
       */
      public GroupByDimension dimensions(List<String> names) {
        Builder.this.groupByDimensions.addAll(names);
        return this;
      }
      /**
       * Sets a limit on returned data points per time series
       * @param limit limit value
       * @return builder for configuring {@link CubeQuery}
       */
      public Limit limit(int limit) {
        Builder.this.limit = limit;
        return new Limit();
      }
    }
    /**
     * Builder for configuring {@link CubeQuery}.
     */
    public final class Limit {
      private Limit() {}
      /**
       * Sets {@link Interpolator} to use for {@link CubeQuery}.
       * @param interpolator interpolator to use
       * @return builder for configuring {@link CubeQuery}
       */
      public Build interpolator(Interpolator interpolator) {
        Builder.this.interpolator = interpolator;
        return new Build();
      }
      /**
       * @return {@link CubeQuery}
       */
      public CubeQuery build() {
        return Builder.this.build();
      }
    }
    /**
     * Builder for configuring {@link CubeQuery}.
     */
    public final class Build {
      private Build() {}
      /**
       * @return {@link CubeQuery}
       */
      public CubeQuery build() {
        return Builder.this.build();
      }
    }
  }
}
| chtyim/cdap | cdap-api/src/main/java/co/cask/cdap/api/dataset/lib/cube/CubeQuery.java | Java | apache-2.0 | 13,361 |
/*
* Copyright (c) 1996, 2007, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package java.awt.event;
import java.awt.AWTEvent;
/**
* A semantic event which indicates that an object's text changed.
* This high-level event is generated by an object (such as a TextComponent)
* when its text changes. The event is passed to
* every <code>TextListener</code> object which registered to receive such
* events using the component's <code>addTextListener</code> method.
* <P>
* The object that implements the <code>TextListener</code> interface gets
* this <code>TextEvent</code> when the event occurs. The listener is
* spared the details of processing individual mouse movements and key strokes
* Instead, it can process a "meaningful" (semantic) event like "text changed".
*
* @author Georges Saab
*
* @see java.awt.TextComponent
* @see TextListener
* @see <a href="http://java.sun.com/docs/books/tutorial/post1.0/ui/textlistener.html">Tutorial: Writing a Text Listener</a>
*
* @since 1.1
*/
public class TextEvent extends AWTEvent {

    /** First id in the range reserved for text events. */
    public static final int TEXT_FIRST  = 900;

    /** Last id in the range reserved for text events. */
    public static final int TEXT_LAST   = 900;

    /** Event id indicating that the object's text value changed. */
    public static final int TEXT_VALUE_CHANGED  = TEXT_FIRST;

    /*
     * JDK 1.1 serialVersionUID
     */
    private static final long serialVersionUID = 6269902291250941179L;

    /**
     * Constructs a <code>TextEvent</code> object.
     * <p>Note that passing in an invalid <code>id</code> results in
     * unspecified behavior. This method throws an
     * <code>IllegalArgumentException</code> if <code>source</code>
     * is <code>null</code>.
     *
     * @param source the (<code>TextComponent</code>) object that
     *               originated the event
     * @param id     an integer that identifies the event type
     * @throws IllegalArgumentException if <code>source</code> is null
     */
    public TextEvent(Object source, int id) {
        super(source, id);
    }

    /**
     * Returns a parameter string identifying this text event.
     * This method is useful for event-logging and for debugging.
     *
     * @return a string identifying the event and its attributes
     */
    public String paramString() {
        // Only a single event id exists in the text-event range; every
        // other id is reported as unknown.
        return id == TEXT_VALUE_CHANGED ? "TEXT_VALUE_CHANGED" : "unknown type";
    }
}
| andreagenso/java2scala | test/J2s/java/openjdk-6-src-b27/jdk/src/share/classes/java/awt/event/TextEvent.java | Java | apache-2.0 | 3,819 |
/// OSVR-Unity Connection
///
/// <copyright>
/// Copyright 2014 Sensics, Inc.
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
/// </copyright>
using System;
using System.Collections.Generic;
using System.IO;
using UnityEngine;
namespace OSVR
{
namespace Unity
{
        /// <summary>
        /// Prepends the Unity plugin directories to the process PATH so that a
        /// native DLL whose dependencies sit alongside it can be resolved by the
        /// OS loader. All members are no-ops on WinRT (see the UNITY_WINRT guards).
        /// </summary>
        public class DLLSearchPathFixer
        {
            /// <summary>
            /// Call in a static constructor of an object depending on native code.
            ///
            /// It is required if that native DLL being accessed depends on other native DLLs alongside it.
            /// </summary>
            public static void fix()
            {
                // Amend DLL search path - see http://forum.unity3d.com/threads/dllnotfoundexception-when-depend-on-another-dll.31083/#post-1042180
                // for original inspiration for this code.
                // Adds Plugins and Plugins/x86 or Plugins/x86_64 (chosen by pointer size).
                var fixer = new DLLSearchPathFixer();
                fixer.ConditionallyAddRelativeDir("Plugins");
                fixer.ConditionallyAddRelativeDir(new List<String>() { "Plugins", IntPtr.Size == 4 ? "x86" : "x86_64" });
                fixer.ApplyChanges();
            }
            /// <summary>
            /// Constructor for private use as a helper within the static fix() method.
            /// Snapshots the current process PATH and the Unity data directory
            /// (in both forward-slashed and backslashed forms).
            /// </summary>
            private DLLSearchPathFixer()
            {
#if !UNITY_WINRT
                var currentPath = Environment.GetEnvironmentVariable("PATH", EnvironmentVariableTarget.Process);
                //Debug.Log(String.Format("Old PATH: {0}", currentPath));
                OrigDirs = new List<string>(currentPath.Split(Path.PathSeparator));
                UnityDataDir = Application.dataPath;
                UnityDataDirBackslashed = Application.dataPath.Replace("/", "\\");
#endif
            }
            /// <summary>
            /// Update the process environment PATH variable to contain the full list (entries new and old) of directories.
            /// New directories are placed in front so they win the search order.
            /// </summary>
            private void ApplyChanges()
            {
#if !UNITY_WINRT
                // Combine new and old dirs
                var allDirs = new List<String>(NewDirs);
                allDirs.AddRange(OrigDirs);
                var newPathString = String.Join(Path.PathSeparator.ToString(), allDirs.ToArray());
                //Debug.Log(String.Format("New PATH: {0}", newPathString));
                Environment.SetEnvironmentVariable("PATH", newPathString, EnvironmentVariableTarget.Process);
#endif
            }
            /// <summary>
            /// If a directory specified relative to the Unity data dir is not yet in the PATH, add it.
            /// </summary>
            /// <param name="dirComponents">Components of a directory name relative to the Unity data dir.</param>
            private void ConditionallyAddRelativeDir(List<string> dirComponents)
            {
                ConditionallyAddRelativeDir(PathTools.Combine(dirComponents));
            }
            /// <summary>
            /// If a directory specified relative to the Unity data dir is not yet in the PATH, add it.
            /// </summary>
            /// <param name="relativePortion">A directory name relative to the Unity data dir.</param>
            private void ConditionallyAddRelativeDir(string relativePortion)
            {
                if (IsRelativeDirIncludedInPath(relativePortion))
                {
                    // early out.
                    return;
                }
                NewDirs.Add(PathTools.Combine(UnityDataDir, relativePortion));
            }
            /// <summary>
            /// Checks to see if a directory specified relative to the Unity data dir is included in the path so far.
            /// It checks using both forward-slashed and backslashed versions of the Unity data dir.
            /// </summary>
            /// <param name="relativePortion">Directory relative to the Unity data dir</param>
            /// <returns>true if the given directory is included in the path so far</returns>
            private bool IsRelativeDirIncludedInPath(string relativePortion)
            {
                return IsIncludedInPath(PathTools.Combine(UnityDataDir, relativePortion)) || IsIncludedInPath(PathTools.Combine(UnityDataDirBackslashed, relativePortion));
            }
            /// <summary>
            /// Checks to see if a directory is included in the path so far (both new and old directories).
            /// </summary>
            /// <param name="dir">Directory name</param>
            /// <returns>true if the given directory name is found in either the new or old directory lists.</returns>
            private bool IsIncludedInPath(string dir)
            {
                return NewDirs.Contains(dir) || OrigDirs.Contains(dir);
            }
            // Snapshot of Application.dataPath (empty on WinRT builds).
            private string UnityDataDir = string.Empty;
            // Same path with '/' replaced by '\' for PATH-entry comparison.
            private string UnityDataDirBackslashed = string.Empty;
            // Directories queued for prepending to PATH.
            private List<string> NewDirs = new List<string>();
            // PATH entries present before we touched anything.
            private List<string> OrigDirs = null;
            /// <summary>
            /// Utilities for combining path components with a wider variety of input data types than System.IO.Path.Combine
            /// </summary>
            private class PathTools
            {
                internal static string Combine(string a, string b)
                {
                    return Path.Combine(a, b);
                }
                internal static string Combine(string[] components)
                {
                    return String.Join(Path.DirectorySeparatorChar.ToString(), components);
                }
                internal static string Combine(List<String> components)
                {
                    return PathTools.Combine(components.ToArray());
                }
            }
        }
}
}
| OSVR/OSVR-Unity | OSVR-Unity/Assets/OSVRUnity/src/DLLSearchPathFixer.cs | C# | apache-2.0 | 6,319 |
//=============================================================================
// Copyright 2006-2010 Daniel W. Dyer
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//=============================================================================
package org.uncommons.watchmaker.examples.geneticprogramming;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import org.testng.annotations.Test;
import org.uncommons.watchmaker.framework.FitnessEvaluator;
/**
* Unit test for the {@link FitnessEvaluator} used
* in the genetic programming example applicaiton.
* @author Daniel Dyer
*/
public class TreeEvaluatorTest
{
    /**
     * Builds the shared fixture: (inputs -> expected output) examples for a
     * program that should multiply its two inputs together.
     */
    private static Map<double[], Double> multiplicationExamples()
    {
        Map<double[], Double> examples = new HashMap<double[], Double>();
        examples.put(new double[]{5d, 3d}, 15d);
        examples.put(new double[]{3d, 8d}, 24d);
        examples.put(new double[]{7d, 2d}, 14d);
        return examples;
    }

    /**
     * A function that perfectly generates the correct output for all inputs
     * should have a fitness of zero.
     */
    @Test
    public void testPerfectFunction()
    {
        FitnessEvaluator<Node> evaluator = new TreeEvaluator(multiplicationExamples());
        // Program that multiplies its two inputs together.
        Node candidate = new Multiplication(new Parameter(0), new Parameter(1));
        double fitness = evaluator.getFitness(candidate, Arrays.asList(candidate));
        assert fitness == 0 : "Correct program should have zero fitness.";
    }

    /**
     * A function that doesn't generate the correct output for all inputs
     * should have a non-zero fitness.
     */
    @Test
    public void testIncorrectFunction()
    {
        FitnessEvaluator<Node> evaluator = new TreeEvaluator(multiplicationExamples());
        // Program that multiplies its first input by 3 (will give the correct answer
        // for the first set of inputs but the wrong answer for the other two).
        Node candidate = new Multiplication(new Parameter(0), new Constant(3d));
        double fitness = evaluator.getFitness(candidate, Arrays.asList(candidate));
        // Error on second example is 15, error on third is 7.
        // 15^2 + 7^2 = 225 + 49 = 274
        assert fitness == 274d : "Wrong fitness for incorrect program.";
    }
}
| keithlow18/Watchmaker | examples/src/java/test/org/uncommons/watchmaker/examples/geneticprogramming/TreeEvaluatorTest.java | Java | apache-2.0 | 2,975 |
//===--- Metadata.cpp - Metadata tests ------------------------------------===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
#include "swift/Runtime/Metadata.h"
#include "swift/Runtime/Concurrent.h"
#include "gtest/gtest.h"
#include <iterator>
#include <functional>
#include <sys/mman.h>
#include <vector>
#include <pthread.h>
#if !defined(_POSIX_BARRIERS) || _POSIX_BARRIERS < 0
// Implement pthread_barrier_* for platforms that don't implement them (Darwin)
#define PTHREAD_BARRIER_SERIAL_THREAD 1
struct pthread_barrier_t {
  pthread_mutex_t mutex;       // guards count/numThreadsWaiting and the condvar
  pthread_cond_t cond;         // waiters park here until the last thread arrives
  unsigned count;              // number of threads required to release the barrier
  unsigned numThreadsWaiting;  // arrivals so far in the current cycle
};
typedef void *pthread_barrierattr_t;  // attributes are accepted but ignored
// Initializes *barrier so that `count` calls to pthread_barrier_wait release
// together. Returns 0 on success, -1 on failure (errno is set when count == 0).
// The attribute parameter is accepted for signature compatibility and ignored.
static int pthread_barrier_init(pthread_barrier_t *barrier,
                                pthread_barrierattr_t*, unsigned count) {
  if (count == 0) {
    errno = EINVAL;
    return -1;
  }

  if (pthread_mutex_init(&barrier->mutex, nullptr) != 0)
    return -1;

  if (pthread_cond_init(&barrier->cond, nullptr) != 0) {
    // Roll back the mutex so a failed init doesn't leak it.
    pthread_mutex_destroy(&barrier->mutex);
    return -1;
  }

  barrier->count = count;
  barrier->numThreadsWaiting = 0;
  return 0;
}
// Destroys both synchronization members. Always attempts both destroys, and
// reports failure (-1) if either one failed; returns 0 otherwise.
static int pthread_barrier_destroy(pthread_barrier_t *barrier) {
  const bool condOk  = pthread_cond_destroy(&barrier->cond) == 0;
  const bool mutexOk = pthread_mutex_destroy(&barrier->mutex) == 0;
  return (condOk && mutexOk) ? 0 : -1;
}
// Blocks until `count` threads have called this function. The last arrival
// resets the barrier and wakes everyone: it returns
// PTHREAD_BARRIER_SERIAL_THREAD while the other callers return 0.
// Returns -1 on error.
//
// NOTE(review): pthread_cond_wait may wake spuriously, and this shim has no
// generation counter to re-check against, so a spurious wakeup releases a
// waiter early. The tests below use the barrier only as a one-shot start
// gate; confirm before reusing it for repeated cycles.
static int pthread_barrier_wait(pthread_barrier_t *barrier) {
  if (pthread_mutex_lock(&barrier->mutex) != 0) {
    return -1;
  }
  ++barrier->numThreadsWaiting;
  if (barrier->numThreadsWaiting < barrier->count) {
    // Not the last arrival: sleep until the final thread broadcasts.
    if (pthread_cond_wait(&barrier->cond, &barrier->mutex) != 0) {
      // BUG FIX: previously returned with the mutex still held on this path.
      pthread_mutex_unlock(&barrier->mutex);
      return -1;
    }
    if (pthread_mutex_unlock(&barrier->mutex) != 0) {
      return -1;
    }
    return 0;
  } else {
    // Last arrival: reset the count for a future cycle.
    barrier->numThreadsWaiting = 0;
    // Wake up all threads.
    if (pthread_cond_broadcast(&barrier->cond) != 0) {
      // BUG FIX: previously returned with the mutex still held on this path.
      pthread_mutex_unlock(&barrier->mutex);
      return -1;
    }
    if (pthread_mutex_unlock(&barrier->mutex) != 0) {
      return -1;
    }
    return PTHREAD_BARRIER_SERIAL_THREAD;
  }
}
#endif
using namespace swift;
// Race testing.
template <typename T>
struct RaceArgs {
  std::function<T()> code;  // the racy operation each thread executes
  pthread_barrier_t *go;    // start gate shared by all racing threads
};
void *RaceThunk(void *vargs) {
RaceArgs<void*> *args = static_cast<RaceArgs<void*> *>(vargs);
// Signal ready. Wait for go.
pthread_barrier_wait(args->go);
return args->code();
}
/// RaceTest(code) runs code in many threads simultaneously,
/// and returns a vector of all returned results.
template <typename T, int NumThreads = 64>
std::vector<T>
RaceTest(std::function<T()> code)
{
  // One-shot start gate: released only when all NumThreads threads arrive.
  pthread_barrier_t startGate;
  pthread_barrier_init(&startGate, nullptr, NumThreads);

  // Spawn the racers; each blocks on the gate inside RaceThunk.
  pthread_t workers[NumThreads];
  std::vector<RaceArgs<T>> perThreadArgs(NumThreads, {code, &startGate});
  for (int i = 0; i < NumThreads; ++i) {
    pthread_create(&workers[i], nullptr, &RaceThunk, &perThreadArgs[i]);
  }

  // Join each thread and gather what it returned.
  std::vector<T> collected;
  collected.reserve(NumThreads);
  for (int i = 0; i < NumThreads; ++i) {
    void *returned;
    pthread_join(workers[i], &returned);
    collected.push_back(static_cast<T>(returned));
  }

  pthread_barrier_destroy(&startGate);
  return collected;
}
/// RaceTest_ExpectEqual(code) runs code in many threads simultaneously,
/// verifies that they all returned the same value, and returns that value.
template<typename T>
T RaceTest_ExpectEqual(std::function<T()> code)
{
  // Run the operation concurrently and require that every thread observed
  // the same value as the first one.
  auto outcomes = RaceTest<T>(code);
  T expected = outcomes.front();
  for (const auto &outcome : outcomes) {
    EXPECT_EQ(expected, outcome);
  }
  return expected;
}
/// Some unique global pointers: their addresses act as distinct opaque values
/// in the tests below (&Global1 stands in for a NominalTypeDescriptor,
/// &Global2 / &Global3 serve as distinct generic arguments).
uint32_t Global1 = 0;
uint32_t Global2 = 0;
uint32_t Global3 = 0;
/// The general structure of a generic metadata.
template <typename Instance>
struct GenericMetadataTest {
  GenericMetadata Header;  // pattern header passed to swift_getGenericMetadata
  Instance Template;       // instance metadata template laid out right after it
};
// A generic struct-metadata pattern with one generic argument. Its allocation
// function copies that argument into word 2 of the freshly instantiated
// metadata, which the getGenericMetadata test below checks via fields[2].
GenericMetadataTest<StructMetadata> MetadataTest1 = {
  // Header
  {
    // allocation function: instantiate the pattern, then stash the single
    // generic argument at word index 2 of the new metadata record.
    [](GenericMetadata *pattern, const void *args) -> Metadata * {
      auto metadata = swift_allocateGenericValueMetadata(pattern, args);
      auto metadataWords = reinterpret_cast<const void**>(metadata);
      auto argsWords = reinterpret_cast<const void* const*>(args);
      metadataWords[2] = argsWords[0];
      return metadata;
    },
    3 * sizeof(void*), // metadata size
    1, // num arguments
    0, // address point
    {} // private data
  },
  // Fields
  {
    MetadataKind::Struct,
    reinterpret_cast<const NominalTypeDescriptor*>(&Global1),
    nullptr
  }
};
TEST(Concurrent, ConcurrentList) {
  const int pushesPerThread = 100;
  const int pushedValue = 1;

  ConcurrentList<int> sharedList;

  // Hammer the list from many threads at once; every thread pushes the same
  // value 100 times and returns nothing.
  auto threadResults = RaceTest<int*>(
    [&]() -> int* {
      for (int i = 0; i < pushesPerThread; i++)
        sharedList.push_front(pushedValue);
      return nullptr;
    }
  );

  // Every element must have been fully initialized despite the contention.
  for (auto value : sharedList) {
    EXPECT_EQ(pushedValue, value);
  }

  // No push may have been lost: length == threads * pushes-per-thread.
  size_t observedLength = std::distance(sharedList.begin(), sharedList.end());
  EXPECT_EQ(observedLength, threadResults.size() * pushesPerThread);
}
TEST(Concurrent, ConcurrentMap) {
  const int insertsPerThread = 100;

  // Minimal key-only element satisfying the ConcurrentMap entry contract.
  struct Entry {
    size_t Key;
    Entry(size_t key) : Key(key) {}
    int compareWithKey(size_t key) const {
      if (key == Key)
        return 0;
      return key < Key ? -1 : 1;
    }
    static size_t getExtraAllocationSize(size_t) { return 0; }
    size_t getExtraAllocationSize() const { return 0; }
  };

  ConcurrentMap<Entry> sharedMap;

  // Every thread inserts the identical key sequence concurrently; duplicate
  // getOrInsert calls for a key must coalesce into one entry.
  RaceTest<int*>(
    [&]() -> int* {
      for (int i = 0; i < insertsPerThread; i++) {
        sharedMap.getOrInsert((i * 123512) % 0xFFFF);
      }
      return nullptr;
    }
  );

  // Afterwards every inserted key must be findable.
  for (int i = 0; i < insertsPerThread; i++) {
    EXPECT_TRUE(sharedMap.find((i * 123512) % 0xFFFF));
  }
}
// Racing swift_getGenericMetadata for the same (pattern, args) pair must hand
// every thread the same instantiation; different args must yield a different
// (but again unique) instantiation.
TEST(MetadataTest, getGenericMetadata) {
  auto metadataTemplate = (GenericMetadata*) &MetadataTest1;
  void *args[] = { &Global2 };
  // First argument value: all threads must agree on one metadata record.
  auto result1 = RaceTest_ExpectEqual<const Metadata *>(
    [&]() -> const Metadata * {
      auto inst = static_cast<const StructMetadata*>
        (swift_getGenericMetadata(metadataTemplate, args));
      auto fields = reinterpret_cast<void * const *>(inst);
      EXPECT_EQ(MetadataKind::Struct, inst->getKind());
      EXPECT_EQ((const NominalTypeDescriptor*)&Global1,
                inst->Description.get());
      // The pattern's allocation function stored the argument at word 2.
      EXPECT_EQ(&Global2, fields[2]);
      return inst;
    });
  // Second argument value: must instantiate to a distinct record.
  args[0] = &Global3;
  RaceTest_ExpectEqual<const Metadata *>(
    [&]() -> const Metadata * {
      auto inst = static_cast<const StructMetadata*>
        (swift_getGenericMetadata(metadataTemplate, args));
      EXPECT_NE(inst, result1);
      auto fields = reinterpret_cast<void * const *>(inst);
      EXPECT_EQ(MetadataKind::Struct, inst->getKind());
      EXPECT_EQ((const NominalTypeDescriptor*)&Global1,
                inst->Description.get());
      EXPECT_EQ(&Global3, fields[2]);
      return inst;
    });
}
// A minimal hand-built class metadata record (value witnesses taken from the
// Bo symbol — presumably Builtin.NativeObject; TODO confirm against the
// runtime's mangling). Used below as an instance type for metatype metadata.
FullMetadata<ClassMetadata> MetadataTest2 = {
  { { nullptr }, { &VALUE_WITNESS_SYM(Bo) } },
  { { { MetadataKind::Class } }, nullptr, /*rodata*/ 1,
    ClassFlags(), nullptr, 0, 0, 0, 0, 0 }
};
// Racing swift_getMetatypeMetadata over several instance types (builtin ints,
// a hand-built class, and nested metatypes) must be pointer-wise uniqued and
// must leave each result's InstanceType intact.
TEST(MetadataTest, getMetatypeMetadata) {
  // Metatype of Builtin.Int64.
  auto inst1 = RaceTest_ExpectEqual<const MetatypeMetadata *>(
    [&]() -> const MetatypeMetadata * {
      auto inst = swift_getMetatypeMetadata(&METADATA_SYM(Bi64_).base);
      EXPECT_EQ(sizeof(void*), inst->getValueWitnesses()->size);
      return inst;
    });
  // Metatype of Builtin.Int32.
  auto inst2 = RaceTest_ExpectEqual<const MetatypeMetadata *>(
    [&]() -> const MetatypeMetadata * {
      auto inst = swift_getMetatypeMetadata(&METADATA_SYM(Bi32_).base);
      EXPECT_EQ(sizeof(void*), inst->getValueWitnesses()->size);
      return inst;
    });
  // Metatype of the hand-built class metadata above.
  auto inst3 = RaceTest_ExpectEqual<const MetatypeMetadata *>(
    [&]() -> const MetatypeMetadata * {
      auto inst = swift_getMetatypeMetadata(&MetadataTest2);
      EXPECT_EQ(sizeof(void*), inst->getValueWitnesses()->size);
      return inst;
    });
  // Metatype of a metatype (nesting).
  auto inst4 = RaceTest_ExpectEqual<const MetatypeMetadata *>(
    [&]() -> const MetatypeMetadata * {
      auto inst = swift_getMetatypeMetadata(inst3);
      EXPECT_EQ(sizeof(void*), inst->getValueWitnesses()->size);
      return inst;
    });
  auto inst5 = RaceTest_ExpectEqual<const MetatypeMetadata *>(
    [&]() -> const MetatypeMetadata * {
      auto inst = swift_getMetatypeMetadata(inst1);
      EXPECT_EQ(sizeof(void*), inst->getValueWitnesses()->size);
      return inst;
    });
  // After all this, the instance type fields should still be valid.
  ASSERT_EQ(&METADATA_SYM(Bi64_).base, inst1->InstanceType);
  ASSERT_EQ(&METADATA_SYM(Bi32_).base, inst2->InstanceType);
  ASSERT_EQ(&MetadataTest2, inst3->InstanceType);
  ASSERT_EQ(inst3, inst4->InstanceType);
  ASSERT_EQ(inst1, inst5->InstanceType);
}
// Hand-built protocol descriptors exercised by the existential-metadata
// tests below. The string is the protocol's mangled name.
// Plain Swift-dispatched protocol, no class constraint.
ProtocolDescriptor ProtocolA{
"_TMp8Metadata9ProtocolA",
nullptr,
ProtocolDescriptorFlags()
.withSwift(true)
.withClassConstraint(ProtocolClassConstraint::Any)
.withDispatchStrategy(ProtocolDispatchStrategy::Swift)
};
// Second plain protocol, used to test composition and uniquing.
ProtocolDescriptor ProtocolB{
"_TMp8Metadata9ProtocolB",
nullptr,
ProtocolDescriptorFlags()
.withSwift(true)
.withClassConstraint(ProtocolClassConstraint::Any)
.withDispatchStrategy(ProtocolDispatchStrategy::Swift)
};
// Marked as the Error special protocol; single-protocol existentials of it
// get the special error representation (see the test below).
ProtocolDescriptor ProtocolError{
"_TMp8Metadata13ProtocolError",
nullptr,
ProtocolDescriptorFlags()
.withSwift(true)
.withClassConstraint(ProtocolClassConstraint::Any)
.withDispatchStrategy(ProtocolDispatchStrategy::Swift)
.withSpecialProtocol(SpecialProtocol::Error)
};
// Class-constrained protocol: conforming types must be classes.
ProtocolDescriptor ProtocolClassConstrained{
"_TMp8Metadata24ProtocolClassConstrained",
nullptr,
ProtocolDescriptorFlags()
.withSwift(true)
.withClassConstraint(ProtocolClassConstraint::Class)
.withDispatchStrategy(ProtocolDispatchStrategy::Swift)
};
// ObjC-dispatched protocol: requires no witness table word in existentials.
ProtocolDescriptor ProtocolNoWitnessTable{
"_TMp8Metadata22ProtocolNoWitnessTable",
nullptr,
ProtocolDescriptorFlags()
.withSwift(true)
.withClassConstraint(ProtocolClassConstraint::Class)
.withDispatchStrategy(ProtocolDispatchStrategy::ObjC)
};
// Convenience wrapper: the runtime entry point takes a count plus a mutable
// descriptor array, so copy the caller's initializer list into a vector and
// hand over its storage.
static const ExistentialTypeMetadata *test_getExistentialMetadata(
  std::initializer_list<const ProtocolDescriptor *> descriptors)
{
  std::vector<const ProtocolDescriptor *> protocols(descriptors.begin(),
                                                    descriptors.end());
  return swift_getExistentialTypeMetadata(protocols.size(), protocols.data());
}
// Exercises swift_getExistentialTypeMetadata over a range of protocol
// compositions: empty (Any), single protocols, multi-protocol compositions,
// class constraints, ObjC-only protocols, and the Error special case.
TEST(MetadataTest, getExistentialMetadata) {
// The empty composition ("Any"): no witness tables, no class constraint.
RaceTest_ExpectEqual<const ExistentialTypeMetadata *>(
[&]() -> const ExistentialTypeMetadata * {
auto any = test_getExistentialMetadata({});
EXPECT_EQ(MetadataKind::Existential, any->getKind());
EXPECT_EQ(0U, any->Flags.getNumWitnessTables());
EXPECT_EQ(ProtocolClassConstraint::Any, any->Flags.getClassConstraint());
EXPECT_EQ(0U, any->Protocols.NumProtocols);
EXPECT_EQ(SpecialProtocol::None,
any->Flags.getSpecialProtocol());
return any;
});
// A single Swift-dispatched protocol: one witness-table slot.
auto exA = RaceTest_ExpectEqual<const ExistentialTypeMetadata *>(
[&]() -> const ExistentialTypeMetadata * {
auto a = test_getExistentialMetadata({&ProtocolA});
EXPECT_EQ(MetadataKind::Existential, a->getKind());
EXPECT_EQ(1U, a->Flags.getNumWitnessTables());
EXPECT_EQ(ProtocolClassConstraint::Any, a->Flags.getClassConstraint());
EXPECT_EQ(1U, a->Protocols.NumProtocols);
EXPECT_EQ(&ProtocolA, a->Protocols[0]);
EXPECT_EQ(SpecialProtocol::None,
a->Flags.getSpecialProtocol());
return a;
});
// A different protocol must yield a distinct existential record.
RaceTest_ExpectEqual<const ExistentialTypeMetadata *>(
[&]() -> const ExistentialTypeMetadata * {
auto b = test_getExistentialMetadata({&ProtocolB});
EXPECT_NE(exA, b);
EXPECT_EQ(MetadataKind::Existential, b->getKind());
EXPECT_EQ(1U, b->Flags.getNumWitnessTables());
EXPECT_EQ(ProtocolClassConstraint::Any, b->Flags.getClassConstraint());
EXPECT_EQ(1U, b->Protocols.NumProtocols);
EXPECT_EQ(&ProtocolB, b->Protocols[0]);
EXPECT_EQ(SpecialProtocol::None,
b->Flags.getSpecialProtocol());
return b;
});
// protocol compositions are order-invariant
RaceTest_ExpectEqual<const ExistentialTypeMetadata *>(
[&]() -> const ExistentialTypeMetadata * {
auto ab = test_getExistentialMetadata({&ProtocolA, &ProtocolB});
auto ba = test_getExistentialMetadata({&ProtocolB, &ProtocolA});
// Both orderings must unique to the same metadata record.
EXPECT_EQ(ab, ba);
EXPECT_EQ(MetadataKind::Existential, ab->getKind());
EXPECT_EQ(2U, ab->Flags.getNumWitnessTables());
EXPECT_EQ(ProtocolClassConstraint::Any, ab->Flags.getClassConstraint());
EXPECT_EQ(2U, ab->Protocols.NumProtocols);
// The stored protocol order is unspecified; accept either.
EXPECT_TRUE(
(ab->Protocols[0]==&ProtocolA && ab->Protocols[1]==&ProtocolB)
|| (ab->Protocols[0]==&ProtocolB && ab->Protocols[1]==&ProtocolA));
EXPECT_EQ(SpecialProtocol::None,
ab->Flags.getSpecialProtocol());
EXPECT_EQ(SpecialProtocol::None,
ba->Flags.getSpecialProtocol());
return ab;
});
// A class-constrained protocol propagates the constraint to the existential.
RaceTest_ExpectEqual<const ExistentialTypeMetadata *>(
[&]() -> const ExistentialTypeMetadata * {
auto classConstrained
= test_getExistentialMetadata({&ProtocolClassConstrained});
EXPECT_EQ(MetadataKind::Existential, classConstrained->getKind());
EXPECT_EQ(1U, classConstrained->Flags.getNumWitnessTables());
EXPECT_EQ(ProtocolClassConstraint::Class,
classConstrained->Flags.getClassConstraint());
EXPECT_EQ(1U, classConstrained->Protocols.NumProtocols);
EXPECT_EQ(SpecialProtocol::None,
classConstrained->Flags.getSpecialProtocol());
EXPECT_EQ(&ProtocolClassConstrained, classConstrained->Protocols[0]);
return classConstrained;
});
// An ObjC-dispatched protocol is listed but contributes no witness table.
RaceTest_ExpectEqual<const ExistentialTypeMetadata *>(
[&]() -> const ExistentialTypeMetadata * {
auto noWitnessTable
= test_getExistentialMetadata({&ProtocolNoWitnessTable});
EXPECT_EQ(MetadataKind::Existential, noWitnessTable->getKind());
EXPECT_EQ(0U, noWitnessTable->Flags.getNumWitnessTables());
EXPECT_EQ(ProtocolClassConstraint::Class,
noWitnessTable->Flags.getClassConstraint());
EXPECT_EQ(1U, noWitnessTable->Protocols.NumProtocols);
EXPECT_EQ(SpecialProtocol::None,
noWitnessTable->Flags.getSpecialProtocol());
EXPECT_EQ(&ProtocolNoWitnessTable, noWitnessTable->Protocols[0]);
return noWitnessTable;
});
// Mixed composition: three protocols, but only the two Swift-dispatched
// ones get witness-table slots; the class constraint still applies.
RaceTest_ExpectEqual<const ExistentialTypeMetadata *>(
[&]() -> const ExistentialTypeMetadata * {
auto mixedWitnessTable
= test_getExistentialMetadata({&ProtocolNoWitnessTable,
&ProtocolA, &ProtocolB});
EXPECT_EQ(MetadataKind::Existential, mixedWitnessTable->getKind());
EXPECT_EQ(2U, mixedWitnessTable->Flags.getNumWitnessTables());
EXPECT_EQ(ProtocolClassConstraint::Class,
mixedWitnessTable->Flags.getClassConstraint());
EXPECT_EQ(3U, mixedWitnessTable->Protocols.NumProtocols);
EXPECT_EQ(SpecialProtocol::None,
mixedWitnessTable->Flags.getSpecialProtocol());
return mixedWitnessTable;
});
// The Error representation differs by platform: with ObjC interop the
// existential uses the unknown-object witnesses, otherwise native-object.
const ValueWitnessTable *ExpectedErrorValueWitnesses;
#if SWIFT_OBJC_INTEROP
ExpectedErrorValueWitnesses = &VALUE_WITNESS_SYM(BO);
#else
ExpectedErrorValueWitnesses = &VALUE_WITNESS_SYM(Bo);
#endif
// A lone Error protocol gets the special representation.
RaceTest_ExpectEqual<const ExistentialTypeMetadata *>(
[&]() -> const ExistentialTypeMetadata * {
auto special
= test_getExistentialMetadata({&ProtocolError});
EXPECT_EQ(MetadataKind::Existential, special->getKind());
EXPECT_EQ(1U, special->Flags.getNumWitnessTables());
EXPECT_EQ(SpecialProtocol::Error,
special->Flags.getSpecialProtocol());
EXPECT_EQ(ExpectedErrorValueWitnesses,
special->getValueWitnesses());
return special;
});
RaceTest_ExpectEqual<const ExistentialTypeMetadata *>(
[&]() -> const ExistentialTypeMetadata * {
auto special
= test_getExistentialMetadata({&ProtocolError, &ProtocolA});
EXPECT_EQ(MetadataKind::Existential, special->getKind());
EXPECT_EQ(2U, special->Flags.getNumWitnessTables());
// Compositions of special protocols aren't special.
EXPECT_EQ(SpecialProtocol::None,
special->Flags.getSpecialProtocol());
EXPECT_NE(ExpectedErrorValueWitnesses,
special->getValueWitnesses());
return special;
});
}
// No-op heap destructor for the hand-built superclass below.
static void destroySuperclass(HeapObject *toDestroy) {}
// A superclass metadata record preceded by four words of "unexpected"
// prefix data; the generic-subclass test below checks that instantiation
// copies this prefix in front of the subclass metadata.
struct {
void *Prefix[4];
FullMetadata<ClassMetadata> Metadata;
} SuperclassWithPrefix = {
{ &Global1, &Global3, &Global2, &Global3 },
{ { { &destroySuperclass }, { &VALUE_WITNESS_SYM(Bo) } },
{ { { MetadataKind::Class } }, nullptr, /*rodata*/ 1, ClassFlags(), nullptr,
// class size spans the whole struct; the address point sits after the
// prefix words plus the heap-metadata header.
0, 0, 0, sizeof(SuperclassWithPrefix),
sizeof(SuperclassWithPrefix.Prefix) + sizeof(HeapMetadataHeader) } }
};
// The "address point" callers use: the metadata proper, past the prefix.
ClassMetadata * const SuperclassWithPrefix_AddressPoint =
&SuperclassWithPrefix.Metadata;
// No-op heap destructor for the generic subclass pattern below.
static void destroySubclass(HeapObject *toDestroy) {}
// Generic class pattern: header + class metadata pattern + three suffix
// words, whose instantiation inherits from SuperclassWithPrefix.
struct {
GenericMetadata Header;
FullMetadata<ClassMetadata> Pattern;
void *Suffix[3];
} GenericSubclass = {
{
// allocation function
[](GenericMetadata *pattern, const void *args) -> Metadata* {
auto metadata =
swift_allocateGenericClassMetadata(pattern, args,
SuperclassWithPrefix_AddressPoint);
// Store the single generic argument into the third suffix word
// (the test checks that this overwrites the pattern's value).
char *bytes = (char*) metadata + sizeof(ClassMetadata);
auto metadataWords = reinterpret_cast<const void**>(bytes);
auto argsWords = reinterpret_cast<const void* const *>(args);
metadataWords[2] = argsWords[0];
return metadata;
},
sizeof(GenericSubclass.Pattern) + sizeof(GenericSubclass.Suffix), // pattern size
1, // num arguments
sizeof(HeapMetadataHeader), // address point
{} // private data
},
{ { { &destroySubclass }, { &VALUE_WITNESS_SYM(Bo) } },
{ { { MetadataKind::Class } }, nullptr, /*rodata*/ 1, ClassFlags(), nullptr,
0, 0, 0,
sizeof(GenericSubclass.Pattern) + sizeof(GenericSubclass.Suffix),
sizeof(HeapMetadataHeader) } },
{ &Global2, &Global1, &Global2 }
};
// Instantiating a generic class whose superclass carries extra prefix data:
// the runtime must widen the subclass metadata so the superclass's prefix
// words appear (at negative offsets) before the subclass's own header.
TEST(MetadataTest, getGenericMetadata_SuperclassWithUnexpectedPrefix) {
auto metadataTemplate = &GenericSubclass.Header;
void *args[] = { &Global3 };
RaceTest_ExpectEqual<const ClassMetadata *>(
[&]() -> const ClassMetadata * {
auto inst = static_cast<const ClassMetadata*>(
swift_getGenericMetadata(metadataTemplate, args));
// 'fields' points at the address point; negative indices reach the
// copied prefix and header words in front of it.
void * const *fields = reinterpret_cast<void * const *>(inst);
// Assert that we copied the extra prefix data from the superclass.
EXPECT_EQ(&Global1, fields[-6]);
EXPECT_EQ(&Global3, fields[-5]);
EXPECT_EQ(&Global2, fields[-4]);
EXPECT_EQ(&Global3, fields[-3]);
// Assert that we copied the shared prefix data from the subclass.
EXPECT_EQ((void*) &destroySubclass, fields[-2]);
EXPECT_EQ(&VALUE_WITNESS_SYM(Bo), fields[-1]);
// Assert that we set the superclass field.
EXPECT_EQ(SuperclassWithPrefix_AddressPoint, fields[1]);
// Assert that we copied the subclass suffix data.
auto suffix = (void * const *) ((char*) inst + sizeof(ClassMetadata));
EXPECT_EQ(&Global2, suffix[0]);
EXPECT_EQ(&Global1, suffix[1]);
// This should have been overwritten by the creation function.
EXPECT_EQ(&Global3, suffix[2]);
// Class size/address point grow by the superclass's four prefix words
// (plus header words) relative to the pattern.
EXPECT_EQ(7 * sizeof(void*) + sizeof(GenericSubclass.Pattern),
inst->getClassSize());
EXPECT_EQ(4 * sizeof(void*) + sizeof(HeapMetadataHeader),
inst->getClassAddressPoint());
// These are all expected to be equal.
return inst;
});
}
// Three plain ("opaque") protocols and one class-constrained protocol used
// by the existential layout tests below.
static ProtocolDescriptor OpaqueProto1 = { "OpaqueProto1", nullptr,
ProtocolDescriptorFlags().withSwift(true)
.withDispatchStrategy(ProtocolDispatchStrategy::Swift)
.withClassConstraint(ProtocolClassConstraint::Any)
};
static ProtocolDescriptor OpaqueProto2 = { "OpaqueProto2", nullptr,
ProtocolDescriptorFlags().withSwift(true)
.withDispatchStrategy(ProtocolDispatchStrategy::Swift)
.withClassConstraint(ProtocolClassConstraint::Any)
};
static ProtocolDescriptor OpaqueProto3 = { "OpaqueProto3", nullptr,
ProtocolDescriptorFlags().withSwift(true)
.withDispatchStrategy(ProtocolDispatchStrategy::Swift)
.withClassConstraint(ProtocolClassConstraint::Any)
};
// Class-constrained: existentials containing it use the class representation.
static ProtocolDescriptor ClassProto1 = { "ClassProto1", nullptr,
ProtocolDescriptorFlags().withSwift(true)
.withDispatchStrategy(ProtocolDispatchStrategy::Swift)
.withClassConstraint(ProtocolClassConstraint::Class)
};
// Layout of opaque (non-class) existentials: total size grows by one word
// per witness table — 5, 6, 7 words for 1, 2, 3 protocols respectively —
// and the values are neither POD nor bitwise-takable.
TEST(MetadataTest, getExistentialTypeMetadata_opaque) {
  const ProtocolDescriptor *oneProtocol[] = {
    &OpaqueProto1
  };
  RaceTest_ExpectEqual<const ExistentialTypeMetadata *>(
      [&]() -> const ExistentialTypeMetadata * {
        auto existential = swift_getExistentialTypeMetadata(1, oneProtocol);
        EXPECT_EQ(MetadataKind::Existential, existential->getKind());
        EXPECT_EQ(5 * sizeof(void*), existential->getValueWitnesses()->getSize());
        EXPECT_EQ(alignof(void*), existential->getValueWitnesses()->getAlignment());
        EXPECT_FALSE(existential->getValueWitnesses()->isPOD());
        EXPECT_FALSE(existential->getValueWitnesses()->isBitwiseTakable());
        return existential;
      });

  const ProtocolDescriptor *twoProtocols[] = {
    &OpaqueProto1, &OpaqueProto2
  };
  RaceTest_ExpectEqual<const ExistentialTypeMetadata *>(
      [&]() -> const ExistentialTypeMetadata * {
        auto existential = swift_getExistentialTypeMetadata(2, twoProtocols);
        EXPECT_EQ(MetadataKind::Existential, existential->getKind());
        EXPECT_EQ(6 * sizeof(void*), existential->getValueWitnesses()->getSize());
        EXPECT_EQ(alignof(void*), existential->getValueWitnesses()->getAlignment());
        EXPECT_FALSE(existential->getValueWitnesses()->isPOD());
        EXPECT_FALSE(existential->getValueWitnesses()->isBitwiseTakable());
        return existential;
      });

  const ProtocolDescriptor *threeProtocols[] = {
    &OpaqueProto1, &OpaqueProto2, &OpaqueProto3
  };
  RaceTest_ExpectEqual<const ExistentialTypeMetadata *>(
      [&]() -> const ExistentialTypeMetadata * {
        auto existential = swift_getExistentialTypeMetadata(3, threeProtocols);
        EXPECT_EQ(MetadataKind::Existential, existential->getKind());
        EXPECT_EQ(7 * sizeof(void*), existential->getValueWitnesses()->getSize());
        EXPECT_EQ(alignof(void*), existential->getValueWitnesses()->getAlignment());
        EXPECT_FALSE(existential->getValueWitnesses()->isPOD());
        EXPECT_FALSE(existential->getValueWitnesses()->isBitwiseTakable());
        return existential;
      });
}
// Layout of class-bound existentials: smaller than the opaque form (2, 3, 4
// words for 1, 2, 3 protocols), not POD (the class reference is retained)
// but bitwise-takable.
TEST(MetadataTest, getExistentialTypeMetadata_class) {
  const ProtocolDescriptor *oneProtocol[] = {
    &ClassProto1
  };
  RaceTest_ExpectEqual<const ExistentialTypeMetadata *>(
      [&]() -> const ExistentialTypeMetadata * {
        auto existential = swift_getExistentialTypeMetadata(1, oneProtocol);
        EXPECT_EQ(MetadataKind::Existential, existential->getKind());
        EXPECT_EQ(2 * sizeof(void*), existential->getValueWitnesses()->getSize());
        EXPECT_EQ(alignof(void*), existential->getValueWitnesses()->getAlignment());
        EXPECT_FALSE(existential->getValueWitnesses()->isPOD());
        EXPECT_TRUE(existential->getValueWitnesses()->isBitwiseTakable());
        return existential;
      });

  // One class-constrained protocol is enough to force the class layout.
  const ProtocolDescriptor *twoProtocols[] = {
    &OpaqueProto1, &ClassProto1
  };
  RaceTest_ExpectEqual<const ExistentialTypeMetadata *>(
      [&]() -> const ExistentialTypeMetadata * {
        auto existential = swift_getExistentialTypeMetadata(2, twoProtocols);
        EXPECT_EQ(MetadataKind::Existential, existential->getKind());
        EXPECT_EQ(3 * sizeof(void*), existential->getValueWitnesses()->getSize());
        EXPECT_EQ(alignof(void*), existential->getValueWitnesses()->getAlignment());
        EXPECT_FALSE(existential->getValueWitnesses()->isPOD());
        EXPECT_TRUE(existential->getValueWitnesses()->isBitwiseTakable());
        return existential;
      });

  const ProtocolDescriptor *threeProtocols[] = {
    &OpaqueProto1, &OpaqueProto2, &ClassProto1
  };
  RaceTest_ExpectEqual<const ExistentialTypeMetadata *>(
      [&]() -> const ExistentialTypeMetadata * {
        auto existential = swift_getExistentialTypeMetadata(3, threeProtocols);
        EXPECT_EQ(MetadataKind::Existential, existential->getKind());
        EXPECT_EQ(4 * sizeof(void*), existential->getValueWitnesses()->getSize());
        EXPECT_EQ(alignof(void*), existential->getValueWitnesses()->getAlignment());
        EXPECT_FALSE(existential->getValueWitnesses()->isPOD());
        EXPECT_TRUE(existential->getValueWitnesses()->isBitwiseTakable());
        return existential;
      });
}
// Recorded by the stubbed allocate/destroy buffer witnesses in the
// installCommonValueWitnesses test below.
static const void *AllocatedBuffer = nullptr;
static const void *DeallocatedBuffer = nullptr;
// Forward declaration of the runtime-internal entry point under test.
namespace swift {
void installCommonValueWitnesses(ValueWitnessTable *vwtable);
} // namespace swift
// Builds a POD value witness table whose size exceeds the inline buffer,
// forcing the out-of-line ("pod_indirect") representation, then checks that
// taking a buffer transfers ownership of the out-of-line allocation.
TEST(MetadataTest, installCommonValueWitnesses_pod_indirect) {
ValueWitnessTable testTable;
FullMetadata<Metadata> testMetadata{{&testTable}, {MetadataKind::Opaque}};
// rdar://problem/21375421 - pod_indirect_initializeBufferWithTakeOfBuffer
// should move ownership of a fixed-size buffer.
// One byte larger than ValueBuffer => does not fit inline.
testTable.size = sizeof(ValueBuffer) + 1;
testTable.flags = ValueWitnessFlags()
.withAlignment(alignof(ValueBuffer))
.withPOD(true)
.withBitwiseTakable(true)
.withInlineStorage(false);
testTable.stride = sizeof(ValueBuffer) + alignof(ValueBuffer);
installCommonValueWitnesses(&testTable);
// Replace allocateBuffer and destroyBuffer with logging versions.
testTable.allocateBuffer =
[](ValueBuffer *buf, const Metadata *self) -> OpaqueValue * {
void *mem = malloc(self->getValueWitnesses()->size);
*reinterpret_cast<void**>(buf) = mem;
AllocatedBuffer = mem;
return reinterpret_cast<OpaqueValue *>(mem);
};
testTable.destroyBuffer =
[](ValueBuffer *buf, const Metadata *self) -> void {
void *mem = *reinterpret_cast<void**>(buf);
DeallocatedBuffer = mem;
free(mem);
};
// Canary words after each buffer detect out-of-bounds writes.
struct {
ValueBuffer buffer;
uintptr_t canary;
} buf1{{}, 0x5A5A5A5AU}, buf2{{}, 0xA5A5A5A5U};
testTable.allocateBuffer(&buf1.buffer, &testMetadata);
testTable.initializeBufferWithTakeOfBuffer(&buf2.buffer, &buf1.buffer,
&testMetadata);
testTable.destroyBuffer(&buf2.buffer, &testMetadata);
// The exact allocation handed out must be the one destroyed via buf2,
// i.e. ownership moved rather than a fresh allocation being made.
EXPECT_EQ(AllocatedBuffer, DeallocatedBuffer);
EXPECT_EQ(buf1.canary, (uintptr_t)0x5A5A5A5AU);
EXPECT_EQ(buf2.canary, (uintptr_t)0xA5A5A5A5U);
}
// We cannot construct RelativeDirectPointer instances, so define
// a "shadow" struct for that purpose
// Field-for-field mirror of GenericWitnessTable (size equality is asserted
// in the test below); the int32_t fields hold hand-computed relative offsets.
struct GenericWitnessTableStorage {
uint16_t WitnessTableSizeInWords;
uint16_t WitnessTablePrivateSizeInWords;
int32_t Protocol;
int32_t Pattern;
int32_t Instantiator;
void *PrivateData[swift::NumGenericMetadataPrivateDataWords];
};
// Stores into *ptr the signed 32-bit offset from the field's own address to
// 'value', emulating what the compiler emits for a RelativeDirectPointer.
// A null value is encoded as offset zero.
template<typename T>
static void initializeRelativePointer(int32_t *ptr, T value) {
  if (value == nullptr) {
    *ptr = 0;
    return;
  }
  auto target = (uintptr_t) value;
  auto base = (uintptr_t) ptr;
  *ptr = (int32_t) (target - base);
}
// Tests for resilient witness table instantiation, with runtime-provided
// default requirements.
// Instantiator callback handed to swift_getGenericWitnessTable: checks the
// arguments the runtime forwards and fills in the one witness that is only
// known at instantiation time.
static void witnessTableInstantiator(WitnessTable *instantiatedTable,
                                     const Metadata *type,
                                     void * const *instantiationArgs) {
  EXPECT_EQ(type, nullptr);
  EXPECT_EQ(instantiationArgs, nullptr);

  auto words = (void **) instantiatedTable;
  // The first two witnesses come straight from the conformance pattern.
  EXPECT_EQ(words[0], (void *) 123);
  EXPECT_EQ(words[1], (void *) 234);

  // The last witness is computed dynamically at instantiation time.
  words[2] = (void *) 345;
}
// A mock protocol descriptor with some default witnesses at the end.
//
// Note: It is not standards-compliant to compare function pointers for
// equality, so we just use fake addresses instead.
struct TestProtocol {
ProtocolDescriptor descriptor;
// Default witnesses; placed immediately after the descriptor so the
// runtime can find them there (layout assumption of this mock — the real
// compiler emits them adjacent to the descriptor).
const void *witnesses[2] = {
(void *) 996633,
(void *) 336699
};
TestProtocol()
: descriptor("TestProtocol",
nullptr,
ProtocolDescriptorFlags().withResilient(true)) {
// 3 required witnesses + 2 defaulted ones = 5 total.
descriptor.MinimumWitnessTableSizeInWords = 3;
descriptor.DefaultWitnessTableSizeInWords = 2;
}
};
// All of these have to be global to relative reference each other, and
// the instantiator function.
TestProtocol testProtocol;
// One storage per scenario in the getGenericWitnessTable test below.
GenericWitnessTableStorage tableStorage1;
GenericWitnessTableStorage tableStorage2;
GenericWitnessTableStorage tableStorage3;
GenericWitnessTableStorage tableStorage4;
// The conformance's witness pattern (fake function addresses).
const void *witnesses[] = {
(void *) 123,
(void *) 234,
(void *) 0, // filled in by instantiator function
(void *) 456,
(void *) 567
};
// Exercises swift_getGenericWitnessTable across four scenarios: pattern
// returned as-is, full instantiation, and instantiation with one or both
// default witnesses supplied by the protocol descriptor.
TEST(WitnessTableTest, getGenericWitnessTable) {
// Sanity-check the shadow struct and the mock protocol's default witnesses.
EXPECT_EQ(sizeof(GenericWitnessTableStorage), sizeof(GenericWitnessTable));
EXPECT_EQ(testProtocol.descriptor.getDefaultWitnesses()[0],
(void *) 996633);
EXPECT_EQ(testProtocol.descriptor.getDefaultWitnesses()[1],
(void *) 336699);
// Conformance provides all requirements, and we don't have an
// instantiator, so we can just return the pattern.
{
tableStorage1.WitnessTableSizeInWords = 5;
tableStorage1.WitnessTablePrivateSizeInWords = 0;
initializeRelativePointer(&tableStorage1.Protocol, &testProtocol.descriptor);
initializeRelativePointer(&tableStorage1.Pattern, witnesses);
initializeRelativePointer(&tableStorage1.Instantiator, nullptr);
GenericWitnessTable *table = reinterpret_cast<GenericWitnessTable *>(
&tableStorage1);
RaceTest_ExpectEqual<const WitnessTable *>(
[&]() -> const WitnessTable * {
const WitnessTable *instantiatedTable =
swift_getGenericWitnessTable(table, nullptr, nullptr);
EXPECT_EQ(instantiatedTable, table->Pattern.get());
return instantiatedTable;
});
}
// Conformance provides all requirements, but we have private storage
// and an initializer, so we must instantiate.
{
tableStorage2.WitnessTableSizeInWords = 5;
tableStorage2.WitnessTablePrivateSizeInWords = 1;
initializeRelativePointer(&tableStorage2.Protocol, &testProtocol.descriptor);
initializeRelativePointer(&tableStorage2.Pattern, witnesses);
initializeRelativePointer(&tableStorage2.Instantiator,
(const void *) witnessTableInstantiator);
GenericWitnessTable *table = reinterpret_cast<GenericWitnessTable *>(
&tableStorage2);
RaceTest_ExpectEqual<const WitnessTable *>(
[&]() -> const WitnessTable * {
const WitnessTable *instantiatedTable =
swift_getGenericWitnessTable(table, nullptr, nullptr);
EXPECT_NE(instantiatedTable, table->Pattern.get());
// Index -1 is the zero-initialized private storage word; slot 2 was
// filled in by witnessTableInstantiator.
EXPECT_EQ(((void **) instantiatedTable)[-1], (void *) 0);
EXPECT_EQ(((void **) instantiatedTable)[0], (void *) 123);
EXPECT_EQ(((void **) instantiatedTable)[1], (void *) 234);
EXPECT_EQ(((void **) instantiatedTable)[2], (void *) 345);
EXPECT_EQ(((void **) instantiatedTable)[3], (void *) 456);
EXPECT_EQ(((void **) instantiatedTable)[4], (void *) 567);
return instantiatedTable;
});
}
// Conformance needs one default requirement to be filled in
{
// Pattern supplies only 4 of 5 witnesses; the last comes from the
// protocol's default witness table.
tableStorage3.WitnessTableSizeInWords = 4;
tableStorage3.WitnessTablePrivateSizeInWords = 1;
initializeRelativePointer(&tableStorage3.Protocol, &testProtocol.descriptor);
initializeRelativePointer(&tableStorage3.Pattern, witnesses);
initializeRelativePointer(&tableStorage3.Instantiator, witnessTableInstantiator);
GenericWitnessTable *table = reinterpret_cast<GenericWitnessTable *>(
&tableStorage3);
RaceTest_ExpectEqual<const WitnessTable *>(
[&]() -> const WitnessTable * {
const WitnessTable *instantiatedTable =
swift_getGenericWitnessTable(table, nullptr, nullptr);
EXPECT_NE(instantiatedTable, table->Pattern.get());
EXPECT_EQ(((void **) instantiatedTable)[-1], (void *) 0);
EXPECT_EQ(((void **) instantiatedTable)[0], (void *) 123);
EXPECT_EQ(((void **) instantiatedTable)[1], (void *) 234);
EXPECT_EQ(((void **) instantiatedTable)[2], (void *) 345);
EXPECT_EQ(((void **) instantiatedTable)[3], (void *) 456);
EXPECT_EQ(((void **) instantiatedTable)[4], (void *) 336699);
return instantiatedTable;
});
}
// Fourth case: conformance needs both default requirements
// to be filled in
{
tableStorage4.WitnessTableSizeInWords = 3;
tableStorage4.WitnessTablePrivateSizeInWords = 1;
initializeRelativePointer(&tableStorage4.Protocol, &testProtocol.descriptor);
initializeRelativePointer(&tableStorage4.Pattern, witnesses);
initializeRelativePointer(&tableStorage4.Instantiator, witnessTableInstantiator);
GenericWitnessTable *table = reinterpret_cast<GenericWitnessTable *>(
&tableStorage4);
RaceTest_ExpectEqual<const WitnessTable *>(
[&]() -> const WitnessTable * {
const WitnessTable *instantiatedTable =
swift_getGenericWitnessTable(table, nullptr, nullptr);
EXPECT_NE(instantiatedTable, table->Pattern.get());
EXPECT_EQ(((void **) instantiatedTable)[-1], (void *) 0);
EXPECT_EQ(((void **) instantiatedTable)[0], (void *) 123);
EXPECT_EQ(((void **) instantiatedTable)[1], (void *) 234);
EXPECT_EQ(((void **) instantiatedTable)[2], (void *) 345);
EXPECT_EQ(((void **) instantiatedTable)[3], (void *) 996633);
EXPECT_EQ(((void **) instantiatedTable)[4], (void *) 336699);
return instantiatedTable;
});
}
}
| gmilos/swift | unittests/runtime/Metadata.cpp | C++ | apache-2.0 | 34,344 |
// Karma configuration
// Generated on Thu Aug 21 2014 10:24:39 GMT+0200 (CEST)
module.exports = function(config) {
config.set({
// base path that will be used to resolve all patterns (eg. files, exclude)
basePath: '',
// frameworks to use
// available frameworks: https://npmjs.org/browse/keyword/karma-adapter
frameworks: ['mocha', 'chai-jquery', 'jquery-1.8.3', 'sinon-chai'],
// plugins are listed explicitly (rather than auto-loaded from
// node_modules) — every framework/launcher/reporter used below must have
// its backing plugin here
plugins: [
'karma-mocha',
'karma-chai',
'karma-sinon-chai',
'karma-chrome-launcher',
'karma-phantomjs-launcher',
'karma-jquery',
'karma-chai-jquery',
'karma-mocha-reporter'
],
// list of files / patterns to load in the browser
// note: angular must load before angular-mocks, which must load before
// the sources and specs
files: [
'bower/angular/angular.js',
'bower/angular-mocks/angular-mocks.js',
'src/**/*.js',
'test/unit/**/*.js'
],
// list of files to exclude
exclude: [
],
// preprocess matching files before serving them to the browser
// available preprocessors: https://npmjs.org/browse/keyword/karma-preprocessor
preprocessors: {
},
// test results reporter to use
// possible values: 'dots', 'progress'
// available reporters: https://npmjs.org/browse/keyword/karma-reporter
reporters: ['mocha'],
// web server port
port: 9876,
// enable / disable colors in the output (reporters and logs)
colors: true,
// level of logging
// possible values: config.LOG_DISABLE || config.LOG_ERROR || config.LOG_WARN || config.LOG_INFO || config.LOG_DEBUG
logLevel: config.LOG_INFO,
// enable / disable watching file and executing tests whenever any file changes
autoWatch: true,
// start these browsers
// available browser launchers: https://npmjs.org/browse/keyword/karma-launcher
browsers: ['PhantomJS'],
// Continuous Integration mode
// if true, Karma captures browsers, runs the tests and exits
singleRun: false
});
};
| tacticianstudios/quarky | platforms/windows/www/lib/angular-jwt/karma-src.conf.js | JavaScript | apache-2.0 | 1,954 |
/*
* Copyright 2000-2009 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.compiler.impl.rmiCompiler;
import com.intellij.compiler.OutputParser;
import com.intellij.compiler.impl.CompilerUtil;
import com.intellij.compiler.impl.javaCompiler.CompilerParsingThread;
import com.intellij.compiler.impl.javaCompiler.FileObject;
import com.intellij.compiler.make.Cache;
import com.intellij.compiler.make.CacheCorruptedException;
import com.intellij.compiler.make.DependencyCache;
import com.intellij.compiler.make.MakeUtil;
import com.intellij.openapi.application.ApplicationManager;
import com.intellij.openapi.compiler.*;
import com.intellij.openapi.compiler.ex.CompileContextEx;
import com.intellij.openapi.diagnostic.Logger;
import com.intellij.openapi.module.Module;
import com.intellij.openapi.progress.ProgressIndicator;
import com.intellij.openapi.project.Project;
import com.intellij.openapi.projectRoots.Sdk;
import com.intellij.openapi.roots.ModuleRootManager;
import com.intellij.openapi.roots.OrderEnumerator;
import com.intellij.openapi.roots.ProjectFileIndex;
import com.intellij.openapi.roots.ProjectRootManager;
import com.intellij.openapi.util.Computable;
import com.intellij.openapi.util.Pair;
import com.intellij.openapi.util.io.FileUtil;
import com.intellij.openapi.vfs.LocalFileSystem;
import com.intellij.openapi.vfs.VfsUtil;
import com.intellij.openapi.vfs.VirtualFile;
import com.intellij.util.ArrayUtil;
import com.intellij.util.Chunk;
import com.intellij.util.PathsList;
import com.intellij.util.containers.ContainerUtil;
import org.jetbrains.annotations.NonNls;
import org.jetbrains.annotations.NotNull;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.File;
import java.io.IOException;
import java.util.*;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
/**
 * Post-processes compiled classes with the JDK's {@code rmic} tool,
 * generating RMI stub/skeleton/tie classes for remote objects.
 *
 * @author Eugene Zhuravlev
 * Date: Mar 29, 2004
 */
public class RmicCompiler implements ClassPostProcessingCompiler{
private static final Logger LOG = Logger.getInstance("#com.intellij.compiler.impl.rmiCompiler.RmicCompiler");
//private static final FileFilter CLASSES_AND_DIRECTORIES_FILTER = new FileFilter() {
// public boolean accept(File pathname) {
// return pathname.isDirectory() || pathname.getName().endsWith(".class");
// }
//};
//private static final String REMOTE_INTERFACE_NAME = Remote.class.getName();
/**
 * Collects the compiled class files that need rmic post-processing: every
 * class the dependency cache marks as remote (or that used to be remote, so
 * stale generated files can be cleaned up). Runs inside a read action
 * because it touches the project's root/file indices.
 *
 * @param context current compile context
 * @return one processing item per affected output class file; empty when
 *         rmic is disabled or the cache is unavailable
 */
@NotNull
public ProcessingItem[] getProcessingItems(final CompileContext context) {
// Respect the per-project "enable rmic" option.
if (!RmicConfiguration.getOptions(context.getProject()).IS_EANABLED) {
return ProcessingItem.EMPTY_ARRAY;
}
final Project project = context.getProject();
final List<ProcessingItem> items = new ArrayList<ProcessingItem>();
ApplicationManager.getApplication().runReadAction(new Runnable() {
public void run() {
DependencyCache dependencyCache = ((CompileContextEx)context).getDependencyCache();
try {
final Cache cache = dependencyCache.getCache();
final int[] allClassNames = cache.getAllClassNames();
final ProjectFileIndex fileIndex = ProjectRootManager.getInstance(project).getFileIndex();
final LocalFileSystem lfs = LocalFileSystem.getInstance();
for (final int className : allClassNames) {
// Only concrete remote classes need stubs; interfaces do not.
final boolean isRemoteObject = cache.isRemote(className) && !MakeUtil.isInterface(cache.getFlags(className));
// wasRemote() classes are included so their stale stubs get deleted.
if (!isRemoteObject && !dependencyCache.wasRemote(className)) {
continue;
}
final String outputPath = cache.getPath(className);
if (outputPath == null) {
continue;
}
final VirtualFile outputClassFile = lfs.findFileByPath(outputPath.replace(File.separatorChar, '/'));
if (outputClassFile == null) {
continue;
}
// Map the output .class back to its source to find the owning module.
final VirtualFile sourceFile = ((CompileContextEx)context).getSourceFileByOutputFile(outputClassFile);
if (sourceFile == null) {
continue;
}
final Module module = context.getModuleByFile(sourceFile);
if (module == null) {
continue;
}
// Test sources go to the test output root, production sources to the
// regular one.
final VirtualFile outputDir = fileIndex.isInTestSourceContent(sourceFile)
? context.getModuleOutputDirectoryForTests(module)
: context.getModuleOutputDirectory(module);
if (outputDir == null) {
continue;
}
if (!VfsUtil.isAncestor(outputDir, outputClassFile, true)) {
LOG.error(outputClassFile.getPath() + " should be located under the output root " + outputDir.getPath());
}
final ProcessingItem item = createProcessingItem(module, outputClassFile, outputDir,
isRemoteObject, dependencyCache.resolve(className));
items.add(item);
}
}
catch (CacheCorruptedException e) {
context.addMessage(CompilerMessageCategory.ERROR, e.getMessage(), null, -1, -1);
LOG.info(e);
}
}
});
return items.toArray(new ProcessingItem[items.size()]);
}
/**
 * Builds an rmic processing item for a single compiled class file.
 *
 * @param module          module owning the class
 * @param outputClassFile the compiled .class file
 * @param outputDir       output root the generated files belong under
 * @param remoteObject    whether the class currently implements a remote interface
 * @param qName           the class's qualified name
 * @return the configured processing item
 */
public static ProcessingItem createProcessingItem(final Module module,
                                                  final VirtualFile outputClassFile,
                                                  final VirtualFile outputDir,
                                                  final boolean remoteObject, String qName) {
  final RmicProcessingItem processingItem =
    new RmicProcessingItem(module, outputClassFile, new File(outputDir.getPath()), qName);
  processingItem.setIsRemoteObject(remoteObject);
  return processingItem;
}
/**
 * Runs rmic over the collected items. Items are grouped by
 * (module, output directory) so each group can be handled with a single
 * rmic invocation; previously generated stub/skel/tie files are deleted
 * first, and classes that are no longer remote are dropped from the group.
 *
 * @param context current compile context
 * @param items   items returned by {@link #getProcessingItems}
 * @return the items that were successfully processed (their generated-file
 *         timestamps refreshed), or an empty array when rmic is disabled
 */
public ProcessingItem[] process(CompileContext context, ProcessingItem[] items) {
  final Project project = context.getProject();
  if (!RmicConfiguration.getOptions(project).IS_EANABLED) {
    return ProcessingItem.EMPTY_ARRAY;
  }
  final ProgressIndicator progressIndicator = context.getProgressIndicator();
  progressIndicator.pushState();
  try {
    progressIndicator.setText(CompilerBundle.message("progress.generating.rmi.stubs"));
    // Group the items: rmic is invoked once per (module, output dir) pair.
    final Map<Pair<Module, File>, List<RmicProcessingItem>> sortedByModuleAndOutputPath = new HashMap<Pair<Module,File>, List<RmicProcessingItem>>();
    for (ProcessingItem item1 : items) {
      final RmicProcessingItem item = (RmicProcessingItem)item1;
      final Pair<Module, File> moduleOutputPair = new Pair<Module, File>(item.getModule(), item.getOutputDir());
      List<RmicProcessingItem> dirItems = sortedByModuleAndOutputPath.get(moduleOutputPair);
      if (dirItems == null) {
        dirItems = new ArrayList<RmicProcessingItem>();
        sortedByModuleAndOutputPath.put(moduleOutputPair, dirItems);
      }
      dirItems.add(item);
    }
    final List<ProcessingItem> processed = new ArrayList<ProcessingItem>();
    final JavacOutputParserPool parserPool = new JavacOutputParserPool(project, context);
    for (final Pair<Module, File> pair : sortedByModuleAndOutputPath.keySet()) {
      if (progressIndicator.isCanceled()) {
        break;
      }
      final List<RmicProcessingItem> dirItems = sortedByModuleAndOutputPath.get(pair);
      try {
        // should delete all previously generated files for the remote class if there are any
        // (typed iterator instead of the former raw Iterator + cast)
        for (Iterator<RmicProcessingItem> itemIterator = dirItems.iterator(); itemIterator.hasNext();) {
          final RmicProcessingItem item = itemIterator.next();
          item.deleteGeneratedFiles();
          if (!item.isRemoteObject()) {
            // the object was remote and currently is not, so remove it from the list
            // and do not generate stubs for it
            itemIterator.remove();
          }
        }
        if (!dirItems.isEmpty()) {
          final RmicProcessingItem[] successfullyProcessed = invokeRmic(context, parserPool, pair.getFirst(), dirItems, pair.getSecond());
          ContainerUtil.addAll(processed, successfullyProcessed);
        }
        progressIndicator.setFraction(((double)processed.size()) / ((double)items.length));
      }
      catch (IOException e) {
        context.addMessage(CompilerMessageCategory.ERROR, e.getMessage(), null, -1, -1);
        LOG.info(e);
      }
    }
    // update state so that the latest timestamps are recorded by make
    final ProcessingItem[] processedItems = processed.toArray(new ProcessingItem[processed.size()]);
    final List<File> filesToRefresh = new ArrayList<File>(processedItems.length * 3);
    for (ProcessingItem processedItem : processedItems) {
      RmicProcessingItem item = (RmicProcessingItem)processedItem;
      item.updateState();
      filesToRefresh.add(item.myStub);
      filesToRefresh.add(item.mySkel);
      filesToRefresh.add(item.myTie);
    }
    CompilerUtil.refreshIOFiles(filesToRefresh);
    return processedItems;
  }
  finally {
    progressIndicator.popState();
  }
}
/**
 * Invokes the JDK "rmic" tool for one group of items that share a module and an output
 * directory, and reports which items were actually compiled.
 *
 * @param context    compile context used to configure output parsing
 * @param parserPool pool used to obtain a javac-style output parser for the module's JDK
 * @param module     module whose SDK and settings drive the rmic invocation
 * @param dirItems   items to compile; all share the same output directory
 * @param outputDir  directory passed to rmic via -d
 * @return the subset of dirItems whose generated class files were seen in rmic output
 * @throws IOException if the rmic process cannot be started
 */
private static RmicProcessingItem[] invokeRmic(final CompileContext context,
final JavacOutputParserPool parserPool, final Module module,
final List<RmicProcessingItem> dirItems,
final File outputDir
) throws IOException{
final Sdk jdk = ModuleRootManager.getInstance(module).getSdk();
// Maps the forward-slash-normalized path of every expected generated file
// (stub, skeleton, tie) back to the item that expects it.
final Map<String, RmicProcessingItem> pathToItemMap = new HashMap<String, RmicProcessingItem>();
// Building the command line reads project model state, so it runs in a read action.
final String[] cmdLine = ApplicationManager.getApplication().runReadAction(new Computable<String[]>() {
public String[] compute() {
for (final RmicProcessingItem item : dirItems) {
pathToItemMap.put(item.myStub.getPath().replace(File.separatorChar, '/'), item);
pathToItemMap.put(item.mySkel.getPath().replace(File.separatorChar, '/'), item);
pathToItemMap.put(item.myTie.getPath().replace(File.separatorChar, '/'), item);
}
return createStartupCommand(module, outputDir.getPath(), dirItems.toArray(new RmicProcessingItem[dirItems.size()]));
}
});
if (LOG.isDebugEnabled()) {
StringBuilder buf = new StringBuilder();
for (int idx = 0; idx < cmdLine.length; idx++) {
if (idx > 0) {
buf.append(" ");
}
buf.append(cmdLine[idx]);
}
LOG.debug(buf.toString());
}
// obtain parser before running the process because configuring parser may involve starting another process
final OutputParser outputParser = parserPool.getJavacOutputParser(jdk);
final Process process = Runtime.getRuntime().exec(cmdLine);
final Set<RmicProcessingItem> successfullyCompiledItems = new HashSet<RmicProcessingItem>();
// The parsing thread consumes rmic output; every class file it reports is mapped back
// to its item and recorded as successfully compiled.
final CompilerParsingThread parsingThread = new CompilerParsingThread(process, outputParser, false, true,context) {
protected void processCompiledClass(FileObject classFileToProcess) {
String key = classFileToProcess.getFile().getPath().replace(File.separatorChar, '/');
final RmicProcessingItem item = pathToItemMap.get(key);
if (item != null) {
successfullyCompiledItems.add(item);
}
}
};
final Future<?> parsingThreadFuture = ApplicationManager.getApplication().executeOnPooledThread(parsingThread);
try {
process.waitFor();
}
catch (InterruptedException ignored) {
// NOTE(review): the interrupt status is swallowed here and below without being
// restored -- presumably intentional at this call site; confirm.
}
finally {
// Tell the parsing thread the process is done so it can stop reading.
parsingThread.setProcessTerminated(true);
}
try {
parsingThreadFuture.get();
}
catch (InterruptedException ignored) {
}
catch (ExecutionException ignored) {
}
return successfullyCompiledItems.toArray(new RmicProcessingItem[successfullyCompiledItems.size()]);
}
/**
 * Builds the rmic command line for the given module: locates the rmic binary under the
 * module SDK's home directory, then appends locale options, verbosity, the user-configured
 * rmic options, the compilation classpath, the output directory, and finally the qualified
 * names of the classes to process.
 *
 * @throws IllegalArgumentException if the module SDK has no home directory
 */
private static String[] createStartupCommand(final Module module, final String outputPath, final RmicProcessingItem[] items) {
  final Sdk sdk = ModuleRootManager.getInstance(module).getSdk();
  final VirtualFile sdkHome = sdk.getHomeDirectory();
  if (sdkHome == null) {
    throw new IllegalArgumentException(CompilerBundle.jdkHomeNotFoundMessage(sdk));
  }
  // <sdk-home>/bin/rmic, with platform-specific separators.
  final String sdkPath = sdkHome.getPath().replace('/', File.separatorChar);
  @NonNls final String rmicBinary = sdkPath + File.separator + "bin" + File.separator + "rmic";
  @NonNls final List<String> args = new ArrayList<String>();
  args.add(rmicBinary);
  CompilerUtil.addLocaleOptions(args, true);
  args.add("-verbose");
  final Project project = module.getProject();
  // User-configured rmic options for this module's chunk.
  ContainerUtil.addAll(args, new RmicSettingsBuilder(RmicConfiguration.getOptions(project)).getOptions(new Chunk<Module>(module)));
  args.add("-classpath");
  args.add(getCompilationClasspath(module));
  args.add("-d");
  args.add(outputPath);
  for (final RmicProcessingItem item : items) {
    args.add(item.getClassQName());
  }
  return ArrayUtil.toStringArray(args);
}
/**
 * @return the human-readable compiler name shown in the UI
 */
@NotNull
public String getDescription() {
return CompilerBundle.message("rmi.compiler.description");
}
// This compiler has no user configuration that can be invalid, so validation always passes.
public boolean validateConfiguration(CompileScope scope) {
return true;
}
/*
private void addAllRemoteFilesFromModuleOutput(final CompileContext context, final Module module, final List<ProcessingItem> items, final File outputDir, File fromDir, final JavaClass remoteInterface) {
final File[] children = fromDir.listFiles(CLASSES_AND_DIRECTORIES_FILTER);
for (int idx = 0; idx < children.length; idx++) {
final File child = children[idx];
if (child.isDirectory()) {
addAllRemoteFilesFromModuleOutput(context, module, items, outputDir, child, remoteInterface);
}
else {
final String path = child.getPath();
try {
final ClassParser classParser = new ClassParser(path);
final JavaClass javaClass = classParser.parse();
// important! Need this in order to resolve other classes in the project (e.g. superclasses)
javaClass.setRepository(BcelUtils.getActiveRepository());
if (isRmicCompilable(javaClass, remoteInterface)) {
ApplicationManager.getApplication().runReadAction(new Runnable() {
public void run() {
final VirtualFile outputClassFile = LocalFileSystem.getInstance().findFileByIoFile(child);
if (outputClassFile != null) {
items.add(new RmicProcessingItem(module, outputClassFile, outputDir, javaClass.getClassName()));
}
}
});
}
}
catch (IOException e) {
context.addMessage(CompilerMessageCategory.ERROR, "Cannot parse class file " + path + ": " + e.toString(), null, -1, -1);
}
catch (ClassFormatException e) {
context.addMessage(CompilerMessageCategory.ERROR, "Class format exception: " + e.getMessage() + " File: " + path, null, -1, -1);
}
}
}
}
*/
/*
private boolean isRmicCompilable(final JavaClass javaClass, final JavaClass remoteInterface) {
// stubs are needed for classes that _directly_ implement remote interfaces
if (javaClass.isInterface() || isGenerated(javaClass)) {
return false;
}
final JavaClass[] directlyImplementedInterfaces = javaClass.getInterfaces();
if (directlyImplementedInterfaces != null) {
for (int i = 0; i < directlyImplementedInterfaces.length; i++) {
if (directlyImplementedInterfaces[i].instanceOf(remoteInterface)) {
return true;
}
}
}
return false;
}
*/
/*
private boolean isGenerated(JavaClass javaClass) {
final String sourceFileName = javaClass.getSourceFileName();
return sourceFileName == null || !sourceFileName.endsWith(".java");
}
*/
// Deserializes the four timestamps (remote class, stub, skel, tie) in the same
// order RemoteClassValidityState.save() writes them.
public ValidityState createValidityState(DataInput in) throws IOException {
return new RemoteClassValidityState(in.readLong(), in.readLong(), in.readLong(), in.readLong());
}
/**
 * Computes the classpath string for the rmic invocation: compile-time entries of the
 * module (recursively, exported only), excluding the SDK.
 */
private static String getCompilationClasspath(Module module) {
  return ModuleRootManager.getInstance(module)
    .orderEntries()
    .withoutSdk()
    .compileOnly()
    .recursively()
    .exportedOnly()
    .getPathsList()
    .getPathsString();
}
/**
 * Validity state for a remote class: the timestamps of the compiled class file itself and
 * of its generated stub, skeleton and tie class files. Two states are equal only when all
 * four timestamps match.
 */
private static final class RemoteClassValidityState implements ValidityState {
  private final long myRemoteClassTimestamp;
  private final long myStubTimestamp;
  private final long mySkelTimestamp;
  private final long myTieTimestamp;

  private RemoteClassValidityState(long remoteClassTimestamp, long stubTimestamp, long skelTimestamp, long tieTimestamp) {
    myRemoteClassTimestamp = remoteClassTimestamp;
    myStubTimestamp = stubTimestamp;
    mySkelTimestamp = skelTimestamp;
    myTieTimestamp = tieTimestamp;
  }

  public boolean equalsTo(ValidityState otherState) {
    if (!(otherState instanceof RemoteClassValidityState)) {
      return false;
    }
    final RemoteClassValidityState that = (RemoteClassValidityState)otherState;
    return myRemoteClassTimestamp == that.myRemoteClassTimestamp
        && myStubTimestamp == that.myStubTimestamp
        && mySkelTimestamp == that.mySkelTimestamp
        && myTieTimestamp == that.myTieTimestamp;
  }

  // Write order must stay in sync with the reader in createValidityState(DataInput).
  public void save(DataOutput out) throws IOException {
    out.writeLong(myRemoteClassTimestamp);
    out.writeLong(myStubTimestamp);
    out.writeLong(mySkelTimestamp);
    out.writeLong(myTieTimestamp);
  }
}
/**
 * A single class that needs RMI stub generation: tracks the compiled class file, its module
 * and output directory, and the locations of the three files rmic may generate for it
 * (stub, skeleton and tie).
 */
private static final class RmicProcessingItem implements ProcessingItem {
private final Module myModule;
private final VirtualFile myOutputClassFile;
private final File myOutputDir;
private final String myQName;
private RemoteClassValidityState myState;
private final File myStub;
private final File mySkel;
private final File myTie;
private boolean myIsRemoteObject = false;
private RmicProcessingItem(Module module, final VirtualFile outputClassFile, File outputDir, String qName) {
myModule = module;
myOutputClassFile = outputClassFile;
myOutputDir = outputDir;
myQName = qName;
final String relativePath;
final String baseName;
// Split the qualified name into a package path and a simple class name.
final int index = qName.lastIndexOf('.');
if (index >= 0) {
relativePath = qName.substring(0, index + 1).replace('.', '/');
baseName = qName.substring(index + 1);
}
else {
relativePath = "";
baseName = qName;
}
final String path = outputDir.getPath().replace(File.separatorChar, '/') + "/" + relativePath;
// Expected rmic outputs: <Name>_Stub.class, <Name>_Skel.class and _<Name>_Tie.class.
// NOTE(review): the leading underscore on the tie file presumably follows rmic's
// tie naming convention -- confirm against the rmic version in use.
//noinspection HardCodedStringLiteral
myStub = new File(path + "/" + baseName + "_Stub.class");
//noinspection HardCodedStringLiteral
mySkel = new File(path + "/" + baseName + "_Skel.class");
//noinspection HardCodedStringLiteral
myTie = new File(path + "/_" + baseName + "_Tie.class");
updateState();
}
public boolean isRemoteObject() {
return myIsRemoteObject;
}
public void setIsRemoteObject(boolean isRemote) {
myIsRemoteObject = isRemote;
}
@NotNull
public VirtualFile getFile() {
return myOutputClassFile;
}
public ValidityState getValidityState() {
return myState;
}
// Re-captures the current timestamps of the class file and its three generated files.
public void updateState() {
myState = new RemoteClassValidityState(
myOutputClassFile.getTimeStamp(),
getTimestamp(myStub),
getTimestamp(mySkel),
getTimestamp(myTie)
);
}
// File.lastModified() returns 0 for a missing file; normalize that to -1.
private static long getTimestamp(File file) {
long l = file.lastModified();
return l == 0 ? -1L : l;
}
// Deletes any previously generated stub/skel/tie files, refreshing the VFS for each
// file that was actually removed.
public void deleteGeneratedFiles() {
if (FileUtil.delete(myStub)) {
CompilerUtil.refreshIOFile(myStub);
}
if (FileUtil.delete(mySkel)) {
CompilerUtil.refreshIOFile(mySkel);
}
if (FileUtil.delete(myTie)) {
CompilerUtil.refreshIOFile(myTie);
}
}
public String getClassQName() {
return myQName;
}
public File getOutputDir() {
return myOutputDir;
}
public Module getModule() {
return myModule;
}
}
}
| lshain-android-source/tools-idea | java/compiler/impl/src/com/intellij/compiler/impl/rmiCompiler/RmicCompiler.java | Java | apache-2.0 | 20,851 |
/*
* Copyright 2000-2011 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.openapi.roots.libraries.ui;
import com.intellij.openapi.fileTypes.FileType;
import com.intellij.openapi.progress.ProgressIndicator;
import com.intellij.openapi.roots.OrderRootType;
import com.intellij.openapi.vfs.VirtualFile;
import org.jetbrains.annotations.NotNull;
/**
* Implementation of {@link RootDetector} which detects a root by presence of files of some specified type under it
*
* @author nik
* @deprecated use {@link DescendentBasedRootFilter#createFileTypeBasedFilter(OrderRootType, boolean, FileType, String)} instead
*/
public class FileTypeBasedRootFilter extends RootFilter {
private final FileType myFileType;
// All scanning is delegated to DescendentBasedRootFilter, using isFileAccepted as the predicate.
private final RootFilter myDelegate;
public FileTypeBasedRootFilter(OrderRootType rootType, boolean jarDirectory, @NotNull FileType fileType, String presentableRootTypeName) {
super(rootType, jarDirectory, presentableRootTypeName);
myFileType = fileType;
myDelegate = new DescendentBasedRootFilter(rootType, jarDirectory, presentableRootTypeName, this::isFileAccepted);
}
@Override
public boolean isAccepted(@NotNull VirtualFile rootCandidate, @NotNull ProgressIndicator progressIndicator) {
return myDelegate.isAccepted(rootCandidate, progressIndicator);
}
// Accepts a file when its detected file type equals the configured one; subclasses may override.
protected boolean isFileAccepted(VirtualFile virtualFile) {
return virtualFile.getFileType().equals(myFileType);
}
}
| ThiagoGarciaAlves/intellij-community | platform/lang-impl/src/com/intellij/openapi/roots/libraries/ui/FileTypeBasedRootFilter.java | Java | apache-2.0 | 1,972 |
# Eve W-Space
# Copyright 2014 Andrew Austin and contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.conf.urls import patterns, include, url
# Site claim/unclaim actions scoped to a specific fleet member.
fleetmembersitepatterns = patterns('SiteTracker.views',
url(r'unclaim/$', 'unclaim_site'),
url(r'claim/$', 'claim_site'),
)
# Per-site actions within a fleet; nests the member-level site actions above.
fleetsitepatterns = patterns('SiteTracker.views',
url(r'delete/$', 'remove_site'),
url(r'member/(?P<memberID>\d+)/', include(fleetmembersitepatterns)),
)
# Boss-panel actions targeting a single fleet member (kick/promote/refresh).
fleetmemberpatterns = patterns('SiteTracker.views',
url(r'kick/$', 'kick_member'),
url(r'promote/$', 'promote_member'),
url(r'$', 'refresh_boss_member'),
)
# Actions on a single fleet, addressed by fleetID from the top-level patterns.
fleetpatterns = patterns('SiteTracker.views',
url(r'join/$', 'join_fleet'),
url(r'leave/$', 'leave_fleet'),
url(r'site/$', 'credit_site'),
url(r'bosspanel/$', 'boss_panel'),
url(r'site/(?P<siteID>\d+)/', include(fleetsitepatterns)),
url(r'member/(?P<memberID>\d+)/', include(fleetmemberpatterns)),
url(r'disband/$', 'disband_fleet'),
)
# Top-level SiteTracker URLs: fleet management plus the status endpoints.
urlpatterns = patterns('SiteTracker.views',
url(r'fleet/new/$', 'create_fleet'),
url(r'fleet/leaveall/$', 'leave_fleet'),
url(r'fleet/$', 'refresh_fleets'),
url(r'fleet/(?P<fleetID>\d+)/', include(fleetpatterns)),
url(r'status/$', 'st_status'),
url(r'$', 'status_bar'),
)
| evewspace/eve-wspace | evewspace/SiteTracker/urls.py | Python | apache-2.0 | 1,930 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides data for the ImageNet ILSVRC 2012 Dataset plus some bounding boxes.
Some images have one or more bounding boxes associated with the label of the
image. See details here: http://image-net.org/download-bboxes
ImageNet is based upon WordNet 3.0. To uniquely identify a synset, we use
"WordNet ID" (wnid), which is a concatenation of POS ( i.e. part of speech )
and SYNSET OFFSET of WordNet. For more information, please refer to the
WordNet documentation[http://wordnet.princeton.edu/wordnet/documentation/].
"There are bounding boxes for over 3000 popular synsets available.
For each synset, there are on average 150 images with bounding boxes."
WARNING: Don't use for object detection, in this case all the bounding boxes
of the image belong to just one class.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
slim = tf.contrib.slim
# TODO(nsilberman): Add tfrecord file type once the script is updated.
# Glob pattern for the sharded source files of one split, e.g. 'train-*'.
_FILE_PATTERN = '%s-*'
# Number of examples in each split of ImageNet ILSVRC 2012.
_SPLITS_TO_SIZES = {
'train': 1281167,
'validation': 50000,
}
# Human-readable descriptions of the decoded items, keyed by item name.
_ITEMS_TO_DESCRIPTIONS = {
'image': 'A color image of varying height and width.',
'label': 'The label id of the image, integer between 0 and 999',
'label_text': 'The text of the label.',
'object/bbox': 'A list of bounding boxes.',
'object/label': 'A list of labels, one per each object.',
}
# 1000 ImageNet classes plus a background class.
_NUM_CLASSES = 1001
# TF-Example feature spec: how each serialized feature is parsed.
_KEYS_TO_FEATURES = {
'image/encoded': tf.FixedLenFeature(
(), tf.string, default_value=''),
'image/format': tf.FixedLenFeature(
(), tf.string, default_value='jpeg'),
'image/class/label': tf.FixedLenFeature(
[], dtype=tf.int64, default_value=-1),
'image/class/text': tf.FixedLenFeature(
[], dtype=tf.string, default_value=''),
'image/object/bbox/xmin': tf.VarLenFeature(
dtype=tf.float32),
'image/object/bbox/ymin': tf.VarLenFeature(
dtype=tf.float32),
'image/object/bbox/xmax': tf.VarLenFeature(
dtype=tf.float32),
'image/object/bbox/ymax': tf.VarLenFeature(
dtype=tf.float32),
'image/object/class/label': tf.VarLenFeature(
dtype=tf.int64),
}
# Decoder handlers mapping the parsed features above to the named items.
_ITEMS_TO_HANDLERS = {
'image': slim.tfexample_decoder.Image('image/encoded', 'image/format'),
'label': slim.tfexample_decoder.Tensor('image/class/label'),
'label_text': slim.tfexample_decoder.Tensor('image/class/text'),
'object/bbox': slim.tfexample_decoder.BoundingBox(
['ymin', 'xmin', 'ymax', 'xmax'], 'image/object/bbox/'),
'object/label': slim.tfexample_decoder.Tensor('image/object/class/label'),
}
def get_split_size(set_name):
  """Return size of train/validation set, or None for an unknown split name."""
  return _SPLITS_TO_SIZES.get(set_name)
def get_decoder():
  """Builds the TF-Example decoder wiring the feature spec to the item handlers."""
  return slim.tfexample_decoder.TFExampleDecoder(
      _KEYS_TO_FEATURES, _ITEMS_TO_HANDLERS)
def get_split(split_name, dataset_dir, file_pattern=None,
              reader=None, use_slim=True):
  """Gets a dataset tuple with instructions for reading ImageNet.

  Args:
    split_name: A train/test split name.
    dataset_dir: The base directory of the dataset sources.
    file_pattern: The file pattern to use when matching the dataset sources.
      It is assumed that the pattern contains a '%s' string so that the split
      name can be inserted.
    reader: The TensorFlow reader type.
    use_slim: Boolean to decide dataset type

  Returns:
    A `Dataset` namedtuple.

  Raises:
    ValueError: if `split_name` is not a valid train/test split.
  """
  if split_name not in _SPLITS_TO_SIZES:
    raise ValueError('split name %s was not recognized.' % split_name)
  pattern = file_pattern or _FILE_PATTERN
  pattern = os.path.join(dataset_dir, pattern % split_name)
  if not use_slim:
    # Plain tf.data pipeline listing the matching source files.
    return tf.data.Dataset.list_files(pattern, shuffle=False)
  # Allowing None in signature so that dataset_factory can use the default.
  record_reader = tf.TFRecordReader if reader is None else reader
  return slim.dataset.Dataset(
      data_sources=pattern,
      reader=record_reader,
      decoder=get_decoder(),
      num_samples=_SPLITS_TO_SIZES[split_name],
      items_to_descriptions=_ITEMS_TO_DESCRIPTIONS,
      num_classes=_NUM_CLASSES)
| mlperf/training_results_v0.5 | v0.5.0/google/cloud_v3.8/ssd-tpuv3-8/code/ssd/model/tpu/models/experimental/inception/imagenet.py | Python | apache-2.0 | 4,975 |
/*
* Copyright (c) 2005-2011 Grameen Foundation USA
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License.
*
* See also http://www.apache.org/licenses/LICENSE-2.0.html for an
* explanation of the license and how it is applied.
*/
package org.mifos.framework.util.helpers;
import java.util.Locale;
import junit.framework.Assert;
import junit.framework.TestCase;
import org.junit.Ignore;
import org.mifos.framework.util.LocalizationConverter;
/**
 * Tests for {@link MifosDoubleConverter} string-to-double conversion under different locales.
 */
public class MifosDoubleConverterTest extends TestCase {
    private MifosDoubleConverter mifosDoubleConverter = null;

    // en_GB style input: decimal point.
    public void testConvert_en_GB() {
        mifosDoubleConverter = new MifosDoubleConverter();
        Double test = new Double(2.0);
        Assert.assertEquals(test, mifosDoubleConverter.convert(String.class, "2.0"));
    }

    /**
     * Currently broken -- incomplete support for multiple locales for numeric input.
     */
    @Ignore
    public void xtestConvert_is_IS() {
        // FIX: the field was never initialized in this test, so re-enabling it would
        // have thrown a NullPointerException before reaching the real assertion.
        mifosDoubleConverter = new MifosDoubleConverter();
        LocalizationConverter converter = new LocalizationConverter();
        converter.setCurrentLocale(new Locale("IS", "is"));
        // is_IS style input: decimal comma.
        Assert.assertEquals(new Double(2.0), mifosDoubleConverter.convert(String.class, "2,0"));
    }
}
| madhav123/gkmaster | application/src/test/java/org/mifos/framework/util/helpers/MifosDoubleConverterTest.java | Java | apache-2.0 | 1,717 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.pig.newplan.logical.visitor;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import org.apache.pig.EvalFunc;
import org.apache.pig.FuncSpec;
import org.apache.pig.PigException;
import org.apache.pig.PigWarning;
import org.apache.pig.data.DataType;
import org.apache.pig.impl.PigContext;
import org.apache.pig.impl.logicalLayer.FrontendException;
import org.apache.pig.impl.logicalLayer.schema.Schema;
import org.apache.pig.impl.logicalLayer.schema.Schema.FieldSchema;
import org.apache.pig.impl.logicalLayer.validators.TypeCheckerException;
import org.apache.pig.impl.plan.CompilationMessageCollector;
import org.apache.pig.impl.plan.CompilationMessageCollector.MessageType;
import org.apache.pig.impl.plan.PlanException;
import org.apache.pig.impl.plan.VisitorException;
import org.apache.pig.impl.util.Pair;
import org.apache.pig.newplan.Operator;
import org.apache.pig.newplan.OperatorPlan;
import org.apache.pig.newplan.ReverseDependencyOrderWalker;
import org.apache.pig.newplan.logical.Util;
import org.apache.pig.newplan.logical.expression.AddExpression;
import org.apache.pig.newplan.logical.expression.AllSameExpressionVisitor;
import org.apache.pig.newplan.logical.expression.AndExpression;
import org.apache.pig.newplan.logical.expression.BinCondExpression;
import org.apache.pig.newplan.logical.expression.BinaryExpression;
import org.apache.pig.newplan.logical.expression.CastExpression;
import org.apache.pig.newplan.logical.expression.ConstantExpression;
import org.apache.pig.newplan.logical.expression.DereferenceExpression;
import org.apache.pig.newplan.logical.expression.DivideExpression;
import org.apache.pig.newplan.logical.expression.EqualExpression;
import org.apache.pig.newplan.logical.expression.GreaterThanEqualExpression;
import org.apache.pig.newplan.logical.expression.GreaterThanExpression;
import org.apache.pig.newplan.logical.expression.LessThanEqualExpression;
import org.apache.pig.newplan.logical.expression.LessThanExpression;
import org.apache.pig.newplan.logical.expression.LogicalExpression;
import org.apache.pig.newplan.logical.expression.LogicalExpressionVisitor;
import org.apache.pig.newplan.logical.expression.MapLookupExpression;
import org.apache.pig.newplan.logical.expression.ModExpression;
import org.apache.pig.newplan.logical.expression.MultiplyExpression;
import org.apache.pig.newplan.logical.expression.NegativeExpression;
import org.apache.pig.newplan.logical.expression.NotEqualExpression;
import org.apache.pig.newplan.logical.expression.NotExpression;
import org.apache.pig.newplan.logical.expression.OrExpression;
import org.apache.pig.newplan.logical.expression.RegexExpression;
import org.apache.pig.newplan.logical.expression.SubtractExpression;
import org.apache.pig.newplan.logical.expression.UserFuncExpression;
import org.apache.pig.newplan.logical.relational.LogicalRelationalOperator;
import org.apache.pig.newplan.logical.relational.LogicalSchema;
import org.apache.pig.newplan.logical.relational.LogicalSchema.LogicalFieldSchema;
public class TypeCheckingExpVisitor extends LogicalExpressionVisitor{
// Collects type-checking warnings/errors emitted while visiting the plan.
private CompilationMessageCollector msgCollector;
// Relational operator owning the expression plan; its alias is used in error messages.
private LogicalRelationalOperator currentRelOp;
// Sentinel value; -1 -- presumably denotes "unbounded" in arity/size checks elsewhere; confirm.
private static final int INF = -1;
/**
 * Creates a type-checking visitor for one expression plan, walked in reverse
 * dependency order so operands are typed before the operators that use them.
 *
 * @param expPlan      expression plan to type-check
 * @param msgCollector collector for type-checking warnings and errors
 * @param relOp        relational operator owning the plan (used for alias in messages)
 */
public TypeCheckingExpVisitor(
OperatorPlan expPlan,
CompilationMessageCollector msgCollector,
LogicalRelationalOperator relOp
)
throws FrontendException {
super(expPlan, new ReverseDependencyOrderWalker(expPlan));
this.msgCollector = msgCollector;
this.currentRelOp = relOp;
//reset field schema of all expression operators because
// it needs to be re-evaluated after correct types are set
FieldSchemaResetter sr = new FieldSchemaResetter(expPlan);
sr.visit();
}
// The four arithmetic operators all share the same numeric promotion/cast rules.
@Override
public void visit(AddExpression binOp) throws FrontendException {
addCastsToNumericBinExpression(binOp);
}
@Override
public void visit(SubtractExpression binOp) throws FrontendException {
addCastsToNumericBinExpression(binOp);
}
@Override
public void visit(MultiplyExpression binOp) throws FrontendException {
addCastsToNumericBinExpression(binOp);
}
@Override
public void visit(DivideExpression binOp) throws FrontendException {
addCastsToNumericBinExpression(binOp);
}
/**
 * Add casts to promote numeric type to larger of two input numeric types of
 * the {@link BinaryExpression} binOp . If one of the inputs is numeric
 * and other bytearray, cast the bytearray type to other numeric type.
 * If both inputs are bytearray, cast them to double.
 * @param binOp binary arithmetic expression whose operands may need casts
 * @throws FrontendException if the operand types are incompatible
 */
private void addCastsToNumericBinExpression(BinaryExpression binOp)
throws FrontendException {
LogicalExpression lhs = binOp.getLhs() ;
LogicalExpression rhs = binOp.getRhs() ;
byte lhsType = lhs.getType() ;
byte rhsType = rhs.getType() ;
if ( DataType.isNumberType(lhsType) &&
DataType.isNumberType(rhsType) ) {
// return the bigger type
byte biggerType = lhsType > rhsType ? lhsType:rhsType ;
// Cast smaller type to the bigger type
if (lhsType != biggerType) {
insertCast(binOp, biggerType, binOp.getLhs());
}
else if (rhsType != biggerType) {
insertCast(binOp, biggerType, binOp.getRhs());
}
}
else if ( (lhsType == DataType.BYTEARRAY) &&
(DataType.isNumberType(rhsType)) ) {
// bytearray op number: cast the bytearray side to the numeric type
insertCast(binOp, rhsType, binOp.getLhs());
}
else if ( (rhsType == DataType.BYTEARRAY) &&
(DataType.isNumberType(lhsType)) ) {
insertCast(binOp, lhsType, binOp.getRhs());
}
else if ( (lhsType == DataType.BYTEARRAY) &&
(rhsType == DataType.BYTEARRAY) ) {
// Cast both operands to double
insertCast(binOp, DataType.DOUBLE, binOp.getLhs());
insertCast(binOp, DataType.DOUBLE, binOp.getRhs());
}
else {
// Anything else (chararray, bag, ...) is not arithmetic-compatible.
int errCode = 1039;
String msg = generateIncompatibleTypesMessage(binOp);
msgCollector.collect(msg, MessageType.Error);
throw new TypeCheckerException(binOp, msg, errCode, PigException.INPUT) ;
}
}
// % (mod) accepts only integral operands: int, long, biginteger, or bytearray.
// The smaller integral side is cast up to the larger; a bytearray side is cast
// to the other side's integral type. Any other combination is a type error.
@Override
public void visit(ModExpression binOp) throws FrontendException {
LogicalExpression lhs = binOp.getLhs() ;
LogicalExpression rhs = binOp.getRhs() ;
byte lhsType = lhs.getType() ;
byte rhsType = rhs.getType() ;
boolean error = false;
if (lhsType == DataType.INTEGER) {
if (rhsType == DataType.INTEGER) {
//do nothing
} else if (rhsType == DataType.LONG || rhsType == DataType.BIGINTEGER) {
// promote the int side to the wider rhs type
insertCast(binOp, rhsType, binOp.getLhs());
} else {
error = true;
}
} else if (lhsType == DataType.LONG) {
if (rhsType == DataType.INTEGER) {
insertCast(binOp, lhsType, binOp.getRhs());
} else if (rhsType == DataType.BIGINTEGER) {
insertCast(binOp, rhsType, binOp.getLhs());
} else if (rhsType == DataType.LONG) {
//do nothing
} else {
error = true;
}
} else if (lhsType == DataType.BIGINTEGER) {
if (rhsType == DataType.INTEGER || rhsType == DataType.LONG) {
insertCast(binOp, lhsType, binOp.getRhs());
} else if (rhsType == DataType.BIGINTEGER) {
//do nothing
} else {
error = true;
}
} else if (lhsType == DataType.BYTEARRAY) {
if (rhsType == DataType.INTEGER || rhsType == DataType.LONG || rhsType == DataType.BIGINTEGER) {
// bytearray lhs takes on the integral type of the rhs
insertCast(binOp, rhsType, binOp.getLhs());
} else {
error = true;
}
} else {
error = true;
}
if (error) {
int errCode = 1039;
String msg = generateIncompatibleTypesMessage(binOp);
msgCollector.collect(msg, MessageType.Error);
throw new TypeCheckerException(binOp, msg, errCode, PigException.INPUT) ;
}
}
/**
 * Builds the error text for incompatible operand types of a binary operator. The message
 * is prefixed with the owning relation's alias when one exists, otherwise with the
 * operator's own string form, and describes both operand types (and schemas, if present).
 */
private String generateIncompatibleTypesMessage(BinaryExpression binOp)
throws FrontendException {
    final StringBuilder sb = new StringBuilder();
    final String alias = currentRelOp.getAlias();
    if (alias != null) {
        sb.append("In alias ").append(alias).append(", ");
    } else {
        sb.append(binOp.toString());
    }
    final LogicalFieldSchema lhsFs = binOp.getLhs().getFieldSchema();
    final LogicalFieldSchema rhsFs = binOp.getRhs().getFieldSchema();
    sb.append("incompatible types in ").append(binOp.getName()).append(" Operator");
    sb.append(" left hand side:").append(DataType.findTypeName(lhsFs.type));
    if (lhsFs.schema != null) {
        sb.append(" ").append(lhsFs.schema.toString(false)).append(" ");
    }
    sb.append(" right hand side:").append(DataType.findTypeName(rhsFs.type));
    if (rhsFs.schema != null) {
        sb.append(" ").append(rhsFs.schema.toString(false)).append(" ");
    }
    return sb.toString();
}
// Unary minus: valid on numeric operands as-is; a bytearray operand is cast to double.
@Override
public void visit(NegativeExpression negExp) throws FrontendException {
byte type = negExp.getExpression().getType() ;
if (DataType.isNumberType(type)) {
//do nothing
}
else if (type == DataType.BYTEARRAY) {
// cast bytearray to double
insertCast(negExp, DataType.DOUBLE, negExp.getExpression());
}
else {
int errCode = 1041;
String msg = "NEG can be used with numbers or Bytearray only" ;
msgCollector.collect(msg, MessageType.Error);
throw new TypeCheckerException(negExp, msg, errCode, PigException.INPUT) ;
}
}
// NOT requires a boolean operand; a null constant operand is first cast to boolean
// so that NOT(null) type-checks.
@Override
public void visit(NotExpression notExp) throws FrontendException {
if (notExp.getExpression() instanceof ConstantExpression
&& ((ConstantExpression) notExp.getExpression()).getValue() == null) {
insertCast(notExp, DataType.BOOLEAN, notExp.getExpression());
}
byte type = notExp.getExpression().getType();
if (type != DataType.BOOLEAN) {
int errCode = 1042;
String msg = "NOT can be used with boolean only" ;
msgCollector.collect(msg, MessageType.Error);
throw new TypeCheckerException( notExp, msg, errCode, PigException.INPUT) ;
}
}
// OR and AND share the same boolean-operand checking.
@Override
public void visit(OrExpression orExp) throws FrontendException {
visitBooleanBinary(orExp);
}
@Override
public void visit(AndExpression andExp) throws FrontendException {
visitBooleanBinary(andExp);
}
// Verifies both operands of AND/OR are boolean, first casting null constants to boolean.
private void visitBooleanBinary(BinaryExpression boolExp)
throws FrontendException {
// if lhs or rhs is null constant then cast it to boolean
insertCastsForNullToBoolean(boolExp);
LogicalExpression lhs = boolExp.getLhs();
LogicalExpression rhs = boolExp.getRhs();
byte lhsType = lhs.getType() ;
byte rhsType = rhs.getType() ;
if ( (lhsType != DataType.BOOLEAN) ||
(rhsType != DataType.BOOLEAN) ) {
int errCode = 1038;
String msg = "Operands of AND/OR can be boolean only" ;
msgCollector.collect(msg, MessageType.Error);
throw new TypeCheckerException(boolExp, msg, errCode, PigException.INPUT) ;
}
}
@Override
public void visit(LessThanExpression binOp)
throws FrontendException {
addCastsToCompareBinaryExp(binOp, false /*not equality op*/);
}
@Override
public void visit(LessThanEqualExpression binOp)
throws FrontendException {
addCastsToCompareBinaryExp(binOp, false /*not equality op*/);
}
@Override
public void visit(GreaterThanExpression binOp)
throws FrontendException {
addCastsToCompareBinaryExp(binOp, false /*not equality op*/);
}
@Override
public void visit(GreaterThanEqualExpression binOp)
throws FrontendException {
addCastsToCompareBinaryExp(binOp, false /*not equality op*/);
}
    @Override
    public void visit(EqualExpression binOp)
    throws FrontendException {
        // equality additionally permits boolean/tuple/map operand pairs
        addCastsToCompareBinaryExp(binOp, true /*equality op*/);
    }
    @Override
    public void visit(NotEqualExpression binOp)
    throws FrontendException {
        // equality additionally permits boolean/tuple/map operand pairs
        addCastsToCompareBinaryExp(binOp, true /*equality op*/);
    }
    /**
     * Inserts implicit casts on the operands of a comparison so that both
     * sides end up with the same type:
     * - number vs number: the smaller numeric type is widened to the bigger
     * - datetime/chararray/bytearray pairs of identical type pass as-is
     * - bytearray vs a concrete simple type: the bytearray side is cast
     * - equality operators (isEquality == true) additionally accept
     *   boolean/tuple/map pairs and bytearray vs tuple/map.
     * Any other combination raises error 1039 via
     * {@link #throwIncompatibleTypeError(BinaryExpression)}.
     * @param binOp comparison operator being checked
     * @param isEquality true for ==/!=, which allow extra operand types
     * @throws FrontendException
     */
    private void addCastsToCompareBinaryExp(BinaryExpression binOp, boolean isEquality)
    throws FrontendException {
        LogicalExpression lhs = binOp.getLhs() ;
        LogicalExpression rhs = binOp.getRhs() ;
        byte lhsType = lhs.getType() ;
        byte rhsType = rhs.getType() ;
        if ( DataType.isNumberType(lhsType) &&
                DataType.isNumberType(rhsType) ) {
            // If not the same type, we cast them to the same
            byte biggerType = lhsType > rhsType ? lhsType:rhsType ;
            // Cast smaller type to the bigger type
            // (at most one side can differ from biggerType)
            if (lhsType != biggerType) {
                insertCast(binOp, biggerType, binOp.getLhs());
            }
            else if (rhsType != biggerType) {
                insertCast(binOp, biggerType, binOp.getRhs());
            }
        }
        else if ( (lhsType == DataType.DATETIME) &&
                (rhsType == DataType.DATETIME) ) {
            // good
        }
        else if ( (lhsType == DataType.CHARARRAY) &&
                (rhsType == DataType.CHARARRAY) ) {
            // good
        }
        else if ( (lhsType == DataType.BYTEARRAY) &&
                (rhsType == DataType.BYTEARRAY) ) {
            // good
        }
        else if ( (lhsType == DataType.BYTEARRAY) &&
                ( (rhsType == DataType.CHARARRAY) || (DataType.isNumberType(rhsType)) || (rhsType == DataType.BOOLEAN) || (rhsType == DataType.DATETIME))
        ) {
            // Cast byte array to the type on rhs
            insertCast(binOp, rhsType, binOp.getLhs());
        }
        else if ( (rhsType == DataType.BYTEARRAY) &&
                ( (lhsType == DataType.CHARARRAY) || (DataType.isNumberType(lhsType)) || (lhsType == DataType.BOOLEAN) || (lhsType == DataType.DATETIME))
        ) {
            // Cast byte array to the type on lhs
            insertCast(binOp, lhsType, binOp.getRhs());
        }else if (isEquality){
            //in case of equality condition, allow boolean, tuples and maps as args
            if((lhsType == DataType.BOOLEAN) &&
                    (rhsType == DataType.BOOLEAN) ) {
                // good
            }
            else if((lhsType == DataType.TUPLE) &&
                    (rhsType == DataType.TUPLE) ) {
                // good
            }
            else if ( (lhsType == DataType.MAP) &&
                    (rhsType == DataType.MAP) ) {
                // good
            }
            else if (lhsType == DataType.BYTEARRAY &&
                    (rhsType == DataType.MAP || rhsType == DataType.TUPLE)){
                // Cast byte array to the type on lhs
                insertCast(binOp, rhsType, binOp.getLhs());
            }
            else if(rhsType == DataType.BYTEARRAY &&
                    (lhsType == DataType.MAP || lhsType == DataType.TUPLE)){
                // Cast byte array to the type on lhs
                insertCast(binOp, lhsType, binOp.getRhs());
            }
            else {
                throwIncompatibleTypeError(binOp);
            }
        }
        else {
            throwIncompatibleTypeError(binOp);
        }
    }
    /**
     * Reports error 1039 (incompatible comparison operand types) to the
     * message collector and aborts type checking.
     * @param binOp operator whose operand types cannot be reconciled
     * @throws FrontendException always
     */
    private void throwIncompatibleTypeError(BinaryExpression binOp)
    throws FrontendException {
        int errCode = 1039;
        String msg = generateIncompatibleTypesMessage(binOp);
        msgCollector.collect(msg, MessageType.Error) ;
        throw new TypeCheckerException(binOp, msg, errCode, PigException.INPUT);
    }
private void insertCastsForNullToBoolean(BinaryExpression binOp)
throws FrontendException {
if (binOp.getLhs() instanceof ConstantExpression
&& ((ConstantExpression) binOp.getLhs()).getValue() == null)
insertCast(binOp, DataType.BOOLEAN, binOp.getLhs());
if (binOp.getRhs() instanceof ConstantExpression
&& ((ConstantExpression) binOp.getRhs()).getValue() == null)
insertCast(binOp, DataType.BOOLEAN, binOp.getRhs());
}
/**
* add cast to convert the input of exp
* {@link LogicalExpression} arg to type toType
* @param exp
* @param toType
* @param arg
* @throws FrontendException
*/
private void insertCast(LogicalExpression exp, byte toType, LogicalExpression arg)
throws FrontendException {
LogicalFieldSchema toFs = new LogicalSchema.LogicalFieldSchema(null, null, toType);
insertCast(exp, toFs, arg);
}
    /**
     * Inserts a {@link CastExpression} between {@code node} and its input
     * {@code arg}, converting arg to the type described by {@code toFs},
     * then type checks the freshly inserted cast.
     * @param node expression whose input is being cast
     * @param toFs target field schema of the cast
     * @param arg input expression to wrap in the cast
     * @throws FrontendException
     */
    private void insertCast(LogicalExpression node, LogicalFieldSchema toFs,
            LogicalExpression arg)
    throws FrontendException {
        // warn the user about the implicit cast being added
        collectCastWarning(node, arg.getType(), toFs.type, msgCollector);
        CastExpression cast = new CastExpression(plan, arg, toFs);
        try {
            // disconnect cast and arg because the connection is already
            // added by cast constructor and insertBetween call is going
            // to do it again
            plan.disconnect(cast, arg);
            plan.insertBetween(node, cast, arg);
        }
        catch (PlanException pe) {
            int errCode = 2059;
            String msg = "Problem with inserting cast operator for " + node + " in plan.";
            throw new TypeCheckerException(arg, msg, errCode, PigException.BUG, pe);
        }
        // validate the new cast (visit(CastExpression) checks castability)
        this.visit(cast);
    }
    /**
     * For Basic Types:
     * 0) Casting to itself is always ok
     * 1) Casting from number to number is always ok
     * 2) ByteArray to anything is ok
     * 3) number to chararray is ok
     * For Composite Types:
     * Recursively traverse the schemas till you get a basic type
     * Raises error 1051 for casts to bytearray and error 1052 for casts
     * that {@code LogicalFieldSchema.castable} rejects.
     * @throws FrontendException
     */
    @Override
    public void visit(CastExpression cast) throws FrontendException {
        byte inType = cast.getExpression().getType();
        byte outType = cast.getType();
        // casting TO bytearray is never allowed
        if(outType == DataType.BYTEARRAY){
            int errCode = 1051;
            String msg = "Cannot cast to bytearray";
            msgCollector.collect(msg, MessageType.Error) ;
            throw new TypeCheckerException(cast, msg, errCode, PigException.INPUT) ;
        }
        LogicalFieldSchema inFs = cast.getExpression().getFieldSchema();
        LogicalFieldSchema outFs = cast.getFieldSchema();
        if(inFs == null){
            //replace null schema with bytearray schema.
            inFs = new LogicalFieldSchema(null, null, DataType.BYTEARRAY);
        }
        //check if the field schemas are castable
        boolean castable = LogicalFieldSchema.castable(inFs, outFs);
        if(!castable) {
            int errCode = 1052;
            String msg = "Cannot cast "
                + DataType.findTypeName(inType)
                + ((DataType.isSchemaType(inType))? " with schema " + inFs.toString(false) : "")
                + " to "
                + DataType.findTypeName(outType)
                + ((DataType.isSchemaType(outType))? " with schema " + outFs.toString(false) : "");
            msgCollector.collect(msg, MessageType.Error) ;
            throw new TypeCheckerException(cast, msg, errCode, PigException.INPUT) ;
        }
    }
/**
* {@link RegexExpression} expects CharArray as input
* Itself always returns Boolean
* @param rg
* @throws FrontendException
*/
@Override
public void visit(RegexExpression rg) throws FrontendException {
// We allow BYTEARRAY to be converted to CHARARRAY
if (rg.getLhs().getType() == DataType.BYTEARRAY){
insertCast(rg, DataType.CHARARRAY, rg.getLhs());
}
if (rg.getRhs().getType() == DataType.BYTEARRAY){
insertCast(rg, DataType.CHARARRAY, rg.getRhs());
}
// Other than that if it's not CharArray just say goodbye
if (rg.getLhs().getType() != DataType.CHARARRAY ||
rg.getRhs().getType() != DataType.CHARARRAY)
{
int errCode = 1037;
String msg = "Operands of Regex can be CharArray only :" + rg;
msgCollector.collect(msg, MessageType.Error);
throw new TypeCheckerException(rg, msg, errCode, PigException.INPUT) ;
}
}
@Override
public void visit(BinCondExpression binCond) throws FrontendException{
// high-level type checking
if (binCond.getCondition().getType() != DataType.BOOLEAN) {
int errCode = 1047;
String msg = "Condition in BinCond must be boolean" ;
msgCollector.collect(msg, MessageType.Error);
throw new TypeCheckerException(binCond, msg, errCode, PigException.INPUT) ;
}
byte lhsType = binCond.getLhs().getType() ;
byte rhsType = binCond.getRhs().getType() ;
// If both sides are number, we can convert the smaller type to the bigger type
if (DataType.isNumberType(lhsType) && DataType.isNumberType(rhsType)) {
byte biggerType = lhsType > rhsType ? lhsType:rhsType ;
if (biggerType > lhsType) {
insertCast(binCond, biggerType, binCond.getLhs());
}
else if (biggerType > rhsType) {
insertCast(binCond, biggerType, binCond.getRhs());
}
}
else if ((lhsType == DataType.BYTEARRAY)
&& ((rhsType == DataType.CHARARRAY) || (DataType
.isNumberType(rhsType))) || (rhsType == DataType.DATETIME)) { // need to add boolean as well
// Cast byte array to the type on rhs
insertCast(binCond, rhsType, binCond.getLhs());
} else if ((rhsType == DataType.BYTEARRAY)
&& ((lhsType == DataType.CHARARRAY) || (DataType
.isNumberType(lhsType)) || (rhsType == DataType.DATETIME))) { // need to add boolean as well
// Cast byte array to the type on lhs
insertCast(binCond, lhsType, binCond.getRhs());
}
// A constant null is always bytearray - so cast it
// to rhs type
else if (binCond.getLhs() instanceof ConstantExpression
&& ((ConstantExpression) binCond.getLhs()).getValue() == null) {
try {
insertCast(binCond, binCond.getRhs().getFieldSchema(), binCond.getLhs());
} catch (FrontendException e) {
int errCode = 2216;
String msg = "Problem getting fieldSchema for " +binCond.getRhs();
throw new TypeCheckerException(binCond, msg, errCode, PigException.BUG, e);
}
} else if (binCond.getRhs() instanceof ConstantExpression
&& ((ConstantExpression) binCond.getRhs()).getValue() == null) {
try {
insertCast(binCond, binCond.getLhs().getFieldSchema(), binCond.getRhs());
} catch (FrontendException e) {
int errCode = 2216;
String msg = "Problem getting fieldSchema for " +binCond.getRhs();
throw new TypeCheckerException(binCond, msg, errCode, PigException.BUG, e);
}
} else if (lhsType == rhsType) {
// Matching schemas if we're working with tuples/bags
if (DataType.isSchemaType(lhsType)) {
try {
if(! binCond.getLhs().getFieldSchema().isEqual(binCond.getRhs().getFieldSchema())){
int errCode = 1048;
String msg = "Two inputs of BinCond must have compatible schemas."
+ " left hand side: " + binCond.getLhs().getFieldSchema()
+ " right hand side: " + binCond.getRhs().getFieldSchema();
msgCollector.collect(msg, MessageType.Error) ;
throw new TypeCheckerException(binCond, msg, errCode, PigException.INPUT) ;
}
// TODO: We may have to merge the schema here
// if the previous check is not exact match
}
catch (FrontendException fe) {
int errCode = 1049;
String msg = "Problem during evaluaton of BinCond output type" ;
msgCollector.collect(msg, MessageType.Error) ;
throw new TypeCheckerException(binCond, msg, errCode, PigException.INPUT, fe) ;
}
}
}
else {
int errCode = 1050;
String msg = "Unsupported input type for BinCond: left hand side: "
+ DataType.findTypeName(lhsType) + "; right hand side: "
+ DataType.findTypeName(rhsType);
msgCollector.collect(msg, MessageType.Error) ;
throw new TypeCheckerException(binCond, msg, errCode, PigException.INPUT) ;
}
}
@Override
public void visit(MapLookupExpression map)
throws FrontendException{
if(map.getMap().getType() != DataType.MAP) {
// insert cast if the predecessor does not
// return map
insertCast(map, DataType.MAP, map.getMap());
}
}
@Override
public void visit(DereferenceExpression deref) throws FrontendException{
byte inputType = deref.getReferredExpression().getType();
switch(inputType){
case DataType.TUPLE:
case DataType.BAG:
case DataType.BYTEARRAY: // ideally determine type at runtime
//allowed types
break;
default:
int errCode = 1129;
String msg = "Referring to column(s) within a column of type " +
DataType.findTypeName(inputType)
+ " is not allowed";
throw new TypeCheckerException(deref, msg, errCode, PigException.INPUT);
}
}
    /**
     * Type checks a UDF call: builds the schema of the actual arguments,
     * asks the EvalFunc for its supported input schemas
     * (getArgToFuncMapping), picks the best-matching funcSpec (see the
     * long comment below for the matching strategy), then rebinds the
     * funcSpec on the expression and inserts casts for any argument whose
     * actual type differs from the matched schema.
     */
    @Override
    public void visit(UserFuncExpression func) throws FrontendException{
        List<LogicalExpression> list = func.getArguments() ;
        // If the dependency graph is right, all the inputs
        // must already know the types
        Schema currentArgSchema = new Schema();
        for(LogicalExpression op: list) {
            // every argument must have a concrete, usable type (error 1014)
            if (!DataType.isUsableType(op.getType())) {
                int errCode = 1014;
                String msg = "Problem with input " + op + " of User-defined function: " + func;
                msgCollector.collect(msg, MessageType.Error);
                throw new TypeCheckerException(func, msg, errCode, PigException.INPUT) ;
            }
            try {
                currentArgSchema.add(Util.translateFieldSchema(op.getFieldSchema()));
            } catch (FrontendException e) {
                int errCode = 1043;
                String msg = "Unable to retrieve field schema.";
                throw new TypeCheckerException(func, msg, errCode, PigException.INPUT, e);
            }
        }
        EvalFunc<?> ef = (EvalFunc<?>) PigContext.instantiateFuncFromSpec(func.getFuncSpec());
        // ask the EvalFunc what types of inputs it can handle
        List<FuncSpec> funcSpecs = null;
        try {
            funcSpecs = ef.getArgToFuncMapping();
            if (funcSpecs!=null) {
                // normalize each candidate schema so comparisons below are
                // done on a canonical form
                for (FuncSpec funcSpec : funcSpecs) {
                    Schema s = funcSpec.getInputArgsSchema();
                    LogicalSchema ls = Util.translateSchema(s);
                    ls.normalize();
                    funcSpec.setInputArgsSchema(Util.translateSchema(ls));
                }
            }
        } catch (Exception e) {
            int errCode = 1044;
            String msg = "Unable to get list of overloaded methods.";
            throw new TypeCheckerException(func, msg, errCode, PigException.INPUT, e);
        }
        /**
         * Here is an explanation of the way the matching UDF funcspec will be chosen
         * based on actual types in the input schema.
         * First an "exact" match is tried for each of the fields in the input schema
         * with the corresponding fields in the candidate funcspecs' schemas.
         *
         * If exact match fails, then first a check if made if the input schema has any
         * bytearrays in it.
         *
         * If there are NO bytearrays in the input schema, then a best fit match is attempted
         * for the different fields. Essential a permissible cast from one type to another
         * is given a "score" based on its position in the "castLookup" table. A final
         * score for a candidate funcspec is deduced as
         * SUM(score_of_particular_cast*noOfCastsSoFar).
         * If no permissible casts are possible, the score for the candidate is -1. Among
         * the non -1 score candidates, the candidate with the lowest score is chosen.
         *
         * If there are bytearrays in the input schema, a modified exact match is tried. In this
         * matching, bytearrays in the input schema are not considered. As a result of
         * ignoring the bytearrays, we could get multiple candidate funcspecs which match
         * "exactly" for the other columns - if this is the case, we notify the user of
         * the ambiguity and error out. Else if all other (non byte array) fields
         * matched exactly, then we can cast bytearray(s) to the corresponding type(s)
         * in the matched udf schema. If this modified exact match fails, the above best fit
         * algorithm is attempted by initially coming up with scores and candidate funcSpecs
         * (with bytearray(s) being ignored in the scoring process). Then a check is
         * made to ensure that the positions which have bytearrays in the input schema
         * have the same type (for a given position) in the corresponding positions in
         * all the candidate funcSpecs. If this is not the case, it indicates a conflict
         * and the user is notified of the error (because we have more than
         * one choice for the destination type of the cast for the bytearray). If this is the case,
         * the candidate with the lowest score is chosen.
         */
        FuncSpec matchingSpec = null;
        boolean notExactMatch = false;
        if(funcSpecs!=null && funcSpecs.size()!=0){
            //Some function mappings found. Trying to see
            //if one of them fits the input schema
            if((matchingSpec = exactMatch(funcSpecs, currentArgSchema, func))==null){
                //Oops, no exact match found. Trying to see if we
                //have mappings that we can fit using casts.
                notExactMatch = true;
                if(byteArrayFound(func, currentArgSchema)){
                    // try "exact" matching all other fields except the byte array
                    // fields and if they all exact match and we have only one candidate
                    // for the byte array cast then that's the matching one!
                    if((matchingSpec = exactMatchWithByteArrays(funcSpecs, currentArgSchema, func))==null){
                        // "exact" match with byte arrays did not work - try best fit match
                        if((matchingSpec = bestFitMatchWithByteArrays(funcSpecs, currentArgSchema, func)) == null) {
                            int errCode = 1045;
                            String msg = "Could not infer the matching function for "
                                + func.getFuncSpec()
                                + " as multiple or none of them fit. Please use an explicit cast.";
                            msgCollector.collect(msg, MessageType.Error);
                            throw new TypeCheckerException(func, msg, errCode, PigException.INPUT);
                        }
                    }
                } else if ((matchingSpec = bestFitMatch(funcSpecs, currentArgSchema)) == null) {
                    // Either no byte arrays found or there are byte arrays
                    // but only one mapping exists.
                    // However, we could not find a match as there were either
                    // none fitting the input schema or it was ambiguous.
                    // Throw exception that we can't infer a fit.
                    int errCode = 1045;
                    String msg = "Could not infer the matching function for "
                        + func.getFuncSpec()
                        + " as multiple or none of them fit. Please use an explicit cast.";
                    msgCollector.collect(msg, MessageType.Error);
                    throw new TypeCheckerException(func, msg, errCode, PigException.INPUT);
                }
            }
        }
        if(matchingSpec!=null){
            //Voila! We have a fitting match. Lets insert casts and make
            //it work.
            // notify the user about the match we picked if it was not
            // an exact match
            if(notExactMatch) {
                String msg = "Function " + func.getFuncSpec().getClassName() + "()" +
                " will be called with following argument types: " +
                matchingSpec.getInputArgsSchema() + ". If you want to use " +
                "different input argument types, please use explicit casts.";
                msgCollector.collect(msg, MessageType.Warning, PigWarning.USING_OVERLOADED_FUNCTION);
            }
            // preserve constructor args supplied via DEFINE
            if (func.isViaDefine()) {
                matchingSpec.setCtorArgs(func.getFuncSpec().getCtorArgs());
            }
            func.setFuncSpec(matchingSpec);
            insertCastsForUDF(func, currentArgSchema, matchingSpec.getInputArgsSchema());
        }
    }
/**
* Tries to find the schema supported by one of funcSpecs which can be
* obtained by inserting a set of casts to the input schema
*
* @param funcSpecs -
* mappings provided by udf
* @param s -
* input schema
* @param func -
* udf expression
* @return the funcSpec that supports the schema that is best suited to s.
* The best suited schema is one that has the lowest score as
* returned by fitPossible().
* @throws VisitorException
*/
private FuncSpec bestFitMatchWithByteArrays(List<FuncSpec> funcSpecs,
Schema s, UserFuncExpression func) throws VisitorException {
List<Pair<Long, FuncSpec>> scoreFuncSpecList = new ArrayList<Pair<Long,FuncSpec>>();
for (Iterator<FuncSpec> iterator = funcSpecs.iterator(); iterator
.hasNext();) {
FuncSpec fs = iterator.next();
long score = fitPossible(s, fs.getInputArgsSchema());
if (score != INF) {
scoreFuncSpecList.add(new Pair<Long, FuncSpec>(score, fs));
}
}
// if no candidates found, return null
if(scoreFuncSpecList.size() == 0)
return null;
if(scoreFuncSpecList.size() > 1) {
// sort the candidates based on score
Collections.sort(scoreFuncSpecList, new ScoreFuncSpecListComparator());
// if there are two (or more) candidates with the same *lowest* score
// we cannot choose one of them - notify the user
if (scoreFuncSpecList.get(0).first == scoreFuncSpecList.get(1).first) {
int errCode = 1046;
String msg = "Multiple matching functions for "
+ func.getFuncSpec() + " with input schemas: " + "("
+ scoreFuncSpecList.get(0).second.getInputArgsSchema() + ", "
+ scoreFuncSpecList.get(1).second.getInputArgsSchema() + "). Please use an explicit cast.";
msgCollector.collect(msg, MessageType.Error);
throw new TypeCheckerException(func, msg, errCode, PigException.INPUT);
}
// now consider the bytearray fields
List<Integer> byteArrayPositions = getByteArrayPositions(func, s);
// make sure there is only one type to "cast to" for the byte array
// positions among the candidate funcSpecs
Map<Integer, Pair<FuncSpec, Byte>> castToMap = new HashMap<Integer, Pair<FuncSpec, Byte>>();
for (Iterator<Pair<Long, FuncSpec>> it = scoreFuncSpecList.iterator(); it.hasNext();) {
FuncSpec funcSpec = it.next().second;
Schema sch = funcSpec.getInputArgsSchema();
for (Iterator<Integer> iter = byteArrayPositions.iterator(); iter
.hasNext();) {
Integer i = iter.next();
try {
if (!castToMap.containsKey(i)) {
// first candidate
castToMap.put(i, new Pair<FuncSpec, Byte>(funcSpec, sch
.getField(i).type));
} else {
// make sure the existing type from an earlier candidate
// matches
Pair<FuncSpec, Byte> existingPair = castToMap.get(i);
if (sch.getField(i).type != existingPair.second) {
int errCode = 1046;
String msg = "Multiple matching functions for "
+ func.getFuncSpec() + " with input schema: "
+ "(" + existingPair.first.getInputArgsSchema()
+ ", " + funcSpec.getInputArgsSchema()
+ "). Please use an explicit cast.";
msgCollector.collect(msg, MessageType.Error);
throw new TypeCheckerException(func, msg, errCode, PigException.INPUT);
}
}
} catch (FrontendException fee) {
int errCode = 1043;
String msg = "Unalbe to retrieve field schema.";
throw new TypeCheckerException(func, msg, errCode, PigException.INPUT, fee);
}
}
}
}
// if we reached here, it means we have >= 1 candidates and these candidates
// have the same type for position which have bytearray in the input
// Also the candidates are stored sorted by score in a list - we can now
// just return the first candidate (the one with the lowest score)
return scoreFuncSpecList.get(0).second;
}
private static class ScoreFuncSpecListComparator implements Comparator<Pair<Long, FuncSpec>> {
/* (non-Javadoc)
* @see java.util.Comparator#compare(java.lang.Object, java.lang.Object)
*/
public int compare(Pair<Long, FuncSpec> o1, Pair<Long, FuncSpec> o2) {
if(o1.first < o2.first)
return -1;
else if (o1.first > o2.first)
return 1;
else
return 0;
}
}
    /**
     * Finds if there is an exact match between the schema supported by
     * one of the funcSpecs and the input schema s. Here an exact match
     * for all non byte array fields is first attempted and if there is
     * exactly one candidate, it is chosen (since the bytearray(s) can
     * just be cast to corresponding type(s) in the candidate)
     * @param funcSpecs - mappings provided by udf
     * @param s - input schema
     * @param func - UserFuncExpression for which matching is requested
     * @return the matching spec if found else null
     * @throws FrontendException
     */
    private FuncSpec exactMatchWithByteArrays(List<FuncSpec> funcSpecs,
            Schema s, UserFuncExpression func) throws FrontendException {
        // exact match all fields except byte array fields
        // ignore byte array fields for matching
        return exactMatchHelper(funcSpecs, s, func, true);
    }
    /**
     * Finds if there is an exact match between the schema supported by
     * one of the funcSpecs and the input schema s. Here an exact match
     * for all fields is attempted.
     * @param funcSpecs - mappings provided by udf
     * @param s - input schema
     * @param func - UserFuncExpression for which matching is requested
     * @return the matching spec if found else null
     * @throws FrontendException
     */
    private FuncSpec exactMatch(List<FuncSpec> funcSpecs, Schema s,
            UserFuncExpression func) throws FrontendException {
        // exact match all fields, don't ignore byte array fields
        return exactMatchHelper(funcSpecs, s, func, false);
    }
/**
* Tries to find the schema supported by one of funcSpecs which can
* be obtained by inserting a set of casts to the input schema
* @param funcSpecs - mappings provided by udf
* @param s - input schema
* @return the funcSpec that supports the schema that is best suited
* to s. The best suited schema is one that has the
* lowest score as returned by fitPossible().
*/
private FuncSpec bestFitMatch(List<FuncSpec> funcSpecs, Schema s) {
FuncSpec matchingSpec = null;
long score = INF;
long prevBestScore = Long.MAX_VALUE;
long bestScore = Long.MAX_VALUE;
for (Iterator<FuncSpec> iterator = funcSpecs.iterator(); iterator.hasNext();) {
FuncSpec fs = iterator.next();
score = fitPossible(s,fs.getInputArgsSchema());
if(score!=INF && score<=bestScore){
matchingSpec = fs;
prevBestScore = bestScore;
bestScore = score;
}
}
if(matchingSpec!=null && bestScore!=prevBestScore)
return matchingSpec;
return null;
}
/**
* Checks to see if any field of the input schema is a byte array
* @param func
* @param s - input schema
* @return true if found else false
* @throws VisitorException
*/
private boolean byteArrayFound(UserFuncExpression func, Schema s) throws VisitorException {
for(int i=0;i<s.size();i++){
try {
FieldSchema fs=s.getField(i);
if(fs == null)
return false;
if(fs.type==DataType.BYTEARRAY){
return true;
}
} catch (FrontendException fee) {
int errCode = 1043;
String msg = "Unable to retrieve field schema.";
throw new TypeCheckerException(func, msg, errCode, PigException.INPUT, fee);
}
}
return false;
}
/**
* Gets the positions in the schema which are byte arrays
* @param func
*
* @param s -
* input schema
* @throws VisitorException
*/
private List<Integer> getByteArrayPositions(UserFuncExpression func, Schema s)
throws VisitorException {
List<Integer> result = new ArrayList<Integer>();
for (int i = 0; i < s.size(); i++) {
try {
FieldSchema fs = s.getField(i);
if (fs.type == DataType.BYTEARRAY) {
result.add(i);
}
} catch (FrontendException fee) {
int errCode = 1043;
String msg = "Unable to retrieve field schema.";
throw new TypeCheckerException(func, msg, errCode, PigException.INPUT, fee); }
}
return result;
}
/**
* Finds if there is an exact match between the schema supported by
* one of the funcSpecs and the input schema s
* @param funcSpecs - mappings provided by udf
* @param s - input schema
* @param func user defined function
* @param ignoreByteArrays - flag for whether the exact match is to computed
* after ignoring bytearray (if true) or without ignoring bytearray (if false)
* @return the matching spec if found else null
* @throws FrontendException
*/
private FuncSpec exactMatchHelper(List<FuncSpec> funcSpecs, Schema s,
UserFuncExpression func, boolean ignoreByteArrays)
throws FrontendException {
List<FuncSpec> matchingSpecs = new ArrayList<FuncSpec>();
for (Iterator<FuncSpec> iterator = funcSpecs.iterator(); iterator.hasNext();) {
FuncSpec fs = iterator.next();
if (schemaEqualsForMatching(s, fs.getInputArgsSchema(), ignoreByteArrays)) {
matchingSpecs.add(fs);
}
}
if(matchingSpecs.size() == 0)
return null;
if(matchingSpecs.size() > 1) {
int errCode = 1046;
String msg = "Multiple matching functions for "
+ func.getFuncSpec() + " with input schema: "
+ "(" + matchingSpecs.get(0).getInputArgsSchema()
+ ", " + matchingSpecs.get(1).getInputArgsSchema()
+ "). Please use an explicit cast.";
msgCollector.collect(msg, MessageType.Error);
throw new TypeCheckerException(func, msg, errCode, PigException.INPUT);
}
// exactly one matching spec - return it
return matchingSpecs.get(0);
}
    /***************************************************************************
     * Compare two schemas for equality for argument matching purposes. This is
     * a more relaxed form of Schema.equals wherein first the Datatypes of the
     * field schema are checked for equality. Then if a field schema in the udf
     * schema is for a complex type AND if the inner schema is NOT null, check
     * for schema equality of the inner schemas of the UDF field schema and
     * input field schema
     *
     * @param inputSchema schema built from the actual UDF arguments
     * @param udfSchema candidate schema declared by the UDF
     * @param ignoreByteArrays if true, bytearray fields in the input schema
     *        are skipped (they can be cast to whatever the UDF expects)
     * @return true if FieldSchemas are equal for argument matching, false
     *         otherwise
     * @throws FrontendException
     */
    public static boolean schemaEqualsForMatching(Schema inputSchema,
            Schema udfSchema, boolean ignoreByteArrays) throws FrontendException {
        // If both of them are null, they are equal
        if ((inputSchema == null) && (udfSchema == null)) {
            return true;
        }
        // otherwise, a single null side cannot match
        if (inputSchema == null) {
            return false;
        }
        if (udfSchema == null) {
            return false;
        }
        // the old udf schemas might not have tuple inside bag
        // fix that!
        udfSchema = Util.fixSchemaAddTupleInBag(udfSchema);
        if (inputSchema.size() != udfSchema.size())
            return false;
        // walk both field lists in lock-step (same size checked above)
        Iterator<FieldSchema> i = inputSchema.getFields().iterator();
        Iterator<FieldSchema> j = udfSchema.getFields().iterator();
        while (i.hasNext()) {
            FieldSchema inputFieldSchema = i.next();
            FieldSchema udfFieldSchema = j.next();
            if(inputFieldSchema == null)
                return false;
            if(ignoreByteArrays && inputFieldSchema.type == DataType.BYTEARRAY) {
                continue;
            }
            if (inputFieldSchema.type != udfFieldSchema.type) {
                return false;
            }
            // if a field schema in the udf schema is for a complex
            // type AND if the inner schema is NOT null, check for schema
            // equality of the inner schemas of the UDF field schema and
            // input field schema. If the field schema in the udf schema is
            // for a complex type AND if the inner schema IS null it means
            // the udf is applicable for all input which has the same type
            // for that field (irrespective of inner schema)
            // if it is a bag with empty tuple, then just rely on the field type
            if (DataType.isSchemaType(udfFieldSchema.type)
                    && udfFieldSchema.schema != null
                    && isNotBagWithEmptyTuple(udfFieldSchema)
            ) {
                // Compare recursively using field schema
                if (!FieldSchema.equals(inputFieldSchema, udfFieldSchema,
                        false, true)) {
                    //try modifying any empty tuple to type of bytearray
                    // and see if that matches. Need to do this for
                    // backward compatibility -
                    // User might have specified tuple with a bytearray
                    // and this should also match an empty tuple
                    FieldSchema inputFSWithBytearrayinTuple =
                        new FieldSchema(inputFieldSchema);
                    convertEmptyTupleToBytearrayTuple(inputFSWithBytearrayinTuple);
                    if (!FieldSchema.equals(inputFSWithBytearrayinTuple, udfFieldSchema,
                            false, true)) {
                        return false;
                    }
                }
            }
        }
        return true;
    }
/**
* Check if the fieldSch is a bag with empty tuple schema
* @param fieldSch
* @return
* @throws FrontendException
*/
private static boolean isNotBagWithEmptyTuple(FieldSchema fieldSch)
throws FrontendException {
boolean isBagWithEmptyTuple = false;
if(fieldSch.type == DataType.BAG
&& fieldSch.schema != null
&& fieldSch.schema.getField(0) != null
&& fieldSch.schema.getField(0).type == DataType.TUPLE
&& fieldSch.schema.getField(0).schema == null
){
isBagWithEmptyTuple = true;
}
return !isBagWithEmptyTuple;
}
private static void convertEmptyTupleToBytearrayTuple(
FieldSchema fs) {
if(fs.type == DataType.TUPLE
&& fs.schema != null
&& fs.schema.size() == 0){
fs.schema.add(new FieldSchema(null, DataType.BYTEARRAY));
return;
}
if(fs.schema != null){
for(FieldSchema inFs : fs.schema.getFields()){
convertEmptyTupleToBytearrayTuple(inFs);
}
}
}
    // Maps a source type to the list of types it may be implicitly cast
    // to, in order of preference; fitPossible() uses the list index as
    // the cast's score.
    static final HashMap<Byte, List<Byte>> castLookup = new HashMap<Byte, List<Byte>>();
    static{
        //Ordering here decides the score for the best fit function.
        //Do not change the order. Conversions to a smaller type is preferred
        //over conversion to a bigger type where ordering of types is:
        //INTEGER, LONG, FLOAT, DOUBLE, DATETIME, CHARARRAY, TUPLE, BAG, MAP
        //from small to big
        List<Byte> boolToTypes = Arrays.asList(
                DataType.INTEGER,
                DataType.LONG,
                DataType.FLOAT,
                DataType.DOUBLE,
                DataType.BIGINTEGER,
                DataType.BIGDECIMAL
                // maybe more bigger types
        );
        castLookup.put(DataType.BOOLEAN, boolToTypes);
        List<Byte> intToTypes = Arrays.asList(
                DataType.LONG,
                DataType.FLOAT,
                DataType.DOUBLE,
                DataType.BIGINTEGER,
                DataType.BIGDECIMAL
        );
        castLookup.put(DataType.INTEGER, intToTypes);
        List<Byte> longToTypes = Arrays.asList(
                DataType.FLOAT,
                DataType.DOUBLE,
                DataType.BIGINTEGER,
                DataType.BIGDECIMAL
        );
        castLookup.put(DataType.LONG, longToTypes);
        List<Byte> floatToTypes = Arrays.asList(
                DataType.DOUBLE,
                DataType.BIGINTEGER,
                DataType.BIGDECIMAL
        );
        castLookup.put(DataType.FLOAT, floatToTypes);
        List<Byte> doubleToTypes = Arrays.asList(
                DataType.BIGINTEGER,
                DataType.BIGDECIMAL
        );
        castLookup.put(DataType.DOUBLE, doubleToTypes);
        List<Byte> bigIntegerToTypes = Arrays.asList(
                DataType.BIGDECIMAL
        );
        castLookup.put(DataType.BIGINTEGER, bigIntegerToTypes);
        // bytearray is the wildcard source: it can become anything
        List<Byte> byteArrayToTypes = Arrays.asList(
                DataType.BOOLEAN,
                DataType.INTEGER,
                DataType.LONG,
                DataType.FLOAT,
                DataType.DOUBLE,
                DataType.DATETIME,
                DataType.CHARARRAY,
                DataType.BIGINTEGER,
                DataType.BIGDECIMAL,
                DataType.TUPLE,
                DataType.BAG,
                DataType.MAP
        );
        castLookup.put(DataType.BYTEARRAY, byteArrayToTypes);
    }
    /**
     * Computes a modified version of manhattan distance between
     * the two schemas: s1 & s2. Here the value on the same axis
     * are preferred over values that change axis as this means
     * that the number of casts required will be lesser on the same
     * axis.
     *
     * However, this function ceases to be a metric as the triangle
     * inequality does not hold.
     *
     * Each schema is an s1.size() dimensional vector.
     * The ordering for each axis is as defined by castLookup.
     * Unallowed casts are returned a dist of INFINITY.
     *
     * Bytearray fields in s1 are skipped here; they are resolved
     * separately by the byte-array matching logic.
     * @param s1 input (actual argument) schema
     * @param s2 candidate UDF schema
     * @return the fit score (lower is better), or INF if no fit
     */
    private long fitPossible(Schema s1, Schema s2) {
        // null schemas cannot be scored
        if(s1==null || s2==null) return INF;
        List<FieldSchema> sFields = s1.getFields();
        List<FieldSchema> fsFields = s2.getFields();
        if(sFields.size()!=fsFields.size())
            return INF;
        long score = 0;
        int castCnt=0;
        for(int i=0;i<sFields.size();i++){
            FieldSchema sFS = sFields.get(i);
            if(sFS == null){
                return INF;
            }
            // if we have a byte array do not include it
            // in the computation of the score - bytearray
            // fields will be looked at separately outside
            // of this function
            if (sFS.type == DataType.BYTEARRAY)
                continue;
            FieldSchema fsFS = fsFields.get(i);
            // complex types must match structurally - no casting between
            // differing tuple/bag/map schemas
            if(DataType.isSchemaType(sFS.type)){
                if(!FieldSchema.equals(sFS, fsFS, false, true))
                    return INF;
            }
            if(FieldSchema.equals(sFS, fsFS, true, true)) continue;
            // a needed cast must be permitted by the castLookup table
            if(!castLookup.containsKey(sFS.type))
                return INF;
            if(!(castLookup.get(sFS.type).contains(fsFS.type)))
                return INF;
            // score is the cast's 1-based position in the preference list
            score += (castLookup.get(sFS.type)).indexOf(fsFS.type) + 1;
            ++castCnt;
        }
        // weight the total by the number of casts needed
        return score * castCnt;
    }
/**
 * Inserts a cast on every UDF argument whose declared type differs from
 * the expected type of the matching field in the target schema.
 */
private void insertCastsForUDF(UserFuncExpression func, Schema fromSch, Schema toSch)
        throws FrontendException {
    List<FieldSchema> fromFields = fromSch.getFields();
    List<FieldSchema> toFields = toSch.getFields();
    List<LogicalExpression> args = func.getArguments();
    for (int i = 0; i < fromFields.size(); i++) {
        FieldSchema target = toFields.get(i);
        // only insert a cast where the types actually differ
        if (fromFields.get(i).type != target.type) {
            insertCast(func, Util.translateFieldSchema(target), args.get(i));
        }
    }
}
/***
* Helper for collecting warning when casting is inserted
* to the plan (implicit casting)
*
* @param node
* @param originalType
* @param toType
*/
static void collectCastWarning(Operator node, byte originalType, byte toType,
        CompilationMessageCollector msgCollector) {
    String fromTypeName = DataType.findTypeName(originalType);
    String toTypeName = DataType.findTypeName(toType);
    String opName = node.getClass().getSimpleName();
    // Map the target type to its dedicated warning kind. Types with no case
    // below (e.g. bytearray) leave kind as null, matching prior behavior.
    PigWarning kind = null;
    switch (toType) {
    case DataType.BOOLEAN:
        kind = PigWarning.IMPLICIT_CAST_TO_BOOLEAN;
        break;
    case DataType.INTEGER:
        kind = PigWarning.IMPLICIT_CAST_TO_INT;
        break;
    case DataType.LONG:
        kind = PigWarning.IMPLICIT_CAST_TO_LONG;
        break;
    case DataType.FLOAT:
        kind = PigWarning.IMPLICIT_CAST_TO_FLOAT;
        break;
    case DataType.DOUBLE:
        kind = PigWarning.IMPLICIT_CAST_TO_DOUBLE;
        break;
    case DataType.BIGINTEGER:
        kind = PigWarning.IMPLICIT_CAST_TO_BIGINTEGER;
        break;
    case DataType.BIGDECIMAL:
        kind = PigWarning.IMPLICIT_CAST_TO_BIGDECIMAL;
        break;
    case DataType.DATETIME:
        kind = PigWarning.IMPLICIT_CAST_TO_DATETIME;
        break;
    case DataType.CHARARRAY:
        kind = PigWarning.IMPLICIT_CAST_TO_CHARARRAY;
        break;
    case DataType.TUPLE:
        kind = PigWarning.IMPLICIT_CAST_TO_TUPLE;
        break;
    case DataType.BAG:
        kind = PigWarning.IMPLICIT_CAST_TO_BAG;
        break;
    case DataType.MAP:
        kind = PigWarning.IMPLICIT_CAST_TO_MAP;
        break;
    }
    msgCollector.collect(fromTypeName + " is implicitly cast to " + toTypeName
            + " under " + opName + " Operator", MessageType.Warning, kind);
}
// Expression visitor that clears the cached field schema on every
// expression in the plan (walked in reverse dependency order) so that
// schemas are recomputed after casts have been inserted.
static class FieldSchemaResetter extends AllSameExpressionVisitor {
protected FieldSchemaResetter(OperatorPlan p) throws FrontendException {
super(p, new ReverseDependencyOrderWalker(p));
}
@Override
protected void execute(LogicalExpression op) throws FrontendException {
// Drop the stale cached schema for this expression.
op.resetFieldSchema();
}
}
}
| miyakawataku/piggybank-ltsv | src/org/apache/pig/newplan/logical/visitor/TypeCheckingExpVisitor.java | Java | apache-2.0 | 59,224 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import org.apache.spark.{LocalSparkContext, SparkConf, SparkContext, SparkFunSuite}
import org.apache.spark.executor.ExecutorMetrics
import org.apache.spark.scheduler.SchedulingMode.SchedulingMode
import org.apache.spark.storage.BlockManagerId
import org.apache.spark.util.AccumulatorV2
class ExternalClusterManagerSuite extends SparkFunSuite with LocalSparkContext {
  test("launch of backend and scheduler") {
    val sparkConf = new SparkConf().setMaster("myclusterManager").setAppName("testcm")
    sc = new SparkContext(sparkConf)
    // Both scheduler components must have been produced by our dummy cluster
    // manager and initialized through its initialize() callback.
    sc.schedulerBackend match {
      case backend: DummySchedulerBackend => assert(backend.initialized)
      case unexpected => fail(s"wrong scheduler backend: ${unexpected}")
    }
    sc.taskScheduler match {
      case scheduler: DummyTaskScheduler => assert(scheduler.initialized)
      case unexpected => fail(s"wrong task scheduler: ${unexpected}")
    }
  }
}
/**
* Super basic ExternalClusterManager, just to verify ExternalClusterManagers can be configured.
*
* Note that if you want a special ClusterManager for tests, you are probably much more interested
* in [[MockExternalClusterManager]] and the corresponding [[SchedulerIntegrationSuite]]
*/
private class DummyExternalClusterManager extends ExternalClusterManager {

  // Only claims master URLs that exactly match the test's fake manager name.
  override def canCreate(masterURL: String): Boolean = masterURL == "myclusterManager"

  override def createTaskScheduler(sc: SparkContext, masterURL: String): TaskScheduler =
    new DummyTaskScheduler

  override def createSchedulerBackend(
      sc: SparkContext,
      masterURL: String,
      scheduler: TaskScheduler): SchedulerBackend = new DummySchedulerBackend()

  // Marks both dummy components so the suite can verify this hook ran.
  override def initialize(scheduler: TaskScheduler, backend: SchedulerBackend): Unit = {
    scheduler.asInstanceOf[DummyTaskScheduler].initialized = true
    backend.asInstanceOf[DummySchedulerBackend].initialized = true
  }
}
private class DummySchedulerBackend extends SchedulerBackend {
  // Flipped by DummyExternalClusterManager.initialize so the suite can verify wiring.
  var initialized = false
  override def start(): Unit = {}
  override def stop(): Unit = {}
  override def reviveOffers(): Unit = {}
  override def defaultParallelism(): Int = 1
  override def maxNumConcurrentTasks(): Int = 0
}
// Minimal TaskScheduler stub for the external-cluster-manager test: every
// scheduling operation is a no-op; only the 'initialized' flag (set by
// DummyExternalClusterManager.initialize) is observed by the suite.
private class DummyTaskScheduler extends TaskScheduler {
// Set to true by DummyExternalClusterManager.initialize.
var initialized = false
override def schedulingMode: SchedulingMode = SchedulingMode.FIFO
override def rootPool: Pool = new Pool("", schedulingMode, 0, 0)
override def start(): Unit = {}
override def stop(): Unit = {}
override def submitTasks(taskSet: TaskSet): Unit = {}
override def cancelTasks(stageId: Int, interruptThread: Boolean): Unit = {}
override def killTaskAttempt(
taskId: Long, interruptThread: Boolean, reason: String): Boolean = false
override def killAllTaskAttempts(
stageId: Int, interruptThread: Boolean, reason: String): Unit = {}
override def notifyPartitionCompletion(stageId: Int, partitionId: Int): Unit = {}
override def setDAGScheduler(dagScheduler: DAGScheduler): Unit = {}
override def defaultParallelism(): Int = 2
override def executorLost(executorId: String, reason: ExecutorLossReason): Unit = {}
override def workerRemoved(workerId: String, host: String, message: String): Unit = {}
override def applicationAttemptId(): Option[String] = None
// Heartbeat hook: always reports success.
def executorHeartbeatReceived(
execId: String,
accumUpdates: Array[(Long, Seq[AccumulatorV2[_, _]])],
blockManagerId: BlockManagerId,
executorMetrics: ExecutorMetrics): Boolean = true
}
| aosagie/spark | core/src/test/scala/org/apache/spark/scheduler/ExternalClusterManagerSuite.scala | Scala | apache-2.0 | 4,214 |
/*
* Licensed to The Apereo Foundation under one or more contributor license
* agreements. See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*
* The Apereo Foundation licenses this file to you under the Apache License,
* Version 2.0 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.unitime.timetable.gwt.server;
import java.io.IOException;
import java.io.OutputStream;
import java.io.PrintWriter;
import java.util.List;
import java.util.TreeSet;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.commons.fileupload.FileItem;
import org.apache.commons.fileupload.FileUploadException;
import org.apache.commons.fileupload.disk.DiskFileItemFactory;
import org.apache.commons.fileupload.servlet.ServletFileUpload;
import org.unitime.timetable.ApplicationProperties;
import org.unitime.timetable.defaults.ApplicationProperty;
import org.unitime.timetable.gwt.server.CalendarServlet.HttpParams;
import org.unitime.timetable.gwt.server.CalendarServlet.Params;
import org.unitime.timetable.gwt.server.CalendarServlet.QParams;
import org.unitime.timetable.model.Event;
import org.unitime.timetable.model.EventNote;
import org.unitime.timetable.model.dao.EventDAO;
import org.unitime.timetable.security.SessionContext;
import org.unitime.timetable.security.context.HttpSessionContext;
import org.unitime.timetable.security.rights.Right;
/**
* @author Tomas Muller
*/
public class UploadServlet extends HttpServlet {
private static final long serialVersionUID = 1L;
// Fallback upload size limit (4 MB) used when ApplicationProperty.MaxUploadSize is unset.
private static final int DEFAULT_MAX_SIZE = 4096 * 1024;
// HTTP session attribute under which doPost() stores the most recently uploaded file.
public static final String SESSION_LAST_FILE = "LAST_FILE";
protected SessionContext getSessionContext() {
return HttpSessionContext.getSessionContext(getServletContext());
}
/**
 * Downloads an event note attachment.
 *
 * Parameters are read either from the plain HTTP query or, when "q" is
 * present, from the QParams-encoded string. Supported parameters:
 * "event" (event id, required), "name" (attachment file name, optional),
 * "note" (note id, optional). Among all matching notes the last one in
 * EventNote ordering is served. Permission is checked only for plain
 * requests (q == null) -- presumably q-encoded links carry their own
 * authorization; confirm against the QParams producers.
 *
 * @throws ServletException if no matching attachment was found
 */
public void doGet(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException {
Params params = null;
String q = request.getParameter("q");
if (q != null) {
params = new QParams(q);
} else {
params = new HttpParams(request);
}
if (params.getParameter("event") != null) {
Long eventId = Long.parseLong(params.getParameter("event"));
String fileName = params.getParameter("name");
Long noteId = (params.getParameter("note") == null ? null : Long.valueOf(params.getParameter("note")));
if (q == null)
getSessionContext().checkPermissionAnyAuthority(Long.valueOf(eventId), "Event", Right.EventDetail);
Event event = EventDAO.getInstance().get(eventId);
// Collect candidate notes; TreeSet keeps them in EventNote's natural order.
TreeSet<EventNote> notes = new TreeSet<EventNote>();
if (event != null)
for (EventNote note: event.getNotes()) {
// skip notes without an attachment
if (note.getAttachedName() == null || note.getAttachedName().isEmpty()) continue;
if (fileName != null) {
if (fileName.equals(note.getAttachedName()) && (noteId == null || noteId.equals(note.getUniqueId()))) notes.add(note);
} else if (noteId != null) {
if (noteId.equals(note.getUniqueId())) notes.add(note);
} else {
notes.add(note);
}
}
if (!notes.isEmpty()) {
// Serve the last matching note as a file download.
EventNote note = notes.last();
response.setContentType(note.getAttachedContentType());
response.setHeader( "Content-Disposition", "attachment; filename=\"" + note.getAttachedName() + "\"" );
OutputStream out = response.getOutputStream();
out.write(note.getAttachedFile());
out.flush();
out.close();
return;
}
}
throw new ServletException("Nothing to download.");
}
/**
 * Accepts a multipart file upload and remembers the (single) uploaded
 * file in the HTTP session under SESSION_LAST_FILE for later processing.
 * Responds with a plain-text status message; failures are prefixed with
 * "ERROR:".
 */
public void doPost(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException {
try {
String maxSizeProperty = ApplicationProperty.MaxUploadSize.value();
int maxSize = (maxSizeProperty == null ? DEFAULT_MAX_SIZE : Integer.parseInt(maxSizeProperty));
ServletFileUpload upload = new ServletFileUpload(new DiskFileItemFactory(maxSize, ApplicationProperties.getTempFolder()));
upload.setSizeMax(maxSize);
List<FileItem> files = (List<FileItem>)upload.parseRequest(request);
String message = null;
// Only a single non-empty file is accepted; anything else clears the session attribute.
if (files.size() == 1) {
FileItem file = files.get(0);
if (file.getSize() <= 0) {
request.getSession().removeAttribute(SESSION_LAST_FILE);
message = "No file is selected.";
} else {
request.getSession().setAttribute(SESSION_LAST_FILE, file);
message = "File " + file.getName() + " (" + file.getSize() + " bytes) selected.";
}
} else {
request.getSession().removeAttribute(SESSION_LAST_FILE);
message = "No file is selected.";
}
response.setContentType("text/html; charset=UTF-8");
response.setCharacterEncoding("UTF-8");
PrintWriter out = response.getWriter();
out.print(message);
out.flush();
out.close();
} catch (FileUploadException e) {
response.setContentType("text/html; charset=UTF-8");
response.setCharacterEncoding("UTF-8");
PrintWriter out = response.getWriter();
out.print("ERROR:Upload failed: " + e.getMessage());
out.flush();
out.close();
}
}
}
| rafati/unitime | JavaSource/org/unitime/timetable/gwt/server/UploadServlet.java | Java | apache-2.0 | 5,614 |
/*
* Copyright 2000-2018 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
*/
package com.intellij.dvcs.hosting;
import com.intellij.openapi.progress.ProgressIndicator;
import org.jetbrains.annotations.CalledInBackground;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import java.awt.*;
import java.util.Collections;
import java.util.List;
/**
* Allows to query remote service for a list of available VCS repositories with the current IDEA settings.
* Can be used to suggest the list of repositories that can be checked out.
* <p>
* Implement either {@link #getAvailableRepositories(ProgressIndicator)} to load everything in a single request
* or {@link #getAvailableRepositoriesFromMultipleSources(ProgressIndicator)} to load in several requests
*/
public interface RepositoryListLoader {
/**
 * Check if this loader is configured (e.g. has necessary authentication data)
 *
 * @return true if repository lists can be requested without further setup
 */
boolean isEnabled();
/**
 * Prompt user for additional configuration (e.g. provide credentials)
 *
 * @param parentComponent component used to anchor any dialogs shown
 * @return true if the loader is enabled after the prompt
 */
default boolean enable(@Nullable Component parentComponent) {
return enable();
}
/**
 * @deprecated parent component is required for dialogs to not fall through on welcome screen
 */
@Deprecated
default boolean enable() { return false; }
/**
 * Load repository urls in a single requests
 *
 * @param progressIndicator indicator for the background loading operation
 * @throws RepositoryListLoadingException if the remote service could not be queried
 */
@CalledInBackground
@NotNull
default List<String> getAvailableRepositories(@NotNull ProgressIndicator progressIndicator) throws RepositoryListLoadingException {
return Collections.emptyList();
}
/**
 * Load repository urls in multiple requests with ability to show partial result
 *
 * <p>By default delegates to {@link #getAvailableRepositories(ProgressIndicator)}
 * and converts a failure into an error entry of the result.
 */
@CalledInBackground
@NotNull
default Result getAvailableRepositoriesFromMultipleSources(@NotNull ProgressIndicator progressIndicator) {
try {
return new Result(getAvailableRepositories(progressIndicator), Collections.emptyList());
}
catch (RepositoryListLoadingException e) {
return new Result(Collections.emptyList(), Collections.singletonList(e));
}
}
/**
 * Result from multiple sources
 */
class Result {
@NotNull private final List<String> myUrls;
@NotNull private final List<RepositoryListLoadingException> myErrors;
/**
 * @param urls   urls collected from all sources (may contain duplicates)
 * @param errors failures encountered while querying individual sources
 */
public Result(@NotNull List<String> urls, @NotNull List<RepositoryListLoadingException> errors) {
this.myUrls = urls;
this.myErrors = errors;
}
/**
 * @return all loaded urls (can contain duplicates)
 */
@NotNull
public List<String> getUrls() {
return myUrls;
}
/**
 * @return exceptions occurred during loading from some sources
 */
@NotNull
public List<RepositoryListLoadingException> getErrors() {
return myErrors;
}
}
}
| goodwinnk/intellij-community | platform/dvcs-api/src/com/intellij/dvcs/hosting/RepositoryListLoader.java | Java | apache-2.0 | 2,815 |
require 'log4r'
require 'VMwareWebService/MiqVim'
require 'VMwareWebService/MiqVimBroker'
require 'disk/MiqDisk'
require 'WriteVm/MiqPayloadOutputter'
#
# Formatter to output log messages to the console.
#
class ConsoleFormatter < Log4r::Formatter
  # Render a log event as plain text terminated by a newline; non-string
  # payloads are rendered with #inspect.
  def format(event)
    data = event.data
    text = data.kind_of?(String) ? data : data.inspect
    "#{text}\n"
  end
end
# Global logger wired to stderr through ConsoleFormatter.
$log = Log4r::Logger.new 'toplog'
Log4r::StderrOutputter.new('err_console', :level => Log4r::INFO, :formatter => ConsoleFormatter)
$log.add 'err_console'
# Unbuffered output so progress is visible immediately.
$stdout.sync = true
$stderr.sync = true
# NOTE(review): MKFILE appears unused in this script.
MKFILE = "payload"
# Placeholder connection settings: each 'raise' forces the user to edit
# this script and supply real values before it can run.
SERVER = raise "please define SERVER"
USERNAME = raise "please define USERNAME"
PASSWORD = raise "please define PASSWORD"
TARGET_VM = raise "please define"
begin
# Connect to the vSphere/ESX web service.
vim = MiqVim.new(SERVER, USERNAME, PASSWORD)
puts
puts "vim.class: #{vim.class}"
puts "#{vim.server} is #{(vim.isVirtualCenter? ? 'VC' : 'ESX')}"
puts "API version: #{vim.apiVersion}"
puts
#
# Get the target VM
#
tvm = vim.virtualMachinesByFilter("config.name" => TARGET_VM)
if tvm.empty?
puts "VM: #{TARGET_VM} not found"
exit
end
vmMor = tvm[0]['MOR']
miqVm = vim.getVimVmByMor(vmMor)
#
# We can't do this if the VM is powered on.
#
if miqVm.poweredOn?
puts "VM: #{TARGET_VM} is powered on"
exit
end
#
# Construct the path to the new payload vmdk file.
#
payloadVmdk = File.join(File.dirname(miqVm.dsPath), "miqPayload.vmdk")
puts "payloadVmdk = #{payloadVmdk}"
#
# Open a VixDiskLib connection.
#
vdlConnection = vim.vdlConnection
#
# Instantiate a MiqDisk object based on the vdl connection,
# giving us remote write access to the new disk.
#
vixDiskInfo = {:connection => vdlConnection, :fileName => payloadVmdk}
dInfo = OpenStruct.new
dInfo.mountMode = "rw"
dInfo.vixDiskInfo = vixDiskInfo
disk = MiqDisk.getDisk(dInfo)
unless disk
puts "Failed to open disk for writing"
exit(1)
end
# Read the payload header from the start of the disk:
# 8-byte magic, 32-bit size, 32-bit current log position.
disk.seek(0)
magic, size, pos = disk.read(Log4r::MiqPayloadOutputter::HEADER_SIZE).unpack("a8LL")
puts
puts "MAGIC: #{magic}"
puts "SIZE: #{size}"
puts "POS: #{pos}"
puts
puts
# Dump the log contents ('pos' bytes follow the header).
puts "*** LOG START"
puts disk.read(pos)
puts "*** LOG END"
disk.close
rescue => err
puts err.to_s
puts err.backtrace.join("\n")
ensure
# Always release VM/disk/service handles, even on error.
puts
puts "Exiting..."
miqVm.release if miqVm
vdlConnection.disconnect if vdlConnection
vim.disconnect if vim
end
| maas-ufcg/manageiq | gems/pending/WriteVm/test/readLog.rb | Ruby | apache-2.0 | 2,408 |
/*
* Copyright 2011 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @fileoverview Code for loading high resolution critical images.
* This javascript is part of DelayImages filter.
*
* @author [email protected] (Pulkit Goyal)
*/
goog.require('pagespeedutils');
// Exporting functions using quoted attributes to prevent js compiler from
// renaming them.
// See http://code.google.com/closure/compiler/docs/api-tutorial3.html#dangers
window['pagespeed'] = window['pagespeed'] || {};
var pagespeed = window['pagespeed'];
/**
* @constructor
*/
pagespeed.DelayImages = function() {
/**
 * Boolean that controls whether the event handlers for lazy load are already
 * registered. Guards registerLazyLoadHighRes so document-level handlers are
 * attached at most once.
 * @type {boolean}
 * @private
 */
this.lazyLoadHighResHandlersRegistered_ = false;
/**
 * Boolean that controls the logic to replace low res images with high res
 * only once. Checked and set by loadHighRes.
 * @type {boolean}
 * @private
 */
this.highResReplaced_ = false;
};
/**
* For given elements, replace src with pagespeed_high_res_src if present.
* @param {NodeList.<Element>} elements list of DOM elements to check.
*/
pagespeed.DelayImages.prototype.replaceElementSrc = function(elements) {
  for (var idx = 0; idx < elements.length; ++idx) {
    var element = elements[idx];
    var highResSrc = element.getAttribute('pagespeed_high_res_src');
    // Only touch elements that actually carry a high-res source.
    if (highResSrc) {
      element.setAttribute('src', highResSrc);
    }
  }
};
pagespeed.DelayImages.prototype['replaceElementSrc'] =
    pagespeed.DelayImages.prototype.replaceElementSrc;
/**
* Register the event handlers to lazy load the high res images. This is
* called only when lazyload_high_res_experimental flag is enabled.
*/
pagespeed.DelayImages.prototype.registerLazyLoadHighRes = function() {
  // Add event handlers only once; on later calls just re-arm the one-shot
  // replacement so newly delivered low-res images get upgraded again.
  if (this.lazyLoadHighResHandlersRegistered_) {
    this.highResReplaced_ = false;
    return;
  }
  var elem = document.body;
  var interval = 500;
  var tapStart, tapEnd = 0;
  var me = this;
  // Bug fix: this previously assigned to 'this.highResReplaced' (missing the
  // trailing underscore), creating a stray property instead of resetting the
  // private flag declared in the constructor.
  this.highResReplaced_ = false;
  if ('ontouchstart' in elem) {
    pagespeedutils.addHandler(elem, 'touchstart', function(e) {
      tapStart = Date.now();
    });
    pagespeedutils.addHandler(elem, 'touchend', function(e) {
      tapEnd = Date.now();
      // Load the high res images if there is a multi-touch or if the tap
      // duration is less than 500ms i.e single click. The timer catches the
      // click event sooner than the click handler on most phones.
      if ((e.changedTouches != null && e.changedTouches.length == 2) ||
          (e.touches != null && e.touches.length == 2) ||
          tapEnd - tapStart < interval) {
        me.loadHighRes();
      }
    });
  } else {
    pagespeedutils.addHandler(window, 'click', function(e) {
      me.loadHighRes();
    });
  }
  // Always upgrade once the page has finished loading.
  pagespeedutils.addHandler(window, 'load', function(e) {
    me.loadHighRes();
  });
  this.lazyLoadHighResHandlersRegistered_ = true;
};
pagespeed.DelayImages.prototype['registerLazyLoadHighRes'] =
    pagespeed.DelayImages.prototype.registerLazyLoadHighRes;
/**
* Triggered from event handlers that were previously registered. Replaces
* low res images with high res sources.
*/
pagespeed.DelayImages.prototype.loadHighRes = function() {
  // One-shot: subsequent triggers are ignored until re-armed.
  if (this.highResReplaced_) {
    return;
  }
  this.replaceWithHighRes();
  this.highResReplaced_ = true;
};
/**
* Replaces low resolution image with high resolution image.
*/
pagespeed.DelayImages.prototype.replaceWithHighRes = function() {
  // Both <img> and <input> elements may carry delayed image sources.
  var tagNames = ['img', 'input'];
  for (var i = 0; i < tagNames.length; ++i) {
    this.replaceElementSrc(document.getElementsByTagName(tagNames[i]));
  }
};
pagespeed.DelayImages.prototype['replaceWithHighRes'] =
    pagespeed.DelayImages.prototype.replaceWithHighRes;
/**
* Initializes the delay images module.
*/
pagespeed.delayImagesInit = function() {
  // Publish the singleton under its exported (unobfuscated) name.
  pagespeed['delayImages'] = new pagespeed.DelayImages();
};
pagespeed['delayImagesInit'] = pagespeed.delayImagesInit;
| ajayanandgit/mod_pagespeed | net/instaweb/rewriter/delay_images.js | JavaScript | apache-2.0 | 4,420 |
/**
* Copyright 2015 GitFx
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*
* Created by rvvaidya on 03/08/15.
*/
import io.github.gitfx.GitFxApp;
import javafx.scene.Parent;
import javafx.scene.Scene;
import javafx.scene.control.ScrollPane;
import javafx.scene.layout.VBox;
import javafx.stage.Stage;
import org.junit.Assert;
import org.junit.Test;
import org.testfx.api.FxToolkit;
import org.testfx.util.WaitForAsyncUtils;
public class GitFxHistoryScrollPaneTest extends ApplicationTest {
    Stage stage;
    Scene scene;

    /** Launch the GitFx application under test before the test runs. */
    @Override
    public void init()
            throws Exception {
        stage = launch(GitFxApp.class, null);
    }

    /** Hide the stage once the test has finished. */
    @Override
    public void stop() throws Exception {
        FxToolkit.hideStage();
    }

    /**
     * Verifies the history scroll pane is not nested directly inside a VBox:
     * a ScrollPane below a VBox causes the accordion not to scroll correctly.
     */
    @Test
    public void launchApplication() throws Exception {
        WaitForAsyncUtils.waitForFxEvents();
        ScrollPane scrollPane = (ScrollPane) scene.lookup("#historyScrollPane");
        Parent container = scrollPane.getParent();
        // Assert directly on the boolean instead of the redundant
        // '(container instanceof VBox) ? true : false' ternary.
        Assert.assertFalse("ScrollPane inside VBox", container instanceof VBox);
    }

    /** Captures the scene so the test can look up nodes in it. */
    @Override
    public void start(Stage stage) throws Exception {
        scene = stage.getScene();
    }
}
| jughyd/GitFx | src/test/java/GitFxHistoryScrollPaneTest.java | Java | apache-2.0 | 1,776 |
/**
* @license
* Copyright Google Inc. All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
// Extra day-period data for the "en-MS" locale:
// [narrow/abbreviated formats, stand-alone names, time-of-day ranges].
// This file appears to be generated (see the source map reference below);
// prefer regenerating over hand-editing.
export default [
[
['mi', 'n', 'in the morning', 'in the afternoon', 'in the evening', 'at night'],
['midnight', 'noon', 'in the morning', 'in the afternoon', 'in the evening', 'at night'],
],
[
['midnight', 'noon', 'morning', 'afternoon', 'evening', 'night'],
// NOTE: the bare ',' below is an intentional array hole (elision); keep it.
,
],
[
'00:00', '12:00', ['06:00', '12:00'], ['12:00', '18:00'], ['18:00', '21:00'],
['21:00', '06:00']
]
];
//# sourceMappingURL=en-MS.js.map
<link rel="stylesheet" href="../../elements-demo-resources/demo.css">
<link rel="stylesheet" href="common.css">
<script src="../../elements-demo-resources/ga.js"></script>
<!-- Used for common JS and for loading polymer dependencies -->
<script src="common.js"></script>
| manolo/components | demo/common.html | HTML | apache-2.0 | 273 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.runtime.stream.sql
import org.apache.flink.api.scala._
import org.apache.flink.table.api.scala._
import org.apache.flink.table.planner.runtime.utils.StreamingWithStateTestBase.StateBackendMode
import org.apache.flink.table.planner.runtime.utils.TimeTestUtil.TimestampAndWatermarkWithOffset
import org.apache.flink.table.planner.runtime.utils._
import org.apache.flink.types.Row
import org.junit.Assert._
import org.junit._
import org.junit.runner.RunWith
import org.junit.runners.Parameterized
/**
 * Streaming SQL IT cases for ORDER BY on time attributes (temporal sort).
 * Results are collected through a retract sink and compared in order.
 */
@RunWith(classOf[Parameterized])
class TemporalSortITCase(mode: StateBackendMode) extends StreamingWithStateTestBase(mode) {
// Sort on the event-time attribute alone: output must follow rowtime order.
@Test
def testOnlyEventTimeOrderBy(): Unit = {
// (rowtime, key, payload, int) tuples, deliberately ingested out of order
val data = List(
(3L, 2L, "Hello world", 3),
(2L, 2L, "Hello", 2),
(6L, 3L, "Luke Skywalker", 6),
(5L, 3L, "I am fine.", 5),
(7L, 4L, "Comment#1", 7),
(9L, 4L, "Comment#3", 9),
(10L, 4L, "Comment#4", 10),
(8L, 4L, "Comment#2", 8),
(1L, 1L, "Hi", 2),
(1L, 1L, "Hi", 1),
(4L, 3L, "Helloworld, how are you?", 4))
val t = failingDataSource(data)
.assignTimestampsAndWatermarks(
new TimestampAndWatermarkWithOffset[(Long, Long, String, Int)](10L))
.toTable(tEnv, 'rowtime.rowtime, 'key, 'str, 'int)
tEnv.registerTable("T", t)
val sqlQuery = "SELECT key, str, `int` FROM T ORDER BY rowtime"
val sink = new TestingRetractSink
val results = tEnv.sqlQuery(sqlQuery).toRetractStream[Row]
results.addSink(sink).setParallelism(1)
env.execute()
// The two records with rowtime 1 have no secondary sort key, so
// "1,Hi,2" (ingested first) precedes "1,Hi,1" in the expected output.
val expected = Seq(
"1,Hi,2",
"1,Hi,1",
"2,Hello,2",
"2,Hello world,3",
"3,Helloworld, how are you?,4",
"3,I am fine.,5",
"3,Luke Skywalker,6",
"4,Comment#1,7",
"4,Comment#2,8",
"4,Comment#3,9",
"4,Comment#4,10")
assertEquals(expected, sink.getRetractResults)
}
// Same data, but ties on rowtime are broken by the 'int' column.
@Test
def testEventTimeAndOtherFieldOrderBy(): Unit = {
val data = List(
(3L, 2L, "Hello world", 3),
(2L, 2L, "Hello", 2),
(6L, 3L, "Luke Skywalker", 6),
(5L, 3L, "I am fine.", 5),
(7L, 4L, "Comment#1", 7),
(9L, 4L, "Comment#3", 9),
(10L, 4L, "Comment#4", 10),
(8L, 4L, "Comment#2", 8),
(1L, 1L, "Hi", 2),
(1L, 1L, "Hi", 1),
(4L, 3L, "Helloworld, how are you?", 4))
val t = failingDataSource(data)
.assignTimestampsAndWatermarks(
new TimestampAndWatermarkWithOffset[(Long, Long, String, Int)](10L))
.toTable(tEnv, 'rowtime.rowtime, 'key, 'str, 'int)
tEnv.registerTable("T", t)
val sqlQuery = "SELECT key, str, `int` FROM T ORDER BY rowtime, `int`"
val sink = new TestingRetractSink
val results = tEnv.sqlQuery(sqlQuery).toRetractStream[Row]
results.addSink(sink).setParallelism(1)
env.execute()
// With the secondary key, "1,Hi,1" now comes before "1,Hi,2".
val expected = Seq(
"1,Hi,1",
"1,Hi,2",
"2,Hello,2",
"2,Hello world,3",
"3,Helloworld, how are you?,4",
"3,I am fine.,5",
"3,Luke Skywalker,6",
"4,Comment#1,7",
"4,Comment#2,8",
"4,Comment#3,9",
"4,Comment#4,10")
assertEquals(expected, sink.getRetractResults)
}
// Sorting on processing time: output preserves the ingestion order of the
// shared test data set.
@Test
def testProcTimeOrderBy(): Unit = {
val t = failingDataSource(TestData.tupleData3)
.toTable(tEnv, 'a, 'b, 'c, 'proctime.proctime)
tEnv.registerTable("T", t)
val sql = "SELECT a, b, c FROM T ORDER BY proctime"
val sink = new TestingRetractSink
val results = tEnv.sqlQuery(sql).toRetractStream[Row]
results.addSink(sink).setParallelism(1)
env.execute()
val expected = Seq(
"1,1,Hi",
"2,2,Hello",
"3,2,Hello world",
"4,3,Hello world, how are you?",
"5,3,I am fine.",
"6,3,Luke Skywalker",
"7,4,Comment#1",
"8,4,Comment#2",
"9,4,Comment#3",
"10,4,Comment#4",
"11,5,Comment#5",
"12,5,Comment#6",
"13,5,Comment#7",
"14,5,Comment#8",
"15,5,Comment#9",
"16,6,Comment#10",
"17,6,Comment#11",
"18,6,Comment#12",
"19,6,Comment#13",
"20,6,Comment#14",
"21,6,Comment#15")
assertEquals(expected, sink.getRetractResults)
}
}
| hequn8128/flink | flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/planner/runtime/stream/sql/TemporalSortITCase.scala | Scala | apache-2.0 | 4,971 |
/*
* Copyright 2000-2016 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.lang;
/**
 * Read accessors for the XML/HTML code-folding options.
 */
public interface XmlCodeFoldingSettings {
/** @return whether XML tags should be collapsed */
boolean isCollapseXmlTags();
/** @return whether the HTML 'style' attribute value should be collapsed */
boolean isCollapseHtmlStyleAttribute();
/** @return whether entities should be collapsed */
boolean isCollapseEntities();
/** @return whether data: URIs should be collapsed */
boolean isCollapseDataUri();
}
| hurricup/intellij-community | xml/xml-psi-api/src/com/intellij/lang/XmlCodeFoldingSettings.java | Java | apache-2.0 | 809 |
//===--- ForeignRepresentationInfo.h - Used in bridging queries -*- C++ -*-===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See http://swift.org/LICENSE.txt for license information
// See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
#ifndef SWIFT_FOREIGNREPRESENTATIONINFO_H
#define SWIFT_FOREIGNREPRESENTATIONINFO_H
#include "swift/AST/ProtocolConformance.h"
#include "swift/AST/Type.h"
#include "swift/Basic/LLVM.h"
#include "llvm/ADT/PointerEmbeddedInt.h"
#include "llvm/ADT/PointerIntPair.h"
namespace swift {
class ForeignRepresentationInfo {
using PayloadTy =
llvm::PointerEmbeddedInt<uintptr_t, sizeof(uintptr_t) * CHAR_BIT - 3>;
/// The low three bits store a ForeignRepresentableKind.
///
/// When the ForeignRepresentableKind == None, the upper bits are
/// the generation count at which this negative result was last checked.
/// When it's Bridged, it's the conformance that describes the bridging.
/// When it's Trivial, it's simply a flag stating whether Optional is
/// supported.
llvm::PointerIntPair<PayloadTy, 3, ForeignRepresentableKind> Storage;
public:
/// Retrieve a cache entry for a non-foreign-representable type.
///
/// The payload stores \p generation: the generation count at which this
/// negative result was last checked (see the Storage comment above), so
/// stale negative entries can be re-evaluated later.
static ForeignRepresentationInfo forNone(unsigned generation = 0) {
ForeignRepresentationInfo result;
result.Storage = { generation, ForeignRepresentableKind::None };
return result;
}
// Retrieve a cache entry for a trivially representable type.
// Payload 0 means Optional of this type is NOT supported (see
// isRepresentableAsOptional).
static ForeignRepresentationInfo forTrivial() {
ForeignRepresentationInfo result;
result.Storage = { 0, ForeignRepresentableKind::Trivial };
return result;
}
// Retrieve a cache entry for a trivially representable type that can also
// be optional.
// Payload 1 means Optional of this type IS supported (see
// isRepresentableAsOptional).
static ForeignRepresentationInfo forTrivialWithOptional() {
ForeignRepresentationInfo result;
result.Storage = { 1, ForeignRepresentableKind::Trivial };
return result;
}
// Retrieve a cache entry for a bridged representable type.
// The conformance pointer is smuggled through the pointer-embedded integer
// payload; the assert below verifies the pointer's alignment survived the
// round trip.
static ForeignRepresentationInfo
forBridged(ProtocolConformance *conformance) {
using PayloadTraits = llvm::PointerLikeTypeTraits<PayloadTy>;
ForeignRepresentationInfo result;
result.Storage = {PayloadTraits::getFromVoidPointer(conformance),
ForeignRepresentableKind::Bridged};
assert(result.getConformance() == conformance && "insufficiently aligned");
return result;
}
/// Retrieve the foreign representable kind.
ForeignRepresentableKind getKind() const {
return Storage.getInt();
}
/// Retrieve the generation for a non-representable type.
unsigned getGeneration() const {
assert(getKind() == ForeignRepresentableKind::None);
return Storage.getPointer();
}
/// Retrieve the protocol conformance that makes it representable.
ProtocolConformance *getConformance() const {
switch (getKind()) {
case ForeignRepresentableKind::None:
llvm_unreachable("this type is not representable");
case ForeignRepresentableKind::Trivial:
return nullptr;
case ForeignRepresentableKind::Bridged: {
using PayloadTraits = llvm::PointerLikeTypeTraits<PayloadTy>;
auto payload = PayloadTraits::getAsVoidPointer(Storage.getPointer());
return static_cast<ProtocolConformance *>(payload);
}
case ForeignRepresentableKind::Object:
case ForeignRepresentableKind::StaticBridged:
llvm_unreachable("unexpected kind in ForeignRepresentableCacheEntry");
}
}
/// Returns true if the optional version of this type is also representable.
bool isRepresentableAsOptional() const {
switch (getKind()) {
case ForeignRepresentableKind::None:
llvm_unreachable("this type is not representable");
case ForeignRepresentableKind::Trivial:
return Storage.getPointer() != 0;
case ForeignRepresentableKind::Bridged: {
auto KPK_ObjectiveCBridgeable = KnownProtocolKind::ObjectiveCBridgeable;
ProtocolDecl *proto = getConformance()->getProtocol();
assert(proto->isSpecificProtocol(KPK_ObjectiveCBridgeable) &&
"unknown protocol; does it support optional?");
(void)proto;
(void)KPK_ObjectiveCBridgeable;
return true;
}
case ForeignRepresentableKind::Object:
case ForeignRepresentableKind::StaticBridged:
llvm_unreachable("unexpected kind in ForeignRepresentableCacheEntry");
}
}
};
} // end namespace swift
#endif // SWIFT_FOREIGNREPRESENTATIONINFO_H
| dreamsxin/swift | lib/AST/ForeignRepresentationInfo.h | C | apache-2.0 | 4,702 |
/*
* Copyright (c) 2005-2011 Grameen Foundation USA
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License.
*
* See also http://www.apache.org/licenses/LICENSE-2.0.html for an
* explanation of the license and how it is applied.
*/
package org.mifos.customers.util.helpers;
import org.mifos.customers.api.DataTransferObject;
/**
 * Lightweight transfer object carrying the identifying and status fields of a
 * customer across service boundaries.
 */
public class CustomerDto implements DataTransferObject {

    private Integer customerId;
    private String globalCustNum;
    private String displayName;
    private Short statusId;
    private Integer versionNo;
    private Short customerLevelId;
    private Short officeId;
    private Short personnelId;

    /** Creates an empty DTO; populate it through the setters. */
    public CustomerDto() {
    }

    /**
     * Creates a DTO carrying only the basic identifying fields.
     *
     * @param customerId the internal customer id
     * @param displayName the customer's display name
     * @param globalCustNum the global customer number
     * @param statusId the customer's status id
     */
    public CustomerDto(Integer customerId, String displayName, String globalCustNum,
            Short statusId) {
        this.customerId = customerId;
        this.displayName = displayName;
        this.globalCustNum = globalCustNum;
        this.statusId = statusId;
    }

    /**
     * Creates a fully populated DTO.
     *
     * @param customerId the internal customer id
     * @param displayName the customer's display name
     * @param globalCustNum the global customer number
     * @param statusId the customer's status id
     * @param customerLevelId the customer level id
     * @param versionNo the optimistic-locking version number
     * @param officeId the owning office id
     * @param personnelId the assigned personnel id
     */
    public CustomerDto(Integer customerId, String displayName, String globalCustNum,
            Short statusId, Short customerLevelId, Integer versionNo, Short officeId, Short personnelId) {
        this(customerId, displayName, globalCustNum, statusId);
        this.customerLevelId = customerLevelId;
        this.versionNo = versionNo;
        this.officeId = officeId;
        this.personnelId = personnelId;
    }

    /** @return the internal customer id */
    public Integer getCustomerId() {
        return this.customerId;
    }

    public void setCustomerId(Integer customerId) {
        this.customerId = customerId;
    }

    /** @return the customer's display name */
    public String getDisplayName() {
        return this.displayName;
    }

    public void setDisplayName(String displayName) {
        this.displayName = displayName;
    }

    /** @return the global customer number */
    public String getGlobalCustNum() {
        return this.globalCustNum;
    }

    public void setGlobalCustNum(String globalCustNum) {
        this.globalCustNum = globalCustNum;
    }

    /** @return the customer's status id */
    public Short getStatusId() {
        return this.statusId;
    }

    public void setStatusId(Short statusId) {
        this.statusId = statusId;
    }

    /** @return the customer level id */
    public Short getCustomerLevelId() {
        return this.customerLevelId;
    }

    public void setCustomerLevelId(Short customerLevelId) {
        this.customerLevelId = customerLevelId;
    }

    /** @return the optimistic-locking version number */
    public Integer getVersionNo() {
        return this.versionNo;
    }

    public void setVersionNo(Integer versionNo) {
        this.versionNo = versionNo;
    }

    /** @return the owning office id */
    public Short getOfficeId() {
        return this.officeId;
    }

    public void setOfficeId(Short officeId) {
        this.officeId = officeId;
    }

    /** @return the assigned personnel id */
    public Short getPersonnelId() {
        return this.personnelId;
    }

    public void setPersonnelId(Short personnelId) {
        this.personnelId = personnelId;
    }
}
| madhav123/gkmaster | appdomain/src/main/java/org/mifos/customers/util/helpers/CustomerDto.java | Java | apache-2.0 | 3,437 |
/*
* Copyright 2007-2009 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Project: JGentleFramework
*/
package org.jgentleframework.context;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import org.jgentleframework.configure.Configurable;
import org.jgentleframework.configure.annotation.BeanServices;
import org.jgentleframework.configure.enums.Scope;
import org.jgentleframework.context.injecting.AbstractBeanFactory;
import org.jgentleframework.context.injecting.AppropriateScopeNameClass;
import org.jgentleframework.context.injecting.ObjectBeanFactory;
import org.jgentleframework.context.injecting.Provider;
import org.jgentleframework.context.injecting.autodetect.AutoLoadingDefinitionDetector;
import org.jgentleframework.context.injecting.autodetect.Detector;
import org.jgentleframework.context.injecting.autodetect.ExtensionPointsDetector;
import org.jgentleframework.context.injecting.autodetect.FirstDetector;
import org.jgentleframework.context.injecting.scope.ScopeImplementation;
import org.jgentleframework.context.services.ServiceHandler;
import org.jgentleframework.context.support.CoreInstantiationSelector;
import org.jgentleframework.context.support.CoreInstantiationSelectorImpl;
import org.jgentleframework.core.JGentleException;
import org.jgentleframework.core.JGentleRuntimeException;
import org.jgentleframework.core.factory.BeanCreationProcessor;
import org.jgentleframework.core.factory.InOutDependencyException;
import org.jgentleframework.core.intercept.support.Matcher;
import org.jgentleframework.reflection.metadata.Definition;
import org.jgentleframework.utils.Assertor;
import org.jgentleframework.utils.Utils;
/**
* This is an implementation of {@link Provider} interface, is responsible for
* core container of JGentle.
*
* @author LE QUOC CHUNG - mailto: <a
* href="mailto:[email protected]">[email protected]</a>
* @date Oct 1, 2007
* @see Provider
* @see AbstractBeanFactory
*/
class ProviderCoreCreator extends AbstractBeanFactory implements Provider {
    /** The Constant serialVersionUID. */
    private static final long serialVersionUID = 330183329296893472L;

    /**
     * The {@link List list} holds a list of config instance of this
     * {@link Provider}.
     */
    private List<Configurable> configInstances = null;

    /** the detector controller. */
    private Detector detectorController = null;

    /** registered interceptors. */
    protected Map<Matcher<Definition>, List<Object>> interceptorList = null;

    /** The interceptor cacher. */
    protected ConcurrentMap<Definition, Matcher<Definition>> matcherCache = null;

    /**
     * Constructor.
     *
     * @param serviceHandler
     *            the {@link ServiceHandler} instance
     * @param OLArray
     *            the oL array
     */
    public ProviderCoreCreator(ServiceHandler serviceHandler,
            List<Map<String, Object>> OLArray) {

        this.serviceHandler = serviceHandler;
        this.definitionManager = this.serviceHandler.getDefinitionManager();
        // The object bean factory delegates reference/bean resolution back to
        // this provider so both objects share one resolution path.
        this.objectBeanFactory = new ObjectBeanFactoryImpl(this) {
            @Override
            public Object getRefInstance(String refInstance) {
                return ProviderCoreCreator.this.getRefInstance(refInstance);
            }

            @Override
            public Object getBean(String refer) {
                return ProviderCoreCreator.this.getBean(refer);
            }
        };
        this.mappingList = this.objectBeanFactory.getMappingList();
        this.mapDirectList = this.objectBeanFactory.getMapDirectList();
        this.aliasMap = this.objectBeanFactory.getAliasMap();
        this.scopeList = this.objectBeanFactory.getScopeList();
        this.detectorController = new FirstDetector(this);
        this.matcherCache = new ConcurrentHashMap<Definition, Matcher<Definition>>();
        this.interceptorList = new HashMap<Matcher<Definition>, List<Object>>();
        // Creates detector
        Detector espDetector = new ExtensionPointsDetector(this);
        Detector aldDetector = new AutoLoadingDefinitionDetector(this);
        // Sets chain
        detectorController.setNextDetector(espDetector);
        espDetector.setNextDetector(aldDetector);
        // executes init
        init(OLArray);
    }

    /*
     * (non-Javadoc)
     * @see
     * org.jgentleframework.context.injecting.Provider#getBean(java.lang.Class)
     */
    @SuppressWarnings("unchecked")
    @Override
    public <T> T getBean(Class<T> clazz) {

        // Fast path: a shared (already instantiated) object registered for
        // this class; fall through to full instantiation otherwise.
        Object result = returnSharedObject(rootScopeName.get(clazz));
        if (result == NULL_SHAREDOBJECT) {
            AppropriateScopeNameClass asc = doAppropriateScopeName(clazz);
            return (T) getBeanInstance(asc);
        }
        return (T) result;
    }

    /*
     * (non-Javadoc)
     * @see
     * org.jgentleframework.context.injecting.Provider#getBean(org.jgentleframework
     * .core.reflection.metadata.Definition)
     */
    @Override
    public Object getBean(Definition def) {

        Assertor.notNull(def, "The given definition must not be null!");
        Object result = returnSharedObject(this.rootScopeName.get(def));
        if (result != NULL_SHAREDOBJECT)
            return result;
        AppropriateScopeNameClass asc = doAppropriateScopeName(def);
        // Only definitions interpreted from a class can be instantiated.
        if (def.isInterpretedOfClass()) {
            return this.getBeanInstance(asc);
        }
        else {
            if (log.isErrorEnabled()) {
                log
                        .error(
                                "The specified definition instance is not object-class definition !",
                                new JGentleRuntimeException());
            }
        }
        return null;
    }

    /*
     * (non-Javadoc)
     * @see
     * org.jgentleframework.context.injecting.Provider#getBean(java.lang.String)
     */
    @Override
    public Object getBean(String refer) {

        // Try the reference syntax first; when the string is not a reference
        // (or resolves to itself), fall back to definition ID, then mapping
        // name, then plain constant name.
        Object result = getRefInstance(refer);
        if ((result != null && result == refer) || result == null) {
            result = getBeanBoundToDefinition(refer);
            if (result == null)
                result = getBeanBoundToMapping(refer);
            if (result == null)
                result = getBeanBoundToName(refer);
            // if (result == null)
            // result = refer;
        }
        return result;
    }

    /*
     * (non-Javadoc)
     * @seeorg.jgentleframework.context.injecting.AbstractBeanFactory#
     * getBeanBoundToDefinition(java.lang.String)
     */
    @Override
    public Object getBeanBoundToDefinition(String ID) {

        Definition def = this.definitionManager.getDefinition(ID);
        if (def != null) {
            Object result = returnSharedObject(this.rootScopeName.get(def));
            if (result != NULL_SHAREDOBJECT)
                return result;
            AppropriateScopeNameClass asc = doAppropriateScopeName(def);
            if (def.isInterpretedOfClass()) {
                return getBeanInstance(asc);
            }
            else {
                if (log.isErrorEnabled()) {
                    log.error("The definition instance corresponds to ID '"
                            + ID + "' is not object-class Definition !",
                            new JGentleRuntimeException());
                }
            }
        }
        return null;
    }

    /*
     * (non-Javadoc)
     * @see
     * org.jgentleframework.context.injecting.Provider#getBeanBoundToMapping(
     * java.lang.String)
     */
    @Override
    public Object getBeanBoundToMapping(String mappingName) {

        if (this.aliasMap.containsKey(mappingName)) {
            Object result = returnSharedObject(this.rootScopeName
                    .get(Configurable.REF_MAPPING + mappingName));
            if (result != NULL_SHAREDOBJECT)
                return result;
            AppropriateScopeNameClass asc = doAppropriateScopeName(Configurable.REF_MAPPING
                    + mappingName);
            return getBeanInstance(asc);
        }
        return null;
    }

    /*
     * (non-Javadoc)
     * @see
     * org.jgentleframework.context.injecting.Provider#getBeanBoundToName(java
     * .lang.String)
     */
    @Override
    public Object getBeanBoundToName(String instanceName) {

        Object result = null;
        Object obj = null;
        result = returnSharedObject(this.rootScopeName
                .get(Configurable.REF_CONSTANT + instanceName));
        if (result != NULL_SHAREDOBJECT)
            return result;
        AppropriateScopeNameClass asc = doAppropriateScopeName(Configurable.REF_CONSTANT
                + instanceName);
        String ref = asc.ref;
        CoreInstantiationSelector coreSelector = new CoreInstantiationSelectorImpl(
                ref);
        // creates scope info, default is SINGLETON
        synchronized (scopeList) {
            if (!scopeList.containsKey(ref)) {
                scopeList.put(ref, Scope.SINGLETON);
            }
        }
        ScopeImplementation scopeImple = this.objectBeanFactory
                .createScopeInstance(ref);
        try {
            obj = getBeanFromScope(scopeImple, coreSelector, ref);
        }
        catch (Exception e) {
            if (log.isFatalEnabled()) {
                log.fatal("Could not instantiate bean instance!", e);
            }
        }
        if (obj != null && obj.getClass().equals(String.class)) {
            String objStr = (String) obj;
            // if the returned value is refered to the instance.
            result = getRefInstance(objStr);
        }
        else {
            result = obj;
        }
        return result;
    }

    /*
     * (non-Javadoc)
     * @see org.jgentleframework.context.injecting.Provider#getConfigInstances()
     */
    @Override
    public List<Configurable> getConfigInstances() {

        return this.configInstances;
    }

    /*
     * (non-Javadoc)
     * @see
     * org.jgentleframework.context.injecting.Provider#getDetectorController()
     */
    @Override
    public Detector getDetectorController() {

        return detectorController;
    }

    /*
     * (non-Javadoc)
     * @seeorg.jgentleframework.context.injecting.IAbstractBeanFactory#
     * getObjectBeanFactory()
     */
    @Override
    public ObjectBeanFactory getObjectBeanFactory() {

        return this.objectBeanFactory;
    }

    /*
     * (non-Javadoc)
     * @see
     * org.jgentleframework.context.injecting.AbstractBeanFactory#getRefInstance
     * (java.lang.String)
     */
    @Override
    public Object getRefInstance(String refInstance) {

        Object result = null;
        if (refInstance.equals(Configurable.REF_MAPPING)) {
            if (log.isErrorEnabled()) {
                log.error("Mapping type reference is not supported !",
                        new InOutDependencyException());
            }
        }
        else if (refInstance.indexOf(":") != -1) {
            String[] values = refInstance.split(":");
            if (values[0].equals(Configurable.REF_CONSTANT)) {
                if (values[1].indexOf(":") != -1) {
                    // Nested reference: resolve recursively.
                    result = getRefInstance(values[1]);
                }
                else {
                    String scopeName = Utils.createScopeName(values[1]);
                    result = this.mapDirectList.get(scopeName);
                }
            }
            // TODO fix other Configurable.REF_MAPPING
            else if (values[0].equals(Configurable.REF_MAPPING)) {
                Class<?> clazz = null;
                if (values[1].indexOf(" ") != -1) {
                    String[] split = values[1].split(" ");
                    try {
                        clazz = Class.forName(split[1]);
                        // BUGFIX: resolve by class only when the class actually
                        // loaded. Previously this call ran unconditionally
                        // after the catch block, clobbering the fallback
                        // result with getBean(null).
                        result = this.getBean(clazz);
                    }
                    catch (ClassNotFoundException e) {
                        // Fall back to resolving by mapping name.
                        result = this.getBeanBoundToMapping(values[1]);
                    }
                }
                else
                    result = this.getBeanBoundToMapping(values[1]);
            }
            // NOTE(review): startsWith (rather than equals) appears intentional
            // here, presumably because REF_ID carries a trailing delimiter;
            // confirm against the Configurable constants before changing.
            else if (values[0].startsWith(Configurable.REF_ID)) {
                result = this.getBeanBoundToDefinition(values[1]);
            }
            else {
                return null;
            }
        }
        else {
            // Not a reference at all: return the raw string unchanged.
            result = refInstance;
        }
        return result;
    }

    /**
     * Inits the system.
     *
     * @param OLArray
     *            the oL array
     */
    protected void init(List<Map<String, Object>> OLArray) {

        init_BeanCreatingProcessor();
        // creates system scope
        initSystemScope();
        AbstractInitLoading.loading(this, OLArray);
    }

    /**
     * This method is responsible for {@link BeanCreationProcessor}
     * instantiation.
     */
    private void init_BeanCreatingProcessor() {

        try {
            if (!this.serviceHandler
                    .containsDomain(BeanServices.DEFAULT_DOMAIN)) {
                this.serviceHandler.newDomain(BeanServices.DEFAULT_DOMAIN);
            }
        }
        catch (JGentleException e) {
            if (log.isFatalEnabled()) {
                log
                        .fatal(
                                "Could not create default domain of system instance !!",
                                e);
            }
        }
        this.serviceHandler.addService(BeanCreationProcessor.class,
                BeanServices.DEFAULT_DOMAIN, new Class[] { Provider.class },
                new Object[] { this });
    }

    /**
     * This method is responsible for system scope instantiation.
     */
    private void initSystemScope() {

        for (Scope scope : Scope.class.getEnumConstants()) {
            synchronized (this.scopeController) {
                this.scopeController.addScope(scope);
            }
        }
    }

    /*
     * (non-Javadoc)
     * @see
     * org.jgentleframework.context.injecting.Provider#setConfigInstances(org
     * .exxlabs.jgentle.configure.AbstractConfig[])
     */
    @Override
    public void setConfigInstances(List<Configurable> configInstances) {

        this.configInstances = configInstances;
    }
}
| haint/jgentle | src/org/jgentleframework/context/ProviderCoreCreator.java | Java | apache-2.0 | 13,036 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.tinkerpop.gremlin.process.traversal.strategy.optimization;
import org.apache.tinkerpop.gremlin.process.computer.traversal.step.map.VertexProgramStep;
import org.apache.tinkerpop.gremlin.process.traversal.Step;
import org.apache.tinkerpop.gremlin.process.traversal.Traversal;
import org.apache.tinkerpop.gremlin.process.traversal.TraversalStrategy;
import org.apache.tinkerpop.gremlin.process.traversal.step.LambdaHolder;
import org.apache.tinkerpop.gremlin.process.traversal.step.TraversalParent;
import org.apache.tinkerpop.gremlin.process.traversal.step.filter.PathFilterStep;
import org.apache.tinkerpop.gremlin.process.traversal.step.map.EdgeOtherVertexStep;
import org.apache.tinkerpop.gremlin.process.traversal.step.map.EdgeVertexStep;
import org.apache.tinkerpop.gremlin.process.traversal.step.map.PathStep;
import org.apache.tinkerpop.gremlin.process.traversal.step.map.TreeStep;
import org.apache.tinkerpop.gremlin.process.traversal.step.map.VertexStep;
import org.apache.tinkerpop.gremlin.process.traversal.step.sideEffect.TreeSideEffectStep;
import org.apache.tinkerpop.gremlin.process.traversal.step.util.EmptyStep;
import org.apache.tinkerpop.gremlin.process.traversal.strategy.AbstractTraversalStrategy;
import org.apache.tinkerpop.gremlin.process.traversal.util.TraversalHelper;
import org.apache.tinkerpop.gremlin.structure.Direction;
import org.apache.tinkerpop.gremlin.structure.Graph;
import org.apache.tinkerpop.gremlin.structure.Vertex;
import org.javatuples.Pair;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
/**
* This strategy looks for {@code .outE().inV()}, {@code .inE().outV()} and {@code .bothE().otherV()}
* and replaces these step sequences with {@code .out()}, {@code .in()} or {@code .both()} respectively.
* The strategy won't modify the traversal if:
* <p/>
* <ul>
* <li>the edge step is labeled</li>
* <li>the traversal contains a {@code path} step</li>
* <li>the traversal contains a lambda step</li>
* </ul>
* <p/>
*
* By re-writing the traversal in this fashion, the traversal eliminates unnecessary steps and becomes more normalized.
*
* @author Daniel Kuppitz (http://gremlin.guru)
* @example <pre>
* __.outE().inV() // is replaced by __.out()
* __.inE().outV() // is replaced by __.in()
* __.bothE().otherV() // is replaced by __.both()
* __.bothE().bothV() // will not be modified
* __.outE().inV().path() // will not be modified
* __.outE().inV().tree() // will not be modified
* </pre>
*/
public final class IncidentToAdjacentStrategy extends AbstractTraversalStrategy<TraversalStrategy.OptimizationStrategy>
        implements TraversalStrategy.OptimizationStrategy {

    private static final IncidentToAdjacentStrategy INSTANCE = new IncidentToAdjacentStrategy();
    // Hidden start-step label used to mark traversals that must not be rewritten.
    private static final String MARKER = Graph.Hidden.hide("gremlin.incidentToAdjacent");
    // Steps whose presence anywhere in the traversal disables this optimization,
    // because they observe the path (and therefore the intermediate edge).
    private static final Set<Class> INVALIDATING_STEP_CLASSES = new HashSet<>(Arrays.asList(
            PathStep.class, PathFilterStep.class, TreeStep.class, TreeSideEffectStep.class, LambdaHolder.class));

    private IncidentToAdjacentStrategy() {
    }

    /**
     * Checks whether a given step is optimizable or not.
     *
     * @param step1 an edge-emitting step
     * @param step2 a vertex-emitting step
     * @return <code>true</code> if step1 is not labeled and emits edges and step2 emits vertices,
     * otherwise <code>false</code>
     */
    private static boolean isOptimizable(final Step step1, final Step step2) {
        // step1 may be null on the first loop iteration in apply();
        // "null instanceof" is false, so this safely returns false then.
        if (step1 instanceof VertexStep && ((VertexStep) step1).returnsEdge() && step1.getLabels().isEmpty()) {
            final Direction step1Dir = ((VertexStep) step1).getDirection();
            if (step1Dir.equals(Direction.BOTH)) {
                // bothE() can only be collapsed with otherV().
                return step2 instanceof EdgeOtherVertexStep;
            }
            // outE()/inE() can be collapsed with otherV(), or with inV()/outV()
            // when the vertex step points opposite to the edge step.
            return step2 instanceof EdgeOtherVertexStep || (step2 instanceof EdgeVertexStep &&
                    ((EdgeVertexStep) step2).getDirection().equals(step1Dir.opposite()));
        }
        return false;
    }

    /**
     * Optimizes the given edge-emitting step and the vertex-emitting step by replacing them with a single
     * vertex-emitting step.
     *
     * @param traversal the traversal that holds the given steps
     * @param step1     the edge-emitting step to replace
     * @param step2     the vertex-emitting step to replace
     */
    private static void optimizeSteps(final Traversal.Admin traversal, final VertexStep step1, final Step step2) {
        final Step newStep = new VertexStep(traversal, Vertex.class, step1.getDirection(), step1.getEdgeLabels());
        // Labels on the vertex step must survive on the merged step.
        for (final String label : (Iterable<String>) step2.getLabels()) {
            newStep.addLabel(label);
        }
        TraversalHelper.replaceStep(step1, newStep, traversal);
        traversal.removeStep(step2);
    }

    public static IncidentToAdjacentStrategy instance() {
        return INSTANCE;
    }

    @Override
    public void apply(final Traversal.Admin<?, ?> traversal) {
        // using a hidden label marker to denote whether the traversal should not be processed by this strategy
        // (the marker is propagated from the root down into every child traversal).
        if ((traversal.isRoot() || traversal.getParent() instanceof VertexProgramStep) &&
                TraversalHelper.hasStepOfAssignableClassRecursively(INVALIDATING_STEP_CLASSES, traversal))
            TraversalHelper.applyTraversalRecursively(t -> t.getStartStep().addLabel(MARKER), traversal);
        if (traversal.getStartStep().getLabels().contains(MARKER)) {
            // Marked: strip the marker and leave the traversal untouched.
            traversal.getStartStep().removeLabel(MARKER);
            return;
        }
        ////////////////////////////////////////////////////////////////////////////
        // Collect all adjacent (edge-step, vertex-step) pairs first, then replace
        // them afterwards so the step list is not mutated while iterating.
        final Collection<Pair<VertexStep, Step>> stepsToReplace = new ArrayList<>();
        Step prev = null;
        for (final Step curr : traversal.getSteps()) {
            if (curr instanceof TraversalParent) {
                // Recurse into child traversals before examining this step.
                ((TraversalParent) curr).getLocalChildren().forEach(this::apply);
                ((TraversalParent) curr).getGlobalChildren().forEach(this::apply);
            }
            if (isOptimizable(prev, curr)) {
                stepsToReplace.add(Pair.with((VertexStep) prev, curr));
            }
            prev = curr;
        }
        if (!stepsToReplace.isEmpty()) {
            for (final Pair<VertexStep, Step> pair : stepsToReplace) {
                optimizeSteps(traversal, pair.getValue0(), pair.getValue1());
            }
        }
    }

    @Override
    public Set<Class<? extends OptimizationStrategy>> applyPrior() {
        return Collections.singleton(IdentityRemovalStrategy.class);
    }

    @Override
    public Set<Class<? extends OptimizationStrategy>> applyPost() {
        return Collections.singleton(PathRetractionStrategy.class);
    }
}
| apache/tinkerpop | gremlin-core/src/main/java/org/apache/tinkerpop/gremlin/process/traversal/strategy/optimization/IncidentToAdjacentStrategy.java | Java | apache-2.0 | 7,704 |
/* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.flowable.idm.engine.impl.persistence.entity;
import java.util.List;
import java.util.Map;
import org.flowable.common.engine.impl.persistence.entity.EntityManager;
import org.flowable.idm.api.PasswordEncoder;
import org.flowable.idm.api.PasswordSalt;
import org.flowable.idm.api.Picture;
import org.flowable.idm.api.User;
import org.flowable.idm.api.UserQuery;
import org.flowable.idm.engine.impl.UserQueryImpl;
/**
 * Entity manager contract for {@link UserEntity} persistence: CRUD,
 * query execution, password verification and profile-picture handling.
 *
 * @author Joram Barrez
 */
public interface UserEntityManager extends EntityManager<UserEntity> {

    /** Creates a new (not yet persisted) {@link User} with the given id. */
    User createNewUser(String userId);

    /** Applies the state of the given user to the persistent store. */
    void updateUser(User updatedUser);

    /** Returns the users matching the given query criteria. */
    List<User> findUserByQueryCriteria(UserQueryImpl query);

    /** Returns the number of users matching the given query criteria. */
    long findUserCountByQueryCriteria(UserQueryImpl query);

    /** Creates a fresh, empty {@link UserQuery}. */
    UserQuery createNewUserQuery();

    /**
     * Verifies the given plain-text password for the user, using the supplied
     * encoder and salt.
     */
    Boolean checkPassword(String userId, String password, PasswordEncoder passwordEncoder, PasswordSalt passwordSalt);

    /** Runs a native (store-specific) query returning matching users. */
    List<User> findUsersByNativeQuery(Map<String, Object> parameterMap);

    /** Runs a native (store-specific) count query. */
    long findUserCountByNativeQuery(Map<String, Object> parameterMap);

    /** Returns whether the given user has not been persisted yet. */
    boolean isNewUser(User user);

    /** Returns the stored picture for the user, if any. */
    Picture getUserPicture(User user);

    /** Stores the given picture for the user. */
    void setUserPicture(User user, Picture picture);

    /** Removes the stored picture for the user. */
    void deletePicture(User user);

    /** Returns the users that hold the given privilege. */
    List<User> findUsersByPrivilegeId(String privilegeId);
}
| lsmall/flowable-engine | modules/flowable-idm-engine/src/main/java/org/flowable/idm/engine/impl/persistence/entity/UserEntityManager.java | Java | apache-2.0 | 1,826 |
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.facebook.buck.core.starlark.rule;
import com.facebook.buck.core.artifact.Artifact;
import com.facebook.buck.core.rules.analysis.RuleAnalysisContext;
import com.google.common.collect.ImmutableSet;
import com.google.devtools.build.lib.cmdline.Label;
import com.google.devtools.build.lib.skylarkinterface.SkylarkPrinter;
/** The context passed to user defined rules' implementation functions */
/** The context passed to user defined rules' implementation functions. */
public class SkylarkRuleContext implements SkylarkRuleContextApi {

  private final Label ruleLabel;
  private final CapturingActionRegistry capturingRegistry;
  private final SkylarkRuleContextAttr attributes;
  private final SkylarkRuleContextActions actionsApi;

  /**
   * Create a {@link SkylarkRuleContext} to be used in users' implementation functions
   *
   * @param context the context for the analysing this rule. Used primarily for creating and
   *     manipulating actions
   * @param label the label of the new rule being evaluated
   * @param skylarkRuleContextAttr a mapping-like representation of field names to
   *     coerced-transformed values for a given rule
   */
  public SkylarkRuleContext(
      RuleAnalysisContext context, Label label, SkylarkRuleContextAttr skylarkRuleContextAttr) {
    this.ruleLabel = label;
    // Wrap the registry so every artifact used by an action is captured
    // (see getOutputs()).
    this.capturingRegistry = new CapturingActionRegistry(context.actionRegistry());
    this.attributes = skylarkRuleContextAttr;
    this.actionsApi = new SkylarkRuleContextActions(capturingRegistry);
  }

  @Override
  public void repr(SkylarkPrinter printer) {
    printer.append("<ctx>");
  }

  @Override
  public SkylarkRuleContextAttr getAttr() {
    return this.attributes;
  }

  @Override
  public Label getLabel() {
    return this.ruleLabel;
  }

  @Override
  public SkylarkRuleContextActionsApi getActions() {
    return this.actionsApi;
  }

  /**
   * Get a list of all Artifacts that were used in actions
   *
   * <p>This is used to infer outputs to use to create a {@link
   * com.facebook.buck.core.rules.providers.lib.DefaultInfo} object if no output attributes were
   * specified, or if no {@link com.facebook.buck.core.rules.providers.lib.DefaultInfo} object was
   * returned by a user's implementation function.
   *
   * @return List of {@link Artifact}s that were used in actions.
   */
  ImmutableSet<Artifact> getOutputs() {
    return this.capturingRegistry.getOutputs();
  }
}
| facebook/buck | src/com/facebook/buck/core/starlark/rule/SkylarkRuleContext.java | Java | apache-2.0 | 2,886 |
#ifndef H_CELERO_UTILITIES_H
#define H_CELERO_UTILITIES_H
///
/// \author John Farrier
///
/// \copyright Copyright 2015 John Farrier
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
#ifndef WIN32
#include <unistd.h>
#endif

#include <chrono>
#include <cstdio>
#include <cstdlib>
#include <functional>
#include <thread>

#include <stdint.h>

#include <celero/Export.h>
namespace celero
{
	///
	/// \func DoNotOptimizeAway
	///
	/// Used to prevent compiler optimization of a variable
	/// that performs no real purpose other than to participate
	/// in a benchmark
	///
	/// Consider the following trivial benchmark:
	///
	/// \code
	/// BASELINE(...)
	/// {
	///     int x = 0;
	///
	///     for(int i = 0; i < 64; i++)
	///     {
	///         x += i;
	///     }
	/// }
	/// \endcode
	///
	/// Using Ubuntu clang v3.0, the resultant assembly is highly optimized
	/// as one might expect, but not terribly useful for baselining:
	///
	/// \verbatim
	/// movl	$2016, %eax             # imm = 0x7E0
	/// ret
	/// \endverbatim
	///
	/// Now, replace the inner loop with a call to DoNotOptimizeAway:
	///
	/// \code
	/// DoNotOptimizeAway(x += i);
	/// \endcode
	///
	/// The result is now a loop which is meaningful for establishing a
	/// baseline.
	///
	/// \verbatim
	/// xorl	%ecx, %ecx
	/// xorl	%eax, %eax
	/// .LBB0_1:                                # =>This Inner Loop Header: Depth=1
	/// addl	%ecx, %eax
	/// incl	%ecx
	/// cmpl	$64, %ecx
	/// jne	.LBB0_1
	/// ret
	/// \endverbatim
	///
	/// GCC 4.8 gives similar results.
	///
	/// gcc.godbolt.org permalink: http://goo.gl/lsngwX
	///
	/// Folly uses a simple bit of inline assembly:
	/// > template <class T>
	/// > void doNotOptimizeAway(T&& datum) {
	/// >   asm volatile("" : "+r" (datum));
	/// >}
	///
	/// It would be great if that were portable with respect to both compilers and 32/64-bit targets.
	///
	template<class T> void DoNotOptimizeAway(T&& x)
	{
		//
		// We must always do this test, but it will never pass.
		//
		// The clock comparison is opaque to the optimizer, so it cannot
		// prove the branch dead and must keep the read of x alive.
		if(std::chrono::system_clock::now() == std::chrono::time_point<std::chrono::system_clock>())
		{
			// This forces the value to never be optimized away
			// by taking a reference then using it.
			const auto* p = &x;
			putchar(*reinterpret_cast<const char*>(p));

			// If we do get here, kick out because something has gone wrong.
			std::abort();
		}
	}

	/// Specialization for std::function objects.
	/// (Declared here; defined out-of-line in the library.)
	template<> CELERO_EXPORT void DoNotOptimizeAway(std::function<void(void)>&& x);

	///
	/// Quick definition of the number of microseconds per second.
	///
	const uint64_t UsPerSec(1000000);

	///
	/// Disable dynamic CPU scaling which allows the clock speed of the processor
	/// to be adjusted to different P-states by software.
	///
	CELERO_EXPORT void DisableDynamicCPUScaling();
}
#endif
| bkloppenborg/Celero | include/celero/Utilities.h | C | apache-2.0 | 3,258 |
# frozen_string_literal: true
# Use RSpec's built-in mocking framework for all examples.
RSpec.configure do |c|
  c.mock_with :rspec
end
require 'puppetlabs_spec_helper/module_spec_helper'
require 'rspec-puppet-facts'
# Optional per-module hook: loaded only when the module ships its own helper.
require 'spec_helper_local' if File.file?(File.join(File.dirname(__FILE__), 'spec_helper_local.rb'))
# Mix RspecPuppetFacts helpers (e.g. `on_supported_os`) into the top level.
include RspecPuppetFacts
# Facts every example group should see, regardless of what facterdb provides.
default_facts = {
  puppetversion: Puppet.version,
  facterversion: Facter.version,
}
# Optional YAML overrides shipped next to this helper file.
default_fact_files = [
  File.expand_path(File.join(File.dirname(__FILE__), 'default_facts.yml')),
  File.expand_path(File.join(File.dirname(__FILE__), 'default_module_facts.yml')),
]
default_fact_files.each do |f|
  # Skip missing, unreadable or empty files.
  next unless File.exist?(f) && File.readable?(f) && File.size?(f)
  begin
    # Positional safe_load args: permitted_classes, permitted_symbols, aliases.
    # NOTE(review): this positional form was removed in Psych 4 -- confirm the
    # supported Ruby/Psych range before bumping dependencies.
    default_facts.merge!(YAML.safe_load(File.read(f), [], [], true))
  rescue => e
    # A broken override file should not abort the whole spec run.
    RSpec.configuration.reporter.message "WARNING: Unable to load #{f}: #{e}"
  end
end
# read default_facts and merge them over what is provided by facterdb
default_facts.each do |fact, value|
  add_custom_fact fact, value
end
RSpec.configure do |c|
  c.default_facts = default_facts
  c.hiera_config = 'hiera'
  c.before :each do
    # set to strictest setting for testing
    # by default Puppet runs at warning level
    # NOTE(review): the comment above says "strictest", but :warning is not
    # the strictest value (:error is) -- confirm which is intended.
    Puppet.settings[:strict] = :warning
    Puppet.settings[:strict_variables] = true
  end
  # Bolt-tagged examples only run when the Bolt gem environment is present.
  c.filter_run_excluding(bolt: true) unless ENV['GEM_BOLT']
  c.after(:suite) do
    # Coverage threshold 0: report coverage but never fail the suite on it.
    RSpec::Puppet::Coverage.report!(0)
  end
  # Filter backtrace noise
  backtrace_exclusion_patterns = [
    %r{spec_helper},
    %r{gems},
  ]
  # Older RSpec versions expose the same knob under a different name.
  if c.respond_to?(:backtrace_exclusion_patterns)
    c.backtrace_exclusion_patterns = backtrace_exclusion_patterns
  elsif c.respond_to?(:backtrace_clean_patterns)
    c.backtrace_clean_patterns = backtrace_exclusion_patterns
  end
end
# Ensures that a module is defined
# @param module_name Name of the module
# Ensures that a module (and every parent namespace along the way) exists,
# creating empty modules for any missing constants.
# @param module_name Fully-qualified name, e.g. "Foo::Bar"
def ensure_module_defined(module_name)
  parts = module_name.split('::')
  parts.inject(Object) do |namespace, const_name|
    unless namespace.const_defined?(const_name, false)
      namespace.const_set(const_name, Module.new)
    end
    namespace.const_get(const_name, false)
  end
end
# 'spec_overrides' from sync.yml will appear below this line
| emahags/puppet-module-krb5 | spec/spec_helper.rb | Ruby | apache-2.0 | 2,142 |
/**
* Visual Blocks Editor
*
* Copyright 2011 Google Inc.
* http://blockly.googlecode.com/
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @fileoverview Toolbox from whence to create blocks.
* @author [email protected] (Neil Fraser)
*/
'use strict';
goog.provide('Blockly.Toolbox');
goog.require('Blockly.Flyout');
goog.require('goog.style');
goog.require('goog.ui.tree.TreeControl');
goog.require('goog.ui.tree.TreeNode');
/**
 * Width of the toolbox.
 * @type {number}
 */
Blockly.Toolbox.width = 0;
/**
 * The SVG group currently selected.
 * NOTE(review): not referenced anywhere in this file -- possibly dead state.
 * @type {SVGGElement}
 * @private
 */
Blockly.Toolbox.selectedOption_ = null;
/**
 * Configuration constants for Closure's tree UI.
 * Two more entries ('cleardotPath', 'cssCollapsedFolderIcon') are filled in
 * by init() once the media path and text direction are known.
 * @type {Object.<string,*>}
 * @const
 * @private
 */
Blockly.Toolbox.CONFIG_ = {
  indentWidth: 19,
  cssRoot: 'blocklyTreeRoot',
  cssHideRoot: 'blocklyHidden',
  cssItem: '',
  cssTreeRow: 'blocklyTreeRow',
  cssItemLabel: 'blocklyTreeLabel',
  cssTreeIcon: 'blocklyTreeIcon',
  cssExpandedFolderIcon: 'blocklyTreeIconOpen',
  cssFileIcon: 'blocklyTreeIconNone',
  cssSelectedRow: 'blocklyTreeSelected'
};
/**
 * Creates the toolbox's DOM.  Only needs to be called once.
 * @param {!Element} svg The top-level SVG element.
 * @param {!Element} container The SVG's HTML parent element.
 */
Blockly.Toolbox.createDom = function(svg, container) {
  // Build the HTML container that will host the category tree.
  var toolboxDiv = goog.dom.createDom('div', {'class': 'blocklyToolboxDiv'});
  toolboxDiv.setAttribute('dir', Blockly.RTL ? 'RTL' : 'LTR');
  container.appendChild(toolboxDiv);
  Blockly.Toolbox.HtmlDiv = toolboxDiv;
  /**
   * @type {!Blockly.Flyout}
   * @private
   */
  Blockly.Toolbox.flyout_ = new Blockly.Flyout();
  svg.appendChild(Blockly.Toolbox.flyout_.createDom());
  // A right-click, or a click on the toolbox background, dismisses the
  // flyout as well; any other click only dismisses popups.
  Blockly.bindEvent_(toolboxDiv, 'mousedown', null, function(e) {
    var closeFlyoutToo = Blockly.isRightButton(e) || e.target == toolboxDiv;
    Blockly.hideChaff(!closeFlyoutToo);
  });
};
/**
 * Initializes the toolbox.
 */
Blockly.Toolbox.init = function() {
  // These two CONFIG_ entries depend on runtime state (media path, RTL flag)
  // and therefore cannot be set in the static CONFIG_ literal.
  Blockly.Toolbox.CONFIG_['cleardotPath'] =
      Blockly.pathToBlockly + 'media/1x1.gif';
  Blockly.Toolbox.CONFIG_['cssCollapsedFolderIcon'] =
      'blocklyTreeIconClosed' + (Blockly.RTL ? 'Rtl' : 'Ltr');
  var tree = new Blockly.Toolbox.TreeControl('root', Blockly.Toolbox.CONFIG_);
  Blockly.Toolbox.tree_ = tree;
  tree.setShowRootNode(false);
  tree.setShowLines(false);
  tree.setShowExpandIcons(false);
  tree.setSelectedItem(null);
  Blockly.Toolbox.HtmlDiv.style.display = 'block';
  Blockly.Toolbox.flyout_.init(Blockly.mainWorkspace,
      Blockly.getMainWorkspaceMetrics, true);
  // Populate the tree from the XML definition before rendering it.
  Blockly.Toolbox.populate_();
  tree.render(Blockly.Toolbox.HtmlDiv);
  // If the document resizes, reposition the toolbox.
  goog.events.listen(window, goog.events.EventType.RESIZE,
      Blockly.Toolbox.position_);
  Blockly.Toolbox.position_();
};
/**
 * Move the toolbox to the edge of the workspace and stretch it to the
 * workspace height.
 * @private
 */
Blockly.Toolbox.position_ = function() {
  var treeDiv = Blockly.Toolbox.HtmlDiv;
  var svgBox = goog.style.getBorderBox(Blockly.svg);
  var svgSize = Blockly.svgSize();
  if (Blockly.RTL) {
    // Anchor the toolbox to the right-hand edge of the workspace.
    var x = svgSize.left + 1;
    x += svgSize.width - treeDiv.offsetWidth;
    treeDiv.style.left = x + 'px';
  } else {
    // Bug fix: CSS length values require units.  Assigning the bare number
    // returned by getBorderBox() was silently ignored by the browser (only
    // "0" is valid unitless), so append 'px' as the other branches do.
    treeDiv.style.marginLeft = svgBox.left + 'px';
  }
  treeDiv.style.height = (svgSize.height + 1) + 'px';
  // Record the rendered width so the workspace can make room for the toolbox.
  Blockly.Toolbox.width = treeDiv.offsetWidth;
};
/**
 * Fill the toolbox with categories and blocks.
 * @private
 */
Blockly.Toolbox.populate_ = function() {
  var rootOut = Blockly.Toolbox.tree_;
  rootOut.blocks = [];
  // Recursively mirror the XML toolbox definition (treeIn) into the rendered
  // Closure tree (treeOut).  Note: new nodes are always created via rootOut.
  function syncTrees(treeIn, treeOut) {
    for (var i = 0, childIn; childIn = treeIn.childNodes[i]; i++) {
      if (!childIn.tagName) {
        // Skip over text.
        continue;
      }
      var name = childIn.tagName.toUpperCase();
      if (name == 'CATEGORY') {
        var childOut = rootOut.createNode(childIn.getAttribute('name'));
        childOut.blocks = [];
        treeOut.add(childOut);
        var custom = childIn.getAttribute('custom');
        if (custom) {
          // Variables and procedures have special categories that are dynamic.
          // Here `blocks` is a string keyword rather than an array of XML.
          childOut.blocks = custom;
        } else {
          syncTrees(childIn, childOut);
        }
      } else if (name == 'BLOCK') {
        treeOut.blocks.push(childIn);
      }
    }
  }
  syncTrees(Blockly.languageTree, Blockly.Toolbox.tree_);
  if (rootOut.blocks.length) {
    // NOTE(review): throwing a bare string loses the stack trace; consider
    // `throw Error(...)` if no caller depends on the thrown type.
    throw 'Toolbox cannot have both blocks and categories in the root level.';
  }
  // Fire a resize event since the toolbox may have changed width and height.
  Blockly.fireUiEvent(window, 'resize');
};
/**
 * Unhighlight any previously specified option.
 * Clearing the selection also hides the flyout (see
 * TreeControl.setSelectedItem, which hides it for a null node).
 */
Blockly.Toolbox.clearSelection = function() {
  var tree = Blockly.Toolbox.tree_;
  tree.setSelectedItem(null);
};
// Extending Closure's Tree UI.
/**
 * Extension of a TreeControl object that uses a custom tree node.
 * @param {string} html The HTML content of the node label.
 * @param {Object=} opt_config The configuration for the tree. See
 *    goog.ui.tree.TreeControl.DefaultConfig. If not specified, a default config
 *    will be used.
 * @param {goog.dom.DomHelper=} opt_domHelper Optional DOM helper.
 * @constructor
 * @extends {goog.ui.tree.TreeControl}
 */
Blockly.Toolbox.TreeControl = function(html, opt_config, opt_domHelper) {
  goog.ui.tree.TreeControl.call(this, html, opt_config, opt_domHelper);
};
goog.inherits(Blockly.Toolbox.TreeControl, goog.ui.tree.TreeControl);
/**
 * Creates a new tree node using Blockly's custom TreeNode class.
 * @param {string} html The html content of the node label.
 * @return {goog.ui.tree.TreeNode} The new item.
 * @override
 */
Blockly.Toolbox.TreeControl.prototype.createNode = function(html) {
  var label = html || '';
  var node = new Blockly.Toolbox.TreeNode(label, this.getConfig(),
      this.getDomHelper());
  return node;
};
/**
 * Display/hide the flyout when an item is selected.
 * @param {goog.ui.tree.BaseNode} node The item to select, or null to clear.
 * @override
 */
Blockly.Toolbox.TreeControl.prototype.setSelectedItem = function(node) {
  // Re-selecting the currently selected node is a no-op.
  if (this.selectedItem_ == node) {
    return;
  }
  goog.ui.tree.TreeControl.prototype.setSelectedItem.call(this, node);
  var hasBlocks = node && node.blocks && node.blocks.length;
  if (hasBlocks) {
    Blockly.Toolbox.flyout_.show(node.blocks);
  } else {
    // Hide the flyout.
    Blockly.Toolbox.flyout_.hide();
  }
};
/**
 * A single node in the tree, customized for Blockly's UI.
 * @param {string} html The html content of the node label.
 * @param {Object=} opt_config The configuration for the tree. See
 *    goog.ui.tree.TreeControl.DefaultConfig. If not specified, a default config
 *    will be used.
 * @param {goog.dom.DomHelper=} opt_domHelper Optional DOM helper.
 * @constructor
 * @extends {goog.ui.tree.TreeNode}
 */
Blockly.Toolbox.TreeNode = function(html, opt_config, opt_domHelper) {
  goog.ui.tree.TreeNode.call(this, html, opt_config, opt_domHelper);
  var resize = function() {
    Blockly.fireUiEvent(window, 'resize');
  };
  // Fire a resize event since the toolbox may have changed width.
  // NOTE(review): every TreeNode constructed adds two more listeners to the
  // shared tree; they are never removed.  Confirm this accumulation is
  // harmless for long-lived workspaces.
  goog.events.listen(Blockly.Toolbox.tree_,
      goog.ui.tree.BaseNode.EventType.EXPAND, resize);
  goog.events.listen(Blockly.Toolbox.tree_,
      goog.ui.tree.BaseNode.EventType.COLLAPSE, resize);
};
goog.inherits(Blockly.Toolbox.TreeNode, goog.ui.tree.TreeNode);
/**
 * Do not show the +/- icon: return an empty placeholder span instead of the
 * default expand-icon markup.
 * @return {string} The source for the icon.
 * @override
 */
Blockly.Toolbox.TreeNode.prototype.getExpandIconHtml = function() {
  return '<span></span>';
};
/**
 * Suppress population of the +/- icon element entirely.
 * @return {null} Null.
 * @protected
 * @override
 */
Blockly.Toolbox.TreeNode.prototype.getExpandIconElement = function() {
  return null;
};
/**
 * Expand or collapse the node on mouse click.
 * @param {!goog.events.BrowserEvent} e The browser event.
 * @override
 */
Blockly.Toolbox.TreeNode.prototype.onMouseDown = function(e) {
  var expandable = this.hasChildren() && this.isUserCollapsible_;
  if (expandable) {
    // A collapsible category: clicking toggles expansion and selects it.
    this.toggle();
    this.select();
  } else {
    var tree = this.getTree();
    if (this.isSelected()) {
      // Clicking the already-selected node deselects it.
      tree.setSelectedItem(null);
    } else {
      this.select();
    }
  }
  this.updateRow();
};
/**
 * Suppress the inherited double-click behaviour (single clicks already
 * handle toggling in onMouseDown).
 * @param {!goog.events.BrowserEvent} e The browser event.
 * @override
 */
Blockly.Toolbox.TreeNode.prototype.onDoubleClick_ = function(e) {
  // NOP.
};
| jadonk/blockly-bonescript | static/core/toolbox.js | JavaScript | apache-2.0 | 9,020 |
package egovframework.com.cop.smt.mrm.service;
import java.util.Map;
/**
 * Overview
 *  - Defines the service interface for the memo report feature.
 *
 * Details
 *  - Provides create, update, delete and retrieval operations for memo reports.
 *  - Retrieval is divided into list retrieval and detail retrieval.
 * @author Jang Cheol-ho (장철호)
 * @version 1.0
 * @created 19-7-2010 10:14:53 AM
 * <pre>
 * << Modification history >>
 *
 *   Date        Author          Description
 *  ----------  --------------  ---------------------------
 *  2010.7.19   Jang Cheol-ho   Initial creation
 *
 * </pre>
 */
public interface EgovMemoReprtService {
	/**
	 * Retrieves the list of reporters.
	 *
	 * @param reportrVO search conditions for reporters
	 * @return Map<String, Object> query results
	 */
	public Map<String, Object> selectReportrList(ReportrVO reportrVO) throws Exception;
	/**
	 * Retrieves a user's position (job classification) name.
	 *
	 * @param wrterId writer (user) id
	 * @return String position name
	 */
	public String selectWrterClsfNm(String wrterId) throws Exception;
	/**
	 * Retrieves the list of memo reports.
	 *
	 * @param memoReprtVO memo report VO holding the search conditions
	 * @return Map<String, Object> query results
	 */
	public Map<String, Object> selectMemoReprtList(MemoReprtVO memoReprtVO) throws Exception;
	/**
	 * Retrieves the details of a memo report.
	 *
	 * @param memoReprtVO memo report VO identifying the report
	 * @return MemoReprtVO memo report details
	 */
	public MemoReprtVO selectMemoReprt(MemoReprtVO memoReprtVO) throws Exception;
	/**
	 * Updates the reporter's read (inquiry) timestamp of a memo report.
	 *
	 * @param memoReprt memo report model
	 */
	public void readMemoReprt(MemoReprt memoReprt) throws Exception;
	/**
	 * Updates a memo report.
	 *
	 * @param memoReprt memo report model
	 */
	public void updateMemoReprt(MemoReprt memoReprt) throws Exception;
	/**
	 * Registers the directive (instruction) attached to a memo report.
	 *
	 * @param memoReprt memo report model
	 */
	public void updateMemoReprtDrctMatter(MemoReprt memoReprt) throws Exception;
	/**
	 * Registers a new memo report.
	 *
	 * @param memoReprt memo report model
	 */
	public void insertMemoReprt(MemoReprt memoReprt) throws Exception;
	/**
	 * Deletes a memo report.
	 *
	 * @param memoReprt memo report model
	 */
	public void deleteMemoReprt(MemoReprt memoReprt) throws Exception;
}
// RUN: %empty-directory(%t)
// RUN: %target-clang %s -all_load %test-resource-dir/%target-sdk-name/libswiftCompatibility50.a -lobjc -o %t/main
// RUN: %target-run %t/main
// REQUIRES: objc_interop
// REQUIRES: executable_test
// The compatibility library needs to have no build-time dependencies on
// libswiftCore so it can be linked into a program that doesn't link
// libswiftCore, but will load it at runtime, such as xctest.
//
// Test this by linking it into a plain C program and making sure it builds.
/* Nothing to execute: successfully linking this program is the entire test. */
int main(void) {}
| karwa/swift | test/stdlib/Compatibility50Linking.c | C | apache-2.0 | 531 |
/*
* Copyright 2016 LINE Corporation
*
* LINE Corporation licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package com.linecorp.armeria.server.annotation;
import static com.linecorp.armeria.internal.server.annotation.DefaultValues.UNSPECIFIED;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
import com.linecorp.armeria.common.HttpMethod;
/**
 * Annotation for mapping an {@link HttpMethod#TRACE} request onto a specific
 * service method.
 */
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.METHOD)
public @interface Trace {
    /**
     * A path pattern for the annotated method.
     * NOTE(review): when left as UNSPECIFIED, the effective path presumably
     * comes from an enclosing path mapping -- confirm against the service
     * annotation documentation.
     */
    String value() default UNSPECIFIED;
}
| anuraaga/armeria | core/src/main/java/com/linecorp/armeria/server/annotation/Trace.java | Java | apache-2.0 | 1,262 |
--
-- Copyright 2010-2017 Boxfuse GmbH
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- Rename the retired 'INIT' migration type to its replacement 'BASELINE'.
UPDATE "${schema}"."${table}" SET "type"='BASELINE' WHERE "type"='INIT';
-- SQLite's ALTER TABLE cannot drop columns, so the metadata table is rebuilt:
-- move the old table aside, create the new layout, copy the surviving
-- columns across, then discard the old table.
ALTER TABLE "${schema}"."${table}" RENAME TO "${table}_3";
CREATE TABLE "${schema}"."${table}" (
    "installed_rank" INT NOT NULL PRIMARY KEY,
    "version" VARCHAR(50),
    "description" VARCHAR(200) NOT NULL,
    "type" VARCHAR(20) NOT NULL,
    "script" VARCHAR(1000) NOT NULL,
    "checksum" INT,
    "installed_by" VARCHAR(100) NOT NULL,
    "installed_on" TEXT NOT NULL DEFAULT (strftime('%Y-%m-%d %H:%M:%f','now')),
    "execution_time" INT NOT NULL,
    "success" BOOLEAN NOT NULL
);
INSERT INTO "${schema}"."${table}" SELECT "installed_rank","version","description","type","script","checksum","installed_by","installed_on","execution_time","success" FROM "${schema}"."${table}_3";
DROP TABLE "${schema}"."${table}_3";
/*
* Copyright (C) 2006 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.android.launcher3;
import android.animation.TimeInterpolator;
import android.content.Context;
import android.hardware.SensorManager;
import android.os.Build;
import android.view.ViewConfiguration;
import android.view.animation.AnimationUtils;
import android.view.animation.Interpolator;
/**
 * This class differs from the framework {@link android.widget.Scroller} in that
 * you can modify the Interpolator post-construction.
 */
public class LauncherScroller {
    // Current animation kind: SCROLL_MODE or FLING_MODE.
    private int mMode;
    // Scroll origin, destination and clamping bounds, in pixels.
    private int mStartX;
    private int mStartY;
    private int mFinalX;
    private int mFinalY;
    private int mMinX;
    private int mMaxX;
    private int mMinY;
    private int mMaxY;
    // Current interpolated position, updated by computeScrollOffset().
    private int mCurrX;
    private int mCurrY;
    private long mStartTime;
    private int mDuration;
    private float mDurationReciprocal;
    private float mDeltaX;
    private float mDeltaY;
    private boolean mFinished;
    private TimeInterpolator mInterpolator;
    private boolean mFlywheel;
    private float mVelocity;
    private float mCurrVelocity;
    private int mDistance;
    private float mFlingFriction = ViewConfiguration.getScrollFriction();
    private static final int DEFAULT_DURATION = 250;
    private static final int SCROLL_MODE = 0;
    private static final int FLING_MODE = 1;
    private static float DECELERATION_RATE = (float) (Math.log(0.78) / Math.log(0.9));
    private static final float INFLEXION = 0.35f; // Tension lines cross at (INFLEXION, 1)
    private static final float START_TENSION = 0.5f;
    private static final float END_TENSION = 1.0f;
    private static final float P1 = START_TENSION * INFLEXION;
    private static final float P2 = 1.0f - END_TENSION * (1.0f - INFLEXION);
    private static final int NB_SAMPLES = 100;
    // Precomputed fling curve samples, filled once in the static initializer.
    private static final float[] SPLINE_POSITION = new float[NB_SAMPLES + 1];
    private static final float[] SPLINE_TIME = new float[NB_SAMPLES + 1];
    private float mDeceleration;
    private final float mPpi;
    // A context-specific coefficient adjusted to physical values.
    private float mPhysicalCoeff;
    static {
        float x_min = 0.0f;
        float y_min = 0.0f;
        // For each sample, bisect for the curve parameter matching alpha on a
        // cubic with control points derived from P1/P2 (Bezier-style), then
        // evaluate the other axis at that parameter.
        for (int i = 0; i < NB_SAMPLES; i++) {
            final float alpha = (float) i / NB_SAMPLES;
            float x_max = 1.0f;
            float x, tx, coef;
            while (true) {
                x = x_min + (x_max - x_min) / 2.0f;
                coef = 3.0f * x * (1.0f - x);
                tx = coef * ((1.0f - x) * P1 + x * P2) + x * x * x;
                if (Math.abs(tx - alpha) < 1E-5) break;
                if (tx > alpha) x_max = x;
                else x_min = x;
            }
            SPLINE_POSITION[i] = coef * ((1.0f - x) * START_TENSION + x) + x * x * x;
            float y_max = 1.0f;
            float y, dy;
            while (true) {
                y = y_min + (y_max - y_min) / 2.0f;
                coef = 3.0f * y * (1.0f - y);
                dy = coef * ((1.0f - y) * START_TENSION + y) + y * y * y;
                if (Math.abs(dy - alpha) < 1E-5) break;
                if (dy > alpha) y_max = y;
                else y_min = y;
            }
            SPLINE_TIME[i] = coef * ((1.0f - y) * P1 + y * P2) + y * y * y;
        }
        SPLINE_POSITION[NB_SAMPLES] = SPLINE_TIME[NB_SAMPLES] = 1.0f;
        // This controls the viscous fluid effect (how much of it)
        sViscousFluidScale = 8.0f;
        // must be set to 1.0 (used in viscousFluid())
        sViscousFluidNormalize = 1.0f;
        sViscousFluidNormalize = 1.0f / viscousFluid(1.0f);
        // Note: the two fields above are declared *below* this block; writing
        // to a later-declared static from an initializer is legal in Java.
    }
    private static float sViscousFluidScale;
    private static float sViscousFluidNormalize;
    /**
     * Sets (or clears, when null) the interpolator used for SCROLL_MODE;
     * a null interpolator falls back to the viscous-fluid curve
     * (see computeScrollOffset()).
     */
    public void setInterpolator(TimeInterpolator interpolator) {
        mInterpolator = interpolator;
    }
    /**
     * Create a Scroller with the default duration and interpolator.
     */
    public LauncherScroller(Context context) {
        this(context, null);
    }
    /**
     * Create a Scroller with the specified interpolator. If the interpolator is
     * null, the default (viscous) interpolator will be used. "Flywheel" behavior will
     * be in effect for apps targeting Honeycomb or newer.
     */
    public LauncherScroller(Context context, Interpolator interpolator) {
        this(context, interpolator,
                context.getApplicationInfo().targetSdkVersion >= Build.VERSION_CODES.HONEYCOMB);
    }
    /**
     * Create a Scroller with the specified interpolator. If the interpolator is
     * null, the default (viscous) interpolator will be used. Specify whether or
     * not to support progressive "flywheel" behavior in flinging.
     */
    public LauncherScroller(Context context, Interpolator interpolator, boolean flywheel) {
        mFinished = true;
        mInterpolator = interpolator;
        mPpi = context.getResources().getDisplayMetrics().density * 160.0f;
        mDeceleration = computeDeceleration(ViewConfiguration.getScrollFriction());
        mFlywheel = flywheel;
        mPhysicalCoeff = computeDeceleration(0.84f); // look and feel tuning
    }
    /**
     * The amount of friction applied to flings. The default value
     * is {@link ViewConfiguration#getScrollFriction}.
     *
     * @param friction A scalar dimension-less value representing the coefficient of
     *         friction.
     */
    public final void setFriction(float friction) {
        mDeceleration = computeDeceleration(friction);
        mFlingFriction = friction;
    }
    /**
     * Converts a friction coefficient into a deceleration in pixels per
     * second squared (g * inches-per-meter * pixels-per-inch * friction).
     */
    private float computeDeceleration(float friction) {
        return SensorManager.GRAVITY_EARTH   // g (m/s^2)
                      * 39.37f               // inch/meter
                      * mPpi                 // pixels per inch
                      * friction;
    }
    /**
     * Returns whether the scroller has finished scrolling.
     *
     * @return True if the scroller has finished scrolling, false otherwise.
     */
    public final boolean isFinished() {
        return mFinished;
    }
    /**
     * Force the finished field to a particular value.
     *
     * @param finished The new finished value.
     */
    public final void forceFinished(boolean finished) {
        mFinished = finished;
    }
    /**
     * Returns how long the scroll event will take, in milliseconds.
     *
     * @return The duration of the scroll in milliseconds.
     */
    public final int getDuration() {
        return mDuration;
    }
    /**
     * Returns the current X offset in the scroll.
     *
     * @return The new X offset as an absolute distance from the origin.
     */
    public final int getCurrX() {
        return mCurrX;
    }
    /**
     * Returns the current Y offset in the scroll.
     *
     * @return The new Y offset as an absolute distance from the origin.
     */
    public final int getCurrY() {
        return mCurrY;
    }
    /**
     * Returns the current velocity, in pixels per second.
     *
     * @return The original velocity less the deceleration. Result may be
     * negative.
     */
    public float getCurrVelocity() {
        return mMode == FLING_MODE ?
                mCurrVelocity : mVelocity - mDeceleration * timePassed() / 2000.0f;
    }
    /**
     * Returns the start X offset in the scroll.
     *
     * @return The start X offset as an absolute distance from the origin.
     */
    public final int getStartX() {
        return mStartX;
    }
    /**
     * Returns the start Y offset in the scroll.
     *
     * @return The start Y offset as an absolute distance from the origin.
     */
    public final int getStartY() {
        return mStartY;
    }
    /**
     * Returns where the scroll will end. Valid only for "fling" scrolls.
     *
     * @return The final X offset as an absolute distance from the origin.
     */
    public final int getFinalX() {
        return mFinalX;
    }
    /**
     * Returns where the scroll will end. Valid only for "fling" scrolls.
     *
     * @return The final Y offset as an absolute distance from the origin.
     */
    public final int getFinalY() {
        return mFinalY;
    }
    /**
     * Call this when you want to know the new location. If it returns true,
     * the animation is not yet finished.
     * Note: this returns false only when the scroller was already finished
     * before the call; the invocation during which the animation completes
     * still returns true (with the final position in getCurrX/getCurrY).
     */
    public boolean computeScrollOffset() {
        if (mFinished) {
            return false;
        }
        int timePassed = (int)(AnimationUtils.currentAnimationTimeMillis() - mStartTime);
        if (timePassed < mDuration) {
            switch (mMode) {
            case SCROLL_MODE:
                // Map elapsed time through the interpolator (or the default
                // viscous-fluid curve) and scale the deltas by the result.
                float x = timePassed * mDurationReciprocal;
                if (mInterpolator == null)
                    x = viscousFluid(x);
                else
                    x = mInterpolator.getInterpolation(x);
                mCurrX = mStartX + Math.round(x * mDeltaX);
                mCurrY = mStartY + Math.round(x * mDeltaY);
                break;
            case FLING_MODE:
                // Interpolate position and velocity along the precomputed
                // fling spline (linear between adjacent samples).
                final float t = (float) timePassed / mDuration;
                final int index = (int) (NB_SAMPLES * t);
                float distanceCoef = 1.f;
                float velocityCoef = 0.f;
                if (index < NB_SAMPLES) {
                    final float t_inf = (float) index / NB_SAMPLES;
                    final float t_sup = (float) (index + 1) / NB_SAMPLES;
                    final float d_inf = SPLINE_POSITION[index];
                    final float d_sup = SPLINE_POSITION[index + 1];
                    velocityCoef = (d_sup - d_inf) / (t_sup - t_inf);
                    distanceCoef = d_inf + (t - t_inf) * velocityCoef;
                }
                mCurrVelocity = velocityCoef * mDistance / mDuration * 1000.0f;
                mCurrX = mStartX + Math.round(distanceCoef * (mFinalX - mStartX));
                // Pin to mMinX <= mCurrX <= mMaxX
                mCurrX = Math.min(mCurrX, mMaxX);
                mCurrX = Math.max(mCurrX, mMinX);
                mCurrY = mStartY + Math.round(distanceCoef * (mFinalY - mStartY));
                // Pin to mMinY <= mCurrY <= mMaxY
                mCurrY = Math.min(mCurrY, mMaxY);
                mCurrY = Math.max(mCurrY, mMinY);
                if (mCurrX == mFinalX && mCurrY == mFinalY) {
                    mFinished = true;
                }
                break;
            }
        }
        else {
            // Duration elapsed: snap to the destination.
            mCurrX = mFinalX;
            mCurrY = mFinalY;
            mFinished = true;
        }
        return true;
    }
    /**
     * Start scrolling by providing a starting point and the distance to travel.
     * The scroll will use the default value of 250 milliseconds for the
     * duration.
     *
     * @param startX Starting horizontal scroll offset in pixels. Positive
     *        numbers will scroll the content to the left.
     * @param startY Starting vertical scroll offset in pixels. Positive numbers
     *        will scroll the content up.
     * @param dx Horizontal distance to travel. Positive numbers will scroll the
     *        content to the left.
     * @param dy Vertical distance to travel. Positive numbers will scroll the
     *        content up.
     */
    public void startScroll(int startX, int startY, int dx, int dy) {
        startScroll(startX, startY, dx, dy, DEFAULT_DURATION);
    }
    /**
     * Start scrolling by providing a starting point, the distance to travel,
     * and the duration of the scroll.
     *
     * @param startX Starting horizontal scroll offset in pixels. Positive
     *        numbers will scroll the content to the left.
     * @param startY Starting vertical scroll offset in pixels. Positive numbers
     *        will scroll the content up.
     * @param dx Horizontal distance to travel. Positive numbers will scroll the
     *        content to the left.
     * @param dy Vertical distance to travel. Positive numbers will scroll the
     *        content up.
     * @param duration Duration of the scroll in milliseconds.
     */
    public void startScroll(int startX, int startY, int dx, int dy, int duration) {
        mMode = SCROLL_MODE;
        mFinished = false;
        mDuration = duration;
        mStartTime = AnimationUtils.currentAnimationTimeMillis();
        mStartX = startX;
        mStartY = startY;
        mFinalX = startX + dx;
        mFinalY = startY + dy;
        mDeltaX = dx;
        mDeltaY = dy;
        // Cached so computeScrollOffset() can multiply instead of divide.
        mDurationReciprocal = 1.0f / (float) mDuration;
    }
    /**
     * Start scrolling based on a fling gesture. The distance travelled will
     * depend on the initial velocity of the fling.
     *
     * @param startX Starting point of the scroll (X)
     * @param startY Starting point of the scroll (Y)
     * @param velocityX Initial velocity of the fling (X) measured in pixels per
     *        second.
     * @param velocityY Initial velocity of the fling (Y) measured in pixels per
     *        second
     * @param minX Minimum X value. The scroller will not scroll past this
     *        point.
     * @param maxX Maximum X value. The scroller will not scroll past this
     *        point.
     * @param minY Minimum Y value. The scroller will not scroll past this
     *        point.
     * @param maxY Maximum Y value. The scroller will not scroll past this
     *        point.
     */
    public void fling(int startX, int startY, int velocityX, int velocityY,
            int minX, int maxX, int minY, int maxY) {
        // Continue a scroll or fling in progress: when the new fling points
        // the same way as the current motion, the old velocity is added so
        // that repeated flicks accelerate ("flywheel" behavior).
        if (mFlywheel && !mFinished) {
            float oldVel = getCurrVelocity();
            float dx = (float) (mFinalX - mStartX);
            float dy = (float) (mFinalY - mStartY);
            float hyp = (float) Math.hypot(dx, dy);
            float ndx = dx / hyp;
            float ndy = dy / hyp;
            float oldVelocityX = ndx * oldVel;
            float oldVelocityY = ndy * oldVel;
            // If hyp is 0 the normalized components are NaN; the signum
            // comparison below is then false, so no velocity is carried over.
            if (Math.signum(velocityX) == Math.signum(oldVelocityX) &&
                    Math.signum(velocityY) == Math.signum(oldVelocityY)) {
                velocityX += oldVelocityX;
                velocityY += oldVelocityY;
            }
        }
        mMode = FLING_MODE;
        mFinished = false;
        float velocity = (float) Math.hypot(velocityX, velocityY);
        mVelocity = velocity;
        mDuration = getSplineFlingDuration(velocity);
        mStartTime = AnimationUtils.currentAnimationTimeMillis();
        mStartX = startX;
        mStartY = startY;
        // Unit direction components; a zero velocity degenerates to (1, 1)
        // but then totalDistance is fixed by the spline formula anyway.
        float coeffX = velocity == 0 ? 1.0f : velocityX / velocity;
        float coeffY = velocity == 0 ? 1.0f : velocityY / velocity;
        double totalDistance = getSplineFlingDistance(velocity);
        mDistance = (int) (totalDistance * Math.signum(velocity));
        mMinX = minX;
        mMaxX = maxX;
        mMinY = minY;
        mMaxY = maxY;
        mFinalX = startX + (int) Math.round(totalDistance * coeffX);
        // Pin to mMinX <= mFinalX <= mMaxX
        mFinalX = Math.min(mFinalX, mMaxX);
        mFinalX = Math.max(mFinalX, mMinX);
        mFinalY = startY + (int) Math.round(totalDistance * coeffY);
        // Pin to mMinY <= mFinalY <= mMaxY
        mFinalY = Math.min(mFinalY, mMaxY);
        mFinalY = Math.max(mFinalY, mMinY);
    }
    /**
     * Logarithmic deceleration factor for the given fling velocity; shared by
     * the fling-duration and fling-distance formulas below.
     */
    private double getSplineDeceleration(float velocity) {
        return Math.log(INFLEXION * Math.abs(velocity) / (mFlingFriction * mPhysicalCoeff));
    }
    /**
     * Total fling duration in milliseconds for the given initial velocity.
     */
    private int getSplineFlingDuration(float velocity) {
        final double l = getSplineDeceleration(velocity);
        final double decelMinusOne = DECELERATION_RATE - 1.0;
        return (int) (1000.0 * Math.exp(l / decelMinusOne));
    }
    /**
     * Total fling travel distance in pixels for the given initial velocity.
     */
    private double getSplineFlingDistance(float velocity) {
        final double l = getSplineDeceleration(velocity);
        final double decelMinusOne = DECELERATION_RATE - 1.0;
        return mFlingFriction * mPhysicalCoeff * Math.exp(DECELERATION_RATE / decelMinusOne * l);
    }
static float viscousFluid(float x)
{
x *= sViscousFluidScale;
if (x < 1.0f) {
x -= (1.0f - (float)Math.exp(-x));
} else {
float start = 0.36787944117f; // 1/e == exp(-1)
x = 1.0f - (float)Math.exp(1.0f - x);
x = start + x * (1.0f - start);
}
x *= sViscousFluidNormalize;
return x;
}
    /**
     * Stops the animation. Contrary to {@link #forceFinished(boolean)},
     * aborting the animating cause the scroller to move to the final x and y
     * position
     *
     * @see #forceFinished(boolean)
     */
    public void abortAnimation() {
        mCurrX = mFinalX;
        mCurrY = mFinalY;
        mFinished = true;
    }
    /**
     * Extend the scroll animation. This allows a running animation to scroll
     * further and longer, when used with {@link #setFinalX(int)} or {@link #setFinalY(int)}.
     *
     * @param extend Additional time to scroll in milliseconds.
     * @see #setFinalX(int)
     * @see #setFinalY(int)
     */
    public void extendDuration(int extend) {
        int passed = timePassed();
        // New duration is measured from the original start time; the cached
        // reciprocal must be refreshed to match.
        mDuration = passed + extend;
        mDurationReciprocal = 1.0f / mDuration;
        mFinished = false;
    }
    /**
     * Returns the time elapsed since the beginning of the scrolling.
     *
     * @return The elapsed time in milliseconds.
     */
    public int timePassed() {
        return (int)(AnimationUtils.currentAnimationTimeMillis() - mStartTime);
    }
    /**
     * Sets the final position (X) for this scroller.
     *
     * @param newX The new X offset as an absolute distance from the origin.
     * @see #extendDuration(int)
     * @see #setFinalY(int)
     */
    public void setFinalX(int newX) {
        mFinalX = newX;
        mDeltaX = mFinalX - mStartX;
        mFinished = false;
    }
    /**
     * Sets the final position (Y) for this scroller.
     *
     * @param newY The new Y offset as an absolute distance from the origin.
     * @see #extendDuration(int)
     * @see #setFinalX(int)
     */
    public void setFinalY(int newY) {
        mFinalY = newY;
        mDeltaY = mFinalY - mStartY;
        mFinished = false;
    }
    /**
     * Returns true while still animating toward (mFinalX, mFinalY) and the
     * given velocity points the same way as the remaining travel on both axes.
     * @hide
     */
    public boolean isScrollingInDirection(float xvel, float yvel) {
        return !mFinished && Math.signum(xvel) == Math.signum(mFinalX - mStartX) &&
                Math.signum(yvel) == Math.signum(mFinalY - mStartY);
    }
}
| YAJATapps/FlickLauncher | src/com/android/launcher3/LauncherScroller.java | Java | apache-2.0 | 18,972 |
/****************************************************************************
*
* Copyright 2016 Samsung Electronics All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific
* language governing permissions and limitations under the License.
*
****************************************************************************/
/****************************************************************************
*
* Copyright (C) 2007-2009, 2011-2013 Gregory Nutt. All rights reserved.
* Author: Gregory Nutt <[email protected]>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* 3. Neither the name NuttX nor the names of its contributors may be
* used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
****************************************************************************/
#ifndef __INCLUDE_FS_FS_H
#define __INCLUDE_FS_FS_H
/****************************************************************************
* Included Files
****************************************************************************/
#include <tinyara/config.h>
#include <tinyara/compiler.h>
#include <sys/types.h>
#include <stdarg.h>
#include <stdint.h>
#include <stdbool.h>
#include <semaphore.h>
#ifdef CONFIG_FS_NAMED_SEMAPHORES
#include <tinyara/semaphore.h>
#endif
#ifndef CONFIG_DISABLE_MQUEUE
#include <tinyara/mqueue.h>
#endif
/****************************************************************************
* Pre-processor Definitions
****************************************************************************/
/* Stream flags for the fs_flags field of in struct file_struct */
#define __FS_FLAG_EOF (1 << 0) /* EOF detected by a read operation */
#define __FS_FLAG_ERROR (1 << 1) /* Error detected by any operation */
#ifndef CONFIG_MOUNT_POINT
#define CONFIG_MOUNT_POINT "/mnt/"
#endif
/****************************************************************************
* Public Type Definitions
****************************************************************************/
/* This structure is provided by devices when they are registered with the
* system. It is used to call back to perform device specific operations.
*/
struct file; /* Forward reference */
struct pollfd; /* Forward reference */
struct inode; /* Forward reference */
struct file_operations {
/* The device driver open method differs from the mountpoint open method */
int (*open)(FAR struct file *filep);
/* The following methods must be identical in signature and position because
 * the struct file_operations and struct mountp_operations are treated like
 * unions.
 */
int (*close)(FAR struct file *filep);
ssize_t (*read)(FAR struct file *filep, FAR char *buffer, size_t buflen);
ssize_t (*write)(FAR struct file *filep, FAR const char *buffer, size_t buflen);
off_t (*seek)(FAR struct file *filep, off_t offset, int whence);
int (*ioctl)(FAR struct file *filep, int cmd, unsigned long arg);
/* The two structures need not be common after this point */
#ifndef CONFIG_DISABLE_POLL
int (*poll)(FAR struct file *filep, struct pollfd *fds, bool setup);
#endif
/* NOTE(review): unlink receives the inode (not an open file); presumably it
 * is invoked when the driver node is removed -- confirm against fs_unlink().
 */
int (*unlink)(FAR struct inode *inode);
};
/* This structure provides information about the state of a block driver */
#ifndef CONFIG_DISABLE_MOUNTPOINT
struct geometry {
bool geo_available; /* true: The device is available */
bool geo_mediachanged; /* true: The media has changed since last query */
bool geo_writeenabled; /* true: It is okay to write to this device */
size_t geo_nsectors; /* Number of sectors on the device */
size_t geo_sectorsize; /* Size of one sector (presumably in bytes -- confirm) */
};
/* This structure is provided by block devices when they register with the
* system. It is used by file systems to perform filesystem transfers. It
* differs from the normal driver vtable in several ways -- most notably in
* that it deals in struct inode vs. struct filep.
*/
struct inode;
struct block_operations {
int (*open)(FAR struct inode *inode);
int (*close)(FAR struct inode *inode);
/* Sector-oriented transfer: reads 'nsectors' sectors starting at
 * 'start_sector' into 'buffer'.
 */
ssize_t (*read)(FAR struct inode *inode, FAR unsigned char *buffer, size_t start_sector, unsigned int nsectors);
/* Sector-oriented transfer: writes 'nsectors' sectors starting at
 * 'start_sector' from 'buffer'.
 */
ssize_t (*write)(FAR struct inode *inode, FAR const unsigned char *buffer, size_t start_sector, unsigned int nsectors);
/* Queries device availability and sector layout (see struct geometry) */
int (*geometry)(FAR struct inode *inode, FAR struct geometry *geometry);
int (*ioctl)(FAR struct inode *inode, int cmd, unsigned long arg);
int (*unlink)(FAR struct inode *inode);
};
/* This structure is provided by a filesystem to describe a mount point.
* Note that this structure differs from file_operations ONLY in the form of
* the open method. Once the file is opened, it can be accessed either as a
* struct file_operations or struct mountpt_operations
*/
struct inode;
struct fs_dirent_s;
struct stat;
struct statfs;
struct mountpt_operations {
/* The mountpoint open method differs from the driver open method
 * because it receives (1) the inode that contains the mountpoint
 * private data, (2) the relative path into the mountpoint, and (3)
 * information to manage privileges.
 */
int (*open)(FAR struct file *filep, FAR const char *relpath, int oflags, mode_t mode);
/* The following methods must be identical in signature and position
 * because the struct file_operations and struct mountp_operations are
 * treated like unions.
 */
int (*close)(FAR struct file *filep);
ssize_t (*read)(FAR struct file *filep, FAR char *buffer, size_t buflen);
ssize_t (*write)(FAR struct file *filep, FAR const char *buffer, size_t buflen);
off_t (*seek)(FAR struct file *filep, off_t offset, int whence);
int (*ioctl)(FAR struct file *filep, int cmd, unsigned long arg);
/* The two structures need not be common after this point. The following
 * are extended methods needed to deal with the unique needs of mounted
 * file systems.
 *
 * Additional open-file-specific mountpoint operations:
 */
int (*sync)(FAR struct file *filep);
int (*dup)(FAR const struct file *oldp, FAR struct file *newp);
/* Directory operations */
int (*opendir)(FAR struct inode *mountpt, FAR const char *relpath, FAR struct fs_dirent_s *dir);
int (*closedir)(FAR struct inode *mountpt, FAR struct fs_dirent_s *dir);
int (*readdir)(FAR struct inode *mountpt, FAR struct fs_dirent_s *dir);
int (*rewinddir)(FAR struct inode *mountpt, FAR struct fs_dirent_s *dir);
/* General volume-related mountpoint operations:
 *
 * bind attaches the file system to a block driver inode and returns an
 * opaque, filesystem-private 'handle'; unbind releases that handle and
 * returns the contained block driver inode via 'blkdriver'.
 */
int (*bind)(FAR struct inode *blkdriver, FAR const void *data, FAR void **handle);
int (*unbind)(FAR void *handle, FAR struct inode **blkdriver);
int (*statfs)(FAR struct inode *mountpt, FAR struct statfs *buf);
/* Operations on paths (all relative to the mountpoint root) */
int (*unlink)(FAR struct inode *mountpt, FAR const char *relpath);
int (*mkdir)(FAR struct inode *mountpt, FAR const char *relpath, mode_t mode);
int (*rmdir)(FAR struct inode *mountpt, FAR const char *relpath);
int (*rename)(FAR struct inode *mountpt, FAR const char *oldrelpath, FAR const char *newrelpath);
int (*stat)(FAR struct inode *mountpt, FAR const char *relpath, FAR struct stat *buf);
/* NOTE: More operations will be needed here to support: disk usage
 * stats file stat(), file attributes, file truncation, etc.
 */
};
#endif /* CONFIG_DISABLE_MOUNTPOINT */
/* Named OS resources are also maintained by the VFS. This includes:
*
* - Named semaphores: sem_open(), sem_close(), and sem_unlink()
* - POSIX Message Queues: mq_open() and mq_close()
* - Shared memory: shm_open() and shm_unlink();
*
* These are a special case in that they do not follow quite the same
* pattern as the other file system types in that they have operations.
*/
/* These are the various kinds of operations that can be associated with
* an inode.
*/
/* NOTE(review): which union member is active is presumably determined by the
 * owning inode's i_flags field -- confirm against fs_inode.c before relying
 * on any particular member.
 */
union inode_ops_u {
FAR const struct file_operations
*i_ops; /* Driver operations for inode */
#ifndef CONFIG_DISABLE_MOUNTPOINT
FAR const struct block_operations *i_bops; /* Block driver operations */
FAR const struct mountpt_operations
*i_mops; /* Operations on a mountpoint */
#endif
#ifdef CONFIG_FS_NAMED_SEMAPHORES
FAR struct nsem_inode_s *i_nsem; /* Named semaphore */
#endif
#ifndef CONFIG_DISABLE_MQUEUE
FAR struct mqueue_inode_s *i_mqueue; /* POSIX message queue */
#endif
};
/* This structure represents one inode in the TinyAra pseudo-file system */
struct inode {
FAR struct inode *i_peer; /* Link to same level inode */
FAR struct inode *i_child; /* Link to lower level inode */
int16_t i_crefs; /* References to inode */
uint16_t i_flags; /* Flags for inode */
union inode_ops_u u; /* Inode operations */
#ifdef CONFIG_FILE_MODE
mode_t i_mode; /* Access mode flags */
#endif
FAR void *i_private; /* Per inode driver private data */
char i_name[1]; /* Name of inode (variable length; extra space is
 * allocated via the FSNODE_SIZE() macro below) */
};
#define FSNODE_SIZE(n) (sizeof(struct inode) + (n))
/* This is the underlying representation of an open file. A file
* descriptor is an index into an array of such types. The type associates
* the file descriptor to the file state and to a set of inode operations.
*/
struct file {
int f_oflags; /* Open mode flags */
off_t f_pos; /* File position */
FAR struct inode *f_inode; /* Driver interface */
void *f_priv; /* Per file driver private data (opaque to the VFS) */
};
/* This defines a list of files indexed by the file descriptor */
#if CONFIG_NFILE_DESCRIPTORS > 0
struct filelist {
sem_t fl_sem; /* Manage access to the file list */
struct file fl_files[CONFIG_NFILE_DESCRIPTORS]; /* One slot per file descriptor;
 * the descriptor is the index */
};
#endif
/* The following structure defines the list of files used for standard C I/O.
 * Note that TinyAra can support the standard C APIs with or without buffering
 *
 * When buffering is used, the following describes the usage of the I/O buffer.
 * The buffer can be used for reading or writing -- but not both at the same time.
 * An fflush is implied between each change in direction of access.
 *
 * The field fs_bufread determines whether the buffer is being used for reading or
 * for writing as follows:
*
* BUFFER
* +----------------------+ <- fs_bufstart Points to the beginning of the buffer.
* | WR: Buffered data | WR: Start of buffered write data.
* | RD: Already read | RD: Start of already read data.
* +----------------------+
* | WR: Available buffer | <- fs_bufpos Points to next byte:
* | RD: Read-ahead data | WR: End+1 of buffered write data.
* | | RD: Points to next char to return
* +----------------------+
* | WR: Available | <- fs_bufread Top+1 of buffered read data
* | RD: Available | WR: =bufstart buffer used for writing.
* | | RD: Pointer to last buffered read char+1
* +----------------------+
 * <- fs_bufend Points to the end of the buffer+1
*/
#if CONFIG_NFILE_STREAMS > 0
struct file_struct {
int fs_fd; /* File descriptor associated with stream */
#if CONFIG_STDIO_BUFFER_SIZE > 0
sem_t fs_sem; /* For thread safety */
pid_t fs_holder; /* Holder of sem */
int fs_counts; /* Number of times sem is held (supports re-entry by holder) */
FAR unsigned char *fs_bufstart; /* Pointer to start of buffer */
FAR unsigned char *fs_bufend; /* Pointer to 1 past end of buffer */
FAR unsigned char *fs_bufpos; /* Current position in buffer */
FAR unsigned char
*fs_bufread; /* Pointer to 1 past last buffered read char. */
#endif
uint16_t fs_oflags; /* Open mode flags */
uint8_t fs_flags; /* Stream flags (__FS_FLAG_* bits defined above) */
#if CONFIG_NUNGET_CHARS > 0
uint8_t fs_nungotten; /* The number of characters buffered for ungetc */
unsigned char fs_ungotten[CONFIG_NUNGET_CHARS];
#endif
};
struct streamlist {
sem_t sl_sem; /* For thread safety: serializes access to sl_streams */
struct file_struct sl_streams[CONFIG_NFILE_STREAMS]; /* The stream slots */
};
#endif /* CONFIG_NFILE_STREAMS */
/* Callback used by foreach_mountpoints to traverse all mountpoints in the
* pseudo-file system.
*/
#ifndef CONFIG_DISABLE_MOUNTPOINT
struct statfs; /* Forward reference */
typedef int (*foreach_mountpoint_t)(FAR const char *mountpoint, FAR struct statfs *statbuf, FAR void *arg);
#endif
/****************************************************************************
* Global Function Prototypes
****************************************************************************/
#undef EXTERN
#if defined(__cplusplus)
#define EXTERN extern "C"
extern "C" {
#else
#define EXTERN extern
#endif
/* fs_inode.c ***************************************************************/
/****************************************************************************
* Name: fs_initialize
*
* Description:
* This is called from the OS initialization logic to configure the file
* system.
*
****************************************************************************/
void fs_initialize(void);
/* fs_foreachmountpoint.c ***************************************************/
/****************************************************************************
* Name: foreach_mountpoint
*
* Description:
* Visit each mountpoint in the pseudo-file system. The traversal is
* terminated when the callback 'handler' returns a non-zero value, or when
* all of the mountpoints have been visited.
*
* This is just a front end "filter" to foreach_inode() that forwards only
 * mountpoint inodes. It is intended to support the mount() command
 * when it is used to enumerate mounts.
*
* NOTE 1: Use with caution... The pseudo-file system is locked throughout
* the traversal.
* NOTE 2: The search algorithm is recursive and could, in principle, use
 * an indeterminate amount of stack space. This will not usually be a
 * real-world issue.
*
****************************************************************************/
#ifndef CONFIG_DISABLE_MOUNTPOINT
int foreach_mountpoint(foreach_mountpoint_t handler, FAR void *arg);
#endif
/* fs_registerdriver.c ******************************************************/
/****************************************************************************
* Name: register_driver
*
* Description:
* Register a character driver inode the pseudo file system.
*
* Input parameters:
* path - The path to the inode to create
* fops - The file operations structure
 * mode - inode privileges (not used)
* priv - Private, user data that will be associated with the inode.
*
* Returned Value:
* Zero on success (with the inode point in 'inode'); A negated errno
* value is returned on a failure (all error values returned by
* inode_reserve):
*
* EINVAL - 'path' is invalid for this operation
* EEXIST - An inode already exists at 'path'
* ENOMEM - Failed to allocate in-memory resources for the operation
*
****************************************************************************/
int register_driver(FAR const char *path, FAR const struct file_operations *fops, mode_t mode, FAR void *priv);
/* fs_registerblockdriver.c *************************************************/
/****************************************************************************
* Name: register_blockdriver
*
* Description:
* Register a block driver inode the pseudo file system.
*
* Input parameters:
* path - The path to the inode to create
* bops - The block driver operations structure
 * mode - inode privileges (not used)
* priv - Private, user data that will be associated with the inode.
*
* Returned Value:
* Zero on success (with the inode point in 'inode'); A negated errno
* value is returned on a failure (all error values returned by
* inode_reserve):
*
* EINVAL - 'path' is invalid for this operation
* EEXIST - An inode already exists at 'path'
* ENOMEM - Failed to allocate in-memory resources for the operation
*
****************************************************************************/
#ifndef CONFIG_DISABLE_MOUNTPOINT
int register_blockdriver(FAR const char *path, FAR const struct block_operations *bops, mode_t mode, FAR void *priv);
#endif
/* fs_unregisterdriver.c ****************************************************/
/****************************************************************************
* Name: unregister_driver
*
* Description:
* Remove the character driver inode at 'path' from the pseudo-file system
*
****************************************************************************/
int unregister_driver(const char *path);
/* fs_unregisterblockdriver.c ***********************************************/
/****************************************************************************
* Name: unregister_blockdriver
*
* Description:
* Remove the block driver inode at 'path' from the pseudo-file system
*
****************************************************************************/
int unregister_blockdriver(const char *path);
/* fs_open.c ****************************************************************/
/****************************************************************************
* Name: inode_checkflags
*
* Description:
* Check if the access described by 'oflags' is supported on 'inode'
*
****************************************************************************/
int inode_checkflags(FAR struct inode *inode, int oflags);
/* fs_files.c ***************************************************************/
/****************************************************************************
* Name: files_initlist
*
* Description:
* Initializes the list of files for a new task
*
****************************************************************************/
#if CONFIG_NFILE_DESCRIPTORS > 0
void files_initlist(FAR struct filelist *list);
#endif
/****************************************************************************
* Name: files_releaselist
*
* Description:
* Release a reference to the file list
*
****************************************************************************/
#if CONFIG_NFILE_DESCRIPTORS > 0
void files_releaselist(FAR struct filelist *list);
#endif
/****************************************************************************
* Name: file_dup2
*
* Description:
* Assign an inode to a specific files structure. This is the heart of
* dup2.
*
****************************************************************************/
#if CONFIG_NFILE_DESCRIPTORS > 0
int file_dup2(FAR struct file *filep1, FAR struct file *filep2);
#endif
/* fs_filedup.c *************************************************************/
/****************************************************************************
* Name: fs_dupfd OR dup
*
* Description:
 * Clone a file descriptor 'fd' to an arbitrary descriptor number (any value
* greater than or equal to 'minfd'). If socket descriptors are
* implemented, then this is called by dup() for the case of file
* descriptors. If socket descriptors are not implemented, then this
* function IS dup().
*
* This alternative naming is used when dup could operate on both file and
 * socket descriptors to avoid drawing unused socket support into the link.
*
****************************************************************************/
#if CONFIG_NFILE_DESCRIPTORS > 0
int fs_dupfd(int fd, int minfd);
#endif
/****************************************************************************
* Name: file_dup
*
* Description:
* Equivalent to the non-standard fs_dupfd() function except that it
* accepts a struct file instance instead of a file descriptor. Currently
* used only by file_vfcntl();
*
****************************************************************************/
int file_dup(FAR struct file *filep, int minfd);
/* fs_filedup2.c ************************************************************/
/****************************************************************************
* Name: fs_dupfd2 OR dup2
*
* Description:
* Clone a file descriptor to a specific descriptor number. If socket
* descriptors are implemented, then this is called by dup2() for the
* case of file descriptors. If socket descriptors are not implemented,
* then this function IS dup2().
*
* This alternative naming is used when dup2 could operate on both file and
 * socket descriptors to avoid drawing unused socket support into the link.
*
****************************************************************************/
#if CONFIG_NFILE_DESCRIPTORS > 0
#if defined(CONFIG_NET) && CONFIG_NSOCKET_DESCRIPTORS > 0
int fs_dupfd2(int fd1, int fd2);
#else
#define fs_dupfd2(fd1, fd2) dup2(fd1, fd2)
#endif
#endif
/* fs_openblockdriver.c *****************************************************/
/****************************************************************************
* Name: open_blockdriver
*
* Description:
* Return the inode of the block driver specified by 'pathname'
*
* Inputs:
* pathname - the full path to the block driver to be opened
* mountflags - if MS_RDONLY is not set, then driver must support write
* operations (see include/sys/mount.h)
* ppinode - address of the location to return the inode reference
*
* Return:
* Returns zero on success or a negated errno on failure:
*
* EINVAL - pathname or pinode is NULL
* ENOENT - No block driver of this name is registered
* ENOTBLK - The inode associated with the pathname is not a block driver
 * EACCES - The MS_RDONLY option was not set but this driver does not
* support write access
*
****************************************************************************/
#if CONFIG_NFILE_DESCRIPTORS > 0
int open_blockdriver(FAR const char *pathname, int mountflags, FAR struct inode **ppinode);
#endif
/* fs_closeblockdriver.c ****************************************************/
/****************************************************************************
* Name: close_blockdriver
*
* Description:
* Call the close method and release the inode
*
* Inputs:
* inode - reference to the inode of a block driver opened by open_blockdriver
*
* Return:
* Returns zero on success or a negated errno on failure:
*
* EINVAL - inode is NULL
* ENOTBLK - The inode is not a block driver
*
****************************************************************************/
#if CONFIG_NFILE_DESCRIPTORS > 0
int close_blockdriver(FAR struct inode *inode);
#endif
/* fs/vfs/fs_ioctl.c ********************************************************/
/****************************************************************************
* Name: fs_ioctl
*
* Description:
* Perform device specific operations.
*
* Parameters:
* fd File/socket descriptor of device
* req The ioctl command
* arg The argument of the ioctl cmd
*
* Return:
* >=0 on success (positive non-zero values are cmd-specific)
* -1 on failure with errno set properly:
*
* EBADF
* 'fd' is not a valid descriptor.
* EFAULT
* 'arg' references an inaccessible memory area.
* EINVAL
* 'cmd' or 'arg' is not valid.
* ENOTTY
* 'fd' is not associated with a character special device.
* ENOTTY
* The specified request does not apply to the kind of object that the
* descriptor 'fd' references.
*
****************************************************************************/
#ifdef CONFIG_LIBC_IOCTL_VARIADIC
int fs_ioctl(int fd, int req, unsigned long arg);
#endif
/* fs_fdopen.c **************************************************************/
/****************************************************************************
* Name: fs_fdopen
*
* Description:
* This function does the core operations for fopen and fdopen. It is
* used by the OS to clone stdin, stdout, stderr
*
****************************************************************************/
#if CONFIG_NFILE_STREAMS > 0
struct tcb_s; /* Forward reference */
FAR struct file_struct *fs_fdopen(int fd, int oflags, FAR struct tcb_s *tcb);
#endif
/* libc/stdio/lib_fflush.c *************************************************/
/****************************************************************************
* Name: lib_flushall
*
* Description:
* Called either (1) by the OS when a task exits, or (2) from fflush()
* when a NULL stream argument is provided.
*
****************************************************************************/
#if CONFIG_NFILE_STREAMS > 0
int lib_flushall(FAR struct streamlist *list);
#endif
/* fs/fs_getfilep.c *********************************************************/
/****************************************************************************
* Name: fs_getfilep
*
* Description:
* Given a file descriptor, return the corresponding instance of struct
* file. NOTE that this function will currently fail if it is provided
* with a socket descriptor.
*
* Parameters:
* fd - The file descriptor
*
* Return:
 * A pointer to the corresponding struct file instance is returned on
* success. On failure, NULL is returned and the errno value is
* set appropriately (EBADF).
*
****************************************************************************/
#if CONFIG_NFILE_DESCRIPTORS > 0
FAR struct file *fs_getfilep(int fd);
#endif
/* fs/fs_read.c *************************************************************/
/****************************************************************************
* Name: file_read
*
* Description:
* Equivalent to the standard read() function except that is accepts a
* struct file instance instead of a file descriptor. Currently used
* only by net_sendfile() and aio_read();
*
****************************************************************************/
#if CONFIG_NFILE_DESCRIPTORS > 0
ssize_t file_read(FAR struct file *filep, FAR void *buf, size_t nbytes);
#endif
/* fs/fs_write.c ************************************************************/
/****************************************************************************
* Name: file_write
*
* Description:
* Equivalent to the standard write() function except that is accepts a
* struct file instance instead of a file descriptor. Currently used
* only by aio_write();
*
****************************************************************************/
#if CONFIG_NFILE_DESCRIPTORS > 0
ssize_t file_write(FAR struct file *filep, FAR const void *buf, size_t nbytes);
#endif
/* fs/fs_pread.c ************************************************************/
/****************************************************************************
* Name: file_pread
*
* Description:
* Equivalent to the standard pread function except that is accepts a
* struct file instance instead of a file descriptor. Currently used
* only by aio_read();
*
****************************************************************************/
#if CONFIG_NFILE_DESCRIPTORS > 0
ssize_t file_pread(FAR struct file *filep, FAR void *buf, size_t nbytes, off_t offset);
#endif
/* fs/fs_pwrite.c ***********************************************************/
/****************************************************************************
* Name: file_pwrite
*
* Description:
* Equivalent to the standard pwrite function except that is accepts a
* struct file instance instead of a file descriptor. Currently used
* only by aio_write();
*
****************************************************************************/
#if CONFIG_NFILE_DESCRIPTORS > 0
ssize_t file_pwrite(FAR struct file *filep, FAR const void *buf, size_t nbytes, off_t offset);
#endif
/* fs/fs_lseek.c ************************************************************/
/****************************************************************************
* Name: file_seek
*
* Description:
* Equivalent to the standard lseek() function except that is accepts a
* struct file instance instead of a file descriptor. Currently used
* only by net_sendfile()
*
****************************************************************************/
#if CONFIG_NFILE_DESCRIPTORS > 0
off_t file_seek(FAR struct file *filep, off_t offset, int whence);
#endif
/* fs/fs_fsync.c ************************************************************/
/****************************************************************************
* Name: file_fsync
*
* Description:
* Equivalent to the standard fsync() function except that is accepts a
* struct file instance instead of a file descriptor. Currently used
* only by aio_fsync();
*
****************************************************************************/
#if CONFIG_NFILE_DESCRIPTORS > 0
int file_fsync(FAR struct file *filep);
#endif
/* fs/fs_fcntl.c ************************************************************/
/****************************************************************************
* Name: file_vfcntl
*
* Description:
* Similar to the standard vfcntl function except that is accepts a struct
* struct file instance instead of a file descriptor. Currently used
* only by aio_fcntl();
*
****************************************************************************/
#if CONFIG_NFILE_DESCRIPTORS > 0
int file_vfcntl(FAR struct file *filep, int cmd, va_list ap);
#endif
/* drivers/dev_null.c *******************************************************/
/****************************************************************************
* Name: devnull_register
*
* Description:
* Register /dev/null
*
****************************************************************************/
void devnull_register(void);
/* drivers/dev_zero.c *******************************************************/
/****************************************************************************
* Name: devzero_register
*
* Description:
*   Register /dev/zero
*
****************************************************************************/
void devzero_register(void);
/* drivers/loop.c ***********************************************************/
/****************************************************************************
* Name: losetup
*
* Description:
*   Setup the loop device so that it exports the file referenced by 'filename'
*   as a block device.
*
****************************************************************************/
int losetup(FAR const char *devname, FAR const char *filename, uint16_t sectsize, off_t offset, bool readonly);
/****************************************************************************
* Name: loteardown
*
* Description:
*   Undo the setup performed by losetup
*
****************************************************************************/
int loteardown(FAR const char *devname);
/* drivers/bch/bchdev_register.c ********************************************/
/****************************************************************************
* Name: bchdev_register
*
* Description:
*   Setup so that it exports the block driver referenced by 'blkdev' as a
*   character device 'chardev'
*
****************************************************************************/
int bchdev_register(FAR const char *blkdev, FAR const char *chardev, bool readonly);
/* drivers/bch/bchdev_unregister.c ******************************************/
/****************************************************************************
* Name: bchdev_unregister
*
* Description:
*   Unregister character driver access to a block device that was created
*   by a previous call to bchdev_register().
*
****************************************************************************/
int bchdev_unregister(FAR const char *chardev);
/* Low level, direct access.  NOTE:  low-level access and character driver access
 * are incompatible.  One and only one access method should be implemented.
 */
/* drivers/bch/bchlib_setup.c ***********************************************/
/****************************************************************************
* Name: bchlib_setup
*
* Description:
*   Setup so that the block driver referenced by 'blkdev' can be accessed
*   similar to a character device.
*
****************************************************************************/
int bchlib_setup(FAR const char *blkdev, bool readonly, FAR void **handle);
/* drivers/bch/bchlib_teardown.c ********************************************/
/****************************************************************************
* Name: bchlib_teardown
*
* Description:
*   Undo the setup performed by bchlib_setup, releasing the 'handle' that
*   it returned.
*
****************************************************************************/
int bchlib_teardown(FAR void *handle);
/* drivers/bch/bchlib_read.c ************************************************/
/****************************************************************************
* Name: bchlib_read
*
* Description:
*   Read from the block device set-up by bchlib_setup as if it were a
*   character device.
*
****************************************************************************/
ssize_t bchlib_read(FAR void *handle, FAR char *buffer, size_t offset, size_t len);
/* drivers/bch/bchlib_write.c ***********************************************/
/****************************************************************************
* Name: bchlib_write
*
* Description:
*   Write to the block device set-up by bchlib_setup as if it were a
*   character device.
*
****************************************************************************/
ssize_t bchlib_write(FAR void *handle, FAR const char *buffer, size_t offset, size_t len);
#undef EXTERN
#if defined(__cplusplus)
}
#endif
#endif /* __INCLUDE_FS_FS_H */
| guswns0528/TizenRT | os/include/tinyara/fs/fs.h | C | apache-2.0 | 35,393 |
/* "Small title" layout variant: shrinks the Calcite Maps navbar to 40px and
   adjusts everything that depends on that height (brand, menus, titles). */
.calcite-layout-small-title .calcite-navbar {
  height: 40px;
  min-height: 40px; }
  .calcite-layout-small-title .calcite-navbar .navbar-brand {
    padding: 12px 14px;
    font-size: 36px; }
  .calcite-layout-small-title .calcite-navbar .navbar-nav > li > a {
    padding: 10px 12px; }
  .calcite-layout-small-title .calcite-dropdown .dropdown-toggle {
    padding: 12px 20px 14px 21px; }
  .calcite-layout-small-title .calcite-dropdown-toggle {
    width: 15px; }
  .calcite-layout-small-title .calcite-title-main {
    padding: 3px 0 4px;
    font-size: 19px;
    line-height: 1.1; }
  .calcite-layout-small-title .calcite-title-divider {
    height: 80px;
    margin: 0 13px; }
  .calcite-layout-small-title .calcite-title-sub {
    font-size: 12px;
    line-height: 1.1; }
  .calcite-layout-small-title .calcite-navbar-search.calcite-search-expander {
    margin: 5px; }
/* Offset the Esri (JSAPI) and Leaflet zoom controls so they clear the 40px bar. */
.calcite-layout-small-title.calcite-nav-top.calcite-zoom-top-left .esriZoom.esriVertical.esriComponent,
.calcite-layout-small-title.calcite-nav-top.calcite-zoom-top-right .esriZoom.esriVertical.esriComponent,
.calcite-layout-small-title.calcite-nav-bottom.calcite-zoom-bottom-left .esriZoom.esriVertical.esriComponent,
.calcite-layout-small-title.calcite-nav-bottom.calcite-zoom-bottom-right .esriZoom.esriVertical.esriComponent {
  margin: 55px 25px; }
.calcite-layout-small-title.calcite-nav-top .calcite-map .leaflet-control-zoom {
  margin: 55px 25px 15px 30px; }
/* Anchor the drawer menu just past the navbar (55px variants account for the
   15px page margin added by the calcite-margin-* modifiers). */
.calcite-layout-small-title.calcite-nav-top .calcite-dropdown .calcite-menu-drawer, .calcite-layout-small-title.calcite-nav-top-fixed .calcite-dropdown .calcite-menu-drawer {
  top: 40px; }
.calcite-layout-small-title.calcite-nav-top.calcite-margin-top .calcite-dropdown .calcite-menu-drawer, .calcite-layout-small-title.calcite-nav-top.calcite-margin-all .calcite-dropdown .calcite-menu-drawer {
  top: 55px; }
.calcite-layout-small-title.calcite-nav-bottom .calcite-dropdown .calcite-menu-drawer, .calcite-layout-small-title.calcite-nav-bottom-fixed .calcite-dropdown .calcite-menu-drawer {
  bottom: 40px; }
.calcite-layout-small-title.calcite-nav-bottom.calcite-margin-bottom .calcite-dropdown .calcite-menu-drawer, .calcite-layout-small-title.calcite-nav-bottom.calcite-margin-all .calcite-dropdown .calcite-menu-drawer {
  bottom: 55px; }
/* Desktop: pin side panels below the navbar. */
@media (min-width: 769px) {
  .calcite-layout-small-title.calcite-nav-top .calcite-panels {
    top: 55px; }
  .calcite-layout-small-title.calcite-nav-hidden.calcite-nav-top .calcite-panels {
    top: 55px; }
  .calcite-layout-small-title.calcite-nav-hidden .calcite-panels, .calcite-layout-small-title.calcite-nav-hidden.calcite-nav-bottom .calcite-panels {
    top: 30px; } }
/* Mobile: panels dock to the bottom instead. */
@media (max-width: 768px) {
  .calcite-layout-small-title.calcite-nav-top .calcite-panels {
    top: auto; }
  .calcite-layout-small-title .calcite-dropdown .dropdown-toggle {
    padding: 12px 15px 14px 18px; }
  .calcite-layout-small-title.calcite-nav-bottom .calcite-panels,
  .calcite-layout-small-title.calcite-nav-bottom-fixed .calcite-panels,
  .calcite-layout-small-title.calcite-nav-bottom.calcite-margin-all .calcite-panels,
  .calcite-layout-small-title.calcite-nav-bottom.calcite-margin-bottom .calcite-panels {
    bottom: 40px; } }
| Esri/calcite-maps | dist/css/layouts/small-title-v0.6.css | CSS | apache-2.0 | 3,196 |
from __future__ import absolute_import
from __future__ import print_function

import re
import sys
from optparse import make_option
from typing import Any, Dict, Text

from django.conf import settings
from django.core.management.base import BaseCommand, CommandParser

from zerver.lib.actions import Realm, do_create_realm, set_default_streams
from zerver.models import RealmAlias, can_add_alias, get_realm

if settings.ZILENCER_ENABLED:
    from zilencer.models import Deployment
class Command(BaseCommand):
    help = """Create a realm.
Usage: ./manage.py create_realm --string_id=acme --name='Acme'"""

    def add_arguments(self, parser):
        # type: (CommandParser) -> None
        """Register the command-line options accepted by this command."""
        parser.add_argument('-d', '--domain',
                            dest='domain',
                            type=str,
                            help='The domain for the realm.')
        parser.add_argument('-s', '--string_id',
                            dest='string_id',
                            type=str,
                            help="A short name for the realm. If this "
                                 "installation uses subdomains, this will be "
                                 "used as the realm's subdomain.")
        parser.add_argument('-n', '--name',
                            dest='name',
                            type=str,
                            help='The user-visible name for the realm.')
        parser.add_argument('--corporate',
                            dest='org_type',
                            action="store_const",
                            const=Realm.CORPORATE,
                            help='Is a corporate org_type')
        parser.add_argument('--community',
                            dest='org_type',
                            action="store_const",
                            const=Realm.COMMUNITY,
                            default=None,
                            help='Is a community org_type. Is the default.')
        parser.add_argument('--deployment',
                            dest='deployment_id',
                            type=int,
                            default=None,
                            help='Optionally, the ID of the deployment you '
                                 'want to associate the realm with.')

    def validate_domain(self, domain):
        # type: (str) -> None
        """Raise ValueError if `domain` cannot be attached to a new realm."""
        # Domains can't contain whitespace if they are to be used in memcached
        # keys. Seems safer to leave that as the default case regardless of
        # which backing store we use.
        if re.search(r"\s", domain):
            raise ValueError("Domains can't contain whitespace")
        # Domains must look like domains, ie have the structure of
        # <subdomain(s)>.<tld>. One reason for this is that bots need
        # to have valid looking emails.
        if len(domain.split(".")) < 2:
            raise ValueError("Domains must contain a '.'")
        if not can_add_alias(domain):
            raise ValueError("Domain already assigned to an existing realm")

    def handle(self, *args, **options):
        # type: (*Any, **Any) -> None
        """Create the realm, plus optional alias, deployment and default streams."""
        string_id = options["string_id"]
        name = options["name"]
        domain = options["domain"]
        if not name or not string_id:
            print("\033[1;31mPlease provide a name and string_id.\033[0m\n", file=sys.stderr)
            self.print_help("./manage.py", "create_realm")
            # Use sys.exit rather than the `exit` site builtin, which is not
            # guaranteed to exist in all interpreter configurations.
            sys.exit(1)
        if options["deployment_id"] is not None and not settings.ZILENCER_ENABLED:
            print("\033[1;31mExternal deployments are not supported on voyager deployments.\033[0m\n", file=sys.stderr)
            sys.exit(1)
        if domain is not None:
            self.validate_domain(domain)
        if get_realm(string_id) is not None:
            raise ValueError("string_id taken. Please choose another one.")
        realm, created = do_create_realm(string_id, name, org_type=options["org_type"])
        if created:
            print(string_id, "created.")
            if domain:
                RealmAlias.objects.create(realm=realm, domain=domain)
                print("RealmAlias %s created for realm %s" % (domain, string_id))
            if options["deployment_id"] is not None:
                deployment = Deployment.objects.get(id=options["deployment_id"])
                deployment.realms.add(realm)
                deployment.save()
                print("Added to deployment", str(deployment.id))
            elif settings.PRODUCTION and settings.ZILENCER_ENABLED:
                deployment = Deployment.objects.get(base_site_url="https://zulip.com/")
                deployment.realms.add(realm)
                deployment.save()
            # In the else case, we are not using the Deployments feature.

            stream_dict = {
                "social": {"description": "For socializing", "invite_only": False},
                "engineering": {"description": "For engineering", "invite_only": False}
            } # type: Dict[Text, Dict[Text, Any]]
            set_default_streams(realm, stream_dict)
            print("\033[1;36mDefault streams set to social,engineering,zulip!\033[0m")
        else:
            print(string_id, "already exists.")
| amyliu345/zulip | zerver/management/commands/create_realm.py | Python | apache-2.0 | 5,235 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.syncope.core.provisioning.java.pushpull;
import org.apache.syncope.core.persistence.api.entity.Entity;
import org.apache.syncope.core.provisioning.api.pushpull.PushActions;
import org.apache.syncope.core.provisioning.api.pushpull.ProvisioningProfile;
import org.apache.syncope.core.provisioning.api.pushpull.ProvisioningReport;
import org.quartz.JobExecutionException;
/**
 * Default (empty) implementation of {@link PushActions}: every lifecycle hook
 * is a no-op and every {@code before*} hook returns the entity unchanged, so
 * concrete push actions only need to override the callbacks they care about.
 */
public abstract class DefaultPushActions implements PushActions {

    /** No-op hook invoked once before any entity is pushed. */
    @Override
    public void beforeAll(final ProvisioningProfile<?, ?> profile) throws JobExecutionException {
    }

    /** Returns {@code entity} unchanged before it is assigned on the resource. */
    @Override
    public Entity beforeAssign(final ProvisioningProfile<?, ?> profile, final Entity entity)
            throws JobExecutionException {
        return entity;
    }

    /** Returns {@code entity} unchanged before it is provisioned on the resource. */
    @Override
    public Entity beforeProvision(final ProvisioningProfile<?, ?> profile, final Entity entity)
            throws JobExecutionException {
        return entity;
    }

    /** Returns {@code entity} unchanged before it is linked to the resource. */
    @Override
    public Entity beforeLink(final ProvisioningProfile<?, ?> profile, final Entity entity)
            throws JobExecutionException {
        return entity;
    }

    /** Returns {@code entity} unchanged before it is unassigned from the resource. */
    @Override
    public Entity beforeUnassign(final ProvisioningProfile<?, ?> profile, final Entity entity)
            throws JobExecutionException {
        return entity;
    }

    /** Returns {@code entity} unchanged before it is deprovisioned from the resource. */
    @Override
    public Entity beforeDeprovision(final ProvisioningProfile<?, ?> profile, final Entity entity)
            throws JobExecutionException {
        return entity;
    }

    /** Returns {@code entity} unchanged before it is unlinked from the resource. */
    @Override
    public Entity beforeUnlink(final ProvisioningProfile<?, ?> profile, final Entity entity)
            throws JobExecutionException {
        return entity;
    }

    /** No-op hook invoked when pushing {@code entity} failed with {@code error}. */
    @Override
    public void onError(
            final ProvisioningProfile<?, ?> profile, final Entity entity, final ProvisioningReport result,
            final Exception error) throws JobExecutionException {

        // do nothing
    }

    /** No-op hook invoked after each entity has been pushed. */
    @Override
    public void after(
            final ProvisioningProfile<?, ?> profile, final Entity entity, final ProvisioningReport result)
            throws JobExecutionException {

        // do nothing
    }

    /** No-op hook invoked once after all entities have been pushed. */
    @Override
    public void afterAll(final ProvisioningProfile<?, ?> profile)
            throws JobExecutionException {

        // do nothing
    }
}
| tmess567/syncope | core/provisioning-java/src/main/java/org/apache/syncope/core/provisioning/java/pushpull/DefaultPushActions.java | Java | apache-2.0 | 3,149 |
// This file has been autogenerated.
var profile = require('../../../lib/util/profile');
// Builds the fake Azure CLI profile used when replaying this recording:
// a single enabled subscription in the AzureCloud environment. The ids and
// tenant below must match the recorded request URLs — do not edit by hand.
exports.getMockedProfile = function () {
  var newProfile = new profile.Profile();
  newProfile.addSubscription(new profile.Subscription({
    id: '2c224e7e-3ef5-431d-a57b-e71f4662e3a6',
    name: 'Node CLI Test',
    user: {
      name: '[email protected]',
      type: 'user'
    },
    tenantId: '72f988bf-86f1-41af-91ab-2d7cd011db47',
    state: 'Enabled',
    registeredProviders: [],
    _eventsCount: '1',
    isDefault: true
  }, newProfile.environments['AzureCloud']));
  return newProfile;
};
// Pins the test VM location env var to the value in effect when this
// interaction was recorded, so the replayed requests match.
exports.setEnvironment = function() {
  process.env['AZURE_VM_TEST_LOCATION'] = 'westus';
};
// Recorded (nock) HTTP fixtures replayed by this test: the same GET of load
// balancer "loadBalancerName" is expected twice, once over http and once over
// https. The response bodies/headers are verbatim recordings — do not edit.
exports.scopes = [[function (nock) {
var result =
nock('http://management.azure.com:443')
  .get('/subscriptions/2c224e7e-3ef5-431d-a57b-e71f4662e3a6/resourceGroups/xplat-test-lb/providers/Microsoft.Network/loadBalancers/loadBalancerName?api-version=2016-09-01')
  .reply(200, "{\r\n \"name\": \"loadBalancerName\",\r\n \"id\": \"/subscriptions/2c224e7e-3ef5-431d-a57b-e71f4662e3a6/resourceGroups/xplat-test-lb/providers/Microsoft.Network/loadBalancers/loadBalancerName\",\r\n \"etag\": \"W/\\\"a1f77993-ccf3-487f-a209-eadd9733e096\\\"\",\r\n \"type\": \"Microsoft.Network/loadBalancers\",\r\n \"location\": \"westus\",\r\n \"properties\": {\r\n \"provisioningState\": \"Succeeded\",\r\n \"resourceGuid\": \"74b2a4b6-1a4b-4ef7-9e51-1fd4e297150c\",\r\n \"frontendIPConfigurations\": [],\r\n \"backendAddressPools\": [],\r\n \"loadBalancingRules\": [],\r\n \"probes\": [],\r\n \"inboundNatRules\": [],\r\n \"outboundNatRules\": [],\r\n \"inboundNatPools\": []\r\n }\r\n}", { 'cache-control': 'no-cache',
  pragma: 'no-cache',
  'content-length': '642',
  'content-type': 'application/json; charset=utf-8',
  expires: '-1',
  etag: 'W/"a1f77993-ccf3-487f-a209-eadd9733e096"',
  'x-ms-request-id': 'c00a8a24-b612-4118-870f-5baef624649e',
  'strict-transport-security': 'max-age=31536000; includeSubDomains',
  server: 'Microsoft-HTTPAPI/2.0, Microsoft-HTTPAPI/2.0',
  'x-ms-ratelimit-remaining-subscription-reads': '14976',
  'x-ms-correlation-request-id': '9503135c-35e8-4894-bfcc-522461dab3e9',
  'x-ms-routing-request-id': 'CANADAEAST:20170310T155543Z:9503135c-35e8-4894-bfcc-522461dab3e9',
  date: 'Fri, 10 Mar 2017 15:55:43 GMT',
  connection: 'close' });
return result; },
// Second fixture: identical request/response, recorded over https.
function (nock) {
var result =
nock('https://management.azure.com:443')
  .get('/subscriptions/2c224e7e-3ef5-431d-a57b-e71f4662e3a6/resourceGroups/xplat-test-lb/providers/Microsoft.Network/loadBalancers/loadBalancerName?api-version=2016-09-01')
  .reply(200, "{\r\n \"name\": \"loadBalancerName\",\r\n \"id\": \"/subscriptions/2c224e7e-3ef5-431d-a57b-e71f4662e3a6/resourceGroups/xplat-test-lb/providers/Microsoft.Network/loadBalancers/loadBalancerName\",\r\n \"etag\": \"W/\\\"a1f77993-ccf3-487f-a209-eadd9733e096\\\"\",\r\n \"type\": \"Microsoft.Network/loadBalancers\",\r\n \"location\": \"westus\",\r\n \"properties\": {\r\n \"provisioningState\": \"Succeeded\",\r\n \"resourceGuid\": \"74b2a4b6-1a4b-4ef7-9e51-1fd4e297150c\",\r\n \"frontendIPConfigurations\": [],\r\n \"backendAddressPools\": [],\r\n \"loadBalancingRules\": [],\r\n \"probes\": [],\r\n \"inboundNatRules\": [],\r\n \"outboundNatRules\": [],\r\n \"inboundNatPools\": []\r\n }\r\n}", { 'cache-control': 'no-cache',
  pragma: 'no-cache',
  'content-length': '642',
  'content-type': 'application/json; charset=utf-8',
  expires: '-1',
  etag: 'W/"a1f77993-ccf3-487f-a209-eadd9733e096"',
  'x-ms-request-id': 'c00a8a24-b612-4118-870f-5baef624649e',
  'strict-transport-security': 'max-age=31536000; includeSubDomains',
  server: 'Microsoft-HTTPAPI/2.0, Microsoft-HTTPAPI/2.0',
  'x-ms-ratelimit-remaining-subscription-reads': '14976',
  'x-ms-correlation-request-id': '9503135c-35e8-4894-bfcc-522461dab3e9',
  'x-ms-routing-request-id': 'CANADAEAST:20170310T155543Z:9503135c-35e8-4894-bfcc-522461dab3e9',
  date: 'Fri, 10 Mar 2017 15:55:43 GMT',
  connection: 'close' });
return result; }]]; | MikhailTryakhov/azure-xplat-cli | test/recordings/arm-network-lb-tests/arm_network_load_balancers_show_should_display_load_balancers_details.nock.js | JavaScript | apache-2.0 | 4,100 |
/* table: admin/site data-table tweaks (layui-based UI) */
.site-table tbody tr td {text-align: center;}
.site-table tbody tr td .layui-btn+.layui-btn{margin-left: 0px;}
/* Pager pinned to the bottom of the viewport, above page content. */
.admin-table-page {position: fixed;z-index: 19940201;bottom: 0;width: 100%;background-color: #eee;border-bottom: 1px solid #ddd;left: 0px;}
.admin-table-page .page{padding-left:20px;}
.admin-table-page .page .layui-laypage {margin: 6px 0 0 0;}
/* Row hover highlight. */
.table-hover tbody tr:hover{ background-color: #EEEEEE; }
.admin-table{
}
/* Keep layui checkboxes vertically compact inside table cells. */
.admin-table .layui-form-checkbox{
	margin-top: 0;
	height: 20px;
	line-height: 20px;
}
| MikeHooge/weixinServer | src/main/resources/static/css/table.css | CSS | apache-2.0 | 530 |
//
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Warning: This code was generated by a tool.
//
// Changes to this file may cause incorrect behavior and will be lost if the
// code is regenerated.
using System;
using System.Linq;
using Microsoft.Azure;
using Microsoft.Azure.Management.Sql.Models;
namespace Microsoft.Azure.Management.Sql.Models
{
    /// <summary>
    /// Represents the response to a List Azure Sql Server Key request.
    /// </summary>
    /// <remarks>
    /// NOTE(review): the summary above says "List" although the type is the
    /// Get response — presumably a code-generator template slip; confirm
    /// against the service specification before correcting, since this file
    /// is tool-generated and manual edits will be lost on regeneration.
    /// </remarks>
    public partial class ServerKeyGetResponse : AzureOperationResponse
    {
        // Backing field for the ServerKey property (generator pattern).
        private ServerKey _serverKey;
        
        /// <summary>
        /// Optional. The Azure Sql Server Key object.
        /// </summary>
        public ServerKey ServerKey
        {
            get { return this._serverKey; }
            set { this._serverKey = value; }
        }
        
        /// <summary>
        /// Initializes a new instance of the ServerKeyGetResponse class.
        /// </summary>
        public ServerKeyGetResponse()
        {
        }
    }
| naveedaz/azure-sdk-for-net | src/ResourceManagement/Sql/SqlManagement/Generated/Models/ServerKeyGetResponse.cs | C# | apache-2.0 | 1,611 |
# Fluid Benchmark
This directory contains several model configurations and tools that are used to run
Fluid benchmarks for local and distributed training.
## Run the Benchmark
To start, run the following command to get the full help message:
```bash
python fluid_benchmark.py --help
```
Currently supported `--model` argument include:
* mnist
* resnet
  * you can choose to use a different dataset using `--data_set cifar10` or
`--data_set flowers`.
* vgg
* stacked_dynamic_lstm
* machine_translation
* Run the following command to start a benchmark job locally:
```bash
python fluid_benchmark.py --model mnist --device GPU
```
You can choose to use GPU/CPU training. With GPU training, you can specify
`--gpus <gpu_num>` to run multi GPU training.
You can set async mode parameter server. With async mode, you can specify
`--async_mode` to train model asynchronous.
* Run distributed training with parameter servers:
* see [run_fluid_benchmark.sh](https://github.com/PaddlePaddle/Paddle/blob/develop/benchmark/fluid/run_fluid_benchmark.sh) as an example.
* start parameter servers:
```bash
PADDLE_TRAINING_ROLE=PSERVER PADDLE_PSERVER_PORT=7164 PADDLE_PSERVER_IPS=127.0.0.1 PADDLE_TRAINERS=1 PADDLE_CURRENT_IP=127.0.0.1 PADDLE_TRAINER_ID=0 python fluid_benchmark.py --model mnist --device GPU --update_method pserver
sleep 15
```
* start trainers:
```bash
PADDLE_TRAINING_ROLE=TRAINER PADDLE_PSERVER_PORT=7164 PADDLE_PSERVER_IPS=127.0.0.1 PADDLE_TRAINERS=1 PADDLE_CURRENT_IP=127.0.0.1 PADDLE_TRAINER_ID=0 python fluid_benchmark.py --model mnist --device GPU --update_method pserver
```
* Run distributed training using NCCL2
```bash
PADDLE_PSERVER_PORT=7164 PADDLE_TRAINER_IPS=192.168.0.2,192.168.0.3 PADDLE_CURRENT_IP=127.0.0.1 PADDLE_TRAINER_ID=0 python fluid_benchmark.py --model mnist --device GPU --update_method nccl2
```
## Prepare the RecordIO file to Achieve Better Performance
Running the following command will generate RecordIO files like "mnist.recordio" under the path
and batch_size you choose, you can use batch_size=1 so that later reader can change the batch_size
at any time using `fluid.batch`.
```bash
python -c 'from recordio_converter import *; prepare_mnist("data", 1)'
```
## Run Distributed Benchmark on Kubernetes Cluster
You may need to build a Docker image before submitting a cluster job onto Kubernetes, or you will
have to start all those processes manually on each node, which is not recommended.
To build the Docker image, you need to choose a paddle "whl" package to run with, you may either
download it from
http://www.paddlepaddle.org/docs/develop/documentation/zh/build_and_install/pip_install_en.html or
build it on your own. Once you've got the "whl" package, put it under the current directory and run:
```bash
docker build -t [your docker image name]:[your docker image tag] .
```
Then push the image to a Docker registry that your Kubernetes cluster can reach.
We provide a script `kube_gen_job.py` to generate Kubernetes yaml files to submit
distributed benchmark jobs to your cluster. To generate a job yaml, just run:
```bash
python kube_gen_job.py --jobname myjob --pscpu 4 --cpu 8 --gpu 8 --psmemory 20 --memory 40 --pservers 4 --trainers 4 --entry "python fluid_benchmark.py --model mnist --gpus 8 --device GPU --update_method pserver " --disttype pserver
```
Then the yaml files are generated under directory `myjob`, you can run:
```bash
kubectl create -f myjob/
```
The job will then start.
## Notes for Run Fluid Distributed with NCCL2 and RDMA
Before running NCCL2 distributed jobs, please check that whether your node has multiple network
interfaces, try to add the environment variable `export NCCL_SOCKET_IFNAME=eth0` to use your actual
network device.
To run high-performance distributed training, you must prepare your hardware environment to be
able to run RDMA enabled network communication, please check out [this](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/howto/cluster/nccl2_rdma_training.md)
note for details.
| QiJune/Paddle | benchmark/fluid/README.md | Markdown | apache-2.0 | 4,128 |
// Licensed to Cloudera, Inc. under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. Cloudera, Inc. licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Knockout view model for the "running coordinator" page: wraps each raw
// coordinator action in a selectable row, and exposes search, selection and
// (throttled) log-filter state used by the actions table.
var RunningCoordinatorModel = (function () {
  var RunningCoordinatorModel = function (actions) {
    var self = this;
    // Factory turning one raw action payload into a row object with a
    // `selected` observable. `handleSelect` toggles only that row and clears
    // the global "all selected" flag.
    // NOTE(review): getStatusClass is assumed to be a page-level global
    // providing the label CSS class for a status — confirm in the template.
    self.Action = function (action) {
      return {
        id: action.id,
        url: action.url,
        number: action.number,
        type: action.type,
        status: action.status,
        statusClass: "label " + getStatusClass(action.status),
        externalId: action.externalId,
        externalIdUrl: action.externalIdUrl,
        title: action.title,
        nominalTime: action.nominalTime,
        createdTime: action.createdTime,
        lastModifiedTime: action.lastModifiedTime,
        errorMessage: action.errorMessage,
        errorCode: action.errorCode,
        missingDependencies: action.missingDependencies,
        selected: ko.observable(false),
        handleSelect: function (row, e) {
          e.stopPropagation();
          this.selected(!this.selected());
          self.allSelected(false);
        }
      };
    };
    self.isLoading = ko.observable(true);
    self.actions = ko.observableArray(ko.utils.arrayMap(actions, function (action) {
      return new self.Action(action);
    }));
    // Replaces the whole action list (used after a refresh from the server).
    self.setActions = function (actions) {
      self.actions(ko.utils.arrayMap(actions, function (action) {
        return new self.Action(action);
      }));
    }
    self.allSelected = ko.observable(false);
    // `filter` holds either [] (no filter) or a lowercase search string.
    self.filter = ko.observableArray([]);
    self.searchFilter = ko.observable("");
    self.isRefreshingLogs = ko.observable(false);
    // Log filter inputs; the computed below rebuilds an "<H>h:<M>m" string and
    // is throttled so log refreshes don't fire on every keystroke.
    self.logFilterRecentHours = ko.observable("");
    self.logFilterRecentMinutes = ko.observable("");
    self.logFilterRecent = ko.computed(function () {
      var _h = self.logFilterRecentHours();
      var _m = self.logFilterRecentMinutes();
      return (_h != "" ? _h + "h" : "") + (_h != "" && _m != "" ? ":" : "") + (_m != "" ? _m + "m" : "");
    }).extend({throttle: 500});
    self.logFilterLimit = ko.observable("5000").extend({throttle: 500});
    self.logFilterText = ko.observable("").extend({throttle: 500});
    // Any log-filter change triggers a reload. NOTE(review): refreshLogs is
    // assumed to be provided by the enclosing page — confirm in the template.
    self.logFilterRecent.subscribe(function () {
      refreshLogs();
    });
    self.logFilterLimit.subscribe(function () {
      refreshLogs();
    });
    self.logFilterText.subscribe(function () {
      refreshLogs();
    });
    self.isLogFilterVisible = ko.observable(false);
    self.toggleLogFilterVisible = function () {
      self.isLogFilterVisible(!self.isLogFilterVisible());
    };
    // Select every action whose lowercase status equals `filter`.
    self.select = function (filter) {
      ko.utils.arrayFilter(self.actions(), function (action) {
        if (action.status.toLowerCase() === filter) {
          action.selected(true);
        }
      });
    };
    self.clearAllSelections = function () {
      ko.utils.arrayFilter(self.actions(), function (action) {
        action.selected(false);
      });
      self.allSelected(false);
    };
    // Deselect every action whose lowercase status equals `filter`.
    self.clearSelections = function (filter) {
      ko.utils.arrayFilter(self.actions(), function (action) {
        if (action.status.toLowerCase() === filter) {
          action.selected(false);
        }
      });
      self.allSelected(false);
    };
    // Two modes: with a text filter active, toggle only title matches
    // (filter is treated as a regex); otherwise toggle every action that has
    // an id.
    self.selectAll = function () {
      var regexp;
      if (!Array.isArray(self.filter())) {
        ko.utils.arrayForEach(self.actions(), function (action) {
          regexp = new RegExp(self.filter());
          self.allSelected(!self.allSelected());
          if (regexp.test(action.title.toLowerCase())) {
            action.selected(!action.selected());
          }
        });
        return true;
      }
      self.allSelected(!self.allSelected());
      ko.utils.arrayForEach(self.actions(), function (action) {
        if (action.id) {
          action.selected(self.allSelected());
        }
      });
      return true;
    };
    // Action numbers (as strings) of the currently selected rows.
    self.selectedActions = ko.computed(function () {
      var actionlist = [];
      ko.utils.arrayFilter(self.actions(), function (action) {
        if (action.selected()) {
          actionlist.push(action.number.toString());
        }
      });
      return actionlist;
    });
    // Keep `filter` in sync with the search box and recompute "all selected".
    self.searchFilter.subscribe(function () {
      if (self.searchFilter().length === 0) {
        self.filter([]);
      } else {
        self.filter(self.searchFilter().toLowerCase());
      }
      if (self.selectedActions().length === self.actions().length) {
        self.allSelected(true);
      } else {
        self.allSelected(false);
      }
    });
    // Rows shown in the table. With no filter, all actions. With a text
    // filter, title regex matches. NOTE(review): when the filter text is one
    // of 'succeeded'/'running'/'failed' nothing is collected here, so the
    // result is empty — presumably status filtering is handled elsewhere;
    // confirm before relying on this.
    self.filteredActions = ko.pureComputed(function () {
      var filter = self.filter(),
          actions = [],
          regexp,
          data;
      if (self.filter().length === 0) {
        return self.actions();
      }
      ko.utils.arrayFilter(self.actions(), function (action) {
        if ($.inArray(filter.toString(), ['succeeded', 'running', 'failed']) === -1) {
          regexp = new RegExp(filter);
          if (regexp.test(action.title.toLowerCase())) {
            actions.push(action);
          }
        }
      });
      if (Array.isArray(self.filter())) {
        data = self.actions()
      } else {
        data = actions;
      }
      return data;
    });
  };
  return RunningCoordinatorModel;
})();
| kawamon/hue | apps/oozie/src/oozie/static/oozie/js/list-oozie-coordinator.ko.js | JavaScript | apache-2.0 | 5,905 |
<?php if ( ! defined('BASEPATH')) exit('No direct script access allowed');
/**
 * CodeIgniter Image_lib override.
 *
 * Replaces image_reproportion() so that resizing never ENLARGES an image:
 * if the original already fits inside the requested box, the requested
 * dimensions are clamped to the original size before the aspect-ratio
 * recalculation runs.
 */
class MY_Image_lib extends CI_Image_lib{
 //stop images from enlarging
 function image_reproportion()
 {
  // Bail out on missing/zero target dimensions.
  if(!is_numeric($this->width) OR !is_numeric($this->height) OR $this->width == 0 OR $this->height == 0)
  {
   return;
  }
  // Bail out on missing/zero source dimensions.
  if(!is_numeric($this->orig_width) OR !is_numeric($this->orig_height) OR $this->orig_width == 0 OR $this->orig_height == 0)
  {
   return;
  }
  // STEP 1: Are new measures needed?
  if($this->orig_width <= $this->width && $this->orig_height <= $this->height)
  {
   // Image is smaller
   $this->width = $this->orig_width;
   $this->height = $this->orig_height;
  }
  // STEP 2: Calculate new measurements
  // <!-- Original code from here -->
  // Candidate sizes that preserve the original aspect ratio.
  $new_width = ceil($this->orig_width*$this->height/$this->orig_height);
  $new_height = ceil($this->width*$this->orig_height/$this->orig_width);
  // Negative ratio => target box is relatively wider than the source.
  $ratio = (($this->orig_height/$this->orig_width) - ($this->height/$this->width));
  if ($this->master_dim != 'width' AND $this->master_dim != 'height')
  {
   $this->master_dim = ($ratio < 0) ? 'width' : 'height';
  }
  // Adjust the non-master dimension so the aspect ratio is preserved.
  if (($this->width != $new_width) AND ($this->height != $new_height))
  {
   if ($this->master_dim == 'height')
   {
    $this->width = $new_width;
   }
   else
   {
    $this->height = $new_height;
   }
  }
 }
}
| jonyhandoko/khayana | gocart/libraries/MY_Image_lib.php | PHP | apache-2.0 | 1,346 |
"""Support for International Space Station data sensor."""
from datetime import timedelta
import logging
import pyiss
import requests
import voluptuous as vol
from homeassistant.components.binary_sensor import PLATFORM_SCHEMA, BinarySensorDevice
from homeassistant.const import (
ATTR_LATITUDE,
ATTR_LONGITUDE,
CONF_NAME,
CONF_SHOW_ON_MAP,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)

# Extra state attribute keys exposed by the sensor.
ATTR_ISS_NEXT_RISE = "next_rise"
ATTR_ISS_NUMBER_PEOPLE_SPACE = "number_of_people_in_space"

DEFAULT_NAME = "ISS"
DEFAULT_DEVICE_CLASS = "visible"

# Throttle interval for hitting the ISS API.
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=60)

# Platform configuration: optional friendly name and map visibility toggle.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
        vol.Optional(CONF_SHOW_ON_MAP, default=False): cv.boolean,
    }
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the ISS binary sensor from the platform config.

    Requires latitude/longitude in the Home Assistant core config; does an
    initial (blocking) fetch so a bad API connection fails setup early.
    """
    if None in (hass.config.latitude, hass.config.longitude):
        _LOGGER.error("Latitude or longitude not set in Home Assistant config")
        return False
    try:
        iss_data = IssData(hass.config.latitude, hass.config.longitude)
        # First fetch happens here so setup fails fast on API errors.
        iss_data.update()
    except requests.exceptions.HTTPError as error:
        _LOGGER.error(error)
        return False
    name = config.get(CONF_NAME)
    show_on_map = config.get(CONF_SHOW_ON_MAP)
    add_entities([IssBinarySensor(iss_data, name, show_on_map)], True)
class IssBinarySensor(BinarySensorDevice):
    """Binary sensor that is on while the ISS is above the configured spot."""

    def __init__(self, iss_data, name, show):
        """Store the shared data object, entity name and map option."""
        self.iss_data = iss_data
        self._state = None
        self._name = name
        self._show_on_map = show

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def is_on(self):
        """Return true if the ISS is currently above the location."""
        if not self.iss_data:
            return False
        return self.iss_data.is_above

    @property
    def device_class(self):
        """Return the class of this sensor."""
        return DEFAULT_DEVICE_CLASS

    @property
    def device_state_attributes(self):
        """Return the extra state attributes (people in space, next rise, position)."""
        if not self.iss_data:
            return None
        attrs = {
            ATTR_ISS_NUMBER_PEOPLE_SPACE: self.iss_data.number_of_people_in_space,
            ATTR_ISS_NEXT_RISE: self.iss_data.next_rise,
        }
        position = self.iss_data.position
        if self._show_on_map:
            # Standard lat/long keys make the entity show up on the map.
            attrs[ATTR_LONGITUDE] = position.get("longitude")
            attrs[ATTR_LATITUDE] = position.get("latitude")
        else:
            attrs["long"] = position.get("longitude")
            attrs["lat"] = position.get("latitude")
        return attrs

    def update(self):
        """Refresh the shared ISS data object."""
        self.iss_data.update()
class IssData:
    """Get data from the ISS API."""

    def __init__(self, latitude, longitude):
        """Initialize the data object for the given observer coordinates."""
        # Populated on the first successful update().
        self.is_above = None
        self.next_rise = None
        self.number_of_people_in_space = None
        self.position = None
        self.latitude = latitude
        self.longitude = longitude

    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    def update(self):
        """Get the latest data from the ISS API.

        NOTE(review): returns False on failure but None on success — callers
        should not rely on the return value; state is exposed via attributes.
        """
        try:
            iss = pyiss.ISS()
            self.is_above = iss.is_ISS_above(self.latitude, self.longitude)
            self.next_rise = iss.next_rise(self.latitude, self.longitude)
            self.number_of_people_in_space = iss.number_of_people_in_space()
            self.position = iss.current_location()
        except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError):
            _LOGGER.error("Unable to retrieve data")
            return False
| leppa/home-assistant | homeassistant/components/iss/binary_sensor.py | Python | apache-2.0 | 3,948 |
# -*- coding: utf-8 -*-
###############################################################################
#
# DeleteDBSecurityGroup
# Deletes a specified database security group.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class DeleteDBSecurityGroup(Choreography):
    """Choreo wrapper for the Amazon RDS DeleteDBSecurityGroup operation."""
    def __init__(self, temboo_session):
        """
        Create a new instance of the DeleteDBSecurityGroup Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(DeleteDBSecurityGroup, self).__init__(temboo_session, '/Library/Amazon/RDS/DeleteDBSecurityGroup')
    def new_input_set(self):
        """Return an empty input set for this Choreo."""
        return DeleteDBSecurityGroupInputSet()
    def _make_result_set(self, result, path):
        """Wrap a raw execution result in this Choreo's result-set type."""
        return DeleteDBSecurityGroupResultSet(result, path)
    def _make_execution(self, session, exec_id, path):
        """Wrap an in-flight execution in this Choreo's execution type."""
        return DeleteDBSecurityGroupChoreographyExecution(session, exec_id, path)
class DeleteDBSecurityGroupInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the DeleteDBSecurityGroup
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """
    # Each setter below forwards to InputSet._set_input under the Temboo
    # input name matching the method suffix.
    def set_AWSAccessKeyId(self, value):
        """
        Set the value of the AWSAccessKeyId input for this Choreo. ((required, string) The Access Key ID provided by Amazon Web Services.)
        """
        super(DeleteDBSecurityGroupInputSet, self)._set_input('AWSAccessKeyId', value)
    def set_AWSSecretKeyId(self, value):
        """
        Set the value of the AWSSecretKeyId input for this Choreo. ((required, string) The Secret Key ID provided by Amazon Web Services.)
        """
        super(DeleteDBSecurityGroupInputSet, self)._set_input('AWSSecretKeyId', value)
    def set_DBSecurityGroupName(self, value):
        """
        Set the value of the DBSecurityGroupName input for this Choreo. ((required, string) The name for the security group you want to delete.)
        """
        super(DeleteDBSecurityGroupInputSet, self)._set_input('DBSecurityGroupName', value)
    def set_UserRegion(self, value):
        """
        Set the value of the UserRegion input for this Choreo. ((optional, string) The AWS region that corresponds to the RDS endpoint you wish to access. The default region is "us-east-1". See description below for valid values.)
        """
        super(DeleteDBSecurityGroupInputSet, self)._set_input('UserRegion', value)
class DeleteDBSecurityGroupResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the DeleteDBSecurityGroup Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """
    def getJSONFromString(self, str):
        """Parse a JSON string into Python data structures."""
        # NOTE(review): the parameter name shadows the built-in ``str``; left
        # unchanged because this file is generated and renaming would alter
        # the keyword-callable signature.
        return json.loads(str)
    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. ((string) The response from Amazon.)
        """
        return self._output.get('Response', None)
class DeleteDBSecurityGroupChoreographyExecution(ChoreographyExecution):
    """Represents an in-flight execution of the DeleteDBSecurityGroup Choreo."""
    def _make_result_set(self, response, path):
        """Wrap a raw response in this Choreo's result-set type."""
        return DeleteDBSecurityGroupResultSet(response, path)
| jordanemedlock/psychtruths | temboo/core/Library/Amazon/RDS/DeleteDBSecurityGroup.py | Python | apache-2.0 | 4,034 |
#!/usr/bin/python2
# Copyright 2012 Anton Beloglazov
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import random
import shutil
import time
from datetime import datetime
from db import init_db
# Command-line interface: this is a Python 2 script (print statements) that
# requires exactly four positional arguments, validated below.
if len(sys.argv) < 5:
    print 'You must specify 4 arguments:'
    print '1. The MySQL DB user name'
    print '2. The MySQL DB password'
    print '3. The start datetime in the format: %Y-%m-%d %H:%M:%S'
    print '4. The finish datetime in the format: %Y-%m-%d %H:%M:%S'
    sys.exit(1)
# Connect to the local "neat" MySQL database with the supplied credentials.
db = init_db('mysql://' + sys.argv[1] + ':' + sys.argv[2] + '@localhost/neat')
# Parse the analysis window boundaries from the CLI arguments.
start_time = datetime.fromtimestamp(
    time.mktime(time.strptime(sys.argv[3], '%Y-%m-%d %H:%M:%S')))
finish_time = datetime.fromtimestamp(
    time.mktime(time.strptime(sys.argv[4], '%Y-%m-%d %H:%M:%S')))
#print "Start time: " + str(start_time)
#print "Finish time: " + str(finish_time)
def total_seconds(delta):
    """Return the length of a timedelta in seconds.

    Equivalent to ``timedelta.total_seconds()``, kept hand-rolled for the
    older Python runtimes this script targets.
    """
    whole_seconds = delta.days * 24 * 3600 + delta.seconds
    return (whole_seconds * 1000000 + delta.microseconds) / 1000000
# Pass 1: accumulate, across all hosts, the time spent in the "off" state
# (state 0, per the off_time variable below).  prev_state starts at 1, i.e.
# each host is presumed active at start_time until its first recorded state
# change -- TODO confirm against the collector's semantics.
total_idle_time = 0
for hostname, host_id in db.select_host_ids().items():
    prev_timestamp = start_time
    prev_state = 1
    states = {0: [], 1: []}
    for timestamp, state in db.select_host_states(host_id, start_time, finish_time):
        if prev_timestamp:
            # Credit the elapsed interval to the state that was in effect.
            states[prev_state].append(total_seconds(timestamp - prev_timestamp))
        prev_timestamp = timestamp
        prev_state = state
    # Account for the tail interval from the last recorded change to finish_time.
    states[prev_state].append(total_seconds(finish_time - prev_timestamp))
    #print states
    off_time = sum(states[0])
    total_idle_time += off_time
# Pass 2: accumulate overload (state 1) and non-overload (state 0) durations
# per host.  prev_state starts at 0, i.e. hosts are presumed non-overloaded
# at start_time until the first recorded overload transition -- TODO confirm.
total_time = 0
total_overload_time = 0
for hostname, host_id in db.select_host_ids().items():
    prev_timestamp = start_time
    prev_state = 0
    states = {0: [], 1: []}
    for timestamp, state in db.select_host_overload(host_id, start_time, finish_time):
        if prev_timestamp:
            # Credit the elapsed interval to the state that was in effect.
            states[prev_state].append(total_seconds(timestamp - prev_timestamp))
        prev_timestamp = timestamp
        prev_state = state
    # Account for the tail interval up to finish_time.
    states[prev_state].append(total_seconds(finish_time - prev_timestamp))
    #print states
    nonoverload_time = sum(states[0])
    overload_time = sum(states[1])
    total_time += nonoverload_time + overload_time
    total_overload_time += overload_time
# The fraction is computed over active time only (idle time subtracted).
print "Total time: " + str(total_time)
print "Overload time: " + str(total_overload_time)
print "Overload time fraction: " + str(float(total_overload_time) / (total_time - total_idle_time))
| beloglazov/openstack-neat | utils/overload-time-fraction.py | Python | apache-2.0 | 2,956 |
go-i18n [](http://travis-ci.org/nicksnyder/go-i18n)
=======
go-i18n is a Go [package](#i18n-package) and a [command](#goi18n-command) that can be used to translate Go programs into multiple languages.
Requires Go 1.2.
Features
--------
* Implements [CLDR plural rules](http://cldr.unicode.org/index/cldr-spec/plural-rules).
* Uses [text/template](http://golang.org/pkg/text/template/) for strings with variables.
* Translation files are simple JSON.
* [Documented](http://godoc.org/github.com/nicksnyder/go-i18n) and [tested](https://travis-ci.org/nicksnyder/go-i18n)!
i18n package
------------
The i18n package provides runtime APIs for looking up translated strings.
```go
import "github.com/nicksnyder/go-i18n/i18n"
```
##### Loading translations
Load translation files during your program's initialization.
The name of a translation file must contain a supported [language tag](http://en.wikipedia.org/wiki/IETF_language_tag).
```go
i18n.MustLoadTranslationFile("path/to/fr-FR.all.json")
```
##### Selecting a locale
Tfunc returns a function that can lookup the translation of a string for that locale.
It accepts one or more locale parameters so you can gracefully fallback to other locales.
```go
userLocale = "ar-AR" // user preference, accept header, language cookie
defaultLocale = "en-US" // known valid locale
T, err := i18n.Tfunc(userLocale, defaultLocale)
```
##### Loading a string translation
Use the translation function to fetch the translation of a string.
```go
fmt.Println(T("Hello world"))
```
Usually it is a good idea to identify strings by a generic id rather than the English translation, but the rest of this document will continue to use the English translation for readability.
```go
T("program_greeting")
```
##### Strings with variables
You can have variables in your string using [text/template](http://golang.org/pkg/text/template/) syntax.
```go
T("Hello {{.Person}}", map[string]interface{}{
"Person": "Bob",
})
```
##### Plural strings
Each language handles pluralization differently. A few examples:
* English treats one as singular and all other numbers as plural (e.g. 0 cats, 1 cat, 2 cats).
* French treats zero as singular.
* Japanese has a single plural form for all numbers.
* Arabic has six different plural forms!
The translation function handles [all of this logic](http://www.unicode.org/cldr/charts/latest/supplemental/language_plural_rules.html) for you.
```go
T("You have {{.Count}} unread emails", 2)
T("I am {{.Count}} meters tall.", "1.7")
```
With variables:
```go
T("{{.Person}} has {{.Count}} unread emails", 2, map[string]interface{}{
"Person": "Bob",
})
```
Sentences with multiple plural components can be supported with nesting.
```go
T("{{.Person}} has {{.Count}} unread emails in the past {{.Timeframe}}.", 3, map[string]interface{}{
"Person": "Bob",
"Timeframe": T("{{.Count}} days", 2),
})
```
A complete example is [here](i18n/example_test.go).
##### Strings in templates
You can call the `.Funcs()` method on a [text/template](http://golang.org/pkg/text/template/#Template.Funcs) or [html/template](http://golang.org/pkg/html/template/#Template.Funcs) to register the translation function for usage inside of that template.
A complete example is [here](i18n/exampletemplate_test.go).
goi18n command
--------------
The goi18n command provides functionality for managing the translation process.
### Installation
Make sure you have [setup GOPATH](http://golang.org/doc/code.html#GOPATH).
go get -u github.com/nicksnyder/go-i18n/goi18n
goi18n -help
### Workflow
A typical workflow looks like this:
1. Add a new string to your source code.
```go
T("settings_title")
```
2. Add the string to en-US.all.json
```json
[
{
"id": "settings_title",
"translation": "Settings"
}
]
```
3. Run goi18n
```
goi18n path/to/*.all.json
```
4. Send `path/to/*.untranslated.json` off to be translated.
5. Run goi18n again to merge the translations
```sh
goi18n path/to/*.all.json path/to/*.untranslated.json
```
Translation files
-----------------
A translation file stores translated and untranslated strings.
Example:
```json
[
{
"id": "d_days",
"translation": {
"one": "{{.Count}} day",
"other": "{{.Count}} days"
}
},
{
"id": "my_height_in_meters",
"translation": {
"one": "I am {{.Count}} meter tall.",
"other": "I am {{.Count}} meters tall."
}
},
{
"id": "person_greeting",
"translation": "Hello {{.Person}}"
},
{
"id": "person_unread_email_count",
"translation": {
"one": "{{.Person}} has {{.Count}} unread email.",
"other": "{{.Person}} has {{.Count}} unread emails."
}
},
{
"id": "person_unread_email_count_timeframe",
"translation": {
"one": "{{.Person}} has {{.Count}} unread email in the past {{.Timeframe}}.",
"other": "{{.Person}} has {{.Count}} unread emails in the past {{.Timeframe}}."
}
},
{
"id": "program_greeting",
"translation": "Hello world"
},
{
"id": "your_unread_email_count",
"translation": {
"one": "You have {{.Count}} unread email.",
"other": "You have {{.Count}} unread emails."
}
}
]
```
Supported languages
-------------------
* Arabic (`ar`)
* Catalan (`ca`)
* Chinese (simplified and traditional) (`zh`)
* Czech (`cs`)
* Danish (`da`)
* Dutch (`nl`)
* English (`en`)
* French (`fr`)
* German (`de`)
* Italian (`it`)
* Japanese (`ja`)
* Portuguese (`pt`)
* Portuguese (Brazilian) (`pt-BR`)
* Spanish (`es`)
More languages are straightforward to add:
1. Lookup the language's [CLDR plural rules](http://www.unicode.org/cldr/charts/latest/supplemental/language_plural_rules.html).
2. Add the language to [language.go](i18n/language/language.go):
```go
var languages = map[string]*Language{
// ...
"en": &Language{
ID: "en",
Name: "English",
PluralCategories: newSet(plural.One, plural.Other),
PluralFunc: func(ops *plural.Operands) plural.Category {
if ops.I == 1 && ops.V == 0 {
return plural.One
}
return plural.Other
},
},
// ...
}
```
3. Add a test to [language_test.go](i18n/language/language_test.go)
4. Submit a pull request!
License
-------
go-i18n is available under the MIT license. See the [LICENSE](LICENSE) file for more info.
| liuhong1happy/ConsoleWindowApp | src/go-i18n/README.md | Markdown | apache-2.0 | 6,617 |
#!/usr/bin/env bash
#
# Generates documentation and pushes it up to the site
# WARNING: Do NOT run this script unless you have remote `upstream` set properly
#
# `set -e` aborts on the first failing step, matching the original
# short-circuit (&&) chain.
set -e

rm -rf tmp/docs
npm run build_docs
git checkout gh-pages
git fetch upstream
git rebase upstream/gh-pages
cp -r ./tmp/docs/ ./
rm -rf tmp/
git add .
git commit -am "chore(docs): docs generated automatically"
git push upstream gh-pages
| elliotschi/rxjs | publish_docs.sh | Shell | apache-2.0 | 421 |
// Copyright 2007-2011 Chris Patterson, Dru Sellers, Travis Smith, et. al.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
namespace MassTransit.Transports
{
    using System;

    /// <summary>
    /// A chain of <see cref="ConnectionPolicy"/> instances governing how a
    /// transport connection is managed. The Push/Pop pairing suggests LIFO
    /// (stack) ordering -- confirm against implementations.
    /// </summary>
    public interface ConnectionPolicyChain
    {
        /// <summary>
        /// Adds the specified policy to the chain.
        /// </summary>
        void Push(ConnectionPolicy policy);

        /// <summary>
        /// Removes the specified policy from the chain.
        /// </summary>
        void Pop(ConnectionPolicy policy);

        /// <summary>
        /// Advances to the next policy in the chain, invoking
        /// <paramref name="callback"/> when the chain permits the
        /// operation to proceed -- TODO confirm against implementations.
        /// </summary>
        void Next(Action callback);
    }
} | lahma/MassTransit | src/MassTransit/Transports/ConnectionPolicyChain.cs | C# | apache-2.0 | 870 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version
* 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package org.apache.storm.trident.windowing.strategy;
import org.apache.storm.trident.windowing.config.WindowConfig;
import org.apache.storm.windowing.EvictionPolicy;
import org.apache.storm.windowing.TimeEvictionPolicy;
import org.apache.storm.windowing.TimeTriggerPolicy;
import org.apache.storm.windowing.TriggerHandler;
import org.apache.storm.windowing.TriggerPolicy;
/**
 * This class represents a tumbling window strategy based on the window duration from the given
 * {@code tumblingDurationWindow} configuration. In this strategy, window and sliding durations are equal.
 */
public final class TumblingDurationWindowStrategy<T> extends BaseWindowStrategy<T> {
    /**
     * Creates a tumbling duration window strategy from the given window configuration.
     *
     * @param tumblingDurationWindow window configuration whose window and sliding lengths are equal time durations
     */
    public TumblingDurationWindowStrategy(WindowConfig tumblingDurationWindow) {
        super(tumblingDurationWindow);
    }
    /**
     * Returns a {@code TriggerPolicy} which triggers for every given sliding duration.
     */
    @Override
    public TriggerPolicy<T, ?> getTriggerPolicy(TriggerHandler triggerHandler, EvictionPolicy<T, ?> evictionPolicy) {
        return new TimeTriggerPolicy<>(windowConfig.getSlidingLength(), triggerHandler, evictionPolicy);
    }
    /**
     * Returns an {@code EvictionPolicy} instance which evicts elements after given window duration.
     */
    @Override
    public EvictionPolicy<T, ?> getEvictionPolicy() {
        return new TimeEvictionPolicy<>(windowConfig.getWindowLength());
    }
}
| kishorvpatil/incubator-storm | storm-client/src/jvm/org/apache/storm/trident/windowing/strategy/TumblingDurationWindowStrategy.java | Java | apache-2.0 | 2,204 |
# ceph-base
Ceph base image (fedora 24 with the latest Ceph release installed).
## Docker Hub/Registry location
<https://registry.hub.docker.com/u/ceph/base/>
## Usage (example)
```bash
docker run -i -t ceph/base
```
| cdxvirt/ceph-docker | ceph-releases/jewel/fedora/24/base/README.md | Markdown | apache-2.0 | 222 |
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package client
import (
"bytes"
"encoding/json"
"fmt"
"net/http"
"net/url"
"path"
"github.com/coreos/fleet/Godeps/_workspace/src/golang.org/x/net/context"
"github.com/coreos/fleet/Godeps/_workspace/src/github.com/coreos/etcd/pkg/types"
)
var (
defaultV2MembersPrefix = "/v2/members"
)
type Member struct {
// ID is the unique identifier of this Member.
ID string `json:"id"`
// Name is a human-readable, non-unique identifier of this Member.
Name string `json:"name"`
// PeerURLs represents the HTTP(S) endpoints this Member uses to
// participate in etcd's consensus protocol.
PeerURLs []string `json:"peerURLs"`
// ClientURLs represents the HTTP(S) endpoints on which this Member
// serves it's client-facing APIs.
ClientURLs []string `json:"clientURLs"`
}
type memberCollection []Member
func (c *memberCollection) UnmarshalJSON(data []byte) error {
d := struct {
Members []Member
}{}
if err := json.Unmarshal(data, &d); err != nil {
return err
}
if d.Members == nil {
*c = make([]Member, 0)
return nil
}
*c = d.Members
return nil
}
// memberCreateRequest is the JSON body sent when proposing a new Member.
type memberCreateRequest struct {
	PeerURLs types.URLs
}
// MarshalJSON serializes the request as {"peerURLs": [...]}, flattening
// the typed URLs into plain strings.
func (m *memberCreateRequest) MarshalJSON() ([]byte, error) {
	s := struct {
		PeerURLs []string `json:"peerURLs"`
	}{
		PeerURLs: make([]string, len(m.PeerURLs)),
	}
	for i, u := range m.PeerURLs {
		s.PeerURLs[i] = u.String()
	}
	return json.Marshal(&s)
}
// NewMembersAPI constructs a new MembersAPI that uses HTTP to
// interact with etcd's membership API.
func NewMembersAPI(c Client) MembersAPI {
	return &httpMembersAPI{
		client: c,
	}
}
// MembersAPI describes the operations offered by etcd's cluster
// membership endpoint.
type MembersAPI interface {
	// List enumerates the current cluster membership.
	List(ctx context.Context) ([]Member, error)
	// Add instructs etcd to accept a new Member into the cluster.
	Add(ctx context.Context, peerURL string) (*Member, error)
	// Remove demotes an existing Member out of the cluster.
	Remove(ctx context.Context, mID string) error
}
// httpMembersAPI is the HTTP-backed implementation of MembersAPI.
type httpMembersAPI struct {
	client httpClient
}
// List issues a GET against the members endpoint; only 200 OK is accepted,
// and the body is decoded through memberCollection.
func (m *httpMembersAPI) List(ctx context.Context) ([]Member, error) {
	req := &membersAPIActionList{}
	resp, body, err := m.client.Do(ctx, req)
	if err != nil {
		return nil, err
	}
	if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
		return nil, err
	}
	var mCollection memberCollection
	if err := json.Unmarshal(body, &mCollection); err != nil {
		return nil, err
	}
	return []Member(mCollection), nil
}
// Add validates the supplied peer URL, then POSTs it to the members
// endpoint. 201 Created yields the newly added Member; 409 Conflict is
// decoded as a membersError and returned to the caller.
func (m *httpMembersAPI) Add(ctx context.Context, peerURL string) (*Member, error) {
	urls, err := types.NewURLs([]string{peerURL})
	if err != nil {
		return nil, err
	}
	req := &membersAPIActionAdd{peerURLs: urls}
	resp, body, err := m.client.Do(ctx, req)
	if err != nil {
		return nil, err
	}
	if err := assertStatusCode(resp.StatusCode, http.StatusCreated, http.StatusConflict); err != nil {
		return nil, err
	}
	// Anything other than 201 (i.e. the accepted 409) carries an error
	// payload instead of a Member.
	if resp.StatusCode != http.StatusCreated {
		var merr membersError
		if err := json.Unmarshal(body, &merr); err != nil {
			return nil, err
		}
		return nil, merr
	}
	var memb Member
	if err := json.Unmarshal(body, &memb); err != nil {
		return nil, err
	}
	return &memb, nil
}
// Remove issues a DELETE for the given member ID. Both 204 No Content and
// 410 Gone are treated as success.
func (m *httpMembersAPI) Remove(ctx context.Context, memberID string) error {
	req := &membersAPIActionRemove{memberID: memberID}
	resp, _, err := m.client.Do(ctx, req)
	if err != nil {
		return err
	}
	return assertStatusCode(resp.StatusCode, http.StatusNoContent, http.StatusGone)
}
// membersAPIActionList builds the GET request that lists cluster members.
type membersAPIActionList struct{}
func (l *membersAPIActionList) HTTPRequest(ep url.URL) *http.Request {
	u := v2MembersURL(ep)
	// The error from http.NewRequest is ignored throughout this file;
	// presumably safe because the URL comes from an already-parsed
	// endpoint -- TODO confirm.
	req, _ := http.NewRequest("GET", u.String(), nil)
	return req
}
// membersAPIActionRemove builds the DELETE request for a single member.
type membersAPIActionRemove struct {
	memberID string
}
func (d *membersAPIActionRemove) HTTPRequest(ep url.URL) *http.Request {
	u := v2MembersURL(ep)
	// The member ID is appended as the final path segment.
	u.Path = path.Join(u.Path, d.memberID)
	req, _ := http.NewRequest("DELETE", u.String(), nil)
	return req
}
// membersAPIActionAdd builds the POST request that proposes a new member.
type membersAPIActionAdd struct {
	peerURLs types.URLs
}
func (a *membersAPIActionAdd) HTTPRequest(ep url.URL) *http.Request {
	u := v2MembersURL(ep)
	m := memberCreateRequest{PeerURLs: a.peerURLs}
	// Marshal error ignored, matching the file's convention for request
	// construction.
	b, _ := json.Marshal(&m)
	req, _ := http.NewRequest("POST", u.String(), bytes.NewReader(b))
	req.Header.Set("Content-Type", "application/json")
	return req
}
// assertStatusCode returns nil when got matches one of the acceptable
// status codes in want, and an error otherwise.
func assertStatusCode(got int, want ...int) (err error) {
	matched := false
	for _, acceptable := range want {
		if got == acceptable {
			matched = true
			break
		}
	}
	if !matched {
		err = fmt.Errorf("unexpected status code %d", got)
	}
	return
}
// v2MembersURL adds the necessary path to the provided endpoint
// to route requests to the default v2 members API.
func v2MembersURL(ep url.URL) *url.URL {
	// ep is received by value, so mutating Path here leaves the caller's
	// URL untouched.
	ep.Path = path.Join(ep.Path, defaultV2MembersPrefix)
	return &ep
}
// membersError is the JSON error payload returned by the members API
// (e.g. on a 409 Conflict from Add).
type membersError struct {
	Message string `json:"message"`
	// Code is never populated from the wire (tag "-").
	Code int `json:"-"`
}
// Error implements the error interface by returning the server's message.
func (e membersError) Error() string {
	return e.Message
}
| mischief/fleet | Godeps/_workspace/src/github.com/coreos/etcd/client/members.go | GO | apache-2.0 | 5,382 |
---
external help file: Microsoft.Azure.Commands.DataFactoryV2.dll-Help.xml
Module Name: AzureRM.DataFactoryV2
online version: https://docs.microsoft.com/en-us/powershell/module/azurerm.datafactories/start-azurermdatafactoryv2trigger
schema: 2.0.0
---
# Start-AzureRmDataFactoryV2Trigger
## SYNOPSIS
Starts a trigger in a data factory.
## SYNTAX
### ByFactoryName (Default)
```
Start-AzureRmDataFactoryV2Trigger [-Name] <String> [-ResourceGroupName] <String> [-DataFactoryName] <String>
[-Force] [-DefaultProfile <IAzureContextContainer>] [-WhatIf] [-Confirm] [<CommonParameters>]
```
### ByInputObject
```
Start-AzureRmDataFactoryV2Trigger [-InputObject] <PSTrigger> [-Force]
[-DefaultProfile <IAzureContextContainer>] [-WhatIf] [-Confirm] [<CommonParameters>]
```
### ByResourceId
```
Start-AzureRmDataFactoryV2Trigger [-ResourceId] <String> [-Force] [-DefaultProfile <IAzureContextContainer>]
[-WhatIf] [-Confirm] [<CommonParameters>]
```
## DESCRIPTION
The **Start-AzureRmDataFactoryV2Trigger** cmdlet starts a trigger in a data factory. If the trigger is in the 'Stopped' state, the cmdlet starts the trigger and it eventually invokes pipelines based on its definition. If the trigger is already in the 'Started' state, this cmdlet has no effect. If the Force parameter is specified, the cmdlet doesn't prompt before starting the trigger.
## EXAMPLES
### Example 1: Start a trigger
```
Start-AzureRmDataFactoryV2Trigger -ResourceGroupName "ADF" -DataFactoryName "WikiADF" -TriggerName "ScheduledTrigger"
Confirm
Are you sure you want to start trigger 'ScheduledTrigger' in data factory 'WikiADF'?
[Y] Yes [N] No [S] Suspend [?] Help (default is "Y"): y
True
```
Starts a trigger called "ScheduledTrigger" in the data factory "WikiADF".
## PARAMETERS
### -DataFactoryName
The data factory name.
```yaml
Type: System.String
Parameter Sets: ByFactoryName
Aliases:
Required: True
Position: 1
Default value: None
Accept pipeline input: True (ByPropertyName)
Accept wildcard characters: False
```
### -DefaultProfile
The credentials, account, tenant, and subscription used for communication with azure.
```yaml
Type: Microsoft.Azure.Commands.Common.Authentication.Abstractions.IAzureContextContainer
Parameter Sets: (All)
Aliases: AzureRmContext, AzureCredential
Required: False
Position: Named
Default value: None
Accept pipeline input: False
Accept wildcard characters: False
```
### -Force
Runs the cmdlet without prompting for confirmation.
```yaml
Type: System.Management.Automation.SwitchParameter
Parameter Sets: (All)
Aliases:
Required: False
Position: Named
Default value: None
Accept pipeline input: False
Accept wildcard characters: False
```
### -InputObject
Trigger object to start.
```yaml
Type: Microsoft.Azure.Commands.DataFactoryV2.Models.PSTrigger
Parameter Sets: ByInputObject
Aliases:
Required: True
Position: 0
Default value: None
Accept pipeline input: True (ByValue)
Accept wildcard characters: False
```
### -Name
The trigger name.
```yaml
Type: System.String
Parameter Sets: ByFactoryName
Aliases: TriggerName
Required: True
Position: 2
Default value: None
Accept pipeline input: True (ByPropertyName)
Accept wildcard characters: False
```
### -ResourceGroupName
The resource group name.
```yaml
Type: System.String
Parameter Sets: ByFactoryName
Aliases:
Required: True
Position: 0
Default value: None
Accept pipeline input: True (ByPropertyName)
Accept wildcard characters: False
```
### -ResourceId
The Azure resource ID.
```yaml
Type: System.String
Parameter Sets: ByResourceId
Aliases:
Required: True
Position: 0
Default value: None
Accept pipeline input: True (ByPropertyName)
Accept wildcard characters: False
```
### -Confirm
Prompts you for confirmation before running the cmdlet.
```yaml
Type: System.Management.Automation.SwitchParameter
Parameter Sets: (All)
Aliases: cf
Required: False
Position: Named
Default value: None
Accept pipeline input: False
Accept wildcard characters: False
```
### -WhatIf
Shows what happens if the cmdlet runs, but doesn't run the cmdlet.
```yaml
Type: System.Management.Automation.SwitchParameter
Parameter Sets: (All)
Aliases: wi
Required: False
Position: Named
Default value: None
Accept pipeline input: False
Accept wildcard characters: False
```
### CommonParameters
This cmdlet supports the common parameters: -Debug, -ErrorAction, -ErrorVariable, -InformationAction, -InformationVariable, -OutVariable, -OutBuffer, -PipelineVariable, -Verbose, -WarningAction, and -WarningVariable. For more information, see about_CommonParameters (http://go.microsoft.com/fwlink/?LinkID=113216).
## INPUTS
### System.String
### Microsoft.Azure.Commands.DataFactoryV2.Models.PSTrigger
Parameters: InputObject (ByValue)
## OUTPUTS
### Microsoft.Azure.Commands.DataFactoryV2.Models.PSTrigger
## NOTES
## RELATED LINKS
[Get-AzureRmDataFactoryV2Trigger]()
[Set-AzureRmDataFactoryV2Trigger]()
[Stop-AzureRmDataFactoryV2Trigger]()
[Remove-AzureRmDataFactoryV2Trigger]()
| ClogenyTechnologies/azure-powershell | src/ResourceManager/DataFactoryV2/Commands.DataFactoryV2/help/Start-AzureRmDataFactoryV2Trigger.md | Markdown | apache-2.0 | 4,976 |
/*
* Copyright 2012-2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.boot.actuate.endpoint.mvc;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.actuate.autoconfigure.MinimalActuatorHypermediaApplication;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.hateoas.ResourceSupport;
import org.springframework.http.HttpHeaders;
import org.springframework.http.MediaType;
import org.springframework.test.annotation.DirtiesContext;
import org.springframework.test.context.TestPropertySource;
import org.springframework.test.context.junit4.SpringRunner;
import org.springframework.test.web.servlet.MockMvc;
import org.springframework.test.web.servlet.setup.MockMvcBuilders;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;
import org.springframework.web.context.WebApplicationContext;
import static org.hamcrest.CoreMatchers.containsString;
import static org.springframework.hateoas.mvc.ControllerLinkBuilder.linkTo;
import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.get;
import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.content;
import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.header;
import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.jsonPath;
import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.status;
/**
* Integration tests for {@link HalBrowserMvcEndpoint} when a custom management context
* path has been configured.
*
* @author Dave Syer
* @author Andy Wilkinson
*/
@RunWith(SpringRunner.class)
@SpringBootTest
@TestPropertySource(properties = "management.contextPath:/admin")
@DirtiesContext
public class HalBrowserMvcEndpointManagementContextPathIntegrationTests {
	@Autowired
	private WebApplicationContext context;
	@Autowired
	private MvcEndpoints mvcEndpoints;
	// Rebuilt from the application context before each test.
	private MockMvc mockMvc;
	@Before
	public void setUp() {
		this.mockMvc = MockMvcBuilders.webAppContextSetup(this.context).build();
	}
	// A JSON request to the custom management root returns the HAL links payload.
	@Test
	public void actuatorHomeJson() throws Exception {
		this.mockMvc.perform(get("/admin").accept(MediaType.APPLICATION_JSON))
				.andExpect(status().isOk()).andExpect(jsonPath("$._links").exists());
	}
	// Same as above, with a trailing slash on the management root.
	@Test
	public void actuatorHomeWithTrailingSlashJson() throws Exception {
		this.mockMvc.perform(get("/admin/").accept(MediaType.APPLICATION_JSON))
				.andExpect(status().isOk()).andExpect(jsonPath("$._links").exists());
	}
	// HTML requests are redirected (302) to the HAL browser page under /admin.
	@Test
	public void actuatorHomeHtml() throws Exception {
		this.mockMvc.perform(get("/admin/").accept(MediaType.TEXT_HTML))
				.andExpect(status().isFound()).andExpect(header().string(
						HttpHeaders.LOCATION, "http://localhost/admin/browser.html"));
	}
	// The browser page embeds the custom context path as its entry point.
	@Test
	public void actuatorBrowserHtml() throws Exception {
		this.mockMvc
				.perform(get("/admin/browser.html").accept(MediaType.APPLICATION_JSON))
				.andExpect(status().isOk())
				.andExpect(content().string(containsString("entryPoint: '/admin'")));
	}
	// The trace endpoint returns a plain JSON array without HAL links.
	@Test
	public void trace() throws Exception {
		this.mockMvc.perform(get("/admin/trace").accept(MediaType.APPLICATION_JSON))
				.andExpect(status().isOk()).andExpect(jsonPath("$._links").doesNotExist())
				.andExpect(jsonPath("$").isArray());
	}
	// Every registered endpoint except /actuator is listed with an
	// /admin-prefixed href in the root HAL document.
	@Test
	public void endpointsAllListed() throws Exception {
		for (MvcEndpoint endpoint : this.mvcEndpoints.getEndpoints()) {
			String path = endpoint.getPath();
			if ("/actuator".equals(path)) {
				continue;
			}
			// An empty endpoint path maps to the "self" link.
			path = path.startsWith("/") ? path.substring(1) : path;
			path = path.length() > 0 ? path : "self";
			this.mockMvc.perform(get("/admin").accept(MediaType.APPLICATION_JSON))
					.andExpect(status().isOk())
					.andExpect(jsonPath("$._links.%s.href", path)
							.value("http://localhost/admin" + endpoint.getPath()));
		}
	}
	// Minimal hypermedia-enabled application used as the test context.
	@MinimalActuatorHypermediaApplication
	@RestController
	public static class SpringBootHypermediaApplication {
		@RequestMapping("")
		public ResourceSupport home() {
			ResourceSupport resource = new ResourceSupport();
			resource.add(linkTo(SpringBootHypermediaApplication.class).slash("/")
					.withSelfRel());
			return resource;
		}
	}
}
| candrews/spring-boot | spring-boot-actuator/src/test/java/org/springframework/boot/actuate/endpoint/mvc/HalBrowserMvcEndpointManagementContextPathIntegrationTests.java | Java | apache-2.0 | 4,902 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.