from collections import OrderedDict
from itertools import chain
import regex as re
from dateutil import parser
from dateparser.timezone_parser import pop_tz_offset_from_string, word_is_tz
from dateparser.utils import combine_dicts, normalize_unicode
from .dictionary import ALWAYS_KEEP_TOKENS, Dictionary, NormalizedDictionary
NUMERAL_PATTERN = re.compile(r"(\d+)", re.U)
class Locale:
"""
Class that deals with applicability and translation from a locale.
:param shortname:
A locale code, e.g. 'fr-PF', 'qu-EC', 'af-NA'.
:type shortname: str
:param language_info:
Language info (translation data) of the language the locale belongs to.
:type language_info: dict
:return: A Locale instance
"""
_dictionary = None
_normalized_dictionary = None
_simplifications = None
_normalized_simplifications = None
_splitters = None
_wordchars = None
_relative_translations = None
_normalized_relative_translations = None
_abbreviations = None
_split_dictionary = None
_wordchars_for_detection = None
def __init__(self, shortname, language_info):
self.shortname = shortname
locale_specific_info = language_info.get("locale_specific", {}).get(
shortname, {}
)
self.info = combine_dicts(language_info, locale_specific_info)
self.info.pop("locale_specific", None)
def is_applicable(self, date_string, strip_timezone=False, settings=None):
"""
Check if the locale is applicable to translate the date string.
:param date_string:
A string representing date and/or time in a recognizably valid format.
:type date_string: str
:param strip_timezone:
If True, timezone is stripped from date string.
:type strip_timezone: bool
:return: True if the locale is applicable to the date string, False otherwise.
"""
if strip_timezone:
date_string, _ = pop_tz_offset_from_string(date_string, as_offset=False)
date_string = self._translate_numerals(date_string)
if settings.NORMALIZE:
date_string = normalize_unicode(date_string)
date_string = self._simplify(date_string, settings=settings)
dictionary = self._get_dictionary(settings)
date_tokens = dictionary.split(date_string)
return dictionary.are_tokens_valid(date_tokens)
def count_applicability(self, text, strip_timezone=False, settings=None):
if strip_timezone:
text, _ = pop_tz_offset_from_string(text, as_offset=False)
text = self._simplify(text, settings=settings)
sentences = self._sentence_split(text, settings=settings)
tokens = []
for sent in sentences:
tokens.extend(self._split(sent, keep_formatting=False, settings=settings))
return self._count_words_present_in_the_dictionary(tokens, settings)
def _count_words_present_in_the_dictionary(self, words, settings=None):
dictionary = self.clean_dictionary(
self._get_split_dictionary(settings=settings)
)
dict_cnt = 0
skip_cnt = 0
for word in set(words):
if word in dictionary:
if dictionary[word]:
dict_cnt += 1
else:
skip_cnt += 1
elif word.isdigit():
skip_cnt += 1
return [dict_cnt, skip_cnt]
@staticmethod
def clean_dictionary(dictionary, threshold=2):
del_keys = []
for key in dictionary:
if len(key) < threshold:
del_keys.append(key)
for del_key in del_keys:
del dictionary[del_key]
return dictionary
def translate(self, date_string, keep_formatting=False, settings=None):
"""
Translate the date string to its English equivalent.
:param date_string:
A string representing date and/or time in a recognizably valid format.
:type date_string: str
:param keep_formatting:
If True, retain formatting of the date string after translation.
:type keep_formatting: bool
:return: translated date string.
"""
date_string = self._translate_numerals(date_string)
if settings.NORMALIZE:
date_string = normalize_unicode(date_string)
date_string = self._simplify(date_string, settings=settings)
dictionary = self._get_dictionary(settings)
date_string_tokens = dictionary.split(date_string, keep_formatting)
relative_translations = self._get_relative_translations(settings=settings)
for i, word in enumerate(date_string_tokens):
word = word.lower()
for pattern, replacement in relative_translations.items():
if pattern.match(word):
date_string_tokens[i] = pattern.sub(replacement, word)
break
else:
if word in dictionary:
fallback = word if keep_formatting and not word.isalpha() else ""
date_string_tokens[i] = dictionary[word] or fallback
if "in" in date_string_tokens:
date_string_tokens = self._clear_future_words(date_string_tokens)
return self._join(
list(filter(bool, date_string_tokens)),
separator="" if keep_formatting else " ",
settings=settings,
)
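    # A worked illustration (hypothetical French data): 'jours' is looked up in
    # the dictionary and becomes 'day', while a phrase like 'il y a 2 jours'
    # matches a relative-translation pattern and becomes '2 day ago'.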
def _translate_numerals(self, date_string):
date_string_tokens = NUMERAL_PATTERN.split(date_string)
for i, token in enumerate(date_string_tokens):
if token.isdecimal():
date_string_tokens[i] = str(int(token)).zfill(len(token))
return "".join(date_string_tokens)
def _get_relative_translations(self, settings=None):
if settings.NORMALIZE:
if self._normalized_relative_translations is None:
self._normalized_relative_translations = (
self._generate_relative_translations(normalize=True)
)
return self._normalized_relative_translations
else:
if self._relative_translations is None:
self._relative_translations = self._generate_relative_translations(
normalize=False
)
return self._relative_translations
def _generate_relative_translations(self, normalize=False):
relative_translations = self.info.get("relative-type-regex", {})
relative_dictionary = OrderedDict()
for key, value in relative_translations.items():
if normalize:
value = list(map(normalize_unicode, value))
pattern = "|".join(sorted(value, key=len, reverse=True))
pattern = pattern.replace(r"(\d+", r"(?P<n>\d+")
pattern = re.compile(
r"^(?:{})$".format(pattern), re.UNICODE | re.IGNORECASE
)
relative_dictionary[pattern] = key
return relative_dictionary
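    # For example, a hypothetical 'relative-type-regex' entry
    # {'\1 day ago': ['il y a (\d+) jour']} compiles to
    # ^(?:il y a (?P<n>\d+) jour)$ with the quantity in the named group 'n'.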
def translate_search(self, search_string, settings=None):
dashes = ["-", "——", "—", "~"]
word_joint_unsupported_languages = ["zh", "ja"]
sentences = self._sentence_split(search_string, settings=settings)
dictionary = self._get_dictionary(settings=settings)
translated = []
original = []
for sentence in sentences:
original_tokens, simplified_tokens = self._simplify_split_align(
sentence, settings=settings
)
translated_chunk = []
original_chunk = []
last_token_index = len(simplified_tokens) - 1
skip_next_token = False
for i, word in enumerate(simplified_tokens):
next_word = simplified_tokens[i + 1] if i < last_token_index else ""
current_and_next_joined = self._join_chunk(
[word, next_word], settings=settings
)
if skip_next_token:
skip_next_token = False
continue
if word == "" or word == " ":
translated_chunk.append(word)
original_chunk.append(original_tokens[i])
elif (
current_and_next_joined in dictionary
and word not in dashes
and self.shortname not in word_joint_unsupported_languages
):
translated_chunk.append(dictionary[current_and_next_joined])
original_chunk.append(
self._join_chunk(
[original_tokens[i], original_tokens[i + 1]],
settings=settings,
)
)
skip_next_token = True
elif word in dictionary and word not in dashes:
translated_chunk.append(dictionary[word])
original_chunk.append(original_tokens[i])
elif word.strip("()\"'{}[],.،") in dictionary and word not in dashes:
punct = word[len(word.strip("()\"'{}[],.،")) :]
if punct and dictionary[word.strip("()\"'{}[],.،")]:
translated_chunk.append(
dictionary[word.strip("()\"'{}[],.،")] + punct
)
else:
translated_chunk.append(dictionary[word.strip("()\"'{}[],.،")])
original_chunk.append(original_tokens[i])
elif self._token_with_digits_is_ok(word):
translated_chunk.append(word)
original_chunk.append(original_tokens[i])
# Use original token because word_is_tz is case sensitive
elif translated_chunk and word_is_tz(original_tokens[i]):
translated_chunk.append(word)
original_chunk.append(original_tokens[i])
else:
if translated_chunk:
translated.append(translated_chunk)
translated_chunk = []
original.append(original_chunk)
original_chunk = []
if translated_chunk:
translated.append(translated_chunk)
original.append(original_chunk)
for i in range(len(translated)):
if "in" in translated[i]:
translated[i] = self._clear_future_words(translated[i])
translated[i] = self._join_chunk(
list(filter(bool, translated[i])), settings=settings
)
original[i] = self._join_chunk(
list(filter(bool, original[i])), settings=settings
)
return translated, original
def _get_abbreviations(self, settings):
dictionary = self._get_dictionary(settings=settings)
abbreviations = []
if self._abbreviations is None:
for item in dictionary:
if item.endswith(".") and len(item) > 1:
abbreviations.append(item)
self._abbreviations = abbreviations
return self._abbreviations
def _sentence_split(self, string, settings):
abbreviations = self._get_abbreviations(settings=settings)
digit_abbreviations = ["[0-9]"] # numeric date with full stop
abbreviation_string = ""
for abbreviation in abbreviations:
abbreviation_string += (
"(?<! " + abbreviation[:-1] + ")"
) # negative lookbehind
if self.shortname in ["fi", "cs", "hu", "de", "da"]:
for digit_abbreviation in digit_abbreviations:
abbreviation_string += (
"(?<!" + digit_abbreviation + ")"
) # negative lookbehind
splitters_dict = {
1: r"[\.!?;…\r\n]+(?:\s|$)*", # most European, Tagalog, Hebrew, Georgian,
# Indonesian, Vietnamese
2: r"[\.!?;…\r\n]+(\s*[¡¿]*|$)|[¡¿]+", # Spanish
3: r"[|!?;\r\n]+(?:\s|$)+", # Hindi and Bangla
4: r"[。…‥\.!??!;\r\n]+(?:\s|$)+", # Japanese and Chinese
5: r"[\r\n]+", # Thai
6: r"[\r\n؟!\.…]+(?:\s|$)+",
} # Arabic and Farsi
if "sentence_splitter_group" not in self.info:
split_reg = abbreviation_string + splitters_dict[1]
sentences = re.split(split_reg, string)
else:
split_reg = (
abbreviation_string
+ splitters_dict[self.info["sentence_splitter_group"]]
)
sentences = re.split(split_reg, string)
sentences = filter(None, sentences)
return sentences
def _simplify_split_align(self, original, settings):
# TODO: Switch to new split method.
original_tokens = self._word_split(original, settings=settings)
simplified_tokens = self._word_split(
self._simplify(normalize_unicode(original), settings=settings),
settings=settings,
)
if len(original_tokens) == len(simplified_tokens):
return original_tokens, simplified_tokens
elif len(original_tokens) < len(simplified_tokens):
add_empty = False
for i, token in enumerate(simplified_tokens):
if i < len(original_tokens):
if token == normalize_unicode(original_tokens[i].lower()):
add_empty = False
else:
if not add_empty:
add_empty = True
continue
else:
original_tokens.insert(i, "")
else:
original_tokens.insert(i, "")
else:
add_empty = False
for i, token in enumerate(original_tokens):
if i < len(simplified_tokens):
if normalize_unicode(token.lower()) == simplified_tokens[i]:
add_empty = False
else:
if not add_empty:
add_empty = True
continue
else:
simplified_tokens.insert(i, "")
else:
simplified_tokens.insert(i, "")
while len(original_tokens) != len(simplified_tokens):
if len(original_tokens) > len(simplified_tokens):
original_tokens.remove("")
else:
simplified_tokens.remove("")
return original_tokens, simplified_tokens
def _get_split_dictionary(self, settings):
if self._split_dictionary is None:
settings.NORMALIZE = True
dictionary = self._get_dictionary(settings=settings)
self._split_dictionary = self._split_dict(dictionary)
return self._split_dictionary
def _split_dict(self, dictionary):
newdict = {}
for item in dictionary:
if " " in item:
items = item.split()
for i in items:
newdict[i] = dictionary[item]
else:
newdict[item] = dictionary[item]
return newdict
def _word_split(self, string, settings):
if "no_word_spacing" in self.info:
return self._split(string, keep_formatting=True, settings=settings)
else:
return string.split()
def _split(self, date_string, keep_formatting, settings=None):
tokens = [date_string]
tokens = list(self._split_tokens_with_regex(tokens, r"(\d+)"))
tokens = list(
self._split_tokens_by_known_words(
tokens, keep_formatting, settings=settings
)
)
return tokens
def _split_tokens_with_regex(self, tokens, regex):
tokens = tokens[:]
for i, token in enumerate(tokens):
tokens[i] = re.split(regex, token)
return filter(bool, chain.from_iterable(tokens))
def _split_tokens_by_known_words(self, tokens, keep_formatting, settings=None):
dictionary = self._get_dictionary(settings)
for i, token in enumerate(tokens):
tokens[i] = dictionary.split(token, keep_formatting)
return list(chain.from_iterable(tokens))
def _join_chunk(self, chunk, settings):
if "no_word_spacing" in self.info:
return self._join(chunk, separator="", settings=settings)
else:
return re.sub(r"\s{2,}", " ", " ".join(chunk))
def _token_with_digits_is_ok(self, token):
if "no_word_spacing" in self.info:
if re.search(r"[\d\.:\-/]+", token) is not None:
return True
else:
return False
else:
if re.search(r"\d+", token) is not None:
return True
else:
return False
def _simplify(self, date_string, settings=None):
date_string = date_string.lower()
simplifications = self._get_simplifications(settings=settings)
for simplification in simplifications:
pattern, replacement = list(simplification.items())[0]
date_string = pattern.sub(replacement, date_string).lower()
return date_string
def _get_simplifications(self, settings=None):
no_word_spacing = eval(self.info.get("no_word_spacing", "False"))
if settings.NORMALIZE:
if self._normalized_simplifications is None:
self._normalized_simplifications = []
simplifications = self._generate_simplifications(normalize=True)
for simplification in simplifications:
pattern, replacement = list(simplification.items())[0]
if not no_word_spacing:
pattern = r"(?<=\A|\W|_)%s(?=\Z|\W|_)" % pattern
pattern = re.compile(pattern, flags=re.I | re.U)
self._normalized_simplifications.append({pattern: replacement})
return self._normalized_simplifications
else:
if self._simplifications is None:
self._simplifications = []
simplifications = self._generate_simplifications(normalize=False)
for simplification in simplifications:
pattern, replacement = list(simplification.items())[0]
if not no_word_spacing:
pattern = r"(?<=\A|\W|_)%s(?=\Z|\W|_)" % pattern
pattern = re.compile(pattern, flags=re.I | re.U)
self._simplifications.append({pattern: replacement})
return self._simplifications
def _generate_simplifications(self, normalize=False):
simplifications = []
for simplification in self.info.get("simplifications", []):
c_simplification = {}
key, value = list(simplification.items())[0]
if normalize:
key = normalize_unicode(key)
if isinstance(value, int):
c_simplification[key] = str(value)
else:
c_simplification[key] = normalize_unicode(value) if normalize else value
simplifications.append(c_simplification)
return simplifications
def _clear_future_words(self, words):
freshness_words = {"day", "week", "month", "year", "hour", "minute", "second"}
if set(words).isdisjoint(freshness_words):
words.remove("in")
return words
def _join(self, tokens, separator=" ", settings=None):
if not tokens:
return ""
capturing_splitters = self._get_splitters(settings)["capturing"]
joined = tokens[0]
for i in range(1, len(tokens)):
left, right = tokens[i - 1], tokens[i]
if left not in capturing_splitters and right not in capturing_splitters:
joined += separator
joined += right
return joined
def _get_dictionary(self, settings=None):
if not settings.NORMALIZE:
if self._dictionary is None:
self._generate_dictionary()
self._dictionary._settings = settings
return self._dictionary
else:
if self._normalized_dictionary is None:
self._generate_normalized_dictionary()
self._normalized_dictionary._settings = settings
return self._normalized_dictionary
def _get_wordchars(self, settings=None):
if self._wordchars is None:
self._set_wordchars(settings)
return self._wordchars
def _get_splitters(self, settings=None):
if self._splitters is None:
self._set_splitters(settings)
return self._splitters
def _set_splitters(self, settings=None):
splitters = {
# The ones that split string only if they are not surrounded by letters from both sides:
"wordchars": set(),
# The ones that are not filtered out from tokens after split:
"capturing": set(),
}
splitters["capturing"] |= set(ALWAYS_KEEP_TOKENS)
wordchars = self._get_wordchars(settings)
skip = set(self.info.get("skip", [])) | splitters["capturing"]
for token in skip:
if not re.match(r"^\W+$", token, re.UNICODE):
continue
if token in wordchars:
splitters["wordchars"].add(token)
self._splitters = splitters
def _set_wordchars(self, settings=None):
wordchars = set()
for word in self._get_dictionary(settings):
if re.match(r"^[\W\d_]+$", word, re.UNICODE):
continue
for char in word:
wordchars.add(char.lower())
self._wordchars = wordchars - {" "} | {
"0",
"1",
"2",
"3",
"4",
"5",
"6",
"7",
"8",
"9",
}
def get_wordchars_for_detection(self, settings):
if self._wordchars_for_detection is None:
wordchars = set()
for word in self._get_dictionary(settings):
if re.match(r"^[\W\d_]+$", word, re.UNICODE):
continue
for char in word:
wordchars.add(char.lower())
self._wordchars_for_detection = wordchars - {
"0",
"1",
"2",
"3",
"4",
"5",
"6",
"7",
"8",
"9",
":",
"(",
")",
"'",
"q",
"a",
"m",
"p",
" ",
}
return self._wordchars_for_detection
def _generate_dictionary(self, settings=None):
self._dictionary = Dictionary(self.info, settings=settings)
def _generate_normalized_dictionary(self, settings=None):
self._normalized_dictionary = NormalizedDictionary(self.info, settings=settings)
def to_parserinfo(self, base_cls=parser.parserinfo):
attributes = {
"JUMP": self.info.get("skip", []),
"PERTAIN": self.info.get("pertain", []),
"WEEKDAYS": [
self.info["monday"],
self.info["tuesday"],
self.info["wednesday"],
self.info["thursday"],
self.info["friday"],
self.info["saturday"],
self.info["sunday"],
],
"MONTHS": [
self.info["january"],
self.info["february"],
self.info["march"],
self.info["april"],
self.info["may"],
self.info["june"],
self.info["july"],
self.info["august"],
self.info["september"],
self.info["october"],
self.info["november"],
self.info["december"],
],
"HMS": [self.info["hour"], self.info["minute"], self.info["second"]],
}
name = "{language}ParserInfo".format(language=self.info["name"])
        return type(name, (base_cls,), attributes)
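# A minimal usage sketch (hypothetical; assumes `locale` is a constructed
# Locale instance):
#
#   MyParserInfo = locale.to_parserinfo()
#   parser.parse('1 enero 2020', parserinfo=MyParserInfo())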
// File: erp_parent/erp_client/src/main/java/com/redsum/bos/ws/ObjectFactory.java
package com.redsum.bos.ws;
import javax.xml.bind.JAXBElement;
import javax.xml.bind.annotation.XmlElementDecl;
import javax.xml.bind.annotation.XmlRegistry;
import javax.xml.namespace.QName;
/**
* This object contains factory methods for each
* Java content interface and Java element interface
* generated in the com.redsum.bos.ws package.
 * <p>An ObjectFactory allows you to programmatically
* construct new instances of the Java representation
* for XML content. The Java representation of XML
* content can consist of schema derived interfaces
* and classes representing the binding of schema
* type definitions, element declarations and model
* groups. Factory methods for each of these are
* provided in this class.
*
*/
@XmlRegistry
public class ObjectFactory {
private final static QName _AddWaybill_QNAME = new QName("http://ws.bos.redsum.com/", "addWaybill");
private final static QName _WaybilldetailList_QNAME = new QName("http://ws.bos.redsum.com/", "waybilldetailList");
private final static QName _WaybilldetailListResponse_QNAME = new QName("http://ws.bos.redsum.com/", "waybilldetailListResponse");
private final static QName _AddWaybillResponse_QNAME = new QName("http://ws.bos.redsum.com/", "addWaybillResponse");
/**
* Create a new ObjectFactory that can be used to create new instances of schema derived classes for package: com.redsum.bos.ws
*
*/
public ObjectFactory() {
}
/**
* Create an instance of {@link AddWaybill }
*
*/
public AddWaybill createAddWaybill() {
return new AddWaybill();
}
/**
* Create an instance of {@link AddWaybillResponse }
*
*/
public AddWaybillResponse createAddWaybillResponse() {
return new AddWaybillResponse();
}
/**
* Create an instance of {@link WaybilldetailList }
*
*/
public WaybilldetailList createWaybilldetailList() {
return new WaybilldetailList();
}
/**
* Create an instance of {@link WaybilldetailListResponse }
*
*/
public WaybilldetailListResponse createWaybilldetailListResponse() {
return new WaybilldetailListResponse();
}
/**
* Create an instance of {@link Waybilldetail }
*
*/
public Waybilldetail createWaybilldetail() {
return new Waybilldetail();
}
/**
 * Create an instance of {@link JAXBElement }{@code <}{@link AddWaybill }{@code >}
*
*/
@XmlElementDecl(namespace = "http://ws.bos.redsum.com/", name = "addWaybill")
public JAXBElement<AddWaybill> createAddWaybill(AddWaybill value) {
return new JAXBElement<AddWaybill>(_AddWaybill_QNAME, AddWaybill.class, null, value);
}
/**
 * Create an instance of {@link JAXBElement }{@code <}{@link WaybilldetailList }{@code >}
*
*/
@XmlElementDecl(namespace = "http://ws.bos.redsum.com/", name = "waybilldetailList")
public JAXBElement<WaybilldetailList> createWaybilldetailList(WaybilldetailList value) {
return new JAXBElement<WaybilldetailList>(_WaybilldetailList_QNAME, WaybilldetailList.class, null, value);
}
/**
 * Create an instance of {@link JAXBElement }{@code <}{@link WaybilldetailListResponse }{@code >}
*
*/
@XmlElementDecl(namespace = "http://ws.bos.redsum.com/", name = "waybilldetailListResponse")
public JAXBElement<WaybilldetailListResponse> createWaybilldetailListResponse(WaybilldetailListResponse value) {
return new JAXBElement<WaybilldetailListResponse>(_WaybilldetailListResponse_QNAME, WaybilldetailListResponse.class, null, value);
}
/**
 * Create an instance of {@link JAXBElement }{@code <}{@link AddWaybillResponse }{@code >}
*
*/
@XmlElementDecl(namespace = "http://ws.bos.redsum.com/", name = "addWaybillResponse")
public JAXBElement<AddWaybillResponse> createAddWaybillResponse(AddWaybillResponse value) {
return new JAXBElement<AddWaybillResponse>(_AddWaybillResponse_QNAME, AddWaybillResponse.class, null, value);
}
}
/**
* Remove all rules matching the passed predicate.
*
* @param aFilter
* The predicate to apply for deletion. May not be <code>null</code>.
 * @return {@link EChange#CHANGED} if at least one rule was removed,
* {@link EChange#UNCHANGED} otherwise.
* @since 5.0.0
*/
@Nonnull
public EChange removeRules (@Nonnull final Predicate <? super ICSSTopLevelRule> aFilter)
{
return EChange.valueOf (m_aRules.removeIf (aFilter));
}
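// A hypothetical usage sketch: remove every @media rule from a parsed
// stylesheet (assumes aCSS is a CascadingStyleSheet):
//
//   final EChange eChange = aCSS.removeRules (aRule -> aRule instanceof CSSMediaRule);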
import { constants } from 'http2';
import { files } from '../src';
describe('files', () => {
it('should be callable', () => {
expect(typeof files).toBe('function');
});
it('should return get route', () => {
expect(files().method).toBe(constants.HTTP2_METHOD_GET);
});
});
s = input()
t = input()
vowels = set("aeiou")
if len(s) != len(t):
    print("No")
else:
    # At every position, both characters must be vowels or both consonants.
    same_class = all((a in vowels) == (b in vowels) for a, b in zip(s, t))
    print("Yes" if same_class else "No")
async def undocking(self, guild : discord.Guild):
def has_subname_role(member : discord.Member) -> bool:
roles = member.roles
role_names = map(lambda r: r.name, roles)
return self._name in role_names
in_sub = filter(has_subname_role, guild.members)
for member in in_sub:
roles_to_remove = []
for role in member.roles:
if str.startswith(role.name, "docked-at-"):
roles_to_remove.append(role)
        await member.remove_roles(*roles_to_remove)
// Choose the timeout : setupTimeout (if set) limits the protocolTimeout.
private static long chooseTimeout(long setupTimeout, long protocolTimeout) {
if ( setupTimeout < 0 )
return protocolTimeout;
if (protocolTimeout > 0 )
return Math.min(setupTimeout, protocolTimeout);
else
return setupTimeout;
}
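// Examples (hypothetical values): chooseTimeout(-1, 5000) == 5000,
// chooseTimeout(2000, 5000) == 2000, and chooseTimeout(2000, 0) == 2000.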
/**
* Copyright 2019 zgqq <<EMAIL>>
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.flycat.platform.springboot;
import com.github.flycat.context.bean.annotation.Primary;
import com.github.flycat.util.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.BeansException;
import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.beans.factory.config.ConfigurableListableBeanFactory;
import org.springframework.beans.factory.support.BeanDefinitionRegistry;
import org.springframework.beans.factory.support.BeanDefinitionRegistryPostProcessor;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.ComponentScan;
import org.springframework.context.annotation.Configuration;
import org.springframework.core.Ordered;
import org.springframework.core.annotation.Order;
import java.util.concurrent.CompletableFuture;
@Configuration
@ComponentScan(basePackages = {"com.github.flycat.spi.impl",
"com.github.flycat.support.spring",
"com.github.flycat.component",
"com.github.flycat.service"})
public class SpringContextConfiguration {
private static final Logger LOGGER = LoggerFactory.getLogger(SpringContextConfiguration.class);
@Bean
public static DispatcherRegisterProcessor dispatcherRegisterProcessor() {
return new DispatcherRegisterProcessor();
}
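    // The registry post-processor below scans every registered bean definition
    // and, for classes under a "spi.impl" package that carry flycat's @Primary
    // annotation, marks the bean definition as primary so it wins injection.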
@Bean
@Order(value = Ordered.HIGHEST_PRECEDENCE)
public static BeanDefinitionRegistryPostProcessor beanDefinitionRegistryPostProcessor() {
return new BeanDefinitionRegistryPostProcessor() {
@Override
public void postProcessBeanDefinitionRegistry(BeanDefinitionRegistry registry)
throws BeansException {
final String[] beanDefinitionNames = registry.getBeanDefinitionNames();
for (int i = 0; i < beanDefinitionNames.length; i++) {
final String beanDefinitionName = beanDefinitionNames[i];
final BeanDefinition beanDefinition = registry.getBeanDefinition(beanDefinitionName);
final String beanClassName = beanDefinition.getBeanClassName();
if (StringUtils.isBlank(beanClassName)) {
continue;
}
final Primary annotation;
if (beanClassName.contains("spi.impl")) {
try {
annotation = Class.forName(beanClassName).getAnnotation(Primary.class);
if (annotation != null) {
LOGGER.info("Setting primary bean, class:{}", beanClassName);
beanDefinition.setPrimary(true);
}
} catch (ClassNotFoundException e) {
throw new RuntimeException(e);
}
}
}
}
@Override
public void postProcessBeanFactory(ConfigurableListableBeanFactory beanFactory) throws BeansException {
}
};
}
}
// File: years/2020/02/passwords.ts
export const validatePasswords = (input: string[]) => {
const data = input.map((row) => {
const [, minCount, maxCount, character, password] = row.split(/(\d+)-(\d+) (.): (.+)/);
return { minCount: Number(minCount), maxCount: Number(maxCount), character, password };
});
const correctPasswords = data.filter(({ minCount, maxCount, character, password }) => {
const wantedCharacters = password.match(new RegExp(character, 'g'));
if (wantedCharacters == null) {
return false;
}
const numberOfWantedCharacters = wantedCharacters.length;
return numberOfWantedCharacters >= minCount && numberOfWantedCharacters <= maxCount;
});
return correctPasswords.length;
};
export const validatePasswords2 = (input: string[]) => {
const data = input.map((row) => {
const [, firstPos, lastPos, character, password] = row.split(/(\d+)-(\d+) (.): (.+)/);
return { firstPos: Number(firstPos), lastPos: Number(lastPos), character, password };
});
const correctPasswords = data.filter(({ firstPos, lastPos, character, password }) => {
let matches = 0;
if (password[firstPos - 1] === character) {
matches += 1;
}
if (password[lastPos - 1] === character) {
matches += 1;
}
return matches === 1;
});
return correctPasswords.length;
};
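// A quick illustration (sample rows, not from the repo's input files): for
// ['1-3 a: abcde', '1-3 b: cdefg'], validatePasswords returns 1 ('a' occurs
// once, within 1-3; 'b' never occurs) and validatePasswords2 also returns 1
// ('a' is at position 1 but not 3, i.e. exactly one positional match).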
/**
* Save a graduacao.
*
* @param graduacaoDTO the entity to save.
* @return the persisted entity.
*/
public GraduacaoDTO save(GraduacaoDTO graduacaoDTO) {
log.debug("Request to save Graduacao : {}", graduacaoDTO);
Graduacao graduacao = graduacaoMapper.toEntity(graduacaoDTO);
graduacao = graduacaoRepository.save(graduacao);
GraduacaoDTO result = graduacaoMapper.toDto(graduacao);
graduacaoSearchRepository.save(graduacao);
return result;
}
import java.util.*;
public class TravelCard{
public static int n;
public static int[] mincost;
public static int[] t;
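    // f(x): binary search for the smallest index i with t[i] >= x, i.e. the
    // earliest trip still covered by a pass window starting at time x.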
public static int f(int x){
int l = 1; int r = n; int a = l;int mid;
while(l <= r){
mid = (l+r)/2;
if(t[mid] >= x){
a = mid;
r = mid - 1;
}
else{
l = mid + 1;
}
}
return a;
}
public static void main(String[] args){
Scanner sc = new Scanner(System.in);
n = sc.nextInt();
mincost = new int[n+5];
t = new int[n+5];
mincost[0] = 0;
mincost[1] = 20;
mincost[2] = 40;
for(int i=1;i<=n;i++){
t[i] = sc.nextInt();
}
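        // DP over trips: pay 20 for a single ticket, 50 for a 90-minute pass
        // covering trips from f(t[i]-89) onward, or 120 for a 1440-minute pass
        // covering trips from f(t[i]-1439) onward.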
for(int i=3;i<=n;i++){
mincost[i] = mincost[i-1] + 20;
mincost[i] = Math.min(mincost[i], mincost[f(t[i]-89)-1] + 50);
mincost[i] = Math.min(mincost[i], mincost[f(t[i]-1439)-1] + 120);
}
for(int i=1;i<=n;i++){
System.out.println(mincost[i] - mincost[i-1]);
}
}
}
#!/usr/bin/env python2
# Copyright 2016 The Fontbakery Authors
# Copyright 2017 The Google Font Tools Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import argparse
import csv
import os
import sys
import tabulate
from fontTools import ttLib
parser = argparse.ArgumentParser(description='Print out'
' usWidthClass of the fonts')
parser.add_argument('font', nargs="+")
parser.add_argument('--csv', default=False, action='store_true')
parser.add_argument('--set', type=int, default=0)
parser.add_argument('--autofix', default=False, action='store_true')
def print_info(fonts, print_csv=False):
headers = ['filename', 'usWidthClass']
rows = []
warnings = []
for font in fonts:
ttfont = ttLib.TTFont(font)
usWidthClass = ttfont['OS/2'].usWidthClass
rows.append([os.path.basename(font), usWidthClass])
if usWidthClass != 5:
warning = "WARNING: {} is {}, expected 5"
warnings.append(warning.format(font, usWidthClass))
def as_csv(rows):
writer = csv.writer(sys.stdout)
writer.writerows([headers])
writer.writerows(rows)
sys.exit(0)
if print_csv:
as_csv(rows)
print(tabulate.tabulate(rows, headers, tablefmt="pipe"))
for warn in warnings:
print(warn, file=sys.stderr)
def getFromFilename(filename):
if "UltraCondensed-" in filename:
usWidthClass = 1
elif "ExtraCondensed-" in filename:
usWidthClass = 2
elif "SemiCondensed-" in filename:
usWidthClass = 4
elif "Condensed-" in filename:
usWidthClass = 3
elif "SemiExpanded-" in filename:
usWidthClass = 6
elif "ExtraExpanded-" in filename:
usWidthClass = 8
elif "UltraExpanded-" in filename:
usWidthClass = 9
elif "Expanded-" in filename:
usWidthClass = 7
else:
usWidthClass = 5
return usWidthClass
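# Note: the plain "Condensed-"/"Expanded-" checks come after their
# Semi/Extra/Ultra variants on purpose, since e.g. "SemiCondensed-" also
# contains the substring "Condensed-".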
def fix(fonts, value=None):
rows = []
headers = ['filename', 'usWidthClass was', 'usWidthClass now']
for font in fonts:
row = [font]
ttfont = ttLib.TTFont(font)
if not value:
usWidthClass = getFromFilename(font)
else:
usWidthClass = value
row.append(ttfont['OS/2'].usWidthClass)
ttfont['OS/2'].usWidthClass = usWidthClass
row.append(ttfont['OS/2'].usWidthClass)
ttfont.save(font + '.fix')
rows.append(row)
if rows:
print(tabulate.tabulate(rows, headers, tablefmt="pipe"))
def main():
args = parser.parse_args()
if args.autofix:
fix(args.font)
sys.exit(0)
if args.set:
fix(args.font, value=int(args.set))
sys.exit(0)
print_info(args.font, print_csv=args.csv)
if __name__ == '__main__':
main()
use azure_sdk_core::errors::{check_status_extract_body, AzureError};
use azure_sdk_storage_core::key_client::KeyClient;
use azure_sdk_storage_core::prelude::*;
use azure_sdk_storage_core::{
client, get_default_json_mime, get_json_mime_fullmetadata, get_json_mime_nometadata,
ConnectionString, ServiceType,
};
use http::request::Builder;
use hyper::{
client::ResponseFuture,
header::{self, HeaderValue},
};
use hyper::{Method, StatusCode};
use log;
use serde_json;
const TABLE_TABLES: &str = "TABLES";
/// Requested metadata detail
pub enum MetadataDetail {
Default,
None,
Full,
}
#[derive(Clone)]
pub struct TableClient<C>
where
C: Client,
{
client: C,
}
impl TableClient<KeyClient> {
/// Create a new `TableClient` using a key.
pub fn new(account: &str, key: &str) -> Self {
TableClient {
client: client::with_access_key(account, key),
}
}
/// Create a new `TableClient` using a SAS token.
pub fn azure_sas(account: &str, sas_token: &str) -> Self {
TableClient {
client: client::with_azure_sas(account, sas_token),
}
}
pub fn from_connection_string(connection_string: &str) -> Result<Self, AzureError> {
match ConnectionString::new(connection_string)? {
ConnectionString {
account_name: Some(account),
account_key: Some(_),
sas: Some(sas_token),
..
} => {
log::warn!("Both account key and SAS defined in connection string. Using only the provided SAS.");
Ok(TableClient {
client: client::with_azure_sas(account, sas_token),
})
}
ConnectionString {
account_name: Some(account),
sas: Some(sas_token),
..
} => Ok(TableClient {
client: client::with_azure_sas(account, sas_token),
}),
ConnectionString {
account_name: Some(account),
account_key: Some(key),
..
} => Ok(TableClient {
client: client::with_access_key(account, key),
}),
_ => {
return Err(AzureError::GenericErrorWithText(
"Could not create an Azure Table client from the provided connection string. Please validate that you have specified the account name and means of authentication (key, SAS, etc.)."
.to_owned(),
))
}
}
}
}
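// A hypothetical usage sketch (account name and key are placeholders):
//
//   let client = TableClient::new("myaccount", "bXlrZXk=");
//   let tables = client.list_tables().await?;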
impl<C> TableClient<C>
where
C: Client,
{
pub async fn list_tables(&self) -> Result<Vec<String>, AzureError> {
let future_response = self.request_with_default_header(
TABLE_TABLES,
&Method::GET,
None,
MetadataDetail::None,
&|req| req,
)?;
let body = check_status_extract_body(future_response, StatusCode::OK).await?;
let entities = serde_json::from_str::<TableDataCollection>(&body)?;
        // TODO: should we follow the continuation token, or does the query result always fit in a single page?
let e: Vec<String> = entities.value.into_iter().map(|x| x.table_name).collect();
Ok(e)
}
    // Create the table if it does not already exist.
pub async fn create_table<T: Into<String>>(&self, table_name: T) -> Result<(), AzureError> {
let body = &serde_json::to_string(&TableData {
table_name: table_name.into(),
})
.unwrap();
log::debug!("body == {}", body);
let future_response = self.request_with_default_header(
TABLE_TABLES,
&Method::POST,
Some(body),
MetadataDetail::None,
&|req| req,
)?;
check_status_extract_body(future_response, StatusCode::CREATED).await?;
Ok(())
}
pub fn get_uri_prefix(&self) -> String {
self.client.get_uri_prefix(ServiceType::Table)
}
pub(crate) fn request_with_default_header(
&self,
segment: &str,
method: &Method,
request_str: Option<&str>,
metadata: MetadataDetail,
http_header_adder: &dyn Fn(Builder) -> Builder,
) -> Result<ResponseFuture, AzureError> {
self.request(segment, method, request_str, &|mut request| {
request = match metadata {
MetadataDetail::Full => request.header(
header::ACCEPT,
HeaderValue::from_static(get_json_mime_fullmetadata()),
),
MetadataDetail::None => request.header(
header::ACCEPT,
HeaderValue::from_static(get_json_mime_nometadata()),
),
MetadataDetail::Default => request.header(
header::ACCEPT,
HeaderValue::from_static(get_default_json_mime()),
),
};
if request_str.is_some() {
request = request.header(
header::CONTENT_TYPE,
HeaderValue::from_static(get_default_json_mime()),
);
}
http_header_adder(request)
})
}
pub(crate) fn request(
&self,
segment: &str,
method: &Method,
request_str: Option<&str>,
http_header_adder: &dyn Fn(Builder) -> Builder,
) -> Result<ResponseFuture, AzureError> {
log::trace!("{:?} {}", method, segment);
if let Some(body) = request_str {
log::trace!("Request: {}", body);
}
let request_vec: Option<&[u8]> = match request_str {
Some(s) => Some(s.as_bytes()),
None => None,
};
self.client
.perform_table_request(segment, method, http_header_adder, request_vec)
}
}
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "PascalCase")]
struct TableData {
table_name: String,
}
#[derive(Serialize, Deserialize)]
struct TableDataCollection {
value: Vec<TableData>,
}
#[inline]
pub(crate) fn get_batch_mime() -> &'static str {
"multipart/mixed; boundary=batch_a1e9d677-b28b-435e-a89e-87e6a768a431"
}
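// For example, entity_path("people", "p1", "r1") yields
// "people(PartitionKey='p1',RowKey='r1')".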
pub(crate) fn entity_path(table_name: &str, partition_key: &str, row_key: &str) -> String {
table_name.to_owned() + "(PartitionKey='" + partition_key + "',RowKey='" + row_key + "')"
}
def fixslash(url, relative=True):
url = url.strip("/")
if relative:
url = "/" + url
    return url
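# Examples: fixslash("foo/bar/") -> "/foo/bar";
# fixslash("/foo/", relative=False) -> "foo".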
/**
* Discards a batch of Pig commands.
*
* @throws FrontendException
*/
public void discardBatch() throws FrontendException {
if (currDAG == null || !isBatchOn()) {
int errCode = 1083;
String msg = "setBatchOn() must be called first.";
throw new FrontendException(msg, errCode, PigException.INPUT);
}
currDAG = graphs.pop();
}
import * as t from "@babel/types";
import getDefinitionName from "./get-definition-name";
import typeNodeName from "./type-node-name";
import some from "../../utils/some";
import {
MetaTypeTree,
TypeNode,
TypeTree,
MetaTypeNode
} from "../../node-types";
import filter from "../../utils/filter";
// NOTE: JSX.Element is arguably the closest TypeScript type to PropTypes.node
const typesToStrip = ["JSX.Element"];
export default function parseTypes(
types: (t.TSEnumMember | t.TSTypeElement)[],
typeDeclarations: { [key: string]: (t.TSEnumMember | t.TSTypeElement)[] },
meta?: MetaTypeTree
) {
return types.reduce((accum: TypeTree, node) => {
if (
!t.isTSPropertySignature(node) ||
!t.isIdentifier(node.key) ||
node.typeAnnotation === null
) {
return accum;
}
const name = node.key.name;
try {
const type = parseType(
node.typeAnnotation.typeAnnotation,
typeDeclarations,
meta ? meta[name] : undefined,
!node.optional
);
return Object.assign(accum, type ? { [name]: type } : {});
} catch (error) {
throw new Error(`Invalid type for prop '${name}': ${error.message}`);
}
}, {});
}
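// A rough illustration (hypothetical input): a member like `label: string;`
// yields { label: { required: true, type: "string" } }, while an optional
// `tags?: string[];` becomes a "list" node with elementType "string".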
const getChildMeta = (node?: MetaTypeNode): MetaTypeTree | undefined => {
if (node && node.type === "object") return node.children;
};
const getListMeta = (node?: MetaTypeNode): MetaTypeNode | undefined => {
if (node && node.type === "list") return node.elementType;
};
const getNumberFromMeta = (node?: MetaTypeNode) => {
if (!node) return;
if (
node.type === "double" ||
node.type === "double?" ||
node.type === "int" ||
node.type === "int?" ||
node.type === "float" ||
node.type === "float?"
) {
return node.type;
}
};
const parseType = (
node: t.Node,
typeDeclarations: { [key: string]: (t.TSEnumMember | t.TSTypeElement)[] },
meta?: MetaTypeNode,
required?: boolean
): TypeNode | undefined => {
if (meta && meta.type === "ignore") return;
const base = { required };
const parse = (n: t.Node, m?: MetaTypeNode, r?: boolean) =>
parseType(n, typeDeclarations, m, r);
if (t.isTSAnyKeyword(node)) {
return { ...base, type: "any" };
} else if (t.isTSArrayType(node)) {
const type = parse(node.elementType, getListMeta(meta));
return type ? { ...base, type: "list", elementType: type } : undefined;
} else if (t.isTSBooleanKeyword(node)) {
return { ...base, type: "bool" };
} else if (t.isTSFunctionType(node)) {
return undefined;
} else if (t.isTSNumberKeyword(node)) {
const type = getNumberFromMeta(meta) || "int";
return { ...base, type };
} else if (t.isTSStringKeyword(node)) {
return { ...base, type: "string" };
} else if (t.isTSTypeLiteral(node)) {
const childMeta = getChildMeta(meta);
const type = parseTypes(node.members, typeDeclarations, childMeta);
return type ? { ...base, type: "object", children: type } : undefined;
} else if (t.isTSParenthesizedType(node)) {
return parse(node.typeAnnotation, meta);
} else if (t.isTSTypeReference(node)) {
const name = getDefinitionName(node.typeName);
const types = typeDeclarations[name];
if (typesToStrip.includes(name)) return;
// When type isn't defined in file
if (!types) return { ...base, type: "ref", ref: name };
if (some(types, t.isTSEnumMember)) {
const children = filter(
types.map(n => n.initializer),
t.isStringLiteral
).map(node => ({ key: node.value, value: node.value }));
return { ...base, type: "enum", children };
}
throw new Error(
`Unable to resolve type '${name}'. This might be a bug! Consider reporting the issue on GitHub! :)`
);
}
throw new Error(`Type '${typeNodeName(node)}' is not supported.`);
};
/**
 * Codec converters for data transported internally by Eureka.
 * Supports XML and JSON formats.
 */
package com.netflix.discovery.converters;
def DictStartVisit(self, obj , topology_model):
output_dir = os.environ["DICT_DIR"] + "/commands"
if not (os.path.isdir(output_dir)):
os.makedirs(output_dir)
init_file = output_dir + os.sep + "__init__.py"
open(init_file, "w+")
try:
instance_obj_list = topology_model.get_base_id_dict()[obj.get_component_base_name()]
except Exception:
if type(obj) == Parameter.Parameter or type(obj) == Command.Command:
PRINT.info("ERROR: Could not find instance object for component " + obj.get_component_base_name() + ". Check topology model to see if the component was instanced.")
else:
PRINT.info("ERROR: Could not find instance object for the current component and the current component is not of Parameter or Command type, which are the only two supported command dictionary generation types. Check everything!")
raise
if type(obj) is Command.Command:
self.__fp1 = {}
for instance_obj in instance_obj_list:
if instance_obj[3].get_dict_short_name() != None:
fname = "{}_{}".format(instance_obj[3].get_dict_short_name() , obj.get_mnemonic())
elif not topology_model.get_prepend_instance_name() and len(instance_obj_list) == 1:
fname = obj.get_mnemonic()
else:
fname = "{}_{}".format(instance_obj[0] , obj.get_mnemonic())
pyfile = "{}/{}.py".format(output_dir , fname)
DEBUG.info('Open file: {}'.format(pyfile))
fd = open(pyfile,'w')
if fd == None:
raise Exception("Could not open {} file.".format(pyfile))
DEBUG.info('Completed {} open'.format(pyfile))
self.__fp1[fname] = fd
elif type(obj) is Parameter.Parameter:
self.__fp1 = {}
self.__fp2 = {}
self.__stem = obj.get_name().upper()
for instance_obj in instance_obj_list:
if instance_obj[3].get_dict_short_name() != None:
fname = "{}_{}".format(instance_obj[3].get_dict_short_name() , self.__stem)
elif not topology_model.get_prepend_instance_name() and len(instance_obj_list) == 1:
fname = self.__stem
else:
fname = "{}_{}".format(instance_obj[0] , self.__stem)
pyfile = "{}/{}_PRM_SET.py".format(output_dir,fname)
DEBUG.info('Open file: {}'.format(pyfile))
fd = open(pyfile,'w')
if fd == None:
raise Exception("Could not open {} file.".format(pyfile))
self.__fp1[fname] = fd
DEBUG.info('Completed {} open'.format(pyfile))
pyfile = "{}/{}_PRM_SAVE.py".format(output_dir,fname)
DEBUG.info('Open file: {}'.format(pyfile))
fd = open(pyfile,'w')
if fd == None:
raise Exception("Could not open {} file.".format(pyfile))
self.__fp2[fname] = fd
DEBUG.info('Completed {} open'.format(pyfile))
else:
print(("Invalid type {}".format(obj)))
        sys.exit(-1)
// Repository: abagusetty/Uintah
/*
* The MIT License
*
* Copyright (c) 1997-2021 The University of Utah
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <CCA/Components/Models/MultiMatlExchange/Slip.h>
#include <CCA/Components/ICE/CustomBCs/BoundaryCond.h>
#include <CCA/Components/ICE/Materials/ICEMaterial.h>
#include <CCA/Components/MPM/Materials/MPMMaterial.h>
#include <CCA/Ports/Scheduler.h>
#include <Core/Exceptions/ProblemSetupException.h>
#include <Core/Grid/Material.h>
#include <Core/Grid/MaterialManager.h>
#include <Core/Grid/MaterialManagerP.h>
#include <Core/Grid/Variables/ComputeSet.h>
#include <Core/Grid/Variables/SFCXVariable.h>
#include <Core/Grid/Variables/SFCYVariable.h>
#include <Core/Grid/Variables/SFCZVariable.h>
#include <Core/Grid/Variables/VarTypes.h>
#include <Core/ProblemSpec/ProblemSpec.h>
#include <ostream> // for operator<<, basic_ostream
#include <vector>
#define MAX_MATLS 8
using namespace Uintah;
using namespace ExchangeModels;
using namespace std;
extern DebugStream dbgExch;
//______________________________________________________________________
//
SlipExch::SlipExch(const ProblemSpecP & exch_ps,
const MaterialManagerP & materialManager,
const bool with_mpm )
: ExchangeModel( exch_ps, materialManager, with_mpm )
{
proc0cout << "__________________________________\n";
proc0cout << " Now creating the Slip Exchange model " << endl;
d_exchCoeff = scinew ExchangeCoefficients();
d_vel_CCTransLabel = VarLabel::create("vel_CCTransposed", CCVariable<Vector>::getTypeDescription());
d_meanFreePathLabel = VarLabel::create("meanFreePath", CCVariable<double>::getTypeDescription());
}
//______________________________________________________________________
//
SlipExch::~SlipExch()
{
delete d_exchCoeff;
VarLabel::destroy( d_vel_CCTransLabel );
VarLabel::destroy( d_meanFreePathLabel );
}
//______________________________________________________________________
//
void SlipExch::problemSetup(const ProblemSpecP & matl_ps)
{
// read in the exchange coefficients
ProblemSpecP exch_ps;
d_exchCoeff->problemSetup( matl_ps, d_numMatls, exch_ps );
ProblemSpecP model_ps = exch_ps->findBlock( "Model" );
model_ps->require("fluidMatlIndex", d_fluidMatlIndx);
model_ps->require("solidMatlIndex", d_solidMatlIndx);
model_ps->require("momentum_accommodation_coeff", d_momentum_accommodation_coeff);
model_ps->require("thermal_accommodation_coeff", d_thermal_accommodation_coeff);
model_ps->get( "useSlipCoeffs", d_useSlipCoeffs );
proc0cout << " fluidMatlIndex: " << d_fluidMatlIndx << " thermal_accommodation_coeff " << d_thermal_accommodation_coeff << endl;
// d_exchCoeff->problemSetup(mat_ps, d_matlManager);
}
//______________________________________________________________________
//
void SlipExch::outputProblemSpec(ProblemSpecP & matl_ps )
{
ProblemSpecP exch_prop_ps;
d_exchCoeff->outputProblemSpec(matl_ps, exch_prop_ps);
// <Model type="slip">
ProblemSpecP model_ps = exch_prop_ps->appendChild("Model");
model_ps->setAttribute("type","slip");
model_ps->appendElement( "fluidMatlIndex", d_fluidMatlIndx );
model_ps->appendElement( "solidMatlIndex", d_solidMatlIndx );
model_ps->appendElement( "thermal_accommodation_coeff", d_thermal_accommodation_coeff);
model_ps->appendElement( "momentum_accommodation_coeff", d_momentum_accommodation_coeff);
}
//______________________________________________________________________
// These tasks are called before semi-implicit pressure solve.
// All computed variables live in the parent NewDW
void SlipExch::sched_PreExchangeTasks(SchedulerP & sched,
const PatchSet * patches,
const MaterialSubset * ice_matls,
const MaterialSubset * mpm_matls,
const MaterialSet * allMatls)
{
//__________________________________
// compute surface normal and isSurfaceCell
schedComputeSurfaceNormal( sched, patches, mpm_matls );
//__________________________________
// compute Mean Free Path
schedComputeMeanFreePath( sched, patches );
}
//______________________________________________________________________
// This method requires variables from inside the semi-implicit pressure
// solve sub-scheduler. Put variables that are required from the
// Parent OldDW and NewDW
void SlipExch::addExchangeModelRequires ( Task* t,
const MaterialSubset * zeroMatl,
const MaterialSubset * ice_matls,
const MaterialSubset * mpm_matls)
{
Ghost::GhostType gac = Ghost::AroundCells;
t->requires( Task::NewDW, d_meanFreePathLabel, ice_matls, gac, 1 );
t->requires( Task::NewDW, d_isSurfaceCellLabel, zeroMatl, gac, 1 );
t->requires( Task::NewDW, d_surfaceNormLabel, mpm_matls, gac, 1 );
}
//______________________________________________________________________
//
void SlipExch::sched_AddExch_VelFC(SchedulerP & sched,
const PatchSet * patches,
const MaterialSubset * ice_matls,
const MaterialSubset * mpm_matls,
const MaterialSet * all_matls,
customBC_globalVars * BC_globalVars,
const bool recursion)
{
//__________________________________
//
Task* t = scinew Task( "SlipExch::addExch_VelFC", this, &SlipExch::addExch_VelFC,
BC_globalVars, recursion);
printSchedule( patches, dbgExch, "SlipExch::sched_AddExch_VelFC" );
if(recursion) {
t->requires(Task::ParentOldDW, Ilb->delTLabel,getLevel(patches));
} else {
t->requires(Task::OldDW, Ilb->delTLabel,getLevel(patches));
}
Ghost::GhostType gac = Ghost::AroundCells;
Ghost::GhostType gaf_X = Ghost::AroundFacesX;
Ghost::GhostType gaf_Y = Ghost::AroundFacesY;
Ghost::GhostType gaf_Z = Ghost::AroundFacesZ;
//__________________________________
// define parent data warehouse
// change the definition of parent(old/new)DW
// when using semi-implicit pressure solve
Task::WhichDW pNewDW = Task::NewDW;
Task::WhichDW pOldDW = Task::OldDW;
if(recursion) {
pNewDW = Task::ParentNewDW;
pOldDW = Task::ParentOldDW;
}
// All matls
t->requires( pNewDW, Ilb->rho_CCLabel, gac, 1);
t->requires( pNewDW, Ilb->sp_vol_CCLabel, gac, 1);
t->requires( pNewDW, Ilb->vol_frac_CCLabel,gac, 1);
t->requires( Task::NewDW, Ilb->uvel_FCLabel, gaf_X, 1);
t->requires( Task::NewDW, Ilb->vvel_FCLabel, gaf_Y, 1);
t->requires( Task::NewDW, Ilb->wvel_FCLabel, gaf_Z, 1);
t->requires( pNewDW, d_meanFreePathLabel, ice_matls, gac, 1 );
t->requires( pNewDW, d_surfaceNormLabel, mpm_matls, gac, 1 );
t->requires( pNewDW, d_isSurfaceCellLabel, d_zero_matl,gac, 1 );
t->requires( pOldDW, Ilb->vel_CCLabel, ice_matls, gac, 1 );
t->requires( pNewDW, Ilb->vel_CCLabel, mpm_matls, gac, 1 );
computesRequires_CustomBCs(t, "velFC_Exchange", Ilb, ice_matls,
BC_globalVars, recursion);
t->computes( Ilb->sp_volX_FCLabel );
t->computes( Ilb->sp_volY_FCLabel );
t->computes( Ilb->sp_volZ_FCLabel );
t->computes( Ilb->uvel_FCMELabel );
t->computes( Ilb->vvel_FCMELabel );
t->computes( Ilb->wvel_FCMELabel );
sched->addTask(t, patches, all_matls);
}
/* _____________________________________________________________________
Purpose~ Add the exchange contribution to vel_FC and compute
sp_vol_FC for implicit Pressure solve
_____________________________________________________________________*/
template<class constSFC, class SFC>
void SlipExch::vel_FC_exchange( CellIterator iter,
const Patch * patch,
const IntVector adj_offset,
const double delT,
constCCVariable<int> & isSurfaceCell,
std::vector<constCCVariable<Vector> >& surfaceNorm,
std::vector<constCCVariable<double> >& meanFreePath,
std::vector<constCCVariable<double> >& vol_frac_CC,
std::vector<constCCVariable<double> >& sp_vol_CC,
std::vector< constSFC> & vel_FC,
std::vector< SFC > & sp_vol_FC,
std::vector< SFC > & vel_FCME)
{
double b[MAX_MATLS];
FastMatrix Q(3,3);
double b_sp_vol[MAX_MATLS];
double vel[MAX_MATLS];
double tmp[MAX_MATLS];
FastMatrix a(d_numMatls, d_numMatls);
int gm = d_fluidMatlIndx;
int sm = d_solidMatlIndx;
Vector dx = patch->dCell();
//__________________________________
//
for(;!iter.done(); iter++){
IntVector c = *iter;
IntVector adj = c + adj_offset;
double K_R = 1e15;
double K_L = 1e15;
// Q.identity(); // Transformation matrix is initialized
/***** Compute K value at current cell [c] (R) *****/
if( isSurfaceCell[c] && d_useSlipCoeffs ){
//__________________________________
// <start> This should be put in a function, since it's a duplicate of the code below except for the cell index --Todd
Q.identity();
computeSurfaceRotationMatrix(Q, surfaceNorm[sm][c]); // Makes Q at each cell c
double A_V = 1.0/( dx.x() * fabs(Q(1,0))+
dx.y() * fabs(Q(1,1))+
dx.z() * fabs(Q(1,2)) );
double av = d_momentum_accommodation_coeff;
double Beta_v = (2 - av)/av;
K_R = A_V / (Beta_v * meanFreePath[gm][c] * vol_frac_CC[sm][c]);
if(K_R > 1e15) {
K_R = 1e15; // K > Kslip in reality, so if Kslip > K in computation, fix this.
}
// < end>
//__________________________________
} // if a surface cell
/***** Compute K value at adjacent cell [adj] (L) *****/
if( isSurfaceCell[adj] && d_useSlipCoeffs ){
Q.identity();
      // This should work for more than one solid. It's hard wired for 2 matls-- Todd
computeSurfaceRotationMatrix(Q, surfaceNorm[sm][adj]); // Makes Q at each cell c
double A_V = 1.0/( dx.x() * fabs(Q(1,0))+
dx.y() * fabs(Q(1,1))+
dx.z() * fabs(Q(1,2)) );
double av = d_momentum_accommodation_coeff;
double Beta_v = (2 - av)/av;
K_L = A_V / (Beta_v * meanFreePath[gm][adj] * vol_frac_CC[sm][adj]);
if(K_L > 1e15) {
K_L = 1e15; // K > Kslip in reality, so if Kslip > K in computation, fix this.
}
} // if a surface cell
//__________________________________
// Compute beta and off diagonal term of
// Matrix A, this includes b[m][m].
// You need to make sure that mom_exch_coeff[m][m] = 0
// - Form diagonal terms of Matrix (A)
// - Form RHS (b)
for(int m = 0; m < d_numMatls; m++) {
b_sp_vol[m] = 2.0 * (sp_vol_CC[m][adj] * sp_vol_CC[m][c])/
(sp_vol_CC[m][adj] + sp_vol_CC[m][c]);
tmp[m] = -0.5 * delT * (vol_frac_CC[m][adj]*K_L + vol_frac_CC[m][c]*K_R);
vel[m] = vel_FC[m][c];
}
for(int m = 0; m < d_numMatls; m++) {
double betasum = 1;
double bsum = 0;
double bm = b_sp_vol[m];
double vm = vel[m];
for(int n = 0; n < d_numMatls; n++) {
if ( n!=m ) {
double b = bm * tmp[n];
a(m,n) = b;
betasum -= b;
bsum -= b * (vel[n] - vm);
}
else{
double b = 0;
a(m,n) = b;
betasum -= b;
bsum -= b * (vel[n] - vm);
}
}
a(m,m) = betasum;
b[m] = bsum;
}
//__________________________________
// - solve and backout velocities
a.destructiveSolve(b, b_sp_vol);
// For implicit solve we need sp_vol_FC
for(int m = 0; m < d_numMatls; m++) {
vel_FCME[m][c] = vel_FC[m][c] + b[m];
sp_vol_FC[m][c] = b_sp_vol[m]; // only needed by implicit Pressure
}
} // iterator
}
//______________________________________________________________________
//
void SlipExch::addExch_VelFC(const ProcessorGroup * pg,
const PatchSubset * patches,
const MaterialSubset * matls,
DataWarehouse * old_dw,
DataWarehouse * new_dw,
customBC_globalVars * BC_globalVars,
const bool recursion)
{
const Level* level = getLevel(patches);
for(int p=0;p<patches->size();p++){
const Patch* patch = patches->get(p);
printTask(patches, patch, dbgExch, "Doing SlipExch::addExch_VelFC" );
// change the definition of parent(old/new)DW
// if using semi-implicit pressure solve
DataWarehouse* pNewDW;
DataWarehouse* pOldDW;
if(recursion) {
pNewDW = new_dw->getOtherDataWarehouse(Task::ParentNewDW);
pOldDW = new_dw->getOtherDataWarehouse(Task::ParentOldDW);
} else {
pNewDW = new_dw;
pOldDW = old_dw;
}
delt_vartype delT;
pOldDW->get(delT, Ilb->delTLabel, level);
constCCVariable<int> isSurfaceCell;
std::vector< constCCVariable<double> > sp_vol_CC( d_numMatls );
std::vector< constCCVariable<double> > mass_L( d_numMatls );
std::vector< constCCVariable<double> > vol_frac_CC( d_numMatls );
std::vector< constCCVariable<double> > meanFreePath( d_numMatls );
std::vector< constCCVariable<Vector> > vel_CC( d_numMatls );
std::vector< constCCVariable<Vector> > surfaceNorm( d_numMatls );
std::vector< constSFCXVariable<double> > uvel_FC( d_numMatls );
std::vector< constSFCYVariable<double> > vvel_FC( d_numMatls );
std::vector< constSFCZVariable<double> > wvel_FC( d_numMatls );
std::vector< SFCXVariable<double> >uvel_FCME( d_numMatls ), sp_vol_XFC( d_numMatls );
std::vector< SFCYVariable<double> >vvel_FCME( d_numMatls ), sp_vol_YFC( d_numMatls );
std::vector< SFCZVariable<double> >wvel_FCME( d_numMatls ), sp_vol_ZFC( d_numMatls );
Ghost::GhostType gac = Ghost::AroundCells;
Ghost::GhostType gaf_X = Ghost::AroundFacesX;
Ghost::GhostType gaf_Y = Ghost::AroundFacesY;
Ghost::GhostType gaf_Z = Ghost::AroundFacesZ;
pNewDW->get( isSurfaceCell, d_isSurfaceCellLabel, 0, patch, gac, 1);
//__________________________________
// Multimaterial arrays
for(int m = 0; m < d_numMatls; m++) {
Material* matl = d_matlManager->getMaterial( m );
ICEMaterial* ice_matl = dynamic_cast<ICEMaterial*>(matl);
MPMMaterial* mpm_matl = dynamic_cast<MPMMaterial*>(matl);
int indx = matl->getDWIndex();
      // retrieve from dw
pNewDW->get( sp_vol_CC[m], Ilb->sp_vol_CCLabel, indx, patch,gac, 1);
pNewDW->get( vol_frac_CC[m], Ilb->vol_frac_CCLabel,indx, patch,gac, 1);
new_dw->get( uvel_FC[m], Ilb->uvel_FCLabel, indx, patch,gaf_X, 1);
new_dw->get( vvel_FC[m], Ilb->vvel_FCLabel, indx, patch,gaf_Y, 1);
new_dw->get( wvel_FC[m], Ilb->wvel_FCLabel, indx, patch,gaf_Z, 1);
if(mpm_matl) {
pNewDW->get( vel_CC[m], Ilb->vel_CCLabel, indx, patch,gac, 1);
pNewDW->get( surfaceNorm[m], d_surfaceNormLabel, indx, patch,gac, 1);
}
if(ice_matl) {
pOldDW->get( vel_CC[m], Ilb->vel_CCLabel, indx, patch,gac, 1);
pNewDW->get( meanFreePath[m], d_meanFreePathLabel, indx, patch,gac, 1);
}
new_dw->allocateAndPut( uvel_FCME[m], Ilb->uvel_FCMELabel, indx, patch );
new_dw->allocateAndPut( vvel_FCME[m], Ilb->vvel_FCMELabel, indx, patch );
new_dw->allocateAndPut( wvel_FCME[m], Ilb->wvel_FCMELabel, indx, patch );
new_dw->allocateAndPut( sp_vol_XFC[m],Ilb->sp_volX_FCLabel,indx, patch );
new_dw->allocateAndPut( sp_vol_YFC[m],Ilb->sp_volY_FCLabel,indx, patch );
new_dw->allocateAndPut( sp_vol_ZFC[m],Ilb->sp_volZ_FCLabel,indx, patch );
// lowIndex is the same for all face centered vars
IntVector lowIndex(patch->getExtraSFCXLowIndex());
uvel_FCME[m].initialize(0.0, lowIndex,patch->getExtraSFCXHighIndex() );
vvel_FCME[m].initialize(0.0, lowIndex,patch->getExtraSFCYHighIndex() );
wvel_FCME[m].initialize(0.0, lowIndex,patch->getExtraSFCZHighIndex() );
sp_vol_XFC[m].initialize(0.0, lowIndex,patch->getExtraSFCXHighIndex() );
sp_vol_YFC[m].initialize(0.0, lowIndex,patch->getExtraSFCYHighIndex() );
sp_vol_ZFC[m].initialize(0.0, lowIndex,patch->getExtraSFCZHighIndex() );
}
vector<IntVector> adj_offset(3);
adj_offset[0] = IntVector(-1, 0, 0); // X faces
adj_offset[1] = IntVector(0, -1, 0); // Y faces
adj_offset[2] = IntVector(0, 0, -1); // Z faces
CellIterator XFC_iterator = patch->getSFCXIterator();
CellIterator YFC_iterator = patch->getSFCYIterator();
CellIterator ZFC_iterator = patch->getSFCZIterator();
//__________________________________
// tack on exchange contribution to FC velocities
vel_FC_exchange<constSFCXVariable<double>, SFCXVariable<double> >
(XFC_iterator, patch, adj_offset[0], delT, isSurfaceCell,
surfaceNorm, meanFreePath, vol_frac_CC, sp_vol_CC, uvel_FC, sp_vol_XFC, uvel_FCME);
vel_FC_exchange<constSFCYVariable<double>, SFCYVariable<double> >
(YFC_iterator, patch, adj_offset[1], delT, isSurfaceCell,
surfaceNorm, meanFreePath, vol_frac_CC, sp_vol_CC, vvel_FC, sp_vol_YFC, vvel_FCME);
vel_FC_exchange<constSFCZVariable<double>, SFCZVariable<double> >
(ZFC_iterator, patch, adj_offset[2], delT, isSurfaceCell,
surfaceNorm, meanFreePath, vol_frac_CC, sp_vol_CC, wvel_FC, sp_vol_ZFC, wvel_FCME);
//________________________________
    //  Boundary Conditions
for (int m = 0; m < d_numMatls; m++) {
Material* matl = d_matlManager->getMaterial( m );
int indx = matl->getDWIndex();
customBC_localVars* BC_localVars = scinew customBC_localVars();
BC_localVars->recursiveTask = recursion;
preprocess_CustomBCs("velFC_Exchange",pOldDW, pNewDW, Ilb, patch, indx,
BC_globalVars, BC_localVars);
setBC<SFCXVariable<double> >(uvel_FCME[m], "Velocity", patch, indx,
d_matlManager, BC_globalVars, BC_localVars);
setBC<SFCYVariable<double> >(vvel_FCME[m], "Velocity", patch, indx,
d_matlManager, BC_globalVars, BC_localVars);
setBC<SFCZVariable<double> >(wvel_FCME[m], "Velocity", patch, indx,
d_matlManager, BC_globalVars, BC_localVars);
delete_CustomBCs( BC_globalVars, BC_localVars );
}
} // patch loop
}
//______________________________________________________________________
// This method computes the cell centered exchange
void SlipExch::vel_CC_exchange( CellIterator iter,
const Patch * patch,
FastMatrix & k_org,
const double delT,
constCCVariable<int> & isSurfaceCell,
std::vector< constCCVariable<Vector> > & surfaceNorm,
std::vector< constCCVariable<double> > & vol_frac_CC,
std::vector< constCCVariable<double> > & sp_vol_CC,
std::vector< constCCVariable<double> > & meanFreePath,
std::vector< constCCVariable<Vector> > & vel_CC,
std::vector< CCVariable<Vector> > & vel_T_CC,
std::vector< CCVariable<Vector> > & delta_vel_exch)
{
double b[MAX_MATLS];
Vector bb[MAX_MATLS];
double tmp;
FastMatrix beta( d_numMatls, d_numMatls );
FastMatrix Q(3,3);
FastMatrix K( d_numMatls, d_numMatls );
FastMatrix Kslip( d_numMatls, d_numMatls );
FastMatrix junk( d_numMatls, d_numMatls );
FastMatrix a( d_numMatls, d_numMatls );
Vector vel_T[MAX_MATLS]; // Transposed velocity
  Vector vel_T_dbg[MAX_MATLS];          // Transposed velocity for visualization
// for readability
int gm = d_fluidMatlIndx;
int sm = d_solidMatlIndx;
Vector dx = patch->dCell();
for(;!iter.done(); iter++){
IntVector c = *iter;
Q.identity(); // Transformation matrix is initialized
Kslip.copy(k_org);
K.copy(k_org);
//__________________________________
// If cell is a surface cell modify exchange coefficients
if( isSurfaceCell[c] && d_useSlipCoeffs ){
//************************This looks almost identical to the code in vel_FC_exchange. Should consider putting in a function --Todd.
computeSurfaceRotationMatrix(Q, surfaceNorm[sm][c]); // Makes Q at each cell c
double A_V = 1.0/( dx.x()*fabs(Q(1,0)) +
dx.y()*fabs(Q(1,1)) +
dx.z()*fabs(Q(1,2)) );
double av = d_momentum_accommodation_coeff;
double Beta_v = (2 - av)/av;
Kslip(gm,sm) = A_V / (Beta_v * meanFreePath[gm][c] * vol_frac_CC[sm][c]);
if(Kslip(gm,sm) > k_org(gm,sm)) {
Kslip(gm,sm) = k_org(gm,sm); // K > Kslip in reality, so if Kslip > K in computation, fix this.
}
Kslip(sm,gm) = Kslip(gm,sm); // Make the inverse indices of the Kslip matrix equal to each other
//__________________________________
//
for(int i = 0; i < 3; i++) {
if(i != 1) { // if the direction is NOT the normal to the surface
K.copy(Kslip);
} else {
K.copy(k_org);
}
//__________________________________
// coordinate Transformation
for(int m = 0; m < d_numMatls; m++) {
vel_T[m][i] = 0;
vel_T_dbg[m][i] = 0;
for(int j = 0; j < 3; j++) {
vel_T[m][i] += Q(i,j) * vel_CC[m][c][j];
}
vel_T_dbg[m][i] = vel_T[m][i];
}
//__________________________________
// compute exchange using transposed velocity
a.zero();
for(int m = 0; m < d_numMatls; m++) {
double adiag = 1.0;
b[m] = 0.0;
for(int n = 0; n < d_numMatls; n++) {
a(m,n) = - delT * vol_frac_CC[n][c] * sp_vol_CC[m][c] * K(n,m);
adiag -= a(m,n);
b[m] -= a(m,n) * (vel_T[n][i] - vel_T[m][i]);
}
a(m,m) = adiag;
}
a.destructiveSolve(b);
for(int m = 0; m < d_numMatls; m++) {
vel_T[m][i] = b[m];
}
} // loop over directions
//__________________________________
// coordinate transformation
for(int m = 0; m < d_numMatls; m++) {
vel_T_CC[m][c] = vel_T_dbg[m]; // for visualization
Vector vel_exch( Vector(0.0) );
for(int i = 0; i < 3; i++) {
for(int j = 0; j < 3; j++) {
vel_exch[i] += Q(j,i) * vel_T[m][j]; // Use the transpose of Q to back out the velocity in the cartesian system
}
}
delta_vel_exch[m][c] = vel_exch;
}
} // if a surface cell
else{
a.zero();
beta.zero();
for(int m = 0; m < d_numMatls; m++) {
tmp = delT*sp_vol_CC[m][c];
for(int n = 0; n < d_numMatls; n++) {
beta(m,n) = vol_frac_CC[n][c] * K(n,m) * tmp;
a(m,n) = -beta(m,n);
}
}
// Form matrix (a) diagonal terms
for(int m = 0; m < d_numMatls; m++) {
a(m,m) = 1.0;
for(int n = 0; n < d_numMatls; n++) {
a(m,m) += beta(m,n);
}
}
for(int m = 0; m < d_numMatls; m++) {
Vector sum(0,0,0);
const Vector& vel_m = vel_CC[m][c];
for(int n = 0; n < d_numMatls; n++) {
sum += beta(m,n) *(vel_CC[n][c] - vel_m);
}
bb[m] = sum;
}
a.destructiveSolve(bb);
//__________________________________
// save exchange contribution of each material
for(int m = 0; m < d_numMatls; m++) {
delta_vel_exch[m][c] = bb[m];
}
}
}
}
//______________________________________________________________________
//
void SlipExch::sched_AddExch_Vel_Temp_CC(SchedulerP & sched,
const PatchSet * patches,
const MaterialSubset * ice_matls,
const MaterialSubset * mpm_matls,
const MaterialSet * all_matls,
customBC_globalVars * BC_globalVars)
{
//__________________________________
//
string name = "SlipExch::addExch_Vel_Temp_CC";
Task* t = scinew Task(name, this, &SlipExch::addExch_Vel_Temp_CC, BC_globalVars);
printSchedule( patches, dbgExch, name );
Ghost::GhostType gn = Ghost::None;
t->requires( Task::OldDW, Ilb->delTLabel,getLevel(patches));
t->requires( Task::NewDW, d_surfaceNormLabel, mpm_matls, gn, 0 );
t->requires( Task::NewDW, d_isSurfaceCellLabel, d_zero_matl, gn, 0 );
// I C E
t->requires( Task::OldDW, Ilb->temp_CCLabel, ice_matls, gn );
t->requires( Task::NewDW, Ilb->specific_heatLabel, ice_matls, gn );
t->requires( Task::NewDW, Ilb->gammaLabel, ice_matls, gn );
t->requires( Task::NewDW, d_meanFreePathLabel, ice_matls, gn );
// A L L M A T L S
t->requires( Task::NewDW, Ilb->mass_L_CCLabel, gn );
t->requires( Task::NewDW, Ilb->mom_L_CCLabel, gn );
t->requires( Task::NewDW, Ilb->int_eng_L_CCLabel, gn );
t->requires( Task::NewDW, Ilb->sp_vol_CCLabel, gn );
t->requires( Task::NewDW, Ilb->vol_frac_CCLabel, gn );
computesRequires_CustomBCs(t, "CC_Exchange", Ilb, ice_matls, BC_globalVars);
t->computes( Ilb->Tdot_CCLabel );
t->computes( Ilb->mom_L_ME_CCLabel );
t->computes( Ilb->eng_L_ME_CCLabel );
t->computes( d_vel_CCTransLabel );
t->modifies( Ilb->temp_CCLabel, mpm_matls );
t->modifies( Ilb->vel_CCLabel, mpm_matls );
sched->addTask(t, patches, all_matls);
}
//______________________________________________________________________
//
void SlipExch::addExch_Vel_Temp_CC(const ProcessorGroup * pg,
const PatchSubset * patches,
const MaterialSubset * matls,
DataWarehouse * old_dw,
DataWarehouse * new_dw,
customBC_globalVars * BC_globalVars)
{
timeStep_vartype timeStep;
old_dw->get(timeStep, Ilb->timeStepLabel);
bool isNotInitialTimeStep = (timeStep > 0);
const Level* level = getLevel(patches);
for(int p=0;p<patches->size();p++){
const Patch* patch = patches->get(p);
printTask(patches, patch, dbgExch,"Doing SlipExch::addExch_Vel_Temp_CC");
//__________________________________
// Declare variables
constCCVariable<int> isSurfaceCell;
std::vector< CCVariable<double> > cv( d_numMatls );
std::vector< CCVariable<double> > Temp_CC( d_numMatls );
std::vector< constCCVariable<double> > gamma( d_numMatls );
std::vector< constCCVariable<double> > vol_frac_CC( d_numMatls );
std::vector< constCCVariable<double> > sp_vol_CC( d_numMatls );
std::vector< constCCVariable<Vector> > mom_L( d_numMatls );
std::vector< constCCVariable<double> > int_eng_L( d_numMatls );
std::vector< constCCVariable<double> > mass_L( d_numMatls );
std::vector< constCCVariable<double> > old_temp( d_numMatls );
std::vector< constCCVariable<double> > meanFreePath( d_numMatls ); // This mean free path does not have viscosity in it, which is okay per how it is used in this code
std::vector< constCCVariable<Vector> > surfaceNorm( d_numMatls );
std::vector< constCCVariable<Vector> > const_vel_CC( d_numMatls );
// Create variables for the results
std::vector< CCVariable<Vector> > mom_L_ME( d_numMatls );
std::vector< CCVariable<Vector> > vel_CC( d_numMatls );
std::vector< CCVariable<double> > int_eng_L_ME( d_numMatls );
std::vector< CCVariable<double> > Tdot( d_numMatls );
std::vector< CCVariable<Vector> > vel_T_CC( d_numMatls );
std::vector< CCVariable<Vector> > delta_vel_exch(d_numMatls );
//__________________________________
    // retrieve data from the data warehouse
delt_vartype delT;
old_dw->get(delT, Ilb->delTLabel, level);
Ghost::GhostType gn = Ghost::None;
new_dw->get( isSurfaceCell, d_isSurfaceCellLabel, 0, patch, gn, 0);
for (int m = 0; m < d_numMatls; m++) {
Material* matl = d_matlManager->getMaterial( m );
ICEMaterial* ice_matl = dynamic_cast<ICEMaterial*>(matl);
MPMMaterial* mpm_matl = dynamic_cast<MPMMaterial*>(matl);
int indx = matl->getDWIndex();
new_dw->allocateTemporary(cv[m], patch);
if(mpm_matl){ // M P M
new_dw->get( surfaceNorm[m], d_surfaceNormLabel, indx, patch, gn, 0);
CCVariable<double> oldTempMPM;
new_dw->getCopy( oldTempMPM, Ilb->temp_CCLabel,indx, patch, gn,0 );
new_dw->getModifiable( vel_CC[m], Ilb->vel_CCLabel, indx, patch, gn,0 );
new_dw->getModifiable( Temp_CC[m], Ilb->temp_CCLabel,indx, patch, gn,0 );
old_temp[m] = oldTempMPM;
cv[m].initialize(mpm_matl->getSpecificHeat());
}
if(ice_matl){ // I C E
constCCVariable<double> cv_ice;
old_dw->get( old_temp[m], Ilb->temp_CCLabel, indx, patch, gn, 0 );
new_dw->get( cv_ice, Ilb->specific_heatLabel,indx, patch, gn, 0 );
new_dw->get( gamma[m], Ilb->gammaLabel, indx, patch, gn, 0 );
new_dw->get( meanFreePath[m], d_meanFreePathLabel, indx, patch, gn, 0 );
new_dw->allocateTemporary( vel_CC[m], patch );
new_dw->allocateTemporary( Temp_CC[m], patch );
cv[m].copyData(cv_ice);
}
// A L L M A T L S
new_dw->get( mass_L[m], Ilb->mass_L_CCLabel, indx, patch, gn, 0 );
new_dw->get( sp_vol_CC[m], Ilb->sp_vol_CCLabel, indx, patch, gn, 0 );
new_dw->get( mom_L[m], Ilb->mom_L_CCLabel, indx, patch, gn, 0 );
new_dw->get( int_eng_L[m], Ilb->int_eng_L_CCLabel,indx, patch, gn, 0 );
new_dw->get( vol_frac_CC[m], Ilb->vol_frac_CCLabel, indx, patch, gn, 0 );
new_dw->allocateAndPut( Tdot[m], Ilb->Tdot_CCLabel, indx, patch );
new_dw->allocateAndPut( mom_L_ME[m], Ilb->mom_L_ME_CCLabel,indx, patch );
new_dw->allocateAndPut( int_eng_L_ME[m], Ilb->eng_L_ME_CCLabel,indx, patch );
new_dw->allocateAndPut( vel_T_CC[m], d_vel_CCTransLabel, indx, patch );
vel_T_CC[m].initialize( Vector(0,0,0) ); // diagnostic variable
new_dw->allocateTemporary( delta_vel_exch[m], patch );
delta_vel_exch[m].initialize( Vector(0,0,0) );
}
//__________________________________
// Convert momenta to velocities and internal energy to Temp
for (int m = 0; m < d_numMatls; m++) {
for(CellIterator iter = patch->getExtraCellIterator(); !iter.done();iter++){
IntVector c = *iter;
Temp_CC[m][c] = int_eng_L[m][c]/(mass_L[m][c]*cv[m][c]);
vel_CC[m][c] = mom_L[m][c]/mass_L[m][c];
}
const_vel_CC[m] = vel_CC[m];
}
//__________________________________
// declare local variables
double b[MAX_MATLS];
FastMatrix beta(d_numMatls, d_numMatls);
FastMatrix Q(3,3);
FastMatrix a(d_numMatls, d_numMatls);
FastMatrix K(d_numMatls, d_numMatls);
FastMatrix h(d_numMatls, d_numMatls);
FastMatrix H(d_numMatls, d_numMatls);
// Initialize
K.zero();
h.zero();
H.zero();
d_exchCoeff->getConstantExchangeCoeff( K,h );
//__________________________________
// compute the change in CC velocity due to exchange
CellIterator cell_iterator = patch->getCellIterator();
vel_CC_exchange( cell_iterator, patch, K, delT,
isSurfaceCell, surfaceNorm, vol_frac_CC, sp_vol_CC, /*mass_L,*/
meanFreePath, const_vel_CC, vel_T_CC, delta_vel_exch);
// update the velocity
for (int m = 0; m < d_numMatls; m++) {
for(CellIterator iter = patch->getExtraCellIterator(); !iter.done();iter++){
IntVector c = *iter;
vel_CC[m][c] += delta_vel_exch[m][c];
}
}
//__________________________________
// E N E R G Y E X C H A N G E
int gm = d_fluidMatlIndx;
int sm = d_solidMatlIndx;
Vector dx = patch->dCell();
for(CellIterator iter = patch->getCellIterator(); !iter.done();iter++){
IntVector c = *iter;
//
H.copy(h);
Q.identity();
//__________________________________
// If cell is a surface cell modify exchange coefficients
if ( isSurfaceCell[c] && d_useSlipCoeffs){
        // This should work for more than one solid. It's hard-wired for 2 matls -- Todd
computeSurfaceRotationMatrix(Q, surfaceNorm[sm][c]); // Makes Q at each cell c
// For the temperature do you need to compute Q?? --Todd
double A_V = 1.0/( dx.x()*fabs(Q(1,0)) +
dx.y()*fabs(Q(1,1)) +
dx.z()*fabs(Q(1,2)) );
// thermal
double at = d_thermal_accommodation_coeff;
double Beta_t = ((2 - at)/at) * (2/(1 + gamma[gm][c])) * (1/cv[gm][c]);
H(gm,sm) = A_V / (Beta_t * meanFreePath[gm][c] * vol_frac_CC[sm][c]); // The viscosity does not appear here because it's taken out of lambda
if(H(gm,sm) > h(gm,sm)) {
H(gm,sm) = h(gm,sm);
}
H(sm,gm) = H(gm,sm);
} // if a surface cell
//__________________________________
// Perform exchange
a.zero();
for(int m = 0; m < d_numMatls; m++) {
double adiag = 1.0;
b[m] = 0.0;
for(int n = 0; n < d_numMatls; n++) {
a(m,n) = - delT * vol_frac_CC[n][c] * sp_vol_CC[m][c] * H(m,n) / cv[m][c]; // double check equation --Todd
adiag -= a(m,n);
b[m] -= a(m,n) * (Temp_CC[n][c] - Temp_CC[m][c]);
}
a(m,m) = adiag;
}
a.destructiveSolve(b);
for(int m = 0; m < d_numMatls; m++) {
Temp_CC[m][c] += b[m];
}
} //end CellIterator loop
//__________________________________
// Boundary Condition Code
if( BC_globalVars->usingLodi || BC_globalVars->usingMicroSlipBCs){
std::vector<CCVariable<double> > temp_CC_Xchange(d_numMatls);
std::vector<CCVariable<Vector> > vel_CC_Xchange(d_numMatls);
for (int m = 0; m < d_numMatls; m++) {
Material* matl = d_matlManager->getMaterial(m);
int indx = matl->getDWIndex();
new_dw->allocateAndPut(temp_CC_Xchange[m], Ilb->temp_CC_XchangeLabel, indx, patch);
new_dw->allocateAndPut(vel_CC_Xchange[m], Ilb->vel_CC_XchangeLabel, indx, patch);
vel_CC_Xchange[m].copy(vel_CC[m]);
temp_CC_Xchange[m].copy(Temp_CC[m]);
}
}
//__________________________________
// Set boundary conditions
for (int m = 0; m < d_numMatls; m++) {
Material* matl = d_matlManager->getMaterial( m );
int indx = matl->getDWIndex();
customBC_localVars* BC_localVars = scinew customBC_localVars();
preprocess_CustomBCs("CC_Exchange", old_dw, new_dw, Ilb, patch, indx, BC_globalVars, BC_localVars);
setBC(vel_CC[m], "Velocity", patch, d_matlManager, indx, new_dw,
BC_globalVars, BC_localVars, isNotInitialTimeStep);
setBC(Temp_CC[m],"Temperature",gamma[m], cv[m], patch, d_matlManager,
indx, new_dw, BC_globalVars, BC_localVars, isNotInitialTimeStep);
#if SET_CFI_BC
// set_CFI_BC<Vector>(vel_CC[m], patch);
// set_CFI_BC<double>(Temp_CC[m], patch);
#endif
delete_CustomBCs( BC_globalVars, BC_localVars );
}
//__________________________________
// Convert vars. primitive-> flux
for(CellIterator iter = patch->getExtraCellIterator(); !iter.done();iter++){
      IntVector c = *iter;    // shouldn't the loop over matls be outside the cell iterator, for speed? --Todd
for (int m = 0; m < d_numMatls; m++) {
int_eng_L_ME[m][c] = Temp_CC[m][c]*cv[m][c] * mass_L[m][c];
mom_L_ME[m][c] = vel_CC[m][c] * mass_L[m][c];
Tdot[m][c] = (Temp_CC[m][c] - old_temp[m][c])/delT;
}
}
} //patches
}
//______________________________________________________________________
//
void SlipExch::schedComputeMeanFreePath(SchedulerP & sched,
const PatchSet * patches)
{
std::string tName = "SlipExch::computeMeanFreePath";
Task* t = scinew Task(tName, this, &SlipExch::computeMeanFreePath);
printSchedule(patches, dbgExch, tName);
Ghost::GhostType gn = Ghost::None;
t->requires(Task::OldDW, Ilb->temp_CCLabel, gn);
t->requires(Task::OldDW, Ilb->sp_vol_CCLabel, gn);
t->requires(Task::NewDW, Ilb->gammaLabel, gn);
t->requires(Task::NewDW, Ilb->specific_heatLabel, gn);
t->computes(d_meanFreePathLabel);
const MaterialSet* ice_matls = d_matlManager->allMaterials( "ICE" );
sched->addTask(t, patches, ice_matls);
}
//______________________________________________________________________
//
void SlipExch::computeMeanFreePath(const ProcessorGroup *,
const PatchSubset * patches,
const MaterialSubset *,
DataWarehouse * old_dw,
DataWarehouse * new_dw)
{
for(int p=0;p<patches->size();p++){
const Patch* patch = patches->get(p);
printTask(patches, patch, dbgExch, "Doing SlipExch::computeMeanFreePath" );
int numICEMatls = d_matlManager->getNumMatls( "ICE" );
Ghost::GhostType gn = Ghost::None;
for (int m = 0; m < numICEMatls; m++) {
ICEMaterial* ice_matl = (ICEMaterial*) d_matlManager->getMaterial( "ICE", m);
int indx = ice_matl->getDWIndex();
constCCVariable<double> temp;
constCCVariable<double> sp_vol;
constCCVariable<double> gamma;
constCCVariable<double> cv;
CCVariable<double> meanFreePath;
old_dw->get(temp, Ilb->temp_CCLabel, indx,patch, gn,0);
old_dw->get(sp_vol, Ilb->sp_vol_CCLabel, indx,patch, gn,0);
new_dw->get(gamma, Ilb->gammaLabel, indx,patch, gn,0);
new_dw->get(cv, Ilb->specific_heatLabel,indx,patch, gn,0);
new_dw->allocateAndPut(meanFreePath, d_meanFreePathLabel, indx, patch);
meanFreePath.initialize(0.0);
//This is really the mean free path divided by the dynamic viscosity
for (CellIterator iter=patch->getExtraCellIterator();!iter.done();iter++){
IntVector c = *iter;
meanFreePath[c] = sp_vol[c] /sqrt(2 * cv[c] * (gamma[c] - 1) * temp[c] / M_PI);
}
}
}
}
//______________________________________________________________________
//
void SlipExch::computeSurfaceRotationMatrix(FastMatrix & Q,
const Vector & surfaceNorm )
{
Q.zero();
if(fabs(surfaceNorm[1]) > 0.9999) {
Q.identity();
}else { //if coord.'s are not already aligned with surface
Q(1,0) = surfaceNorm[0];
Q(1,1) = surfaceNorm[1];
Q(1,2) = surfaceNorm[2];
double sqrtTerm = sqrt(1 - Q(1,1) * Q(1,1));
double invTerm = 1.0/sqrtTerm;
Q(0,0) = Q(1,1);
Q(0,1) =-Q(1,0);
Q(0,2) = Q(1,1) * Q(1,2) * invTerm;
Q(2,0) =-Q(1,2) * invTerm;
Q(2,1) = 0.0;
Q(2,2) = Q(1,0) * invTerm;
}
}
|
#include "obj.h"
#include "handle.h"
#include "gtest/gtest.h"
namespace yukino {
TEST(ObjTest, HandleRefCounting) {
auto obj = String::New(yuki::Slice("1234"));
EXPECT_EQ(0, obj->RefCount());
Handle<String> str(obj);
EXPECT_EQ(1, obj->RefCount());
EXPECT_EQ(obj->RefCount(), str.ref_count());
EXPECT_EQ("1234", str->data().ToString());
}
TEST(ObjTest, HandleAssign) {
Handle<Integer> obj(Integer::New(100));
EXPECT_EQ(1, obj.ref_count());
EXPECT_EQ(100, obj->data());
obj = Integer::New(99);
EXPECT_EQ(1, obj.ref_count());
EXPECT_EQ(99, obj->data());
}
} // namespace yukino |
<filename>ccnxlibs/libccnx-common/ccnx/common/codec/schema_v1/ccnxCodecSchemaV1_MessageDecoder.c
/*
* Copyright (c) 2017 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * Decodes the per-message (CCNxMessage) TLV section of a schema V1 packet.
 */
#include <config.h>
#include <stdio.h>
#include <stdlib.h>
#include <LongBow/runtime.h>
#include <parc/algol/parc_Memory.h>
#include <parc/algol/parc_Object.h>
#include <parc/algol/parc_Buffer.h>
#include <ccnx/common/codec/schema_v1/ccnxCodecSchemaV1_Types.h>
#include <ccnx/common/codec/schema_v1/ccnxCodecSchemaV1_MessageDecoder.h>
#include <ccnx/common/codec/ccnxCodec_TlvUtilities.h>
#include <ccnx/common/ccnx_PayloadType.h>
#include <ccnx/common/ccnx_InterestReturn.h>
static bool
_translateWirePayloadTypeToCCNxPayloadType(CCNxCodecSchemaV1Types_PayloadType wireFormatType, CCNxPayloadType *payloadTypePtr)
{
//by wschoi
printf("######################_translateWirePayloadTypeToCCNxPayloadType\n\n");
bool success = true;
switch (wireFormatType) {
case CCNxCodecSchemaV1Types_PayloadType_Data:
*payloadTypePtr = CCNxPayloadType_DATA;
break;
case CCNxCodecSchemaV1Types_PayloadType_Key:
*payloadTypePtr = CCNxPayloadType_KEY;
break;
case CCNxCodecSchemaV1Types_PayloadType_Link:
*payloadTypePtr = CCNxPayloadType_LINK;
break;
default:
// unknown type
success = false;
}
return success;
}
/**
 * Decodes the PayloadType VarInt and stores its CCNxPayloadType translation
 * in the packet dictionary.
 */
static bool
_decodePayloadType(CCNxCodecTlvDecoder *decoder, CCNxTlvDictionary *packetDictionary, uint16_t length)
{
//by wschoi
printf("######################_decodePayloadType\n\n");
CCNxPayloadType payloadType;
uint64_t wireFormatVarInt;
bool success = ccnxCodecTlvDecoder_GetVarInt(decoder, length, &wireFormatVarInt);
if (success) {
CCNxCodecSchemaV1Types_PayloadType wireFormatType = (CCNxCodecSchemaV1Types_PayloadType) wireFormatVarInt;
success = _translateWirePayloadTypeToCCNxPayloadType(wireFormatType, &payloadType);
}
if (success) {
success = ccnxTlvDictionary_PutInteger(packetDictionary, CCNxCodecSchemaV1TlvDictionary_MessageFastArray_PAYLOADTYPE, payloadType);
}
//by wschoi
//printf("##############_decodePayloadType success %d\n\n",success);
return success;
}
static bool
_decodeType(CCNxCodecTlvDecoder *decoder, CCNxTlvDictionary *packetDictionary, uint16_t type, uint16_t length)
{
//by wschoi
//printf("######################_decodeType\n\n");
bool success = false;
switch (type) {
case CCNxCodecSchemaV1Types_CCNxMessage_Name:
success = ccnxCodecTlvUtilities_PutAsName(decoder, packetDictionary, type, length, CCNxCodecSchemaV1TlvDictionary_MessageFastArray_NAME);
break;
case CCNxCodecSchemaV1Types_CCNxMessage_Payload:
success = ccnxCodecTlvUtilities_PutAsBuffer(decoder, packetDictionary, type, length, CCNxCodecSchemaV1TlvDictionary_MessageFastArray_PAYLOAD);
break;
//by wschoi
//LOOKUP
case CCNxCodecSchemaV1Types_CCNxMessage_T_GETNAME:
success = ccnxCodecTlvUtilities_PutAsBuffer(decoder, packetDictionary, type, length, CCNxCodecSchemaV1TlvDictionary_MessageFastArray_GETNAME);
break;
case CCNxCodecSchemaV1Types_CCNxMessage_T_PAYLOAD_GETNAME:
success = ccnxCodecTlvUtilities_PutAsBuffer(decoder, packetDictionary, type, length, CCNxCodecSchemaV1TlvDictionary_MessageFastArray_PAYLOAD_GETNAME);
break;
case CCNxCodecSchemaV1Types_CCNxMessage_T_NAME_KEY:
success = ccnxCodecTlvUtilities_PutAsBuffer(decoder, packetDictionary, type, length, CCNxCodecSchemaV1TlvDictionary_MessageFastArray_NAME_KEY);
break;
//REGISTRATION
//registration
case CCNxCodecSchemaV1Types_CCNxMessage_T_NAME_REGISTRATION_KeyName:
success = ccnxCodecTlvUtilities_PutAsBuffer(decoder, packetDictionary, type, length, CCNxCodecSchemaV1TlvDictionary_MessageFastArray_NAME_REG_KEY);
break;
case CCNxCodecSchemaV1Types_CCNxMessage_T_NAME_REGISTRATION_ValueName:
success = ccnxCodecTlvUtilities_PutAsBuffer(decoder, packetDictionary, type, length, CCNxCodecSchemaV1TlvDictionary_MessageFastArray_NAME_REG_VALUE);
break;
case CCNxCodecSchemaV1Types_CCNxMessage_T_NAME_REGISTRATION_ACK:
success = ccnxCodecTlvUtilities_PutAsBuffer(decoder, packetDictionary, type, length, CCNxCodecSchemaV1TlvDictionary_MessageFastArray_NAME_REG_ACK);
break;
//ADD
case CCNxCodecSchemaV1Types_CCNxMessage_T_NAME_ADD_KeyName:
success = ccnxCodecTlvUtilities_PutAsBuffer(decoder, packetDictionary, type, length, CCNxCodecSchemaV1TlvDictionary_MessageFastArray_NAME_ADD_KEY);
break;
case CCNxCodecSchemaV1Types_CCNxMessage_T_NAME_ADD_ValueName:
success = ccnxCodecTlvUtilities_PutAsBuffer(decoder, packetDictionary, type, length, CCNxCodecSchemaV1TlvDictionary_MessageFastArray_NAME_ADD_VALUE);
break;
case CCNxCodecSchemaV1Types_CCNxMessage_T_NAME_ADD_ACK:
success = ccnxCodecTlvUtilities_PutAsBuffer(decoder, packetDictionary, type, length, CCNxCodecSchemaV1TlvDictionary_MessageFastArray_NAME_ADD_ACK);
break;
//DEL
case CCNxCodecSchemaV1Types_CCNxMessage_T_NAME_DEL_KeyName:
success = ccnxCodecTlvUtilities_PutAsBuffer(decoder, packetDictionary, type, length, CCNxCodecSchemaV1TlvDictionary_MessageFastArray_NAME_DEL_KEY);
break;
case CCNxCodecSchemaV1Types_CCNxMessage_T_NAME_DEL_ValueName:
success = ccnxCodecTlvUtilities_PutAsBuffer(decoder, packetDictionary, type, length, CCNxCodecSchemaV1TlvDictionary_MessageFastArray_NAME_DEL_VALUE);
break;
case CCNxCodecSchemaV1Types_CCNxMessage_T_NAME_DEL_ACK:
success = ccnxCodecTlvUtilities_PutAsBuffer(decoder, packetDictionary, type, length, CCNxCodecSchemaV1TlvDictionary_MessageFastArray_NAME_DEL_ACK);
break;
//DEREG
case CCNxCodecSchemaV1Types_CCNxMessage_T_NAME_DEREG_KeyName:
success = ccnxCodecTlvUtilities_PutAsBuffer(decoder, packetDictionary, type, length, CCNxCodecSchemaV1TlvDictionary_MessageFastArray_NAME_DEREG_KEY);
break;
case CCNxCodecSchemaV1Types_CCNxMessage_T_NAME_DEREG_ValueName:
success = ccnxCodecTlvUtilities_PutAsBuffer(decoder, packetDictionary, type, length, CCNxCodecSchemaV1TlvDictionary_MessageFastArray_NAME_DEREG_VALUE);
break;
case CCNxCodecSchemaV1Types_CCNxMessage_T_NAME_DEREG_ACK:
success = ccnxCodecTlvUtilities_PutAsBuffer(decoder, packetDictionary, type, length, CCNxCodecSchemaV1TlvDictionary_MessageFastArray_NAME_DEREG_ACK);
break;
#if 0
//refresh
case CCNxCodecSchemaV1Types_CCNxMessage_T_NAME_REFRESH_KeyName:
success = ccnxCodecTlvUtilities_PutAsBuffer(decoder, packetDictionary, type, length, CCNxCodecSchemaV1TlvDictionary_MessageFastArray_NAME_REFRESH_KEY);
break;
case CCNxCodecSchemaV1Types_CCNxMessage_T_NAME_REFRESH_ValueName:
success = ccnxCodecTlvUtilities_PutAsBuffer(decoder, packetDictionary, type, length, CCNxCodecSchemaV1TlvDictionary_MessageFastArray_NAME_REFRESH_VALUE);
break;
case CCNxCodecSchemaV1Types_CCNxMessage_T_NAME_REFRESH_ACK:
success = ccnxCodecTlvUtilities_PutAsBuffer(decoder, packetDictionary, type, length, CCNxCodecSchemaV1TlvDictionary_MessageFastArray_NAME_REFRESH_ACK);
break;
#endif
case CCNxCodecSchemaV1Types_CCNxMessage_KeyIdRestriction:
success = ccnxCodecTlvUtilities_PutAsHash(decoder, packetDictionary, type, length, CCNxCodecSchemaV1TlvDictionary_MessageFastArray_KEYID_RESTRICTION);
break;
case CCNxCodecSchemaV1Types_CCNxMessage_ContentObjectHashRestriction:
success = ccnxCodecTlvUtilities_PutAsHash(decoder, packetDictionary, type, length, CCNxCodecSchemaV1TlvDictionary_MessageFastArray_OBJHASH_RESTRICTION);
break;
case CCNxCodecSchemaV1Types_CCNxMessage_PayloadType:
success = _decodePayloadType(decoder, packetDictionary, length);
break;
case CCNxCodecSchemaV1Types_CCNxMessage_ExpiryTime:
success = ccnxCodecTlvUtilities_PutAsInteger(decoder, packetDictionary, type, length, CCNxCodecSchemaV1TlvDictionary_MessageFastArray_EXPIRY_TIME);
break;
case CCNxCodecSchemaV1Types_CCNxMessage_EndChunkNumber:
success = ccnxCodecTlvUtilities_PutAsInteger(decoder, packetDictionary, type, length, CCNxCodecSchemaV1TlvDictionary_MessageFastArray_ENDSEGMENT);
break;
default:
// if we do not know the TLV type, put it in this container's unknown list
success = ccnxCodecTlvUtilities_PutAsListBuffer(decoder, packetDictionary, type, length, CCNxCodecSchemaV1TlvDictionary_Lists_MESSAGE_LIST);
break;
}
if (!success) {
CCNxCodecError *error = ccnxCodecError_Create(TLV_ERR_DECODE, __func__, __LINE__, ccnxCodecTlvDecoder_Position(decoder));
ccnxCodecTlvDecoder_SetError(decoder, error);
ccnxCodecError_Release(&error);
}
return success;
}
/*
* We are given a decoder that points to the first TLV of a list of TLVs. We keep walking the
* list until we come to the end of the decoder.
*/
bool
ccnxCodecSchemaV1MessageDecoder_Decode(CCNxCodecTlvDecoder *decoder, CCNxTlvDictionary *packetDictionary)
{
//by wschoi
//printf("######################ccnxCodecSchemaV1MessageDecoder_Decode\n\n");
return ccnxCodecTlvUtilities_DecodeContainer(decoder, packetDictionary, _decodeType);
}
|
/**
* HTML for each insert tab (neume, grouping, clef, system, and division).
*/
export const insertTabHtml: Record<string, string> = {
primitiveTab: '<p class=\'control\'>' +
'<button id=\'punctum\' class=\'button insertel smallel\' aria-label=\'punctum\' title=\'punctum\'><img src=\'' + __ASSET_PREFIX__ + 'assets/img/punctum.png' + '\' class=\'image\'/></button></p>' +
'<p class=\'control\'>' +
'<button id=\'virga\' class=\'button insertel smallel\' aria-label=\'virga\' title=\'virga\'><img src=\'' + __ASSET_PREFIX__ + 'assets/img/virga.png' + '\' class=\'image\'/></button></p>' +
'<p class=\'control\'>' +
'<button id=\'diamond\' class=\'button insertel smallel\' aria-label=\'inclinatum\' title=\'inclinatum\'><img src=\'' + __ASSET_PREFIX__ + 'assets/img/diamond.png' + '\' class=\'image\'/></button></p>' +
/* "<p class='control'>" +
"<button id='white_punct' class='button insertel smallel' title='white punctum'><img src='" + White__ASSET_PREFIX__ + 'assets/img/punctum.png' + "' class='image'/></button></p>" +
"<p class='control'>" +
"<button id='quilisma' class='button insertel smallel' title='quilisma'><img src='" + __ASSET_PREFIX__ + 'assets/img/quilisma.png' + "' class='image'/></button></p>" + */
'<p class=\'control\'>' +
'<button id=\'custos\' class=\'button insertel smallel\' aria-label=\'custos\' title=\'custos\'><img src=\'' + __ASSET_PREFIX__ + 'assets/img/custos.png' + '\' class=\'image\'/></button></p>' +
'<p class=\'control\'>' +
    '<button id=\'cClef\' class=\'button insertel smallel\' aria-label=\'C Clef\' title=\'C Clef\'><img src=\'' + __ASSET_PREFIX__ + 'assets/img/cClef.png' + '\' class=\'image\' /></button></p>' +
'<p class=\'control\'>' +
'<button id=\'fClef\' class=\'button insertel smallel\' aria-label=\'F Clef\' title=\'F Clef\'><img src=\'' + __ASSET_PREFIX__ + 'assets/img/fClef.png' + '\' class=\'image\'/></button></p>',
groupingTab: '<p class=\'control\'>' +
'<button id=\'pes\' class=\'button insertel smallel\' aria-label=\'pes\' title=\'pes\'><img src=\'' + __ASSET_PREFIX__ + 'assets/img/pes.png' + '\' class=\'image\'/></button></p>' +
'<p class=\'control\'>' +
'<button id=\'clivis\' class=\'button insertel smallel\' aria-label=\'clivis\' title=\'clivis\'><img src=\'' + __ASSET_PREFIX__ + 'assets/img/clivis.png' + '\' class=\'image\'/></button></p>' +
'<p class=\'control\'>' +
'<button id=\'scandicus\' class=\'button insertel smallel\' aria-label=\'scandicus\' title=\'scandicus\'><img src=\'' + __ASSET_PREFIX__ + 'assets/img/scandicus.png' + '\' class=\'image\'/></button></p>' +
'<p class=\'control\'>' +
'<button id=\'climacus\' class=\'button insertel smallel\' aria-label=\'climacus\' title=\'climacus\'><img src=\'' + __ASSET_PREFIX__ + 'assets/img/climacus.png' + '\' class=\'image\'/></button></p>' +
'<p class=\'control\'>' +
    '<button id=\'torculus\' class=\'button insertel smallel\' aria-label=\'torculus\' title=\'torculus\'><img src=\'' + __ASSET_PREFIX__ + 'assets/img/torculus.png' + '\' class=\'image\'/></button></p>' +
'<p class=\'control\'>' +
'<button id=\'porrectus\' class=\'button insertel smallel\' aria-label=\'porrectus\' title=\'porrectus\'><img src=\'' + __ASSET_PREFIX__ + 'assets/img/porrectus.png' + '\' class=\'image\'/></button></p>' +
'<p class=\'control\'>' +
'<button id=\'pressus\' class=\'button insertel smallel\' aria-label=\'pressus\' title=\'pressus\'><img src=\'' + __ASSET_PREFIX__ + 'assets/img/pressus.png' + '\' class=\'image\'/></button></p>',
systemTab: '<p class=\'control\'>' +
'<button id=\'staff\' class=\'button insertel longel\' aria-label=\'system\' title=\'system\'><img src=\'' + __ASSET_PREFIX__ + 'assets/img/staff.png' + '\' class=\'image\' /></button></p>' +
'<p>Click upper left and lower right corners of new staff.</p>'
// divisionTab: "<p class='control'>" +
// "<button id='smallDiv' class='button insertel tallel'><img src='" + __ASSET_PREFIX__ + 'assets/img/smalldiv.png' + "' class='image'/></button></p>" +
// "<p class='control'>" +
// "<button id='minorDiv' class='button insertel tallel'><img src='" + __ASSET_PREFIX__ + 'assets/img/minordiv.png' +"' class='image'/></button></p>" +
// "<p class='control'>" +
// "<button id='majorDiv' class='button insertel tallel'><img src='" + __ASSET_PREFIX__ + 'assets/img/majordiv.png' + "' class='image'/></button></p>" +
// "<p class='control'>" +
// "<button id='finalDiv' class='button insertel tallel'><img src='" + __ASSET_PREFIX__ + 'assets/img/finaldiv.png' + "' class='image'/></button></p>"
};
/**
* Structure of insert panel with basic grouping tabs.
*/
export const insertControlsPanel: string =
'<p class=\'panel-heading\' id=\'insertMenu\'>Insert' +
'<svg class=\'icon is-pulled-right\'><use id=\'toggleInsert\' xlink:href=\'' + __ASSET_PREFIX__ + 'assets/img/icons.svg' + '#dropdown-down\'></use></svg></p>' +
'<div id=\'insertContents\' style=\'overflow-y: hidden;\'>' +
'<p class=\'panel-tabs\'>' +
'<a id=\'primitiveTab\' class=\'insertTab\'>Primitive Elements</a>' +
'<a id=\'groupingTab\' class=\'insertTab\'>Grouping</a>' +
'<a id=\'systemTab\' class=\'insertTab\'>System</a></p>' +
// "<a id='divisionTab' class='insertTab'>Division</a></p>" +
'<a class=\'panel-block has-text-centered\'>' +
'<div id=\'insert_data\' class=\'field is-grouped buttons\'></div></a></div>';
/**
* Contents of edit panel with buttons.
*/
export const editControlsPanel: string =
'<p class=\'panel-heading\' id=\'editMenu\'>Edit' +
'<svg class=\'icon is-pulled-right\'><use id=\'toggleEdit\' xlink:href=\'' + __ASSET_PREFIX__ + 'assets/img/icons.svg' + '#dropdown-down\'></use></svg></p>' +
'<div id=\'editContents\'>' +
'<a class=\'panel-block\'>' +
'<label>Select By: </label>' +
'<div class=\'field has-addons buttons\' style=\'overflow-x: auto;\'>' +
'<p class=\'control\'>' +
'<button class=\'button sel-by is-active\' id=\'selBySyl\'>Syllable</button></p>' +
'<p class=\'control\'>' +
'<button class=\'button sel-by\' id=\'selByNeume\'>Neume</button></p>' +
'<p class=\'control\'>' +
'<button class=\'button sel-by\' id=\'selByNc\'>Neume Component</button></p>' +
'<p class=\'control\'>' +
'<button class=\'button sel-by\' id=\'selByStaff\'>Staff</button></p></div></a>' +
'<div class=\'field is-grouped buttons\'>' +
'<p class=\'control\'>' +
'<a id=\'moreEdit\' class=\'panel-block is-invisible\'>' +
'<a id=\'extraEdit\' class=\'panel-block is-invisible\'>' +
/*
* The extraEdit panel is added for edit options that have dropdown menus
* Like the Neume and Clef menus
* This is done because the moreEdit menu needs to have overflow for cases where it has lots of buttons
* But overflow ruins dropdown menus
*/
'<a id=\'neumeEdit\' class=\'panel-block is-invisible\'></div>';
/**
* Contents of extra nc action menu.
*/
export const ncActionContents: string =
'<label>Change Head Shape: </label>' +
'<div id=\'drop_select\' class=\'dropdown\'>' +
'<div class=\'dropdown-trigger\'>' +
'<button id=\'select-options\' class=\'button\' aria-haspopup=\'true\' aria-controls=\'dropdown-menu\'>' +
'<span>Head Shapes</span>' +
'<svg class=\'icon\'><use xlink:href=\'' + __ASSET_PREFIX__ + 'assets/img/icons.svg' + '#dropdown-down\'></use></svg></button></div>' +
'<div class=\'dropdown-menu\' id=\'dropdown-menu\' role=\'menu\'>' +
'<div class=\'dropdown-content\'>' +
'<a id=\'Punctum\' class=\'dropdown-item\'>Punctum</a>' +
'<a id=\'Virga\' class=\'dropdown-item\'>Virga</a>' +
'<a id=\'Inclinatum\' class=\'dropdown-item\'>Inclinatum</a></div></div></div>' +
'<p class=\'control\'></p></div>';
/**
* Contents of extra neume action menu.
*/
export const neumeActionContents: string =
'<label>Change Grouping: </label>' +
'<div id=\'drop_select\' class=\'dropdown\'>' +
'<div class=\'dropdown-trigger\'>' +
'<button id=\'select-options\' class=\'button\' aria-haspopup=\'true\' aria-controls=\'dropdown-menu\'>' +
'<span>Groupings</span>' +
'<svg class=\'icon\'><use xlink:href=\'' + __ASSET_PREFIX__ + 'assets/img/icons.svg' + '#dropdown-down\'></use></svg></button></div>' +
'<div class=\'dropdown-menu\' id=\'dropdown-menu\' role=\'menu\'>' +
'<div class=\'dropdown-content scrollable-dropdown\'>' +
'<a id=\'Pes\' class=\'dropdown-item grouping\'>Pes</a>' +
'<a id=\'Pes subpunctis\' class=\'dropdown-item grouping\'>Pes Subpunctis</a>' +
'<a id=\'Clivis\' class=\'dropdown-item grouping\'>Clivis</a>' +
'<a id=\'Scandicus\' class=\'dropdown-item grouping\'>Scandicus</a>' +
'<a id=\'Scandicus flexus\' class=\'dropdown-item grouping\'>Scandicus Flexus</a>' +
'<a id=\'Scandicus subpunctis\' class=\'dropdown-item grouping\'>Scandicus Subpunctis</a>' +
'<a id=\'Climacus\' class=\'dropdown-item grouping\'>Climacus</a>' +
'<a id=\'Climacus resupinus\' class=\'dropdown-item grouping\'>Climacus Resupinus</a>' +
'<a id=\'Torculus\' class=\'dropdown-item grouping\'>Torculus</a>' +
'<a id=\'Torculus resupinus\' class=\'dropdown-item grouping\'>Torculus Resupinus</a>' +
'<a id=\'Porrectus\' class=\'dropdown-item grouping\'>Porrectus</a>' +
'<a id=\'Porrectus flexus\' class=\'dropdown-item grouping\'>Porrectus Flexus</a>' +
'<a id=\'Porrectus subpunctis\' class=\'dropdown-item grouping\'>Porrectus Subpunctis</a>' +
'<a id=\'Pressus\' class=\'dropdown-item grouping\'>Pressus</a>' +
'</div></div></div>' +
'<div><p class=\'control\'>' +
'<button class=\'button\' id=\'ungroupNcs\'>Ungroup</button></p></div>';
/**
* Contents of extra staff action menu.
*/
export const staffActionContents: string =
'<label>Merge Systems: </label>' +
'<div><p class=\'control\'>' +
'<button id=\'merge-systems\' class=\'button\'>Merge</button>' +
'<button class=\'button\' id=\'delete\'>Delete</button></p></div>';
/**
* Contents of default action menu.
*/
export const defaultActionContents: string =
'<div><p class=\'control\'>' +
'<button class=\'button\' id=\'delete\'>Delete</button></p></div>';
/**
* Contents of default action menu when selecting by syllable
* Same as above except includes re-associate to nearest staff
*/
export const defaultSylActionContents: string =
'<div><p class=\'control\'>' +
'<button class=\'button\' id=\'delete\'>Delete</button>' +
'<button class=\'button\' id=\'changeStaff\'>Re-associate to nearest staff</button></p></div>';
/**
* Contents of custos action menu.
*/
export const custosActionContents: string =
'<div><p class=\'control\'>' +
'<button class=\'button\' id=\'delete\'>Delete</button>' +
'<button class=\'button\' id=\'changeStaff\'>Re-associate to nearest staff</button></p></div>';
/**
* Contents of split action menu.
* @type {string}
*/
export const splitActionContents: string =
'<label>Split System: </label>' +
'<div><p class=\'control\'>' +
'<button id=\'split-system\' class=\'button\'>Split</button>' +
'<button id=\'reset-rotate\' class=\'button\'>Reset Rotate</button>' +
'<button class=\'button\' id=\'delete\'>Delete</button></p></div>';
/**
* Contents of extra clef action menu.
*/
export const clefActionContents: string =
'<label>Change Clef Shape: </label>' +
'<div id=\'drop_select\' class=\'dropdown\'>' +
    '<div class=\'dropdown-trigger\' style=\'overflow: auto;\'>' +
'<button id=\'select-options\' class=\'button\' aria-haspopup=\'true\' aria-controls=\'dropdown-menu\'>' +
'<span>Clef Shapes</span>' +
'<svg class=\'icon\'><use xlink:href=\'' + __ASSET_PREFIX__ + 'assets/img/icons.svg' + '#dropdown-down\'></use></svg></button></div>' +
'<div class=\'dropdown-menu\' id=\'dropdown-menu\' role=\'menu\'>' +
'<div class=\'dropdown-content\'>' +
'<a id=\'CClef\' class=\'dropdown-item\'>C Clef</a>' +
'<a id=\'FClef\' class=\'dropdown-item\'>F Clef</a></div></div></div></div>';
/**
* HTML for grouping selection menu.
*/
export const groupingMenu: Record<string, string> = {
'nc': '<div class=\'field is-grouped\'>' +
'<div><p class=\'control\'>' +
'<button class=\'button\' id=\'groupNcs\'>Group Neume Components</button>' +
'<button class=\'button\' id=\'delete\'>Delete</button></p></div>',
'neume': '<div class=\'field is-grouped\'>' +
'<div><p class=\'control\'>' +
'<button class=\'button\' id=\'groupNeumes\'>Group Neumes</button>' +
'<button class=\'button\' id=\'delete\'>Delete</button></p></div>',
'syl': '<div class=\'field is-grouped\'>' +
'<div><p class=\'control\'>' +
'<button class=\'button\' id=\'mergeSyls\'>Merge Syllables</button>' +
'<button class=\'button\' id=\'delete\'>Delete</button>' +
'<button class=\'button\' id=\'changeStaff\'>Re-associate to nearest staff</button></p></div>',
'ligatureNc': '<div class=\'field is-grouped\'>' +
'<div><p class=\'control\'>' +
'<button class=\'button\' id=\'groupNcs\'>Group Neume Components</button></p></div>' +
'<div><p class=\'control\'>' +
'<button class=\'button\' id=\'toggle-ligature\'>Toggle Ligature</button>' +
'<button class=\'button\' id=\'delete\'>Delete</button></p></div></div>',
'ligature': '<div class=\'field is-grouped\'>' +
'<div><p class=\'control\'>' +
'<button class=\'button\' id=\'toggle-ligature\'>Toggle Ligature</button>' +
'<button class=\'button\' id=\'delete\'>Delete</button></p></div></div>',
'splitSyllable': '<div class=\'field is-grouped\'>' +
'<div><p class=\'control\'>' +
'<button class=\'button\' id=\'toggle-link\'>Toggle Linked Syllables</button>' +
'<button class=\'button\' id=\'delete\'>Delete</button></p></div></div>'
};
|
/**
* Skip over that many entries. This method is relatively fast (for this map
* implementation) even if many entries need to be skipped.
*
* @param n the number of entries to skip
*/
public void skip(long n) {
if (n < 10) {
while (n-- > 0 && hasNext()) {
next();
}
} else if(hasNext()) {
assert cursorPos != null;
CursorPos cp = cursorPos;
CursorPos parent;
while ((parent = cp.parent) != null) cp = parent;
Page root = cp.page;
@SuppressWarnings("unchecked")
MVMap<K, ?> map = (MVMap<K, ?>) root.map;
long index = map.getKeyIndex(next());
last = map.getKey(index + n);
this.cursorPos = traverseDown(root, last);
}
    }
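
// Usage sketch (hypothetical caller; assumes this cursor came from an MVMap,
// which the cast inside skip() relies on):
//
//   Cursor<K, V> cursor = map.cursor(firstKey);
//   cursor.skip(1000000);   // positions via getKeyIndex()/getKey() instead of 10^6 next() calls
//   K key = cursor.next();
|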
"""
LSTM for time series classification
This model takes in time series and class labels.
The LSTM models the time series. A fully-connected layer
generates an output to be classified with Softmax
"""
import numpy as np
import tensorflow as tf #TF 1.1.0rc1
tf.logging.set_verbosity(tf.logging.ERROR)
import matplotlib.pyplot as plt
from tsc_model import Model,sample_batch,load_data,check_test
#Set these directories
direc = '/home/rob/Dropbox/ml_projects/LSTM/UCR_TS_Archive_2015'
summaries_dir = '/home/rob/Dropbox/ml_projects/LSTM_TSC/log_tb'
"""Load the data"""
ratio = np.array([0.8,0.9]) #Ratios where to split the training and validation set
X_train,X_val,X_test,y_train,y_val,y_test = load_data(direc,ratio,dataset='ChlorineConcentration')
N,sl = X_train.shape
num_classes = len(np.unique(y_train))
"""Hyperparamaters"""
batch_size = 30
max_iterations = 3000
dropout = 0.8
config = { 'num_layers' : 3, #number of layers of stacked RNN's
'hidden_size' : 120, #memory cells in a layer
'max_grad_norm' : 5, #maximum gradient norm during training
'batch_size' : batch_size,
'learning_rate' : .005,
'sl': sl,
'num_classes': num_classes}
epochs = np.floor(batch_size*max_iterations / N)
print('Train %.0f samples in approximately %d epochs' %(N,epochs))
#Instantiate a model
model = Model(config)
"""Session time"""
sess = tf.Session() #Depending on your use, do not forget to close the session
writer = tf.summary.FileWriter(summaries_dir, sess.graph) #writer for Tensorboard
sess.run(model.init_op)
cost_train_ma = -np.log(1/float(num_classes)+1e-9) #Moving average training cost
acc_train_ma = 0.0
try:
for i in range(max_iterations):
X_batch, y_batch = sample_batch(X_train,y_train,batch_size)
#Next line does the actual training
cost_train, acc_train,_ = sess.run([model.cost,model.accuracy, model.train_op],feed_dict = {model.input: X_batch,model.labels: y_batch,model.keep_prob:dropout})
cost_train_ma = cost_train_ma*0.99 + cost_train*0.01
acc_train_ma = acc_train_ma*0.99 + acc_train*0.01
if i%100 == 1:
#Evaluate validation performance
X_batch, y_batch = sample_batch(X_val,y_val,batch_size)
cost_val, summ,acc_val = sess.run([model.cost,model.merged,model.accuracy],feed_dict = {model.input: X_batch, model.labels: y_batch, model.keep_prob:1.0})
print('At %5.0f/%5.0f: COST %5.3f/%5.3f(%5.3f) -- Acc %5.3f/%5.3f(%5.3f)' %(i,max_iterations,cost_train,cost_val,cost_train_ma,acc_train,acc_val,acc_train_ma))
#Write information to TensorBoard
writer.add_summary(summ, i)
writer.flush()
except KeyboardInterrupt:
pass
epoch = float(i)*batch_size/N
print('Trained %.1f epochs, accuracy is %5.3f and cost is %5.3f'%(epoch,acc_val,cost_val))
# Now run in your terminal:
#   $ tensorboard --logdir=<summaries_dir>
# replacing <summaries_dir> with the summaries_dir defined above.
|
def parse_speed(original, unit):
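    """Parse a speed value; speeds share their numeric format with distances,
    so this simply delegates to parse_distance with the given unit."""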
return parse_distance(original, unit) |
Afghanistan: The Australian Story documentary gives insight into military involvement
Members of the military who fought in Australia's longest war have re-opened raw memories of bloodshed and death in an official documentary account of their service.
Afghanistan: The Australian Story was commissioned by the Australian War Memorial and tells the story of the men and women who served in the 12-year fight against the Taliban.
The project, produced by award-winning former ABC journalist Chris Masters, features reflective and frank new interviews with Special Forces soldiers, engineers, medics and relatives who were affected by the war.
Among the harrowing stories recounted in the documentary are the reflections of a former special forces engineer who describes his fear of having to continue clearing mines after he saw his mate killed by an improvised explosive device.
"I said 'what do I do now?' You know, like I've never done this before. Who prepares for this sort of stuff?" Dan Costelloe says in the documentary.
Video: Dogs play a big role in the Afghan war (ABC News)
Other stories include a young Patrol Base Commander coping with the aftermath of a "green on blue" insider attack, and a widow facing up to the dreaded knock on the door informing her of her partner's death.
"The Australian War Memorial is many things, but I've learnt it's also part of therapeutic milieu for men and women, almost 30,000 of them, after 15 years in Afghanistan returning to an Australia that has no idea what they have been doing on our behalf and in our name," memorial director Brendan Nelson explains at the end of the documentary.
"We are proud of what these Australians have done. We are proud of this Afghanistan: The Australian Story."
The DVD of Afghanistan: The Australian Story will be officially launched at the Australian War Memorial on Wednesday October 5.
The powerful interviews will also be used to help update Australian War Memorial's Afghanistan Gallery.
Australian forces completed their withdrawal from Uruzgan province at the end of 2013, after the mission which saw 41 Defence Force personnel killed and 261 seriously wounded.
|
<reponame>xuyz/qbase
package baseabci
import (
"github.com/QOSGroup/qbase/account"
"github.com/QOSGroup/qbase/mapper"
"github.com/QOSGroup/qbase/qcp"
"github.com/QOSGroup/qbase/store"
"github.com/tendermint/tendermint/crypto"
)
func (app *BaseApp) SetName(name string) {
if app.sealed {
panic("SetName() on sealed BaseApp")
}
app.name = name
}
func (app *BaseApp) SetInitChainer(initChainer InitChainHandler) {
if app.sealed {
panic("SetInitChainer() on sealed BaseApp")
}
app.initChainer = initChainer
}
func (app *BaseApp) SetBeginBlocker(beginBlocker BeginBlockHandler) {
if app.sealed {
panic("SetBeginBlocker() on sealed BaseApp")
}
app.beginBlocker = beginBlocker
}
func (app *BaseApp) SetEndBlocker(endBlocker EndBlockHandler) {
if app.sealed {
panic("SetEndBlocker() on sealed BaseApp")
}
app.endBlocker = endBlocker
}
// registerQcpMapper registers the QCP mapper.
func (app *BaseApp) registerQcpMapper() {
if app.sealed {
panic("RegisterQcpMapper() on sealed BaseApp")
}
mapper := qcp.NewQcpMapper(app.GetCdc())
app.RegisterMapper(mapper)
}
// RegisterAccountProto registers an AccountMapper built from the given account prototype.
func (app *BaseApp) RegisterAccountProto(proto func() account.Account) {
if app.sealed {
panic("RegisterAccountProto() on sealed BaseApp")
}
mapper := account.NewAccountMapper(app.GetCdc(), proto)
app.RegisterMapper(mapper)
}
func (app *BaseApp) RegisterMapper(mapper mapper.IMapper) {
if app.sealed {
panic("RegisterMapper() on sealed BaseApp")
}
key := mapper.GetStoreKey()
kvKey := key.(*store.KVStoreKey)
app.mountStoresIAVL(kvKey)
if _, ok := app.registerMappers[mapper.MapperName()]; ok {
panic("Register dup mapper")
}
mapper.SetCodec(app.GetCdc())
app.registerMappers[mapper.MapperName()] = mapper
}
func (app *BaseApp) RegisterCustomQueryHandler(handler CustomQueryHandler) {
if app.sealed {
panic("RegisterCustomQueryHandler() on sealed BaseApp")
}
app.customQueryHandler = handler
}
func (app *BaseApp) Seal() { app.sealed = true }
func (app *BaseApp) IsSealed() bool { return app.sealed }
func (app *BaseApp) enforceSeal() {
if !app.sealed {
panic("enforceSeal() on BaseApp but not sealed")
}
}
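
// Typical wiring (sketch; assumes the construct-configure-seal order implied by
// the panics above -- every Set*/Register* call must happen before Seal()):
//
//	app := ... // obtain a *BaseApp
//	app.SetName("example")
//	app.RegisterAccountProto(func() account.Account { return newMyAccount() }) // newMyAccount is hypothetical
//	app.Seal() // further configuration now panics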
//-------------------------------------------------------------------
func (app *BaseApp) RegisterTxQcpSigner(signer crypto.PrivKey) {
if app.sealed {
panic("RegisterTxQcpSigner() on sealed BaseApp")
}
app.txQcpSigner = signer
}
func (app *BaseApp) RegisterTxQcpResultHandler(txQcpResultHandler TxQcpResultHandler) {
if app.sealed {
panic("RegisterTxQcpResultHandler() on sealed BaseApp")
}
app.txQcpResultHandler = txQcpResultHandler
}
func (app *BaseApp) SetGasHandler(handler GasHandler) {
if app.sealed {
panic("SetGasHandler() on sealed BaseApp")
}
app.gasHandler = handler
}
|
/**
* A {@link GameMessageReader} implementation that intercepts data sent when the region changes.
*
* @author lare96 <http://github.org/lare96>
*/
public final class RegionChangedMessageReader extends GameMessageReader {
@Override
public Event read(Player player, GameMessage msg) throws Exception {
if (player.isRegionChanged()) {
player.setRegionChanged(false);
World world = player.getWorld();
world.getChunks().updateEntities(player);
return new RegionChangedEvent(player);
}
return null;
}
} |
/**
 * QR-decomposes a complex matrix via Gram-Schmidt: on return A = Q * R, with Q unitary and R right (upper) triangular.
* <p />
* @param Q_re The real part of the matrix to decompose and afterwards
* the real part of the unitary matrix.
* @param Q_im The imaginary part of the matrix to decompose and
* afterwards the imaginary part of the unitary matrix.
* @param R_re A matrix taking the real part of the right triangular
* matrix.
* @param R_im A matrix taking the imaginary part of the right
* triangular matrix.
 * @return {||det(A)||, 0}, the modulus of the determinant as a complex value
*/
public static double[] decompose(double[][] Q_re, double[][] Q_im, double[][] R_re, double[][] R_im)
{
int n = Q_re.length;
double det = 1.;
for (int j = 0; j < n; j++)
{
for (int i = 0; i < j; i++)
{
R_re[j][i] = 0.;
R_im[j][i] = 0.;
double t_re = 0.;
double t_im = 0.;
for (int k = 0; k < n; k++)
{
double a_re = Q_re[k][i];
double a_im = - Q_im[k][i];
double b_re = Q_re[k][j];
double b_im = Q_im[k][j];
t_re += a_re * b_re - a_im * b_im;
t_im += a_im * b_re + a_re * b_im;
}
R_re[i][j] = t_re;
R_im[i][j] = t_im;
for (int k = 0; k < n; k++)
{
double b_re = Q_re[k][i];
double b_im = Q_im[k][i];
Q_re[k][j] -= t_re * b_re - t_im * b_im;
Q_im[k][j] -= t_im * b_re + t_re * b_im;
}
}
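            // Norm of the updated j-th column: it becomes R(j,j), and the product of
            // these norms over all columns is ||det(A)|| (Q's columns are unit length).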
double t = 0.;
for (int i = 0; i < n; i++)
t += Q_re[i][j] * Q_re[i][j] + Q_im[i][j] * Q_im[i][j];
t = Math.sqrt(t);
det *= t;
R_re[j][j] = t;
R_im[j][j] = 0.;
if (t != 0.)
for (int i = 0; i < n; i++)
{
Q_re[i][j] /= t;
Q_im[i][j] /= t;
}
else
for (int i = 0; i < n; i++)
{
Q_re[i][j] = 0.;
Q_im[i][j] = 0.;
}
}
return new double[] {det, 0.};
    }
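
// Usage sketch (hypothetical caller): Q_re/Q_im hold the matrix A on entry and are
// overwritten with the unitary factor Q; R_re/R_im receive the right triangular
// factor, so that A = Q * R.
//
//   int n = A_re.length;
//   double[][] R_re = new double[n][n];
//   double[][] R_im = new double[n][n];
//   double[] absDet = decompose(A_re, A_im, R_re, R_im);  // A_re/A_im now hold Q
|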
<filename>src/typings.d.ts
declare const _G: any
declare type VConnection = {
/** trailer Entity */
ent: Entity
/** AdvBallSocket */
socket: Entity
}
/** @todo */
type ExtensionData = {
}
/** Storage for handling car info */
declare type VEntity = {
/** inputPos of vehicle */
inputPos?: Vector
/** outputPos of vehicle */
outputPos?: Vector
/** inputType of vehicle */
inputType?: string
/** outputType of vehicle */
outputType?: string
/** disable use on trailer? */
disableUse?: boolean
/** disable use on wheels only? */
disableUseOnWheels?: boolean
/** entity */
ent: Entity
/** connection */
connection?: VConnection
/** @deprecated */
phys?: PhysObj
/** @deprecated */
hydraulic?: Entity
/** fix for autodisconnect */
lastDisconnected?: number
/** @todo */
extensions?: ExtensionData[]
};
declare type System = {
/** you will use this */
HandleTruck?: (this: void, ventity: VEntity) => void
/** and probably this */
Disconnect?: (this: void, ventity: VEntity) => void
/** pls use this */
Connect?: (this: void, ventity: VEntity, vtrailer: VEntity) => void
}
/** Why do I have to write this myself? (missing from the stock typings) */
declare namespace table {
    export function insert<T>(this: void, tbl: T[], value: T): number
}
declare namespace constraint {
export function Hydraulic(this: void, pl: Player | undefined | null, Ent1: Entity, Ent2: Entity, Bone1: number, Bone2: number, LPos1: Vector, LPos2: Vector, Length1: number, Length2: number, width: number, key: KEY, fixed: number, speed: number, material: string, toggle: boolean): [Entity, Entity, Entity, Entity]
}
declare namespace net {
function Start(this: void, name: string, unrealiable?: boolean)
}
|
<filename>src/main/java/org/age/hz/core/services/topology/leader/election/LeaderElector.java
package org.age.hz.core.services.topology.leader.election;
import org.age.hz.core.node.NodeId;
public interface LeaderElector {
NodeId electLeader() throws Throwable;
boolean isCurrentNodeMaster();
}
|
# Form a length-k string over the alphabet of s, either by "incrementing" the
# first k characters of s (base-len(l) style) or, when s is shorter than k, by
# padding s with its smallest character.
n, k = input().split()
n = int(n)
k = int(k)
ans = n - k + 1              # number of length-k windows in s (kept from the original; unused below)
s = list(input())
l = sorted(set(s))           # distinct characters of s, in ascending order
i = k - 1
if n >= k:
    # Increment the k-character prefix as a base-len(l) number: wrap maximal
    # digits to the smallest character and carry left until one digit can be bumped.
    while i >= 0:
        if s[i] == l[-1]:
            s[i] = l[0]
            i -= 1
        else:
            s[i] = l[l.index(s[i]) + 1]
            break
    for i in range(k):
        print(s[i], end='')
else:
    # s is shorter than k: pad it with the smallest character up to length k.
    for i in range(n, k):
        s.append(l[0])
    for i in range(k):
        print(s[i], end='')
|
#!/usr/bin/env python2
# pylint: disable=missing-docstring,invalid-name
# XXX: Refactor to a command line tool and remove pylint disable
"""Compute coordinates for volcano plot."""
from __future__ import absolute_import, division, print_function
import argparse
import json
import os
import pandas as pd # pylint: disable=import-error
import numpy as np # pylint: disable=import-error
parser = argparse.ArgumentParser(description='Compute coordinates for volcano plot.')
parser.add_argument('de_results', help='Differential Expression results')
args = parser.parse_args()
if not os.path.isfile(args.de_results):
print(json.dumps({'proc.error': 'Missing DE results file.'}, separators=(',', ':')))
exit(1)
def is_gzipped(f):
"""Check if file f is gzipped."""
with open(f, 'rb') as de_file:
magic = de_file.read(2)
return magic == '\037\213'
if args.de_results.endswith(('.xls', 'xlsx')):
de = pd.io.excel.read_excel(args.de_results, sheetname=0)
elif is_gzipped(args.de_results):
de = pd.read_csv(args.de_results, compression='gzip', sep="\t")
else:
de = pd.read_csv(args.de_results, sep="\t")
de = de.dropna()
header = list(de)
ids = list(de.ix[:, 0])
# get FC data
if 'log2FoldChange' in header:
x = np.array(de['log2FoldChange'])
xlabel = 'log2FoldChange'
elif 'paired_avg_FC' in header:
x = np.array(de['paired_avg_FC'])
xlabel = 'paired_avg_FC'
elif 'logFC' in header:
x = np.array(de['logFC'])
xlabel = 'logFC'
elif 'log2(fold_change)' in header:
x = np.array(de['log2(fold_change)'])
xlabel = 'log2(fold_change)'
# get FDR/pval data. For limma use log odds (B statistics)
if 'ebays.pval' in header:
y = -np.log10(np.array(de['ebays.pval']))
ylabel = '-log10(' + 'ebays.pval' + ')'
elif 'padj' in header:
y = -np.log10(np.array(de['padj']))
ylabel = '-log10(' + 'padj' + ')'
elif 'FDR.DE' in header:
y = -np.log10(np.array(de['fdr.de']))
ylabel = '-log10(' + 'fdr.de' + ')'
elif 'FDR' in header:
y = -np.log10(np.array(de['FDR']))
ylabel = '-log10(' + 'FDR' + ')'
elif 'q_value' in header:
y = -np.log10(np.array(de['q_value']))
ylabel = '-log10(' + 'q_value' + ')'
elif 'B' in header:
y = de['B']
ylabel = 'Log Odds'
y[y == np.inf] = np.amax(y[np.isfinite(y)])
try:
data = {'volcano_plot': {'flot': {'data': zip(x, y)},
'xlabel': xlabel,
'ylabel': ylabel, 'id': ids}}
print(json.dumps(data, separators=(',', ':')))
except NameError:
print(json.dumps({'proc.error': 'FC and/or FDR/pval data is missing.'}, separators=(',', ':'), allow_nan=False))
exit(1)
|
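On success the script prints a single compact JSON document. For a toy DESeq2-style table with log2FoldChange and padj columns, the output would look roughly like this (values illustrative):

{"volcano_plot":{"flot":{"data":[[1.5,2.301],[-0.7,1.337]]},"xlabel":"log2FoldChange","ylabel":"-log10(padj)","id":["gene1","gene2"]}}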
package de.thatsich.autosort.alias;

import de.thatsich.data.Repository;

import java.nio.file.Path;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;

public class NonPersistentPathRepository implements Repository<String, Path> {

    private final Map<String, Path> internal = new HashMap<>();

    @Override
    public void initialize() {
    }

    @Override
    public void persist(String s, Path path) {
        this.internal.put(s, path);
    }

    @Override
    public Optional<Path> find(String s) {
        return Optional.ofNullable(this.internal.get(s));
    }

    @Override
    public Optional<Path> remove(String alias) {
        return Optional.ofNullable(this.internal.remove(alias));
    }

    @Override
    public Map<String, Path> unmodifiable() {
        return Collections.unmodifiableMap(this.internal);
    }
}
|
//
// Created by <NAME> on 30.12.2019.
//
#include "ParticleEmission.h"

ParticleEmission::ParticleEmission(Particles &incident_particles, Particles &emitted_particles, Grid &grid,
                                   Matrix &domain_condition, array<scalar, 3> emission_direction, scalar gamma,
                                   scalar emission_energy) :
        incident_particles(&incident_particles), emitted_particles(&emitted_particles),
        grid(&grid), domain_condition(&domain_condition),
        emission_direction(emission_direction), gamma(gamma),
        emission_energy(emission_energy) {}

void ParticleEmission::emission(int seed) {
    // NOTE: the seed parameter is currently unused; the engine is seeded from random_device
    random_device rd;
    default_random_engine generator(rd());
    uniform_real_distribution<scalar> distribution(0.0, 1.0);
    scalar vel_module = sqrt(2 * emission_energy / emitted_particles->get_mass());
    int ptcl_idx = 0;
    while (ptcl_idx < incident_particles->get_Ntot()) {
        if (emission_condition(ptcl_idx)) {
            if (distribution(generator) < gamma) {
                array<scalar, 2> pos = incident_particles->get_position(ptcl_idx);
                array<scalar, 3> vel = {emission_direction[0] * vel_module,
                                        emission_direction[1] * vel_module,
                                        emission_direction[2] * vel_module};
                emitted_particles->append(pos, vel);
            }
            incident_particles->pop(ptcl_idx);
        } else {
            ptcl_idx++;
        }
    }
}

bool ParticleEmission::emission_condition(int ptcl_idx) {
    int cell_z, cell_r;
    const size_t dim = 2;
    array<scalar, dim> pos = incident_particles->get_position(ptcl_idx);
    cell_z = floor(pos[0] / grid->dz);
    cell_r = floor(pos[1] / grid->dr);
    return domain_condition->operator()(cell_z, cell_r);
}
|
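The emission loop above is a Bernoulli thinning of the absorbed particles: every incident particle that lands in the marked domain is removed, and with probability gamma a secondary is re-injected at the same position with a fixed speed along emission_direction. A minimal NumPy sketch of that rule (names are illustrative, not the C++ API):

import numpy as np

def emit(positions, gamma, direction, energy, mass, rng=np.random.default_rng()):
    # speed from the fixed emission energy: E = m v^2 / 2
    v = np.sqrt(2.0 * energy / mass)
    accepted = rng.random(len(positions)) < gamma
    velocities = np.tile(np.asarray(direction) * v, (accepted.sum(), 1))
    return positions[accepted], velocities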
// Copyright (c) 2020 <NAME>
//
// This software is released under the MIT License.
// https://opensource.org/licenses/MIT

/**
 * Data service tokens for Angular dependency injection pattern
 */
export const DATA_INJECTION_TOKENS = {
    storeCatalog: 'StoreCatalogData',
    people: 'PeopleData',
    shared: 'SharedData',
    clientsCrud: 'ClientsCrud',
    sellersCrud: 'SellersCrud',
    productsCrud: 'ProductsCrud',
    usersCrud: 'UsersCrud',
    salesCrud: 'SalesCrud'
};
|
package com.joymain.jecs.pm.model;
// Generated 2009-11-6 17:25:20 by Hibernate Tools 3.1.0.beta4
/**
* @struts.form include-all="true" extends="BaseForm"
* @hibernate.class
* table="JPM_PRODUCT_COMBINATION"
*
*/
public class JpmProductCombination extends com.joymain.jecs.model.BaseObject implements java.io.Serializable {
// Fields
private Long jpcId;
private String productNo;
private String subProductNo;
private Long qty;
// Constructors
/** default constructor */
public JpmProductCombination() {
}
/** full constructor */
public JpmProductCombination(String productNo, String subProductNo) {
this.productNo = productNo;
this.subProductNo = subProductNo;
}
/**
* * @hibernate.property
* column="QTY"
*
*
*/
public Long getQty() {
return qty;
}
/**
* @spring.validator type="required"
*/
public void setQty(Long qty) {
this.qty = qty;
}
// Property accessors
/**
* * @hibernate.id
* generator-class="native"
* type="java.lang.Long"
* column="JPC_ID"
*
*/
public Long getJpcId() {
return this.jpcId;
}
public void setJpcId(Long jpcId) {
this.jpcId = jpcId;
}
/**
* * @hibernate.property
* column="PRODUCT_NO"
* length="20"
*
*/
public String getProductNo() {
return this.productNo;
}
/**
* @spring.validator type="required"
*/
public void setProductNo(String productNo) {
this.productNo = productNo;
}
/**
* * @hibernate.property
* column="SUB_PRODUCT_NO"
* length="20"
*
*/
public String getSubProductNo() {
return this.subProductNo;
}
/**
* @spring.validator type="required"
*/
public void setSubProductNo(String subProductNo) {
this.subProductNo = subProductNo;
}
/**
* toString
* @return String
*/
public String toString() {
StringBuffer buffer = new StringBuffer();
buffer.append(getClass().getName()).append("@").append(Integer.toHexString(hashCode())).append(" [");
buffer.append("productNo").append("='").append(getProductNo()).append("' ");
buffer.append("subProductNo").append("='").append(getSubProductNo()).append("' ");
buffer.append("]");
return buffer.toString();
}
public boolean equals(Object other) {
if ( (this == other ) ) return true;
if ( (other == null ) ) return false;
if ( !(other instanceof JpmProductCombination) ) return false;
JpmProductCombination castOther = ( JpmProductCombination ) other;
return ( (this.getJpcId()==castOther.getJpcId()) || ( this.getJpcId()!=null && castOther.getJpcId()!=null && this.getJpcId().equals(castOther.getJpcId()) ) )
&& ( (this.getProductNo()==castOther.getProductNo()) || ( this.getProductNo()!=null && castOther.getProductNo()!=null && this.getProductNo().equals(castOther.getProductNo()) ) )
&& ( (this.getSubProductNo()==castOther.getSubProductNo()) || ( this.getSubProductNo()!=null && castOther.getSubProductNo()!=null && this.getSubProductNo().equals(castOther.getSubProductNo()) ) );
}
public int hashCode() {
int result = 17;
result = 37 * result + ( getJpcId() == null ? 0 : this.getJpcId().hashCode() );
result = 37 * result + ( getProductNo() == null ? 0 : this.getProductNo().hashCode() );
result = 37 * result + ( getSubProductNo() == null ? 0 : this.getSubProductNo().hashCode() );
return result;
}
}
|
<reponame>willstudy/iotex-analytics<gh_stars>0
package chainmeta

import (
    "context"
    "testing"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"

    "github.com/iotexproject/iotex-analytics/indexprotocol"
    "github.com/iotexproject/iotex-analytics/indexservice"
    s "github.com/iotexproject/iotex-analytics/sql"
    "github.com/iotexproject/iotex-analytics/testutil"
)

const (
    connectStr = "ba8df54bd3754e:9cd1f263@tcp(us-cdbr-iron-east-02.cleardb.net:3306)/"
    dbName     = "heroku_7fed0b046078f80"
)

func TestProtocol_MostRecentTPS(t *testing.T) {
    require := require.New(t)
    ctx := context.Background()
    var err error

    testutil.CleanupDatabase(t, connectStr, dbName)

    store := s.NewMySQL(connectStr, dbName)
    require.NoError(store.Start(ctx))
    defer func() {
        _, err := store.GetDB().Exec("DROP DATABASE " + dbName)
        require.NoError(err)
        require.NoError(store.Stop(ctx))
    }()

    var cfg indexservice.Config
    cfg.Poll = indexprotocol.Poll{
        VoteThreshold:        "100000000000000000000",
        ScoreThreshold:       "0",
        SelfStakingThreshold: "0",
    }
    idx := indexservice.NewIndexer(store, cfg)
    p := NewProtocol(idx)

    t.Run("Testing unregistered", func(t *testing.T) {
        _, err = p.MostRecentTPS(1)
        require.EqualError(err, "blocks protocol is unregistered")
    })

    idx.RegisterDefaultProtocols()

    t.Run("Testing 0 range", func(t *testing.T) {
        _, err = p.MostRecentTPS(0)
        assert.EqualError(t, err, "TPS block window should be greater than 0")
    })
}
|
<filename>@DOC by DIPTA/Old/@GEO/ngsdq-geo/ZZtriangle_circle_intersect.cpp
/*
* Author : <NAME>
* Problem Name : Radiation from Fukushima
* Algorithm : ALGEBRAIC sum of triangle & circle's intersection, Binary Search.
* Complexity : O(BS*n)
* Difficulty : Hard
*
* IUT Programming Contest 2011
* Alternate Judge Solution
*/
#include<time.h>
#include<stdio.h>
#include<math.h>
#include<assert.h>
#include<vector>
#include<algorithm>
using namespace std;
#define CO(V) V.x,V.y,V.z
#define COXY(V) V.x,V.y
#define S(x) ((x)*(x))
#define _abs(x) (((x)>0)?(x):-(x))
#define _max(x,y) (((x)>(y))?(x):(y))
#define _min(x,y) (((x)<(y))?(x):(y))
#define EPS 1e-9
#define Z(x) (_abs(x) < EPS)
#define det(a,b,c,d) ((a)*(d)-(b)*(c))
double pi = 2.*acos(0.);
double sqrt2 = sqrt(2.);
double mysqrt(double x){if(x < 0.) return 0;return sqrt(x);}
double myasin(double x){if(x < -1.) return -pi/2;if(x > 1.) return pi/2;return asin(x);}
double myacos(double x){if(x < -1.) return -pi;if(x > 1.) return 0;return acos(x);}
// BEGIN VECTOR CLASS
struct V;
V operator+(V a,V b);
double operator*(V a,V b);
V operator*(V b,double a);
V operator*(double a,V b);
struct V{
double x,y;
V(){}
V(double _x,double _y){x=_x;y=_y;}
double mag2(){ return S(x)+S(y); }
double mag(){ return mysqrt(mag2()); }
void norm(){ double d = mag();x/=d;y/=d;}
V unit(){ V ret = *this; ret.norm(); return ret;}
bool scan(){ return scanf("%lf%lf",&x,&y)==2; }
void show(){ printf("[%.2lf, %.2lf]",x,y); }
void showln(){ printf("[%.4lf, %.4lf]\n",x,y); }
double dot(V b){ return x*b.x + y*b.y;}
double cross(V b){ return x*b.y - y*b.x;}
double angle(V b){ return myacos( unit().dot( b.unit() ) ); }
};
V operator+(V a,V b){ return V(a.x+b.x, a.y+b.y); }
V operator-(V a){ return V (-a.x, -a.y); }
V operator-(V a,V b){ return V(a.x-b.x, a.y-b.y); }
double operator*(V a,V b){ return a.cross(b); }
V operator*(double a,V b){ return V(a*b.x, a*b.y); }
V operator*(V b,double a){ return V(a*b.x, a*b.y); }
V operator/(V b,double a){ return V(b.x/a, b.y/a); }
V I(1,0);
V J(0,1);
V INEG(-1,0);
V JNEG(0,-1);
V Origin(0,0);
// END VECTOR CLASS
#define MAX 5005
#define MAX_COORDINATE 200
int n;
V p[MAX], center;
double area;
double triangleCircleCommonArea(V A, V B, double r){
assert(A*B >= -EPS);
double a = A.mag();
double b = B.mag();
//both inside
if(a <= r && b <= r){
assert(A*B >= 0);
return 0.5*(A*B);
}
// solving vector equation: (A + (B-A)t).(A + (B-A)t) = r*r
double c2 = (B-A).mag2();
double c1 = A.dot(B-A);
double c0 = A.mag2() - r*r;
double D = c1*c1 - c2*c0;
assert(fabs(c2) > EPS); //should be POSITIVE
vector<double> vt;
vt.clear();
vt.push_back(0);
vt.push_back(1);
if(D >= 0.){
double t = (-c1 - mysqrt(D)) / c2;
if(0<=t && t<=1)
vt.push_back(t);
t = (-c1 + mysqrt(D)) / c2;
if(0<=t && t<=1)
vt.push_back(t);
}
//sort 't's
sort(vt.begin(), vt.end());
//no intersection, both outside
if(vt.size()==2)
return 0.5*A.angle(B)*r*r;
//full intersection, both outside
if(vt.size()==4){
V A1 = A + (B-A)*vt[1];
V B1 = A + (B-A)*vt[2];
assert(A*A1 >= -EPS);
assert(A1*B1 >= -EPS);
assert(B1*B >= -EPS);
return 0.5*A1.angle(A)*r*r + 0.5*A1*B1 + 0.5*B1.angle(B)*r*r;
}
//size == 3
//A inside, B outside
if(a <= r){ // A
V B1 = A + (B-A)*vt[1];
assert(A*B1 >= -EPS);
assert(B1*B >= -EPS);
return 0.5*A*B1 + 0.5*B1.angle(B)*r*r;
}
//B inside, A outside
if(b <= r){ // A
V A1 = A + (B-A)*vt[1];
assert(A*A1 >= -EPS);
assert(A1*B >= -EPS);
return 0.5*A1.angle(A)*r*r + 0.5*A1*B;
}
//should never come here!
assert(0);
return 0;
}
double triangleCircleAlgebraicCommonArea(V A, V B, double r){ //positive if CCW, negative if CW
double fcall;
if(A*B < 0){
fcall = triangleCircleCommonArea(B, A, r);
assert(fcall >= -EPS);
return -fcall;
}
else{
fcall = triangleCircleCommonArea(A, B, r);
assert(fcall >= -EPS);
return fcall;
}
}
double solve(double r){
if(r < EPS)
return 0;
double ret = 0;
int i,j;
for(i=0;i<n;i++){
j=(i+1)%n;
ret += triangleCircleAlgebraicCommonArea(p[i], p[j], r);
}
return ret/area;
}
int main(){
freopen("fukushima.in", "r", stdin);
freopen("fukushimaNafi.ans", "w", stdout);
double cl = clock();
int T, N;
int i, j;
double lo, hi, r, per, per2;
scanf("%d", &T);
for(N=1;N<=T;N++){
scanf("%d", &n);
for(i=0;i<n;i++)
p[i].scan();
center.scan();
scanf("%lf", &per);
per/=100.;
for(i=0;i<n;i++)
p[i] = p[i]-center;
area = 0;
for(i=0;i<n;i++){
j = (i+1)%n;
area += p[i] * p[j];
}
area /= 2.;
assert(area >= 0);
lo = 0;
hi = 2*MAX_COORDINATE*sqrt2+10;
while(hi-lo > 1e-7){
r = (hi+lo)/2.;
per2 = solve(r);
if(per2 >= per-EPS)
hi = r;
else
lo = r;
}
double x = r - floor(r);
//passed
//assert(x < 0.3+EPS || 0.7 < x+EPS);
printf("Case %d: %d\n", N, (int)(r+0.5) );
//printf("Case %d: %.4lf\n", N, r + 1e-11);
}
return 0;
}
|
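The judge solution above reduces polygon-circle overlap to a signed sum of triangle-circle intersections (with the origin at the circle center) and then bisects on the radius until the covered fraction reaches the target. A compact sketch of that outer bisection, delegating the geometry to Shapely instead of the analytic sum — an assumption for illustration only, since the contest code computes the intersection exactly:

from shapely.geometry import Polygon, Point

def radius_for_fraction(vertices, center, fraction, iters=60):
    poly = Polygon(vertices)
    lo, hi = 0.0, 1000.0
    for _ in range(iters):
        r = (lo + hi) / 2.0
        # buffer() is a polygonal approximation of the disc of radius r
        covered = poly.intersection(Point(center).buffer(r)).area / poly.area
        if covered >= fraction:
            hi = r
        else:
            lo = r
    return hi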
/**
 * Checks if ball hits bricks, updates score, and subtracts from brickTracker
 * @param ball
 * @param elapsedTime
 */
public void checkBrickCollision(Ball ball, double elapsedTime) {
    Iterator<Brick> iter = bricksLevel.iterator();
    while (iter.hasNext()) {
        Brick brick = iter.next();
        if (brick.checkBreak(ball)) {
            int[] update = brick.update();
            SCORE += update[0];
            brickTracker += update[1];
            ball.bounce(elapsedTime);
        }
    }
} |
//---------------------------------------------------------------------------//
// Map a reference point to the physical space of an entity.
void POD_PointCloudLocalMap::mapToPhysicalFrame(
    const Entity &entity,
    const Teuchos::ArrayView<const double> &reference_point,
    const Teuchos::ArrayView<double> &physical_point ) const
{
} |
import { locale as _locale } from './zh-CN'

export const locale: typeof _locale = {
    title: 'Shanbay Word Syncing',
    open: 'Open',
    error: {
        login: 'Shanbay login failed. Click to open shanbay.com.',
        network:
            'Unable to access shanbay.com. Please check your network connection.',
        word:
            "Unable to add to Shanbay notebook. This word is not in Shanbay's vocabulary database."
    }
}
|
<reponame>geovisto/geovisto-filters<filename>src/model/internal/filter/basic/MapFilterOperation.ts
// Geovisto core
import {
    AbstractMapDomain
} from "geovisto";

import IMapFilterOperation from "../../../types/filter/IMapFilterOperation";

/**
 * This class wraps a filter operation defined by constructor props.
 *
 * @author <NAME>
 */
class MapFilterOperation extends AbstractMapDomain implements IMapFilterOperation {

    private label: string;

    /**
     * It performs the filter operation, which compares a value with a pattern.
     *
     * @param value
     * @param pattern
     */
    public match: (value: unknown, pattern: unknown) => boolean;

    /**
     * It creates a new filter operation.
     *
     * @param label
     * @param acceptFunction
     */
    public constructor(label: string, acceptFunction: (value: unknown, pattern: unknown) => boolean) {
        super();
        this.label = label;
        this.match = acceptFunction;
    }

    /**
     * It returns a unique string label of the filter, representing the operator given as a constructor parameter.
     */
    public getName(): string {
        return this.label;
    }
}

export default MapFilterOperation; |
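The same wrap-a-predicate pattern is easy to mirror outside the Geovisto type system if you ever need it: a labelled callable, nothing more. A Python sketch for comparison (not part of the library):

class FilterOperation:
    def __init__(self, label, match):
        self.label = label
        self.match = match  # (value, pattern) -> bool

    def get_name(self):
        return self.label

equals = FilterOperation("==", lambda value, pattern: value == pattern)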
<filename>test/_test_Animate_Preprocess.py
# -*- coding: utf-8 -*-
"""
Created on Sat May 1 23:49:09 2021
@author: Christian
"""
import numpy as np
import hysteresis.plotSpecial.animate as ani
import hysteresis as hys
import scipy.signal
np.random.seed(101)
x = np.linspace(0, 1, 1001)*10
triangleBig = scipy.signal.sawtooth(x*2,0.5)
permutate = np.random.normal(0,1,1001)/2
Ynoise = triangleBig + permutate
Ynoise = scipy.signal.savgol_filter(Ynoise,53,2)
trianglexy = np.column_stack((x, Ynoise))
test1 = hys.Hysteresis(trianglexy)
# test.plot()
permutate = np.random.normal(0,1,1001)/2
Ynoise = triangleBig + permutate
Ynoise = scipy.signal.savgol_filter(Ynoise,53,2)
trianglexy = np.column_stack((x, Ynoise))
test2 = hys.Hysteresis(trianglexy)
permutate = np.random.normal(0,1,1001)/2
Ynoise = triangleBig + permutate
Ynoise = scipy.signal.savgol_filter(Ynoise,53,2)
trianglexy = np.column_stack((x, Ynoise))
test3 = hys.Hysteresis(trianglexy)
# xyAni = ani.getAnixy(trianglexy, 2)
# frames =ani.getAniFrames(trianglexy[:,0], 0.1)
# myAnimation = ani.Animation(test1,1,5)
# myAnimation.Animate()
myAnimation = ani.JointAnimation([test1, test2,test3],1,5)
myAnimation.Animate()
# myAnimation = ani.Animation(test,1,5)
# myAnimation.Animate() |
The Centre on Tuesday told the Delhi high court that criminalising marital rape “may destabilise the institution of marriage” and would become an easy tool for harassing husbands, a position slammed as retrograde by rights activists.
Responding to a bunch of petitions seeking criminalisation of marital rape, the government said in an affidavit that the Supreme Court and high courts had already pointed to the misuse of Section 498A of Indian Penal Code (IPC) that prescribes punishment for husband for subjecting his wife to cruelty.
“As to what constitutes marital rape and what would constitute marital non-rape needs to be defined precisely before a view on its criminalisation is taken,” it said.
In India, marital rape is not defined in any statute or law. Rights activists want it to be made a criminal offence, saying it forces women to suffer the worst form of sexual violence in their homes.
Petitioners NGO RIT Foundation, All India Democratic Women’s Association and a marital rape victim have challenged as unconstitutional an exception to Section 375 of IPC and Section 376B.
Section 375 that defines rape also says sexual intercourse by a man with his wife aged 15 years or above is not rape even if it is without her consent. Section 376B deals with sexual intercourse by man with his wife during separation.
“If all sexual acts by a man with his own wife qualify to be marital rape, then the judgment as to whether it is a marital rape or not will singularly rest with the wife,” it said, adding there could be no lasting evidence of such acts between a man and his own wife.
Defining marital rape would need a broad-based consensus and a change in the attitude of prosecutors, police officers and society in general, the Centre said.
“It has to be ensured adequately that marital rape does not become a phenomenon which may destabilise the institution of marriage apart from being an easy tool for harassing the husbands,” it said.
What could appear to be marital rape to one might not appear the same to others, it said, warning against blindly aping the West.
Appearing for a petitioner, senior advocate Colin Gonsalves said marriage licence shouldn’t be viewed as a licence for a husband to rape his wife with impunity.
Marital rape remains a divisive issue.
Supreme Court lawyer Swaraj Kaushal, who is the husband of external affairs minister Sushma Swaraj, stirred a row on social media when he tweeted, “There is nothing like marital rape. Our homes should not become police stations.” He was reacting to the government’s affidavit.
The Centre was, however, hammered by rights activists.
People’s Union for Civil Liberties national secretary Kavita Srivastava called its stand retrograde. “Rape is rape, whether on marital bed or elsewhere, whether by husband or a stranger, and it’s time the governments understood this,” she said.
The government should criminalise marital rape to stop violence against women, Lad Kumari Jain, former chairperson of the Rajasthan State Commission for Women, said.
A men’s rights group on Monday told the court that existing laws were “very much capable of dealing with the cases of sexual abuse of women”.
The court will continue hearing the case on Wednesday.
First Published: Aug 29, 2017 22:49 IST |
<gh_stars>10-100
""" Implements functions that handle the serialization of types and classes.

Type handlers store and load objects of exactly that type. Instance handlers
also work for subclasses of that type.

The instance handlers are processed in the order they are stored. This means
that if an object is an instance of several handled classes it will not raise
an error and will be handled by the first matching handler in the OrderedDict.
"""
import numpy as np
import types
import inspect
from collections import OrderedDict

""" The handler dictionaries are automatically filled when Handler class
definitions are parsed via the metaclass __new__ function.
"""
# Saver dictionaries: classes as keys, handler classes as values
type_saver_handlers = dict()
instance_saver_handlers = OrderedDict()

# Loader dictionaries: class names as keys, handler classes as values
loader_handlers = dict()

def classname(val):
    """ Returns a qualified class name as string.

    The qualified class name consists of the module and the class name,
    separated by a dot. If an instance is passed to this function, the name
    of its class is returned.

    Parameters
    ----------
    val : instance or class
        The instance or a class of which the qualified class name is returned.

    Returns
    -------
    str : The qualified class name.
    """
    if inspect.isclass(val):
        return ".".join([val.__module__,
                         val.__name__])
    return ".".join([val.__class__.__module__,
                     val.__class__.__name__])

def set_attribute(level, key, value):
    level.attrs[key] = np.string_(value)

def get_attribute(level, key):
    return level.attrs[key].decode('ascii')

def set_classname(level, clsname):
    set_attribute(level, '__class__', clsname)

def get_classname(level):
    return get_attribute(level, '__class__')

def save_to_level(val, level, options, name=None):
    """ A generic save function that dispatches the correct handler.
    """
    t = type(val)
    if t in type_saver_handlers:
        return type_saver_handlers[t].save_to_level(val, level, options, name)
    for i in instance_saver_handlers:
        if isinstance(val, i):
            return instance_saver_handlers[i].save_to_level(val, level,
                                                            options, name)
    raise ValueError("%s of type %s is not supported by any handler!" %
                     (str(val), str(t)))

def load_from_level(level, obj=None):
    """ Loads an object from an HDF5 group or dataset.

    Parameters
    ----------
    level : h5py.Dataset or h5py.Group
        An HDF5 node that stores an object in a valid format.
    obj : instance or None
        If provided this instance will be updated from the HDF5 node instead
        of creating a new instance of the stored object.

    Returns
    -------
    instance of the stored object
    """
    clsname = get_classname(level)
    if clsname not in loader_handlers:
        raise ValueError('Class `%s` has no registered handler.' % clsname)
    handler = loader_handlers[clsname]
    return handler.load_from_level(level, obj=obj)

class TypeRegister(type):
    """ Metaclass that registers a type handler in a global dictionary.
    """
    def __new__(cls, clsname, bases, attrs):
        # convert all methods to classmethods
        for attr_name, attr_value in attrs.items():
            if isinstance(attr_value, types.FunctionType):
                attrs[attr_name] = classmethod(attr_value)
        newclass = super().__new__(cls, clsname, bases, attrs)
        # register the class as a handler for all specified types
        for t in newclass.types:
            newclass.register(t)
        return newclass

class InstanceRegister(type):
    """ Metaclass that registers an instance handler in a global dictionary.
    """
    def __new__(cls, clsname, bases, attrs):
        # convert all methods to classmethods
        for attr_name, attr_value in attrs.items():
            if isinstance(attr_value, types.FunctionType):
                attrs[attr_name] = classmethod(attr_value)
        newclass = super().__new__(cls, clsname, bases, attrs)
        # register the class as a handler for all specified instances
        for t in newclass.instances:
            newclass.register(t)
        return newclass

class Handler:
    # a default for subclasses (only `group` subclasses have to overwrite)
    level_type = 'dataset'

    @classmethod
    def save_to_level(cls, val, level, options, name):
        """ A generic wrapper around the custom save method that each
        handler implements. It creates a dataset or a group depending
        on the `level_type` class attribute and sets the `__class__`
        attribute correctly.

        For more flexibility subclasses can overwrite this method.
        """
        # get the qualified class name of the object to be saved
        clsname = classname(val)
        # create the dataset or the group and call the save method on it
        if cls.is_dataset() and name is None:
            # if we want to save a dataset in the root group (name = None)
            # we have to give it a name
            name = "default"
        if cls.is_group():
            if name is not None:
                level = cls.create_group(level, name, options)
            set_classname(level, clsname)
        ret = cls.save(val, level, options, name)
        if cls.is_dataset():
            set_classname(ret, clsname)
        return ret

    @classmethod
    def load_from_level(cls, level, obj=None):
        """ The loader that has to be implemented by subclasses.
        """
        raise NotImplementedError()

    @classmethod
    def create_group(cls, level, name, options):
        return level.create_group(name)

    @classmethod
    def create_dataset(cls, data, level, name, **kwargs):
        ds = level.create_dataset(name, data=data, **kwargs)
        return ds

    @classmethod
    def is_group(cls):
        return cls.level_type == 'group'

    @classmethod
    def is_dataset(cls):
        return cls.level_type == 'dataset'

    @classmethod
    def get_type(cls, level):
        return cls.casting[get_classname(level)]

class TypeHandler(Handler, metaclass=TypeRegister):
    """ Handles data of a specific type or class.
    """
    types = []
    casting = {}

    @classmethod
    def register(cls, t):
        global type_saver_handlers, loader_handlers
        if t in type_saver_handlers:
            raise ValueError('Type `%s` is already handled by `%s`.' %
                             (str(t), str(type_saver_handlers[t])))
        typename = classname(t)
        type_saver_handlers[t] = cls
        loader_handlers[typename] = cls
        cls.casting[typename] = t

class InstanceHandler(Handler, metaclass=InstanceRegister):
    """ Handles all instances of a specific (parent) class.

    If an instance is subclass to several classes for which a handler exists,
    no error will be raised (in contrast to TypeHandler). Rather, the first
    match in the global instance_saver_handlers OrderedDict will be used.
    """
    instances = []
    casting = {}

    @classmethod
    def register(cls, t):
        global instance_saver_handlers, loader_handlers
        if t in instance_saver_handlers:
            raise ValueError('Instance `%s` is already handled by `%s`.' %
                             (str(t), str(instance_saver_handlers[t])))
        typename = classname(t)
        instance_saver_handlers[t] = cls
        loader_handlers[typename] = cls
        cls.casting[typename] = t

# Specific handlers

class NoneHandler(TypeHandler):
    types = [type(None)]

    def save(cls, val, level, options, name):
        ds = cls.create_dataset(0, level, name, **options(0))
        return ds

    def load_from_level(cls, level, obj=None):
        return None

class ScalarHandler(TypeHandler):
    types = [float, bool, complex, np.int8, np.int16, np.int32,
             np.int64, np.uint8, np.uint16, np.uint32, np.uint64,
             np.float16, np.float32, np.float64, np.bool_, np.complex64,
             np.complex128]

    def save(cls, val, level, options, name):
        ds = cls.create_dataset(val, level, name, **options(val))
        return ds

    def load_from_level(cls, level, obj=None):
        # cast to the correct type
        type_ = cls.get_type(level)
        # retrieve scalar dataset
        return type_(level[()])

class IntHandler(TypeHandler):
    """ Special int handler to deal with Python's variable size ints.

    They are stored as byte arrays. Probably not the most efficient solution...
    """
    types = [int]

    def save(cls, val, level, options, name):
        val = val.to_bytes((val.bit_length() + 7) // 8, byteorder='little')
        data = np.frombuffer(val, dtype=np.uint8)
        ds = cls.create_dataset(data, level, name, **options(data))
        return ds

    def load_from_level(cls, level, obj=None):
        return int.from_bytes(level[:].tobytes(), byteorder='little')

class TimeHandler(TypeHandler):
    types = [np.datetime64, np.timedelta64]

    def save(cls, val, level, options, name):
        val2 = val.view('<i8')
        ds = cls.create_dataset(val2, level, name, **options(val2))
        set_attribute(ds, '__dtype__', val.dtype)
        return ds

    def load_from_level(cls, level, obj=None):
        val = level[()]
        dtype = get_attribute(level, '__dtype__')
        return val.view(dtype)

class StringHandler(TypeHandler):
    types = [str]

    def save(cls, val, level, options, name):
        b = val.encode(encoding=options.encoding)
        ds = BytesHandler.save(b, level, options, name)
        set_attribute(ds, '__encoding__', options.encoding)
        return ds

    def load_from_level(cls, level, obj=None):
        bstring = level[:].tobytes()
        return bstring.decode(get_attribute(level, '__encoding__'))

class BytesHandler(TypeHandler):
    types = [bytes]

    def save(cls, val, level, options, name):
        data = np.frombuffer(val, dtype=np.uint8)
        ds = cls.create_dataset(data, level, name, **options(data))
        return ds

    def load_from_level(cls, level, obj=None):
        return level[:].tobytes()

class DictHandler(TypeHandler):
    level_type = 'group'
    types = [dict]

    def save(cls, val, level, options, name):
        for key, value in val.items():
            save_to_level(value, level, options, key)

    def load_from_level(cls, level, obj=None):
        obj = dict()
        for key, value in level.items():
            obj[key] = load_from_level(value)
        return obj

class SimpleNamespaceHandler(TypeHandler):
    level_type = 'group'
    types = [types.SimpleNamespace]

    def save(cls, val, level, options, name):
        for key, value in val.__dict__.items():
            save_to_level(value, level, options, key)

    def load_from_level(cls, level, obj=None):
        obj = types.SimpleNamespace()
        for key, value in level.items():
            setattr(obj, key, load_from_level(value))
        return obj

class ListHandler(TypeHandler):
    """ Despite its name it also handles tuples.
    """
    level_type = 'group'
    types = [list, tuple]

    def save(cls, val, level, options, name):
        for idx, element in enumerate(val):
            save_to_level(element, level, options, 'idx_%d' % idx)

    def load_from_level(cls, level, obj=None):
        obj = []
        length = len(list(level.keys()))
        for idx in range(length):
            obj.append(load_from_level(level['idx_%d' % idx]))
        # cast to tuple if necessary
        type_ = cls.get_type(level)
        return type_(obj)

class NDArrayHandler(TypeHandler):
    types = [np.ndarray]

    def save(cls, val, level, options, name):
        ds = cls.create_dataset(val, level, name, **options(val))
        return ds

    def load_from_level(cls, level, obj=None):
        return level[:]
|
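Assuming the module above is importable (its name, and the shape of the `options` object, are assumptions here — the repository layout is not shown), a save/load round trip built on save_to_level and load_from_level would look roughly like this. The handlers call options(val) for dataset kwargs and read options.encoding for strings, so a minimal options object is a callable with that attribute:

import h5py

class Options:
    encoding = 'utf-8'
    def __call__(self, val):
        return {}  # no extra dataset kwargs (compression etc.)

with h5py.File('state.h5', 'w') as f:
    # DictHandler writes the keys into the root group and tags it __class__
    save_to_level({'name': 'run-1', 'params': [1, 2.5, None]}, f, Options())

with h5py.File('state.h5', 'r') as f:
    restored = load_from_level(f)  # -> {'name': 'run-1', 'params': [1, 2.5, None]}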
package com.se.entity;
public class ReportKey {
/**
*
* This field was generated by MyBatis Generator.
* This field corresponds to the database column REPORT.STUDENT_ID
*
* @mbg.generated Wed Oct 27 22:15:26 CST 2021
*/
private String studentId;
/**
*
* This field was generated by MyBatis Generator.
* This field corresponds to the database column REPORT.COURSE_ID
*
* @mbg.generated Wed Oct 27 22:15:26 CST 2021
*/
private String courseId;
/**
*
* This field was generated by MyBatis Generator.
* This field corresponds to the database column REPORT.SECTION_ID
*
* @mbg.generated Wed Oct 27 22:15:26 CST 2021
*/
private String sectionId;
/**
*
* This field was generated by MyBatis Generator.
* This field corresponds to the database column REPORT.LAB_ID
*
* @mbg.generated Wed Oct 27 22:15:26 CST 2021
*/
private String labId;
/**
* This method was generated by MyBatis Generator.
* This method corresponds to the database table REPORT
*
* @mbg.generated Wed Oct 27 22:15:26 CST 2021
*/
public ReportKey(String studentId, String courseId, String sectionId, String labId) {
this.studentId = studentId;
this.courseId = courseId;
this.sectionId = sectionId;
this.labId = labId;
}
/**
* This method was generated by MyBatis Generator.
* This method corresponds to the database table REPORT
*
* @mbg.generated Wed Oct 27 22:15:26 CST 2021
*/
public ReportKey() {
super();
}
/**
* This method was generated by MyBatis Generator.
* This method returns the value of the database column REPORT.STUDENT_ID
*
* @return the value of REPORT.STUDENT_ID
*
* @mbg.generated Wed Oct 27 22:15:26 CST 2021
*/
public String getStudentId() {
return studentId;
}
/**
* This method was generated by MyBatis Generator.
* This method sets the value of the database column REPORT.STUDENT_ID
*
* @param studentId the value for REPORT.STUDENT_ID
*
* @mbg.generated Wed Oct 27 22:15:26 CST 2021
*/
public void setStudentId(String studentId) {
this.studentId = studentId == null ? null : studentId.trim();
}
/**
* This method was generated by MyBatis Generator.
* This method returns the value of the database column REPORT.COURSE_ID
*
* @return the value of REPORT.COURSE_ID
*
* @mbg.generated Wed Oct 27 22:15:26 CST 2021
*/
public String getCourseId() {
return courseId;
}
/**
* This method was generated by MyBatis Generator.
* This method sets the value of the database column REPORT.COURSE_ID
*
* @param courseId the value for REPORT.COURSE_ID
*
* @mbg.generated Wed Oct 27 22:15:26 CST 2021
*/
public void setCourseId(String courseId) {
this.courseId = courseId == null ? null : courseId.trim();
}
/**
* This method was generated by MyBatis Generator.
* This method returns the value of the database column REPORT.SECTION_ID
*
* @return the value of REPORT.SECTION_ID
*
* @mbg.generated Wed Oct 27 22:15:26 CST 2021
*/
public String getSectionId() {
return sectionId;
}
/**
* This method was generated by MyBatis Generator.
* This method sets the value of the database column REPORT.SECTION_ID
*
* @param sectionId the value for REPORT.SECTION_ID
*
* @mbg.generated Wed Oct 27 22:15:26 CST 2021
*/
public void setSectionId(String sectionId) {
this.sectionId = sectionId == null ? null : sectionId.trim();
}
/**
* This method was generated by MyBatis Generator.
* This method returns the value of the database column REPORT.LAB_ID
*
* @return the value of REPORT.LAB_ID
*
* @mbg.generated Wed Oct 27 22:15:26 CST 2021
*/
public String getLabId() {
return labId;
}
/**
* This method was generated by MyBatis Generator.
* This method sets the value of the database column REPORT.LAB_ID
*
* @param labId the value for REPORT.LAB_ID
*
* @mbg.generated Wed Oct 27 22:15:26 CST 2021
*/
public void setLabId(String labId) {
this.labId = labId == null ? null : labId.trim();
}
} |
package storage

import (
    "github.com/t-yuki/zipkin-go/models"
    "github.com/t-yuki/zipkin-go/storage/mysql"
)

func Open() (Storage, error) {
    stor, err := mysql.Open()
    return stor, err
}

type Storage interface {
    StoreSpans(spans models.ListOfSpans) error
    Close() error
}
|
<filename>api/rx/zb_explicit.go
package rx

import (
    "encoding/binary"
)

const (
    zbExplicitAPIID byte = 0x91

    zbeAddr64Offset    = 0
    zbeAddr16Offset    = 8
    zbeSrcEPOffset     = 10
    zbeDstEPOffset     = 11
    zbeClusterIDOffset = 12
    zbeClusterIDLength = 2
    zbeProfileIDOffset = 14
    zbeProfileIDLength = 2
    zbeOptionsOffset   = 16
    zbeDataOffset      = 17
)

var _ Frame = (*ZBExplicit)(nil)

// ZBExplicit rx frame
type ZBExplicit struct {
    buffer []byte
}

func newZBExplicit() Frame {
    return &ZBExplicit{
        buffer: make([]byte, 0),
    }
}

// RX frame data
func (f *ZBExplicit) RX(b byte) error {
    f.buffer = append(f.buffer, b)
    return nil
}

// Addr64 64-bit address of sender
func (f *ZBExplicit) Addr64() uint64 {
    return binary.BigEndian.Uint64(f.buffer[zbeAddr64Offset : zbeAddr64Offset+addr64Length])
}

// Addr16 16-bit address of sender
func (f *ZBExplicit) Addr16() uint16 {
    return binary.BigEndian.Uint16(f.buffer[zbeAddr16Offset : zbeAddr16Offset+addr16Length])
}

// SrcEP source endpoint
func (f *ZBExplicit) SrcEP() byte {
    return f.buffer[zbeSrcEPOffset]
}

// DstEP destination endpoint
func (f *ZBExplicit) DstEP() byte {
    return f.buffer[zbeDstEPOffset]
}

// ClusterID cluster ID
func (f *ZBExplicit) ClusterID() uint16 {
    return binary.BigEndian.Uint16(f.buffer[zbeClusterIDOffset : zbeClusterIDOffset+zbeClusterIDLength])
}

// ProfileID profile ID
func (f *ZBExplicit) ProfileID() uint16 {
    return binary.BigEndian.Uint16(f.buffer[zbeProfileIDOffset : zbeProfileIDOffset+zbeProfileIDLength])
}

// Options frame options
func (f *ZBExplicit) Options() byte {
    return f.buffer[zbeOptionsOffset]
}

// Data frame data
func (f *ZBExplicit) Data() []byte {
    if len(f.buffer) == zbeDataOffset {
        return nil
    }
    return f.buffer[zbeDataOffset:]
}
|
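The accessor offsets above map directly onto the wire layout of the 0x91 frame payload: 8-byte 64-bit address, 2-byte 16-bit address, source and destination endpoints, big-endian cluster and profile IDs, an options byte, then the data. For reference, the same decode written against Python's struct module (a sketch based on the offsets in this file, not part of the Go package):

import struct

def parse_zb_explicit(buf: bytes):
    # >QHBBHHB = addr64, addr16, src_ep, dst_ep, cluster, profile, options (17 bytes)
    addr64, addr16, src_ep, dst_ep, cluster, profile, options = \
        struct.unpack_from('>QHBBHHB', buf, 0)
    return addr64, addr16, src_ep, dst_ep, cluster, profile, options, buf[17:]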
// SPDX-License-Identifier: MIT
// SPDX-FileCopyrightText: Copyright 2019-2022 Heal Research

#ifndef OPERON_FORMAT_HPP
#define OPERON_FORMAT_HPP

#include <unordered_map>

#include "tree.hpp"

namespace Operon {

class Dataset;

class OPERON_EXPORT TreeFormatter {
    static void FormatNode(Tree const& tree, std::unordered_map<Operon::Hash, std::string> variableNames, size_t i, std::string& current, std::string indent, bool isLast, bool initialMarker, int decimalPrecision);

public:
    static auto Format(Tree const& tree, Dataset const& dataset, int decimalPrecision = 2) -> std::string;
    static auto Format(Tree const& tree, std::unordered_map<Operon::Hash, std::string> const& variableNames, int decimalPrecision = 2) -> std::string;
};

class OPERON_EXPORT InfixFormatter {
    static void FormatNode(Tree const& tree, std::unordered_map<Operon::Hash, std::string> const& variableNames, size_t i, fmt::memory_buffer& current, int decimalPrecision);

public:
    static auto Format(Tree const& tree, Dataset const& dataset, int decimalPrecision = 2) -> std::string;
    static auto Format(Tree const& tree, std::unordered_map<Operon::Hash, std::string> const& variableNames, int decimalPrecision = 2) -> std::string;
};

} // namespace Operon

#endif
|
<gh_stars>1-10
from unicorn import *
from keystone import *
from capstone import *
from unicorn.x86_const import *
from binaryninja import *

def decrypt(bv, _address, _key, _len):
    xor_key = Transform['XOR']
    address = _address
    key = _key
    for i in range(_len):
        enc_str = bv.read(address, 4)
        decrypted_str = xor_key.decode(enc_str, {'key': key.to_bytes(4, 'little')})
        bv.write(address, decrypted_str)
        # shikata ga nai feeds the freshly decrypted dword back into the key
        key += int.from_bytes(bv.read(address, 4), 'little')
        key &= 0xffffffff
        address += 4

def _parse_xor_token(_token):
    offset = _token[6].value
    eip = _token[4].text
    key = _token[-1].text
    return key, eip, offset

def str_to_unicorn_reg(reg_str):
    reg = None
    if reg_str == 'eax':
        reg = UC_X86_REG_EAX
    if reg_str == 'ebx':
        reg = UC_X86_REG_EBX
    if reg_str == 'ecx':
        reg = UC_X86_REG_ECX
    if reg_str == 'edx':
        reg = UC_X86_REG_EDX
    if reg_str == 'esi':
        reg = UC_X86_REG_ESI
    if reg_str == 'edi':
        reg = UC_X86_REG_EDI
    if reg_str == 'ebp':
        reg = UC_X86_REG_EBP
    if reg_str == 'esp':
        reg = UC_X86_REG_ESP
    return reg

class DeobfuscateShikataGaNai:
    def __init__(self, bv, addr):
        self.bv = bv
        self.emu = Uc(UC_ARCH_X86, UC_MODE_32)
        self.begin_address = addr
        self.eip_reg = None
        self.key_reg = None
        self.offset = 0
        # initialise the components needed for emulation
        emulate_bytes, counter_addr = self.get_code_to_emulate(bv, addr)
        self.opcodes = emulate_bytes
        self.len = 0
        self.eip = 0
        self.key = 0
        self.xor_address = counter_addr

    def get_code_to_emulate(self, bv, addr):
        emulate_byte = []  # the bytes to run the emulation on
        counter_addr = addr  # the address to count from
        token_gen = bv.disassembly_tokens(addr)
        for token in token_gen:
            # Now we want to find the token in the form ['xor', ' ', 'dword', ...]
            # We first check if the token contains xor
            if token[0][0].text != 'xor':
                temp_store = bv.read(counter_addr, token[1])
                emulate_byte += temp_store
                counter_addr += token[1]
                continue
            # Now we check if the len of the token is 10
            # Note: having it smaller than 10 also works, because I don't know if the number is out of range or not
            if len(token[0]) < 10:
                temp_store = bv.read(counter_addr, token[1])
                emulate_byte += temp_store
                counter_addr += token[1]
                continue
            # Now we have found the xor that we need; parse it and return
            # the key,
            # the offset
            # and the address of the last FPU instructions
            key, eip, offset = _parse_xor_token(token[0])
            self.key_reg, self.eip_reg, self.offset = key, eip, offset
            break
        return emulate_byte, counter_addr

    def _check_block_addr_for_fnstenv(self):
        # check the first 15 instructions after the begin_address
        # The right way to do this is to check every instruction in the basic block against the list at: https://docs.oracle.com/cd/E18752_01/html/817-5477/eoizy.html
        # but I am too lazy and this works every time
        tokens = self.bv.disassembly_tokens(self.begin_address)
        counter = 0
        for token in tokens:
            if token[0][0].text == 'fnstenv':
                return True
            if counter > 15:
                return False
            counter += 1
        return False

    def _emulate(self):
        '''
        The emulator initialisation is taken from https://github.com/tkmru/nao/blob/778d7c9eef929589a4a43b74ec6dcee249d2f37f/nao/eliminate.py#L12
        '''
        begin_address = self.begin_address
        page_map = begin_address - begin_address % 0x1000  # page alignment
        self.emu.mem_map(page_map, 0x400000)
        self.emu.mem_write(begin_address, bytes(self.opcodes))
        # initialize stack
        self.emu.reg_write(UC_X86_REG_ESP, begin_address + 0x200000)
        self.emu.reg_write(UC_X86_REG_EBP, begin_address + 0x200100)
        # initialize registers
        self.emu.reg_write(UC_X86_REG_EAX, 0x1234)
        self.emu.reg_write(UC_X86_REG_EBX, 0x1234)
        self.emu.reg_write(UC_X86_REG_ECX, 0x1234)
        self.emu.reg_write(UC_X86_REG_EDX, 0x1234)
        self.emu.reg_write(UC_X86_REG_EDI, 0x1234)
        self.emu.reg_write(UC_X86_REG_ESI, 0x1234)
        # initialize flags
        self.emu.reg_write(UC_X86_REG_EFLAGS, 0x0)
        # ============== My code starts from here =================
        self.emu.emu_start(begin_address, begin_address + len(self.opcodes))
        eip = self.emu.reg_read(str_to_unicorn_reg(self.eip_reg))
        key = self.emu.reg_read(str_to_unicorn_reg(self.key_reg))
        eip += self.offset
        self.key = key
        self.eip = eip
        self.len = self.emu.reg_read(UC_X86_REG_ECX)
        return

    def run_deobfuscate(self):
        # first check if the new begin_address of the block contains the instruction fnstenv
        check = True
        count = 0
        while check:
            check = self._check_block_addr_for_fnstenv()
            if check == False:
                break
            self._emulate()
            log_info(f"Start decrypt at position: {hex(self.eip)}")
            log_info(f"Using the key: {hex(self.key)}")
            log_info(f"With len: {hex(self.len)}")
            decrypt(self.bv, self.eip, self.key, self.len)
            self.bv.update_analysis_and_wait()
            count += 1
            loop_address = self.xor_address
            for instr in self.bv.disassembly_tokens(self.xor_address):
                if instr[0][0].text == 'loop':
                    break
                loop_address += instr[1]
            # set up the next address
            self.begin_address = self.bv.get_next_basic_block_start_after(loop_address)
            self.emu = Uc(UC_ARCH_X86, UC_MODE_32)
            emulate_bytes, counter_addr = self.get_code_to_emulate(self.bv, self.begin_address)
            self.opcodes = emulate_bytes
            self.len = 0
            self.eip = 0
            self.key = 0
            self.xor_address = counter_addr
        log_info(f"Done decoding shikata ga nai, the number of layers is: {count}")
        return

class RunInBackground(BackgroundTaskThread):
    def __init__(self, bv, addr, msg):
        BackgroundTaskThread.__init__(self, msg, True)
        self.bv = bv
        self.addr = addr

    def run(self):
        bv = self.bv
        DeShikata = DeobfuscateShikataGaNai(self.bv, self.addr)
        DeShikata.run_deobfuscate()

def main(bv, address):
    s = RunInBackground(bv, address, "Deobfuscate shikata ga nai")
    s.start()

PluginCommand.register_for_address("DeShikata", "Decode a round of shikata ga nai on given address", main) |
/** Jdbc implementation for {@link CodeStore} */
public class JdbcCodeStore implements CodeStore {
private final DataSource dataSource;
private final Logger log = Logger.getLogger("JdbcCodeStore");
@Inject
public JdbcCodeStore(DataSource dataSource) {
this.dataSource = dataSource;
}
@Override
public Optional<OAuth2Request> consumeCode(String code) {
Connection conn = null;
PreparedStatement statement = null;
ResultSet result = null;
try {
conn = dataSource.getConnection();
conn.setAutoCommit(false);
String stmt = "SELECT * FROM code WHERE code = ?;";
statement = conn.prepareStatement(stmt);
statement.setString(1, code);
result = statement.executeQuery();
if (result.next()) {
stmt = "DELETE FROM code WHERE code = ?;";
statement = conn.prepareStatement(stmt);
statement.setString(1, code);
statement.execute();
Optional<OAuth2Request> client =
Optional.ofNullable(OAuth2Request.parseFrom(result.getBytes("request")));
conn.commit();
return client;
}
} catch (SQLException | InvalidProtocolBufferException exception) {
try {
if (conn != null) {
conn.rollback();
}
} catch (SQLException exception1) {
throw new OAuth2ServerException(exception1);
}
throw new OAuth2ServerException(exception);
} finally {
if (result != null) {
try {
result.close();
} catch (SQLException exception2) {
throw new OAuth2ServerException(exception2);
}
}
if (statement != null) {
try {
statement.close();
} catch (SQLException exception3) {
throw new OAuth2ServerException(exception3);
}
}
if (conn != null) {
try {
conn.close();
} catch (SQLException exception4) {
throw new OAuth2ServerException(exception4);
}
}
}
return Optional.empty();
}
@Override
public boolean setCode(String code, OAuth2Request request) {
Connection conn = null;
PreparedStatement statement = null;
ResultSet result = null;
try {
conn = dataSource.getConnection();
conn.setAutoCommit(false);
String stmt = "SELECT * FROM code WHERE code = ?;";
statement = conn.prepareStatement(stmt);
statement.setString(1, code);
result = statement.executeQuery();
if (!result.next()) {
stmt = "INSERT INTO code VALUES(?, ?);";
statement = conn.prepareStatement(stmt);
statement.setString(1, code);
statement.setBytes(2, request.toByteArray());
statement.execute();
conn.commit();
return true;
} else {
return false;
}
} catch (SQLException exception) {
try {
if (conn != null) {
conn.rollback();
}
} catch (SQLException exception1) {
throw new OAuth2ServerException(exception1);
}
throw new OAuth2ServerException(exception);
} finally {
if (result != null) {
try {
result.close();
} catch (SQLException exception2) {
throw new OAuth2ServerException(exception2);
}
}
if (statement != null) {
try {
statement.close();
} catch (SQLException exception3) {
throw new OAuth2ServerException(exception3);
}
}
if (conn != null) {
try {
conn.close();
} catch (SQLException exception4) {
throw new OAuth2ServerException(exception4);
}
}
}
}
@Override
public void reset() {
Connection conn = null;
PreparedStatement statement = null;
ResultSet result = null;
try {
conn = dataSource.getConnection();
conn.setAutoCommit(false);
String stmt = "DELETE FROM code;";
statement = conn.prepareStatement(stmt);
statement.execute();
conn.commit();
} catch (SQLException exception) {
try {
if (conn != null) {
conn.rollback();
}
} catch (SQLException exception1) {
throw new OAuth2ServerException(exception1);
}
throw new OAuth2ServerException(exception);
} finally {
if (result != null) {
try {
result.close();
} catch (SQLException exception2) {
throw new OAuth2ServerException(exception2);
}
}
if (statement != null) {
try {
statement.close();
} catch (SQLException exception3) {
throw new OAuth2ServerException(exception3);
}
}
if (conn != null) {
try {
conn.close();
} catch (SQLException exception4) {
throw new OAuth2ServerException(exception4);
}
}
}
}
} |
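consumeCode above implements a one-shot token: the row is read and deleted inside a single transaction, so a code can never be redeemed twice even under concurrent requests. The same contract in a few lines of Python over sqlite3, for comparison (illustrative only — the store above is MySQL-backed through a DataSource):

import sqlite3

def consume_code(conn: sqlite3.Connection, code: str):
    with conn:  # one transaction: the SELECT and DELETE commit together
        row = conn.execute('SELECT request FROM code WHERE code = ?', (code,)).fetchone()
        if row is None:
            return None
        conn.execute('DELETE FROM code WHERE code = ?', (code,))
        return row[0]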
from pathlib import Path

def save_binary_file(path: str, data: bytes):
    return Path(path).write_bytes(data) |
# Length of the longest substring of s consisting only of A, C, G, T.
s = input()
ans = 0
if len(s) == 1:
    if s.count("A") + s.count("G") + s.count("C") + s.count("T") == 1:
        print(1)
        exit()
for i in range(len(s)):  # range(len(s) - 1) would skip a final single-character run
    for j in range(i + 1, len(s) + 1):
        x = s[i:j]
        # x is ACGT-only exactly when the four counts add up to its length
        if x.count("A") + x.count("G") + x.count("C") + x.count("T") == len(x) and len(x) > ans:
            ans = len(x)
print(ans) |
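For example, with input ATCODER the scan settles on ATC — A, T and C are all in {A, C, G, T}, while O breaks the run — so the program prints 3.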
<reponame>philipsorst/gitki.java
package net.dontdrinkandroot.gitki.wicket.page.file;
import net.dontdrinkandroot.gitki.model.FilePath;
import net.dontdrinkandroot.gitki.wicket.page.BrowsePage;
import net.dontdrinkandroot.gitki.wicket.util.PageParameterUtils;
import org.apache.wicket.model.IModel;
import org.apache.wicket.model.Model;
import org.apache.wicket.request.mapper.parameter.PageParameters;
/**
* @author <NAME> <<EMAIL>>
*/
public class FilePage extends BrowsePage<FilePath>
{
public FilePage(PageParameters parameters)
{
super(parameters);
FilePath path = PageParameterUtils.toFilePath(parameters);
this.setModel(Model.of(path));
}
public FilePage(IModel<FilePath> model)
{
super(model);
PageParameterUtils.from(model.getObject(), this.getPageParameters());
}
@Override
protected void onInitialize()
{
super.onInitialize();
}
}
|
// From the image of an orthonormal frame under an affine transform
// to the ABC parameters of the ellipse passing through that frame
void ImRON2ParmEllipse
(
    REAL & A,
    REAL & B,
    REAL & C,
    const Pt2dr & aV0,
    const Pt2dr & aV1
)
{
    ElMatrix<REAL> aMat(2,2);
    SetCol(aMat,0,aV0);
    SetCol(aMat,1,aV1);
    aMat = gaussj(aMat);
    aMat = aMat.transpose() * aMat;
    ElMatrix<REAL> aVecP(2,2);
    ElMatrix<REAL> aValP(2,2);
    jacobi_diag(aMat,aValP,aVecP);
    aValP(0,0) = sqrt(aValP(0,0));
    aValP(1,1) = sqrt(aValP(1,1));
    ElMatrix<REAL> aABC = aVecP * aValP * aVecP.transpose();
    A = aABC(0,0);
    B = aABC(1,0);
    C = aABC(1,1);
} |
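The routine works from the fact that points P = M·u of the unit circle (columns of M being aV0, aV1) satisfy P^T S P = 1 with S = inv(M)^T inv(M); the returned A, B, C are the entries of the symmetric square root of S, obtained here by Jacobi eigendecomposition. The same computation in NumPy/SciPy (a sketch mirroring the code, with illustrative names):

import numpy as np
from scipy.linalg import sqrtm

def im_ron_to_parm_ellipse(v0, v1):
    M = np.column_stack([v0, v1])              # affine image of the unit frame
    Minv = np.linalg.inv(M)
    S = Minv.T @ Minv                          # p^T S p = 1 on the ellipse
    N = np.real(sqrtm(S))                      # symmetric square root, as in the C++
    return N[0, 0], N[1, 0], N[1, 1]           # A, B, C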
def mul(self, other):
    if not (isinstance(other, Variable) or isinstance(other, LazyVariable)) or \
            (isinstance(other, Variable) and other.numel() == 1):
        from .constant_mul_lazy_variable import ConstantMulLazyVariable
        return ConstantMulLazyVariable(self, other)
    else:
        from .mul_lazy_variable import MulLazyVariable
        return MulLazyVariable(self, other) |
/** Invokes parseOutput. This will block until the end :-( */
public void start() throws IOException {
    program_start = new Date();
    errThread.start();
    parseOutput(br);
} |
/**
* Created by Roman Kontchakov on 02/12/2016.
*/
public class ExpressionParserTest {
private static final DBBooleanFunctionSymbol NOT = SQLTestingTools.DB_FS_FACTORY.getDBNot();
private static final QuotedIDFactory IDFAC;
private static final DBTypeFactory DB_TYPE_FACTORY;
private static final DBTermType dbLongType;
static {
OfflineMetadataProviderBuilder builder = SQLTestingTools.createMetadataProviderBuilder();
IDFAC = builder.getQuotedIDFactory();
DB_TYPE_FACTORY = builder.getDBTypeFactory();
dbLongType = DB_TYPE_FACTORY.getDBLargeIntegerType();
}
@Test
public void null_test() throws JSQLParserException {
ImmutableTerm translation = parseTerm("SELECT NULL AS A FROM DUMMY", ImmutableMap.of());
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getNullConstant(), translation);
}
@Test
public void double_test() throws JSQLParserException {
ImmutableTerm translation = parseTerm("SELECT 1.0 AS A FROM DUMMY", ImmutableMap.of());
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getDBConstant("1.0", DB_TYPE_FACTORY.getDBDoubleType()), translation);
}
@Test
public void hex_test() throws JSQLParserException {
ImmutableTerm translation = parseTerm("SELECT 0xFF AS A FROM DUMMY", ImmutableMap.of());
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getDBConstant("255", dbLongType), translation);
}
@Test
public void hex_quote_test() throws JSQLParserException {
ImmutableTerm translation = parseTerm("SELECT x'FF' AS A FROM DUMMY", ImmutableMap.of());
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getDBConstant("255", dbLongType), translation);
}
@Test
public void hex_quote2_test() throws JSQLParserException {
ImmutableTerm translation = parseTerm("SELECT X'FF' AS A FROM DUMMY", ImmutableMap.of());
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getDBConstant("255", dbLongType), translation);
}
@Test
public void long_test() throws JSQLParserException {
ImmutableTerm translation = parseTerm("SELECT 1 AS A FROM DUMMY", ImmutableMap.of());
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getDBConstant("1", dbLongType), translation);
}
@Test
public void string_test() throws JSQLParserException {
ImmutableTerm translation = parseTerm("SELECT '1' AS A FROM DUMMY", ImmutableMap.of());
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getDBStringConstant("1"), translation);
}
@Test
public void date_test() throws JSQLParserException {
// ODBC escape sequence syntax
ImmutableTerm translation = parseTerm("SELECT {d '2016-12-02'} AS A FROM DUMMY", ImmutableMap.of());
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getDBConstant("2016-12-02", DB_TYPE_FACTORY.getDBDateType()), translation);
}
@Test
public void time_test() throws JSQLParserException {
// ODBC escape sequence syntax
ImmutableTerm translation = parseTerm("SELECT {t '15:57:02'} AS A FROM DUMMY", ImmutableMap.of());
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getDBConstant("15:57:02", DB_TYPE_FACTORY.getDBTimeType()), translation);
}
@Test
public void timestamp_test() throws JSQLParserException {
// ODBC escape sequence syntax
ImmutableTerm translation = parseTerm("SELECT {ts '2016-12-02 15:57:02.03'} AS A FROM DUMMY", ImmutableMap.of());
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getDBConstant("2016-12-02 15:57:02.03",
DB_TYPE_FACTORY.getDBDateTimestampType()), translation);
}
@Test
public void date_literal_test() throws JSQLParserException {
ImmutableTerm translation = parseTerm("SELECT DATE '1998-03-07' FROM DUMMY", ImmutableMap.of());
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getDBConstant("1998-03-07", DB_TYPE_FACTORY.getDBDateType()), translation);
}
@Test
public void time_literal_test() throws JSQLParserException {
ImmutableTerm translation = parseTerm("SELECT TIME '15:57:02' FROM DUMMY", ImmutableMap.of());
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getDBConstant("15:57:02", DB_TYPE_FACTORY.getDBTimeType()), translation);
}
@Test
public void timestamp_literal_test() throws JSQLParserException {
ImmutableTerm translation = parseTerm("SELECT TIMESTAMP '2016-12-02 15:57:02.03' FROM DUMMY", ImmutableMap.of());
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getDBConstant("2016-12-02 15:57:02.03", DB_TYPE_FACTORY.getDBDateTimestampType()), translation);
}
@Test
public void addition_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableTerm translation = parseTerm("SELECT X + 1 AS A FROM DUMMY", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getImmutableFunctionalTerm(
SQLTestingTools.DB_FS_FACTORY.getUntypedDBMathBinaryOperator("+"),
v,
SQLTestingTools.TERM_FACTORY.getDBConstant("1", dbLongType)), translation);
}
@Test
public void subtraction_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableTerm translation = parseTerm("SELECT X - 1 AS A FROM DUMMY", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getImmutableFunctionalTerm(
SQLTestingTools.DB_FS_FACTORY.getUntypedDBMathBinaryOperator("-"),
v,
SQLTestingTools.TERM_FACTORY.getDBConstant("1", dbLongType)), translation);
}
@Test
public void multiplication_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableTerm translation = parseTerm("SELECT X * 2 AS A FROM DUMMY", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getImmutableFunctionalTerm(
SQLTestingTools.DB_FS_FACTORY.getUntypedDBMathBinaryOperator("*"),
v,
SQLTestingTools.TERM_FACTORY.getDBConstant("2", dbLongType)), translation);
}
@Test
public void division_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableTerm translation = parseTerm("SELECT X / 2 AS A FROM DUMMY", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getImmutableFunctionalTerm(
SQLTestingTools.DB_FS_FACTORY.getUntypedDBMathBinaryOperator("/"),
v,
SQLTestingTools.TERM_FACTORY.getDBConstant("2", dbLongType)), translation);
}
@Test(expected = UnsupportedOperationException.class)
public void modulo_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableTerm translation = parseTerm("SELECT X % 2 AS A FROM DUMMY", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getImmutableFunctionalTerm(
SQLTestingTools.DB_FS_FACTORY.getUntypedDBMathBinaryOperator("%"),
v,
SQLTestingTools.TERM_FACTORY.getDBConstant("2", dbLongType)), translation);
}
@Test(expected = UnsupportedOperationException.class)
public void integer_div_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableTerm translation = parseTerm("SELECT X DIV 2 AS A FROM DUMMY", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getImmutableFunctionalTerm(
SQLTestingTools.DB_FS_FACTORY.getUntypedDBMathBinaryOperator("DIV"),
v,
SQLTestingTools.TERM_FACTORY.getDBConstant("2", dbLongType)), translation);
}
@Test
public void concat_2_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableTerm translation = parseTerm("SELECT X || 'B' AS A FROM DUMMY", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getImmutableFunctionalTerm(
SQLTestingTools.DB_FS_FACTORY.getNullRejectingDBConcat(2),
v,
SQLTestingTools.TERM_FACTORY.getDBStringConstant("B")), translation);
}
@Test
public void concat_3_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableTerm translation = parseTerm("SELECT 'A' || X || 'B' FROM DUMMY", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
DBFunctionSymbol concat = SQLTestingTools.DB_FS_FACTORY.getNullRejectingDBConcat(2);
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getImmutableFunctionalTerm(
concat,
SQLTestingTools.TERM_FACTORY.getImmutableFunctionalTerm(
concat,
SQLTestingTools.TERM_FACTORY.getDBStringConstant("A"),
v),
SQLTestingTools.TERM_FACTORY.getDBStringConstant("B")), translation);
}
@Test
public void function_CONCAT_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableTerm translation = parseTerm("SELECT CONCAT('A', X, 'B') FROM DUMMY", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getImmutableFunctionalTerm(
SQLTestingTools.DB_FS_FACTORY.getRegularDBFunctionSymbol("CONCAT", 3),
SQLTestingTools.TERM_FACTORY.getDBStringConstant("A"),
v,
SQLTestingTools.TERM_FACTORY.getDBStringConstant("B")), translation);
}
// Boolean expressions are not allowed in the SELECT clause,
// so the tests below exercise them through the WHERE clause.
@Test
public void equalsTo_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableList<ImmutableExpression> translation = parseBooleanExpression("SELECT X AS A FROM DUMMY WHERE X = 'B'", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getNotYetTypedEquality(
v,
SQLTestingTools.TERM_FACTORY.getDBStringConstant("B")), translation.get(0));
}
@Test
public void not_equalsTo_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableList<ImmutableExpression> translation = parseBooleanExpression("SELECT X AS A FROM DUMMY WHERE X <> 'B'", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getDBNot(SQLTestingTools.TERM_FACTORY.getNotYetTypedEquality(
v,
SQLTestingTools.TERM_FACTORY.getDBStringConstant("B"))), translation.get(0));
}
@Test
public void not_equalsTo_bang_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableList<ImmutableExpression> translation = parseBooleanExpression("SELECT X AS A FROM DUMMY WHERE X != 'B'", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getDBNot(SQLTestingTools.TERM_FACTORY.getNotYetTypedEquality(
v,
SQLTestingTools.TERM_FACTORY.getDBStringConstant("B"))), translation.get(0));
}
@Test
public void greater_than_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableList<ImmutableExpression> translation = parseBooleanExpression("SELECT X AS A FROM DUMMY WHERE X > 3", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getImmutableExpression(
SQLTestingTools.DB_FS_FACTORY.getDBDefaultInequality(GT),
v,
SQLTestingTools.TERM_FACTORY.getDBConstant("3", dbLongType)), translation.get(0));
}
@Test
public void greater_than_or_equals_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableList<ImmutableExpression> translation = parseBooleanExpression("SELECT X AS A FROM DUMMY WHERE X >= 3", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getImmutableExpression(
SQLTestingTools.DB_FS_FACTORY.getDBDefaultInequality(GTE),
v,
SQLTestingTools.TERM_FACTORY.getDBConstant("3", dbLongType)), translation.get(0));
}
@Test
public void minor_than_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableList<ImmutableExpression> translation = parseBooleanExpression("SELECT X AS A FROM DUMMY WHERE X < 3", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getImmutableExpression(
SQLTestingTools.DB_FS_FACTORY.getDBDefaultInequality(LT),
v,
SQLTestingTools.TERM_FACTORY.getDBConstant("3", dbLongType)), translation.get(0));
}
@Test
public void minor_than_equals_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableList<ImmutableExpression> translation = parseBooleanExpression("SELECT X AS A FROM DUMMY WHERE X <= 3", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getImmutableExpression(
SQLTestingTools.DB_FS_FACTORY.getDBDefaultInequality(LTE),
v,
SQLTestingTools.TERM_FACTORY.getDBConstant("3", dbLongType)), translation.get(0));
}
@Test
public void not_equals_to_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableList<ImmutableExpression> translation = parseBooleanExpression("SELECT X AS A FROM DUMMY WHERE NOT X = 'B'", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getImmutableExpression(NOT, SQLTestingTools.TERM_FACTORY.getNotYetTypedEquality(
v,
SQLTestingTools.TERM_FACTORY.getDBStringConstant("B"))), translation.get(0));
}
@Test
public void not_not_equals_to_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableList<ImmutableExpression> translation = parseBooleanExpression("SELECT X AS A FROM DUMMY WHERE NOT X <> 'B'", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getNotYetTypedEquality(
v,
SQLTestingTools.TERM_FACTORY.getDBStringConstant("B")), translation.get(0));
}
@Test
public void not_not_equals_to_bang_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableList<ImmutableExpression> translation = parseBooleanExpression("SELECT X AS A FROM DUMMY WHERE NOT X != 'B'", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getNotYetTypedEquality(
v,
SQLTestingTools.TERM_FACTORY.getDBStringConstant("B")), translation.get(0));
}
@Test
public void not_greater_than_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableList<ImmutableExpression> translation = parseBooleanExpression("SELECT X AS A FROM DUMMY WHERE NOT X > 3", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getImmutableExpression(NOT, SQLTestingTools.TERM_FACTORY.getImmutableExpression(
SQLTestingTools.DB_FS_FACTORY.getDBDefaultInequality(GT),
v,
SQLTestingTools.TERM_FACTORY.getDBConstant("3", dbLongType))), translation.get(0));
}
@Test
public void not_greater_than_equals_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableList<ImmutableExpression> translation = parseBooleanExpression("SELECT X AS A FROM DUMMY WHERE NOT X >= 3", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getImmutableExpression(NOT, SQLTestingTools.TERM_FACTORY.getImmutableExpression(
SQLTestingTools.DB_FS_FACTORY.getDBDefaultInequality(GTE),
v,
SQLTestingTools.TERM_FACTORY.getDBConstant("3", dbLongType))), translation.get(0));
}
@Test
public void not_minor_than_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableList<ImmutableExpression> translation = parseBooleanExpression("SELECT X AS A FROM DUMMY WHERE NOT X < 3", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getImmutableExpression(NOT, SQLTestingTools.TERM_FACTORY.getImmutableExpression(
SQLTestingTools.DB_FS_FACTORY.getDBDefaultInequality(LT),
v,
SQLTestingTools.TERM_FACTORY.getDBConstant("3", dbLongType))), translation.get(0));
}
@Test
public void not_minor_than_equals_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableList<ImmutableExpression> translation = parseBooleanExpression("SELECT X AS A FROM DUMMY WHERE NOT X <= 3", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getImmutableExpression(NOT, SQLTestingTools.TERM_FACTORY.getImmutableExpression(
SQLTestingTools.DB_FS_FACTORY.getDBDefaultInequality(LTE),
v,
SQLTestingTools.TERM_FACTORY.getDBConstant("3", dbLongType))), translation.get(0));
}
@Test
public void in_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableList<ImmutableExpression> translation = parseBooleanExpression("SELECT X AS A FROM DUMMY WHERE X IN (1, 3)", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getImmutableExpression(SQLTestingTools.DB_FS_FACTORY.getDBOr(2),
SQLTestingTools.TERM_FACTORY.getNotYetTypedEquality(
v,
SQLTestingTools.TERM_FACTORY.getDBConstant("1", dbLongType)),
SQLTestingTools.TERM_FACTORY.getNotYetTypedEquality(
v,
SQLTestingTools.TERM_FACTORY.getDBConstant("3", dbLongType))), translation.get(0));
}
@Test
public void not_in_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableList<ImmutableExpression> translation = parseBooleanExpression("SELECT X AS A FROM DUMMY WHERE X NOT IN (1, 3)", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getImmutableExpression(NOT,
SQLTestingTools.TERM_FACTORY.getImmutableExpression(SQLTestingTools.DB_FS_FACTORY.getDBOr(2),
SQLTestingTools.TERM_FACTORY.getNotYetTypedEquality(
v,
SQLTestingTools.TERM_FACTORY.getDBConstant("1", dbLongType)),
SQLTestingTools.TERM_FACTORY.getNotYetTypedEquality(
v,
SQLTestingTools.TERM_FACTORY.getDBConstant("3", dbLongType)))), translation.get(0));
}
@Test(expected = JSQLParserException.class)
public void in_multi_test() throws JSQLParserException {
Variable v1 = SQLTestingTools.TERM_FACTORY.getVariable("x0");
Variable v2 = SQLTestingTools.TERM_FACTORY.getVariable("y0");
ImmutableList<ImmutableExpression> translation = parseBooleanExpression("SELECT X AS A FROM DUMMY WHERE (X, Y) IN ((1, 3), (2,4))", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v1,
new QualifiedAttributeID(null, IDFAC.createAttributeID("Y")), v2));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getImmutableExpression(SQLTestingTools.DB_FS_FACTORY.getDBOr(2),
SQLTestingTools.TERM_FACTORY.getConjunction(
SQLTestingTools.TERM_FACTORY.getNotYetTypedEquality(
v1,
SQLTestingTools.TERM_FACTORY.getDBConstant("1", dbLongType)),
SQLTestingTools.TERM_FACTORY.getNotYetTypedEquality(
v2,
SQLTestingTools.TERM_FACTORY.getDBConstant("3", dbLongType))),
SQLTestingTools.TERM_FACTORY.getNotYetTypedEquality(
v1,
SQLTestingTools.TERM_FACTORY.getDBConstant("2", dbLongType)),
SQLTestingTools.TERM_FACTORY.getNotYetTypedEquality(
v2,
SQLTestingTools.TERM_FACTORY.getDBConstant("4", dbLongType))), translation.get(0));
}
@Test
public void is_null_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableList<ImmutableExpression> translation = parseBooleanExpression("SELECT X AS A FROM DUMMY WHERE X IS NULL", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getImmutableExpression(SQLTestingTools.DB_FS_FACTORY.getDBIsNull(), v), translation.get(0));
}
@Test
public void is_not_null_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableList<ImmutableExpression> translation = parseBooleanExpression("SELECT X AS A FROM DUMMY WHERE X IS NOT NULL", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getImmutableExpression(NOT,
SQLTestingTools.TERM_FACTORY.getImmutableExpression(SQLTestingTools.DB_FS_FACTORY.getDBIsNull(), v)), translation.get(0));
}
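// BETWEEN is translated into two separate conjuncts (>= lower bound,
// <= upper bound) rather than a single AND expression.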
@Test
public void between_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableList<ImmutableExpression> translation = parseBooleanExpression("SELECT X AS A FROM DUMMY WHERE X BETWEEN 1 AND 3", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
assertEquals(ImmutableList.of(
SQLTestingTools.TERM_FACTORY.getImmutableExpression(
SQLTestingTools.DB_FS_FACTORY.getDBDefaultInequality(GTE),
v,
SQLTestingTools.TERM_FACTORY.getDBConstant("1", dbLongType)),
SQLTestingTools.TERM_FACTORY.getImmutableExpression(
SQLTestingTools.DB_FS_FACTORY.getDBDefaultInequality(LTE),
v,
SQLTestingTools.TERM_FACTORY.getDBConstant("3", dbLongType))), translation);
}
@Test
public void not_between_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableList<ImmutableExpression> translation = parseBooleanExpression("SELECT X AS A FROM DUMMY WHERE X NOT BETWEEN 1 AND 3", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getImmutableExpression(SQLTestingTools.DB_FS_FACTORY.getDBOr(2),
SQLTestingTools.TERM_FACTORY.getImmutableExpression(
SQLTestingTools.DB_FS_FACTORY.getDBDefaultInequality(LT),
v,
SQLTestingTools.TERM_FACTORY.getDBConstant("1", dbLongType)),
SQLTestingTools.TERM_FACTORY.getImmutableExpression(
SQLTestingTools.DB_FS_FACTORY.getDBDefaultInequality(GT),
v,
SQLTestingTools.TERM_FACTORY.getDBConstant("3", dbLongType))), translation.get(0));
}
@Test
public void like_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableList<ImmutableExpression> translation = parseBooleanExpression("SELECT X AS A FROM DUMMY WHERE X LIKE '_A%'", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getImmutableExpression(SQLTestingTools.DB_FS_FACTORY.getDBLike(),
v,
SQLTestingTools.TERM_FACTORY.getDBStringConstant("_A%")), translation.get(0));
}
@Test
public void not_like_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableList<ImmutableExpression> translation = parseBooleanExpression("SELECT X AS A FROM DUMMY WHERE X NOT LIKE '_A%'", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getImmutableExpression(NOT,
SQLTestingTools.TERM_FACTORY.getImmutableExpression(SQLTestingTools.DB_FS_FACTORY.getDBLike(),
v,
SQLTestingTools.TERM_FACTORY.getDBStringConstant("_A%"))), translation.get(0));
}
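// PostgreSQL regexp operators: ~ (match), !~ (no match), and the
// case-insensitive variants ~* and !~*, which are translated with the 'i' flag.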
@Test
public void regexp_match_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableList<ImmutableExpression> translation = parseBooleanExpression("SELECT X AS A FROM DUMMY WHERE X ~ 'A.*B'", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getImmutableExpression(SQLTestingTools.DB_FS_FACTORY.getDBRegexpMatches2(),
v,
SQLTestingTools.TERM_FACTORY.getDBStringConstant("A.*B")), translation.get(0));
}
@Test
public void regexp_not_match_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableList<ImmutableExpression> translation = parseBooleanExpression("SELECT X AS A FROM DUMMY WHERE X !~ 'A.*B'", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getImmutableExpression(NOT,
SQLTestingTools.TERM_FACTORY.getImmutableExpression(SQLTestingTools.DB_FS_FACTORY.getDBRegexpMatches2(),
v,
SQLTestingTools.TERM_FACTORY.getDBStringConstant("A.*B"))), translation.get(0));
}
@Test
public void regexp_match_ignore_case_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableList<ImmutableExpression> translation = parseBooleanExpression("SELECT X AS A FROM DUMMY WHERE X ~* 'A.*B'", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getImmutableExpression(SQLTestingTools.DB_FS_FACTORY.getDBRegexpMatches3(),
v,
SQLTestingTools.TERM_FACTORY.getDBStringConstant("A.*B"),
SQLTestingTools.TERM_FACTORY.getDBStringConstant("i")), translation.get(0));
}
@Test
public void regexp_not_match_ignore_case_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableList<ImmutableExpression> translation = parseBooleanExpression("SELECT X AS A FROM DUMMY WHERE X !~* 'A.*B'", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getImmutableExpression(NOT,
SQLTestingTools.TERM_FACTORY.getImmutableExpression(SQLTestingTools.DB_FS_FACTORY.getDBRegexpMatches3(),
v,
SQLTestingTools.TERM_FACTORY.getDBStringConstant("A.*B"),
SQLTestingTools.TERM_FACTORY.getDBStringConstant("i"))), translation.get(0));
}
@Test
public void not_regexp_match_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableList<ImmutableExpression> translation = parseBooleanExpression("SELECT X AS A FROM DUMMY WHERE NOT X ~ 'A.*B'", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getImmutableExpression(NOT,
SQLTestingTools.TERM_FACTORY.getImmutableExpression(SQLTestingTools.DB_FS_FACTORY.getDBRegexpMatches2(),
v,
SQLTestingTools.TERM_FACTORY.getDBStringConstant("A.*B"))), translation.get(0));
}
@Test
public void not_regexp_not_match_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableList<ImmutableExpression> translation = parseBooleanExpression("SELECT X AS A FROM DUMMY WHERE NOT X !~ 'A.*B'", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getImmutableExpression(SQLTestingTools.DB_FS_FACTORY.getDBRegexpMatches2(),
v,
SQLTestingTools.TERM_FACTORY.getDBStringConstant("A.*B")), translation.get(0));
}
@Test
public void not_regexp_match_ignore_case_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableList<ImmutableExpression> translation = parseBooleanExpression("SELECT X AS A FROM DUMMY WHERE NOT X ~* 'A.*B'", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getImmutableExpression(NOT,
SQLTestingTools.TERM_FACTORY.getImmutableExpression(SQLTestingTools.DB_FS_FACTORY.getDBRegexpMatches3(),
v,
SQLTestingTools.TERM_FACTORY.getDBStringConstant("A.*B"),
SQLTestingTools.TERM_FACTORY.getDBStringConstant("i"))), translation.get(0));
}
@Test
public void not_regexp_not_match_ignore_case_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableList<ImmutableExpression> translation = parseBooleanExpression("SELECT X AS A FROM DUMMY WHERE NOT X !~* 'A.*B'", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getImmutableExpression(SQLTestingTools.DB_FS_FACTORY.getDBRegexpMatches3(),
v,
SQLTestingTools.TERM_FACTORY.getDBStringConstant("A.*B"),
SQLTestingTools.TERM_FACTORY.getDBStringConstant("i")), translation.get(0));
}
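// MySQL-style REGEXP: REGEXP BINARY maps to the case-sensitive 2-argument
// match, while plain REGEXP is translated with the 'i' flag.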
@Test
public void regexp_MySQL_match_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableList<ImmutableExpression> translation = parseBooleanExpression("SELECT X AS A FROM DUMMY WHERE X REGEXP BINARY 'A.*B'", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getImmutableExpression(SQLTestingTools.DB_FS_FACTORY.getDBRegexpMatches2(),
v,
SQLTestingTools.TERM_FACTORY.getDBStringConstant("A.*B")), translation.get(0));
}
@Test
public void regexp_MySQL_match_ignore_case_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableList<ImmutableExpression> translation = parseBooleanExpression("SELECT X AS A FROM DUMMY WHERE X REGEXP 'A.*B'", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getImmutableExpression(SQLTestingTools.DB_FS_FACTORY.getDBRegexpMatches3(),
v,
SQLTestingTools.TERM_FACTORY.getDBStringConstant("A.*B"),
SQLTestingTools.TERM_FACTORY.getDBStringConstant("i")), translation.get(0));
}
@Test
public void not_regexp_MySQL_match_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableList<ImmutableExpression> translation = parseBooleanExpression("SELECT X AS A FROM DUMMY WHERE NOT X REGEXP BINARY 'A.*B'", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getImmutableExpression(NOT,
SQLTestingTools.TERM_FACTORY.getImmutableExpression(SQLTestingTools.DB_FS_FACTORY.getDBRegexpMatches2(),
v,
SQLTestingTools.TERM_FACTORY.getDBStringConstant("A.*B"))), translation.get(0));
}
@Test
public void not_regexp_MySQL_match_ignore_case_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableList<ImmutableExpression> translation = parseBooleanExpression("SELECT X AS A FROM DUMMY WHERE NOT X REGEXP 'A.*B'", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getImmutableExpression(NOT,
SQLTestingTools.TERM_FACTORY.getImmutableExpression(SQLTestingTools.DB_FS_FACTORY.getDBRegexpMatches3(),
v,
SQLTestingTools.TERM_FACTORY.getDBStringConstant("A.*B"),
SQLTestingTools.TERM_FACTORY.getDBStringConstant("i"))), translation.get(0));
}
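// A top-level AND in the WHERE clause is flattened into a list of conjuncts
// rather than being wrapped in a single AND expression.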
@Test
public void and_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableList<ImmutableExpression> translation = parseBooleanExpression("SELECT X AS A FROM DUMMY WHERE X >= 1 AND X <= 3", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
assertEquals(ImmutableList.of( //FACTORY.getImmutableExpression(AND,
SQLTestingTools.TERM_FACTORY.getImmutableExpression(
SQLTestingTools.DB_FS_FACTORY.getDBDefaultInequality(GTE),
v,
SQLTestingTools.TERM_FACTORY.getDBConstant("1", DB_TYPE_FACTORY.getDBLargeIntegerType())),
SQLTestingTools.TERM_FACTORY.getImmutableExpression(
SQLTestingTools.DB_FS_FACTORY.getDBDefaultInequality(LTE),
v,
SQLTestingTools.TERM_FACTORY.getDBConstant("3", dbLongType))), translation);
}
@Test
public void not_and_test_brackets() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableList<ImmutableExpression> translation = parseBooleanExpression("SELECT X AS A FROM DUMMY WHERE NOT (X >= 1 AND X <= 3)", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
assertEquals(ImmutableList.of(SQLTestingTools.TERM_FACTORY.getDBNot(SQLTestingTools.TERM_FACTORY.getConjunction(
SQLTestingTools.TERM_FACTORY.getImmutableExpression(
SQLTestingTools.DB_FS_FACTORY.getDBDefaultInequality(GTE),
v,
SQLTestingTools.TERM_FACTORY.getDBConstant("1", DB_TYPE_FACTORY.getDBLargeIntegerType())),
SQLTestingTools.TERM_FACTORY.getImmutableExpression(
SQLTestingTools.DB_FS_FACTORY.getDBDefaultInequality(LTE),
v,
SQLTestingTools.TERM_FACTORY.getDBConstant("3", dbLongType))))), translation);
}
@Test
public void not_not_and_test_brackets() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableList<ImmutableExpression> translation = parseBooleanExpression("SELECT X AS A FROM DUMMY WHERE NOT (NOT (X >= 1 AND X <= 3))", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
assertEquals(ImmutableList.of(
SQLTestingTools.TERM_FACTORY.getImmutableExpression(
SQLTestingTools.DB_FS_FACTORY.getDBDefaultInequality(GTE),
v,
SQLTestingTools.TERM_FACTORY.getDBConstant("1", DB_TYPE_FACTORY.getDBLargeIntegerType())),
SQLTestingTools.TERM_FACTORY.getImmutableExpression(
SQLTestingTools.DB_FS_FACTORY.getDBDefaultInequality(LTE),
v,
SQLTestingTools.TERM_FACTORY.getDBConstant("3", dbLongType))), translation);
}
@Test
public void or_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableList<ImmutableExpression> translation = parseBooleanExpression("SELECT X AS A FROM DUMMY WHERE X < 1 OR X > 3", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getImmutableExpression(SQLTestingTools.DB_FS_FACTORY.getDBOr(2),
SQLTestingTools.TERM_FACTORY.getImmutableExpression(
SQLTestingTools.DB_FS_FACTORY.getDBDefaultInequality(LT),
v,
SQLTestingTools.TERM_FACTORY.getDBConstant("1", dbLongType)),
SQLTestingTools.TERM_FACTORY.getImmutableExpression(
SQLTestingTools.DB_FS_FACTORY.getDBDefaultInequality(GT),
v,
SQLTestingTools.TERM_FACTORY.getDBConstant("3", dbLongType))), translation.get(0));
}
@Test
public void parenthesis_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableList<ImmutableExpression> translation = parseBooleanExpression("SELECT X AS A FROM DUMMY WHERE (X >= 1)", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getImmutableExpression(
SQLTestingTools.DB_FS_FACTORY.getDBDefaultInequality(GTE),
v,
SQLTestingTools.TERM_FACTORY.getDBConstant("1", dbLongType)), translation.get(0));
}
@Test
public void not_parenthesis_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableList<ImmutableExpression> translation = parseBooleanExpression("SELECT X AS A FROM DUMMY WHERE NOT (X >= 1)", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getImmutableExpression(NOT,
SQLTestingTools.TERM_FACTORY.getImmutableExpression(
SQLTestingTools.DB_FS_FACTORY.getDBDefaultInequality(GTE),
v,
SQLTestingTools.TERM_FACTORY.getDBConstant("1", dbLongType))), translation.get(0));
}
@Test
public void unary_plus_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableTerm translation = parseTerm("SELECT +X AS A FROM DUMMY", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
assertEquals(v, translation);
}
@Test
public void unary_minus_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableTerm translation = parseTerm("SELECT -X AS A FROM DUMMY", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getImmutableFunctionalTerm(
SQLTestingTools.DB_FS_FACTORY.getUntypedDBMathBinaryOperator(SPARQL.NUMERIC_MULTIPLY),
SQLTestingTools.TERM_FACTORY.getDBConstant("-1", DB_TYPE_FACTORY.getDBLargeIntegerType()),
v), translation);
}
@Test
public void case_when_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableTerm translation = parseTerm("SELECT CASE A WHEN 1 THEN 3 ELSE 4 END FROM DUMMY;", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("A")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getIfThenElse(
SQLTestingTools.TERM_FACTORY.getNotYetTypedEquality(v, SQLTestingTools.TERM_FACTORY.getDBConstant("1", DB_TYPE_FACTORY.getDBLargeIntegerType())),
SQLTestingTools.TERM_FACTORY.getDBConstant("3", DB_TYPE_FACTORY.getDBLargeIntegerType()),
SQLTestingTools.TERM_FACTORY.getDBConstant("4", DB_TYPE_FACTORY.getDBLargeIntegerType())),
translation);
}
@Test
public void case_when_test_3() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableTerm translation = parseTerm("SELECT CASE A WHEN 1 THEN 3 WHEN 2 THEN 4 ELSE 5 END FROM DUMMY;", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("A")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getDBCase(
Stream.of(Maps.immutableEntry(SQLTestingTools.TERM_FACTORY.getNotYetTypedEquality(v, SQLTestingTools.TERM_FACTORY.getDBConstant("1", DB_TYPE_FACTORY.getDBLargeIntegerType())),
SQLTestingTools.TERM_FACTORY.getDBConstant("3", DB_TYPE_FACTORY.getDBLargeIntegerType())),
Maps.immutableEntry(SQLTestingTools.TERM_FACTORY.getNotYetTypedEquality(v, SQLTestingTools.TERM_FACTORY.getDBConstant("2", DB_TYPE_FACTORY.getDBLargeIntegerType())),
SQLTestingTools.TERM_FACTORY.getDBConstant("4", DB_TYPE_FACTORY.getDBLargeIntegerType()))),
SQLTestingTools.TERM_FACTORY.getDBConstant("5", DB_TYPE_FACTORY.getDBLargeIntegerType()), false),
translation);
}
@Test
public void case_when_test_3b() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableTerm translation = parseTerm("SELECT CASE WHEN A = 1 THEN 3 WHEN A = 2 THEN 4 ELSE 5 END FROM DUMMY;", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("A")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getDBCase(
Stream.of(Maps.immutableEntry(SQLTestingTools.TERM_FACTORY.getNotYetTypedEquality(v, SQLTestingTools.TERM_FACTORY.getDBConstant("1", DB_TYPE_FACTORY.getDBLargeIntegerType())),
SQLTestingTools.TERM_FACTORY.getDBConstant("3", DB_TYPE_FACTORY.getDBLargeIntegerType())),
Maps.immutableEntry(SQLTestingTools.TERM_FACTORY.getNotYetTypedEquality(v, SQLTestingTools.TERM_FACTORY.getDBConstant("2", DB_TYPE_FACTORY.getDBLargeIntegerType())),
SQLTestingTools.TERM_FACTORY.getDBConstant("4", DB_TYPE_FACTORY.getDBLargeIntegerType()))),
SQLTestingTools.TERM_FACTORY.getDBConstant("5", DB_TYPE_FACTORY.getDBLargeIntegerType()), false),
translation);
}
@Test
public void case_when_test_4() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableTerm translation = parseTerm("SELECT CASE A WHEN 1 THEN 3 WHEN 2 THEN 4 END FROM DUMMY;", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("A")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getDBCase(
Stream.of(Maps.immutableEntry(SQLTestingTools.TERM_FACTORY.getNotYetTypedEquality(v, SQLTestingTools.TERM_FACTORY.getDBConstant("1", DB_TYPE_FACTORY.getDBLargeIntegerType())),
SQLTestingTools.TERM_FACTORY.getDBConstant("3", DB_TYPE_FACTORY.getDBLargeIntegerType())),
Maps.immutableEntry(SQLTestingTools.TERM_FACTORY.getNotYetTypedEquality(v, SQLTestingTools.TERM_FACTORY.getDBConstant("2", DB_TYPE_FACTORY.getDBLargeIntegerType())),
SQLTestingTools.TERM_FACTORY.getDBConstant("4", DB_TYPE_FACTORY.getDBLargeIntegerType()))),
SQLTestingTools.TERM_FACTORY.getNullConstant(), false),
translation);
}
@Test
public void case_when_test_4_null() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableTerm translation = parseTerm("SELECT CASE A WHEN 1 THEN 3 WHEN 2 THEN 4 ELSE NULL END FROM DUMMY;", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("A")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getDBCase(
Stream.of(Maps.immutableEntry(SQLTestingTools.TERM_FACTORY.getNotYetTypedEquality(v, SQLTestingTools.TERM_FACTORY.getDBConstant("1", DB_TYPE_FACTORY.getDBLargeIntegerType())),
SQLTestingTools.TERM_FACTORY.getDBConstant("3", DB_TYPE_FACTORY.getDBLargeIntegerType())),
Maps.immutableEntry(SQLTestingTools.TERM_FACTORY.getNotYetTypedEquality(v, SQLTestingTools.TERM_FACTORY.getDBConstant("2", DB_TYPE_FACTORY.getDBLargeIntegerType())),
SQLTestingTools.TERM_FACTORY.getDBConstant("4", DB_TYPE_FACTORY.getDBLargeIntegerType()))),
SQLTestingTools.TERM_FACTORY.getNullConstant(), false),
translation);
}
@Test
public void case_when_test_4b() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableTerm translation = parseTerm("SELECT CASE WHEN A = 1 THEN 3 WHEN A = 2 THEN 4 END FROM DUMMY;", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("A")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getDBCase(
Stream.of(Maps.immutableEntry(SQLTestingTools.TERM_FACTORY.getNotYetTypedEquality(v, SQLTestingTools.TERM_FACTORY.getDBConstant("1", DB_TYPE_FACTORY.getDBLargeIntegerType())),
SQLTestingTools.TERM_FACTORY.getDBConstant("3", DB_TYPE_FACTORY.getDBLargeIntegerType())),
Maps.immutableEntry(SQLTestingTools.TERM_FACTORY.getNotYetTypedEquality(v, SQLTestingTools.TERM_FACTORY.getDBConstant("2", DB_TYPE_FACTORY.getDBLargeIntegerType())),
SQLTestingTools.TERM_FACTORY.getDBConstant("4", DB_TYPE_FACTORY.getDBLargeIntegerType()))),
SQLTestingTools.TERM_FACTORY.getNullConstant(), false),
translation);
}
@Test
public void case_when_test_4b_null() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableTerm translation = parseTerm("SELECT CASE WHEN A = 1 THEN 3 WHEN A = 2 THEN 4 ELSE NULL END FROM DUMMY", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("A")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getDBCase(
Stream.of(Maps.immutableEntry(SQLTestingTools.TERM_FACTORY.getNotYetTypedEquality(v, SQLTestingTools.TERM_FACTORY.getDBConstant("1", DB_TYPE_FACTORY.getDBLargeIntegerType())),
SQLTestingTools.TERM_FACTORY.getDBConstant("3", DB_TYPE_FACTORY.getDBLargeIntegerType())),
Maps.immutableEntry(SQLTestingTools.TERM_FACTORY.getNotYetTypedEquality(v, SQLTestingTools.TERM_FACTORY.getDBConstant("2", DB_TYPE_FACTORY.getDBLargeIntegerType())),
SQLTestingTools.TERM_FACTORY.getDBConstant("4", DB_TYPE_FACTORY.getDBLargeIntegerType()))),
SQLTestingTools.TERM_FACTORY.getNullConstant(), false),
translation);
}
@Test(expected = UnsupportedSelectQueryRuntimeException.class)
public void subSelect_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableTerm translation = parseTerm("SELECT (SELECT A FROM Q WHERE A = P.B) AS C FROM P", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
}
@Test(expected = UnsupportedSelectQueryRuntimeException.class)
public void exists_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableList<ImmutableExpression> translation = parseBooleanExpression("SELECT * FROM P WHERE EXISTS (SELECT * FROM Q WHERE A = P.B);", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("A")), v));
}
@Test(expected = UnsupportedSelectQueryRuntimeException.class)
public void not_exists_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableList<ImmutableExpression> translation = parseBooleanExpression("SELECT * FROM P WHERE NOT EXISTS (SELECT * FROM Q WHERE A = P.B);", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("A")), v));
}
@Test(expected = UnsupportedSelectQueryRuntimeException.class)
public void all_comparison_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableList<ImmutableExpression> translation = parseBooleanExpression("SELECT * FROM P WHERE A > ALL (SELECT C FROM Q WHERE A = P.B);", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("A")), v));
}
@Test(expected = UnsupportedSelectQueryRuntimeException.class)
public void any_comparison_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableList<ImmutableExpression> translation = parseBooleanExpression("SELECT * FROM P WHERE A > ANY (SELECT C FROM Q WHERE A = P.B);", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("A")), v));
}
@Test(expected = UnsupportedSelectQueryRuntimeException.class)
public void bitwise_and_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
Variable u = SQLTestingTools.TERM_FACTORY.getVariable("y0");
ImmutableTerm translation = parseTerm("SELECT X & Y AS A FROM DUMMY", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v,
new QualifiedAttributeID(null, IDFAC.createAttributeID("Y")), u));
}
@Test(expected = UnsupportedSelectQueryRuntimeException.class)
public void bitwise_or_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
Variable u = SQLTestingTools.TERM_FACTORY.getVariable("y0");
ImmutableTerm translation = parseTerm("SELECT X | Y AS A FROM DUMMY", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v,
new QualifiedAttributeID(null, IDFAC.createAttributeID("Y")), u));
}
@Test(expected = UnsupportedSelectQueryRuntimeException.class)
public void bitwise_xor_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
Variable u = SQLTestingTools.TERM_FACTORY.getVariable("y0");
ImmutableTerm translation = parseTerm("SELECT X ^ Y AS A FROM DUMMY", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v,
new QualifiedAttributeID(null, IDFAC.createAttributeID("Y")), u));
}
@Test
public void extract_variable_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableTerm translation = parseTerm("SELECT EXTRACT(MONTH FROM X) AS C FROM DUMMY", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getImmutableFunctionalTerm(SQLTestingTools.DB_FS_FACTORY.getExtractFunctionSymbol("MONTH"),
v), translation);
}
@Test
public void extract_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableTerm translation = parseTerm("SELECT EXTRACT(MONTH FROM CURRENT_DATE) AS C FROM DUMMY", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getImmutableFunctionalTerm(SQLTestingTools.DB_FS_FACTORY.getExtractFunctionSymbol("MONTH"),
SQLTestingTools.TERM_FACTORY.getImmutableFunctionalTerm(SQLTestingTools.DB_FS_FACTORY.getCurrentDateTimeSymbol("DATE"))), translation);
}
@Test
public void extract_current_date_brackets_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableTerm translation = parseTerm("SELECT EXTRACT(MONTH FROM CURRENT_DATE()) AS C FROM DUMMY", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getImmutableFunctionalTerm(SQLTestingTools.DB_FS_FACTORY.getExtractFunctionSymbol("MONTH"),
SQLTestingTools.TERM_FACTORY.getImmutableFunctionalTerm(SQLTestingTools.DB_FS_FACTORY.getCurrentDateTimeSymbol("DATE"))), translation);
}
@Test
public void extract_from_literal_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableTerm translation = parseTerm("SELECT EXTRACT(YEAR FROM DATE '1998-03-07') FROM DUMMY", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getImmutableFunctionalTerm(SQLTestingTools.DB_FS_FACTORY.getExtractFunctionSymbol("YEAR"),
SQLTestingTools.TERM_FACTORY.getDBConstant("1998-03-07", DB_TYPE_FACTORY.getDBDateType())),
translation);
}
@Test(expected = UnsupportedSelectQueryRuntimeException.class)
public void interval_test() throws JSQLParserException {
ImmutableTerm translation = parseTerm("SELECT INTERVAL '31' DAY FROM DUMMY", ImmutableMap.of());
}
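// Aggregate functions cannot appear in mapping SELECT clauses: each of the
// tests below expects UnsupportedSelectQueryRuntimeException.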
@Test(expected = UnsupportedSelectQueryRuntimeException.class)
public void sum_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableTerm translation = parseTerm("SELECT SUM(X) AS C FROM DUMMY", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
}
@Test(expected = UnsupportedSelectQueryRuntimeException.class)
public void avg_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableTerm translation = parseTerm("SELECT AVG(X) AS C FROM DUMMY", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
}
@Test(expected = UnsupportedSelectQueryRuntimeException.class)
public void min_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableTerm translation = parseTerm("SELECT MIN(X) AS C FROM DUMMY", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
}
@Test(expected = UnsupportedSelectQueryRuntimeException.class)
public void max_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableTerm translation = parseTerm("SELECT MAX(X) AS C FROM DUMMY", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
}
@Test(expected = UnsupportedSelectQueryRuntimeException.class)
public void count_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableTerm translation = parseTerm("SELECT COUNT(X) AS C FROM DUMMY", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
}
@Test(expected = UnsupportedSelectQueryRuntimeException.class)
public void count_star_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableTerm translation = parseTerm("SELECT COUNT(*) AS C FROM DUMMY", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
}
@Test(expected = UnsupportedSelectQueryRuntimeException.class)
public void analytic_expression_test() throws JSQLParserException {
ImmutableTerm translation = parseTerm("SELECT LAG(A) OVER () FROM P", ImmutableMap.of());
}
@Test(expected = UnsupportedSelectQueryRuntimeException.class)
public void json_expression_test() throws JSQLParserException {
ImmutableTerm translation = parseTerm("SELECT A->'B' FROM DUMMY", ImmutableMap.of());
}
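// JDBC positional (?) and named (:name) parameters are invalid in mapping
// queries: the two tests below expect InvalidSelectQueryRuntimeException.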
@Test(expected = InvalidSelectQueryRuntimeException.class)
public void jdbc_parameter_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableList<ImmutableExpression> translation = parseBooleanExpression("SELECT A FROM P WHERE B = ?", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("B")), v));
}
@Test(expected = InvalidSelectQueryRuntimeException.class)
public void jdbc_named_parameter_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableList<ImmutableExpression> translation = parseBooleanExpression("SELECT A FROM P WHERE B = :name", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("B")), v));
}
@Test(expected = UnsupportedSelectQueryRuntimeException.class)
public void oracle_outer_join_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
Variable u = SQLTestingTools.TERM_FACTORY.getVariable("y0");
ImmutableList<ImmutableExpression> translation = parseBooleanExpression("SELECT * FROM P, Q WHERE P.A = Q.A(+)", ImmutableMap.of(
new QualifiedAttributeID(IDFAC.createRelationID("P"), IDFAC.createAttributeID("A")), v,
new QualifiedAttributeID(IDFAC.createRelationID("Q"), IDFAC.createAttributeID("A")), u));
}
@Test
public void true_column_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableList<ImmutableExpression> translation = parseBooleanExpression("SELECT * FROM P WHERE A = true", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("A")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getNotYetTypedEquality(
v,
SQLTestingTools.TERM_FACTORY.getDBBooleanConstant(true)), translation.get(0));
}
@Test
public void false_column_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableList<ImmutableExpression> translation = parseBooleanExpression("SELECT * FROM P WHERE A = false", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("A")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getNotYetTypedEquality(
v,
SQLTestingTools.TERM_FACTORY.getDBBooleanConstant(false)), translation.get(0));
}
@Test
public void function_REGEXP_LIKE_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableList<ImmutableExpression> translation = parseBooleanExpression("SELECT X FROM DUMMY WHERE REGEXP_LIKE(X, '^Ste(v|ph)en$')", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getImmutableExpression(SQLTestingTools.DB_FS_FACTORY.getDBRegexpMatches2(), v,
SQLTestingTools.TERM_FACTORY.getDBStringConstant("^Ste(v|ph)en$")), translation.get(0));
}
@Test
public void function_REGEXP_LIKE_3_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableList<ImmutableExpression> translation = parseBooleanExpression("SELECT X FROM DUMMY WHERE REGEXP_LIKE(X, '^Ste(v|ph)en$', 'i')", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getImmutableExpression(SQLTestingTools.DB_FS_FACTORY.getDBRegexpMatches3(), v,
SQLTestingTools.TERM_FACTORY.getDBStringConstant("^Ste(v|ph)en$"),
SQLTestingTools.TERM_FACTORY.getDBStringConstant("i")), translation.get(0));
}
/**
 * Not recognized by the parser: the 4-argument form of REGEXP_LIKE is
 * apparently not treated as a boolean function, hence the test is ignored.
 */
@Ignore
@Test
public void function_REGEXP_LIKE_4_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableList<ImmutableExpression> translation = parseBooleanExpression("SELECT X FROM DUMMY WHERE REGEXP_LIKE(X, '^Ste(v|ph)en$', 'i', '')", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
}
@Test
public void function_REGEXP_REPLACE_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableTerm translation = parseTerm("SELECT REGEXP_REPLACE(X, '^Ste(v|ph)en$', '') AS A FROM DUMMY", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getImmutableFunctionalTerm(SQLTestingTools.DB_FS_FACTORY.getDBRegexpReplace3(), v,
SQLTestingTools.TERM_FACTORY.getDBStringConstant("^Ste(v|ph)en$"),
SQLTestingTools.TERM_FACTORY.getDBStringConstant("")), translation);
}
@Test
public void function_REGEXP_REPLACE_4_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableTerm translation = parseTerm("SELECT REGEXP_REPLACE(X, '^Ste(v|ph)en$', '', 'i') AS A FROM DUMMY", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getImmutableFunctionalTerm(SQLTestingTools.DB_FS_FACTORY.getDBRegexpReplace4(), v,
SQLTestingTools.TERM_FACTORY.getDBStringConstant("^Ste(v|ph)en$"),
SQLTestingTools.TERM_FACTORY.getDBStringConstant(""),
SQLTestingTools.TERM_FACTORY.getDBStringConstant("i")), translation);
}
/**
 * Not recognized as a special function symbol: the 6-argument form of
 * REGEXP_REPLACE is only checked to parse without error (no assertion).
 */
@Test
public void function_REGEXP_REPLACE_6_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableTerm translation = parseTerm("SELECT REGEXP_REPLACE(X, '^Ste(v|ph)en$', '', 1, 0, 'i') AS A FROM DUMMY", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
}
/**
 * Same as above with a different start position: only checked to parse
 * without error (no assertion).
 */
@Test
public void function_REGEXP_REPLACE_6a_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableTerm translation = parseTerm("SELECT REGEXP_REPLACE(X, '^Ste(v|ph)en$', '', 2, 0, 'i') AS A FROM DUMMY", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
}
/**
 * Not recognized as a special function symbol: the 7-argument form of
 * REGEXP_REPLACE is only checked to parse without error (no assertion).
 */
@Test
public void function_REGEXP_REPLACE_7_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableTerm translation = parseTerm("SELECT REGEXP_REPLACE(X, '^Ste(v|ph)en$', '', 1, 0, 'i', '') AS A FROM DUMMY", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
}
@Test
public void function_REPLACE_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableTerm translation = parseTerm("SELECT REPLACE(X,'J') AS A FROM DUMMY", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getImmutableFunctionalTerm(SQLTestingTools.DB_FS_FACTORY.getRegularDBFunctionSymbol("REPLACE", 2), v,
SQLTestingTools.TERM_FACTORY.getDBStringConstant("J")), translation);
}
@Test
public void function_REPLACE_3_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableTerm translation = parseTerm("SELECT REPLACE(X, 'J', 'BL') AS A FROM DUMMY", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getImmutableFunctionalTerm(SQLTestingTools.DB_FS_FACTORY.getRegularDBFunctionSymbol("REPLACE", 3), v,
SQLTestingTools.TERM_FACTORY.getDBStringConstant("J"),
SQLTestingTools.TERM_FACTORY.getDBStringConstant("BL")), translation);
}
@Test
public void function_REPLACE_4_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableTerm translation = parseTerm("SELECT REPLACE(X, 'J', 'BL', 'i') AS A FROM DUMMY", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getImmutableFunctionalTerm(SQLTestingTools.DB_FS_FACTORY.getRegularDBFunctionSymbol("REPLACE", 4), v,
SQLTestingTools.TERM_FACTORY.getDBStringConstant("J"),
SQLTestingTools.TERM_FACTORY.getDBStringConstant("BL"),
SQLTestingTools.TERM_FACTORY.getDBStringConstant("i")), translation);
}
@Test
public void function_SUBSTR_2_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableTerm translation = parseTerm("SELECT SUBSTR(X, 1) AS A FROM DUMMY", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getImmutableFunctionalTerm(SQLTestingTools.DB_FS_FACTORY.getRegularDBFunctionSymbol("SUBSTR", 2), v,
SQLTestingTools.TERM_FACTORY.getDBConstant("1", dbLongType)), translation);
}
@Test
public void function_SUBSTR_3_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableTerm translation = parseTerm("SELECT SUBSTR(X, 1, 2) AS A FROM DUMMY", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getImmutableFunctionalTerm(SQLTestingTools.DB_FS_FACTORY.getRegularDBFunctionSymbol("SUBSTR", 3), v,
SQLTestingTools.TERM_FACTORY.getDBConstant("1", dbLongType),
SQLTestingTools.TERM_FACTORY.getDBConstant("2", dbLongType)), translation);
}
@Test
public void function_SQL_SUBSTRING2_test() throws JSQLParserException {
// SQL:99: SUBSTRING <left paren> <character value expression> FROM <start position> [ FOR <string length> ] <right paren>
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableTerm translation = parseTerm("SELECT SUBSTRING(X FROM 1) AS A FROM DUMMY", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getImmutableFunctionalTerm(SQLTestingTools.DB_FS_FACTORY.getDBSubString2(), v,
SQLTestingTools.TERM_FACTORY.getDBConstant("1", dbLongType)), translation);
}
@Test
public void function_SQL_SUBSTRING3_test() throws JSQLParserException {
// SQL:99: SUBSTRING <left paren> <character value expression> FROM <start position> [ FOR <string length> ] <right paren>
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableTerm translation = parseTerm("SELECT SUBSTRING(X FROM 1 FOR 2) AS A FROM DUMMY", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getImmutableFunctionalTerm(SQLTestingTools.DB_FS_FACTORY.getDBSubString3(), v,
SQLTestingTools.TERM_FACTORY.getDBConstant("1", dbLongType),
SQLTestingTools.TERM_FACTORY.getDBConstant("2", dbLongType)), translation);
}
@Test
public void function_SUBSTRING_2_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableTerm translation = parseTerm("SELECT SUBSTRING(X, 1) AS A FROM DUMMY", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getImmutableFunctionalTerm(SQLTestingTools.DB_FS_FACTORY.getDBSubString2(), v,
SQLTestingTools.TERM_FACTORY.getDBConstant("1", dbLongType)), translation);
}
@Test
public void function_SUBSTRING_3_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableTerm translation = parseTerm("SELECT SUBSTRING(X, 1, 2) AS A FROM DUMMY", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getImmutableFunctionalTerm(SQLTestingTools.DB_FS_FACTORY.getDBSubString3(), v,
SQLTestingTools.TERM_FACTORY.getDBConstant("1", dbLongType),
SQLTestingTools.TERM_FACTORY.getDBConstant("2", dbLongType)), translation);
}
@Test
public void function_LCASE_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableTerm translation = parseTerm("SELECT LCASE(X) AS A FROM DUMMY", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getImmutableFunctionalTerm(SQLTestingTools.DB_FS_FACTORY.getDBLower(), v), translation);
}
@Test
public void function_LOWER_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableTerm translation = parseTerm("SELECT LOWER(X) AS A FROM DUMMY", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getImmutableFunctionalTerm(SQLTestingTools.DB_FS_FACTORY.getDBLower(), v), translation);
}
@Test
public void function_UCASE_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableTerm translation = parseTerm("SELECT UCASE(X) AS A FROM DUMMY", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getImmutableFunctionalTerm(SQLTestingTools.DB_FS_FACTORY.getDBUpper(), v), translation);
}
@Test
public void function_UPPER_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableTerm translation = parseTerm("SELECT UPPER(X) AS A FROM DUMMY", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getImmutableFunctionalTerm(SQLTestingTools.DB_FS_FACTORY.getDBUpper(), v), translation);
}
@Test
public void function_LENGTH_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableTerm translation = parseTerm("SELECT LENGTH(X) AS A FROM DUMMY", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
DBFunctionSymbol lengthFunctionSymbol = SQLTestingTools.DB_FS_FACTORY.getRegularDBFunctionSymbol("LENGTH", 1);
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getImmutableFunctionalTerm(lengthFunctionSymbol, v), translation);
}
@Test
public void function_LEN_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableTerm translation = parseTerm("SELECT LEN(X) AS A FROM DUMMY", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
DBFunctionSymbol lenFunctionSymbol = SQLTestingTools.DB_FS_FACTORY.getRegularDBFunctionSymbol("LEN", 1);
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getImmutableFunctionalTerm(lenFunctionSymbol, v), translation);
}
@Test
public void function_CONVERT_test() throws JSQLParserException {
Variable v = SQLTestingTools.TERM_FACTORY.getVariable("x0");
ImmutableTerm translation = parseTerm("SELECT CONVERT(VARCHAR(50), X) AS A FROM DUMMY", ImmutableMap.of(
new QualifiedAttributeID(null, IDFAC.createAttributeID("X")), v));
DBFunctionSymbol castFunctionSymbol = SQLTestingTools.DB_FS_FACTORY.getDBCastFunctionSymbol(DB_TYPE_FACTORY.getDBTermType("VARCHAR(50)"));
Assert.assertEquals(SQLTestingTools.TERM_FACTORY.getImmutableFunctionalTerm(castFunctionSymbol, v), translation);
}
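// Helper: parse the first SELECT item of the given SQL and translate it
// into an ImmutableTerm, resolving attributes via the supplied map.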
private ImmutableTerm parseTerm(String sql, ImmutableMap<QualifiedAttributeID, ImmutableTerm> map) throws JSQLParserException {
ExpressionParser parser = new ExpressionParser(IDFAC, SQLTestingTools.CORE_SINGLETONS);
Statement statement = CCJSqlParserUtil.parse(sql);
SelectItem si = ((PlainSelect) ((Select) statement).getSelectBody()).getSelectItems().get(0);
net.sf.jsqlparser.expression.Expression exp = ((SelectExpressionItem) si).getExpression();
ImmutableTerm translation = parser.parseTerm(exp, new RAExpressionAttributes(map, null));
System.out.println(translation);
return translation;
}
private ImmutableList<ImmutableExpression> parseBooleanExpression(String sql, ImmutableMap<QualifiedAttributeID, ImmutableTerm> map) throws JSQLParserException {
ExpressionParser parser = new ExpressionParser(IDFAC, SQLTestingTools.CORE_SINGLETONS);
Statement statement = CCJSqlParserUtil.parse(sql);
net.sf.jsqlparser.expression.Expression exp = ((PlainSelect) ((Select) statement).getSelectBody()).getWhere();
ImmutableList<ImmutableExpression> translation = parser.parseBooleanExpression(exp, new RAExpressionAttributes(map, null));
System.out.println(translation);
return translation;
}
} |
def remove_old_objs(self):
    # Rebuild each list rather than calling remove() while iterating,
    # which skips the element that follows every removal.
    self._fire_beams = [fb for fb in self._fire_beams if not fb.is_out()[1]]
    self._coins = [co for co in self._coins if not co.is_out()[1]]
    self._magnets = [ma for ma in self._magnets if not ma.is_out()[1]]
    self._boosts = [bo for bo in self._boosts if not bo.is_out()[1]]
    self._mandalorian_bullets = [bu for bu in self._mandalorian_bullets if not bu.is_out()[3]]
    if self._boss is not None:
        self._boss_bullets = [bu for bu in self._boss_bullets if not bu.is_out()[3]] |
/**
* Save the classes similarity matrix.
* @param matrix The similarity matrix to save.
* @param file The output file to which to save the similarity matrix.
*/
public void saveClassesMatrix(SimilarityMatrix matrix, String file) {
List<Node> sourceClassesList = sourceOntology.getClassesList();
List<Node> targetClassesList = targetOntology.getClassesList();
try (BufferedWriter bwr = new BufferedWriter(new FileWriter(new File(file)))) {
bwr.write("@source_classes " + sourceClassesList.size() + "\n");
for( Node sourceClass : sourceClassesList ) {
bwr.write(sourceClass.getUri() + "\n");
}
bwr.write("@target_classes " + targetClassesList.size() + "\n");
for( Node targetClass : targetClassesList ) {
bwr.write(targetClass.getUri() + "\n");
}
bwr.write("@similarity_matrix\n");
for( int i = 0; i < matrix.getRows(); i++ ) {
for( int j = 0; j < matrix.getColumns(); j++ ) {
double sim = matrix.getSimilarity(i, j);
bwr.write(Double.toString(sim) + "\n");
}
}
} catch (Exception e) {
// try-with-resources closes the writer even if a write fails
e.printStackTrace();
}
} |
/*
* Return the data for the specified address, or NULL if not found.
*
* The result must be released with dvmReleaseRegisterMapLine().
*/
const u1* dvmRegisterMapGetLine(const RegisterMap* pMap, int addr)
{
int addrWidth, lineWidth;
u1 format = dvmRegisterMapGetFormat(pMap);
u2 numEntries = dvmRegisterMapGetNumEntries(pMap);
assert(numEntries > 0);
switch (format) {
case kRegMapFormatNone:
return NULL;
case kRegMapFormatCompact8:
addrWidth = 1;
break;
case kRegMapFormatCompact16:
addrWidth = 2;
break;
default:
LOGE("Unknown format %d\n", format);
dvmAbort();
return NULL;
}
lineWidth = addrWidth + pMap->regWidth;
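/* Entries are sorted by address: scan linearly when the map is small,
 * otherwise fall back to the binary search below. */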
static const int kSearchThreshold = 8;
const u1* data = NULL;
int lineAddr;
if (numEntries < kSearchThreshold) {
int i;
data = pMap->data;
for (i = numEntries; i > 0; i--) {
lineAddr = data[0];
if (addrWidth > 1)
lineAddr |= data[1] << 8;
if (lineAddr == addr)
return data + addrWidth;
data += lineWidth;
}
/* after an unsuccessful scan, data points one past the last entry */
assert(data == pMap->data + lineWidth * numEntries);
} else {
int hi, lo, mid;
lo = 0;
hi = numEntries - 1;
while (hi >= lo) {
mid = (hi + lo) / 2;
data = pMap->data + lineWidth * mid;
lineAddr = data[0];
if (addrWidth > 1)
lineAddr |= data[1] << 8;
if (addr > lineAddr) {
lo = mid + 1;
} else if (addr < lineAddr) {
hi = mid - 1;
} else {
return data + addrWidth;
}
}
}
return NULL;
} |
/**
* Created by greatdreams on 11/8/15.
*/
public class SimpleResponseConsumer {
private RequestResponseApi request;
public void init() {
Timer timer = new Timer();
timer.scheduleAtFixedRate(new ConsoleResponder(request), 500L, 5000L);
}
public void setRequest(RequestResponseApi request) {
this.request = request;
}
} |
/**
* @file
* @copyright This code is licensed under the 3-clause BSD license.\n
* Copyright ETH Zurich, Laboratory for Physical Chemistry, Reiher Group.\n
* See LICENSE.txt for details.
*/
#include <Utils/Properties/Thermochemistry/ThermochemistryCalculator.h>
#include <pybind11/eigen.h>
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include <boost/optional.hpp>
using namespace Scine::Utils;
void init_thermochemical_calculator(pybind11::module& m) {
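// Expose the read-only thermochemistry result containers to Python.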
pybind11::class_<ThermochemicalContainer> thermochemicalContainer(m, "ThermochemicalContainer");
thermochemicalContainer.def_readonly("temperature", &ThermochemicalContainer::temperature);
thermochemicalContainer.def_readonly("entropy", &ThermochemicalContainer::entropy);
thermochemicalContainer.def_readonly("enthalpy", &ThermochemicalContainer::enthalpy);
thermochemicalContainer.def_readonly("heat_capacity_p", &ThermochemicalContainer::heatCapacityP);
thermochemicalContainer.def_readonly("heat_capacity_v", &ThermochemicalContainer::heatCapacityV);
thermochemicalContainer.def_readonly("gibbs_free_energy", &ThermochemicalContainer::gibbsFreeEnergy);
thermochemicalContainer.def_readonly("zero_point_vibrational_energy", &ThermochemicalContainer::zeroPointVibrationalEnergy);
pybind11::class_<ThermochemicalComponentsContainer> thermochemicalComponentsContainer(
m, "ThermochemicalComponentsContainer");
thermochemicalComponentsContainer.def_readonly("vibrational_component",
&ThermochemicalComponentsContainer::vibrationalComponent);
thermochemicalComponentsContainer.def_readonly("rotational_component", &ThermochemicalComponentsContainer::rotationalComponent);
thermochemicalComponentsContainer.def_readonly("translational_component",
&ThermochemicalComponentsContainer::translationalComponent);
thermochemicalComponentsContainer.def_readonly("electronic_component", &ThermochemicalComponentsContainer::electronicComponent);
thermochemicalComponentsContainer.def_readonly("overall", &ThermochemicalComponentsContainer::overall);
}
|
<filename>core/arch/arm/plat-imx/registers/imx7-iomux_regs.h
/* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright 2017-2018 NXP
*
*/
#ifndef __MX7_IOMUX_REGS_H__
#define __MX7_IOMUX_REGS_H__
/*
* Macros definition
*/
#define IOMUX_GPRx_OFFSET(idx) ((idx) * 4)
/*
* DDR PHY control PD pins
* TrustZone Address Space Controller Select
*/
#define IOMUX_GPR_TZASC_ID 9
#define BP_IOMUX_GPR_DDR_PHY_CTRL_PD 1
#define BM_IOMUX_GPR_DDR_PHY_CTRL_PD (0x1F << BP_IOMUX_GPR_DDR_PHY_CTRL_PD)
#define BP_IOMUX_GPR_TZASC1_MUX_CTRL 0
#define BM_IOMUX_GPR_TZASC1_MUX_CTRL BIT32(BP_IOMUX_GPR_TZASC1_MUX_CTRL)
/*
* OCRAM Configuration
*/
#define IOMUX_GPR_OCRAM_ID 11
/* State Retention configuration */
#define BP_IOMUX_GPR_OCRAM_S_TZ_ADDR 11
#define BM_IOMUX_GPR_OCRAM_S_TZ_ADDR (0x7 << BP_IOMUX_GPR_OCRAM_S_TZ_ADDR)
#define BP_IOMUX_GPR_OCRAM_S_TZ_EN 10
#define BM_IOMUX_GPR_OCRAM_S_TZ_EN BIT32(BP_IOMUX_GPR_OCRAM_S_TZ_EN)
#define IOMUX_GPR_OCRAM_S_TZ_ENABLE BIT32(BP_IOMUX_GPR_OCRAM_S_TZ_EN)
#define IOMUX_GPR_OCRAM_S_TZ_DISABLE (0 << BP_IOMUX_GPR_OCRAM_S_TZ_EN)
/* PXP configuration */
#define BP_IOMUX_GPR_OCRAM_PXP_TZ_ADDR 7
#define BM_IOMUX_GPR_OCRAM_PXP_TZ_ADDR (0x7 << BP_IOMUX_GPR_OCRAM_PXP_TZ_ADDR)
#define BP_IOMUX_GPR_OCRAM_PXP_TZ_EN 6
#define BM_IOMUX_GPR_OCRAM_PXP_TZ_EN BIT32(BP_IOMUX_GPR_OCRAM_PXP_TZ_EN)
#define IOMUX_GPR_OCRAM_PXP_TZ_ENABLE BIT32(BP_IOMUX_GPR_OCRAM_PXP_TZ_EN)
#define IOMUX_GPR_OCRAM_PXP_TZ_DISABLE (0 << BP_IOMUX_GPR_OCRAM_PXP_TZ_EN)
/* Running configuration */
#define BP_IOMUX_GPR_OCRAM_TZ_ADDR 1
#define BM_IOMUX_GPR_OCRAM_TZ_ADDR (0x1F << BP_IOMUX_GPR_OCRAM_TZ_ADDR)
#define BP_IOMUX_GPR_OCRAM_TZ_EN 0
#define BM_IOMUX_GPR_OCRAM_TZ_EN BIT32(BP_IOMUX_GPR_OCRAM_TZ_EN)
#define IOMUX_GPR_OCRAM_TZ_ENABLE BIT32(BP_IOMUX_GPR_OCRAM_TZ_EN)
#define IOMUX_GPR_OCRAM_TZ_DISABLE (0 << BP_IOMUX_GPR_OCRAM_TZ_EN)
/* The configuration is locked with register bits 16 to 29 as mirror
* of bits 0 to 13
*/
#define BP_IOMUX_GPR_OCRAM_LOCK 16
#define IOMUX_GPR_OCRAM_LOCK(value) ((value) << BP_IOMUX_GPR_OCRAM_LOCK)
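/*
 * Illustrative use (not from the original header): lock the run-time
 * TrustZone fields by mirroring them into the lock bits, e.g.
 * val |= IOMUX_GPR_OCRAM_LOCK(BM_IOMUX_GPR_OCRAM_TZ_EN |
 *				BM_IOMUX_GPR_OCRAM_TZ_ADDR);
 */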
#endif /* __MX7_IOMUX_REGS_H__ */
|
import flask
import json
def create(code, dictionary_data):
    """Build a JSON flask.Response with the given status code and payload.
    Example (hypothetical): create(404, {"error": "not found"})
    """
    jsondata = flask.Response()
    jsondata.headers["Content-Type"] = "application/json"
    jsondata.status_code = code
    jsondata.set_data(json.dumps(dictionary_data))
    return jsondata |
import { inject } from "@loopback/core";
import { Filter, Where } from "@loopback/repository";
import { get, getModelSchemaRef, param, post, requestBody, patch, getWhereSchemaFor } from "@loopback/rest";
import { authenticate, AuthenticationBindings, STRATEGY } from "loopback4-authentication";
import { authorize } from "loopback4-authorization";
import { PubnubMessageRecipient, Pubnubnotification } from "../models";
import { PubnubMessage } from "../models/pubnub-message.model";
import { Messageservice, Notificationservice } from "../services";
import {CONTENT_TYPE, IAuthUserWithPermissions, OPERATION_SECURITY_SPEC, STATUS_CODE} from '@sourceloop/core';
import {PermissionKey} from '../permission-key.enum';
export class PubnubMessageController {
constructor(
@inject('services.Messageservice')
private readonly messageService:Messageservice,
@inject('services.Notificationservice')
private readonly notifService:Notificationservice
) {}
@authenticate(STRATEGY.BEARER)
@authorize({permissions: [PermissionKey.ViewMessage]})
@get('/messages',{
security: OPERATION_SECURITY_SPEC,
responses: {
[STATUS_CODE.OK]: {
description: 'Array of Message model instances',
content: {
[CONTENT_TYPE.JSON]: {
schema: {
type: 'array',
items: getModelSchemaRef(PubnubMessage, {includeRelations: true}),
},
},
},
},
}
})
async find(
@inject(AuthenticationBindings.CURRENT_USER) user:IAuthUserWithPermissions,
@param.header.string('Authorization') token:string,
@param.query.string('ChannelID') channelID?: string,
@param.filter(PubnubMessage) filter?: Filter<PubnubMessage>,
): Promise<PubnubMessage[]> {
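// Gather both directions: messages created by the user and messages
// addressed to the user, optionally scoped to one channel, then merge.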
const filter1:Filter<PubnubMessage> = {
where : {
createdBy: user.userTenantId,
channelId: channelID
}
};
const sentmessages = await this.messageService.getMessage(token, filter1);
const filter2:Filter<PubnubMessage> = {
where : {
toUserId: user.userTenantId,
channelId: channelID
}
};
const receivedmessages = await this.messageService.getMessage(token,filter2);
return [...sentmessages,...receivedmessages];
}
@authenticate(STRATEGY.BEARER)
@authorize({permissions: [PermissionKey.CreateMessage]})
@post('/messages', {
security: OPERATION_SECURITY_SPEC,
responses: {
[STATUS_CODE.OK]: {
description: 'Message model instance',
content: {[CONTENT_TYPE.JSON]: {schema: getModelSchemaRef(PubnubMessage)}},
},
},
})
async create(
@param.header.string('Authorization') token:string,
@requestBody({
content: {
'application/json': {
schema: getModelSchemaRef(PubnubMessage, {
title: 'Message',
exclude: ['id'],
}),
},
},
})
message: PubnubMessage,
): Promise<PubnubMessage> {
message.channelId = message.channelId ?? message.toUserId;
const msg = await this.messageService.createMessage(message,token);
const msgrecipient = new PubnubMessageRecipient({
channelId : message.channelId,
recipientId: message.toUserId ?? message.channelId,
messageId: msg.id
});
await this.messageService.createMessageRecipients(msgrecipient,token);
const notif = new Pubnubnotification({
subject: message.subject,
body: message.body,
type: 0,
receiver: {
to: [{
type: 0,
id: message.channelId
}]
}
});
await this.notifService.createNotification(notif,token);
return msg;
}
@authenticate(STRATEGY.BEARER)
@authorize({permissions: [PermissionKey.UpdateMessageRecipient]})
@patch(`/messages/{messageid}/markAsRead`,{
security: OPERATION_SECURITY_SPEC,
responses: {
[STATUS_CODE.OK]: {
description: 'Updated message recipient instance',
content: {[CONTENT_TYPE.JSON]: {schema: getModelSchemaRef(PubnubMessageRecipient)}},
},
},
})
async patchMessageRecipients(
@param.header.string('Authorization') token:string,
@param.path.string('messageid') msgId: string,
@requestBody({
content: {
'application/json': {
schema: getModelSchemaRef(PubnubMessageRecipient, { partial: true }),
},
},
})
messageRecipient: Partial<PubnubMessageRecipient>,
@param.query.object('where', getWhereSchemaFor(PubnubMessageRecipient))
where?: Where<PubnubMessageRecipient>,
): Promise<PubnubMessageRecipient> {
const patched = {
isRead: true
};
return this.messageService.updateMsgRecipients(
msgId,
patched,
token,
);
}
}
|
import java.util.Scanner;
/*
* Vasya often uses public transport. The transport in the city is of
* two types: trolleys and buses. The city has n buses and m trolleys,
* the buses are numbered by integers from 1 to n, the trolleys are numbered
* by integers from 1 to m.
*
* Public transport is not free. There are 4 types of tickets:
* 1. A ticket for one ride on some bus or trolley. It costs c1 burles;
* 2. A ticket for an unlimited number of rides on some bus or on some trolley.
* It costs c2 burles;
* 3. A ticket for an unlimited number of rides on all buses or all trolleys.
* It costs c3 burles;
* 4. A ticket for an unlimited number of rides on all buses and trolleys.
* It costs c4 burles.
*
* Vasya knows for sure the number of rides he is going to make and the
* transport he is going to use. He asked you for help to find the minimum
* sum of burles he will have to spend on the tickets.
*
* Input
* The first line contains four integers c1, c2, c3, c4 (1 ≤ c1, c2, c3, c4 ≤ 1000)
* — the costs of the tickets.
*
* The second line contains two integers n and m (1 ≤ n, m ≤ 1000) —
* the number of buses and trolleys Vasya is going to use.
*
* The third line contains n integers ai (0 ≤ ai ≤ 1000) — the number of times
* Vasya is going to use the bus number i.
*
* The fourth line contains m integers bi (0 ≤ bi ≤ 1000) — the number of
* times Vasya is going to use the trolley number i.
*
* Output
* Print a single number — the minimum sum of burles Vasya will
* have to spend on the tickets.
*/
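/*
 * Worked example (illustrative, not an official sample): with costs
 * c1=1, c2=3, c3=5, c4=10, one bus ridden 4 times and one trolley
 * ridden 10 times: bus side min(4*1, 3) = 3, capped by c3=5 -> 3;
 * trolley side min(10*1, 3) = 3, capped by c3=5 -> 3; total 6 < c4=10,
 * so the minimum is 6.
 */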
/**
* @author Arief Rahman
*/
public class VasyaAndPublicTransport {
public static void main(String[] args) {
Scanner in = new Scanner(System.in);
// Read input: four integers, the cost of each ticket type.
int oneBTCost = in.nextInt();
int someBTCost = in.nextInt();
int allBOrTCost = in.nextInt();
int allBTCost = in.nextInt();
// Read input: two integers, the number of buses and trolleys Vasya will use.
int bus = in.nextInt();
int trolley = in.nextInt();
// Read 'bus' integers: how many times Vasya will use each bus.
int[] busUse = new int[bus];
for(int i = 0; i < bus; i++) {
busUse[i] = in.nextInt();
}
// Read 'trolley' integers: how many times Vasya will use each trolley.
int[] trolleyUse = new int[trolley];
for(int i = 0; i < trolley; i++) {
trolleyUse[i] = in.nextInt();
}
// Start counting the minimum cost for buses and trolleys
int minimumCost;
int costBus = 0, costTrolley = 0;
for(int i = 0; i < bus; i++) {
// Compare price of one ride and unlimited ride on particular
// bus. Take the minimum one.
if(oneBTCost * busUse[i] < someBTCost) {
costBus += oneBTCost * busUse[i];
} else {
costBus += someBTCost;
}
}
// Compare the price with the all bus ticket.
if(allBOrTCost < costBus) {
costBus = allBOrTCost;
}
// Do the same for the trolley
for(int i = 0; i < trolley; i++) {
// Compare price of one ride and unlimited ride on particular
// trolley. Take the minimum one.
if(oneBTCost * trolleyUse[i] < someBTCost) {
costTrolley += oneBTCost * trolleyUse[i];
} else {
costTrolley += someBTCost;
}
}
// Compare the price with the all trolley ticket.
if(allBOrTCost < costTrolley) {
costTrolley = allBOrTCost;
}
// Compare all bus & trolley ticket with trolley + bus cost.
minimumCost = costTrolley + costBus;
if(allBTCost < minimumCost) {
minimumCost = allBTCost;
}
// Output the minimum cost.
System.out.println(minimumCost);
}
}
|
package operations_test
import (
"bytes"
"context"
"crypto/md5" //nolint:gosec
"crypto/rand"
"fmt"
"testing"
"github.com/treeverse/lakefs/pkg/api"
"github.com/treeverse/lakefs/pkg/block"
"github.com/treeverse/lakefs/pkg/upload"
)
const (
bucketName = "test"
ObjectBlockSize = 1024 * 3
expensiveString = "EXPENSIVE"
cheapString = "CHEAP"
)
func TestReadBlob(t *testing.T) {
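// Table-driven test: write blobs of several sizes and storage classes
// through upload.WriteBlob and verify bucket, sizes, storage class and MD5.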
tt := []struct {
name string
size int64
storageClass *string
}{
{"no data", 0, nil},
{"100 bytes", 100, nil},
{"1 block", ObjectBlockSize, api.StringPtr(expensiveString)},
{"1 block and 100 bytes", ObjectBlockSize + 100, api.StringPtr(cheapString)},
{"2 blocks and 1 bytes", ObjectBlockSize*2 + 1, nil},
{"1000 blocks", ObjectBlockSize * 1000, nil},
}
for _, tc := range tt {
t.Run(tc.name, func(t *testing.T) {
data := make([]byte, tc.size)
_, err := rand.Read(data)
if err != nil {
t.Fatal(err)
}
reader := bytes.NewReader(data)
adapter := newMockAdapter()
opts := block.PutOpts{StorageClass: tc.storageClass}
address := upload.DefaultPathProvider.NewPath()
blob, err := upload.WriteBlob(context.Background(), adapter, bucketName, address, reader, tc.size, opts)
if err != nil {
t.Fatal(err)
}
// test bucketName
if adapter.lastBucket != bucketName && tc.size != 0 {
t.Fatalf("write to wrong bucket: expected:%s got:%s", bucketName, adapter.lastBucket)
}
// test data size
expectedSize := int64(len(data))
if expectedSize != blob.Size {
t.Fatalf("expected blob size to be equal to sent size, got: sent:%d , blob:%d", expectedSize, blob.Size)
}
if adapter.totalSize != blob.Size {
t.Fatalf("expected blob size to be equal to adapter read size, got: blob:%d , adapter:%d", blob.Size, adapter.totalSize)
}
// test storage class
if adapter.lastStorageClass != tc.storageClass {
t.Errorf("expected sent storage class to be equal to requested storage class, got: %v , requested: %v",
adapter.lastStorageClass,
tc.storageClass)
}
// test checksum
expectedMD5 := fmt.Sprintf("%x", md5.Sum(data))
if blob.Checksum != expectedMD5 {
t.Fatalf("expected blob checksum to be equal to data checksum, got: blob:%s , data:%s", blob.Checksum, expectedMD5)
}
})
}
}
|
import * as core from '@actions/core'
import scraping from './scraping'
import fetch from 'node-fetch'
async function run(): Promise<void> {
let contents = new Array<string>()
const extract = new Array<string | undefined>()
try {
contents = await scraping('https://atcoder.jp/contests/')
for (let i = 0; i < contents.length; i += 2) {
// Note: only the day of month is compared, so a date in another
// month with the same day number would also match "today".
if (new Date().getDate() === new Date(contents[i]).getDate()) {
extract.push(contents[i])
extract.push(contents[i + 1])
}
}
if (1 <= extract.length) {
const message = {
username: 'AtCoder',
content: '今日開催のAtCoder',
embeds: [{fields: [{name: `${extract[0]}`, value: `${extract[1]}`}]}]
}
await fetch(core.getInput('WebhookUrl'), {
method: 'POST',
body: JSON.stringify(message),
headers: {'Content-Type': 'application/json'}
})
}
} catch (error) {
core.setFailed(error.message)
}
}
run()
|
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package com.azure.ai.metricsadvisor;
import com.azure.ai.metricsadvisor.models.MetricsAdvisorKeyCredential;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
public class CredentialsTests {
@Test
public void testKeyUpdate() {
final MetricsAdvisorKeyCredential credential
= new MetricsAdvisorKeyCredential("sub-id-1", "key-1");
Assertions.assertEquals("sub-id-1", credential.getKeys().getSubscriptionKey());
Assertions.assertEquals("key-1", credential.getKeys().getApiKey());
credential.updateKey(null, null);
Assertions.assertNull(credential.getKeys().getSubscriptionKey());
Assertions.assertNull(credential.getKeys().getApiKey());
credential.updateKey("sub-id-2", "key-2");
Assertions.assertEquals("sub-id-2", credential.getKeys().getSubscriptionKey());
Assertions.assertEquals("key-2", credential.getKeys().getApiKey());
}
}
|
/*
* Reopen the log file: the master sends SIGUSR1 to active workers
* (including those pending shutdown) to make them reopen their logs
*/
static void
rspamd_worker_usr1_handler (gint fd, short what, void *arg)
{
struct rspamd_worker_signal_handler *sigh =
(struct rspamd_worker_signal_handler *)arg;
rspamd_log_reopen (sigh->worker->srv->logger);
if (sigh->post_handler) {
sigh->post_handler (sigh->handler_data);
}
} |
import {Component, EventEmitter, OnInit, Output} from '@angular/core';
import {AuthService} from '../api/client/auth/auth.service';
@Component({
selector: 'app-login',
templateUrl: './login.component.html',
styleUrls: ['./login.component.css']
})
export class LoginComponent implements OnInit {
@Output() attemptedLogin: EventEmitter<boolean> = new EventEmitter<boolean>();
showAlert = false;
opening = false;
closing = false;
visible = false;
constructor(
private authService: AuthService
) { }
ngOnInit() {
this.slidein();
}
submitClickHandler(creds) {
this.authService.veryifyUser(creds.user, creds.pass)
.subscribe((res) => {
if (res.result) {
this.slideout(() => {
this.attemptedLogin.emit(res.result);
});
} else {
this.showAlert = true;
this.attemptedLogin.emit(res.result);
}
});
}
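// Animation helpers: the 1000 ms timeouts are assumed to match the
// slide transition durations defined in login.component.css.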
slidein() {
this.opening = true;
setTimeout(() => {
this.opening = false;
this.visible = true;
}, 1000);
}
slideout(cb) {
this.closing = true;
setTimeout(() => {
this.closing = false;
this.visible = false;
cb();
}, 1000);
}
}
|
def fqn(self, fqn):
    self._fqn = fqn |
/****************************************************/
/* This file is distributed under the */
/* University of Illinois/NCSA Open Source License. */
/* See LICENSE file in top directory for details. */
/* */
/* Copyright (c) 2016 FIGSiM developers */
/****************************************************/
/*!\file
* VecMat.h
* Created by ljohnson on 7/24/09.
*
* Lightweight linear algebra library intended for rapid three-dimensional computation. This library cannot handle
* vectors and matrices of any other dimensionality; a more general library should be used if variable or higher
* dimensions are required. The library consists of two classes: Vec3 (vectors) and Mat33 (matrices). Vectors are not
* tracked as row or column vectors, with behavior instead determined by the function acting on the vector.
* Overloaded operators will always assume that vectors are oriented such that their inner dimensions agree.
* All double-precision arithmetic operations are overloaded for vector-scalar and matrix-scalar operations, but are not currently
* commutative, meaning that the scalar must be placed *AFTER* the vector or matrix in order to avoid an error, as the
* opposite arrangement is not yet defined. All legal vector-vector, vector-matrix, and matrix-matrix BLAS operations are overloaded,
* and should not have a problem with commutativity. Only the == and != comparison operators are defined for vectors and matrices. All
* elementary math functions are inlined and vector-scalar and vector-vector operations were written unrolled. Other functions may be
* partially unrolled. The inverse, linear solve, and string conversion functions are not inlined.
*
* IMPORTANT NOTE: Arguments for operators are defined as constant. Expressions of the form B = A*B will result in a segfault.
* This may be avoided by using an accumulating operator; e.g. B *= A instead.
*
* Updated on Jan 22, 2011 by <NAME>:
* - added complex vector class
* - added solver for third order reduced polynomial (using method of Cardano)
* - use third order polynomial solver to get eigenvalues of 3x3 matrix
* - defined 1/2*cubicroot(3) as constant for third order polynomial solving
* - added Vector subtraction from 3x3 Matrix diagonal (to help solve for eigenvectors)
* another update on Feb 5, 2011 (AT)
* - forward declaration of Mat33 fixes formerly "uncompilable" code, which can now be used
*
* Updated on May 17, 2011 by LEJ
* - added output functions with user-specified delimiters
* - added Vec4 class; no BLAS functions but has minimal I/O needed for X3D output
* - added converter between rotation matrices and axis-angle rotations (Vec4), in addition to AT's vec2rot converter
*
* Updated on June 10, 2011 by AT
* - fixed bad logical fallacy for "!=" operator (all classes in here):
* - code was 1:1 inversion of "==" operator's code
* - returned false when *any* one of the components were equal (which is wrong in most cases ...)
*
* Updated on Oct 24, 2011 by AT
* - added double4 struct for OpenCL
* - use double4 for quaternions
*
* Updated on Feb 28, 2012 by AT
* - fixed bad bug in V3TensProd: return matrix A started out as unit matrix (needs to be zero)
*
*/
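/* Usage sketch (illustrative, not part of the original header):
 *   Vec3 u(1.0, 2.0, 3.0);
 *   Vec3 v = u * 2.0;  // scalar must come second; 2.0 * u is undefined
 *   Mat33 A;           // identity by default
 *   u *= A;            // accumulating form, as recommended above
 */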
#ifndef INCLUDED_VECTORMAT
#define INCLUDED_VECTORMAT
#include <complex>
#include <cstdio>
#include <ctime>
#include <string>
#include <cmath>
#include <cstdlib>
#include <iostream>
#include <sstream>
#include <fstream>
#ifndef INCLUDED_CONFIG
#include "Config.h"
#endif
#ifndef INCLUDED_MC_OPENCL
#include "MC_OpenCL.h"
#endif
using namespace std;
const double sqrt3 = sqrt(3.0);
class Mat33; // Forward declaration of class Mat33 - AT Feb 5, 2011
/// Vec3 class - dedicated class for 3-dimensional vectors
class Vec3
{
public:
double vec[3];
friend class Mat33;
// Constructors and destructors
Vec3();
Vec3(const double, const double, const double);
Vec3(const double);
Vec3(const double*); // construct from array
Vec3(const Vec3 &);
~Vec3() {};
// Utility functions
Vec3& operator = (const Vec3 &);
void V3Zeros();
void V3Swap(Vec3 &);
//Comparison operators
bool operator== (const Vec3 &);
bool operator!= (const Vec3 &);
//Level 1 BLAS
Vec3 operator+ (const double);
Vec3& operator+= (const double);
Vec3 operator- (const double);
Vec3& operator-= (const double);
Vec3 operator* (const double);
Vec3& operator*= (const double);
Vec3 operator/ (const double);
Vec3& operator/= (const double);
Vec3 operator+ (const Vec3 &);
Vec3& operator+= (const Vec3 &);
Vec3 operator- (const Vec3 &);
Vec3& operator-= (const Vec3 &);
double operator* (const Vec3 &) const;
//Level 2 BLAS
//These do not compile properly. Ask someone who knows C++ better than I do. - LEJ 01/07/10
// -- solved by AT Feb 5, 2011 (needed forward declaration of Mat33 class)
Vec3 operator* (const Mat33 &);
Vec3& operator*= (const Mat33 &);
//Special functions
Vec3 V3Cross(const Vec3 &);
Mat33 V3TensProd(const Vec3 &); //Does not compile properly. Ask someone who knows C++ better than I do. - LEJ 01/07/10 - does now (AT, Feb 5, 2011)
double V3Norm() const;
double V3Sum() const;
double V3Prod() const;
std::string V3Str() const;
std::string V3Str(const char &) const;
};
/// complex class - 3d complex vector class, only minimal function set defined needed for eigenvalue problem
class CVec3{
public:
complex<double> cvec[3];
// Constructors and destructor
CVec3();
CVec3(const complex<double>, const complex<double>, const complex<double>);
CVec3(const Vec3 &);
CVec3(const CVec3 &);
~CVec3(){};
// Operators
bool operator== (const CVec3 &);
bool operator!= (const CVec3 &);
bool operator== (const Vec3 &);
bool operator!= (const Vec3 &);
CVec3& operator = (const CVec3 &);
CVec3 operator- (const double);
CVec3& operator-= (const double);
// Functions
Vec3 Re();
Vec3 Im();
Vec3 Abs();
std::string CV3Str() const;
std::string CV3Str(const char &) const;
};
///Four-element vector (needed for X3D rotations)
class Vec4 {
public:
double vec[4];
friend class Vec3;
friend class Mat33;
//Constructors and destructors
Vec4();
Vec4(const double, const double, const double, const double);
Vec4(const Vec3 &, const double);
Vec4(const Vec4 &);
~Vec4() {};
//Operators
bool operator== (const Vec4 &);
bool operator!= (const Vec4 &);
Vec4 operator+ (const Vec4 &);
Vec4 operator* (const double);
Vec4 operator/ (const double);
//Functions
std::string V4Str() const;
std::string V4Str(const char &) const;
};
/// Mat33 class - for when you need to do transformations on 3-vectors…
class Mat33
{
public:
double mat[3][3];
friend class Vec3;
//Constructors and destructors
inline Mat33();
Mat33(const Mat33 &);
Mat33(const Vec3 &, const Vec3 &);
Mat33(const double);
Mat33(const double a, const double b, const double c); // diagonal constructor
Mat33(const double*);
~Mat33(){};
//Utility functions
Mat33& operator= (const Mat33 &);
void M3Eye();
inline void M3Zeros();
//Comparison operators
bool operator== (const Mat33 &);
bool operator!= (const Mat33 &);
//Matrix-scalar operations
Mat33 operator+ (const double);
Mat33& operator+= (const double);
Mat33 operator- (const double);
Mat33& operator-= (const double);
Mat33 operator* (const double);
Mat33& operator*= (const double);
Mat33 operator/ (const double);
Mat33& operator/= (const double);
//Level 2 BLAS
Vec3 operator* (const Vec3 &);
//No *= operator due to datatype mismatch.
Mat33 operator- (const Vec3 &); // subtract vector from diagonal
Mat33& operator-= (const Vec3 &); // subtract vector from diagonal
//Level 3 BLAS
Mat33 operator+ (const Mat33 &);
Mat33& operator+= (const Mat33 &);
Mat33 operator- (const Mat33 &);
Mat33& operator -= (const Mat33 &);
Mat33 operator* (const Mat33 &);
Mat33& operator*= (const Mat33 &);
//These do not compile properly. Ask someone who knows C++ better than I do. - LEJ 01/07/10
Mat33 operator/ (const Mat33 &);
Mat33& operator/= (const Mat33 &);
//Special functions
CVec3 Eigenvalues();
Mat33 Eigenvectors(Vec3 &ew){ bool* multiples=new bool[3]; Mat33 A=Eigenvectors(ew,multiples,false); delete[] multiples; return A; };
Mat33 Eigenvectors(Vec3 &ew, bool normalize){ bool* multiples=new bool[3]; Mat33 A=Eigenvectors(ew,multiples,normalize); delete[] multiples; return A; };
Mat33 Eigenvectors(Vec3 &ew, bool* multiples, bool normalize);
inline Mat33 M3MulDiag(const double a, const double b, const double c);
Mat33 M3Transpose();
Mat33 M3RowSwap(const int, const int);
inline Vec3 ColumnVec3(const int);
inline Mat33 M3Inv(bool &sing);
inline Mat33 MulSymM3(const Mat33 &C);
inline Mat33 TransMulM3(const Mat33 &C);
inline Vec3 TransMulVec(const Vec3 &u);
inline Mat33 SymMulM3(const Mat33 &C);
inline Mat33 SymMulSymM3(const Mat33 &C);
inline Mat33 SymM3Inv(bool &sing);
inline Vec3 SymM3InvMult(bool &sing, Vec3 &z);
double M3Trace();
inline double M3Det();
Vec3 M3Diag();
void M3GEPP(Vec3 &x);
Vec3 M3LinSolve(Vec3 &u,bool &multiples);
void LUDecomposition();
bool M3BackSub(Vec3 &x); // returns true if multiples of vector are also solutions
std::string M3Str();
std::string M3RowStr(const int);
std::string M3RowStr(const int, const char &);
};
/* Function implementations are below. Math functions and constructors are inlined for speed.
* Non-math functions are kept in .cpp file. The vast majority of the class is contained in
* This file, organized in the same order that the prototypes are listed
*/
/// get solutions to third order polynomial in reduced form: z^3 + p*z + q = 0
CVec3 SolvePolynomial3(const double p, const double q);
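// Cardano sketch for z^3 + p*z + q = 0 (explanatory note, not from the
// original source): with D = (q/2)^2 + (p/3)^3, a real root for D >= 0 is
// z = cbrt(-q/2 + sqrt(D)) + cbrt(-q/2 - sqrt(D)); for D < 0 all three
// roots are real and follow from the trigonometric form.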
/* ***Vec3*** */
//Constructors and Destructors
/// Default constructor (zeros)
inline Vec3::Vec3()
{
vec[0] = 0.0; vec[1] = 0.0; vec[2] = 0.0;
}
/// Construct from doubles
inline Vec3::Vec3(const double a, const double b, const double c)
{
vec[0] = a;
vec[1] = b;
vec[2] = c;
}
inline Vec3::Vec3(const double d)
{
vec[0] = d; vec[1] = d; vec[2] = d;
}
/// Construct from an array of 3 doubles
inline Vec3::Vec3(const double* A)
{
vec[0] = A[0];
vec[1] = A[1];
vec[2] = A[2];
}
/// Copy constructor
inline Vec3::Vec3(const Vec3 &u)
{
vec[0] = u.vec[0];
vec[1] = u.vec[1];
vec[2] = u.vec[2];
}
//Non-BLAS utility functions
/// Zero vector v = [0 0 0]
inline void Vec3::V3Zeros()
{
vec[0] = 0.0;
vec[1] = 0.0;
vec[2] = 0.0;
}
/// Assignment operator
inline Vec3& Vec3::operator=(const Vec3 &u)
{
vec[0] = u.vec[0];
vec[1] = u.vec[1];
vec[2] = u.vec[2];
return *this;
}
/// Swap values in two vectors u <-> v
inline void Vec3::V3Swap(Vec3 &u)
{
double temp;
for (int i = 0; i < 3; i ++)
{
temp = vec[i];
vec[i] = u.vec[i];
u.vec[i] = temp;
}
}
// Comparison operators
inline bool Vec3::operator==(const Vec3 &u)
{
if (fabs(u.vec[0]-vec[0])>EPS) return false;
if (fabs(u.vec[1]-vec[1])>EPS) return false;
if (fabs(u.vec[2]-vec[2])>EPS) return false;
return true;
}
inline bool Vec3::operator!=(const Vec3 &u)
{
if (fabs(u.vec[0]-vec[0])>EPS) return true;
if (fabs(u.vec[1]-vec[1])>EPS) return true;
if (fabs(u.vec[2]-vec[2])>EPS) return true;
return false;
}
//Level 1 BLAS
/// Vector scalar addition v = u + a
inline Vec3 Vec3::operator+ (const double a)
{
Vec3 v(a+vec[0], a+vec[1], a+vec[2]);
return v;
}
/// Vector scalar addition u = u + a
inline Vec3& Vec3::operator += (const double a)
{
vec[0] +=a;
vec[1] +=a;
vec[2] +=a;
return *this;
}
/// Vector scalar subtraction v = u - a
inline Vec3 Vec3::operator- (const double a)
{
Vec3 v(vec[0]-a, vec[1]-a, vec[2]-a);
return v;
}
/// Vector scalar subtraction u = u - a
inline Vec3& Vec3::operator -= (const double a)
{
vec[0] -=a;
vec[1] -=a;
vec[2] -=a;
return *this;
}
/// Vector scalar multiplication v = u*a
inline Vec3 Vec3::operator* (const double a)
{
Vec3 v(a*vec[0], a*vec[1], a*vec[2]);
return v;
}
/// Vector scalar multiplication u = u*a
inline Vec3& Vec3::operator*= (const double a)
{
vec[0] *=a;
vec[1] *=a;
vec[2] *=a;
return *this;
}
/// Vector scalar division v = u/a
inline Vec3 Vec3::operator/ (const double a)
{
double b = 1.0/a;
Vec3 v(vec[0]*b, vec[1]*b, vec[2]*b);
return v;
}
/// Vector scalar division u = u/a
inline Vec3& Vec3::operator/= (const double a)
{
double b = 1.0/a;
vec[0] *=b;
vec[1] *=b;
vec[2] *=b;
return *this;
}
/// Vector addition v = u + w
inline Vec3 Vec3::operator+ (const Vec3 &w)
{
Vec3 v(vec[0]+w.vec[0], vec[1]+w.vec[1], vec[2] +w.vec[2]);
return v;
}
/// Vector addition u = u + w
inline Vec3& Vec3::operator+= (const Vec3 &w)
{
vec[0] += w.vec[0];
vec[1] += w.vec[1];
vec[2] += w.vec[2];
return *this;
}
/// Vector subtraction v = u - w
inline Vec3 Vec3::operator- (const Vec3 &w)
{
Vec3 v(vec[0]-w.vec[0], vec[1]-w.vec[1], vec[2] -w.vec[2]);
return v;
}
/// Vector subtraction u = u - w
inline Vec3& Vec3::operator-= (const Vec3 &w)
{
vec[0] -= w.vec[0];
vec[1] -= w.vec[1];
vec[2] -= w.vec[2];
return *this;
}
/// Dot product a = v'*u
inline double Vec3::operator* (const Vec3 &B) const
{
double a = vec[0]*B.vec[0] + vec[1]*B.vec[1] + vec[2]*B.vec[2];
return a;
}
//Level 2 BLAS
/// Vector-Matrix Multiply v = u'*A
inline Vec3 Vec3::operator* (const Mat33 &A)
{
cout << "here\n";
Vec3 v( vec[0]*A.mat[0][0]+vec[1]*A.mat[0][1]+vec[2]*A.mat[0][2],
vec[0]*A.mat[1][0]+vec[1]*A.mat[1][1]+vec[2]*A.mat[1][2],
vec[0]*A.mat[2][0]+vec[1]*A.mat[2][1]+vec[2]*A.mat[2][2]);
return v;
}
/// Vector-Matrix multiply u' = u'*A
inline Vec3& Vec3::operator*= (const Mat33 &A)
{
Vec3 u(*this);
vec[0] = u.vec[0]*A.mat[0][0]+u.vec[1]*A.mat[0][1]+u.vec[2]*A.mat[0][2];
vec[1] = u.vec[0]*A.mat[1][0]+u.vec[1]*A.mat[1][1]+u.vec[2]*A.mat[1][2];
vec[2] = u.vec[0]*A.mat[2][0]+u.vec[1]*A.mat[2][1]+u.vec[2]*A.mat[2][2];
return *this;
}
//Special functions
/// Vector norm |v|
inline double Vec3::V3Norm() const
{
return sqrt(vec[0]*vec[0]+vec[1]*vec[1]+vec[2]*vec[2]);
}
/// Cross product v = u x w (note: also overwrites *this with the result)
inline Vec3 Vec3::V3Cross(const Vec3 &w)
{
Vec3 v( vec[1]*w.vec[2] - vec[2]*w.vec[1],
vec[2]*w.vec[0] - vec[0]*w.vec[2],
vec[0]*w.vec[1] - vec[1]*w.vec[0]);
vec[0] = v.vec[0];
vec[1] = v.vec[1];
vec[2] = v.vec[2];
return v;
}
/// Tensor product A = u*w'
inline Mat33 Vec3::V3TensProd(const Vec3 &w)
{
Mat33 A;
A.M3Zeros();
for(int i = 0; i < 3; i++){
A.mat[i][0] += vec[i]*w.vec[0];
A.mat[i][1] += vec[i]*w.vec[1];
A.mat[i][2] += vec[i]*w.vec[2];
}
return A;
}
/// Sum of vector elements
inline double Vec3::V3Sum() const
{
double a = vec[0]+vec[1]+vec[2];
return a;
}
/// Product of vector elements
inline double Vec3::V3Prod() const
{
double a = vec[0]*vec[1]*vec[2];
return a;
}
/* ***CVec3*** */
// Constructors
/// Default constructor (everything is zero)
inline CVec3::CVec3(){
cvec[0] = complex<double>(0,0);
cvec[1] = complex<double>(0,0);
cvec[2] = complex<double>(0,0);
}
/// construct from complex<double>
inline CVec3::CVec3(const complex<double> a, const complex<double> b, const complex<double> c){
cvec[0] = a;
cvec[1] = b;
cvec[2] = c;
}
/// construct from Vec3
inline CVec3::CVec3(const Vec3 &u){
cvec[0] = u.vec[0];
cvec[1] = u.vec[1];
cvec[2] = u.vec[2];
}
/// Copy constructor
inline CVec3::CVec3(const CVec3 &u){
cvec[0] = u.cvec[0];
cvec[1] = u.cvec[1];
cvec[2] = u.cvec[2];
}
// Comparison operators
inline bool CVec3::operator==(const CVec3 &u)
{
if (u.cvec[0] != cvec[0]) return false;
if (u.cvec[1] != cvec[1]) return false;
if (u.cvec[2] != cvec[2]) return false;
return true;
}
inline bool CVec3::operator!=(const CVec3 &u)
{
if (u.cvec[0] != cvec[0]) return true;
if (u.cvec[1] != cvec[1]) return true;
if (u.cvec[2] != cvec[2]) return true;
return false;
}
inline bool CVec3::operator==(const Vec3 &u)
{
if (cvec[0] != complex<double>(u.vec[0])) return false;
if (cvec[1] != complex<double>(u.vec[1])) return false;
if (cvec[2] != complex<double>(u.vec[2])) return false;
return true;
}
inline bool CVec3::operator!=(const Vec3 &u)
{
if (cvec[0] != complex<double>(u.vec[0])) return true;
if (cvec[1] != complex<double>(u.vec[1])) return true;
if (cvec[2] != complex<double>(u.vec[2])) return true;
return false;
}
/// Assignment operator
inline CVec3& CVec3::operator=(const CVec3 &u){
cvec[0] = u.cvec[0];
cvec[1] = u.cvec[1];
cvec[2] = u.cvec[2];
return *this;
}
/// Scalar subtraction z = u - d
inline CVec3 CVec3::operator- (const double d)
{
CVec3 cv(cvec[0]-d, cvec[1]-d, cvec[2]-d);
return cv;
}
/// Scalar subtraction z = u - d
inline CVec3& CVec3::operator -= (const double d)
{
cvec[0] -=d;
cvec[1] -=d;
cvec[2] -=d;
return *this;
}
// Functions
inline Vec3 CVec3::Re(){
Vec3 v(real(cvec[0]),real(cvec[1]),real(cvec[2]));
return v;
}
inline Vec3 CVec3::Im(){
Vec3 v(imag(cvec[0]),imag(cvec[1]),imag(cvec[2]));
return v;
}
inline Vec3 CVec3::Abs(){
Vec3 v(abs(cvec[0]),abs(cvec[1]),abs(cvec[2]));
return v;
}
/* ***Mat33*** */
// Constructors and destructors
/// Default constructor (identity matrix)
inline Mat33::Mat33()
{
mat[0][0] = 1.0; mat[0][1] = 0.0; mat[0][2] = 0.0;
mat[1][0] = 0.0; mat[1][1] = 1.0; mat[1][2] = 0.0;
mat[2][0] = 0.0; mat[2][1] = 0.0; mat[2][2] = 1.0;
}
/// Copy constructor
inline Mat33::Mat33(const Mat33 &A)
{
for (int i = 0; i < 3; i++)
{
mat[i][0] = A.mat[i][0];
mat[i][1] = A.mat[i][1];
mat[i][2] = A.mat[i][2];
}
}
/// Tensor constructor
inline Mat33::Mat33(const Vec3 &u, const Vec3 &w)
{
for(int i = 0; i < 3; i++){
mat[i][0] = u.vec[i]*w.vec[0];
mat[i][1] = u.vec[i]*w.vec[1];
mat[i][2] = u.vec[i]*w.vec[2];
}
}
/// Constant constructor
inline Mat33::Mat33(const double a)
{
mat[0][0] = a;
mat[0][1] = a;
mat[0][2] = a;
mat[1][0] = a;
mat[1][1] = a;
mat[1][2] = a;
mat[2][0] = a;
mat[2][1] = a;
mat[2][2] = a;
}
/// Diagonal constructor
inline Mat33::Mat33(const double a, const double b, const double c)
{
mat[0][0] = a;
mat[0][1] = 0.0;
mat[0][2] = 0.0;
mat[1][0] = 0.0;
mat[1][1] = b;
mat[1][2] = 0.0;
mat[2][0] = 0.0;
mat[2][1] = 0.0;
mat[2][2] = c;
}
/// Constant constructor from array
inline Mat33::Mat33(const double* a)
{
mat[0][0] = a[0];
mat[0][1] = a[1];
mat[0][2] = a[2];
mat[1][0] = a[3];
mat[1][1] = a[4];
mat[1][2] = a[5];
mat[2][0] = a[6];
mat[2][1] = a[7];
mat[2][2] = a[8];
}
//Utility functions
/// Assignment operator operator B = A
inline Mat33& Mat33::operator= (const Mat33 &A)
{
for (int i = 0; i < 3; i++){
mat[i][0] = A.mat[i][0];
mat[i][1] = A.mat[i][1];
mat[i][2] = A.mat[i][2];
}
return *this;
}
/// Identity matrix
inline void Mat33::M3Eye()
{
mat[0][0] = 1.0; mat[0][1] = 0.0; mat[0][2] = 0.0;
mat[1][0] = 0.0; mat[1][1] = 1.0; mat[1][2] = 0.0;
mat[2][0] = 0.0; mat[2][1] = 0.0; mat[2][2] = 1.0;
}
/// Zero matrix
inline void Mat33::M3Zeros()
{
mat[0][0] = 0.0; mat[0][1] = 0.0; mat[0][2] = 0.0;
mat[1][0] = 0.0; mat[1][1] = 0.0; mat[1][2] = 0.0;
mat[2][0] = 0.0; mat[2][1] = 0.0; mat[2][2] = 0.0;
}
/// Comparison operators
inline bool Mat33::operator== (const Mat33 &A)
{
for(int i = 0; i < 3; i++){
if(fabs(A.mat[i][0]-mat[i][0])>EPS) return false;
if(fabs(A.mat[i][1]-mat[i][1])>EPS) return false;
if(fabs(A.mat[i][2]-mat[i][2])>EPS) return false;
}
return true;
}
inline bool Mat33::operator!= (const Mat33 &A)
{
for(int i = 0; i < 3; i++){
if(fabs(A.mat[i][0]-mat[i][0])>EPS) return true;
if(fabs(A.mat[i][1]-mat[i][1])>EPS) return true;
if(fabs(A.mat[i][2]-mat[i][2])>EPS) return true;
}
return false;
}
//Matrix-scalar operations
/// Matrix-scalar addition B = A + a
inline Mat33 Mat33::operator+ (const double a)
{
Mat33 B;
B.mat[0][0] = mat[0][0]+a;
B.mat[0][1] = mat[0][1]+a;
B.mat[0][2] = mat[0][2]+a;
B.mat[1][0] = mat[1][0]+a;
B.mat[1][1] = mat[1][1]+a;
B.mat[1][2] = mat[1][2]+a;
B.mat[2][0] = mat[2][0]+a;
B.mat[2][1] = mat[2][1]+a;
B.mat[2][2] = mat[2][2]+a;
return B;
}
/// Matrix-scalar addition A = A + a
inline Mat33& Mat33::operator+= (const double a)
{
mat[0][0] += a;
mat[0][1] += a;
mat[0][2] += a;
mat[1][0] += a;
mat[1][1] += a;
mat[1][2] += a;
mat[2][0] += a;
mat[2][1] += a;
mat[2][2] += a;
return *this;
}
/// Matrix-scalar subtraction B = A - a
inline Mat33 Mat33::operator- (const double a)
{
Mat33 B;
B.mat[0][0] = mat[0][0]-a;
B.mat[0][1] = mat[0][1]-a;
B.mat[0][2] = mat[0][2]-a;
B.mat[1][0] = mat[1][0]-a;
B.mat[1][1] = mat[1][1]-a;
B.mat[1][2] = mat[1][2]-a;
B.mat[2][0] = mat[2][0]-a;
B.mat[2][1] = mat[2][1]-a;
B.mat[2][2] = mat[2][2]-a;
return B;
}
/// Matrix-scalar subtraction A = A - a
inline Mat33& Mat33::operator-= (const double a)
{
mat[0][0] -= a;
mat[0][1] -= a;
mat[0][2] -= a;
mat[1][0] -= a;
mat[1][1] -= a;
mat[1][2] -= a;
mat[2][0] -= a;
mat[2][1] -= a;
mat[2][2] -= a;
return *this;
}
/// Matrix-vector subtraction from diagonal
inline Mat33 Mat33::operator- (const Vec3 &u)
{
Mat33 B(*this); // copy first: the default constructor is the identity, which would drop the off-diagonal entries
B.mat[0][0] = mat[0][0]-u.vec[0];
B.mat[1][1] = mat[1][1]-u.vec[1];
B.mat[2][2] = mat[2][2]-u.vec[2];
return B;
}
/// Matrix-vector subtraction from diagonal
inline Mat33& Mat33::operator-= (const Vec3 &u)
{
mat[0][0] -= u.vec[0];
mat[1][1] -= u.vec[1];
mat[2][2] -= u.vec[2];
return *this;
}
/// Matrix-scalar multiplication B = A*a
inline Mat33 Mat33::operator* (const double a)
{
Mat33 B;
B.mat[0][0] = mat[0][0]*a;
B.mat[0][1] = mat[0][1]*a;
B.mat[0][2] = mat[0][2]*a;
B.mat[1][0] = mat[1][0]*a;
B.mat[1][1] = mat[1][1]*a;
B.mat[1][2] = mat[1][2]*a;
B.mat[2][0] = mat[2][0]*a;
B.mat[2][1] = mat[2][1]*a;
B.mat[2][2] = mat[2][2]*a;
return B;
}
/// Matrix-scalar multiplication A = A*a
inline Mat33& Mat33::operator*= (const double a)
{
mat[0][0] *= a;
mat[0][1] *= a;
mat[0][2] *= a;
mat[1][0] *= a;
mat[1][1] *= a;
mat[1][2] *= a;
mat[2][0] *= a;
mat[2][1] *= a;
mat[2][2] *= a;
return *this;
}
/// Matrix-scalar division B = A/a
inline Mat33 Mat33::operator/ (const double a)
{
Mat33 B(*this);
double inva = 1.0/a;
B.mat[0][0] = mat[0][0]*inva;
B.mat[0][1] = mat[0][1]*inva;
B.mat[0][2] = mat[0][2]*inva;
B.mat[1][0] = mat[1][0]*inva;
B.mat[1][1] = mat[1][1]*inva;
B.mat[1][2] = mat[1][2]*inva;
B.mat[2][0] = mat[2][0]*inva;
B.mat[2][1] = mat[2][1]*inva;
B.mat[2][2] = mat[2][2]*inva;
return B;
}
/// Matrix-scalar division A = A/a
inline Mat33& Mat33::operator/= (const double a)
{
double inva = 1.0/a;
mat[0][0] *= inva;
mat[0][1] *= inva;
mat[0][2] *= inva;
mat[1][0] *= inva;
mat[1][1] *= inva;
mat[1][2] *= inva;
mat[2][0] *= inva;
mat[2][1] *= inva;
mat[2][2] *= inva;
return *this;
}
//Level 2 BLAS
/// Matrix-vector multiplication v = A*u
inline Vec3 Mat33::operator* (const Vec3 &u)
{
return Vec3(mat[0][0]*u.vec[0]+mat[0][1]*u.vec[1]+mat[0][2]*u.vec[2],
mat[1][0]*u.vec[0]+mat[1][1]*u.vec[1]+mat[1][2]*u.vec[2],
mat[2][0]*u.vec[0]+mat[2][1]*u.vec[1]+mat[2][2]*u.vec[2]);
}
//Level 3 BLAS
/// Matrix addition B = A + C
inline Mat33 Mat33::operator+ (const Mat33 &C)
{
Mat33 B(*this);
for(int i = 0; i < 3; i++){
B.mat[i][0] += C.mat[i][0];
B.mat[i][1] += C.mat[i][1];
B.mat[i][2] += C.mat[i][2];
}
return B;
}
/// Matrix addition C = A + C
inline Mat33& Mat33::operator+= (const Mat33 &C)
{
for(int i = 0; i < 3; i++){
mat[i][0] += C.mat[i][0];
mat[i][1] += C.mat[i][1];
mat[i][2] += C.mat[i][2];
}
return *this;
}
/// Matrix subtraction B = A - C
inline Mat33 Mat33::operator- (const Mat33 &C)
{
Mat33 B(*this);
for(int i = 0; i < 3; i++){
B.mat[i][0] -= C.mat[i][0];
B.mat[i][1] -= C.mat[i][1];
B.mat[i][2] -= C.mat[i][2];
}
return B;
}
/// Matrix subtraction A = A - C
inline Mat33& Mat33::operator -= (const Mat33 &C)
{
for(int i = 0; i < 3; i++){
mat[i][0] -= C.mat[i][0];
mat[i][1] -= C.mat[i][1];
mat[i][2] -= C.mat[i][2];
}
return *this;
}
/// Matrix multiplication B = A*C
inline Mat33 Mat33::operator* (const Mat33 &C)
{
Mat33 B;
B.mat[0][0] = mat[0][0]*C.mat[0][0] + mat[0][1]*C.mat[1][0] + mat[0][2]*C.mat[2][0];
B.mat[0][1] = mat[0][0]*C.mat[0][1] + mat[0][1]*C.mat[1][1] + mat[0][2]*C.mat[2][1];
B.mat[0][2] = mat[0][0]*C.mat[0][2] + mat[0][1]*C.mat[1][2] + mat[0][2]*C.mat[2][2];
B.mat[1][0] = mat[1][0]*C.mat[0][0] + mat[1][1]*C.mat[1][0] + mat[1][2]*C.mat[2][0];
B.mat[1][1] = mat[1][0]*C.mat[0][1] + mat[1][1]*C.mat[1][1] + mat[1][2]*C.mat[2][1];
B.mat[1][2] = mat[1][0]*C.mat[0][2] + mat[1][1]*C.mat[1][2] + mat[1][2]*C.mat[2][2];
B.mat[2][0] = mat[2][0]*C.mat[0][0] + mat[2][1]*C.mat[1][0] + mat[2][2]*C.mat[2][0];
B.mat[2][1] = mat[2][0]*C.mat[0][1] + mat[2][1]*C.mat[1][1] + mat[2][2]*C.mat[2][1];
B.mat[2][2] = mat[2][0]*C.mat[0][2] + mat[2][1]*C.mat[1][2] + mat[2][2]*C.mat[2][2];
return B;
}
/// Matrix multiplication A = A*C
inline Mat33& Mat33::operator*= (const Mat33 &C)
{
Mat33 A(*this);
mat[0][0] = A.mat[0][0]*C.mat[0][0] + A.mat[0][1]*C.mat[1][0] + A.mat[0][2]*C.mat[2][0];
mat[0][1] = A.mat[0][0]*C.mat[0][1] + A.mat[0][1]*C.mat[1][1] + A.mat[0][2]*C.mat[2][1];
mat[0][2] = A.mat[0][0]*C.mat[0][2] + A.mat[0][1]*C.mat[1][2] + A.mat[0][2]*C.mat[2][2];
mat[1][0] = A.mat[1][0]*C.mat[0][0] + A.mat[1][1]*C.mat[1][0] + A.mat[1][2]*C.mat[2][0];
mat[1][1] = A.mat[1][0]*C.mat[0][1] + A.mat[1][1]*C.mat[1][1] + A.mat[1][2]*C.mat[2][1];
mat[1][2] = A.mat[1][0]*C.mat[0][2] + A.mat[1][1]*C.mat[1][2] + A.mat[1][2]*C.mat[2][2];
mat[2][0] = A.mat[2][0]*C.mat[0][0] + A.mat[2][1]*C.mat[1][0] + A.mat[2][2]*C.mat[2][0];
mat[2][1] = A.mat[2][0]*C.mat[0][1] + A.mat[2][1]*C.mat[1][1] + A.mat[2][2]*C.mat[2][1];
mat[2][2] = A.mat[2][0]*C.mat[0][2] + A.mat[2][1]*C.mat[1][2] + A.mat[2][2]*C.mat[2][2];
return *this;
}
//Matrix right division B = A/C
//In need of attention by a C++ expert LEJ 01/07/2010
/*inline Mat33 Mat33::operator/ (const Mat33 &C)
{
Mat33 B(0.0);
Mat33 D = C.M3Inv(NULL);
for(int i = 0; i < 3; i++)
{
B.mat[i][0] += mat[i][0]*D.mat[0][0] + mat[i][1]*D.mat[1][0] + mat[i][2]*D.mat[2][0];
B.mat[i][1] += mat[i][0]*D.mat[0][1] + mat[i][1]*D.mat[1][1] + mat[i][2]*D.mat[2][1];
B.mat[i][2] += mat[i][0]*D.mat[0][2] + mat[i][1]*D.mat[1][2] + mat[i][2]*D.mat[2][2];
}
return B;
}
//Matrix right division A = A/C
inline Mat33& Mat33::operator/= (const Mat33 &C)
{
Mat33 A(*this);
Mat33 D = C.M3Inv(NULL);
for(int i = 0; i < 3; i++)
{
for(int j = 0; j < 3; j++)
{
mat[i][j] = A.mat[i][0]*C.mat[0][j] + A.mat[i][1]*C.mat[1][j] + A.mat[i][2]*C.mat[2][j];
}
}
return *this;
}*/
//Special functions
inline Mat33 Mat33::TransMulM3(const Mat33 &C)
{
Mat33 B;
B.mat[0][0] = mat[0][0]*C.mat[0][0] + mat[1][0]*C.mat[1][0] + mat[2][0]*C.mat[2][0];
B.mat[0][1] = mat[0][0]*C.mat[0][1] + mat[1][0]*C.mat[1][1] + mat[2][0]*C.mat[2][1];
B.mat[0][2] = mat[0][0]*C.mat[0][2] + mat[1][0]*C.mat[1][2] + mat[2][0]*C.mat[2][2];
B.mat[1][0] = mat[0][1]*C.mat[0][0] + mat[1][1]*C.mat[1][0] + mat[2][1]*C.mat[2][0];
B.mat[1][1] = mat[0][1]*C.mat[0][1] + mat[1][1]*C.mat[1][1] + mat[2][1]*C.mat[2][1];
B.mat[1][2] = mat[0][1]*C.mat[0][2] + mat[1][1]*C.mat[1][2] + mat[2][1]*C.mat[2][2];
B.mat[2][0] = mat[0][2]*C.mat[0][0] + mat[1][2]*C.mat[1][0] + mat[2][2]*C.mat[2][0];
B.mat[2][1] = mat[0][2]*C.mat[0][1] + mat[1][2]*C.mat[1][1] + mat[2][2]*C.mat[2][1];
B.mat[2][2] = mat[0][2]*C.mat[0][2] + mat[1][2]*C.mat[1][2] + mat[2][2]*C.mat[2][2];
return B;
}
/// Matrix-vector multiplication v = A*u
inline Vec3 Mat33::TransMulVec(const Vec3 &u)
{
return Vec3(mat[0][0]*u.vec[0]+mat[1][0]*u.vec[1]+mat[2][0]*u.vec[2],
mat[0][1]*u.vec[0]+mat[1][1]*u.vec[1]+mat[2][1]*u.vec[2],
mat[0][2]*u.vec[0]+mat[1][2]*u.vec[1]+mat[2][2]*u.vec[2]);
}
inline Vec3 Mat33::ColumnVec3(const int col)
{
Vec3 v(mat[0][col],mat[1][col],mat[2][col]);
return v;
}
/// Matrix determinant d = Det(A)
inline double Mat33::M3Det()
{
double det = mat[0][0]*(mat[1][1]*mat[2][2]-mat[1][2]*mat[2][1])
+ mat[1][0]*(mat[2][1]*mat[0][2]-mat[0][1]*mat[2][2])
+ mat[2][0]*(mat[0][1]*mat[1][2]-mat[0][2]*mat[1][1]);
return det;
}
// Multiplication of non-symmetric matrix with symmetric matrix C
inline Mat33 Mat33::MulSymM3(const Mat33 &C)
{
Mat33 B;
B.mat[0][0] = mat[0][0]*C.mat[0][0] + mat[0][1]*C.mat[0][1] + mat[0][2]*C.mat[0][2];
B.mat[0][1] = mat[0][0]*C.mat[0][1] + mat[0][1]*C.mat[1][1] + mat[0][2]*C.mat[1][2];
B.mat[0][2] = mat[0][0]*C.mat[0][2] + mat[0][1]*C.mat[1][2] + mat[0][2]*C.mat[2][2];
B.mat[1][0] = mat[1][0]*C.mat[0][0] + mat[1][1]*C.mat[0][1] + mat[1][2]*C.mat[0][2];
B.mat[1][1] = mat[1][0]*C.mat[0][1] + mat[1][1]*C.mat[1][1] + mat[1][2]*C.mat[1][2];
B.mat[1][2] = mat[1][0]*C.mat[0][2] + mat[1][1]*C.mat[1][2] + mat[1][2]*C.mat[2][2];
B.mat[2][0] = mat[2][0]*C.mat[0][0] + mat[2][1]*C.mat[0][1] + mat[2][2]*C.mat[0][2];
B.mat[2][1] = mat[2][0]*C.mat[0][1] + mat[2][1]*C.mat[1][1] + mat[2][2]*C.mat[1][2];
B.mat[2][2] = mat[2][0]*C.mat[0][2] + mat[2][1]*C.mat[1][2] + mat[2][2]*C.mat[2][2];
return B;
}
// Multiplication of symmetric matrix with non-symmetric matrix C
inline Mat33 Mat33::SymMulM3(const Mat33 &C)
{
Mat33 B;
B.mat[0][0] = mat[0][0]*C.mat[0][0] + mat[0][1]*C.mat[1][0] + mat[0][2]*C.mat[2][0];
B.mat[0][1] = mat[0][0]*C.mat[0][1] + mat[0][1]*C.mat[1][1] + mat[0][2]*C.mat[2][1];
B.mat[0][2] = mat[0][0]*C.mat[0][2] + mat[0][1]*C.mat[1][2] + mat[0][2]*C.mat[2][2];
B.mat[1][0] = mat[0][1]*C.mat[0][0] + mat[1][1]*C.mat[1][0] + mat[1][2]*C.mat[2][0];
B.mat[1][1] = mat[0][1]*C.mat[0][1] + mat[1][1]*C.mat[1][1] + mat[1][2]*C.mat[2][1];
B.mat[1][2] = mat[0][1]*C.mat[0][2] + mat[1][1]*C.mat[1][2] + mat[1][2]*C.mat[2][2];
B.mat[2][0] = mat[0][2]*C.mat[0][0] + mat[1][2]*C.mat[1][0] + mat[2][2]*C.mat[2][0];
B.mat[2][1] = mat[0][2]*C.mat[0][1] + mat[1][2]*C.mat[1][1] + mat[2][2]*C.mat[2][1];
B.mat[2][2] = mat[0][2]*C.mat[0][2] + mat[1][2]*C.mat[1][2] + mat[2][2]*C.mat[2][2];
return B;
}
// Multiplication of two symmetric matrices
inline Mat33 Mat33::SymMulSymM3(const Mat33 &C)
{
Mat33 B;
B.mat[0][0] = mat[0][0]*C.mat[0][0] + mat[0][1]*C.mat[0][1] + mat[0][2]*C.mat[0][2];
B.mat[0][1] = mat[0][0]*C.mat[0][1] + mat[0][1]*C.mat[1][1] + mat[0][2]*C.mat[1][2];
B.mat[0][2] = mat[0][0]*C.mat[0][2] + mat[0][1]*C.mat[1][2] + mat[0][2]*C.mat[2][2];
B.mat[1][0] = mat[0][1]*C.mat[0][0] + mat[1][1]*C.mat[0][1] + mat[1][2]*C.mat[0][2];
B.mat[1][1] = mat[0][1]*C.mat[0][1] + mat[1][1]*C.mat[1][1] + mat[1][2]*C.mat[1][2];
B.mat[1][2] = mat[0][1]*C.mat[0][2] + mat[1][1]*C.mat[1][2] + mat[1][2]*C.mat[2][2];
B.mat[2][0] = mat[0][2]*C.mat[0][0] + mat[1][2]*C.mat[0][1] + mat[2][2]*C.mat[0][2];
B.mat[2][1] = mat[0][2]*C.mat[0][1] + mat[1][2]*C.mat[1][1] + mat[2][2]*C.mat[1][2];
B.mat[2][2] = mat[0][2]*C.mat[0][2] + mat[1][2]*C.mat[1][2] + mat[2][2]*C.mat[2][2];
return B;
}
/*!
* Cramer's rule inversion for 3x3 matrix
*/
inline Mat33 Mat33::M3Inv(bool &sing)
{
Mat33 A(*this);
A.mat[0][0] = mat[1][1]*mat[2][2]-mat[1][2]*mat[2][1];
A.mat[0][1] = mat[2][1]*mat[0][2]-mat[0][1]*mat[2][2];
A.mat[0][2] = mat[0][1]*mat[1][2]-mat[0][2]*mat[1][1];
double det = mat[0][0]*A.mat[0][0] + mat[1][0]*A.mat[0][1] + mat[2][0]*A.mat[0][2];
if(fabs(det) < EPS){
cout << "WARNING: Matrix is close to singular. det = " << det << "\n";
sing=true;
}
double invdet = 1.0/det;
A.mat[0][0] *= invdet;
A.mat[0][1] *= invdet;
A.mat[0][2] *= invdet;
A.mat[1][0] = (mat[2][0]*mat[1][2]-mat[1][0]*mat[2][2])*invdet;
A.mat[1][1] = (mat[0][0]*mat[2][2]-mat[2][0]*mat[0][2])*invdet;
A.mat[1][2] = (mat[1][0]*mat[0][2]-mat[0][0]*mat[1][2])*invdet;
A.mat[2][0] = (mat[1][0]*mat[2][1]-mat[1][1]*mat[2][0])*invdet;
A.mat[2][1] = (mat[2][0]*mat[0][1]-mat[0][0]*mat[2][1])*invdet;
A.mat[2][2] = (mat[0][0]*mat[1][1]-mat[1][0]*mat[0][1])*invdet;
return A;
}
/*!
* Cramer's rule inversion for *symmetric* 3x3 matrix (expects upper half to be filled, WARNING: no checking if matrix is actually symmetric)
*/
inline Mat33 Mat33::SymM3Inv(bool &sing)
{
Mat33 A;
A.mat[0][0] = mat[1][1]*mat[2][2]-mat[1][2]*mat[1][2];
A.mat[0][1] = mat[1][2]*mat[0][2]-mat[0][1]*mat[2][2];
A.mat[0][2] = mat[0][1]*mat[1][2]-mat[0][2]*mat[1][1];
double det = mat[0][0]*A.mat[0][0] + mat[0][1]*A.mat[0][1] + mat[0][2]*A.mat[0][2];
if(fabs(det) < EPS){
cout << "WARNING: Matrix is close to singular. det = " << det << "\n";
sing=true;
}
double invdet = 1.0/det;
A.mat[0][0] *= invdet;
A.mat[0][1] *= invdet;
A.mat[0][2] *= invdet;
A.mat[1][0] = A.mat[0][1];
A.mat[1][1] = (mat[0][0]*mat[2][2]-mat[0][2]*mat[0][2])*invdet;
A.mat[1][2] = (mat[0][1]*mat[0][2]-mat[0][0]*mat[1][2])*invdet;
A.mat[2][0] = A.mat[0][2];
A.mat[2][1] = A.mat[1][2];
A.mat[2][2] = (mat[0][0]*mat[1][1]-mat[0][1]*mat[0][1])*invdet;
return A;
}
/*!
* Cramer's rule inversion for *symmetric* 3x3 matrix (expects upper half to be filled, WARNING: no checking if matrix is actually symmetric)
* followed by multiplication with vector
*/
inline Vec3 Mat33::SymM3InvMult(bool &sing, Vec3 &z)
{
Vec3 v, r;
v.vec[0]=mat[1][1]*mat[2][2]-mat[1][2]*mat[1][2];
v.vec[1]=mat[1][2]*mat[0][2]-mat[0][1]*mat[2][2];
v.vec[2]=mat[0][1]*mat[1][2]-mat[0][2]*mat[1][1];
double det = mat[0][0]*v.vec[0] + mat[0][1]*v.vec[1] + mat[0][2]*v.vec[2];
if(fabs(det) < EPS){
cout << "WARNING: Matrix is close to singular. det = " << det << "\n";
sing=true;
}
r.vec[0] = v.vec[0]*z.vec[0]+v.vec[1]*z.vec[1]+v.vec[2]*z.vec[2];
v.vec[0] = (mat[0][1]*mat[0][2]-mat[0][0]*mat[1][2]); // a_12
r.vec[1] = v.vec[1]*z.vec[0]+(mat[0][0]*mat[2][2]-mat[0][2]*mat[0][2])*z.vec[1]+v.vec[0]*z.vec[2];
r.vec[2] = v.vec[2]*z.vec[0]+v.vec[0]*z.vec[1]+(mat[0][0]*mat[1][1]-mat[0][1]*mat[0][1])*z.vec[2];
r/=det;
return r;
}
/// Multiply matrix with diagonal matrix specified through a,b,c C = A*[a,0,0 ; 0,b,0 ; 0,0,c ]
inline Mat33 Mat33::M3MulDiag(const double a, const double b, const double c)
{
Mat33 B;
B.mat[0][0] = mat[0][0]*a;
B.mat[0][1] = mat[0][1]*b;
B.mat[0][2] = mat[0][2]*c;
B.mat[1][0] = mat[1][0]*a;
B.mat[1][1] = mat[1][1]*b;
B.mat[1][2] = mat[1][2]*c;
B.mat[2][0] = mat[2][0]*a;
B.mat[2][1] = mat[2][1]*b;
B.mat[2][2] = mat[2][2]*c;
return B;
}
/// Matrix transpose C = A'
inline Mat33 Mat33::M3Transpose()
{
Mat33 C;
C.mat[0][0] = mat[0][0];
C.mat[0][1] = mat[1][0];
C.mat[0][2] = mat[2][0];
C.mat[1][0] = mat[0][1];
C.mat[1][1] = mat[1][1];
C.mat[1][2] = mat[2][1];
C.mat[2][0] = mat[0][2];
C.mat[2][1] = mat[1][2];
C.mat[2][2] = mat[2][2];
return C;
}
/// Matrix row swap
inline Mat33 Mat33::M3RowSwap(const int a, const int b)
{
Mat33 A(*this); // copy first: the default constructor is the identity, so the unswapped row would be lost
double temp;
temp = mat[b][0];
A.mat[b][0] = mat[a][0];
A.mat[a][0] = temp;
temp = mat[b][1];
A.mat[b][1] = mat[a][1];
A.mat[a][1] = temp;
temp = mat[b][2];
A.mat[b][2] = mat[a][2];
A.mat[a][2] = temp;
return A;
}
/// Extract diagonal elements of matrix v = Aii -- goes in Mat33 class
inline Vec3 Mat33::M3Diag()
{
Vec3 v(mat[0][0],mat[1][1],mat[2][2]);
return v;
}
/// Trace of a matrix a = Tr(A)
inline double Mat33::M3Trace()
{
double a = mat[0][0]+mat[1][1]+mat[2][2];
return a;
}
/// Back-substitution on 3x3 triangular system
inline bool Mat33::M3BackSub(Vec3 &x)
{
bool multiples=false;
if (fabs(mat[2][2])>EPS){
x.vec[2] = x.vec[2]/mat[2][2];
} else{
x.vec[2]=1.0;
multiples=true;
}
if (fabs(mat[1][1])>EPS){
x.vec[1] = (x.vec[1] - mat[1][2]*x.vec[2])/mat[1][1];
} else{
x.vec[1]=1.0;
multiples=true;
}
if (fabs(mat[0][0])>EPS){
x.vec[0] = (x.vec[0] - mat[0][1]*x.vec[1] - mat[0][2]*x.vec[2])/mat[0][0];
} else{
x.vec[0]=1.0;
multiples=true;
}
#if DEBUG_LEVEL>2
cout << M3Str() << "\n---\n";
#endif
return multiples;
}
/// Linear Solver for 3x3 systems using GEPP + back-substitution
inline Vec3 Mat33::M3LinSolve(Vec3 &u, bool &multiples)
{
Vec3 v(u);
Mat33 A(*this);
A.M3GEPP(v);
multiples=A.M3BackSub(v);
return v;
}
inline Mat33 Vec2Rot(Vec3 &rot_vec, const unsigned int a0) // a0 is reference axis {0,1,2} = {x,y,z}
{
Mat33 a,b;
rot_vec/=rot_vec.V3Norm(); // normalize (just in case)
unsigned int a1=(a0+1)%3;
unsigned int a2=(a0+2)%3;
double sqr_argument=rot_vec.vec[a1]*rot_vec.vec[a1]+rot_vec.vec[a2]*rot_vec.vec[a2];
if(sqr_argument>EPS){
double invhyp=1.0/sqrt(sqr_argument);
double cp=rot_vec.vec[a2]*invhyp;
double sp=rot_vec.vec[a1]*invhyp;
double st=sqrt(1.0-rot_vec.vec[a0]*rot_vec.vec[a0]);
// we're doing axis rotation here, that's why the sign of the sin term sp is opposite
a.mat[a0][a0]=1.0; a.mat[a0][a1]=0.0; a.mat[a0][a2]=0.0;
a.mat[a1][a0]=0.0; a.mat[a1][a1]=cp; a.mat[a1][a2]=sp;
a.mat[a2][a0]=0.0; a.mat[a2][a1]=-1.0*sp; a.mat[a2][a2]=cp;
b.mat[a0][a0]=rot_vec.vec[a0]; b.mat[a0][a1]=0.0; b.mat[a0][a2]=-1.0*st;
b.mat[a1][a0]=0.0; b.mat[a1][a1]=1.0; b.mat[a1][a2]=0.0;
b.mat[a2][a0]=st; b.mat[a2][a1]=0.0; b.mat[a2][a2]=rot_vec.vec[a0];
return a*b; // return rotation matrix if vector not pointing in z-direction
}
// if we get here then the vector is in axis-direction
if(rot_vec.vec[a0]<0.0){ // rotate 180 degrees around next axis
a.mat[a0][a0]=-1.0;
a.mat[a2][a2]=-1.0;
}
return a; // return unit matrix if no rotation necessary (also applies to zero-vector)
}
/* ***Vec4*** */
/// Default constructor (zeros)
inline Vec4::Vec4()
{
vec[0] = 0.0; vec[1] = 0.0; vec[2] = 0.0; vec[3] = 0.0;
}
/// Construct from doubles
inline Vec4::Vec4(const double a, const double b, const double c, const double d)
{
vec[0] = a;
vec[1] = b;
vec[2] = c;
vec[3] = d;
}
/// Construct from Vec3 and double
inline Vec4::Vec4(const Vec3 &u, const double d)
{
vec[0] = u.vec[0];
vec[1] = u.vec[1];
vec[2] = u.vec[2];
vec[3] = d;
}
/// Copy constructor
inline Vec4::Vec4(const Vec4 &u)
{
vec[0] = u.vec[0];
vec[1] = u.vec[1];
vec[2] = u.vec[2];
vec[3] = u.vec[3];
}
/// Vector addition v = u + w
inline Vec4 Vec4::operator+ (const Vec4 &w)
{
Vec4 v(vec[0]+w.vec[0], vec[1]+w.vec[1], vec[2] +w.vec[2], vec[3] +w.vec[3]);
return v;
}
/// Vector scalar multiplication v = u*a
inline Vec4 Vec4::operator* (const double a)
{
Vec4 v(a*vec[0], a*vec[1], a*vec[2], a*vec[3]);
return v;
}
/// Vector scalar division v = u/a
inline Vec4 Vec4::operator/ (const double a)
{
double b = 1.0/a;
Vec4 v(b*vec[0], b*vec[1], b*vec[2], b*vec[3]);
return v;
}
/// Comparison operators
inline bool Vec4::operator==(const Vec4 &u)
{
if(fabs(u.vec[0]-vec[0])>EPS) return false;
if(fabs(u.vec[1]-vec[1])>EPS) return false;
if(fabs(u.vec[2]-vec[2])>EPS) return false;
if(fabs(u.vec[3]-vec[3])>EPS) return false;
return true;
}
inline bool Vec4::operator!=(const Vec4 &u)
{
if(fabs(u.vec[0]-vec[0])>EPS) return true;
if(fabs(u.vec[1]-vec[1])>EPS) return true;
if(fabs(u.vec[2]-vec[2])>EPS) return true;
if(fabs(u.vec[3]-vec[3])>EPS) return true;
return false;
}
/// Rotation axis and angle from rotation matrix
inline Vec4 Rot2AxisAngle(Mat33 R)
{
Vec4 AaA(1.0,0.0,0.0,0.0); // using axis=(1,0,0), theta=0 as failsafe b/c X3D animation would fail with (0,0,0,0) ...
double cost=(R.M3Trace()-1.0)/2.0;
if(cost>1.0) cost=1.0; // safety first
if(cost<-1.0) cost=-1.0;
double theta = acos(cost);
double sintheta = sin(theta);
double n_norm=1.0;
if(fabs(sintheta)>EPS*EPS){
n_norm = 1.0/(2.0*sintheta);
}
AaA.vec[0] = n_norm*(R.mat[2][1]-R.mat[1][2]);
AaA.vec[1] = n_norm*(R.mat[0][2]-R.mat[2][0]);
AaA.vec[2] = n_norm*(R.mat[1][0]-R.mat[0][1]);
if(AaA.vec[0]*AaA.vec[0]+AaA.vec[1]*AaA.vec[1]+AaA.vec[2]*AaA.vec[2]<EPS) return Vec4(1.0,0.0,0.0,0.0);
AaA.vec[3] = theta;
return AaA;
}
/// Rotation matrix from axis and angle
inline Mat33 AxisAngle2Rot(Vec4 AxisAngle)
{
// normalize axis part, just in case
double norm=AxisAngle.vec[0]*AxisAngle.vec[0]+AxisAngle.vec[1]*AxisAngle.vec[1]+AxisAngle.vec[2]*AxisAngle.vec[2];
Mat33 rot;
norm=sqrt(norm);
if(norm>=EPS){ // safety first
AxisAngle.vec[0]/=norm; AxisAngle.vec[1]/=norm; AxisAngle.vec[2]/=norm;
double oneminuscos=1.0-cos(AxisAngle.vec[3]);
double sine=sin(AxisAngle.vec[3]);
double x=AxisAngle.vec[0]; double y=AxisAngle.vec[1]; double z=AxisAngle.vec[2];
rot.mat[0][0]+=oneminuscos*(x*x-1.0); rot.mat[0][1]=-z*sine+oneminuscos*x*y; rot.mat[0][2]=y*sine+oneminuscos*x*z;
rot.mat[1][0]=z*sine+oneminuscos*x*y; rot.mat[1][1]+=oneminuscos*(y*y-1.0); rot.mat[1][2]=-x*sine+oneminuscos*y*z;
rot.mat[2][0]=-y*sine+oneminuscos*x*z; rot.mat[2][1]=x*sine+oneminuscos*y*z; rot.mat[2][2]+=oneminuscos*(z*z-1.0);
}
return rot;
}
/// Rotation matrix from axis and angle
inline Mat33 AxisAngle2Rot(Vec3 &axis, double &angle)
{
Vec4 aangle(axis.vec[0],axis.vec[1],axis.vec[2],angle);
return AxisAngle2Rot(aangle);
}
inline bool Point_in_Ellipsoid(Vec3 &point, Vec3 &saxes, Vec3 &center, Mat33 &rot)
{
// first rotate point-to-center distance vector so that ellipsoid is aligned with coordinate system
Vec3 dist=rot.M3Transpose()*(point-center);
dist.vec[0]/=saxes.vec[0]; // x/a
dist.vec[1]/=saxes.vec[1]; // y/b
dist.vec[2]/=saxes.vec[2]; // z/c
if(dist*dist<=1.0) return true; else return false; // point is inside if (x^2/a^2 + y^2/b^2 + z^2/c^2 <= 1)
}
#define atan_a -0.012299380859105
#define atan_b 0.054082655552459
#define atan_c -0.11769677376706
#define atan_d 0.19402227554937
#define atan_e -0.33269718723178
#define atan_f 0.99998657415361
inline double fastatan(double x)
{
double arg=fabs(x);
double arg2=x*x;
if(arg<=1.0){
return copysign((((((atan_a*arg2+atan_b)*arg2+atan_c)*arg2+atan_d)*arg2+atan_e)*arg2+atan_f)*arg,x);
} else{
arg=1.0/arg;
arg2=arg*arg;
return copysign(pi/2-(((((atan_a*arg2+atan_b)*arg2+atan_c)*arg2+atan_d)*arg2+atan_e)*arg2+atan_f)*arg,x);
}
}
inline double fastatan2(double y, double x)
{
if(x>0.0) return fastatan(y/x);
if(y>=0.0) return fastatan(y/x)+pi;
return fastatan(y/x)-pi;
}
inline void UnitVec2ThetaPhi(Vec3 &v, double &theta, double &phi)
{
if(v.vec[2]>1.0) v.vec[2]=1.0; // safety first
if(v.vec[2]<-1.0) v.vec[2]=-1.0;
theta=acos(v.vec[2]);
phi=atan(v.vec[1]/v.vec[0]);
}
inline void Vec2ThetaPhi(Vec3 &v, double &theta, double &phi)
{
Vec3 r=v/v.V3Norm();
if(r.vec[2]>1.0) r.vec[2]=1.0; // safety first
if(r.vec[2]<-1.0) r.vec[2]=-1.0;
theta=acos(r.vec[2]);
phi=atan(r.vec[1]/r.vec[0]); // atan is 1/0 safe ;-)
}
inline void Vec2ThetaPhi(Vec3 &v, double &theta, double &phi, double &r)
{
r=v.V3Norm();
Vec3 rvec=v/r;
if(rvec.vec[2]>1.0) rvec.vec[2]=1.0; // safety first
if(rvec.vec[2]<-1.0) rvec.vec[2]=-1.0;
theta=acos(rvec.vec[2]);
phi=atan(rvec.vec[1]/rvec.vec[0]);
}
inline double VecDist2ThetaPhi(Vec3 &v, double r, double &theta, double &phi)
{
Vec3 rvec=v;
rvec.vec[2]/=r;
if(rvec.vec[2]>1.0) rvec.vec[2]=1.0; // safety first
if(rvec.vec[2]<-1.0) rvec.vec[2]=-1.0;
theta=acos(rvec.vec[2]);
phi=atan(rvec.vec[1]/rvec.vec[0]);
return rvec.vec[2];
}
inline double VecDist2Phi(Vec3 &v, double r, double &phi)
{
double cost=v.vec[2]/r;
if(cost>1.0) cost=1.0; // safety first
if(cost<-1.0) cost=-1.0;
phi=fastatan2(v.vec[1],v.vec[0]);
return cost;
}
inline double EllipsoidRmin(double &theta, double &phi, Vec3 &saxes)
{
// first rotate point-to-center distance vector so that ellipsoid is aligned with coordinate system
double sint=sin(theta);
Vec3 dist(sint*cos(phi),sint*sin(phi),cos(theta));
dist.vec[0]/=saxes.vec[0]; // x/a
dist.vec[1]/=saxes.vec[1]; // y/b
dist.vec[2]/=saxes.vec[2]; // z/c
// Now solve x^2/a^2+y^2/b^2+z^2/c^2=1/r^2 => r = sqrt(1/(x^2/a^2+y^2/b^2+z^2/c^2))
return 1.0/sqrt(dist*dist);
}
inline double EllipsoidRmin(double &theta, double &phi, Vec3 &saxes, Mat33 &rot)
{
// first rotate point-to-center distance vector so that ellipsoid is aligned with coordinate system
double sint=sin(theta);
Vec3 direction(sint*cos(phi),sint*sin(phi),cos(theta));
Vec3 dist=rot.M3Transpose()*direction;
dist.vec[0]/=saxes.vec[0]; // x/a
dist.vec[1]/=saxes.vec[1]; // y/b
dist.vec[2]/=saxes.vec[2]; // z/c
// Now solve x^2/a^2+y^2/b^2+z^2/c^2=1/r^2 => r = sqrt(1/(x^2/a^2+y^2/b^2+z^2/c^2))
return 1.0/sqrt(dist*dist);
}
inline double EllipsoidRmin(Vec3 &direction, Vec3 &saxes, Mat33 rot)
{
// first rotate point-to-center distance vector so that ellipsoid is aligned with coordinate system
Vec3 dist=rot.M3Transpose()*direction/direction.V3Norm();
dist.vec[0]/=saxes.vec[0]; // x/a
dist.vec[1]/=saxes.vec[1]; // y/b
dist.vec[2]/=saxes.vec[2]; // z/c
// Now solve x^2/a^2+y^2/b^2+z^2/c^2=1/r^2 => r = sqrt(1/(x^2/a^2+y^2/b^2+z^2/c^2))
return 1.0/sqrt(dist*dist);
}
// Based on <NAME>, "On the Ellipsoid and Plane Intersection Equation", Applied Mathematics 3, 1634 (2012)
// see page 1639
inline double Ellipsoid_Cross_Section(Vec3 &invsaxes2, Vec3 direction)
{
double a=direction*direction;
if(a>EPS*EPS){
a=1.0/a;
double c=direction.vec[0]*direction.vec[0]*(invsaxes2.vec[1]*invsaxes2.vec[2])+direction.vec[1]*direction.vec[1]*(invsaxes2.vec[0]*invsaxes2.vec[2])+direction.vec[2]*direction.vec[2]*(invsaxes2.vec[0]*invsaxes2.vec[1]);
// a*beta^2 + b*beta + c = 0
// beta_+/- = -b/2a +/- sqrt(b^2 - 4*a*c)/2a
// beta+ * beta_ = (-b + sqrt(b^2 - 4*a*c)) * (-b - sqrt(b^2 - 4*a*c))/(2a)^2 = (b^2 - (b^2 - 4ac))/4a^2 = 4ac/4a^2 = c/a
return pi/sqrt(c*a);
} else return 0.0;
}
inline double VectorAngle(Vec3 &a, Vec3 &b)
{
double theta;
double cosine=(a*b)/sqrt((a*a)*(b*b));
	if(cosine>1.0){ // the Cauchy-Schwarz inequality can be broken by computers ...
theta=0.0;
} else{
if(cosine<-1.0){
theta=pi;
} else theta=acos(cosine);
}
return theta;
}
/// Obtain rotation matrix mapping vector a to b direction
inline Mat33 RotAtoB(Vec3 &a, Vec3 &b)
{
Vec3 axis=a;
axis.V3Cross(b);
double angle=VectorAngle(a,b);
	if(axis*axis<EPS){ // vectors are collinear; two solutions here: everything stays as is (angle=0) or rotate pi around a vector perpendicular to a (and b)
if(fabs(fabs(angle)-pi)<EPS){
// find non zero component of a
if(fabs(a.vec[0])>EPS){
axis.vec[1]=1.0;
axis.vec[2]=1.0;
// solve for axis*a = 0
				// => a_x*axis_x+a_y*axis_y+a_z*axis_z = 0
// => axis_x = -(a_y+a_z)/a_x
axis.vec[0]=-(a.vec[1]+a.vec[2])/a.vec[0];
} else{
if(fabs(a.vec[1])>EPS){
axis.vec[0]=1.0;
axis.vec[2]=1.0;
axis.vec[1]=-(a.vec[0]+a.vec[2])/a.vec[1];
} else{
if(fabs(a.vec[2])>EPS){
axis.vec[0]=1.0;
axis.vec[1]=1.0;
axis.vec[2]=-(a.vec[0]+a.vec[1])/a.vec[2];
}
}
}
}
}
return AxisAngle2Rot(axis,angle);
}
inline double VectorCos(Vec3 &a, Vec3 &b)
{
double cosine=(a*b)/sqrt((a*a)*(b*b));
	if(cosine>1.0){ // the Cauchy-Schwarz inequality can be broken by computers ...
cosine=1.0;
} else{
if(cosine<-1.0) cosine=-1.0;
}
return cosine;
}
inline Vec3 Rot2AlphaBetaGamma(Mat33 &rot)
{
Vec3 result;
/* Z(a) Y(b) Z(c)
* ( ca sa 0) * ( cb 0 sb) * ( cc sc 0) ( ca sa 0) * ( cb*cc cb*sc sb) ( ca*cb*cc+sa*sc ca*cb*sc+sa*cc ca*sb)
* (-sa ca 0) * ( 0 1 0) * (-sc cc 0) = (-sa ca 0) * ( -sc cc 0 ) = (-sa*cb*cc-ca*sc -sa*cb*sc+ca*cc -sa*sb)
* ( 0 0 1) * (-sb 0 cb) * ( 0 0 1) ( 0 0 1) * (-sb*cc -sb*sc cb) ( -sb*cc -sb*sc cb )
*/
double cb=rot.mat[2][2];
if(cb>1.0) cb=1.0;
if(cb<-1.0) cb=-1.0;
double y=-rot.mat[1][2];
double x=rot.mat[0][2];
result.vec[0] = atan2(y,x);
result.vec[1] = acos(cb);
y=-rot.mat[2][1];
x=-rot.mat[2][0];
result.vec[2] = atan2(y,x);
return result;
}
#ifndef OpenCL
struct double4
{
double x,y,z,w;
double4(){}
double4(double v){ x = y = z = w = v; }
double4 operator*(const double4& other)
{
double4 tmp;
tmp.x = x*other.x;
tmp.y = y*other.y;
tmp.z = z*other.z;
tmp.w = w*other.w;
return tmp;
}
double4 operator*(const double& other)
{
double4 tmp;
tmp.x = x*other;
tmp.y = y*other;
tmp.z = z*other;
tmp.w = w*other;
return tmp;
}
double4& operator+=(const double4& other)
{
x += other.x;
y += other.y;
z += other.z;
w += other.w;
return *this;
}
double4& operator-=(const double4& other)
{
x -= other.x;
y -= other.y;
z -= other.z;
w -= other.w;
return *this;
}
double4& operator*=(double scalar)
{
x *= scalar;
y *= scalar;
z *= scalar;
w *= scalar;
return (*this);
}
double4& operator/=(double scalar)
{
x /= scalar;
y /= scalar;
z /= scalar;
w /= scalar;
return (*this);
}
};
inline double4 fabs(const double4& a)
{
	double4 tmp;
	// componentwise absolute value
	tmp.x = a.x < 0.0 ? -a.x : a.x;
	tmp.y = a.y < 0.0 ? -a.y : a.y;
	tmp.z = a.z < 0.0 ? -a.z : a.z;
	tmp.w = a.w < 0.0 ? -a.w : a.w;
	return tmp;
}
inline double4 operator+(const double4& a,const double4& b)
{
double4 tmp;
tmp.x = a.x + b.x;
tmp.y = a.y + b.y;
tmp.z = a.z + b.z;
tmp.w = a.w + b.w;
return tmp;
}
inline double4 operator-(const double4& a,const double4& b)
{
double4 tmp;
tmp.x = a.x - b.x;
tmp.y = a.y - b.y;
tmp.z = a.z - b.z;
tmp.w = a.w - b.w;
return tmp;
}
inline double4 operator*(const double4& a,const double& s)
{
double4 tmp;
tmp.x = a.x*s;
tmp.y = a.y*s;
tmp.z = a.z*s;
tmp.w = a.w*s;
return tmp;
}
inline double4 operator/(const double4& a,const double& s)
{
double4 tmp;
tmp.x = a.x/s;
tmp.y = a.y/s;
tmp.z = a.z/s;
tmp.w = a.w/s;
return tmp;
}
inline double4 cross(const double4& p0, const double4& p1)
{
double4 result;
result.w=0.0;
result.x=p0.y*p1.z-p0.z*p1.y;
result.y=p0.z*p1.x-p0.x*p1.z;
result.z=p0.x*p1.y-p0.y*p1.x;
return result;
}
inline double dot(const double4& p0, const double4& p1)
{
return p0.x*p1.x+p0.y*p1.y+p0.z*p1.z+p0.w*p1.w;
}
inline double4 normalize(const double4& a)
{
double norm=dot(a,a);
norm=1.0/sqrt(norm);
return a*norm;
}
typedef double double16[16];
#else // ifndef OpenCL
typedef cl_double4 double4;
typedef cl_double16 double16;
#endif
inline double4 create_double4(double a, double b, double c, double d)
{
double4 result;
result.x = a;
result.y = b;
result.z = c;
result.w = d;
return result;
}
inline double4 create_double4(Vec3 v)
{
double4 result;
result.x = v.vec[0];
result.y = v.vec[1];
result.z = v.vec[2];
result.w = 0.0;
return result;
}
inline double4 create_double4(double d)
{
double4 result;
result.x = d;
result.y = d;
result.z = d;
result.w = d;
return result;
}
inline double4 AxisAngle2Quaternion(Vec4& aa)
{
double4 result;
result.w=cos(aa.vec[3]/2.0);
double norm=sin(aa.vec[3]/2.0)/sqrt(aa.vec[0]*aa.vec[0]+aa.vec[1]*aa.vec[1]+aa.vec[2]*aa.vec[2]);
result.x=aa.vec[0]*norm;
result.y=aa.vec[1]*norm;
result.z=aa.vec[2]*norm;
return result;
}
inline Mat33 RotFromEigenvectors(Mat33 ev)
{
Mat33 rot;
// check if eigenvectors are orthogonal
#if DEBUG_LEVEL>2
cout << "Eigenvectors:\n#1 = (" << ev.ColumnVec3(0).V3Str(',') << ")\n#2 = (" << ev.ColumnVec3(1).V3Str(',') << ")\n#3 = (" << ev.ColumnVec3(2).V3Str(',') << ")\n";
#endif
bool no12=(fabs(ev.ColumnVec3(0)*ev.ColumnVec3(1))>EPS);
bool no13=(fabs(ev.ColumnVec3(0)*ev.ColumnVec3(2))>EPS);
bool no23=(fabs(ev.ColumnVec3(1)*ev.ColumnVec3(2))>EPS);
bool all_equal=false;
if(no12 || no13 || no23){
#if DEBUG_LEVEL>2
cout << "first eigenvector times second: " << ev.ColumnVec3(0)*ev.ColumnVec3(1) << " (" << (ev.ColumnVec3(0)-ev.ColumnVec3(1)).V3Norm() << ")\n";
cout << "first eigenvector times third: " << ev.ColumnVec3(0)*ev.ColumnVec3(2) << " (" << (ev.ColumnVec3(0)-ev.ColumnVec3(2)).V3Norm() << ")\n";
cout << "second eigenvector times third: " << ev.ColumnVec3(1)*ev.ColumnVec3(2) << " (" << (ev.ColumnVec3(1)-ev.ColumnVec3(2)).V3Norm() << ")\n";
#endif
bool diff12=((ev.ColumnVec3(0)-ev.ColumnVec3(1)).V3Norm()<EPS);
bool diff13=((ev.ColumnVec3(0)-ev.ColumnVec3(2)).V3Norm()<EPS);
bool diff23=((ev.ColumnVec3(1)-ev.ColumnVec3(2)).V3Norm()<EPS);
		if(!(diff12 || diff13 || diff23)){ // numerical issue (recalculating the respective cross product should resolve it)
if(no12) diff12=true;
if(no13) diff13=true;
if(no23) diff23=true;
}
if((unsigned int)(diff12+diff13+diff23)<=1){
Vec3 a,b;
unsigned int col=0;
#if DEBUG_LEVEL>1
cout << "WARNING: Eigenvectors ";
#endif
if(diff12){ // #1=#2
col=1;
a=ev.ColumnVec3(0);
b=ev.ColumnVec3(2);
#if DEBUG_LEVEL>1
cout << "#1 and #2";
#endif
} else{
if(diff13){ // #1=#3
col=2;
a=ev.ColumnVec3(0);
b=ev.ColumnVec3(1);
#if DEBUG_LEVEL>1
cout << "#1 and #3";
#endif
} else{ // diff23 (#2=#3)
col=2;
a=ev.ColumnVec3(0);
b=ev.ColumnVec3(1);
#if DEBUG_LEVEL>1
cout << "#2 and #3";
#endif
}
}
Vec3 new_ev=a.V3Cross(b);
ev.mat[0][col]=new_ev.vec[0];
ev.mat[1][col]=new_ev.vec[1];
ev.mat[2][col]=new_ev.vec[2];
#if DEBUG_LEVEL>1
		cout << " were not orthogonal";
#if DEBUG_LEVEL>2
		cout << ", new eigenvector #" << col+1 << " is: " << new_ev.V3Str(',') << "\n";
#else
		cout << ".\n";
#endif
#endif
} else all_equal=true;
}
// now determine rotation matrix
rot.M3Eye(); // start with identity matrix
if(!all_equal){
bool xz=false; bool yz=false; bool zz=false;
unsigned int zerocount=0;
Vec3 x=ev.ColumnVec3(0);
double r=x.V3Norm();
if(r>EPS) x/=r; else{ xz=true; zerocount++; }
Vec3 y=ev.ColumnVec3(1);
r=y.V3Norm();
if(r>EPS) y/=r; else{ yz=true; zerocount++; }
Vec3 z=ev.ColumnVec3(2);
r=z.V3Norm();
if(r>EPS) z/=r; else{ zz=true; zerocount++; }
if(zerocount<=1){
if(xz){
x=y;
x.V3Cross(z);
} else{
if(yz){
y=z;
y.V3Cross(x);
} else{
				if(zz){
					z=x;
					z.V3Cross(y); // z = x cross y
				}
}
}
rot.mat[0][0]=x.vec[0]; rot.mat[0][1]=y.vec[0]; rot.mat[0][2]=z.vec[0];
rot.mat[1][0]=x.vec[1]; rot.mat[1][1]=y.vec[1]; rot.mat[1][2]=z.vec[1];
rot.mat[2][0]=x.vec[2]; rot.mat[2][1]=y.vec[2]; rot.mat[2][2]=z.vec[2];
} else{
if(zerocount==2){
if(!xz){
rot=Vec2Rot(x,0);
} else{
if(!yz){
rot=Vec2Rot(y,1);
} else{
if(!zz){
rot=Vec2Rot(z,2);
}
}
}
}
}
}
	// Need to check the determinant being +1; if det = -1 the coordinate system ended up being left-handed ...
double determinant=rot.M3Det();
if(fabs(fabs(determinant)-1.0)>EPS){ // looks weird, but is correct b/c det(rot)=-1 is also acceptable here (recoverable)
cout << "Could not determine proper rotation matrix, det(rot)=" << determinant << ", which is not +1\n";
exit(2);
}
	if(determinant<0.0){ // left-handed coordinate system needs to be changed to right-handed
rot.mat[0][0]*=-1.0; // do this by flipping x around
rot.mat[1][0]*=-1.0;
rot.mat[2][0]*=-1.0;
#if DEBUG_LEVEL>2
cout << "Fixed improper rotation matrix\n";
#endif
}
#if DEBUG_LEVEL>2
cout << "rotation matrix:\n" << rot.M3Str() << "\n";
cout << "det(rot) = " << rot.M3Det() << "\n";
bool sing=false;
cout << "inverse rotation matrix:\n" << (rot.M3Inv(sing)).M3Str() << "\n";
cout << "rotated x: " << (rot*Vec3(1.0,0.0,0.0)).V3Str(',') << "\n";
cout << "rotated y: " << (rot*Vec3(0.0,1.0,0.0)).V3Str(',') << "\n";
cout << "rotated z: " << (rot*Vec3(0.0,0.0,1.0)).V3Str(',') << "\n";
#endif
return rot;
}
inline void BackSubstitute(Mat33 &A, Vec3& solution, unsigned l)
{
bool diagzero[3], columnzero[3];
for(unsigned int j=0; j<3; j++){
diagzero[j]=(fabs(A.mat[j][j])<=EPS);
		columnzero[j]=true; // assume the column is all zero until a non-zero element is found
		for(unsigned int k=0; k<3; k++) columnzero[j]&=(fabs(A.mat[k][j])<=EPS);
}
bool goty=false;
if(diagzero[2]){
if(!columnzero[2] && (l==2)){
if(diagzero[1]){
if(fabs(A.mat[1][2])>EPS){
solution.vec[2] = solution.vec[2]/A.mat[1][2]; // z-solution is uniquely defined
} else{ // element 02 is non-zero
if(fabs(A.mat[0][1])>EPS){ // y-solution is defined here and can compensate any z-solution (may as well be 1 then)
solution.vec[2]=1.0;
} else{ // y-solution is arbitrary
					if(diagzero[0]){ // x-solution is arbitrary, but z-solution is defined
solution.vec[2] = solution.vec[2]/A.mat[0][2];
} else solution.vec[2] = 1.0; // since x-solution exists it can compensate any z-solution (may as well be 1)
}
}
} else{ // unique y-solution exists
if(fabs(A.mat[1][2])>EPS){ // we care for z-solution existing but not really for y-solution, so let's try to eliminate the y-solution
// element 01 *must* exist (otherwise Gaussian elimination is not sorted)
// no matter if x-solution exists or not, y and z-solutions need to be the
// same for the first and the second row (and we can set y-solution to zero)
solution.vec[2] = solution.vec[1]/A.mat[1][2];
solution.vec[1] = 0.0;
goty=true;
} else{ // second row has nothing to do with z-solution which defines y-solution (and means z-solution is in first row)
solution.vec[1] = solution.vec[1]/A.mat[1][1];
goty=true;
if(fabs(A.mat[0][1])>EPS){
if(diagzero[0]){ // x-solution is arbitrary (has nothing to do with first row), hence y-solution solves unique z-solution
solution.vec[2] = (solution.vec[0] - solution.vec[1]*A.mat[0][1])/A.mat[0][2];
} else solution.vec[2] = 1.0; // x-solution can compensate any y and z-solution (which may as well be 1 then)
} else solution.vec[2] = 1.0; // z-solution is independent of y-solution, and no matter what the x-solution is the z-solution is either arbitrary or compensate (aka may as well be 1)
}
}
} else solution.vec[2]=0.0; // it does not matter what number we choose for z-solution, may as well get rid of it ...
} else solution.vec[2] = solution.vec[2]/A.mat[2][2]; // z-solution is uniquely defined
if(!goty){
if(diagzero[1]){
if(!columnzero[1] && (l==1)){ // y-solution is defined in first row and we care (b/c it's corresponding to the largest eigenvalue)
if(diagzero[0]){ // x-solution is arbitrary and has nothing to do with anything, hence y-solution is defined
solution.vec[1] = (solution.vec[0] - solution.vec[2]*A.mat[0][2])/A.mat[0][1];
} else{ // x-solution exists and can compensate for y-solution (which may as well be 1 then)
solution.vec[1] = 1.0;
}
} else solution.vec[1] = 0.0; // we can't be bothered
} else solution.vec[1] = (solution.vec[1] - A.mat[1][2]*solution.vec[2])/A.mat[1][1]; // y-solution is uniquely defined
}
if(diagzero[0]){ // x-solution can be anything
if(l==0) solution.vec[0]=1.0; else solution.vec[0]=0.0;
} else solution.vec[0] = (solution.vec[0] - A.mat[0][1]*solution.vec[1] - A.mat[0][2]*solution.vec[2])/A.mat[0][0];
}
inline bool M3equal(Mat33 A, Mat33 B, double error)
{
for(int i = 0; i < 3; i++){
if(fabs(A.mat[i][0]-B.mat[i][0])>error) return false;
if(fabs(A.mat[i][1]-B.mat[i][1])>error) return false;
if(fabs(A.mat[i][2]-B.mat[i][2])>error) return false;
}
return true;
}
inline double touch_sphere_sigma(Vec3 &r, Vec3 &saxes, Mat33 &rot, double rT)
{
if(rT<EPS) return EllipsoidRmin(r,saxes,rot);
// Minimization vectors
Vec3 V, CI, saxes2;
double t;
// Create lab frame version of A and B matrices (both are symmetric matrices R * L_A/B * R^T)
// A/B_ij=sum_k L_A/B_k*R_ik*R_jk
// move into A ellipsoid frame of reference
// doing so:
// - replaces 36 multiplication and 12 additions with 36 multiplications and 24 additions once
// - saves 4 multiplications and 6 additions in loop
	Vec3 z; // multiply with the transposed (inverse) rotation matrix
z.vec[0] = r.vec[0]*rot.mat[0][0]+r.vec[1]*rot.mat[1][0]+r.vec[2]*rot.mat[2][0];
z.vec[1] = r.vec[0]*rot.mat[0][1]+r.vec[1]*rot.mat[1][1]+r.vec[2]*rot.mat[2][1];
z.vec[2] = r.vec[0]*rot.mat[0][2]+r.vec[1]*rot.mat[1][2]+r.vec[2]*rot.mat[2][2];
double r2=r*r;
double d=r2/(saxes.vec[0]*saxes.vec[1]*saxes.vec[2]);
double e=d*d;
double rT2 = rT*rT*e;
saxes2.vec[0] = saxes.vec[0]*saxes.vec[0]*e;
saxes2.vec[1] = saxes.vec[1]*saxes.vec[1]*e;
saxes2.vec[2] = saxes.vec[2]*saxes.vec[2]*e;
z*=d;
double lambda=0.5;
double xlx=1.0; // x=lambda/(1-lambda) -> do manual calculation with value above
double Var = 1.0; // trying different forms found 6 loops gives 7 figs for Fx
double VAV, VBV, det, Vz, V22;
while(Var > 1E-6){ // loop until variance in distance is sufficiently small
Var = lambda; // keep lambda around but do CI matrix in terms of x (scale CI by 1/(1-lambda))
//Populate CI matrix
CI.vec[0] = xlx*rT2+saxes2.vec[0];
CI.vec[1] = xlx*rT2+saxes2.vec[1];
CI.vec[2] = xlx*rT2+saxes2.vec[2];
// Solve z = CI*V for V using inverse => V=CI^-1*z
V.vec[0] = CI.vec[1]*CI.vec[2]*z.vec[0];
V.vec[1] = CI.vec[0]*CI.vec[2]*z.vec[1];
V.vec[2] = CI.vec[0]*CI.vec[1]*z.vec[2];
// VAV=V*A_LF*V (uses fact that A_LF is symmetric)
V22=V.vec[2]*V.vec[2];
VAV = V.vec[0]*V.vec[0]*saxes2.vec[0]+V.vec[1]*V.vec[1]*saxes2.vec[1]+V22*saxes2.vec[2];
// denominator=V*B_LF*V (uses fact that B_LF is symmetric)
VBV = (V.vec[0]*V.vec[0]+V.vec[1]*V.vec[1]+V22)*rT2;
//Calculate minimization parameter lambda
/* if(VBV < EPS*EPS){
cout << "ERROR: Denominator between oids in touch is too close to zero (" << VBV << ").\n";
exit(3);
}*/
xlx = sqrt(VAV/VBV); // independent of z-scaling (and determinant) -> also, interesting note: the sqrt is better than anything else in terms of speed and convergence
lambda = xlx/(1.0+xlx);
Var -= lambda;
Var *= Var;
}
//Reconstruct CI and run a final iteration once converged
CI.vec[0] = xlx*rT2+saxes2.vec[0];
CI.vec[1] = xlx*rT2+saxes2.vec[1];
CI.vec[2] = xlx*rT2+saxes2.vec[2];
t=CI.vec[1]*CI.vec[2];
det = CI.vec[0]*t;
V22=2.0*z.vec[2];
Vz=z.vec[0]*z.vec[0]*t+CI.vec[0]*CI.vec[2]*z.vec[1]*z.vec[1]+CI.vec[0]*CI.vec[1]*z.vec[2]*z.vec[2];
// return sigma
return sqrt(r2*det/(lambda*Vz));
}
#endif
n, a, b = map(int, input().split())
MOD = 10 ** 9 + 7
# Number of ways to make a bouquet with no restriction (any non-empty subset): 2^n - 1
wa = pow(2, n, MOD) - 1
# Number of ways to make a bouquet of exactly a (or b) flowers: C(n, a) mod MOD,
# computing the modular inverse of the denominator via Fermat's little theorem
# val1 = comb(n, a, exact=True)
# val2 = comb(n, b, exact=True)
x = 1
y = 1
for i in range(a):
    x = (x * (n - i)) % MOD
    y = (y * (i + 1)) % MOD
val1 = (x * pow(y, MOD - 2, MOD)) % MOD
x = 1
y = 1
for i in range(b):
    x = (x * (n - i)) % MOD
    y = (y * (i + 1)) % MOD
val2 = (x * pow(y, MOD - 2, MOD)) % MOD
# subtract the two forbidden bouquet sizes and normalize into [0, MOD)
ans = (wa - val1 - val2) % MOD
print(ans)
import torch
import torch.nn.functional as F
import torch.nn as nn
from learning.modules.downsample_map.downsample_res import DownsampleResidual
from learning.modules.blocks import DenseMlpBlock2
HIDDEN_SIZE = 64
downsample_factor = 2
class EgoMapToActionTriplet(nn.Module):
def __init__(self, map_channels=1, map_size=32, other_features_size=120):
super(EgoMapToActionTriplet, self).__init__()
self.map_channels = map_channels
# Downsample the map to get something suitable for feeding into the perceptron
self.downsample = DownsampleResidual(map_channels, factor=downsample_factor)
map_size_s = int(map_size / downsample_factor)
# Apply the perceptron to produce the action
map_size_flat = map_size_s * map_size_s * map_channels
mlp_in_size = map_size_flat# + other_features_size
self.mlp = DenseMlpBlock2(mlp_in_size, HIDDEN_SIZE, 4)
self.dropout = nn.Dropout(0.5)
def init_weights(self):
self.downsample.init_weights()
self.mlp.init_weights()
def forward(self, maps_r, other_features):
# TODO: Log this somewhere
if self.map_channels < maps_r.size(1):
maps_r = maps_r[:, 0:self.map_channels]
maps_s = self.downsample(maps_r)
map_features = maps_s.view([maps_s.size(0), -1])
#other_features_zero = torch.zeros_like(other_features)
#mlp_in_features = torch.cat([map_features, other_features_zero], dim=1)
mlp_in_features = map_features
mlp_in_features = self.dropout(mlp_in_features)
actions_pred = self.mlp(mlp_in_features)
# this must be in 0-1 range for BCE loss
actions_pred[:,3] = torch.sigmoid(actions_pred[:,3])
        return actions_pred
def merge_k(arr_k: list):
    """Merge k sorted lists by divide and conquer, pairing halves recursively."""
    if len(arr_k) == 1:
        return arr_k[0]
    mid = len(arr_k) // 2
    arr1 = merge_k(arr_k[:mid])
    arr2 = merge_k(arr_k[mid:])
    merged = merge_2(arr1, arr2)
    if DEBUG:
        print(f"arr1: {arr1}")
        print(f"arr2: {arr2}")
        print(f"merged: {merged}")
    return merged
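# NOTE: merge_2 and DEBUG are used above but not defined in this fragment.
# A minimal sketch of a two-way merge of sorted lists, assuming that is what
# the original merge_2 does:
DEBUG = False

def merge_2(left: list, right: list) -> list:
    """Merge two sorted lists into one sorted list (hypothetical helper)."""
    merged = []
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    merged.extend(left[i:])  # append whatever remains of either list
    merged.extend(right[j:])
    return merged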
/**
* In-game Score Label. Former {@code Score} class.
*/
public final class ScoreLabel extends VisTable implements Comparable<Long>, Disposable {
private final AtomicLong score;
private final AtomicLong tmp;
private final VisLabel lblScore;
private boolean bool;
private boolean valid;
    private float elapsed;
private Animation<TextureRegion> tutGif;
public ScoreLabel() {
super(true);
super.setFillParent(true);
this.valid = true; // default score value to 0
// They default to 1
this.tmp = new AtomicLong(0);
this.score = new AtomicLong(0);
// --
this.tutGif = GifDecoder.loadGIFAnimation(Animation.PlayMode.LOOP,
Gdx.files.internal(TUTORIAL_GIF).read());
this.lblScore = new VisLabel(score.toString(), Color.BLACK);
this.lblScore.setFontScale(2); // FIXME: 18 Nov 2018
super.align(Align.center).add(lblScore).row();
// super.align(Align.center).add(new VisLabel("Test score", Color.BLACK));
zeroScore();
super.setVisible(true); // TODO: 13 Nov 2018 Really needed?
}
/**
* Update.
*
* @param dt Delta time
*/
@Override
public void act(float dt) {
// FIXME: 28 Dec 2018
        this.elapsed += dt / 2f;
        if (elapsed > 1000) {
            this.elapsed = 0;
}
// --
if (!valid) {
this.score.addAndGet((long) Math.pow(2, tmp.get()));
this.tmp.set(0);
this.valid = true;
}
long tmp = Long.parseLong(lblScore.getText().toString());
if (tmp != score.get()) {
            if (bool = !bool) { // intentional assignment: advance the displayed score only every other frame
this.lblScore.setText(Long.toString(tmp + 1));
}
}
super.act(dt);
}
@Override
public void draw(Batch batch, float parentAlpha) {
if (score.get() == 0) {
            TextureRegion tex = tutGif.getKeyFrame(elapsed, true);
batch.draw(tex,
(SCREEN_WIDTH - tex.getRegionWidth()) / 2f,
(SCREEN_HEIGHT - tex.getRegionHeight()) / 2f
);
return;
}
super.draw(batch, parentAlpha);
}
/**
* @param delta change in score
*/
public void updateScore(int delta) {
// TODO: 18 Nov 2018
// this.tmp.addAndGet(delta);
// this.valid = false;
// --
this.score.addAndGet(delta);
}
/**
* Reset the value of the score to zero. Former {@code reset()} method. Reset already in use by super class.
*/
public void zeroScore() {
this.tmp.set(0);
this.score.set(0);
this.lblScore.setText(0 + "");
}
/**
* Compares two {@code long} values numerically.
*/
@Override
public int compareTo(final Long other) {
final Long score = this.score.get();
return (score < other) ? -1 : ((score.equals(other)) ? 0 : 1);
}
@Override
public String toString() {
return score.toString();
}
@Override
public boolean equals(final Object other) {
if (this == other) return true;
if (!(other instanceof ScoreLabel)) return false;
ScoreLabel score1 = (ScoreLabel) other;
return score.equals(score1.score);
}
@Override
public int hashCode() {
return score.hashCode();
}
public long getScore() {
return this.score.get();
}
@Override
public void dispose() {
// for (TextureRegion texture : tutGif.getKeyFrames()) {
// texture.getTexture().dispose();
// }
}
}
package gov.med.va.innovations.domain;
import gov.med.va.innovations.service.EventManager;
import java.io.Serializable;
public abstract class VistaTO implements Serializable {
private static final long serialVersionUID = -7656569560070101993L;
private String errorMessage;
private boolean htmlApplied;
public String getErrorMessage() {
return errorMessage;
}
public void setErrorMessage(String errorMessage) {
this.errorMessage = errorMessage;
}
public abstract Integer getListLength();
public abstract EventManager.EventCode getEventCode();
public void setHtmlApplied(boolean htmlApplied) {
this.htmlApplied = htmlApplied;
}
public boolean isHtmlApplied() {
return htmlApplied;
}
}
Birefringence control for ion-exchanged channel waveguides at 1.55-μm wavelength
We show at 1.55-μm wavelength that the waveguide birefringence of ion-exchanged channel waveguides in glass can be broadly tuned by a potassium and silver double-ion exchange. Two different potassium and silver double-ion-exchange processes are used to make surface waveguides with negligible waveguide birefringence. This is of crucial importance in making devices for dense wavelength division multiplexing (DWDM) systems. The dependence of the waveguide birefringence on the channel width is also reported.
// GetJobIdAndStatus takes a queue id and a job name and returns the job id,
// the job status string, and an error, based only on the mocked Jenkins job status.
func (mock *mock) GetJobIdAndStatus(queueid string, jobname string) (string, string, error) {
for _, job := range mock.jobs {
if job.name == jobname {
qid, err := strconv.ParseInt(queueid, 10, 0)
if err != nil {
break
}
job.RLock()
if jobid, exist := job.queue[int(qid)]; exist {
job.RUnlock()
return string(jobid), SUCCESS, nil
}
job.RUnlock()
}
}
return "", "", errors.New("No jobid found")
}
/*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package uniltiranyu.examples.labyrinth.multeseo;
import uniltiranyu.examples.labyrinth.LabyrinthPercept;
import uniltiranyu.examples.labyrinth.LabyrinthUtil;
/**
*
* @author Jonatan
*/
public class MultiAgentLabyrinthPercept extends LabyrinthPercept{
public MultiAgentLabyrinthPercept( int value ) {
super( value );
int flag = (1<<4);
set( LabyrinthUtil.TREASURE, (value & flag)==flag );
}
@Override
public void rotate( ){
super.rotate();
Object f = get(LabyrinthUtil.AGENT[0]);
for( int i=0; i<3; i++ ){
set( LabyrinthUtil.AGENT[i], get(LabyrinthUtil.AGENT[i+1]) );
}
set( LabyrinthUtil.AGENT[3], f );
}
}
/* Convert an int to four-byte integer number in GDS format.
*/
void print_int4 (int n)
{
PUTBYTE (n >> 24);
PUTBYTE (n >> 16);
PUTBYTE (n >> 8);
PUTBYTE (n);
}
#include "pch.h"
#include "BirdObject.h"
BirdObject::BirdObject(VulkanCore* core)
:QuadObj(core)
{
Texture.reset(new VulkanTexture2D("assets/textures/flappyBird/bird.png", core));
SetScale(0.5);
}
BirdObject::~BirdObject()
{
}
void BirdObject::OnUpdate(float deltaTime)
{
}
Customizable FPGA-Based Hardware Accelerator for Standard Convolution Processes Empowered with Quantization Applied to LiDAR Data
In recent years there has been an increase in the number of research works and developments in deep learning solutions for object detection applied to driverless vehicles. This application benefited from the growing trend of innovative perception solutions, such as LiDAR sensors. Currently, this is the preferred device to accomplish those tasks in autonomous vehicles. There is a broad variety of research works on models based on point clouds, which stand out for being efficient and robust in their intended tasks, but they are also characterized by point cloud processing times greater than the minimum required, given the risky nature of the application. This research work provides the design and implementation of a hardware IP optimized for computing convolutions, the rectified linear unit (ReLU), padding, and max pooling. This engine was designed to enable the configuration of features such as the feature map size, filter size, stride, number of inputs, number of filters, and the number of hardware resources required for a specific convolution. Performance results show that, by resorting to parallelism and a quantization approach, the proposed solution can reduce the amount of FPGA logic resources by 40 to 50% and enhance the processing time by 50%, while maintaining the deep learning operation accuracy.
Introduction
The focus on research and development of intelligent systems has been growing, with different technologies enabling different applications in a variety of complex systems. Concerning autonomous vehicles, the main motivation centers on reducing human interference while driving, thereby reducing the likelihood of road accidents caused by human error and improving road safety. This demands a highly detailed perception of the objects surrounding every vehicle, enriching the perception capabilities of these vehicles and allowing an efficient capture of information about the localization, classification, and tracking of such objects.
In this scope, LiDAR sensors have been highlighted as a technology that describes the vehicle's surroundings by means of point cloud data, being exploited in the literature both as an augmentation to RGB cameras and as a standalone solution. Currently, deep learning models are widely used to process the point cloud data provided by LiDAR sensors and to extract relevant information for object detection. These models are built from layers like convolution, pooling, and fully connected layers. Convolution layers perform a vital role in how a CNN architecture operates, being responsible for around 90% of all computation. Thus, this section focuses on presenting a brief description of 3D point cloud model architectures and of hardware accelerator developments already implemented.
Deep Learning for 3D Point Cloud
Recent works on 3D point cloud models present a sequential architecture that is split into three stages: (1) data representation, (2) feature extraction, and (3) detection modules. Stage (1) processes the data from the LiDAR sensor and organizes it into a structure that can be easily read and processed by the following stage. In the literature, those structures are created as "voxels", "frustums", "pillars", or 2D projections. Stage (2) performs the feature extraction process for a given point cloud. The last stage (3) is defined by its output values, which lead to possible object detections. Those outputs describe the probability of object classification, bounding box regression, and object orientation.
These models have in common the conversion of the input into a pseudo-image in the first stage, (1) data representation, which means that 2D representations are used in the subsequent convolution layers. The convolution operation is a fundamental process for feature extraction, supporting object classification and bounding box regression.
Convolution Implementations in FPGAs
Convolutional layers present in CNN architectures introduce high computational costs due to the extensive number of arithmetic operations, parameter sharing, and memory accesses. These issues not only increase the amount of hardware resources required but also prevent some complex CNNs from achieving their full potential, as they are not able to output inferences in a real-time manner. Therefore, migrating convolutional blocks to hardware aims at mitigating those problems by providing a hardware architecture optimized for these operations and, consequently, one that is more reliable, more efficient, and less time-consuming with fewer resources. Research works provide advanced architectures that take advantage of parallel computation.
Sliding Window Dataflow
In the architecture proposed in work , the processing element (PE) unit contains a MAC (multiply-and-accumulate) unit. Besides the MAC unit, PE blocks hold on-chip memories for input data, weight values, and output results. The architecture proposed in work presents parallel multipliers to compute all output products in a single clock cycle, and an adder tree to aggregate all outcomes. The adder tree is predefined to hold a fixed number of multiply operations, which means it is limited to a certain convolution process with respect to filter size. Both works provide hardware blocks optimized for processing time. However, these solutions rely on redundant on-chip memory accesses, promoting high energy consumption.
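For reference, the sliding-window dataflow these works accelerate corresponds to the textbook direct convolution below; this NumPy sketch is illustrative and is not the cited architectures themselves.

import numpy as np

def direct_conv2d(fm, w, stride=1):
    # fm: (H, W) input feature map, w: (K, K) filter
    K = w.shape[0]
    h_out = (fm.shape[0] - K) // stride + 1
    w_out = (fm.shape[1] - K) // stride + 1
    out = np.zeros((h_out, w_out))
    for i in range(h_out):
        for j in range(w_out):
            # each output reuses a K x K window of the input, which is what
            # makes redundant on-chip buffer reads an issue in this dataflow
            out[i, j] = np.sum(fm[i*stride:i*stride+K, j*stride:j*stride+K] * w)
    return out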
Rescheduled Dataflow Optimized for Energy Efficiency
In work , a processing element features a multiply and an accumulate operation (MAC unit). The input feature map data and the weight data needed for the convolution process are loaded from off-chip memory into on-chip memory. Since the on-chip memory is directly connected to the processing units, memory accesses require low energy, as data transfer is faster than between off-chip and on-chip memories. Data present in on-chip memory are never discarded, reducing accesses to off-chip memory and decreasing system latency. After finishing the convolution process, the output values are sent to an on-chip memory reserved for output values. If necessary, output data stored in the output on-chip memory are transferred to off-chip memory for data analysis.
Optimization Methods
Deep learning algorithms are usually implemented in software using 32 bit floating-point values (FP32). Migrating a deep learning algorithm to an ASIC or an FPGA calls for a bit-width reduction, which is possible using the quantization technique . A quantized model and a non-quantized model execute the same operations; however, a quantized model with reduced bit width uses less memory and allows more operations to be executed per cycle. This memory reduction allows a more compact model representation, which leads to a better deployment on a hardware platform. For a hardware implementation, the intention is to convert 32 bit floating-point values to 16/8/4 bit fixed-point (INT) values . The bit reduction may lead to a considerable accuracy gap relative to full-precision models, as suggested by .
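As an illustration of the FP32-to-INT8 conversion described here, the sketch below applies a generic symmetric linear quantization; it is not the exact scheme used by the cited works.

import numpy as np

def quantize_int8(weights_fp32):
    # symmetric linear quantization: map [-max|w|, +max|w|] onto [-127, 127]
    scale = np.max(np.abs(weights_fp32)) / 127.0
    q = np.clip(np.round(weights_fp32 / scale), -127, 127).astype(np.int8)
    return q, scale

def dequantize(q, scale):
    return q.astype(np.float32) * scale

w = np.random.randn(3, 3).astype(np.float32)
q, s = quantize_int8(w)
print("max abs error:", np.max(np.abs(w - dequantize(q, s))))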
Therefore, it is necessary to achieve a trade-off regarding model accuracy, model parameters, and hardware (HW) performance. The work in presents a method that takes full advantage of a DSP block for 8 bit quantization. However, a trade-off between accuracy and inference time might be required and applied whenever possible; therefore, this study provides insights into the model degradation for various model depths, i.e., numbers of layers.
Convolution Hardware-Based Block
The proposed IP block was designed taking into consideration the developments in deep learning models for object detection over the last five years. Our work, based on the rescheduled dataflow exploited in work , provides a different implementation, since we create an IP capable of computing not only convolution but also the rectified linear unit, padding, and max pooling, which can be configured by simply changing its parameters and adapted to different CNN architectures. Besides that, quantization was applied to each weight value, which reduces the bit width, enables parameter sharing, and decreases DSP resource usage. Therefore, the parameters and the range of values that might change from model to model were identified, in order to tailor the block for any desired architecture whenever required.
Block Architecture
As Figure 1 depicts, the architecture of the convolution block proposed in this work comprises three distinct modules: processing element, rectified linear unit, and max pooling. The convolution block has six parameters that provide different architecture configurations. Five of the six parameters are related to the theoretical convolution process. The ability to change these six parameters provides an advantage over other works, since different configurations are possible. We defined the number and set of configurable parameters according to the convolution layers implemented in the literature for 3D object detection. These parameters are as follows: (1) feature map size, (2) weight/filter/kernel size, (3) stride value, (4) padding value, (5) maxpool enable/disable, and (6) number of DSP blocks per convolution block. Inside the convolution block, in Figure 1, processing data flow through the modules sequentially, i.e., the PE output is forwarded to the following module, ReLU, which after performing its operations forwards its output to the next module, maxpool, whenever parameter (5) is enabled. A "controller" module ensures precise BRAM address management, providing an ordered data transfer from a block RAM to a convolution block.
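To make the parameter set concrete, the sketch below captures the six configuration knobs in a small Python dataclass; the names are illustrative and do not correspond to the IP's actual port names.

from dataclasses import dataclass

@dataclass
class ConvBlockConfig:
    fm_size: int            # (1) input feature map width/height
    kernel_size: int        # (2) weight/filter/kernel size
    stride: int = 1         # (3) stride value
    padding: int = 0        # (4) padding value
    maxpool: bool = False   # (5) max pooling enable/disable
    dsp_per_block: int = 9  # (6) DSP blocks allocated to this convolution block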
Processing Element
The processing element module is considered a low-level block inside the proposed architecture. All convolution processing, meaning multiply and accumulate operations, is carried out by the PE module. At the same clock instant, three new values are fed to the input ports of each PE module, as Figure 2 illustrates: (1) a feature map value, (2) a filter (weight) value, and (3) the previous PE output value. We exploit DSP blocks to reduce the amount of resources required, as DSP templates provide a favorable trade-off between configurability and resource usage. Flexibility during architecture implementation is ensured, as every DSP signal and parameter is changeable during instantiation; using any other of the three types of inference would lead to lower flexibility and higher resource consumption.
Memory Access and Dataflow
In order to reduce the limitations of the sliding window approach discussed in the state of the art, namely memory accesses and redundant data, our solution proposes an approach distinct from the research works , inspired by the research work . The proposed architecture follows sequential data processing, meaning that each FM value is fed one by one to the processing module. This mechanism ensures that the system architecture only changes when applying filters with different dimensions, providing a scalable and stable architecture.
The dependency data flow chart in Figure 3 illustrates graphically how each outcome from a multiply operation must be connected to other multiply operations, where: (1) F_xx refers to input FM data; (2) W_xx refers to weight/filter/kernel data; (3) O_xx refers to output FM data; (4) for each vertical line, with 9 values corresponding to the filter size, all data are computed simultaneously; (5) each vertical line is processed with one clock cycle of delay; (6) black arrows refer to the input data of the next processing element, added to the corresponding multiply outcome. This architecture provides a modular configuration, meaning that changing the input FM dimensions or filter dimensions leads to easy reconfiguration without losing architecture integrity while maintaining the correct convolution function. Equation (1) presents the required processing time as a function of the characteristics of the input data, namely the FM size (FM), the number of input FMs (num.FM), the level of parallelism required (Num.PEs), and the board clock (ClockFreq.).
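Equation (1) itself is not reproduced here; a plausible reconstruction from the definitions just given, assuming one FM value is consumed per clock cycle and that the PEs parallelize this work, is $t = (FM^2 \cdot num.FM)/(Num.PEs \cdot ClockFreq.)$ (1). This form follows from the surrounding description and is an assumption rather than the verbatim published formula.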
As Figure 4 depicts, for a 3 × 3 filter the processing element module is built with nine DSP blocks. As illustrated, every N DSP blocks a ShiftRAM is placed, N being equal to the filter size (3 in the above example). Each ShiftRAM introduces a predetermined delay which serves as synchronization for data processing in the next DSP block. The predetermined delay depends on the feature map and filter dimensions; the delay value is obtained by subtracting the filter dimension from the FM dimension. At the end of this section, a different configuration is presented for quantized models, for which DSP block and ShiftRAM usage decreases: due to the bit-width reduction, it is possible to have two multiplications and one accumulate operation in a single DSP block.
Board Resources-Driven Architecture
To provide a convolution block that can be tailored for a certain application, three new configurable parameters were added, which influence the number of PE modules processing simultaneously. These parameters are as follows: (1) DSPs available, (2) BRAMs available, and (3) BRAM memory bits. Parameters (2) and (3) emerge from memory limitations, allowing the user to specify the amount of memory available for a set of convolution blocks.
Considering hardware resource limitations as a constraint of many CNN hardware implementations, parameter (1), DSP availability, is added to indicate the maximum number of DSP blocks that can be distributed through the PE components of our convolution block. In order for our block to automatically adapt its architecture to the resources available on the target board, parameters (2) and (3) are considered for specifying the number of filters that can be processed simultaneously. This ensures that the number of convolution blocks is limited by the memory available. The minimum memory required for one filter application is given by the output FM dimension and the output data bit width. Equations (2) and (3) provide the number of parallel filters and the number of PE modules that can be instantiated:
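The two equations are not reproduced here; plausible reconstructions from the constraints just described, which are assumptions rather than the verbatim expressions, are $Num.Filters = \lfloor (BRAM_{available} \cdot BRAM_{memory\,bit}) / (OutFM^2 \cdot OutBitWidth) \rfloor$ (2) and $Num.PEs = \lfloor DSP_{available} / (Num.Filters \cdot Kernel^2) \rfloor$ (3), i.e., the available memory bounds the number of parallel filters, and the available DSP blocks are then split evenly among them.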
Filters Iteration-Control Module
Each convolution block only has access to the values corresponding to a single filter, meaning that, for the proposed convolution block architecture, the number of blocks required to instantiate and to ensure correct parallel operation matches the number of filters to be applied simultaneously. As illustrated in Figure 5, all convolution blocks are instantiated inside a layer block, which has a state machine that controls each stage of data processing. Each stage has distinct functionalities, from reading memory from input BRAMs, to data transfer to lower-level modules, to writing memory to output BRAMs.
As Figure 5 presents, in the idle stage (1), a reset is performed on all internal registers, ensuring the transition to the second stage, which loads weights from BRAM into the PE modules. The state machine remains in the load weights stage (2) until all weight values are transferred to the PE modules. Data transfer ends after Kernel_size*Kernel_size clock cycles. In the load/process FM stage (3), the convolution process begins, and all FM data are transferred and processed one by one, as mentioned at the beginning of the design section. The final stage (4) indicates that a new FM was generated, with the corresponding output data in memory.
Optimization Methods
This section presents two methods for optimizing the convolution operations of the proposed convolution block architecture by including features such as parameter sharing and quantization. The quantization process is considered an efficient and fundamental approach for compressing such models when targeting resource-constrained devices, such as edge devices. Due to the input feature map and weight bit-width reduction performed by the quantization technique, from 32 bit floating point to 8 bit fixed point, the previously described architecture had to be updated to handle the new data format.
Architecture Reconfiguration with 8 bit Quantization
In research work , two parallel MAC operations compute two dot products. Implementing these two MAC operations requires the use of input port D of the DSP48Ex block. For the two multiply operations, the following equation is applied: P = (A·B + D·B) + C = (A + D)·B + C. The value that port A receives is arithmetically left-shifted by 18 bits. Data in the D register are stored in the least significant bit positions, and data in the A register are left-shifted by 18 bits, ensuring that the outcome of the pre-adder module does not lose any weight value in each computation.
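The packing trick can be reproduced in plain integer arithmetic: left-shifting one operand by 18 bits lets a single wide multiplier return two independent products, as in the sketch below (illustrative only; it assumes unsigned 8 bit operands, whereas signed operands require the correction handling implied by the shift scheme above).

def packed_two_macs(a, d, b):
    # a, d: two 8-bit weights; b: shared 8-bit activation
    # (a << 18) + d mimics the DSP48 pre-adder input (A << 18) + D
    packed = ((a << 18) + d) * b   # one multiply computes both products
    low = packed & 0x3FFFF         # d*b sits in the low 18 bits (max 255*255 < 2^18)
    high = packed >> 18            # a*b sits above bit 18
    return high, low

print(packed_two_macs(7, 5, 11))  # -> (77, 55)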
As Figure 6 illustrates, the reconfigured architecture has, for a single DSP block, two MAC modules, reducing shift RAM usage since more DSP blocks are directly connected. As Figure 6 presents, four DSP blocks have a direct connection, which corresponds to the filter/kernel dimension (in this case, a 4 × 4 filter is applied). The proposed architecture needs to ensure that the outcomes from the MAC modules are correctly sent to the following DSP blocks. In the case of a direct connection, each result is sent directly to the C input port. This next DSP block receives as input the first weight value of the following four consecutive weight values. After the first four weight values, represented in blue, are processed, the results from the last MAC module are sent to the first shift RAM. Output data from a shift RAM have the same delay as in the first proposed architecture, which varies with the weight/filter/kernel dimension, the FM dimension, and the stride value. The shift RAM block receives one value that holds two different results, corresponding to the blue and red DSP block computations. For a correct accumulation in the first red DSP block, the output data from the shift RAM need to be right-shifted by 8 bits.
Implementation
All code was implemented in Verilog, with all the modules previously described implemented on the programmable logic (PL). To validate the implemented hardware IP, after test bench validation through behavioral and timing simulations, it is necessary to validate the IP on an FPGA board. The tests presented in the results section were deployed on a Zybo Z7 Zynq-7000 board . To evaluate the resources consumed by the proposed IP, an input image of 252 × 252 was used, a typical size widely adopted in deep learning models. To build an entire CNN architecture with our convolution IP, it is only required to instantiate the correct number of convolution blocks while ensuring that the data flow is performed correctly.
Using our IP interface, the user is able to configure each convolution parameter at design time. This means that, for the application of the module in a real CNN deployment, it is mandatory to determine the correct number of convolution layers and each parameter value before implementing it on an FPGA. As found in the literature, a major problem regarding CNN implementations in hardware centers on resource limitations, such as DSP blocks and memory. Considering that constraint, three parameters (DSPs available, BRAMs available, and BRAM memory bits), which are used to define the number of filters processed at the same time, were added. Figure 7 and Table 1 present the resources required for implementing a convolution with a 3 × 3 filter on a 252 × 252 image using only one PE block (9 DSP blocks) with a 100 MHz clock source. The figure shows the resources required for the IP (design_1_layer_2_0_0_synth_1) at the bottom. The total of resources required for managing the ARM processor, the DMA module, the blocks of RAM, and our IP is displayed as impl_1. The figure also illustrates how the resources are distributed on the FPGA device. The resources related to the implemented IP are highlighted in red, inside the purple square box. The other resources are related to the AXI modules, the DMA controller, the BRAMs, and the processing system.
Results
The tests presented in this section were obtained with each image loaded onto an SD card connected to the Zybo board. Using the ARM processor provided by the Zybo Z7 board , the DMA controller was configured to store the input image data in DDR memory. Thus, before the convolution process begins, the DMA controller sends the input images to the RAM blocks that are directly connected to the proposed IP.
Generic Convolution
To evaluate the correct functionality of the convolution block, different filters were applied to a set of images, changing the values of the stride, padding, and maxpool parameters. The dimensions of the input FM are 252 × 252, with a 3 × 3 filter applied. Figure 8 depicts the outputs from applying the same 3 × 3 sharpen filter using strides of 1 and 3, resulting in outputs of 250 × 250 and 84 × 84, respectively.
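The output dimensions quoted above follow from the standard convolution output-size formula, as the quick check below confirms.

def conv_out_size(fm, k, stride=1, padding=0):
    return (fm + 2 * padding - k) // stride + 1

print(conv_out_size(252, 3, stride=1))  # 250 -> 250 x 250 output
print(conv_out_size(252, 3, stride=3))  # 84  -> 84 x 84 output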
Parallelism Influence on Processing Time
Considering that the previous example used a 252 × 252 image with a 3 × 3 filter, the study begins with the simplest case, which uses only one PE block, providing the lowest resource usage and the highest processing time. Each test iteration increases the number of PE blocks, up to a maximum of 100 PEs. For a 3 × 3 filter with up to 100 PE blocks, 900 DSP blocks are required. As illustrated by Figure 9, instantiating only one PE module leads to the highest processing time. Applying two PE modules in parallel reduces the previous processing time by 50%. Applying three PE modules in parallel reduces the processing time by 65%. As illustrated, beyond 20-30 parallel PE modules the processing time reduction becomes less relevant. The blue curve in Figure 9 represents the theoretical processing time computed as a function of PE block usage; the orange curve represents the processing time measured on the hardware IP under the same conditions. A slight deviation between theoretical values and hardware measurements appears on the graph, since the "theoretical time" does not account for the initial clock cycles that do not generate valid outcomes, as explained in the design section.
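The diminishing returns follow directly from the processing time scaling with the reciprocal of the number of PEs; the sketch below evaluates the plausible Equation (1) form given earlier (an assumption, not the paper's exact numbers) for the 252 × 252 case.

FM, CLOCK = 252, 100e6  # 252 x 252 image, 100 MHz clock

def t_theory(num_pes, num_fm=1):
    # assumed Equation (1) form: t = FM^2 * num.FM / (Num.PEs * ClockFreq.)
    return FM * FM * num_fm / (num_pes * CLOCK)

base = t_theory(1)
for pes in (1, 2, 3, 20, 100):
    print(pes, "PEs:", f"{(1 - t_theory(pes)/base)*100:.0f}% reduction")
# 2 PEs -> 50%, 3 PEs -> ~67%; past ~20 PEs the incremental gain is small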
Block RAM Influence on PEs/Number of Parallel Filters/Processing Time
For the following example, the IP parameters were configured as: kernel size = 3, FM size = 252, padding = 0, stride = 1, maxpool = 0, input FM channels = 64, number of filters = 32, DSP blocks available = 1000, and a clock source of 100 MHz.
As Figure 10 presents, when only one block of RAM is used, only one filter is applied (orange line) and, consequently, the maximum possible number of PE blocks is used. Increasing the number of blocks of RAM leads to an increase in the number of parallel filters. Consequently, the number of PE blocks per convolution block decreases, since the PE blocks are spread across more filters, reducing the number available for a single convolution block. Increasing the number of parallel filters decreases the processing time, as illustrated in the graph on the right side. However, after the two lines intersect in the upper graph, the processing time increases. This can be explained by more filters being computed simultaneously, which results in fewer DSP blocks allocated per convolution block; once fewer DSP blocks are used in a single convolution block, its processing time increases.
Quantization Influence Study
This section presents a study of the quantization impact on two different deep learning models based on a CNN architecture. The MNIST model is used for handwritten digit detection in an image. The other model is a 3D object detector and classifier based on point clouds, namely PointPillars. The software versions of the two deep learning models use input data and weight values in a 32 bit floating-point format. This study converts the aforementioned data from single precision to an 8 bit fixed-point format.
MNIST Dataset Model
The model configuration used for validation is built with two convolution layers, a fully connected layer, and softmax. Figure 11 illustrates the changes performed to validate the IP functionality. In the first study, the values were computed in hardware using the developed IP. In a second study, with the integration of eight new convolution layers, the quantization methods were applied through all layers. As represented inside the red square, the input values of the first convolution layer are sent to the hardware IP. The output values from the hardware processing, which are quantized due to the hardware conversion from 32 bit floating-point format to 8 bit fixed-point format, return to the software (SW) model to feed the next convolution layer; the input values of the second layer are replaced with the hardware values.
The fixed-point representation is expressed as Qi.d, where i refers to the integer part, i.e., the number of bits on the left side of the fixed point, and d refers to the number of bits on the right side of the fixed point, the decimal fraction. The weight values for the first convolution layer were quantized with three different quantization levels: Q2_6, Q3_5, and Q4_4. For each of them, the output FM quantization level was tested with different configurations, providing a better view of how weight and output FM quantization affect the classification score.
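A minimal sketch of this quantization, assuming the sign bit is counted inside the integer part so that Q2_6, Q3_5, and Q4_4 each occupy 8 bits in total (the paper's exact rounding and sign conventions are not specified):

import numpy as np

def quantize_q(x: np.ndarray, i_bits: int, d_bits: int) -> np.ndarray:
    """Quantize float values to a signed Qi.d fixed-point grid.

    i_bits integer bits (sign included here) + d_bits fractional bits,
    so e.g. Q2_6 uses 8 bits total with a range of [-2, 2 - 2**-6].
    """
    scale = 2 ** d_bits
    lo = -(2 ** (i_bits + d_bits - 1))       # most negative code
    hi = 2 ** (i_bits + d_bits - 1) - 1      # most positive code
    codes = np.clip(np.round(x * scale), lo, hi)
    return codes / scale                      # back to real values

w = np.array([0.731, -1.25, 1.999, -0.004])
for i, d in ((2, 6), (3, 5), (4, 4)):
    print(f"Q{i}_{d}:", quantize_q(w, i, d))

Fewer fractional bits give a coarser grid, which is why Q4_4 degrades the score more than Q2_6 in the results below.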
Convolution Weights and Bias Quantized
In a second iteration, illustrated in Figure 11 by the green box, the study was extended to apply quantization to all layers of the MNIST model. Eight new convolution layers were added to the previous MNIST model, with the intent of verifying the quantization impact on deeper CNN architectures. The quantization was performed using the same IP as in the previous study.
With this, we can obtain a quantization result that closely approximates a full hardware implementation of the architecture. This means that if the entire model were implemented on hardware, which is not the goal of this study, the expected results would be close to the results presented in Figure 12.
For this study, only the Q1_7 and Q2_6 configurations were applied; configurations such as Q3_5 or Q4_4 lead to worse results since the number of fractional bits is reduced, causing accuracy loss. While the previous study used only one input image, this study applies four different images with the handwritten digits 1, 2, 3, and 4. The graphs depicted in Figures 13 and 14 present the score obtained for the correct classification, in other words, the score that associates each of the four images with its correct class (image 1 with digit 1, image 2 with digit 2, and so on). The blue bar in the graphs represents the score values obtained using the SW-only version. The gray bars, from left to right, represent the score obtained by applying quantization to all layers of the MNIST model and quantization only to the convolution layers, respectively. As expected, applying quantization to all layers leads to a slight score reduction. This reduction, with the highest error of 1.88% on image 2, varies depending on which image is used for classification. Nonetheless, it is almost insignificant, leading to a robust classification for each of the four different images.
PointPillars Model Quantized Convolution Weights
On the PointPillars model, we verified how quantizing the convolution weights of the backbone convolution layers affects the detection score and the intersection over union (IoU). For this analysis, we adopt the mean average precision (mAP) performance metric on the Kitti dataset. The mAP measures the accuracy of an object detector in terms of its precision and recall. Using a Python script, a result checkpoint was obtained over different frames of the Kitti dataset; the checkpoint provides mAP values for the evaluation scenarios of bounding box (BBOX), bird's eye view (BEV), 3D, and average orientation similarity (AOS). Figure 15 presents the mAP value for each of the four scenarios across the three difficulty levels, using the SW-only version with 32-bit weight values and the quantized version with 8-bit weights. In Figure 15, each scenario has three bars for the three difficulty levels (from left to right: easy, moderate, and hard). Each difficulty level value for the SW-only version is directly compared with the corresponding value for the quantized version; that is, for the BBOX metric, the SW and Q_CONV result values with the same bar color can be compared. In this case, the SW-only version for the easy level yields an mAP of 83.73%, while the quantized version yields 80.84%. The same comparison applies to the remaining metrics. This mAP degradation can be explained by the precision loss of the feature maps, but also by the fact that the output classification scores are affected, as will be shown below, suggesting that the score threshold should be adjusted to reduce the metric performance loss. Different frames were evaluated through the Kitti Viewer; classification score and IoU values were collected using the checkpoint files resulting from the previous step, i.e., evaluating the SW-only model version and the quantized model version (quantized weight values in the backbone convolution layers). After analyzing every frame with respect to classification score, IoU, and object distance to the LiDAR sensor, the graphs in Figure 16 were plotted. Each blue dot represents a detected object in a given frame, describing the object depth relative to the LiDAR sensor and its corresponding classification score and IoU values. This study tries to establish a direct relationship between the classification score obtained for a given object and its distance to the LiDAR sensor. As the graphs in Figure 16 show, objects accumulate at classification scores higher than 0.6. These objects are located at a maximum distance of 30 m from the LiDAR sensor, representing 65.5% of objects for the SW-only version using 32-bit weights. For the quantized version, using 8-bit weights, object accumulation remains high below 30 m from the sensor, with higher score values (a total of 65.98% of objects). As for the IoU metric, the graph in Figure 16 presents a higher concentration of objects with high IoU values at distances up to 30 m, corresponding to 62.02% of the total objects. For the quantized model, the IoU graph shows a wider scatter in the object distribution, due to the bit reduction, which leads to some null values and a quantitative reduction to 51.55%.
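For reference, a minimal 2D axis-aligned IoU sketch; the paper evaluates IoU on Kitti 3D/BEV boxes, so this is only an illustration of the metric itself:

def iou(box_a, box_b):
    """Axis-aligned 2D IoU between boxes given as (x1, y1, x2, y2).

    The study uses 3D/BEV boxes from Kitti; this 2D version just
    illustrates the metric plotted in Figures 16 and 17.
    """
    ix1, iy1 = max(box_a[0], box_b[0]), max(box_a[1], box_b[1])
    ix2, iy2 = min(box_a[2], box_b[2]), min(box_a[3], box_b[3])
    inter = max(0.0, ix2 - ix1) * max(0.0, iy2 - iy1)
    area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
    union = area_a + area_b - inter
    return inter / union if union > 0 else 0.0

# e.g. a predicted box shifted relative to the ground truth
print(iou((0, 0, 4, 2), (0.5, 0, 4.5, 2)))  # ~0.78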
The two graphs in Figure 17 illustrate how the score and IoU errors are distributed with respect to object distance from the LiDAR sensor. In each graph, some error values are negative, meaning that the classification score or IoU value is higher for the quantized-weights model version, which amounts to an improvement in detection accuracy since the score and IoU metrics increase. The score error graph shows a high accumulation of values for errors between −0.05 and 0.05, representing 61.63% of all objects. This point distribution, as expected from the previous graphs, lies within 30 m of the LiDAR sensor, represented by the red box. For the IoU metric, the highest point accumulation occurs for errors under 0.25 at distances of less than 30 m, representing 55.43% of all objects.
Convolution Layer Hardware Replacement
As described in previous work, the backbone stage is built with sixteen convolution layers spread across three blocks, which requires many DSP and memory hardware resources; for that reason, only two layers of block 3 were processed in hardware. The inference process was run on the Robot Operating System (ROS) platform, which plays a new point cloud frame while initiating the PointPillars network for object detection and classification. Tables 2 and 3 present the results obtained for each evaluated metric regarding point cloud object detection. The score metric represents the probability of an object belonging to a certain class, in this example, cars. The location metric, expressed in meters, provides the spatial identification of an object in the point cloud; each object position is given relative to the LiDAR sensor. The bounding box metric represents the BBOX around an object. Rotation_y and Alpha are related to the observation angle of each object. From the obtained results, there is no large divergence between the SW and hybrid versions, the latter using values from hardware processing. During the hybrid model inference it was verified that the number of false positives increases for the same score threshold used during SW-only inference. To filter out some of the false positives, which do not provide any valuable information about object detection or classification, the threshold value was increased from 0.5 to 0.75. However, the hybrid model loses one detection, the furthest car (car 6), as Figure 18 depicts and as the missing metric results in Table 3 confirm. This detection loss was expected from the previous study illustrated in Figure 16, which gives the visual perception that farther objects tend to be detected less often and with lower accuracy.
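The false-positive filtering step amounts to a simple score threshold. A minimal sketch, assuming detections are (label, score) pairs; the actual structure used in the ROS pipeline is not specified in the text:

def filter_detections(detections, threshold=0.75):
    """Keep only detections whose classification score reaches the threshold.

    The threshold of 0.75 mirrors the value adopted for the hybrid model;
    the detection format here is an assumption for illustration.
    """
    return [d for d in detections if d[1] >= threshold]

dets = [("car", 0.91), ("car", 0.62), ("car", 0.78)]
print(filter_detections(dets))  # drops the 0.62 false positive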
Conclusions
This paper's main goal was the design and implementation of a convolution block with the particularity of being fully customizable and applicable to any 3D object detection model. To increase the parallelism of each convolution operation, the proposed architecture supports several processing elements operating at the same time, improving inference time at the cost of energy and consumed logic resources.
Along with the development and implementation of the proposed generic convolution block, a study was conducted on the influence of quantization and parameter sharing. The quantization process, which reduces the bit-width of each parameter value, enables the second optimization, parameter sharing. The bit-width reduction of the weight values decreases DSP usage by around 40% to 50%.
The developed IP was validated using RGB and point cloud data; in each evaluation it was verified that the proposed solution was capable of adapting to different model requirements. In the case of the point cloud model, PointPillars, it was possible to verify that higher score and IoU values tend to occur near the LiDAR sensor, at distances under roughly 30 m. Furthermore, the quantization process affected both score and IoU values by up to a 10% decrease. Using the developed IP, we visually confirmed the correct operation of the proposed solution. It was also possible to qualitatively validate the correct operation of the integration setup, with the model able to detect objects within a range of 30 m from the LiDAR.
from typing import Optional

import gym


def _get(env: gym.Env) -> Optional[_MonitorEnv]:
    """Walk the wrapper chain and return the first _MonitorEnv found, if any.

    _MonitorEnv is assumed to be defined elsewhere in this module.
    """
    assert env, "env not set"
    result = None
    if isinstance(env, _MonitorEnv):
        result = env
    elif isinstance(env, gym.core.Wrapper):
        # Recurse into the wrapped environment until a _MonitorEnv is found.
        result = _get(env.env)
    return result
#include <iostream>
#include <string>
#include <iomanip>
using namespace std;
class Package
{
private:
string sender_name;
string sender_address;
string sender_city;
string sender_state;
string sender_ZIP;
string recipient_name;
string recipient_address;
string recipient_city;
string recipient_state;
string recipient_ZIP;
double weight;
double costperounce;
public:
Package(string sender_n, string sender_addr, string sender_c,
string sender_s, string sender_Z, string recipient_n, string recipient_addr,
string recipient_c, string recipient_s, string recipient_Z, double wei,
double cost);
void setsender_name(string sender_n);
string getsender_name();
void setsender_address(string sender_addr);
string getsender_address();
void setsender_city(string sender_c);
string getsender_city();
void setsender_state(string sender_s);
string getsender_state();
void setsender_ZIP(string sender_Z);
string getsender_ZIP();
void setrecipient_name(string recipient_n);
string getrecipient_name();
void setrecipient_address(string recipient_addr);
string getrecipient_address();
void setrecipient_city(string recipient_c);
string getrecipient_city();
void setrecipient_state(string recipient_s);
string getrecipient_state();
void setrecipient_ZIP(string recipient_Z);
string getrecipient_ZIP();
void setweight(double w);
double getweight();
void setcostperounce(double cost);
double getcostperounce();
double calculateCost();
};
Package::Package(string sender_n, string sender_addr, string sender_c, string
sender_s, string sender_Z, string recipient_n, string recipient_addr, string
recipient_c, string recipient_s, string recipient_Z, double wei, double cost)
{
sender_name = sender_n;
sender_address = sender_addr;
sender_city = sender_c;
sender_state = sender_s;
sender_ZIP = sender_Z;
recipient_name = recipient_n;
recipient_address = recipient_addr;
recipient_city = recipient_c;
recipient_state = recipient_s;
recipient_ZIP = recipient_Z;
if (wei > 0.0 && cost > 0.0)
{
weight = wei;
costperounce = cost;
}
else
{
weight = 0.0;
costperounce = 0.0;
}
}
void Package::setsender_name(string sender_n)
{
sender_name = sender_n;
}
string Package::getsender_name()
{
return sender_name;
}
void Package::setsender_address(string sender_addr)
{
sender_address = sender_addr;
}
string Package::getsender_address()
{
return sender_address;
}
void Package::setsender_city(string sender_c)
{
sender_city = sender_c;
}
string Package::getsender_city()
{
return sender_city;
}
void Package::setsender_state(string sender_s)
{
sender_state = sender_s;
}
string Package::getsender_state()
{
return sender_state;
}
void Package::setsender_ZIP(string sender_Z)
{
sender_ZIP = sender_Z;
}
string Package::getsender_ZIP()
{
return sender_ZIP;
}
void Package::setrecipient_name(string recipient_n)
{
recipient_name = recipient_n;
}
string Package::getrecipient_name()
{
return recipient_name;
}
void Package::setrecipient_address(string recipient_addr)
{
recipient_address = recipient_addr;
}
string Package::getrecipient_address()
{
return recipient_address;
}
void Package::setrecipient_city(string recipient_c)
{
recipient_city = recipient_c;
}
string Package::getrecipient_city()
{
return recipient_city;
}
void Package::setrecipient_state(string recipient_s)
{
recipient_state = recipient_s;
}
string Package::getrecipient_state()
{
return recipient_state;
}
void Package::setrecipient_ZIP(string recipient_Z)
{
recipient_ZIP = recipient_Z;
}
string Package::getrecipient_ZIP()
{
return recipient_ZIP;
}
void Package::setweight(double w)
{
weight = (w < 0.0) ? 0.0 : w;
}
double Package::getweight()
{
return weight;
}
void Package::setcostperounce(double cost)
{
costperounce = (cost < 0.0) ? 0.0 : cost;
}
double Package::getcostperounce()
{
return costperounce;
}
double Package::calculateCost()
{
double result;
result = weight * costperounce;
return result;
}
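// TwoDayPackage adds a flat two-day delivery fee on top of the base
// weight * cost-per-ounce charge computed by Package::calculateCost().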
class TwoDayPackage : public Package
{
private:
double two_day_delivery_fee;
public:
TwoDayPackage(string sender_n, string sender_addr, string
sender_c, string sender_s, string sender_Z, string recipient_n,
string recipient_addr, string recipient_c, string recipient_s,
string recipient_Z, double wei, double cost, double delivery_fee);
double gettwo_day_delivery_fee();
void settwo_day_delivery_fee(double delivery_fee);
double calculateCost();
};
TwoDayPackage::TwoDayPackage(string sender_n, string sender_addr,
string sender_c, string sender_s, string sender_Z, string recipient_n,
string recipient_addr, string recipient_c, string recipient_s,
string recipient_Z, double wei, double cost, double delivery_fee)
:Package(sender_n, sender_addr, sender_c, sender_s, sender_Z, recipient_n,
recipient_addr, recipient_c, recipient_s, recipient_Z, wei, cost)
{
settwo_day_delivery_fee(delivery_fee);
}
double TwoDayPackage::gettwo_day_delivery_fee()
{
return two_day_delivery_fee;
}
void TwoDayPackage::settwo_day_delivery_fee(double delivery_fee)
{
two_day_delivery_fee = delivery_fee;
}
double TwoDayPackage::calculateCost()
{
double result;
result = Package::calculateCost() + two_day_delivery_fee;
return result;
}
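// OvernightPackage instead charges an extra per-ounce fee, so the total
// cost is (costperounce + overnight_delivery_fee) * weight.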
class OvernightPackage : public Package
{
private:
double overnight_delivery_fee;
public:
OvernightPackage(string sender_n, string sender_addr, string sender_c,
string sender_s, string sender_Z, string recipient_n, string recipient_addr,
string recipient_c, string recipient_s, string recipient_Z, double wei,
double cost, double delivery_fee);
double calculateCost();
double getovernight_delivery_fee();
void setovernight_delivery_fee(double delivery_fee);
};
OvernightPackage::OvernightPackage(string sender_n, string sender_addr,
string sender_c, string sender_s, string sender_Z, string recipient_n,
string recipient_addr, string recipient_c, string recipient_s,
string recipient_Z, double wei, double cost, double delivery_fee)
:Package(sender_n, sender_addr, sender_c, sender_s, sender_Z, recipient_n,
recipient_addr, recipient_c, recipient_s, recipient_Z, wei, cost)
{
setovernight_delivery_fee(delivery_fee);
}
double OvernightPackage::getovernight_delivery_fee()
{
return overnight_delivery_fee;
}
void OvernightPackage::setovernight_delivery_fee(double delivery_fee)
{
overnight_delivery_fee = delivery_fee;
}
double OvernightPackage::calculateCost()
{
double result;
result = (getcostperounce() + overnight_delivery_fee) * getweight();
return result;
}
int main()
{
OvernightPackage item1("<NAME>", "123 Main Street", "Lahore",
"Pakistan", "89754", "Ali", "123 bent street", "Multan", "Pakistan",
"87540", 12.00, 1.50, 1.10);
TwoDayPackage item2("Wajid", "987 1st Street", "Mumbai",
"India", "87654", "Hassan", "833 palm Street", "Chaniot", "Pakistan",
"98763", 18.00, 1.05, 8.00);
cout << endl;
cout << "Overnight Delivery\n";
cout << "Sender " << item1.getsender_name() << "\n";
cout << " " << item1.getsender_address() << "\n";
cout << " " << item1.getSendCity() << " " <<
item1.getsender_state() << " " << item1.getsender_ZIP() << "\n";
cout << "\n";
cout << "Recipient " << item1.getrecipient_name() << "\n";
cout << " " << item1.getsender_address() << "\n";
cout << " " << item1.getrecipient_city() << " " <<
item1.getrecipient_state() << " " << item1.getrecipient_ZIP() << "\n";
cout << "Cost Rs " << item1.calculateCost() << "\n";
cout << "\n\n";
cout << "2 Day Delivery\n";
cout << "Sender " << item2.getsender_name() << "\n";
cout << " " << item2.getsender_address() << "\n";
cout << " " << item2.getSendCity() << " " <<
item2.getsender_state() << " " << item2.getsender_ZIP() << "\n";
cout << "\n";
cout << "Recipient " << item2.getrecipient_name() << "\n";
cout << " " << item2.getsender_address() << "\n";
cout << " " << item2.getrecipient_city() << " " <<
item2.getrecipient_state() << " " << item2.getrecipient_ZIP() << "\n";
cout << "Cost Rs " << item2.calculateCost() << "\n";
system("pause");
return 0;
}
|
/*====================================================================*
*
* menu.h -
*
*. Motley Tools by <NAME>
*: Published 2006 by <NAME> Associates Limited;
*; Licensed under the Internet Software Consortium License
*
*--------------------------------------------------------------------*/
#ifndef MENU_HEADER
#define MENU_HEADER
/*====================================================================*
* system header files;
*--------------------------------------------------------------------*/
#include <unistd.h>
#include <stdint.h>
/*====================================================================*
* custom header files;
*--------------------------------------------------------------------*/
#include "../tools/types.h"
/*====================================================================*
* variables;
*--------------------------------------------------------------------*/
typedef struct menu
{
struct menu * prior;
struct menu * equal;
struct menu * after;
signed class;
signed value;
signed ascii;
}
MENU;
/*====================================================================*
* functions;
*--------------------------------------------------------------------*/
void menusort (MENU * menu, char * string, char buffer [], size_t length);
void menudump (MENU * menu, char * string, char buffer [], size_t length);
MENU * menuload (MENU * menu, char buffer [], size_t length);
void menushow (MENU * menu, char buffer [], signed length);
unsigned menusize (MENU * menu);
unsigned menulength (MENU * menu, signed length);
unsigned menuvolume (MENU * menu);
char ** menulist (MENU * menu, char * string, char buffer [], size_t length, char * vector [], size_t size);
MENU * menudefine (MENU * menu, char const * string, signed class, signed value);
MENU * menuitem (char const * string, signed class, signed value);
MENU * menulookup (MENU * menu, char const * string);
MENU * menuprefix (MENU * menu, char const * string);
MENU * menuassume (MENU * menu, char const * string);
/*====================================================================*
*
*--------------------------------------------------------------------*/
#endif
|
The Reform Alliance, headed by Lucinda Creighton, has taken a major step towards becoming a fully fledged political party by registering with the Standards in Public Office Commission (SIPO).
Following their expulsion from Fine Gael, there has been much speculation about the intentions of the Reform Alliance. But it is now expected that the new party will officially be launched next September, after the local and European elections.
It has also been confirmed that Ms Creighton and high-profile independent TD Stephen Donnelly have "chatted" in recent weeks and are "at one" in their views about political and economic reform.
The Reform Alliance has registered as a "third party" with SIPO in order to allow it to raise funds for political research and policy development in the coming weeks.
The group is made up of Ms Creighton; her husband, Senator Paul Bradford; TDs Billy Timmins, Denis Naughten, Terence Flanagan, and Peter Mathews; and Senator Fidelma Healy Eames.
It does not receive any State funding at present and the ramping up of political activity, and its clear attempts to shake off its pro-life image, after the abortion saga are seen as highly significant.
"It is early days but it is about being fully compliant with the rules should we fundraise in order to carry out political research and develop new policy. We want to be a vehicle for new thinking in the Dail and Seanad," Ms Creighton told the Sunday Independent.
Crucially, Ms Creighton also refused to rule out the possibility of ultimately establishing a fully fledged party, saying: "I can't say what the future holds."
In a further hint at what direction the group was heading, the Dublin South East TD also said she never wanted to be an independent, adding she was a big believer in party politics.
"You have freedom in what you can say as an independent, but there is also strength in numbers. I am a great believer in party politics," she said.
It has also emerged that the new party is likely to retain the name "Reform" in its title with some suggesting it will be named either The Reform Alliance Party or The Reform Party.
For his part, Mr Donnelly has in recent weeks spoken of his frustration at being an isolated independent TD in opposition. His financial expertise and career experience in public sector transformation, added to his thirst for driving political and economic reform, make him an ideal fit for not only the Alliance but also Fianna Fail.
Yesterday, several members of the Reform Alliance spoke very warmly about Mr Donnelly's pedigree, and he in turn paid handsome compliments to several members of the Reform Alliance.
Ms Creighton said: "In terms of what he wants to achieve, there is a lot of common ground. On reform, I share a lot of Stephen's views, there is a lot of shared ground.
"In the coming months, I am happy to co-operate with him and definitely there is an opportunity for us to work together."
Speaking yesterday, Mr Donnelly said he was very much open to having formal discussions with the Reform Alliance about possibly joining forces.
"I would entertain a discussion. I am not sitting here waiting, but I'll listen," he said.
"There is most undoubtedly a need for a new party, the current system is so old, stale and so badly in need of reform."
He said they have to move away from the pro-life agenda, adding that their stance on the Seanad was important.
"They are going to have to work hard to get away from the pro-life single issue, but they have some seriously smart people. Lucinda is very smart. The only question is will they achieve a critical mass or will some of them go back into Fine Gael," he said.
Mr Donnelly has also moved to distance himself significantly from claims that he is contemplating a move to Fianna Fail.
The focus on Mr Donnelly's political intentions has intensified after Tanaiste Eamon Gilmore predicted that he would be "wearing the Fianna Fail jersey" by the next general election.
Mr Donnelly, though, anxiously moved to dispel any notion "that I am talking to Fianna Fail. Micheal Martin and I are not having pints in Doheny and Nesbitts every week trying to sort the world out."
But affirming his desire for a new party, Mr Donnelly said: "There is a political cartel in Ireland and having a new party to challenge the cartel is a good idea."
He added: "The establishment protects itself very well in Ireland, it's a very closed system in terms of elites, it would be healthy for democracy if a new party challenged our vested interests".
He also warned this "cartel" had created the scenario where "huge swathes of the public sector and the political system are stuck in a 1970s style time-warp".
By Daniel McConnell and John Drennan
Irish Independent |
# These imports are required by fetch_token; in the original module they
# would sit at the top of the file.
import requests
import xmltodict


def fetch_token(
    self, username: str = "", password: str = "", credentials_file_path: str = ""
) -> None:
    """Fetch an API token from explicit credentials or a JSON credentials file."""
    if credentials_file_path == "":
        if (username == "") or (password == ""):
            raise ValueError("both username and password should be given.")
    else:
        username, password = self._load_json_credentials(credentials_file_path)
    url = (
        "https://geoservices.meteofrance.fr/"
        + f"services/GetAPIKey?username={username}&password={password}"
    )
    try:
        r = requests.get(url)
        r.raise_for_status()
    except requests.exceptions.HTTPError as e:
        print("Http Error:", e)
        return
    except requests.exceptions.ConnectionError as e:
        print("Error Connecting:", e)
        return
    except requests.exceptions.Timeout as e:
        print("Timeout Error:", e)
        return
    except requests.exceptions.RequestException as e:
        print("Something is wrong with the request", e)
        print("-- GetAPIKey request --")
        return
    # The token is returned inside an XML document; extract it by namespace.
    xmlData = r.content.decode("utf-8")
    d = xmltodict.parse(xmlData, process_namespaces=True)
    self.token = d["http://ws.apache.org/ns/synapse:Token"]
    assert self.token[:2] == "__"
    assert self.token[-2:] == "__"
package mbrtu
import (
"fmt"
"ckklearn.com/testmodbus/global"
)
// RtuParseResponse parses the response message returned by the slave
func RtuParseResponse(dst, src []byte, reqFunCode global.FunCode) (int, error) {
// `src[1]` is the function code of the received message
switch src[1] {
case reqFunCode:
// For a read request, copy the returned data into dst; otherwise write nothing
// `src[2]` is the byte count of the data that was read
switch reqFunCode {
case global.ReadCoils, global.ReadInputs, global.ReadInputRegisters, global.ReadHoldingRegisters:
return copy(dst, src[3:3+src[2]]), nil
default:
return 0, nil
}
case reqFunCode + 0x80:
// Determine the exception type returned by the slave
errCode := src[2]
err, ok := global.SlaveErrorMap[errCode]
if !ok {
return 0, fmt.Errorf("unknow error code `%x`", errCode)
}
return 0, err
default:
return 0, fmt.Errorf("internal error")
}
}
|
// Checks if the `assert_invalid` message matches the expected one
fn matches_message_assert_invalid(expected: &str, actual: &str) -> bool {
actual.contains(expected)
// Waiting on https://github.com/WebAssembly/bulk-memory-operations/pull/137
// to propagate to WebAssembly/testsuite.
|| (expected.contains("unknown table") && actual.contains("unknown elem"))
// wasmparser returns the wrong message
|| (expected.contains("unknown memory") && actual.contains("no linear memories are present"))
// `elem.wast` and `proposals/bulk-memory-operations/elem.wast` disagree
// on the expected error message for the same error.
|| (expected.contains("out of bounds") && actual.contains("does not fit"))
} |
package cmd
import (
"github.com/moducate/moducate/internal/db"
"github.com/spf13/cobra"
)
// MakeMigrateCmd creates a new instance of the `moducate migrate` command. This
// command performs PostgreSQL database migrations.
func MakeMigrateCmd() *cobra.Command {
return &cobra.Command{
Use: "migrate <DSN>",
Short: "Performs Moducate's PostgreSQL database migrations",
Run: func(cmd *cobra.Command, args []string) {
if len(args) != 1 {
cmd.PrintErrln("Please provide exactly 1 argument for the DSN (PostgreSQL connection string)!")
return
}
applied, err := db.Migrate(args[0])
if err != nil {
cmd.PrintErrln("Failed to perform migrations!", err.Error())
} else if applied == 0 {
cmd.Println("No migrations could be applied: the provided database is already up to date!")
} else {
cmd.Println("Applied", applied, "migrations successfully!")
}
},
}
}
var migrateCmd = MakeMigrateCmd()
func init() {
RootCmd.AddCommand(migrateCmd)
}
|
/** Remove the stored choice from prefs. */
@VisibleForTesting
static void removePreviousSearchEngineType() {
SharedPreferencesManager.getInstance().removeKey(
ChromePreferenceKeys.SEARCH_ENGINE_CHOICE_DEFAULT_TYPE_BEFORE);
} |
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.NoSuchElementException;

/**
 * Concatenates an iterator over iterators into one long iterator.
 *
 * @author Dan Klein
 */
public class ConcatenationIterator<E> implements Iterator<E> {
final Iterator<Iterator<E>> sourceIterators;
Iterator<E> currentIterator;
Iterator<E> lastIteratorToReturn;
public boolean hasNext() {
return currentIterator.hasNext();
}
public E next() {
if (currentIterator.hasNext()) {
E e = currentIterator.next();
lastIteratorToReturn = currentIterator;
advance();
return e;
}
throw new NoSuchElementException();
}
private void advance() {
while (! currentIterator.hasNext() && sourceIterators.hasNext()) {
currentIterator = sourceIterators.next();
}
}
  public void remove() {
    if (lastIteratorToReturn == null)
      throw new IllegalStateException();
    // Remove via the iterator that actually returned the last element;
    // advance() may already have moved currentIterator past it.
    lastIteratorToReturn.remove();
  }
public ConcatenationIterator(Iterator<Iterator<E>> sourceIterators) {
this.sourceIterators = sourceIterators;
this.currentIterator = (new ArrayList<E>()).iterator();
this.lastIteratorToReturn = null;
advance();
}
public ConcatenationIterator(Collection<Iterator<E>> iteratorCollection) {
this(iteratorCollection.iterator());
}
public static void main(String[] args) {
List<String> list0 = Collections.emptyList();
List<String> list1 = Arrays.asList("a b c d".split(" "));
List<String> list2 = Arrays.asList("e f".split(" "));
List<Iterator<String>> iterators = new ArrayList<>();
iterators.add(list1.iterator());
iterators.add(list0.iterator());
iterators.add(list2.iterator());
iterators.add(list0.iterator());
Iterator<String> iterator = new ConcatenationIterator<>(iterators);
while (iterator.hasNext()) {
System.out.println(iterator.next());
}
}
} |
/**
* A GeoJSON object with the type "FeatureCollection" is a feature object which represents a
* collection of feature objects.
*
* @see <a href='geojson.org/geojson-spec.html#feature-collection-objects'>Official GeoJSON FeatureCollection Specifications</a>
* @since 1.0.0
*/
public class FeatureCollection extends BaseFeatureCollection {
private final List<Feature> features;
/**
* Protected constructor.
* Unlike other GeoJSON objects in this package, this constructor is protected to enable
* the deserialization of the Map Matching service response.
*
* @param features List of {@link Feature}.
* @since 1.0.0
*/
FeatureCollection(List<Feature> features) {
this.features = features;
}
/**
* Get the List containing all the features within collection.
*
* @return List of features within collection.
* @since 1.0.0
*/
public List<Feature> getFeatures() {
return features;
}
/**
* Create a {@link FeatureCollection} from a List of features.
*
* @param features List of {@link Feature}
* @return new {@link FeatureCollection}
* @since 1.0.0
*/
public static FeatureCollection fromFeatures(List<Feature> features) {
return new FeatureCollection(features);
}
public static FeatureCollection fromFeatures(Feature[] features) {
return new FeatureCollection(Arrays.asList(features));
}
} |
from collections import defaultdict as dd
from collections import deque
import bisect
import heapq
def ri():
return int(input())
def rl():
return list(map(int, input().split()))
def yield_ones(n):
place = 0
while n:
n, r = divmod(n, 2)
if r:
yield place
place += 1
def solve():
    n = ri()
    A = rl()
    # Count how many numbers in A have each bit set.
    bits = dd(int)
    for a in A:
        for one in yield_ones(a):
            bits[one] += 1
    answer = 0
    # Peel off layers: each layer takes the minimum remaining count m over
    # the bits still present, adds m * (sum of those bit values)^2, and
    # reduces every count by m, dropping bits that reach zero.
    while bits:
        new_bits = {}
        m = min(bits.values())
        partial = 0
        for power, count in bits.items():
            partial += pow(2, power)
            if count > m:
                new_bits[power] = count - m
        answer += m * partial**2
        bits = new_bits
    print(answer)
mode = 's'
if mode == 'T':
t = ri()
for i in range(t):
solve()
else:
solve()
|
This intriguing footage captured by NASA shows a 'UFO' hovering near the International Space Station as two astronauts conduct a spacewalk.
In the five-minute clip, posted to YouTube, NASA astronaut Reid Wiseman and Alexander Gerst, from the European Space Agency, are seen on the spacewalk.
The pair, clad in spacesuits, step outside the space station to carry out a series of repairs.
However, for around five seconds, a mystery craft - which is presumably hovering or flying in - is also visible in the background, in the pitch black.
It appears in the camera frame between the space station and Earth - an area filled with just blackness only a few seconds earlier.
Unsurprisingly, NASA has yet to comment on the puzzling object.
But the agency has confirmed that the spacewalk, which took place on Tuesday October 7, saw the two astronauts completing tasks for the Expedition 41 crew.
The duo moved a broken cooling pump into an external storage space.
In addition, they installed gear that provides back up power for the external robotic arm and the 'rail-car' system it uses to move along the length of the station.
A second spacewalk is scheduled to take place today, NASA said.
The purpose is to replace a failed voltage regulator and to move camera equipment in preparation of next year's arrival of new docking adapters for crew vehicles.
The clip, uploaded by NASA, has been viewed 28,000 times. |
package goomg_test
import (
"bytes"
"context"
goomg "github.com/onionltd/go-omg"
"golang.org/x/crypto/openpgp"
"io/ioutil"
"net/http"
"net/http/httptest"
"testing"
"time"
)
func TestClient_GetCanaryMessage(t *testing.T) {
testServer := httptest.NewServer(http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) {
canary, err := ioutil.ReadFile("testdata/canary.txt")
if err != nil {
t.Fatal(err)
}
if _, err := res.Write(canary); err != nil {
t.Fatal(err)
}
}))
defer testServer.Close()
key, err := ioutil.ReadFile("testdata/pgp.txt")
if err != nil {
t.Fatal(err)
}
keyRing, err := openpgp.ReadArmoredKeyRing(bytes.NewReader(key))
if err != nil {
t.Fatal(err)
}
c := goomg.NewClient(testServer.Client())
canary, err := c.GetCanaryMessage(context.Background(), testServer.URL)
if err != nil {
t.Fatal(err)
}
_, err = canary.VerifySignature(keyRing)
if err != nil {
t.Fatal(err)
}
date, err := time.Parse(time.RFC3339, "2019-11-11T00:00:00Z")
if err != nil {
t.Fatal(err)
}
if err := canary.Validate(date); err != nil {
t.Fatal(err)
}
}
func TestClient_GetMirrorsMessage(t *testing.T) {
testServer := httptest.NewServer(http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) {
mirrors, err := ioutil.ReadFile("testdata/mirrors.txt")
if err != nil {
t.Fatal(err)
}
if _, err := res.Write(mirrors); err != nil {
t.Fatal(err)
}
}))
defer testServer.Close()
key, err := ioutil.ReadFile("testdata/pgp.txt")
if err != nil {
t.Fatal(err)
}
keyRing, err := openpgp.ReadArmoredKeyRing(bytes.NewReader(key))
if err != nil {
t.Fatal(err)
}
c := goomg.NewClient(testServer.Client())
mirrors, err := c.GetMirrorsMessage(context.Background(), testServer.URL)
if err != nil {
t.Fatal(err)
}
_, err = mirrors.VerifySignature(keyRing)
if err != nil {
t.Fatal(err)
}
urls, err := mirrors.List()
if err != nil {
t.Fatal(err)
}
expectedUrls := []string{"http://darkfailllnkf4vf.onion", "https://dark.fail"}
for i := range urls {
if urls[i] != expectedUrls[i] {
t.Fatalf("invalid mirrors: %v", urls)
}
}
}
func TestClient_CustomUserAgent(t *testing.T) {
const customUserAgent = "cua/2.1"
testServer := httptest.NewServer(http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) {
ua := req.Header.Get("User-Agent")
if ua != customUserAgent {
t.Fatal("bad user agent")
}
mirrors, err := ioutil.ReadFile("testdata/mirrors.txt")
if err != nil {
t.Fatal(err)
}
if _, err := res.Write(mirrors); err != nil {
t.Fatal(err)
}
}))
defer testServer.Close()
c := goomg.NewClient(testServer.Client())
c.SetUserAgent(customUserAgent)
_, err := c.GetMirrorsMessage(context.Background(), testServer.URL)
if err != nil {
t.Fatal(err)
}
}
|