content
stringlengths 10
4.9M
|
---|
{-# LANGUAGE RecordWildCards #-}
module HulkanEngine3D.Utilities.Logger
( logTrivialInfo
, logInfo
, logDebug
, logWarn
, logError
) where
import GHC.Stack hiding (prettyCallStack, prettySrcLoc)
import Data.List
import Data.Time
import Control.Monad
-- | Render a 'SrcLoc' as @file:line:col in package:module@.
prettySrcLoc :: SrcLoc -> String
prettySrcLoc SrcLoc {..}
    = concat
        [ srcLocFile, ":"
        , show srcLocStartLine, ":"
        , show srcLocStartCol, " in "
        , srcLocPackage, ":", srcLocModule
        ]
-- | Render a 'SrcLoc' as @file:line:col@ (no package\/module suffix).
prettySrcLoc' :: SrcLoc -> String
prettySrcLoc' SrcLoc {..} =
    intercalate ":" [srcLocFile, show srcLocStartLine, show srcLocStartCol]
-- | Format every frame of a call stack as a timestamped log line (see
-- 'prettyCallStackLines') and join them with newlines.
prettyCallStack :: CallStack -> String -> String -> String -> String
prettyCallStack cs time loggerLevel msg = intercalate "\n" $ prettyCallStackLines cs time loggerLevel msg
-- | Produce one log line per call-stack frame, formatted as
-- @[time][LEVEL] msg (file:line:col) @ (note the trailing space).
-- An empty call stack simply yields no lines ('map' handles that case).
prettyCallStackLines :: CallStack -> String -> String -> String -> [String]
prettyCallStackLines cs time loggerLevel msg =
    map ((++ " ") . prettyCallSite) (getCallStack cs)
  where
    -- The function name in each frame is ignored; only the location prints.
    prettyCallSite (_, loc) = "[" ++ time ++ "]" ++ "[" ++ loggerLevel ++ "] " ++ msg ++ " (" ++ prettySrcLoc' loc ++ ")"
-- | Current local time formatted as @YYYY-MM-DD HH:MM:SS.mmm@ for log prefixes.
getLoggerTime :: IO String
getLoggerTime = formatTime defaultTimeLocale "%F %T.%3q" <$> getZonedTime
-- | Compile-time switch for 'logTrivialInfo'; trivial messages are dropped
-- entirely while this is 'False'.
enableLogTrivialInfo :: Bool
enableLogTrivialInfo = False
-- | Log a low-importance INFO message with the caller's source location.
-- Emits nothing unless 'enableLogTrivialInfo' is turned on.
logTrivialInfo :: HasCallStack => String -> IO ()
logTrivialInfo msg =
    when enableLogTrivialInfo $ do
        time <- getLoggerTime
        putStrLn $ prettyCallStack callStack time "INFO" msg
-- | Shared worker for the level-tagged loggers. The caller's 'CallStack' is
-- passed in explicitly so the reported location is the user's call site,
-- not this helper. Not exported (the module export list is unchanged).
logWithLevel :: CallStack -> String -> String -> IO ()
logWithLevel cs level msg = do
    time <- getLoggerTime
    putStrLn $ prettyCallStack cs time level msg

-- | Log a message at INFO level, stamped with time and call-site location.
logInfo :: HasCallStack => String -> IO ()
logInfo = logWithLevel callStack "INFO"

-- | Log a message at DEBUG level.
logDebug :: HasCallStack => String -> IO ()
logDebug = logWithLevel callStack "DEBUG"

-- | Log a message at WARNING level.
logWarn :: HasCallStack => String -> IO ()
logWarn = logWithLevel callStack "WARNING"

-- | Log a message at ERROR level.
logError :: HasCallStack => String -> IO ()
logError = logWithLevel callStack "ERROR" |
/** Checks if the target is valid for being attacked */
public boolean isValid(Entity entity)
{
    // A target must be damageable and currently visible to be attacked.
    return DamageUtility.canDamage(entity) && !entity.isInvisible();
} |
/**
 * Not usually needed except if resources need to be reclaimed in a long running process.
 */
public static void clearResources()
{
    // Drop every cached name mapping so the entries can be garbage collected.
    nameMap.clear();
} |
by
*Click on each bible passage to expand the text. Luke 1:46b-55 Mary’s Song 46. And Mary said: “My soul glorifies the Lord 47. and my spirit rejoices in God my Savior, 48. for he has been mindful of the humble state of his servant. From now on all generations will call me blessed, 49. for the Mighty One has done great things for me — holy is his name.
50. His mercy extends to those who fear him, from generation to generation.
51. He has performed mighty deeds with his arm; he has scattered those who are proud in their inmost thoughts.
52. He has brought down rulers from their thrones but has lifted up the humble.
53. He has filled the hungry with good things but has sent the rich away empty.
54. He has helped his servant Israel, remembering to be merciful 55. to Abraham and his descendants forever, even as he said to our fathers.” 1 Samuel 2:1-10 1. Then Hannah prayed and said: “My heart rejoices in the Lord; in the Lord my horn is lifted high. My mouth boasts over my enemies, for I delight in your deliverance.
2. “There is no one holy like the Lord; there is no one besides you; there is no Rock like our God.
3. “Do not keep talking so proudly or let your mouth speak such arrogance, for the Lord is a God who knows, and by him deeds are weighed.
4. “The bows of the warriors are broken, but those who stumbled are armed with strength.
5. Those who were full hire themselves out for food, but those who were hungry hunger no more. She who was barren has borne seven children, but she who has had many sons pines away.
6. “The Lord brings death and makes alive; he brings down to the grave and raises up.
7. The Lord sends poverty and wealth; he humbles and he exalts.
8. He raises the poor from the dust and lifts the needy from the ash heap; he seats them with princes and has them inherit a throne of honor. “For the foundations of the earth are the Lord’s; upon them he has set the world.
9. He will guard the feet of his saints, but the wicked will be silenced in darkness. “It is not by strength that one prevails;
10. those who oppose the Lord will be shattered. He will thunder against them from heaven; the Lord will judge the ends of the earth. “He will give strength to his king and exalt the horn of his anointed.” Mark 11:1-11 The Triumphal Entry 1. As they approached Jerusalem and came to Bethphage and Bethany at the Mount of Olives, Jesus sent two of his disciples, 2. saying to them, “Go to the village ahead of you, and just as you enter it, you will find a colt tied there, which no one has ever ridden. Untie it and bring it here. 3. If anyone asks you, ‘Why are you doing this?’ tell him, ‘The Lord needs it and will send it back here shortly.’”4. They went and found a colt outside in the street, tied at a doorway. As they untied it, 5. some people standing there asked, “What are you doing, untying that colt?” 6. They answered as Jesus had told them to, and the people let them go. 7. When they brought the colt to Jesus and threw their cloaks over it, he sat on it. 8. Many people spread their cloaks on the road, while others spread branches they had cut in the fields. 9. Those who went ahead and those who followed shouted, “Hosanna!” “Blessed is he who comes in the name of the Lord!” 10. “Blessed is the coming kingdom of our father David!” “Hosanna in the highest!” 11. Jesus entered Jerusalem and went to the temple. He looked around at everything, but since it was already late, he went out to Bethany with the Twelve.
“My soul glorifies the Lord and my spirit rejoices in God my Savior.” – Luke 1:46-47
“The Lord brings death and makes alive; he brings down to the grave and raises up.” – 1 Samuel 2:6
“Hosanna! Blessed is he who comes in the name of the Lord! Blessed is the coming kingdom of our father David! Hosanna in the highest!” – Mark 11:9-10
“Save us NOW!”
Hósanna!
… in the highest.
What is today a cry of triumph was once a desperate cry for salvation, for help.
Hósanna! Save us NOW!
Hósanna, LORD, we pray, save us now!
hósanna/ὡσαννά (Aramaic and Hebrew, originally a cry for help), hosanna!, a cry of happiness.
This was the sentiment of tragically barren Hannah in 1 Samuel 1, who was tormented by the fruitfulness of her sister-wife Peninnah’s womb. She cried for the intercession of The Lord.
This was the cry of the people of Jerusalem during the scene of Yeshua’s Triumphal entry: Lord, save us! Save us NOW!
Save us from under this tyranny of the Romans! Save us from under the oppression of poverty. Save us from this darkness all about Israel!
And on this, The Eve of the Winter Solstice, the shortest day of the year, it should be our spiritual cry, too. As the sun slips past us too quickly, we should allow our souls to express that secret hidden thought, that primitive gnawing fear:
LORD SAVE US NOW! The light is fading, eternal night approaches, and I am afraid!
The Winter Solstice & the Need for Salvation
Since time immemorial, this was a pretty scary time of year; filled with foreboding and fear. The sun, the source of all life and warmth, seems to be losing an epic battle in the heavens. It spends less and less time in the sky every day, as if somehow the night is winning the upper-hand in some titanic cosmic struggle.
The ancients recognized this annual pattern and began to model some of humanity’s earliest spiritual concepts from their observations of the Solstices. The fading sun became a symbol of our inevitable death. And humanity was forced to come to terms with this simple fact: everything dies, even (apparently) the sun.
The days before the Winter Solstice were days of deep sorrow and foreboding, the ritualized form of which has been lost to us today. This “spiritual forgetting” is a great tragedy, I think, because the celebration that is to come (in just a few days) is all the more triumphant when prefaced by this ritualized period of the contemplation of our own mortality.
In homage to our ancient (non-Christian) brothers and sisters, I would like to propose a humble meditation for tomorrow, the Winter Solstice, the shortest day of the year:
The Winter Lament The sun is fading. Light is dying. All hope is lost.
Life is so short. We come forth like a flower only to be cut down.
Once we are gone, we fade into the shadows to be remembered no longer.
Is there no hope for me, Lord? Mankind gives the last breath and is no more, forever. This is not fair, Lord! For there is hope for a tree, if its leaves should fall, that it will someday sprout again, that its tender branch shall not cease! But there is no hope for mankind. We only fade into darkness, like the Winter Sun. The sun is fading. Light is dying. All hope is lost.
Sad, no? Hopeless, dark and foreboding? Good. It should be. It is healthy for us to contemplate things that aren’t comfortable this time of year, to couch despair and hopelessness with… triumph?
Rebirth, Renewal, and Hope
For in just a few days we enter into the most jubilant celebration in Christendom, which happens to correlate directly with the even more ancient jubilation of the “Pagans” (non-Christians). Why was there such hope and jubilation for a people who never knew Christ? Because in a few days, it will be clear that the sun is winning again, that light is returning to power!
For the ancients who watched the progressive sinking of the sun in the winter sky, it would quickly become clear within days following the Solstice that something had changed: the light was no longer fading earlier and earlier in the day, and the arc of the sun across the sky was no longer closer and closer to the horizon.
The sun was winning. Light was reborn. Hope was renewed!
This is the origin of the pre-Christ celebration we call Christmas, the meaning of the date, and the universal hope that all humanity receives this time of year, every year: light has returned to the world.
Light has returned from darkness.
Hope has returned from fear.
The sun’s resurrection holds a tantalizing promise for us all!
Hósanna!
Hósanna in the highest! |
/// Create validators, returning a list of validator pubkeys on success.
///
/// Runs the validator CLI `create` command against this wallet and parses
/// one pubkey from each tab-separated line of the command's stdout
/// (everything after the first tab on a line is the pubkey).
pub fn create(
    &self,
    quantity_flag: &str,
    quantity: usize,
    store_withdrawal_key: bool,
) -> Result<Vec<String>, String> {
    let mut cmd = validator_cmd();
    cmd.arg(format!("--{}", VALIDATOR_DIR_FLAG))
        .arg(self.validator_dir.clone().into_os_string())
        .arg(CREATE_CMD)
        .arg(format!("--{}", WALLETS_DIR_FLAG))
        .arg(self.wallet.base_dir().into_os_string())
        .arg(format!("--{}", WALLET_NAME_FLAG))
        .arg(&self.wallet.name)
        .arg(format!("--{}", WALLET_PASSWORD_FLAG))
        .arg(self.wallet.password_path().into_os_string())
        .arg(format!("--{}", SECRETS_DIR_FLAG))
        .arg(self.secrets_dir.clone().into_os_string())
        .arg(format!("--{}", DEPOSIT_GWEI_FLAG))
        .arg("32000000000")
        .arg(format!("--{}", quantity_flag))
        .arg(format!("{}", quantity));
    let output = if store_withdrawal_key {
        output_result(cmd.arg(format!("--{}", STORE_WITHDRAW_FLAG))).unwrap()
    } else {
        output_result(&mut cmd).unwrap()
    };
    let stdout = from_utf8(&output.stdout)
        .expect("stdout is not utf8")
        .to_string();
    if stdout.is_empty() {
        return Ok(vec![]);
    }
    // Drop the trailing newline (if present) instead of blindly slicing off
    // the last byte, then extract the pubkey after the tab on each line.
    let pubkeys = stdout
        .strip_suffix('\n')
        .unwrap_or(&stdout)
        .split('\n')
        .map(|line| {
            let tab = line.find('\t').expect("line must have tab");
            line.split_at(tab + 1).1.to_string()
        })
        .collect::<Vec<_>>();
    Ok(pubkeys)
} |
def _compute_gradient_and_hessian(self, x):
edge_container = self._find_containing_paths()
grad = np.zeros(self.num_edges)
hessian = np.zeros((self.num_edges, self.num_edges))
for edge in range(self.num_edges):
involved_cases = edge_container[edge]
path_weights = [sum([x[i] for i in self.resampled_path_history[case]]) \
for case in involved_cases]
feedbacks = [
self.resampled_feedback_history[case] for case in involved_cases
]
grad[edge] = self._get_diag_grad(path_weights, feedbacks)
hessian[edge][edge] = self._get_diag_hessian(path_weights)
for new_edge in range(edge):
involved_cases_2 = edge_container[new_edge]
involved_both = [i for i in involved_cases if i in involved_cases_2]
path_weights = [
sum([x[i] for i in self.resampled_path_history[case]])
for case in involved_both
]
hessian[edge][new_edge] += self._get_off_diag_hessian(path_weights)
hessian[new_edge][edge] += self._get_off_diag_hessian(path_weights)
grad_prior, hessian_prior = self._compute_gradient_and_hessian_prior_part(x)
grad = grad + grad_prior
hessian = hessian + hessian_prior
return grad, hessian |
#include <bits/stdc++.h>
typedef long long ll;
using namespace std;
// Return true iff x reads the same forwards and backwards.
bool is_palindrome(string x){
    ll y = x.length();
    // Compare mirrored character pairs; bail out on the first mismatch.
    // (The middle character of an odd-length string needs no comparison.)
    for(ll i = 0; i < y / 2; i++){
        if(x[i] != x[y - i - 1]){
            return false;
        }
    }
    return true;
}
// For each test case: build the longest palindrome obtainable as a prefix of
// s concatenated with a suffix of s (the classic "longest palindrome from
// prefix + suffix" problem). First the longest matching prefix/suffix pair is
// found, then the remaining middle is scanned for its longest palindromic
// prefix (or suffix), and the better of the two extensions is printed.
int main(){
    ios::sync_with_stdio(0);
    cin.tie(0);
    ll t;
    cin >> t;
    while(t--){
        string s;
        cin >> s;
        // curl / curr: how many characters match pairwise from the left and
        // right ends (always incremented together).
        ll n = s.length(), curl = 0, curr = 0;
        for(ll i=0; i<(n+1)/2; i++){
            if(s[i] == s[n-i-1]){
                // Stop at the exact middle of an odd-length string so the
                // center character is not claimed by both sides.
                if(n % 2 == 0 || (n % 2 == 1 && i != n / 2)){
                    curl++;
                    curr++;
                } else{
                    break;
                }
            } else{
                break;
            }
        }
        // newl / newr: best extension of the matched prefix (resp. suffix)
        // by a palindrome taken from the unmatched middle section.
        ll newl = curl, newr = curr;
        for(ll i=1; i<=n-(curl+curr); i++){
            if(is_palindrome(s.substr(curl, i))){
                newl = curl + i;
            }
        }
        for(ll i=1; i<=n-(curl+curr); i++){
            if(is_palindrome(s.substr(n-curr-i, i))){
                newr = curr + i;
            }
        }
        // cout << curl << " " << curr << "\n";
        // cout << newl << " " << newr << "\n";
        string ans = "";
        if(n == 1) ans += s[0];
        else if(newl >= newr){
            // Extended prefix wins: prefix palindrome + matched suffix.
            ans += s.substr(0, newl);
            ans += s.substr(n-curr, curr);
        } else{
            // Extended suffix wins: matched prefix + suffix palindrome.
            ans += s.substr(0, curl);
            ans += s.substr(n-newr, newr);
        }
        cout << ans << "\n";
    }
} |
<reponame>zrosenbauer/action-tls-monitor
import { Protocol } from '../tls';
/**
 * Input payload used to build a TLS-monitor alert for a single domain.
 */
export interface AlertInput {
  /** Domain (hostname) the alert refers to. */
  domain: string;
  /** Certificate validity end date, as reported by the TLS check. */
  validTo: string;
  /** Certificate validity start date. */
  validFrom: string;
  /** TLS protocol negotiated with the host. */
  protocol: Protocol;
  /** Human-readable description of what went wrong. */
  errorMessage: string;
}
|
/***
 * Copy all files from config region's config bucket to destination region's config bucket
 * @param destinationRegion Region to copy files to
 */
public void sync(Regions destinationRegion) {
    EnvironmentData environmentData = getDecryptedEnvironmentData();
    StoreService destinationStore = getStoreServiceForRegion(destinationRegion, environmentData);
    String sourceBucket = findConfigBucketInSuppliedConfigRegion();
    // Replicate every key from the source config bucket into the destination region.
    listKeys().forEach(key -> destinationStore.copyFrom(sourceBucket, key));
} |
def check_column(
data,
columns,
bins=False,
missing=0.1,
cardinality=15,
float_frequency=30,
category_frequency=100,
outlier_function=quartile,
):
if isinstance(columns, str):
columns = [columns]
bins = [bins]
else:
if bins == False:
bins = [False] * len(columns)
if isinstance(bins, int):
bins = [bins] * len(columns)
i = 0
for col in columns:
bin = bins[i]
i += 1
if data[col].dtype == "O":
bin = False
if bin == False:
if data[col].dtype == "O":
chart = (
alt.Chart(data)
.mark_bar(color="#64b5f6")
.encode(
alt.X(
col,
axis=alt.Axis(title=col.title()),
sort=alt.SortField(
field="count()", order="descending", op="values"
),
),
alt.Y("count()"),
)
)
else:
chart = (
alt.Chart(data)
.mark_bar(color="#64b5f6")
.encode(
alt.X(
col,
axis=alt.Axis(title=col.title())
),
alt.Y("count()")
)
)
else:
chart = (
alt.Chart(data)
.mark_bar(color="#64b5f6")
.encode(
alt.X(
col,
bin=alt.Bin(maxbins=bin),
axis=alt.Axis(title=col.title())
),
alt.Y("count()"),
)
)
if data[col].dtype == "float64":
stats = data[col].describe()
else:
stats = data.groupby(col)[col].agg(["count"])
stats["prop"] = stats["count"] / len(data)
stats = pd.DataFrame(stats).T
display(Markdown("#### Column Summary: " + col.title()))
display(stats)
display(chart)
check_data(
data,
[col],
missing=missing,
cardinality=cardinality,
float_frequency=float_frequency,
category_frequency=category_frequency,
outlier_function=outlier_function,
title=False,
) |
Improvement of Atomic-Layer-Deposited Al2O3/GaAs Interface Property by Sulfuration and NH3 Thermal Nitridation
Fermi level pinning at the interface between high-κ gate dielectric and GaAs induced by unstable native oxides is a major obstacle for high performance GaAs-based metal-oxide-semiconductor (MOS) devices. We demonstrate the improved Al2O3/GaAs interfacial characteristics by (NH4)2S immersion and NH3 thermal pretreatment prior to Al2O3 deposition. X-ray photoelectron spectroscopy (XPS) analysis confirms that sulfuration of GaAs surface by (NH4)2S solution can effectively reduce As-O bonds while Ga-O bonds and elemental As still exist at the Al2O3/GaAs interface. However, it is found that N incorporation during the further thermal nitridation on sulfurated GaAs can effectively suppress the native oxides and elemental As in the subsequent deposition of Al2O3. Atomic force microscopy (AFM) shows that the further thermal nitridation on sulfurated GaAs surface can also improve the surface roughness. |
/*
* Copyright (C) 2021 Intel Corporation
*
* SPDX-License-Identifier: MIT
*
*/
#include "opencl/test/unit_test/os_interface/linux/device_command_stream_fixture_exp.h"
#include "opencl/test/unit_test/os_interface/linux/drm_mock_exp.h"
// clang-format off
#include "shared/source/os_interface/linux/drm_tip.h"
// clang-format on
// Answer a DRM_I915_QUERY_MEMORY_REGIONS query on the mocked drm-tip uAPI.
// The real kernel interface is two-phase: a first call with length == 0
// reports the required buffer size, a second call fills the buffer. The mock
// reports one system-memory region plus one device-local region (2 GB each),
// and simulates failure once queryMemoryRegionInfoSuccessCount is exhausted.
void DrmMockExp::handleQueryItemOnDrmTip(drm_i915_query_item *queryItem) {
    switch (queryItem->query_id) {
    case DRM_I915_QUERY_MEMORY_REGIONS:
        if (queryMemoryRegionInfoSuccessCount == 0) {
            // Out of budget: report failure the way the ioctl would.
            queryItem->length = -EINVAL;
        } else {
            queryMemoryRegionInfoSuccessCount--;
            auto numberOfLocalMemories = 1u;
            auto numberOfRegions = 1u + numberOfLocalMemories;
            int regionInfoSize = sizeof(DRM_TIP::drm_i915_query_memory_regions) + numberOfRegions * sizeof(DRM_TIP::drm_i915_memory_region_info);
            if (queryItem->length == 0) {
                // Size-probe phase: tell the caller how big a buffer to allocate.
                queryItem->length = regionInfoSize;
            } else {
                // Fill phase: the caller must pass a correctly sized, zeroed buffer.
                EXPECT_EQ(regionInfoSize, queryItem->length);
                auto queryMemoryRegionInfo = reinterpret_cast<DRM_TIP::drm_i915_query_memory_regions *>(queryItem->data_ptr);
                EXPECT_EQ(0u, queryMemoryRegionInfo->num_regions);
                queryMemoryRegionInfo->num_regions = numberOfRegions;
                // Region 0: 2 GB of system memory.
                queryMemoryRegionInfo->regions[0].region.memory_class = I915_MEMORY_CLASS_SYSTEM;
                queryMemoryRegionInfo->regions[0].region.memory_instance = 1;
                queryMemoryRegionInfo->regions[0].probed_size = 2 * MemoryConstants::gigaByte;
                // Region 1: 2 GB of device-local memory.
                queryMemoryRegionInfo->regions[1].region.memory_class = I915_MEMORY_CLASS_DEVICE;
                queryMemoryRegionInfo->regions[1].region.memory_instance = 1;
                queryMemoryRegionInfo->regions[1].probed_size = 2 * MemoryConstants::gigaByte;
            }
        }
        break;
    }
}
// Mock handler for DRM_IOCTL_I915_GEM_CREATE_EXT: record the caller-supplied
// parameters for later assertions and count the invocation. Any other
// request id is rejected with -1.
int DrmMockCustomExp::ioctlGemCreateExt(unsigned long request, void *arg) {
    if (request != DRM_IOCTL_I915_GEM_CREATE_EXT) {
        return -1;
    }
    auto *createParams = reinterpret_cast<DRM_TIP::drm_i915_gem_create_ext *>(arg);
    createExtSize = createParams->size;
    createExtHandle = createParams->handle;
    createExtExtensions = createParams->extensions;
    ioctlExp_cnt.gemCreateExt++;
    return 0;
}
|
/**
 * Add a Favorite to the database.
 * @param surfer The Surfer that is becoming a Favorite.
 * @param userInfo The UserInfo making a Favorite.
 */
public static void add(Surfer surfer, UserInfo userInfo) {
    // Both sides of the relation are required; silently ignore otherwise.
    if (surfer == null || userInfo == null) {
        return;
    }
    new Favorite(surfer, userInfo).save();
} |
'''
@author: <NAME>
@summary: A module to perform batch request processing.
'''
from __future__ import absolute_import, unicode_literals
import json
import logging
from datetime import datetime
import six
from concurrent.futures import TimeoutError
from django.conf import settings
from django.http.response import (
HttpResponse,
HttpResponseBadRequest,
HttpResponseNotFound,
HttpResponseServerError,
)
from django.urls import resolve
from django.urls.exceptions import Resolver404
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_http_methods
from batch_requests.exceptions import BadBatchRequest
from batch_requests.settings import br_settings as _settings
from batch_requests.utils import get_wsgi_request_object
log = logging.getLogger(__name__)

# Name of the response header carrying request/batch processing duration.
DURATION_HEADER_NAME = _settings.DURATION_HEADER_NAME

# Reason phrases for status codes some Python stdlib versions do not know.
UNKNOWN_STATUSES = {
    207: "Multiple Statuses",
    429: "Too Many Requests",
}

# HTTP methods accepted for the individual sub-requests of a batch.
VALID_HTTP_METHODS = {
    "GET", "POST", "PUT", "PATCH", "DELETE", "HEAD", "OPTIONS",
    "CONNECT", "TRACE"
}
class BytesEncoder(json.JSONEncoder):
    """JSON encoder that transparently decodes byte strings as UTF-8 text."""

    def default(self, obj):
        # Anything that is not a byte string gets the normal treatment.
        if not isinstance(obj, six.binary_type):
            return super(BytesEncoder, self).default(obj)
        return obj.decode("utf-8")
class SetEncoder(json.JSONEncoder):
    """JSON encoder that serialises sets and frozensets as JSON arrays."""

    def default(self, obj):
        # JSON has no set type; emit any set-like value as a list.
        if not isinstance(obj, (set, frozenset)):
            return super(SetEncoder, self).default(obj)
        return list(obj)
def timeout_result_handler(future, timeout=None):
    """Resolve a future's result, converting a timeout into a 408 response dict."""
    try:
        return future.result(timeout=timeout)
    except TimeoutError:
        # The sub-request did not finish in time: synthesise a Request Timeout.
        return {
            "status_code": 408,
            "reason_phrase": "Request Timeout",
            "body": "",
            "headers": {},
        }
def handle_sub_response_body(response):
    """Extract a sub-response body, optionally deserialising JSON content.

    Prefers the template-rendered content when present; falls back to the raw
    content. When DESERIALIZE_RESPONSES is enabled, JSON bodies are returned
    as Python objects and non-JSON bodies are returned unchanged.
    """
    body = getattr(response, "rendered_content", None) or response.content
    if not (body and _settings.DESERIALIZE_RESPONSES):
        return body
    try:
        return json.loads(body)
    except ValueError:
        # Not JSON — hand back the raw body.
        return body
def handle_sub_reason_phrase(response, unknown=UNKNOWN_STATUSES):
    """Reason phrase for a sub-response, preferring the override table.

    Statuses missing from `unknown` (or mapped to a falsy phrase) fall back
    to the response's own reason_phrase.
    """
    return unknown.get(response.status_code) or response.reason_phrase
def construct_duration_header(duration):
    """Convert a timedelta into fractional seconds for the duration header.

    Uses total_seconds() so durations spanning a day or more are not
    truncated (the previous seconds + microseconds arithmetic ignored
    the .days component of the timedelta).
    """
    return duration.total_seconds()
def add_duration_header(resp, start, end):
    # Record the elapsed time (fractional seconds) in the sub-response dict.
    resp["headers"][DURATION_HEADER_NAME] = construct_duration_header(end - start)
def get_response(wsgi_request):
    '''
    Given a WSGI request, makes a call to a corresponding view
    function and returns the response as a plain dict with
    status_code, reason_phrase, headers and body keys.
    '''
    resp = None
    service_start_time = datetime.now()
    # Get the view / handler for this request
    try:
        view, args, kwargs = resolve(wsgi_request.path_info)
    except Resolver404:
        resp = HttpResponseNotFound()
    if resp is None:
        kwargs.update({"request": wsgi_request})
        # Let the view do its task; surface any failure as a 500 so one bad
        # sub-request cannot abort the whole batch.
        try:
            resp = view(*args, **kwargs)
        except Exception as exc:
            resp = HttpResponseServerError(content=str(exc))
    # Older Django stores headers in the private _headers dict
    # ({lookup_key: (Name, value)}); newer versions expose resp.headers.
    if hasattr(resp, '_headers'):
        # BUG FIX: a stray trailing comma here previously turned `headers`
        # into a 1-tuple containing the dict rather than the dict itself.
        headers = {k: v for k, v in six.itervalues(resp._headers)}
    else:
        headers = {k: v for k, v in resp.headers.items()}
    # Convert HTTP response into simple dict type.
    d_resp = {
        "status_code": resp.status_code,
        "reason_phrase": handle_sub_reason_phrase(resp),
        "headers": headers,
        "body": handle_sub_response_body(resp),
    }
    # Check if we need to send across the duration header.
    if _settings.ADD_DURATION_HEADER:
        add_duration_header(d_resp, service_start_time, datetime.now())
    return d_resp
def get_wsgi_requests(request):
    '''
    For the given batch request, extract the individual requests and create
    WSGIRequest object for each.

    Returns a generator of WSGIRequest objects. Raises BadBatchRequest when
    the payload is not a JSON list or exceeds the configured MAX_LIMIT.
    '''
    body = request.body.decode('utf-8')
    requests = json.loads(body)
    if not isinstance(requests, (list, tuple)):
        raise BadBatchRequest("The body of batch request should always be list!")
    if len(requests) > _settings.MAX_LIMIT:
        raise BadBatchRequest("You can batch maximum of %d requests." % (_settings.MAX_LIMIT))
    # We could mutate the current request with the respective parameters, but
    # mutation is ghost in the dark, so lets avoid. Construct the new WSGI
    # request object for each request.
    def construct_wsgi_from_data(data, valid_http_methods=VALID_HTTP_METHODS):
        '''
        Given the data in the format of url, method, body and headers, construct a new
        WSGIRequest object.
        '''
        url = data.get("url", None)
        method = data.get("method", None)
        if url is None or method is None:
            raise BadBatchRequest("Request definition should have url, method defined.")
        method = method.upper()
        if method not in valid_http_methods:
            raise BadBatchRequest("Invalid request method.")
        # support singly/doubly encoded JSON
        body = data.get("body", "")
        if isinstance(body, dict):
            body = json.dumps(body, cls=BytesEncoder)
        headers = data.get("headers", {})
        return get_wsgi_request_object(request, method, url, headers, body)
    # Lazily construct the per-request objects (generator, not a list).
    return (construct_wsgi_from_data(data) for data in requests)
def execute_requests(wsgi_requests):
    '''
    Execute the requests either sequentially or in parallel based on parallel
    execution setting.

    Delegates to the configured executor; per-request timeouts are converted
    into 408 responses by timeout_result_handler.
    '''
    return _settings.executor.execute(
        wsgi_requests, get_response, result_handler=timeout_result_handler
    )
@csrf_exempt
@require_http_methods(["POST"])
def handle_batch_requests(request, *args, **kwargs):
    '''
    A view function to handle the overall processing of batch requests.

    The POST body must be a JSON list of request definitions; each is
    executed and the combined responses are returned as one JSON list.
    '''
    try:
        # Get the Individual WSGI requests.
        wsgi_requests = get_wsgi_requests(request)
    except BadBatchRequest as brx:
        return HttpResponseBadRequest(content=six.text_type(brx))
    batch_start_time = datetime.now()
    # Fire these WSGI requests, and collect the response for the same.
    try:
        response = execute_requests(wsgi_requests)
    except BadBatchRequest as brx:
        return HttpResponseBadRequest(content=six.text_type(brx))
    batch_end_time = datetime.now()
    BATCH_RESPONSE_STATUS = _settings.BATCH_RESPONSE_STATUS
    # Everything's done, return the response.
    resp_kwargs = {
        "content": json.dumps(response, cls=BytesEncoder),
        "content_type": "application/json",
        "status": BATCH_RESPONSE_STATUS,
    }
    # handle STDLIB unknown reason phrases
    batch_reason = UNKNOWN_STATUSES.get(BATCH_RESPONSE_STATUS, None)
    if batch_reason:
        resp_kwargs["reason"] = batch_reason
    resp = HttpResponse(**resp_kwargs)
    if _settings.DISALLOW_CACHING:
        resp["Cache-Control"] = "Private"
    if _settings.ADD_DURATION_HEADER:
        resp[DURATION_HEADER_NAME] = construct_duration_header(batch_end_time - batch_start_time)
    if settings.DEBUG:
        # Expose the effective settings for debugging, sets serialised as lists.
        resp[_settings.DEBUG_HEADER_NAME] = json.dumps(_settings.as_dict(), cls=SetEncoder)
    return resp
|
//TODO: implement for m>1 in terms of random gates
// Build an MPS in the given product state and randomize its site tensors.
// Only bond dimension m == 1 is currently supported; larger m would require
// applying random gates to generate entanglement.
MPS
randomMPS(InitState const& initstate, int m, Args const& args)
{
    if(m>1) Error("randomMPS(InitState,m>1) not currently supported.");
    auto psi = MPS(initstate);
    psi.randomize(args);
    return psi;
} |
Research on Natural Characteristics of Helical Gear in Gearbox for Wind Turbine Generator
The three-dimensional helical gear model based on the geometric parameters is created by using software called PROE3.0. The natural characteristics of helical gear are analyzed by using the finite element method. The natural frequency of the helical gear is calculated by the software ANSYS and the principal mode of vibration is discussed. There are four vibration modes, which are circular vibration, torsional vibration, radial vibration and umbrella vibration. The phenomenon of resonance of the helical gear can be avoided by choosing suitable parameters and changing the natural frequency of the helical gear. |
<reponame>skibaa/dust
extern crate ansi_term;
use crate::utils::Node;
use self::ansi_term::Colour::Red;
use lscolors::{LsColors, Style};
use terminal_size::{terminal_size, Height, Width};
use unicode_width::UnicodeWidthStr;
use std::cmp::max;
use std::cmp::min;
use std::fs;
use std::iter::repeat;
use std::path::Path;
// Size-unit suffixes, largest to smallest (terabytes … kilobytes).
static UNITS: [char; 4] = ['T', 'G', 'M', 'K'];
// Shade characters for the percentage bar, darkest (solid) to lightest.
static BLOCKS: [char; 5] = ['█', '▓', '▒', '░', ' '];
// Fallback width when the real terminal size cannot be detected.
static DEFAULT_TERMINAL_WIDTH: u16 = 80;
/// Immutable per-root settings shared by the whole tree-rendering pass.
pub struct DisplayData {
    /// Show only the final path component instead of the full path.
    pub short_paths: bool,
    /// Draw the tree upside down (root line printed last).
    pub is_reversed: bool,
    /// Whether names are colorized via LS_COLORS.
    pub colors_on: bool,
    /// Size of the top-level entry; percent bars are relative to this.
    pub base_size: u64,
    /// Display width of the longest name, used to align the percent bars.
    pub longest_string_length: usize,
    /// Color palette loaded from the LS_COLORS environment variable.
    pub ls_colors: LsColors,
}
impl DisplayData {
    /// Pick the box-drawing prefix for an entry, depending on the tree
    /// direction (reversed grows upwards), whether this entry is the last
    /// sibling, and whether it has children to connect to.
    #[allow(clippy::collapsible_if)]
    fn get_tree_chars(&self, was_i_last: bool, has_children: bool) -> &'static str {
        if self.is_reversed {
            if was_i_last {
                if has_children {
                    "┌─┴"
                } else {
                    "┌──"
                }
            } else if has_children {
                "├─┴"
            } else {
                "├──"
            }
        } else {
            if was_i_last {
                if has_children {
                    "└─┬"
                } else {
                    "└──"
                }
            } else if has_children {
                "├─┬"
            } else {
                "├──"
            }
        }
    }

    /// True when the sibling at this position is the largest displayed:
    /// position 0 normally, the final position when the order is reversed.
    fn is_biggest(&self, num_siblings: usize, max_siblings: u64) -> bool {
        if self.is_reversed {
            num_siblings == (max_siblings - 1) as usize
        } else {
            num_siblings == 0
        }
    }

    /// True when the sibling at this position is drawn last (mirror of
    /// `is_biggest` with respect to the reversed flag).
    fn is_last(&self, num_siblings: usize, max_siblings: u64) -> bool {
        if self.is_reversed {
            num_siblings == 0
        } else {
            num_siblings == (max_siblings - 1) as usize
        }
    }

    /// Fraction of the base (root) size that `node` occupies; 0.0 when the
    /// division is not a normal float (e.g. base_size is zero).
    fn percent_size(&self, node: &Node) -> f32 {
        let result = node.size as f32 / self.base_size as f32;
        if result.is_normal() {
            result
        } else {
            0.0
        }
    }
}
/// Iterate a node's children, reversing the order when the tree is drawn
/// upside down so the biggest entry still appears in the right place.
fn get_children_from_node(node: Node, is_reversed: bool) -> impl Iterator<Item = Node> {
    if is_reversed {
        // Collect so both branches return the same concrete iterator type.
        // (The original `.rev().map(|a| a)` identity map was redundant.)
        let reversed: Vec<Node> = node.children.into_iter().rev().collect();
        reversed.into_iter()
    } else {
        node.children.into_iter()
    }
}
/// Per-level state threaded through the recursive tree drawing.
struct DrawData<'a> {
    /// Accumulated indentation / tree characters for the current depth.
    indent: String,
    /// The parent's percent bar; children progressively dim portions of it.
    percent_bar: String,
    /// Shared, immutable display settings.
    display_data: &'a DisplayData,
}
impl DrawData<'_> {
    /// Extend the current indent with the tree characters for a child row.
    fn get_new_indent(&self, has_children: bool, was_i_last: bool) -> String {
        let chars = self.display_data.get_tree_chars(was_i_last, has_children);
        self.indent.to_string() + chars
    }

    // TODO: can we test this?
    /// Redraw the parent's percent bar for `node`: the rightmost portion
    /// (proportional to the node's share of the base size) stays solid,
    /// while the rest of the parent's solid area is dimmed one shade per
    /// tree level. Already-dimmed characters are kept as-is.
    fn generate_bar(&self, node: &Node, level: usize) -> String {
        let chars_in_bar = self.percent_bar.chars().count();
        let num_bars = chars_in_bar as f32 * self.display_data.percent_size(node);
        let mut num_not_my_bar = (chars_in_bar as i32) - num_bars as i32;
        let mut new_bar = "".to_string();
        // Deeper levels pick a lighter shade, clamped to the BLOCKS range.
        let idx = 5 - min(4, max(1, level));
        for c in self.percent_bar.chars() {
            num_not_my_bar -= 1;
            if num_not_my_bar <= 0 {
                new_bar.push(BLOCKS[0]);
            } else if c == BLOCKS[0] {
                new_bar.push(BLOCKS[idx]);
            } else {
                new_bar.push(c);
            }
        }
        new_bar
    }
}
/// Terminal width to render into, never smaller than DEFAULT_TERMINAL_WIDTH.
fn get_width_of_terminal() -> u16 {
    // Windows CI runners detect a very low terminal width
    match terminal_size() {
        Some((Width(width), Height(_))) => max(width, DEFAULT_TERMINAL_WIDTH),
        None => DEFAULT_TERMINAL_WIDTH,
    }
}
/// Entry point: render the whole tree to stdout.
///
/// Each top-level child of `root_node` gets its own percent bar scaled to
/// its size; `is_reversed` makes the tree grow upwards instead of down.
pub fn draw_it(
    permissions: bool,
    use_full_path: bool,
    is_reversed: bool,
    no_colors: bool,
    no_percents: bool,
    root_node: Node,
) {
    if !permissions {
        eprintln!("Did not have permissions for all directories");
    }
    // 14 columns are reserved for the size column and surrounding spacing.
    let terminal_width = (get_width_of_terminal() - 14) as usize;
    let num_indent_chars = 3;
    // Widest name (plus indentation) across all subtrees → alignment width.
    let longest_string_length = root_node
        .children
        .iter()
        .map(|c| find_longest_dir_name(&c, num_indent_chars, terminal_width, !use_full_path))
        .fold(0, max);
    // Bars are disabled entirely when unwanted or when there is no room.
    let max_bar_length = if no_percents || longest_string_length >= terminal_width as usize {
        0
    } else {
        terminal_width as usize - longest_string_length
    };
    let first_size_bar = repeat(BLOCKS[0]).take(max_bar_length).collect::<String>();
    for c in get_children_from_node(root_node, is_reversed) {
        let display_data = DisplayData {
            short_paths: !use_full_path,
            is_reversed,
            colors_on: !no_colors,
            base_size: c.size,
            longest_string_length,
            ls_colors: LsColors::from_env().unwrap_or_default(),
        };
        let draw_data = DrawData {
            indent: "".to_string(),
            percent_bar: first_size_bar.clone(),
            display_data: &display_data,
        };
        display_node(c, &draw_data, true, true);
    }
}
/// Widest printable name (display width + 1 + indent) in this subtree,
/// capped at the terminal width; used to align all percent bars.
fn find_longest_dir_name(node: &Node, indent: usize, terminal: usize, long_paths: bool) -> usize {
    let printable_name = get_printable_name(&node.name, long_paths);
    let longest = min(
        UnicodeWidthStr::width(&*printable_name) + 1 + indent,
        terminal,
    );
    // each none root tree drawing is 2 more chars, hence we increment indent by 2
    node.children
        .iter()
        .map(|c| find_longest_dir_name(c, indent + 2, terminal, long_paths))
        .fold(longest, max)
}
/// Print `node` and, recursively, all of its children. In reversed mode the
/// node's own line is printed after its children so the tree grows upwards.
fn display_node(node: Node, draw_data: &DrawData, is_biggest: bool, is_last: bool) {
    // hacky way of working out how deep we are in the tree
    let indent = draw_data.get_new_indent(!node.children.is_empty(), is_last);
    let level = ((indent.chars().count() - 1) / 2) - 1;
    let bar_text = draw_data.generate_bar(&node, level);
    let to_print = format_string(
        &node,
        &*indent,
        &*bar_text,
        is_biggest,
        draw_data.display_data,
    );
    if !draw_data.display_data.is_reversed {
        println!("{}", to_print)
    }
    // Children inherit a cleaned indent (tree junctions become plain bars)
    // and this node's bar, which they will dim further.
    let dd = DrawData {
        indent: clean_indentation_string(&*indent),
        percent_bar: bar_text,
        display_data: draw_data.display_data,
    };
    let num_siblings = node.children.len() as u64;
    for (count, c) in get_children_from_node(node, draw_data.display_data.is_reversed).enumerate() {
        let is_biggest = dd.display_data.is_biggest(count, num_siblings);
        let was_i_last = dd.display_data.is_last(count, num_siblings);
        display_node(c, &dd, is_biggest, was_i_last);
    }
    if draw_data.display_data.is_reversed {
        println!("{}", to_print)
    }
}
/// Turn the current row's tree junction characters into the plain vertical
/// bars / blanks that its children's rows should be prefixed with.
/// NOTE: replacement order matters — the 3-char junctions must be handled
/// before their 2-char suffixes.
fn clean_indentation_string(s: &str) -> String {
    let mut is: String = s.into();
    // For reversed:
    is = is.replace("┌─┴", "  ");
    is = is.replace("┌──", "  ");
    is = is.replace("├─┴", "│ ");
    is = is.replace("─┴", " ");
    // For normal
    is = is.replace("└─┬", "  ");
    is = is.replace("└──", "  ");
    is = is.replace("├─┬", "│ ");
    is = is.replace("─┬", " ");
    // For both
    is = is.replace("├──", "│ ");
    is
}
/// Returns the name to display for `dir_name`.
///
/// When `short_paths` is true only the final path component is shown
/// (falling back to the full path when there is no parent to strip);
/// otherwise the whole path is displayed.
///
/// Fix: the parameter was previously named `long_paths`, yet every call
/// site passes `display_data.short_paths` and a true value *shortens*
/// the name — renamed to match its actual meaning.
fn get_printable_name<P: AsRef<Path>>(dir_name: &P, short_paths: bool) -> String {
    let dir_name = dir_name.as_ref();
    let printable_name = {
        if short_paths {
            match dir_name.parent() {
                Some(prefix) => match dir_name.strip_prefix(prefix) {
                    Ok(base) => base,
                    Err(_) => dir_name,
                },
                None => dir_name,
            }
        } else {
            dir_name
        }
    };
    printable_name.display().to_string()
}
/// Pads the node's display name with spaces so that (indent + name)
/// reaches `longest_string_length` columns, keeping the percent bars
/// aligned, then trims it if it is still too wide.
fn pad_or_trim_filename(node: &Node, indent: &str, display_data: &DisplayData) -> String {
    let name = get_printable_name(&node.name, display_data.short_paths);
    let indent_and_name = format!("{} {}", indent, name);
    let width = UnicodeWidthStr::width(&*indent_and_name);
    // Add spaces after the filename so we can draw the % used bar chart.
    // saturating_sub: if the row is already wider than the target, add no
    // padding (the plain subtraction would panic on underflow) and let
    // maybe_trim_filename shorten the name instead.
    let padding = display_data.longest_string_length.saturating_sub(width);
    let name_and_padding = name + &" ".repeat(padding);
    maybe_trim_filename(name_and_padding, display_data)
}
/// Truncates `name_in` when it exceeds `longest_string_length` display
/// columns, replacing the tail with "..".
///
/// NOTE(review): the check measures display *width* but the truncation
/// counts *chars*, so wide (e.g. CJK) characters may still overflow the
/// target width — confirm whether that is intended.
fn maybe_trim_filename(name_in: String, display_data: &DisplayData) -> String {
    if UnicodeWidthStr::width(&*name_in) > display_data.longest_string_length {
        // saturating_sub guards against a target length < 2, which would
        // otherwise panic on underflow.
        let name = name_in
            .chars()
            .take(display_data.longest_string_length.saturating_sub(2))
            .collect::<String>();
        name + ".."
    } else {
        name_in
    }
}
/// Builds one complete output row for `node`: the human-readable size,
/// the tree indent, the (possibly padded) name, and the percent column.
pub fn format_string(
    node: &Node,
    indent: &str,
    percent_bar: &str,
    is_biggest: bool,
    display_data: &DisplayData,
) -> String {
    let (percents, name_and_padding) = get_name_percent(node, indent, percent_bar, display_data);
    let size_column = get_pretty_size(node, is_biggest, display_data);
    let name_column = get_pretty_name(node, name_and_padding, display_data);
    format!("{} {} {}{}", size_column, indent, name_column, percents)
}
/// Builds the percent column and the (possibly padded) name for a row.
///
/// When a percent bar is supplied the name is padded/trimmed to a fixed
/// width so the bars line up; otherwise the percent column is empty and
/// only the (possibly trimmed) name is produced.
fn get_name_percent(
    node: &Node,
    indent: &str,
    bar_chart: &str,
    display_data: &DisplayData,
) -> (String, String) {
    // Idiomatic emptiness check instead of comparing against "".
    if !bar_chart.is_empty() {
        let percent_size_str = format!("{:.0}%", display_data.percent_size(node) * 100.0);
        let percents = format!("│{} │ {:>4}", bar_chart, percent_size_str);
        let name_and_padding = pad_or_trim_filename(node, indent, display_data);
        (percents, name_and_padding)
    } else {
        let n = get_printable_name(&node.name, display_data.short_paths);
        let name = maybe_trim_filename(n, display_data);
        ("".into(), name)
    }
}
/// Formats the node's size right-aligned in 5 columns, painted red when
/// it is the biggest of its siblings and colors are enabled.
fn get_pretty_size(node: &Node, is_biggest: bool, display_data: &DisplayData) -> String {
    let size_text = format!("{:>5}", human_readable_number(node.size));
    match (is_biggest, display_data.colors_on) {
        (true, true) => format!("{}", Red.paint(size_text)),
        _ => size_text,
    }
}
/// Applies LS_COLORS styling to the (already padded) name when colors
/// are enabled; otherwise returns it untouched.
fn get_pretty_name(node: &Node, name_and_padding: String, display_data: &DisplayData) -> String {
    if !display_data.colors_on {
        return name_and_padding;
    }
    // Metadata is best-effort: a failed stat simply means no metadata-based
    // styling is applied.
    let metadata = fs::metadata(node.name.clone());
    let ansi_style = display_data
        .ls_colors
        .style_for_path_with_metadata(node.name.clone(), metadata.as_ref().ok())
        .map(Style::to_ansi_term_style)
        .unwrap_or_default();
    format!("{}", ansi_style.paint(name_and_padding))
}
/// Renders a byte count using the largest unit in UNITS that fits:
/// one decimal place below 10 of that unit, whole numbers above,
/// and plain "<n>B" below the smallest unit.
fn human_readable_number(size: u64) -> String {
    for (i, unit) in UNITS.iter().enumerate() {
        let threshold = 1024u64.pow((UNITS.len() - i) as u32);
        if size >= threshold {
            return if size / threshold < 10 {
                format!("{:.1}{}", size as f32 / threshold as f32, unit)
            } else {
                format!("{}{}", size / threshold, unit)
            };
        }
    }
    format!("{}B", size)
}
// NOTE(review): this module is not gated with #[cfg(test)] (only the helper
// below is); the #[allow(unused_imports)] attributes suggest it is compiled
// into non-test builds too — consider adding #[cfg(test)] on the module.
mod tests {
    #[allow(unused_imports)]
    use super::*;
    #[allow(unused_imports)]
    use std::path::PathBuf;
    // Builds a DisplayData with colors off and a fixed target width, so
    // format_string output is deterministic in the assertions below.
    #[cfg(test)]
    fn get_fake_display_data(longest_string_length: usize) -> DisplayData {
        DisplayData {
            short_paths: true,
            is_reversed: false,
            colors_on: false,
            base_size: 1,
            longest_string_length: longest_string_length,
            ls_colors: LsColors::from_env().unwrap_or_default(),
        }
    }
    // With an empty percent bar, the row is "<size> <indent> <name>".
    #[test]
    fn test_format_str() {
        let n = Node {
            name: PathBuf::from("/short"),
            size: 2_u64.pow(12), // This is 4.0K
            children: vec![],
        };
        let indent = "┌─┴";
        let percent_bar = "";
        let is_biggest = false;
        let s = format_string(
            &n,
            indent,
            percent_bar,
            is_biggest,
            &get_fake_display_data(6),
        );
        assert_eq!(s, " 4.0K ┌─┴ short");
    }
    // Names wider than the target width are truncated and suffixed with "..".
    #[test]
    fn test_format_str_long_name() {
        let name = "very_long_name_longer_than_the_eighty_character_limit_very_long_name_this_bit_will_truncate";
        let n = Node {
            name: PathBuf::from(name),
            size: 2_u64.pow(12), // This is 4.0K
            children: vec![],
        };
        let indent = "┌─┴";
        let percent_bar = "";
        let is_biggest = false;
        let dd = get_fake_display_data(64);
        let s = format_string(&n, indent, percent_bar, is_biggest, &dd);
        assert_eq!(
            s,
            " 4.0K ┌─┴ very_long_name_longer_than_the_eighty_character_limit_very_lon.."
        );
    }
    // Covers the three format branches: plain bytes, one-decimal (<10 units),
    // and whole-number (>=10 units) across K/M/G/T.
    #[test]
    fn test_human_readable_number() {
        assert_eq!(human_readable_number(1), "1B");
        assert_eq!(human_readable_number(956), "956B");
        assert_eq!(human_readable_number(1004), "1004B");
        assert_eq!(human_readable_number(1024), "1.0K");
        assert_eq!(human_readable_number(1536), "1.5K");
        assert_eq!(human_readable_number(1024 * 512), "512K");
        assert_eq!(human_readable_number(1024 * 1024), "1.0M");
        assert_eq!(human_readable_number(1024 * 1024 * 1024 - 1), "1023M");
        assert_eq!(human_readable_number(1024 * 1024 * 1024 * 20), "20G");
        assert_eq!(human_readable_number(1024 * 1024 * 1024 * 1024), "1.0T");
    }
}
|
Dennis McGuire took 15 minutes to die by lethal injection Thursday morning at the Southern Ohio Correctional Facility in Lucasville for the 1989 rape and murder of a 22-year-old pregnant woman named Joy Stewart.
Eyewitness accounts differ slightly on how much Mr. McGuire, 53, struggled and gasped in those final minutes. But because the execution took unusually long and because Ohio was using a new, untested cocktail of drugs in the procedure, the episode has reignited debate over lethal injection.
States have been scrambling in recent years to come up with a new formula for executions after their stockpiles were depleted or expired when European manufacturers of such previously used drugs as pentobarbital and sodium thiopental stopped selling them for use in executions. No consensus has formed on what available drugs should be used.
Mr. McGuire was given midazolam, a sedative, and hydromorphone, a powerful analgesic derived from morphine, just before 10:30 a.m. on Thursday, the first time that any state has used that combination. The drugs were selected by the Ohio Department of Rehabilitation and Correction after the state’s supply of pentobarbital expired in 2009, said JoEllen Smith, the department’s spokeswoman. A federal court had approved their use, she said. |
<filename>bridges/stripe/stripe.go
package stripe
// TODO: Implementing stripe bridge
|
/**
* Summary of a cluster which does not include all the related information, such as: spectrum or PSM
*
* @author Rui Wang
* @version $Id$
*/
public class ClusterSummary {
private Long id;
private String uuid;
private float averagePrecursorMz;
private int averagePrecursorCharge;
private String consensusSpectrumMz;
private String consensusSpectrumIntensity;
private int numberOfSpectra;
private int totalNumberOfSpectra;
private int numberOfPSMs;
private int totalNumberOfPSMs;
private float maxPeptideRatio;
private int numberOfProjects;
private int totalNumberOfProjects;
private int numberOfSpecies;
private int totalNumberOfSpecies;
private int numberOfModifications;
private int totalNumberOfModifications;
private ClusterQuality quality;
private String annotation;
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
public String getUUID() {
return uuid;
}
public void setUUID(String uuid) {
this.uuid = uuid;
}
public float getAveragePrecursorMz() {
return averagePrecursorMz;
}
public void setAveragePrecursorMz(float averagePrecursorMz) {
this.averagePrecursorMz = averagePrecursorMz;
}
public int getAveragePrecursorCharge() {
return averagePrecursorCharge;
}
public void setAveragePrecursorCharge(int averagePrecursorCharge) {
this.averagePrecursorCharge = averagePrecursorCharge;
}
public String getConsensusSpectrumMz() {
return consensusSpectrumMz;
}
public void setConsensusSpectrumMz(String consensusSpectrumMz) {
this.consensusSpectrumMz = consensusSpectrumMz;
}
public String getConsensusSpectrumIntensity() {
return consensusSpectrumIntensity;
}
public void setConsensusSpectrumIntensity(String consensusSpectrumIntensity) {
this.consensusSpectrumIntensity = consensusSpectrumIntensity;
}
public int getNumberOfSpectra() {
return numberOfSpectra;
}
public void setNumberOfSpectra(int numberOfSpectra) {
this.numberOfSpectra = numberOfSpectra;
}
public int getNumberOfPSMs() {
return numberOfPSMs;
}
public void setNumberOfPSMs(int numberOfPSMs) {
this.numberOfPSMs = numberOfPSMs;
}
public int getTotalNumberOfPSMs() {
return totalNumberOfPSMs;
}
public void setTotalNumberOfPSMs(int totalNumberOfPSMs) {
this.totalNumberOfPSMs = totalNumberOfPSMs;
}
public int getTotalNumberOfSpectra() {
return totalNumberOfSpectra;
}
public void setTotalNumberOfSpectra(int totalNumberOfSpectra) {
this.totalNumberOfSpectra = totalNumberOfSpectra;
}
public float getMaxPeptideRatio() {
return maxPeptideRatio;
}
public void setMaxPeptideRatio(float maxPeptideRatio) {
this.maxPeptideRatio = maxPeptideRatio;
}
public int getNumberOfProjects() {
return numberOfProjects;
}
public void setNumberOfProjects(int numberOfProjects) {
this.numberOfProjects = numberOfProjects;
}
public int getTotalNumberOfProjects() {
return totalNumberOfProjects;
}
public void setTotalNumberOfProjects(int totalNumberOfProjects) {
this.totalNumberOfProjects = totalNumberOfProjects;
}
public int getNumberOfSpecies() {
return numberOfSpecies;
}
public void setNumberOfSpecies(int numberOfSpecies) {
this.numberOfSpecies = numberOfSpecies;
}
public int getTotalNumberOfSpecies() {
return totalNumberOfSpecies;
}
public void setTotalNumberOfSpecies(int totalNumberOfSpecies) {
this.totalNumberOfSpecies = totalNumberOfSpecies;
}
public int getNumberOfModifications() {
return numberOfModifications;
}
public void setNumberOfModifications(int numberOfModifications) {
this.numberOfModifications = numberOfModifications;
}
public int getTotalNumberOfModifications() {
return totalNumberOfModifications;
}
public void setTotalNumberOfModifications(int totalNumberOfModifications) {
this.totalNumberOfModifications = totalNumberOfModifications;
}
public ClusterQuality getQuality() {
return quality;
}
public void setQuality(ClusterQuality quality) {
this.quality = quality;
}
public String getAnnotation() {
return annotation;
}
public void setAnnotation(String annotation) {
this.annotation = annotation;
}
@Override
public String toString() {
return "ClusterSummary{" +
"uuid='" + uuid + '\'' +
", averagePrecursorMz=" + averagePrecursorMz +
", averagePrecursorCharge=" + averagePrecursorCharge +
", numberOfSpectra=" + numberOfSpectra +
", maxPeptideRatio=" + maxPeptideRatio +
", numberOfProjects=" + numberOfProjects +
", quality=" + quality +
", annotation=" + annotation +
'}';
}
} |
def exit_clean(self):
    """Remove the temporary pbtxt folder and the destination model folder.

    # NOTE(review): assumes _delete_folder tolerates already-missing paths
    # — confirm against its implementation.
    """
    folders_to_remove = (
        AutoFlipConf.TMP_PBTXT_FOLDER_PATH,
        self.DESTINATION_MODEL_FOLDER_LOCATION,
    )
    for folder in folders_to_remove:
        self._delete_folder(folder)
def _null_wrap_accumulate_block(
    ignore_nulls: bool,
    accum_block: Callable[[Block[T]], AggType],
    null_merge: Callable[[WrappedAggType, WrappedAggType], WrappedAggType],
) -> Callable[[WrappedAggType, Block[T]], WrappedAggType]:
    """Wrap a whole-block accumulator with null handling.

    Fix: ``accum_block`` was annotated as taking ``(AggType, Block[T])``,
    but it is invoked below as ``accum_block(block)`` — the annotation is
    corrected to match the actual call.

    Args:
        ignore_nulls: If True, a block that accumulates to None is treated
            as empty and the running accumulation is kept unchanged;
            otherwise the None result is merged, letting ``null_merge``
            poison the accumulation.
        accum_block: Accumulates an entire block to a single value, or
            returns None when the block cannot be accumulated.
        null_merge: Merges two wrapped accumulations with null handling.

    Returns:
        A null-aware ``(accumulation, block) -> accumulation`` function.
    """

    def _accum_block_null(a: WrappedAggType, block: Block[T]) -> WrappedAggType:
        ret = accum_block(block)
        if ret is not None:
            # Real value: mark the wrapped accumulation as containing data.
            ret = _wrap_acc(ret, has_data=True)
        elif ignore_nulls:
            # Null result and nulls are ignored: keep the prior accumulation.
            ret = a
        return null_merge(a, ret)

    return _accum_block_null
/**
 * {@link ManagedFunction} to invoke a {@link Method}.
 *
 * @author Daniel Sagenschneider
 */
public class MethodFunction implements ManagedFunction<Indexed, Indexed> {
    /**
     * Invokes the {@link Method} as the {@link ManagedFunction} directly on the
     * {@link Object}.
     *
     * <p>On {@link InvocationTargetException} the underlying cause is rethrown,
     * so callers see the function's own failure rather than the reflection
     * wrapper. On {@link IllegalArgumentException} a detailed message listing
     * expected parameter types and actual argument types is produced.
     *
     * @param instance Instance. May be <code>null</code> if static
     *                 {@link Method}.
     * @param method {@link Method}.
     * @param parameters Parameters.
     * @return {@link Method} return value.
     * @throws Throwable Failure invoking the {@link Method}.
     */
    public static Object invokeMethod(Object instance, Method method, Object[] parameters) throws Throwable {
        // Invoke the function
        try {
            return method.invoke(instance, parameters);
        } catch (InvocationTargetException ex) {
            // Propagate failure of function
            throw ex.getCause();
        } catch (IllegalArgumentException ex) {
            // Provide detail of illegal argument
            StringBuilder message = new StringBuilder();
            message.append("Function failure invoking ");
            message.append(method.getName());
            message.append("(");
            boolean isFirst = true;
            // List the declared parameter types of the method
            for (Class<?> parameterType : method.getParameterTypes()) {
                if (isFirst) {
                    isFirst = false;
                } else {
                    message.append(", ");
                }
                message.append(parameterType.getName());
            }
            message.append(") with arguments ");
            isFirst = true;
            // List the runtime types of the actual arguments supplied
            for (Object parameter : parameters) {
                if (isFirst) {
                    isFirst = false;
                } else {
                    message.append(", ");
                }
                message.append(parameter == null ? "null" : parameter.getClass().getName());
            }
            // Propagate illegal argument issue
            throw new IllegalArgumentException(message.toString());
        }
    }
    /**
     * {@link MethodObjectFactory}. Will be <code>null</code> if static
     * {@link Method}.
     */
    private final MethodObjectFactory methodObjectInstanceFactory;
    /**
     * Method to invoke for this {@link ManagedFunction}.
     */
    private final Method method;
    /**
     * {@link ClassDependencyFactory} instances.
     */
    private final ClassDependencyFactory[] parameterFactories;
    /**
     * {@link MethodReturnTranslator} or <code>null</code>.
     */
    private final MethodReturnTranslator<Object, Object> returnTranslator;
    /**
     * Initiate.
     *
     * @param methodObjectInstanceFactory {@link MethodObjectFactory}. Will
     *                                    be <code>null</code> if static
     *                                    {@link Method}.
     * @param method Method to invoke for this
     *                                    {@link ManagedFunction}.
     * @param parameterFactories {@link ClassDependencyFactory} instances.
     * @param returnTranslator {@link MethodReturnTranslator} or
     *                                    <code>null</code>.
     */
    public MethodFunction(MethodObjectFactory methodObjectInstanceFactory, Method method,
            ClassDependencyFactory[] parameterFactories, MethodReturnTranslator<Object, Object> returnTranslator) {
        this.method = method;
        this.methodObjectInstanceFactory = methodObjectInstanceFactory;
        this.parameterFactories = parameterFactories;
        this.returnTranslator = returnTranslator;
    }
    /**
     * Returns the {@link Method} for the {@link ManagedFunction}.
     *
     * @return {@link Method} for the {@link ManagedFunction}.
     */
    public Method getMethod() {
        return this.method;
    }
    /*
     * ========================= ManagedFunction =========================
     */
    @Override
    public void execute(ManagedFunctionContext<Indexed, Indexed> context) throws Throwable {
        // Obtain the instance to invoke the method on (null if static method)
        Object instance = (this.methodObjectInstanceFactory == null) ? null
                : this.methodObjectInstanceFactory.createInstance(context);
        // May inject context, so need to wrap translating next value
        ManagedFunctionContext<Indexed, Indexed> runContext = (this.returnTranslator != null)
                ? new TranslateManagedFunctionContext(context)
                : context;
        // Create the listing of parameters
        Object[] params = new Object[this.parameterFactories.length];
        for (int i = 0; i < params.length; i++) {
            params[i] = this.parameterFactories[i].createDependency(runContext);
        }
        // Invoke the method as the function
        Object returnValue = invokeMethod(instance, this.method, params);
        // Determine if translate return value
        // (a null return is never forwarded as the next function argument)
        if (returnValue != null) {
            runContext.setNextFunctionArgument(returnValue);
        }
    }
    /**
     * {@link ManagedFunctionContext} to translate the return value.
     *
     * <p>All operations delegate unchanged except
     * {@link #setNextFunctionArgument(Object)}, which routes the value
     * through the {@link MethodReturnTranslator} first.
     */
    private class TranslateManagedFunctionContext implements ManagedFunctionContext<Indexed, Indexed> {
        /**
         * {@link ManagedFunctionContext} delegate.
         */
        private final ManagedFunctionContext<Indexed, Indexed> delegate;
        /**
         * Instantiate.
         *
         * @param delegate {@link ManagedFunctionContext} delegate.
         */
        private TranslateManagedFunctionContext(ManagedFunctionContext<Indexed, Indexed> delegate) {
            this.delegate = delegate;
        }
        /*
         * ======================= ManagedFunctionContext ============================
         */
        @Override
        public Logger getLogger() {
            return this.delegate.getLogger();
        }
        @Override
        public void doFlow(Indexed key, Object parameter, FlowCallback callback) {
            this.delegate.doFlow(key, parameter, callback);
        }
        @Override
        public void doFlow(int flowIndex, Object parameter, FlowCallback callback) {
            this.delegate.doFlow(flowIndex, parameter, callback);
        }
        @Override
        public AsynchronousFlow createAsynchronousFlow() {
            return this.delegate.createAsynchronousFlow();
        }
        @Override
        public Executor getExecutor() {
            return this.delegate.getExecutor();
        }
        @Override
        public Object getObject(Indexed key) {
            return this.delegate.getObject(key);
        }
        @Override
        public Object getObject(int dependencyIndex) {
            return this.delegate.getObject(dependencyIndex);
        }
        @Override
        public void doFlow(String functionName, Object parameter, FlowCallback callback)
                throws UnknownFunctionException, InvalidParameterTypeException {
            this.delegate.doFlow(functionName, parameter, callback);
        }
        @Override
        public void setNextFunctionArgument(Object argument) throws Exception {
            // Translate the return value
            // (the translator decides the final value handed to the delegate)
            MethodFunction.this.returnTranslator.translate(new MethodReturnTranslatorContext<Object, Object>() {
                @Override
                public Object getReturnValue() {
                    return argument;
                }
                @Override
                public void setTranslatedReturnValue(Object value) throws Exception {
                    TranslateManagedFunctionContext.this.delegate.setNextFunctionArgument(value);
                }
                @Override
                public ManagedFunctionContext<?, ?> getManagedFunctionContext() {
                    return TranslateManagedFunctionContext.this.delegate;
                }
            });
        }
    }
}
<filename>src/app/model/service/crud-request.ts
/**
 * Generic CRUD request descriptor passed to the backend service layer.
 */
export class CrudRequest {
    /** Name of the target entity/resource. */
    entity: string;
    /** Identifier of the specific record, when addressing a single one. */
    id: string;
    /** Special operation/endpoint selector — semantics defined by the consuming service (confirm). */
    special: string;
    /** Positional parameters forwarded with the request. */
    param: any[];
    /** Request payload/body. */
    data: object;
}
|
def submit_job(self, bundle, job_config=None):
    """Submit a bundle for execution via the wrapped delegator.

    Args:
        bundle: The application bundle to submit.
        job_config: Optional job configuration, forwarded as-is.

    Returns:
        Whatever the delegator's ``_submit_job`` returns (presumably a
        submission result — confirm against the delegator implementation).
    """
    delegator = self._delegator
    return delegator._submit_job(bundle=bundle, job_config=job_config)
Sudden Cardiac Arrest in an Intubated Premature Infant With Cerebellar and Brainstem Injury: Is There a Link?
The ventilated premature infant frequently exhibits unprovoked desaturation episodes accompanied by bradycardia. In most instances, these episodes are short-lived and recover spontaneously or with minimal interventions. However, in some infants these episodes may be more profound and require substantial interventions to restore cardiorespiratory status. Here we present the case of a ventilated premature infant who had experienced prolonged, multiple daily desaturation episodes accompanied by bradycardia that required significant interventions. Postoperatively, after placement of a tracheotomy and despite a patent airway, the infant developed acute bradycardia that progressed rapidly to sudden death. At autopsy, significant cerebellar and brainstem injury was noted. We hypothesize that the specific cerebellum and brainstem injury may have contributed to autonomic dysfunction and sudden death. |
<gh_stars>1-10
package com.redhat.uxl.services.service;
import java.io.IOException;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.joda.time.DateTime;
import org.springframework.transaction.annotation.Transactional;
/**
 * The interface Csv service.
 *
 * <p>Each method streams a CSV report directly to the supplied
 * {@link HttpServletResponse}; nothing is returned. All operations are
 * read-only transactions.
 */
public interface CsvService {
    /**
     * Streams a team-progress CSV, grouped by program, to the response.
     *
     * @param currentUserId id of the user requesting the report
     * @param request the servlet request
     * @param response the servlet response the CSV is written to
     * @throws ServletException the servlet exception
     * @throws IOException the io exception
     */
    @Transactional(readOnly = true)
    void getTeamProgressCSVByProgram(Long currentUserId, HttpServletRequest request, HttpServletResponse response)
            throws ServletException, IOException;
    /**
     * Streams a team-progress CSV for shared programs to the response.
     *
     * @param currentUserId id of the user requesting the report
     * @param response the servlet response the CSV is written to
     * @param search free-text filter applied to the results
     * @param startDate inclusive start of the reporting window
     * @param endDate inclusive end of the reporting window
     * @throws ServletException the servlet exception
     * @throws IOException the io exception
     */
    @Transactional(readOnly = true)
    void getTeamProgressCSVBySharedPrograms(Long currentUserId, HttpServletResponse response, String search,
            DateTime startDate, DateTime endDate) throws ServletException, IOException;
    /**
     * Streams a team-progress CSV, grouped by team member, to the response.
     *
     * @param currentUserId id of the user requesting the report
     * @param request the servlet request
     * @param response the servlet response the CSV is written to
     * @param search free-text filter applied to the results
     * @param startDate inclusive start of the reporting window
     * @param endDate inclusive end of the reporting window
     * @throws ServletException the servlet exception
     * @throws IOException the io exception
     */
    @Transactional(readOnly = true)
    void getTeamProgressCSVByTeamMember(Long currentUserId, HttpServletRequest request, HttpServletResponse response,
            String search, DateTime startDate, DateTime endDate) throws ServletException, IOException;
    /**
     * Streams a team-progress CSV for course programs to the response.
     *
     * @param currentUserId id of the user requesting the report
     * @param search free-text filter applied to the results
     * @param startDate inclusive start of the reporting window
     * @param endDate inclusive end of the reporting window
     * @param response the servlet response the CSV is written to
     * @throws ServletException the servlet exception
     * @throws IOException the io exception
     */
    @Transactional(readOnly = true)
    void getTeamProgressCSVByCoursePrograms(Long currentUserId, String search, DateTime startDate, DateTime endDate,
            HttpServletResponse response) throws ServletException, IOException;
    /**
     * Streams a team-progress CSV for learning-path programs to the response.
     *
     * @param currentUserId id of the user requesting the report
     * @param search free-text filter applied to the results
     * @param startDate inclusive start of the reporting window
     * @param endDate inclusive end of the reporting window
     * @param response the servlet response the CSV is written to
     * @throws ServletException the servlet exception
     * @throws IOException the io exception
     */
    @Transactional(readOnly = true)
    void getTeamProgressCSVByLearningPathPrograms(Long currentUserId, String search, DateTime startDate,
            DateTime endDate, HttpServletResponse response) throws ServletException, IOException;
    /**
     * Streams a CSV of all feedback entries to the response.
     *
     * @param request the servlet request
     * @param response the servlet response the CSV is written to
     * @throws ServletException the servlet exception
     * @throws IOException the io exception
     */
    @Transactional(readOnly = true)
    void getAllFeedBack(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException;
    /**
     * Streams a CSV of all users to the response.
     *
     * @param request the servlet request
     * @param response the servlet response the CSV is written to
     * @throws ServletException the servlet exception
     * @throws IOException the io exception
     */
    @Transactional(readOnly = true)
    void getAllUsers(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException;
    /**
     * Streams a CSV of all audit searches to the response.
     *
     * @param request the servlet request
     * @param response the servlet response the CSV is written to
     * @throws ServletException the servlet exception
     * @throws IOException the io exception
     */
    @Transactional(readOnly = true)
    void getAllAuditSearches(HttpServletRequest request, HttpServletResponse response)
            throws ServletException, IOException;
}
|
Brewster Kahle, completist — via Freedom For IP. It's no secret that print media is struggling. Devices like the Kindle and iPad seem to point to a world without ink on paper as being not only possible, but practical as well.
Brewster Kahle, who has previously found success with a number of Internet technologies like the Internet Archive, is attempting to collect every book ever published in an effort to preserve them for the future.
"The idea is to be able to collect one copy of every book ever published. We're not going to get there, but that's our goal," he told the Associated Press.
He wants to store them in order to preserve them from a future where digital content has made physical printed materials worthless. To be clear, he is very much in favor of digitization, but simply feels that digitization does not render the physical object obsolete.
He's off to an admirable start -- his warehouse currently holds 500,000 books. |
/*! \brief SPI slave transfers data to USART SPI master.
 *
 * Issues the slave read command, reads back a full buffer, and verifies
 * that every byte matches what the slave transmitted.
 */
static bool spi_slave_transfer(void)
{
    usart_spi_select_device(USART_SPI_EXAMPLE, &USART_SPI_DEVICE_EXAMPLE);

    /* Request the slave's data, then read the whole buffer back. */
    data_master_tx[0] = SLAVE_RD_CMD;
    count = 0;
    usart_spi_write_packet(USART_SPI_EXAMPLE, data_master_tx, 1);
    usart_spi_read_packet(USART_SPI_EXAMPLE, data_master_rx,
            DATA_BUFFER_SIZE);

    usart_spi_deselect_device(USART_SPI_EXAMPLE, &USART_SPI_DEVICE_EXAMPLE);

    /* Fail on the first byte that does not match the slave's buffer. */
    for (uint8_t i = 0; i < DATA_BUFFER_SIZE; i++) {
        if (data_master_rx[i] != data_slave_tx[i]) {
            return false;
        }
    }
    return true;
}
/*
* The MIT License
*
* Copyright 2017 <NAME> & <NAME>.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package aptgraph.batch;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.nio.file.Paths;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
/**
 * Main class for Batch Processor.
 *
 * @author <NAME>
 * @author <NAME>
 */
public final class Main {
    /** Default k value for the k-NN graphs. */
    private static final int DEFAULT_K = 20;
    /** By default, select only temporal children. */
    private static final boolean DEFAULT_CHILDREN_BOOL = true;
    /** By default, do not overwrite existing graphs. */
    private static final boolean DEFAULT_OVERWRITE_BOOL = false;
    /** Default input log format. */
    private static final String DEFAULT_FORMAT = "squid";
    /**
     * Main method of Batch Processor.
     *
     * @param args Arguments from the command line
     * @throws ParseException If we cannot parse command line args
     * @throws FileNotFoundException If the input file does not exist
     * @throws IOException If we cannot read the input file
     * @throws IllegalArgumentException If argument k is not an int
     */
    public static void main(final String[] args)
            throws ParseException, FileNotFoundException, IOException,
            IllegalArgumentException {
        // Default value of arguments
        int k = DEFAULT_K;
        boolean children_bool = DEFAULT_CHILDREN_BOOL;
        boolean overwrite_bool = DEFAULT_OVERWRITE_BOOL;
        String format = DEFAULT_FORMAT;
        // Parse command line arguments
        Options options = new Options();
        options.addOption("i", true, "Input log file (required)");
        options.addOption("o", true, "Output directory for graphs (required)");
        Option arg_k = Option.builder("k")
                .optionalArg(true)
                .desc("Impose k value of k-NN graphs (option, default: 20)")
                .hasArg(true)
                .numberOfArgs(1)
                .build();
        options.addOption(arg_k);
        Option arg_child = Option.builder("c")
                .optionalArg(true)
                .desc("Select only temporal children (option, default: true)")
                .hasArg(true)
                .numberOfArgs(1)
                .build();
        options.addOption(arg_child);
        Option arg_overwrite = Option.builder("x")
                .optionalArg(true)
                .desc("Overwrite existing graphs (option, default : false)")
                .hasArg(true)
                .numberOfArgs(1)
                .build();
        options.addOption(arg_overwrite);
        Option arg_format = Option.builder("f")
                .optionalArg(true)
                .desc("Specify format of input file (squid or json) "
                        + "(option, default : squid)")
                .hasArg(true)
                .numberOfArgs(1)
                .build();
        options.addOption(arg_format);
        options.addOption("h", false, "Show this help");
        CommandLineParser parser = new DefaultParser();
        CommandLine cmd = parser.parse(options, args);
        if (cmd.hasOption("h")
                || !cmd.hasOption("i")
                || !cmd.hasOption("o")) {
            HelpFormatter formatter = new HelpFormatter();
            formatter.printHelp("java -jar batch-<version>.jar", options);
            return;
        }
        // -k: NumberFormatException is an IllegalArgumentException, so a
        // malformed value is reported below and the default k is retained.
        try {
            if (cmd.hasOption("k")) {
                k = Integer.parseInt(cmd.getOptionValue("k"));
            }
        } catch (IllegalArgumentException ex) {
            System.err.println(ex);
        }
        // -c / -x: Boolean.parseBoolean never throws; an unrecognised value
        // simply yields false.
        try {
            if (cmd.hasOption("c")) {
                children_bool = Boolean.parseBoolean(cmd.getOptionValue("c"));
            }
        } catch (IllegalArgumentException ex) {
            System.err.println(ex);
        }
        try {
            if (cmd.hasOption("x")) {
                overwrite_bool = Boolean.parseBoolean(cmd.getOptionValue("x"));
            }
        } catch (IllegalArgumentException ex) {
            System.err.println(ex);
        }
        // -f: validate BEFORE assigning so that an invalid value is reported
        // and the default format is kept. (Previously the invalid value was
        // assigned first, the exception was caught here, and the bad format
        // leaked through to the analysis.)
        try {
            if (cmd.hasOption("f")) {
                String requested_format = cmd.getOptionValue("f");
                if (!requested_format.equals("squid")
                        && !requested_format.equals("json")) {
                    throw new IllegalArgumentException("Wrong format option");
                }
                format = requested_format;
            }
        } catch (IllegalArgumentException ex) {
            System.err.println(ex);
        }
        // Run analyze
        try {
            BatchProcessor processor = new BatchProcessor();
            processor.analyze(k,
                    new FileInputStream(cmd.getOptionValue("i")),
                    Paths.get(cmd.getOptionValue("o")),
                    format, children_bool, overwrite_bool);
        } catch (IllegalArgumentException ex) {
            System.err.println(ex);
        }
    }
    /** Utility class: not instantiable. */
    private Main() {
    }
}
|
<filename>duke-core/src/main/java/no/priv/garshol/duke/Duke.java
package no.priv.garshol.duke;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.FileWriter;
import java.io.IOException;
import java.io.InputStream;
import java.io.Writer;
import java.util.List;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Properties;
import no.priv.garshol.duke.matchers.AbstractMatchListener;
import no.priv.garshol.duke.matchers.PrintMatchListener;
import no.priv.garshol.duke.matchers.TestFileListener;
import no.priv.garshol.duke.utils.YesNoConsole;
import no.priv.garshol.duke.utils.LinkFileWriter;
import no.priv.garshol.duke.utils.NTriplesWriter;
import no.priv.garshol.duke.utils.LinkDatabaseUtils;
import no.priv.garshol.duke.utils.CommandLineParser;
import org.xml.sax.SAXException;
import org.xml.sax.SAXParseException;
/**
* Command-line interface to the engine.
*/
/**
 * Command-line interface to the engine.
 */
public class Duke {
  // cached contents of duke.properties (version/build metadata); loaded lazily
  private static Properties properties;

  public static void main(String[] argv) throws IOException {
    try {
      main_(argv);
    } catch (DukeConfigException e) {
      // configuration problems are user errors: report the message cleanly,
      // without a stack trace
      System.err.println("ERROR: " + e.getMessage());
    }
  }

  /**
   * Actual entry point: parses options, loads the configuration, wires up
   * listeners and runs either deduplication or record linkage.
   */
  public static void main_(String[] argv) throws IOException {
    // parse command-line
    CommandLineParser parser = setupParser();
    try {
      argv = parser.parse(argv);
    } catch (CommandLineParser.CommandLineParserException e) {
      System.err.println("ERROR: " + e.getMessage());
      usage();
      System.exit(1);
    }

    // set up some initial options
    boolean datadebug = parser.getOptionState("showdata");
    Logger logger = new CommandLineLogger(parser.getOptionState("verbose") ?
                                          1 : 0);
    boolean progress = parser.getOptionState("progress");
    int batch_size = parser.getOptionInteger("batchsize", 40000);
    int threads = parser.getOptionInteger("threads", 1);

    // load the configuration
    Configuration config;
    try {
      config = ConfigLoader.load(argv[0]);
    } catch (FileNotFoundException e) {
      System.err.println("ERROR: Config file '" + argv[0] + "' not found!");
      return;
    } catch (SAXParseException e) {
      System.err.println("ERROR: Couldn't parse config file: " + e.getMessage());
      System.err.println("Error in " + e.getSystemId() + ":" +
                         e.getLineNumber() + ":" + e.getColumnNumber());
      return;
    } catch (SAXException e) {
      System.err.println("ERROR: Couldn't parse config file: " + e.getMessage());
      return;
    }

    // validate the configuration
    if (!datadebug) // unless --showdata
      config.validate();

    // if we're in data debug mode we branch out here
    if (datadebug) {
      showdata(config);
      return; // stop here
    }

    // set up listeners
    boolean noreindex = parser.getOptionState("noreindex");
    Processor processor = new Processor(config, !noreindex);
    processor.setLogger(logger);
    processor.setThreads(threads);

    // sanity check: --noreindex implies reusing a persistent index
    if (noreindex && processor.getDatabase().isInMemory()) {
      System.out.println("Option --noreindex not available with in-memory " +
                         "database");
      return;
    }

    // display lookup properties?
    if (parser.getOptionState("lookups")) {
      System.out.println("Lookup properties:");
      for (Property p : config.getLookupProperties())
        System.out.println(" " + p.getName());
      System.out.println();
    }

    boolean interactive = parser.getOptionState("interactive");
    boolean pretty = parser.getOptionState("pretty") || interactive;
    boolean showmatches = parser.getOptionState("showmatches") || interactive;
    PrintMatchListener listener =
      new PrintMatchListener(showmatches,
                             parser.getOptionState("showmaybe"),
                             progress,
                             !config.isDeduplicationMode(),
                             config.getProperties(),
                             pretty);
    processor.addMatchListener(listener);

    // needs to be before the link file handler, in case the link file
    // is the same as the test file
    TestFileListener testfile = null;
    if (parser.getOptionValue("testfile") != null) {
      testfile = new TestFileListener(parser.getOptionValue("testfile"),
                                      config,
                                      parser.getOptionState("testdebug"),
                                      processor,
                                      showmatches,
                                      pretty);
      testfile.setPessimistic(true);
      processor.addMatchListener(testfile);
      if (testfile.isEmpty())
        System.out.println("WARN: Test file is empty. Did you mean --linkfile?");
    }

    // optional link file output; format is chosen by file extension
    AbstractLinkFileListener linkfile = null;
    if (parser.getOptionValue("linkfile") != null) {
      String fname = parser.getOptionValue("linkfile");
      if (fname.endsWith(".ntriples"))
        linkfile = new NTriplesLinkFileListener(fname, config.getIdentityProperties());
      else
        linkfile = new LinkFileListener(fname, config.getIdentityProperties(),
                                        interactive,
                                        parser.getOptionValue("testfile"));
      processor.addMatchListener(linkfile);
    }

    // --profile
    if (parser.getOptionState("profile"))
      processor.setPerformanceProfiling(true);

    // --singlematch setting
    boolean matchall = true;
    if (parser.getOptionState("singlematch")) {
      if (config.isDeduplicationMode())
        throw new DukeConfigException("--singlematch only works in record linkage mode");
      matchall = false;
    }

    // this is where we get started for real. the first thing we do
    // is to distinguish between modes.
    if (config.isDeduplicationMode())
      // deduplication mode
      processor.deduplicate(config.getDataSources(), batch_size);
    else {
      // record linkage mode
      if (noreindex) {
        // user has specified that they already have group 1 indexed up,
        // and don't want to do it again, for whatever reason. in that
        // case we just do the linking, and don't touch group 1 at all.
        processor.linkRecords(config.getDataSources(2), matchall);
      } else
        processor.link(config.getDataSources(1),
                       config.getDataSources(2),
                       matchall,
                       batch_size);
    }

    // close up shop, then finish
    if (parser.getOptionValue("linkfile") != null)
      linkfile.close();
    processor.close();
  }

  /**
   * Dumps all cleaned records from every data source (data debug mode,
   * triggered by --showdata).
   */
  private static void showdata(Configuration config) {
    List<Property> props = config.getProperties();
    List<DataSource> sources = new ArrayList<DataSource>();
    sources.addAll(config.getDataSources());
    sources.addAll(config.getDataSources(1));
    sources.addAll(config.getDataSources(2));

    for (DataSource src : sources) {
      RecordIterator it = src.getRecords();
      while (it.hasNext()) {
        Record r = it.next();
        PrintMatchListener.prettyPrint(r, props);
        System.out.println("");
      }
      it.close();
    }
  }

  // prints command-line usage help to stdout
  private static void usage() {
    System.out.println("");
    System.out.println("java no.priv.garshol.duke.Duke [options] <cfgfile>");
    System.out.println("");
    System.out.println("  --progress         show progress report while running");
    System.out.println("  --showmatches      show matches while running");
    System.out.println("  --linkfile=<file>  output matches to link file");
    System.out.println("  --interactive      query user before outputting link file matches");
    System.out.println("  --testfile=<file>  test matches against known correct results in file");
    System.out.println("  --testdebug        display failures");
    System.out.println("  --verbose          display diagnostics");
    System.out.println("  --noreindex        reuse existing Lucene index");
    System.out.println("  --batchsize=n      set size of Lucene indexing batches");
    System.out.println("  --showdata         show all cleaned data (data debug mode)");
    System.out.println("  --profile          display performance statistics");
    System.out.println("  --threads=N        run processing in N parallell threads");
    System.out.println("  --pretty           pretty display when comparing records");
    System.out.println("  --singlematch      (in record linkage mode) only accept");
    System.out.println("                     the best match for each record");
    System.out.println("  --lookups          display lookup properties");
    System.out.println("");
    System.out.println("Duke version " + getVersionString());
  }

  // registers all supported command-line options
  private static CommandLineParser setupParser() {
    CommandLineParser parser = new CommandLineParser();
    parser.setMinimumArguments(1);
    parser.setMaximumArguments(1);
    parser.addBooleanOption("progress", 'p');
    parser.addStringOption("linkfile", 'l');
    parser.addStringOption("linkendpoint", 'e');
    parser.addBooleanOption("showmatches", 's');
    parser.addBooleanOption("showmaybe", 'm');
    parser.addStringOption("testfile", 'T');
    parser.addBooleanOption("testdebug", 't');
    parser.addStringOption("batchsize", 'b');
    parser.addBooleanOption("verbose", 'v');
    parser.addStringOption("threads", 'P');
    parser.addBooleanOption("noreindex", 'N');
    parser.addBooleanOption("interactive", 'I');
    parser.addBooleanOption("showdata", 'D');
    parser.addBooleanOption("profile", 'o');
    // NOTE(review): a duplicate registration of the "threads" option (with
    // short option 'n') used to appear here; it has been removed since
    // "threads" is already registered above with short option 'P'.
    // FIXME: short option 'n' is shared by --pretty and --singlematch below;
    // verify how CommandLineParser resolves duplicate short options.
    parser.addBooleanOption("pretty", 'n');
    parser.addBooleanOption("singlematch", 'n');
    parser.addBooleanOption("lookups", 'L');
    return parser;
  }

  /**
   * Returns a human-readable version string including build info,
   * read from duke.properties.
   */
  public static String getVersionString() {
    Properties props = getProperties();
    return props.getProperty("duke.version") + ", build " +
           props.getProperty("duke.build") + ", built by " +
           props.getProperty("duke.builder");
  }

  // returns just the version number
  public static String getVersion() {
    return getProperties().getProperty("duke.version");
  }

  /**
   * Lazily loads duke.properties from the classpath. The stream is now
   * closed even when load() fails, and a missing resource produces a
   * DukeException instead of a NullPointerException.
   */
  private static Properties getProperties() {
    if (properties == null) {
      properties = new Properties();
      InputStream in = null;
      try {
        in = Duke.class.getClassLoader().getResourceAsStream("no/priv/garshol/duke/duke.properties");
        if (in == null)
          throw new DukeException("Couldn't find duke.properties on classpath");
        properties.load(in);
      } catch (IOException e) {
        throw new DukeException("Couldn't load duke.properties", e);
      } finally {
        if (in != null) {
          try {
            in.close();
          } catch (IOException e) {
            // ignore failure to close; properties were already read
          }
        }
      }
    }
    return properties;
  }

  /**
   * Base class for listeners that write matched ID pairs to a link file.
   * Expands each match into one link per combination of identity values.
   */
  static abstract class AbstractLinkFileListener extends AbstractMatchListener {
    private Collection<Property> idprops;

    public AbstractLinkFileListener(Collection<Property> idprops) {
      this.idprops = idprops;
    }

    public void close() throws IOException {
    }

    /** Records one link between two record IDs. */
    public abstract void link(String id1, String id2, double confidence)
      throws IOException;

    public void matches(Record r1, Record r2, double confidence) {
      try {
        for (Property p : idprops)
          for (String id1 : r1.getValues(p.getName()))
            for (String id2 : r2.getValues(p.getName()))
              link(id1, id2, confidence);
      } catch (IOException e) {
        throw new DukeException(e);
      }
    }
  }

  /**
   * Writes links in Duke's own link file format, optionally asking the user
   * to confirm each link (--interactive) backed by an in-memory link database.
   */
  static class LinkFileListener extends AbstractLinkFileListener {
    private Writer out;
    private LinkFileWriter writer;
    private LinkDatabase linkdb;
    private YesNoConsole console;

    public LinkFileListener(String linkfile, Collection<Property> idprops,
                            boolean interactive, String testfile)
      throws IOException {
      super(idprops);
      if (interactive) {
        this.console = new YesNoConsole();
        this.linkdb = new InMemoryLinkDatabase();
        if (testfile != null)
          linkdb = LinkDatabaseUtils.loadTestFile(testfile);
      }

      // have to start writing the link file *after* we load the test
      // file, because they may be the same file...
      // second param: if there is a test file, we append to the link
      // file, instead of overwriting
      this.out = new FileWriter(linkfile, testfile != null);
      this.writer = new LinkFileWriter(out);
      // FIXME: this will only work if the two files are the same
    }

    public void link(String id1, String id2, double confidence)
      throws IOException {
      boolean correct = true;

      // does this provide new information, or do we know it already?
      Link inferredlink = null;
      if (linkdb != null)
        inferredlink = linkdb.inferLink(id1, id2);

      // record it
      if (console != null) {
        if (inferredlink == null) {
          correct = console.yesorno();
          confidence = 1.0; // the user told us, which is as certain as it gets
        } else {
          correct = inferredlink.getKind() == LinkKind.SAME;
          confidence = inferredlink.getConfidence();
        }
      }

      // note that we also write inferred links, because the test file
      // listener does not do inference
      writer.write(id1, id2, correct, confidence);
      out.flush(); // make sure we preserve the data

      if (linkdb != null && inferredlink == null) {
        Link link = new Link(id1, id2, LinkStatus.ASSERTED,
                             correct ? LinkKind.SAME : LinkKind.DIFFERENT, 1.0);
        linkdb.assertLink(link);
      }
    }

    public void close() throws IOException {
      out.close();
    }
  }

  /**
   * Writes links as owl:sameAs statements in NTriples format
   * (chosen when the link file name ends in ".ntriples").
   */
  static class NTriplesLinkFileListener extends AbstractLinkFileListener {
    private FileOutputStream fos;
    private NTriplesWriter out;

    public NTriplesLinkFileListener(String linkfile,
                                    Collection<Property> idprops)
      throws IOException {
      super(idprops);
      this.fos = new FileOutputStream(linkfile);
      this.out = new NTriplesWriter(fos);
    }

    public void link(String id1, String id2, double confidence)
      throws IOException {
      out.statement(id1, "http://www.w3.org/2002/07/owl#sameAs", id2, false);
    }

    public void close() throws IOException {
      out.done();
      fos.close();
    }
  }

  /**
   * Simple stdout logger controlled by a numeric level.
   */
  static class CommandLineLogger implements Logger {
    private int loglevel; // 1: trace, 2: debug, 3: info, 4: warn, 5: error

    private CommandLineLogger(int loglevel) {
      this.loglevel = loglevel;
    }

    public void trace(String msg) {
      if (isTraceEnabled())
        System.out.println(msg);
    }

    public void debug(String msg) {
      if (isDebugEnabled())
        System.out.println(msg);
    }

    public void info(String msg) {
      if (isInfoEnabled())
        System.out.println(msg);
    }

    public void warn(String msg) {
      warn(msg, null);
    }

    public void warn(String msg, Throwable e) {
      if (!isWarnEnabled())
        return;
      // e is null when called via warn(String); the previous code
      // unconditionally called e.printStackTrace() and threw NPE
      if (e == null) {
        System.out.println(msg);
      } else {
        System.out.println(msg + " " + e);
        e.printStackTrace();
      }
    }

    public void error(String msg) {
      error(msg, null);
    }

    public void error(String msg, Throwable e) {
      if (!isErrorEnabled())
        return;
      // same null guard as warn(String, Throwable)
      if (e == null) {
        System.out.println(msg);
      } else {
        System.out.println(msg + " " + e);
        e.printStackTrace();
      }
    }

    public boolean isTraceEnabled() {
      return loglevel == 1;
    }

    public boolean isDebugEnabled() {
      return loglevel != 0 && loglevel < 3;
    }

    public boolean isInfoEnabled() {
      return loglevel != 0 && loglevel < 4;
    }

    public boolean isWarnEnabled() {
      return loglevel != 0 && loglevel < 5;
    }

    public boolean isErrorEnabled() {
      return loglevel != 0 && loglevel < 6;
    }
  }
}
|
// Helper that rewrites an underscore-joined place name, e.g. "New_York" ->
// "New York". Characters before the first '_' form the first word; every
// non-underscore character after any '_' joins the second word. A name
// without underscores is returned unchanged.
string convert(string s)
{
    string head = "";
    string tail = "";
    bool sawUnderscore = false;
    for (size_t i = 0; i < s.length(); ++i) {
        char c = s[i];
        if (c == '_') {
            // underscores themselves are dropped; they only flip the mode
            sawUnderscore = true;
            continue;
        }
        if (sawUnderscore)
            tail += c;
        else
            head += c;
    }
    if (tail.length() != 0)
        return (head + " " + tail);
    return head;
}
#include<bits/stdc++.h>
using namespace std;

// Reads n values and a step d. Each value must be strictly greater than the
// previous (adjusted) one; when it is not, it is raised by the smallest
// multiple k*d that makes it larger, and k is added to the running total.
// Prints the total number of increments.
int main()
{
    int n, d;
    cin >> n >> d;
    int operations = 0;
    int prev = -1;
    while (n--) {
        int price;
        cin >> price;
        if (price <= prev) {
            // smallest k with price + k*d > prev
            int deficit = prev - price;
            int steps = deficit / d + 1;
            operations += steps;
            price += steps * d;
        }
        prev = price;
    }
    cout << operations;
    return 0;
}
// Source: rhencke/engine — src/third_party/skia/infra/bots/task_drivers/perf_puppeteer_skottie_frames/perf_puppeteer_skottie_frames.go
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This executable is meant to be a general way to gather perf data using puppeteer. The logic
// (e.g. what bench to run, how to process that particular output) is selected using the ExtraConfig
// part of the task name.
package main
import (
"context"
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"math"
"os"
"path/filepath"
"sort"
"strings"
"go.skia.org/infra/go/exec"
"go.skia.org/infra/go/skerr"
"go.skia.org/infra/go/sklog"
"go.skia.org/infra/task_driver/go/lib/os_steps"
"go.skia.org/infra/task_driver/go/td"
)
// perfKeyWebGLVersion is the trace key under which the major WebGL
// version ("1" or "2") is recorded.
const perfKeyWebGLVersion = "webgl_version"
// main parses the task-driver flags, installs node dependencies, runs the
// skottie-frames benchmark for every lottie folder, and writes the
// aggregated perf JSON to output_path.
func main() {
	var (
		// Required properties for this task.
		projectID     = flag.String("project_id", "", "ID of the Google Cloud project.")
		taskName      = flag.String("task_name", "", "Name of the task.")
		benchmarkPath = flag.String("benchmark_path", "", "Path to location of the benchmark files (e.g. //tools/perf-puppeteer).")
		outputPath    = flag.String("output_path", "", "Perf Output will be produced here")
		gitHash       = flag.String("git_hash", "", "Git hash this data corresponds to")
		taskID        = flag.String("task_id", "", "task id this data was generated on")
		nodeBinPath   = flag.String("node_bin_path", "", "Path to the node bin directory (should have npm also). This directory *must* be on the PATH when this executable is called, otherwise, the wrong node or npm version may be found (e.g. the one on the system), even if we are explicitly calling npm with the absolute path.")

		// These flags feed into the perf trace keys associated with the output data.
		osTrace            = flag.String("os_trace", "", "OS this is running on.")
		modelTrace         = flag.String("model_trace", "", "Description of host machine.")
		cpuOrGPUTrace      = flag.String("cpu_or_gpu_trace", "", "If this is a CPU or GPU configuration.")
		cpuOrGPUValueTrace = flag.String("cpu_or_gpu_value_trace", "", "The hardware of this CPU/GPU")
		webGLVersion       = flag.String("webgl_version", "", "Major WebGl version to use when creating gl drawing context. 1 or 2")

		// Flags that may be required for certain configs
		canvaskitBinPath = flag.String("canvaskit_bin_path", "", "The location of a canvaskit.js and canvaskit.wasm")
		lottiesPath      = flag.String("lotties_path", "", "Path to location of lottie files.")

		// Debugging flags.
		local       = flag.Bool("local", false, "True if running locally (as opposed to on the bots)")
		outputSteps = flag.String("o", "", "If provided, dump a JSON blob of step data to the given file. Prints to stdout if '-' is given.")
	)

	// Setup. StartRun parses the flags; EndRun must run last to finalize
	// step reporting, hence the defer.
	ctx := td.StartRun(projectID, taskID, taskName, outputSteps, local)
	defer td.EndRun(ctx)

	// Trace keys attached to every result produced by this run.
	keys := map[string]string{
		"os":                *osTrace,
		"model":             *modelTrace,
		perfKeyCpuOrGPU:     *cpuOrGPUTrace,
		"cpu_or_gpu_value":  *cpuOrGPUValueTrace,
		perfKeyWebGLVersion: *webGLVersion,
	}

	outputWithoutResults, err := makePerfObj(*gitHash, *taskID, os.Getenv("SWARMING_BOT_ID"), keys)
	if err != nil {
		td.Fatal(ctx, skerr.Wrap(err))
	}
	// Absolute paths work more consistently than relative paths.
	nodeBinAbsPath := td.MustGetAbsolutePathOfFlag(ctx, *nodeBinPath, "node_bin_path")
	benchmarkAbsPath := td.MustGetAbsolutePathOfFlag(ctx, *benchmarkPath, "benchmark_path")
	canvaskitBinAbsPath := td.MustGetAbsolutePathOfFlag(ctx, *canvaskitBinPath, "canvaskit_bin_path")
	lottiesAbsPath := td.MustGetAbsolutePathOfFlag(ctx, *lottiesPath, "lotties_path")
	outputAbsPath := td.MustGetAbsolutePathOfFlag(ctx, *outputPath, "output_path")

	// npm ci + create the out/ directory for raw benchmark JSON.
	if err := setup(ctx, benchmarkAbsPath, nodeBinAbsPath); err != nil {
		td.Fatal(ctx, skerr.Wrap(err))
	}

	// Run the benchmark once per lottie animation.
	if err := benchSkottieFrames(ctx, outputWithoutResults, benchmarkAbsPath, canvaskitBinAbsPath, lottiesAbsPath, nodeBinAbsPath); err != nil {
		td.Fatal(ctx, skerr.Wrap(err))
	}

	// outputFile name should be unique between tasks, so as to avoid having duplicate name files
	// uploaded to GCS.
	outputFile := filepath.Join(outputAbsPath, fmt.Sprintf("perf-%s.json", *taskID))
	if err := processSkottieFramesData(ctx, outputWithoutResults, benchmarkAbsPath, outputFile); err != nil {
		td.Fatal(ctx, skerr.Wrap(err))
	}
}
// perfKeyCpuOrGPU is the trace key recording whether this run used the
// CPU or GPU backend; its value also selects the benchmark config.
const perfKeyCpuOrGPU = "cpu_or_gpu"
// makePerfObj builds the skeleton perf JSON object (no results yet) for this
// run. gitHash and taskID are mandatory. Note that the provided keys map is
// stored directly (not copied) and augmented with the fixed trace keys for
// this benchmark configuration.
func makePerfObj(gitHash, taskID, machineID string, keys map[string]string) (perfJSONFormat, error) {
	var out perfJSONFormat
	if gitHash == "" {
		return out, skerr.Fmt("Must provide --git_hash")
	}
	if taskID == "" {
		return out, skerr.Fmt("Must provide --task_id")
	}
	out.GitHash = gitHash
	out.SwarmingTaskID = taskID
	out.SwarmingMachineID = machineID
	out.Key = keys
	// Fixed keys that describe this benchmark configuration.
	for k, v := range map[string]string{
		"arch":          "wasm",
		"browser":       "Chromium",
		"configuration": "Release",
		"extra_config":  "SkottieFrames",
		"binary":        "CanvasKit",
	} {
		out.Key[k] = v
	}
	out.Results = map[string]map[string]perfResult{}
	return out, nil
}
// setup installs the benchmark's node dependencies (npm ci) and creates the
// out/ directory that the benchmark writes its raw JSON into.
func setup(ctx context.Context, benchmarkPath, nodeBinPath string) error {
	ctx = td.StartStep(ctx, td.Props("setup").Infra())
	defer td.EndStep(ctx)

	// Call npm by absolute path so we don't accidentally pick up a different
	// npm from the system PATH (see the node_bin_path flag description).
	if _, err := exec.RunCwd(ctx, benchmarkPath, filepath.Join(nodeBinPath, "npm"), "ci"); err != nil {
		return td.FailStep(ctx, skerr.Wrap(err))
	}
	if err := os.MkdirAll(filepath.Join(benchmarkPath, "out"), 0777); err != nil {
		return td.FailStep(ctx, skerr.Wrap(err))
	}
	return nil
}
// benchSkottieFrames serves lotties and assets from a folder and runs the skottie-frames-load
// benchmark on each of them individually. The output for each will be a JSON file in
// $benchmarkPath/out/ corresponding to the animation name.
func benchSkottieFrames(ctx context.Context, perf perfJSONFormat, benchmarkPath, canvaskitBinPath, lottiesPath, nodeBinPath string) error {
	ctx = td.StartStep(ctx, td.Props("perf lotties in "+lottiesPath))
	defer td.EndStep(ctx)

	// We expect the lottiesPath to be a series of folders, each with a data.json and a subfolder of
	// images. For example:
	// lottiesPath
	//    /first-animation/
	//       data.json
	//       /images/
	//          img001.png
	//          img002.png
	//          my-font.ttf
	var lottieFolders []string
	err := td.Do(ctx, td.Props("locate lottie folders"), func(ctx context.Context) error {
		return filepath.Walk(lottiesPath, func(path string, info os.FileInfo, _ error) error {
			if path == lottiesPath {
				return nil
			}
			// Only the top-level directories are animations; don't descend
			// into them.
			if info.IsDir() {
				lottieFolders = append(lottieFolders, path)
				return filepath.SkipDir
			}
			return nil
		})
	})
	if err != nil {
		return td.FailStep(ctx, skerr.Wrap(err))
	}
	sklog.Infof("Identified %d lottie folders to benchmark", len(lottieFolders))

	for _, lottie := range lottieFolders {
		name := filepath.Base(lottie)
		err = td.Do(ctx, td.Props("Benchmark "+name), func(ctx context.Context) error {
			// See comment in setup about why we specify the absolute path for node.
			args := []string{filepath.Join(nodeBinPath, "node"),
				"perf-canvaskit-with-puppeteer",
				"--bench_html", "skottie-frames.html",
				"--canvaskit_js", filepath.Join(canvaskitBinPath, "canvaskit.js"),
				"--canvaskit_wasm", filepath.Join(canvaskitBinPath, "canvaskit.wasm"),
				"--input_lottie", filepath.Join(lottie, "data.json"),
				"--assets", filepath.Join(lottie, "images"),
				"--output", filepath.Join(benchmarkPath, "out", name+".json"),
			}
			if perf.Key[perfKeyCpuOrGPU] != "CPU" {
				args = append(args, "--use_gpu")
				if perf.Key[perfKeyWebGLVersion] == "1" {
					// BUGFIX: pass the flag and its value as separate argv
					// entries. The previous single fused string
					// "--query_params webgl1" would be delivered as one
					// argument and not be recognized as the --query_params
					// flag with value "webgl1".
					args = append(args, "--query_params", "webgl1")
				}
			}

			_, err := exec.RunCwd(ctx, benchmarkPath, args...)
			if err != nil {
				return skerr.Wrap(err)
			}
			return nil
		})
		if err != nil {
			return td.FailStep(ctx, skerr.Wrap(err))
		}
	}
	return nil
}
// perfJSONFormat is the shape of the JSON document this task driver writes
// for ingestion by the perf system.
type perfJSONFormat struct {
	GitHash           string `json:"gitHash"`
	SwarmingTaskID    string `json:"swarming_task_id"`
	SwarmingMachineID string `json:"swarming_machine_id"`
	// Key holds the trace key/value pairs (os, model, arch, ...) shared by
	// every result in this file.
	Key map[string]string `json:"key"`
	// Maps bench name -> "config" -> result key -> value
	Results map[string]map[string]perfResult `json:"results"`
}

// perfResult maps a metric name (e.g. "avg_render_frame_ms") to its value.
type perfResult map[string]float32
// processSkottieFramesData looks at the result of benchSkottieFrames, computes summary data on
// those files and adds them as Results into the provided perf object. The perf object is then
// written in JSON format to outputPath.
func processSkottieFramesData(ctx context.Context, perf perfJSONFormat, benchmarkPath, outputFilePath string) error {
	// Raw per-animation JSON files were written here by benchSkottieFrames.
	perfJSONPath := filepath.Join(benchmarkPath, "out")
	ctx = td.StartStep(ctx, td.Props("process perf output "+perfJSONPath))
	defer td.EndStep(ctx)

	// Collect every .json file under the output directory.
	var jsonInputs []string
	err := td.Do(ctx, td.Props("locate input JSON files"), func(ctx context.Context) error {
		return filepath.Walk(perfJSONPath, func(path string, info os.FileInfo, _ error) error {
			if strings.HasSuffix(path, ".json") {
				jsonInputs = append(jsonInputs, path)
				return nil
			}
			return nil
		})
	})
	if err != nil {
		return td.FailStep(ctx, skerr.Wrap(err))
	}

	sklog.Infof("Identified %d JSON inputs to process", len(jsonInputs))

	for _, lottie := range jsonInputs {
		err = td.Do(ctx, td.Props("Process "+lottie), func(ctx context.Context) error {
			// Bench name is the file name without the .json extension.
			name := strings.TrimSuffix(filepath.Base(lottie), ".json")
			// Derive the perf "config" label from the trace keys.
			config := "software"
			if perf.Key[perfKeyCpuOrGPU] != "CPU" {
				config = "webgl2"
				if perf.Key[perfKeyWebGLVersion] == "1" {
					config = "webgl1"
				}
			}
			b, err := os_steps.ReadFile(ctx, lottie)
			if err != nil {
				return skerr.Wrap(err)
			}
			metrics, err := parseSkottieFramesMetrics(b)
			if err != nil {
				return skerr.Wrap(err)
			}
			perf.Results[name] = map[string]perfResult{
				config: metrics,
			}
			return nil
		})
		if err != nil {
			return td.FailStep(ctx, skerr.Wrap(err))
		}
	}

	// Serialize the completed perf object to the (unique) output file.
	err = td.Do(ctx, td.Props("Writing perf JSON file to "+outputFilePath), func(ctx context.Context) error {
		if err := os.MkdirAll(filepath.Dir(outputFilePath), 0777); err != nil {
			return skerr.Wrap(err)
		}
		b, err := json.MarshalIndent(perf, "", "  ")
		if err != nil {
			return skerr.Wrap(err)
		}
		if err = ioutil.WriteFile(outputFilePath, b, 0666); err != nil {
			return skerr.Wrap(err)
		}
		return nil
	})
	if err != nil {
		return td.FailStep(ctx, skerr.Wrap(err))
	}
	return nil
}
// skottieFramesJSONFormat mirrors the raw JSON emitted by the
// skottie-frames benchmark for a single animation: per-frame timings plus
// the one-off JSON load time.
type skottieFramesJSONFormat struct {
	WithoutFlushMS []float32 `json:"without_flush_ms"`
	WithFlushMS    []float32 `json:"with_flush_ms"`
	TotalFrameMS   []float32 `json:"total_frame_ms"`
	JSONLoadMS     float32   `json:"json_load_ms"`
}
// parseSkottieFramesMetrics decodes one raw benchmark JSON blob and reduces
// it to the flat map of named metrics that is uploaded to perf: averages,
// medians, standard deviations, high percentiles, and the first few
// individual frame times.
func parseSkottieFramesMetrics(b []byte) (map[string]float32, error) {
	var raw skottieFramesJSONFormat
	if err := json.Unmarshal(b, &raw); err != nil {
		return nil, skerr.Wrap(err)
	}

	// nthFrame returns the n-th total frame time, or 0 when fewer frames
	// were recorded.
	nthFrame := func(n int) float32 {
		if n >= len(raw.TotalFrameMS) {
			return 0
		}
		return raw.TotalFrameMS[n]
	}

	// Average of the first five frames, only when at least five exist.
	firstFiveAvg := float32(0)
	if len(raw.TotalFrameMS) >= 5 {
		firstFiveAvg = computeAverage(raw.TotalFrameMS[:5])
	}

	avgNoFlush, medNoFlush, stdNoFlush, _, _, _ := summarize(raw.WithoutFlushMS)
	avgFlush, medFlush, stdFlush, _, _, _ := summarize(raw.WithFlushMS)
	avgTotal, medTotal, stdTotal, p90Total, p95Total, p99Total := summarize(raw.TotalFrameMS)

	return map[string]float32{
		"json_load_ms": raw.JSONLoadMS,

		"avg_render_without_flush_ms":    avgNoFlush,
		"median_render_without_flush_ms": medNoFlush,
		"stddev_render_without_flush_ms": stdNoFlush,

		"avg_render_with_flush_ms":    avgFlush,
		"median_render_with_flush_ms": medFlush,
		"stddev_render_with_flush_ms": stdFlush,

		"avg_render_frame_ms":    avgTotal,
		"median_render_frame_ms": medTotal,
		"stddev_render_frame_ms": stdTotal,

		// more detailed statistics on total frame times
		"1st_frame_ms":             nthFrame(0),
		"2nd_frame_ms":             nthFrame(1),
		"3rd_frame_ms":             nthFrame(2),
		"4th_frame_ms":             nthFrame(3),
		"5th_frame_ms":             nthFrame(4),
		"avg_first_five_frames_ms": firstFiveAvg,
		"90th_percentile_frame_ms": p90Total,
		"95th_percentile_frame_ms": p95Total,
		"99th_percentile_frame_ms": p99Total,
	}, nil
}
// summarize computes summary statistics over the samples in input and
// returns, in order: mean, median, (population) standard deviation, and the
// 90th, 95th and 99th percentile values. The input slice is never mutated.
// An empty or nil input now yields all zeros; previously it panicked with an
// index-out-of-range when indexing the empty sorted slice.
func summarize(input []float32) (float32, float32, float32, float32, float32, float32) {
	if len(input) == 0 {
		return 0, 0, 0, 0, 0, 0
	}
	// Make a copy of the data so we don't mutate the order of the original.
	sorted := make([]float32, len(input))
	copy(sorted, input)
	sort.Slice(sorted, func(i, j int) bool {
		return sorted[i] < sorted[j]
	})
	// Mean, computed inline so this function is self-contained.
	sum := float32(0)
	for _, v := range sorted {
		sum += v
	}
	avg := sum / float32(len(sorted))
	// Population variance / standard deviation.
	variance := float32(0)
	for _, v := range sorted {
		variance += (v - avg) * (v - avg)
	}
	stddev := float32(math.Sqrt(float64(variance / float32(len(sorted)))))
	// Nearest-rank style percentile indices; len*P/100 is always a valid
	// index for non-empty input.
	medIdx := (len(sorted) * 50) / 100
	p90Idx := (len(sorted) * 90) / 100
	p95Idx := (len(sorted) * 95) / 100
	p99Idx := (len(sorted) * 99) / 100
	return avg, sorted[medIdx], stddev, sorted[p90Idx], sorted[p95Idx], sorted[p99Idx]
}
// computeAverage returns the arithmetic mean of d. An empty or nil slice now
// returns 0; previously the division by zero produced NaN (float32 0/0).
func computeAverage(d []float32) float32 {
	if len(d) == 0 {
		return 0
	}
	sum := float32(0)
	for i := 0; i < len(d); i++ {
		sum += d[i]
	}
	return sum / float32(len(d))
}
|
[6] Here we investigate the use of Li/Ca ratios, and the combination with Mg/Ca as Mg/Li and Li/Mg ratios, as sea surface temperature proxies in tropical corals. We utilize modern Porites corals from locations with exceptional local in situ temperature data to calibrate these promising new proxies while revealing some potential caveats to their use that highlight the complexity of biomineralization processes.
[5] Inorganic precipitation experiments suggest that Li + is incorporated into aragonite by heterovalent substitution of Ca 2+ in the CaCO 3 structure [ Okumura and Kitano , 1986 ; Marriott et al ., 2004b ]. Although there are many studies of Mg incorporation into CaCO 3 [e.g., Morse and Bender , 1990 ; Rimstidt et al ., 1998 , and references therein], the mode of incorporation into coral aragonite is not established [e.g., Allison et al ., 2011 ; Gaetani et al ., 2011 ]. The results of X‐ray Absorption Near Edge Structure (XANES) studies are difficult to interpret but suggest Mg exists in Porites coral samples as magnesite (MgCO 3 ) [ Farges et al ., 2009 ]. In contrast, another XANES study of corals suggests Mg is present as a disordered phase or is associated with organics [ Finch and Allison , 2008 ]. Culturing of Porites corals shows little direct temperature control on Mg/Ca ratios but indicates Mg/Ca increases with the extension rate of the coral [ Inoue et al ., 2007 ]. However, the lack of a measurable Li isotope fractionation within coral skeletons argues against a kinetic control on Li incorporation into coral aragonite [ Marriott et al ., 2004a ; Rollion‐Bard et al ., 2009 ]. Although the existing data concerning the controls of Li and Mg incorporation into biogenic aragonite are often difficult to reconcile, the similarities (apparent partition coefficients <<1) and differences (heterovalent 2Li + versus homovalent Mg 2+ incorporation) between these two elements means their combined study can potentially improve the understanding of trace metal incorporation into biogenic aragonite.
[4] Studies of the Li/Ca ratio of calcite foraminifera shells have generally not reproduced the clear temperature dependence displayed by inorganic calcite but instead suggest Li/Ca is influenced by growth rate and therefore seawater carbonate ion concentration [ Delaney et al ., 1985 ; Hall and Chan , 2004 ; Hathorne and James , 2006 ; Lear and Rosenthal , 2006 ]. More recently, the combination of Li/Ca with the known temperature proxy of foraminiferal Mg/Ca, as Mg/Li ratios has been shown to correlate better with deep sea temperatures than Mg/Ca ratios alone [ Bryan and Marchitto , 2008 ]. Interestingly, the largest improvement found by using Mg/Li instead of Mg/Ca ratios in benthic foraminifera was for the aragonitic species H . elegans [ Bryan and Marchitto , 2008 ]. This work inspired others to investigate Li/Ca and Mg/Li ratios, and clear relationships with deep sea temperature have been observed in various species of aragonitic cold water corals [ Case et al ., 2010 ; J. Raddatz et al., Temperature dependence of stable Sr isotopes, Sr/Ca and Mg/Li in the scleractinian cold water coral Lophelia pertusa , submitted to Chemical Geology . 2012; hereinafter referred to as Raddatz et al., submitted manuscript, 2012].
[3] Calcium carbonate has two major polymorphs in nature, trigonal calcite with six‐fold coordination and orthorhombic aragonite with ninefold coordination. Although tropical corals build their skeletons from aragonite, it is nonetheless informative to consider trace element incorporation into calcite and aragonite. The lithium content of inorganic calcite was observed to be negatively correlated with temperature in laboratory precipitation experiments conducted with low‐salinity solutions [ Marriott et al ., 2004a ]. The same study also analyzed lithium in the aragonite skeleton of a Porites coral from Jarvis Island at a sampling resolution of approximately four samples per year. Over two to three seasonal SST and coral δ 18 O cycles, the Li/Ca values suggested an inverse relationship with temperature where coral Li/Ca decreased by 5% for every degree of temperature increase [ Marriott et al ., 2004a ]. This temperature dependence is large compared to that of only around 0.7% per degree for Sr/Ca [e.g., Corrège , 2006 ]. This makes coral Li/Ca a very promising tool for SST reconstructions, even when considering the lower concentration of Li in coral aragonite (µmol/mol versus mmol/mol) and the poorer measurement precision likely to be obtained as a result. A further study found no influence of salinity on the Li/Ca ratio of inorganically precipitated aragonite [ Marriott et al ., 2004b ]. This is more evidence that the Li/Ca ratio of aragonite could be a robust proxy for SST, but unfortunately these authors did not perform inorganic aragonite precipitation experiments at different temperatures.
[2] The possibility to extend the instrumental record of sea surface temperatures (SSTs) by analyzing the chemistry of the aragonite skeletons of tropical corals has been explored and utilized in recent decades [ Beck et al ., 1992 ; Shen et al ., 1992 ; McCulloch et al ., 1994 ; Tudhope et al ., 2001 ; Quinn and Sampson , 2002 ; Corrège et al ., 2004 ; Felis et al ., 2004 ; Kilbourne et al ., 2004 ; Ayling et al ., 2006 ]. The proxies that have been developed and widely applied for SST are δ 18 O [e.g., Tudhope et al ., 2001 ], Sr/Ca [e.g., Gagan et al ., 1998 ], and U/Ca [e.g., Min et al ., 1995 ]. The δ 18 O composition of corals is also a function of the δ 18 O of seawater (which is correlated with salinity regionally) and therefore complements a true SST proxy by enabling paleo‐δ 18 Owater (salinity) estimates [e.g., Ren et al ., 2003 ]. Although U/Ca has been observed to correlate well with SST in various locations [e.g., Min et al ., 1995 ; Felis et al ., 2009 , 2010 ] the pH dependence of U speciation in seawater [e.g., Raitzsch et al ., 2011 ] suggests coral U/Ca should also be affected by seawater carbonate chemistry (salinity), as recently confirmed by culturing experiments [ Inoue et al ., 2011 ]. Coral Sr/Ca has been shown to be a robust SST proxy under various conditions [e.g., Gagan et al ., 1998 ], but concerns have been raised about variability in surface water Sr/Ca ratios [e.g., Brass and Turekian , 1974 ; de Villiers , 1999 ] and the possibility of changes in global seawater Sr/Ca ratios over glacial–interglacial cycles [ Stoll and Schrag , 1998 ]. Therefore, the development of other proxies for SST in tropical corals is desirable.
[9] The proxy calibration followed established methods [ Felis et al ., 2004 , 2009 ]. The annual Li/Ca maxima and minima for the OGA coral were tied to the corresponding extreme values in a local monthly SST record (1975–1994; Tokyo Metropolitan Ogasawara Fisheries Center) and subsequently interpolated to bimonthly intervals using the AnalySeries software (freely available at http://www.lsce.ipsl.fr/logiciels/index.php ). The annual Li/Ca maxima and minima for the Tahiti coral were tied to the corresponding extreme values in a local monthly SST record (1987–1991) [ Gerard , 1992 ] and subsequently interpolated to monthly intervals in the same manner. A linear least squares regression was then carried out for interpolated Li/Ca, Mg/Ca, Mg/Li, Li/Mg, and SST data with SST as the independent variable. The Li/Ca‐based age model occasionally (7 out of 43 tie points) differs by one or two samples from the Sr/Ca‐ and U/Ca‐based age models used in previous calibrations of the OGA coral [ Felis et al ., 2009 ]. Similarly, the Li/Ca‐based age model for the Tahiti coral differs little from the Sr/Ca‐based age model used in a previous calibration [ Felis et al ., 2012 ].
[8] For ICP‐MS analysis, ~300 µg of coral powder was weighed and placed into a small (8–30 mL) acid‐cleaned HDPE bottle. The powder was dissolved by adding 5–25 mL of 0.3M HNO 3 made from thermally distilled HNO 3 diluted with 18.2 MΩ cm H 2 O (Milli‐Q). After equilibration for at least 12 h, 100–1000 μL of sample solution was removed and diluted with 0.3M HNO 3 to have a Ca concentration of 10 ppm in an acid‐cleaned PE centrifuge tube. Analyses were performed using either an Element 2 (Thermo Scientific) sector field ICP‐MS at the University of Bremen or the Agilent 7500cs ICP‐MS at the GEOMAR, Kiel. The majority of the data were measured in Bremen, with only the additional sampling transect of the OGA coral measured in Kiel. The isotopes monitored during this study were 7 Li, 24 Mg, 25 Mg, 27 Al, 43 Ca, 48 Ca, 55 Mn, 88 Sr, 137 Ba, and 238 U. Raw intensities were blank corrected and normalized to 43 Ca before element/Ca ratios were calculated using the technique of Rosenthal et al . [ 1999 ]. A standard solution was prepared from single‐element solutions to have known element/Ca ratios similar to the samples and a Ca concentration of 10 ppm. All samples with Ca intensities >15% larger or smaller than the standard were rediluted accordingly and run again. Aliquots of the JCp‐1 Porites coral powder reference material [ Okai et al ., 2002 ] were analyzed as unknowns to ensure consistency between measurement sessions and the two laboratories. The average JCp‐1 value and standard deviation ( n = 25) during the course of this study was 6.29 ± 0.13 µmol/mol for Li/Ca, 4.17 ± 0.05 mmol/mol for Mg/Ca, 1.51 ± 0.04 mmol/mol for Li/Mg, 0.66 ± 0.02 mol/mmol for Mg/Li, and 1.18 ± 0.01 µmol/mol for U/Ca. Based on these analyses the external precision at the 95% confidence level (2σ) is estimated to be 4% for Li/Ca, 2.7% for Mg/Ca, 5.1% for Li/Mg and Mg/Li, and 2% for U/Ca.
[7] The details of the Porites coral (OGA‐02‐1) from the Ogasawara Islands (Japan) located at ~27° N in the western subtropical North Pacific (hereafter OGA coral) are given in Felis et al . [ 2009 ]. Local SST was measured during the calibration period ~1200 m from the OGA coral site (Tokyo Metropolitan Ogasawara Fisheries Center). A Porites coral (TAH‐95) was drilled at a water depth of 4–5 m on the north side of Tahiti (French Polynesia) located at ~17.5° S in the central tropical South Pacific (hereafter Tahiti coral) [ Felis et al ., 2012 ]. Local SST was measured during the calibration period ~850 m from the Tahiti coral site [ Gerard , 1992 ]. Coral samples were slabbed, X‐rayed, and microsampled following established methods [ Felis et al ., 2000 , 2004 ], with an average temporal resolution of >7 samples/yr for the OGA coral and of >17 samples/yr for the Tahiti coral. Aliquots of this powder were analyzed for stable isotopes as well as Sr/Ca and U/Ca ratios [ Felis et al ., 2009 , 2012 ], and the same powder was used for the ICP‐MS analyses reported here. Growth rates for the OGA coral during the studied period were 1.0–1.7 cm/yr with an average of 1.3 cm/yr [ Felis et al ., 2009 ] and were about 1.2–1.6 cm/yr with an average of 1.4 cm/yr for the Tahiti coral [ Felis et al ., 2012 ].
[11] The Li/Ca ratio of the Tahiti coral displays clear annual cycles ranging from 5.88 to 7.22 µmol/mol that track the local SST well (Figure 3 ). The Mg/Ca ratios of the Tahiti coral sometimes show clear annual cycles and range from 4.71 to 5.18 mmol/mol. The Li/Mg and Mg/Li ratios for the Tahiti coral also display clear annual cycles like the Li/Ca ratios and range from 1.47 to 1.18 mmol/mol and 0.68 to 0.85 mol/mmol, respectively.
[10] The original element/Ca data for the OGA and Tahiti corals are presented in Table S1 (auxiliary materials are available at http://www.pangaea.de ). The Li/Ca ratio of the OGA coral varies from 6.01 to 9.07 µmol/mol and displays clear annual cycles that match the SST records well (Figure 1 ). The amplitude of the Li/Ca cycles follows the interannual variability of local SST better than that of a gridded SST product that represents a 1° × 1° area (HadISST1.1; see Rayner et al . [ 2003 ]). During the period between 1979 and 1980 the Li/Ca ratio does not track the SST well, and there are two anomalously high Li/Ca values that will be discussed in detail in section 4.2 . Mg/Ca ratios of the OGA coral sometimes show clear annual cycles and range from 3.63 to 4.88 mmol/mol, with the exception of the period between 1979 and 1980, where the Mg/Ca ratios are 5 mmol/mol or greater, peaking at over 8 mmol/mol. The Li/Mg and Mg/Li ratios for the OGA coral display clear annual cycles like the Li/Ca ratios and range from 2.07 to 1.32 and 0.5 to 0.7 mol/mmol, respectively. The exception is the period between 1979 and 1980, where values are anomalously low or high (only Li/Mg shown in Figure 1 ). The measurements of the period between 1979 and 1980 were repeated, and an additional transect through the same density bands of the coral was also sampled and analyzed. These data confirm the presence of anomalously high Li/Ca and Mg/Ca values (Figure 2 ), while the Mn/Ca and Al/Ca ratios, which are considered indicators of contamination from Mn oxides and clays, are below suspect levels (Table S1).
4 Discussion
4.1 Calibration of Li/Ca, Mg/Li and Li/Mg Ratios With Local SST [12] Regression analysis of the OGA coral Li/Ca excluding the data from samples with Mg/Ca ratios >5 mmol/mol reveals a strong relationship (r2 = 0.73, p < 0.0001) between Li/Ca and SST. A linear least squares regression was also carried out for bimonthly interpolated Mg/Ca, Mg/Li, and Li/Mg ratios with local SST, and the monthly interpolated data for the Tahiti coral were analyzed in the same way (Figure 4). The resulting relationships are detailed in Table 1. The Mg/Ca ratio of the OGA coral was very weakly correlated with SST, while no significant relationship could be found for the Tahiti coral (Table 1). The slopes for the Tahiti coral Li/Ca‐SST and Mg/Li‐SST relationships are twice as steep as those found for the OGA coral. A similar range of temperature sensitivities is also observed for other coral‐based proxies [Corrège, 2006; Gagan et al., 2012, and references therein]. It is interesting to note that the Tahiti Li/Ca data fall directly on those for the OGA coral (Figure 4). Therefore, if only the data from above 26°C were regressed, a similar result would be obtained for both corals. This suggests there may be some degree of nonlinearity to the Li/Ca‐SST relationship, but calibrations using small temperature ranges may not be representative. The OGA coral Li/Ca‐SST relationship decreases by 1.8% per °C (at 25°C), while the Tahiti coral Li/Ca‐SST relationship decreases by 3.9% and is more similar to the 5% per °C reported for a Jarvis Island Porites coral [Marriott et al., 2004a]. Although it is difficult to assign temperatures accurately without the original SST data, the SST estimates derived by applying the Tahiti coral Li/Ca‐SST relationship to the Li/Ca ratios from the Jarvis Island coral fit the SST shown in Marriott et al. [2004a] well. This suggests the higher slope of the Li/Ca data from temperatures above 26°C may be widely applicable to Porites corals across the tropics. 
Figure 4 Open in figure viewerPowerPoint Regression analysis of the Li/Ca, Mg/Ca, Mg/Li, and Li/Mg ratios of OGA and Tahiti corals with locally measured SST. See Table 1 for more details. Table 1. Results for Ordinary Least Squares Regression of Coral Element/Ca Ratios With Local SSTa Intercept b Slope a Regression Coefficient r2 OGA Li/Ca 9.97 (±0.18) ‐0.123 (±0.007) 0.73 OGA Mg/Ca 3.16 (±0.20) 0.050 (±0.008) 0.26 OGA Mg/Li 0.19 (±0.029) 0.018 (±0.0012) 0.69 OGA Li/Mg 2.76 (±0.08) ‐0.048 (±0.003) 0.67 Average annual OGA Li/Ca 14.4 (±2.2) ‐0.31 (±0.09) 0.45 Average annual OGA Mg/Li ‐0.40 (±0.4) 0.042 (±0.016) 0.32 Average annual OGA Li/Ca 4.30 (±1.0) ‐0.11 (±0.04) 0.32 Tahiti Li/Ca 14.0 (±0.7) ‐0.277 (±0.025) 0.68 Tahiti Mg/Li ‐0.19 (±0.09) 0.035 (±0.003) 0.65 Tahiti Li/Mg 2.96 (±0.16) ‐0.060 (±0.006) 0.64 [13] To test that the relationship between Li/Ca and SST is not an artifact of the annual cycle of another variable causing the Li/Ca ratio to fluctuate, we have conducted a regression of the average annual Li/Ca ratios of the OGA coral with local SST [Crowley et al., 1999; Evans et al., 1999; Felis et al., 2009]. Although the seasonal temperature range at the OGA location is large, the average annual temperatures vary by only ~1°C, making the regression with Li/Ca ratios relatively imprecise (Table 1). Even so, the regression of the annual average OGA Li/Ca with SST gives r2 = 0.45 (p < 0.005). There is no significant relationship between the annual average OGA Li/Ca ratio and the average annual sea surface salinity [Carton and Giese, 2008], suggesting the variance of Li/Ca ratios is dominated by SST and is relatively insensitive to seawater chemistry. Furthermore, the slope and intercept of the OGA coral average annual Li/Ca‐SST relationship are very similar to the seasonal relationship from the Tahiti coral and the OGA coral for temperatures above 26°C. 
This strongly suggests that the differences between seasonal and average annual Li/Ca‐SST calibrations, also documented for Sr/Ca, U/Ca, and δ18O ratios of the OGA coral (see supplementary information of Felis et al. [2009]), result from the nonlinearity of the relationship between Li/Ca and SST. Recently it was suggested that two types of calibration are required for coral SST reconstructions using Sr/Ca and δ18O ratios, one for seasonal and one for multiannual reconstructions [Gagan et al., 2012]. These authors suggest the differences in proxy sensitivity result from variable “bio‐smoothing” attenuating the proxy signal for different coral colonies. However, it seems that variability in the sensitivity of coral Li/Ca potentially occurs within a single colony at different times of the year. The fact that the data from higher temperatures agree points to a lower sensitivity of Li/Ca at lower temperatures. In some Porites corals from other Japanese islands, a reduction of extension rate and the formation of a high‐density band has been observed during the winter months [Mitsuguchi et al., 2003]. This is not the case for the OGA coral, as the high‐density band is precipitated during summer, and the other geochemical records do not indicate reduced growth during winter [Felis et al., 2009]. [14] Interestingly, the slopes and intercepts of the Li/Mg‐SST relationships are similar for both OGA and Tahiti corals (Table 1). As the Ca concentration in the coral aragonite varies by <1% [e.g., Gaetani and Cohen, 2006], there should be no induced correlations from comparing Li/Ca with Mg/Ca ratios [Lenahan et al., 2011]. Thus it appears that using Mg/Ca as the denominating ratio accounts for some of the nonlinearity of the Li/Ca‐SST relationship above 26°C. More corals from diverse locations should be measured, but it may be that Li/Mg ratios help to overcome some intercoral offsets in paleothermometry. 
[15] Using the maximum and minimum slopes from the standard error (Table 1) to calculate the range of element ratios and the resulting range of estimated SST produces uncertainties of between ±1.0 and 1.7°C for Li/Ca and ±1.1 and 1.9°C for Li/Mg. This is comparable to Sr/Ca slope uncertainties [e.g., Felis et al., 2004, 2009], making Li/Ca and Li/Mg ratios useful additions to the coral paleothermometer arsenal.
4.2 Anomalously High Li/Ca and Mg/Ca Ratios [16] The anomalously high Li and Mg in the 1979–1980 section of the OGA coral are not the result of contamination by Mn oxides or clays, as confirmed by the analysis of an additional transect through the same density bands. Manganese was below detection in all samples, and Al/Ca ratios were <100 µmol/mol and were often below detectable levels (Table S1). The zone of high Li and Mg is slightly wider in the additional transect than the primary transect, probably the result of a slightly higher growth rate in the additional transect. The X‐ray of the coral looks normal throughout the 1979 and 1980 section, and the Sr/Ca, U/Ca and δ18O ratios measured in this section are within normal ranges and follow the SST well [Felis et al., 2009]. This is also the case for the U/Ca (Figure 2) and Sr/Ca ratios (not shown) measured in the additional transect. The δ13C through this section of coral [Felis et al., 2009] does not suggest the anomalous Li and Mg could have resulted from biological effects related to symbiont photosynthesis and coral metabolism [e.g., Gagan et al., 1994; Felis et al., 1998; Grottoli, 2002]. Secondary aragonite crystals are known to form in modern corals [e.g., Hendy et al., 2007; Sayani et al., 2011], but these have lower Mg/Ca ratios than primary skeleton [e.g., Hathorne et al., 2011]. A thin section from the 1979–1980 section of the OGA coral shows no indication of diagenesis. All this points to the anomalously high Li and Mg occurring in the original coral skeleton. It is interesting to note that the Li/Ca and Mg/Ca ratios are positively correlated with each other in the section where they are anomalously high (Figure 2). This contrasts with the areas of skeleton where Li/Ca ratios closely follow SST and the Li/Ca and Mg/Ca ratios are generally negatively correlated (Figures 1 and 3). 
High Li and Mg contents have been found associated with centers of calcification (COC) in some cold water corals [Case et al., 2010]. Therefore, the anomalously high Li/Ca and Mg/Ca ratios could potentially have resulted from a higher proportion of COC in this section of the skeleton. Inspection of the current thin sections provides no clear evidence, and different section preparation techniques to highlight COC and image analysis techniques will need to be developed to quantify COC proportion. Even if the underlying mechanism remains elusive, all evidence suggests the anomalously high Li/Ca and Mg/Ca ratios result from a biological effect on the incorporation into the biogenic aragonite.
4.3 Incorporation of Li and Mg Into Biogenic Aragonite [17] The Li/Mg SST relationship obtained for the OGA coral is similar to those found by previous studies of cold water corals [Case et al., 2010; Raddatz et al., submitted manuscript, 2012] and aragonitic benthic foraminifera H. elegans [Bryan and Marchitto, 2008] (Figure 5). The Li/Ca SST relationship is slightly stronger than that for Mg/Li (and Li/Mg), in contrast to previous studies where a stronger relationship was found between Mg/Li ratios and water temperature [Bryan and Marchitto, 2008; Case et al., 2010]. More data from diverse calcifiers are needed, but the comparison of the available Li/Mg (or Mg/Li) data from biogenic aragonite can be explained with a single exponential relationship to water temperature (Figure 6). This hints at the possibility of an overarching mechanism controlling the temperature response of Li and Mg incorporation into aragonite. Figure 5 Open in figure viewerPowerPoint Relationships between Li/Mg ratios with water temperature obtained by this study and from the literature. Figure 6 Open in figure viewerPowerPoint Exponential relationship between Li/Mg ratios and water temperature obtained by this study and others plotted on a log scale. Symbols are the same as in Figure 5 [18] As noted in section 1, the incorporation of Li and Mg into biogenic CaCO 3 is poorly understood. Even so, it is important to consider the possible mechanisms for the temperature influence on coral Li/Ca ratios and the anomalously high Li/Ca and Mg/Ca ratios in the 1979–1980 section of the OGA coral. A positive correlation between Mg/Ca ratios and SST has been reported for some Porites corals [e.g., Mitsuguchi et al., 1996; Sinclair et al., 1998], but the relationship is often not clear or consistent [e.g., Mitsuguchi et al., 2008]. This is also evident in the poor and insignificant correlation of Mg/Ca with local SST in this study (section 4.1), although some years have clear annual cycles of Mg/Ca. 
It must be noted that no oxidative cleaning of the coral powder was performed in this study, as is sometimes undertaken before Mg/Ca analysis [e.g., Mitsuguchi et al., 1996, 2003]. [19] Rayleigh fractionation from a closed reservoir combined with a temperature‐dependent reservoir depletion has been used to explain the positive relationship between coral Mg/Ca and temperature, as the Mg/Ca ratio of the reservoir would increase as Ca is progressively depleted [Gaetani and Cohen, 2006; Cohen and Gaetani, 2010]. Case et al. [2010] attributed the strong positive correlation between Li/Ca and Mg/Ca ratios within the skeletons of cold water corals to the fact that the apparent partition coefficients of both Li and Mg are <<1, and they would respond to Rayleigh fractionation similarly. However, within the skeletons of the tropical Porites corals examined here the Li/Ca and Mg/Ca ratios are anticorrelated apart from the period of anomalously high Li/Ca and Mg/Ca (Figures 1-3). This difference may simply result from the difference in the sampling scales between the <100 µm laser ablation sampling of Case et al. [2010] and the <1000 µm drill sampling of this study. Additionally, the large Li/Ca and Mg/Ca variability seen in deep‐sea corals, which experience only small changes in temperature, reveals the extent of reservoir depletion and Rayleigh fractionation is not temperature dependent. We suggest that the positively correlated ratios during the period of anomalously high Li/Ca and Mg/Ca of a Porites coral resulted from biological effects unrelated to temperature, possibly Rayleigh fractionation during biomineralization. However, under normal circumstances where such effects are minimal, Li/Ca and Mg/Ca ratios are negatively or poorly correlated, and the Li/Ca ratio faithfully reflects SST. 
[20] Given the clear nonenvironmental controls on tropical coral Mg/Ca ratios observed here and elsewhere [e.g., Meibom et al., 2008], the underpinning use of Mg/Ca ratios in multielement coral thermometry [Cohen and Gaetani, 2010; Gaetani et al., 2011] should be reconsidered. Li/Ca ratios should be very helpful for such multielement techniques, as the apparent partition coefficient of Li is <<1. |
from django.conf import settings
# Opt-in feature flag: projects set DJANGOCMS_LINK_USE_SELECT2 = True in
# their Django settings to request the Select2-based page search widget.
ENABLE_SELECT2 = getattr(settings, 'DJANGOCMS_LINK_USE_SELECT2', False)
# Export a single ``PageSearchField`` name whose concrete class depends on
# configuration: the Select2 variant when the flag is set AND django_select2
# is installed, otherwise the plain CMS page-select form field.
if ENABLE_SELECT2 and 'django_select2' in settings.INSTALLED_APPS:
    try:
        from djangocms_link.fields_select2 import Select2PageSearchField as PageSearchField
    except ImportError:
        # Fallback wrapper -- presumably needed for older django_select2
        # releases where the non-legacy field is unavailable (TODO confirm).
        from djangocms_link.fields_select2_legacy import Select2LegacyPageSearchField as PageSearchField
else:
    from cms.forms.fields import PageSelectFormField as PageSearchField
|
-- Copyright (c) 1999 <NAME>.
-- See COPYRIGHT file for terms and conditions.
-- WARNING: The Set operations (insertWith...) are not adequately tested.
-- To be thorough, they should be tested on a type where distinguishable
-- values can still be "equal", and the results should be tested to make
-- sure that the "With" function was called on the right values.
module Data.Edison.Test.Set where
import Prelude hiding (concat,reverse,map,concatMap,foldr,foldl,foldr1,foldl1,
filter,takeWhile,dropWhile,lookup,take,drop,splitAt,
zip,zip3,zipWith,zipWith3,unzip,unzip3,null)
import qualified Prelude
import qualified Data.List as List -- not ListSeq!
import Data.Bits
import Data.Word
import Test.QuickCheck hiding( (===) )
import Test.HUnit (Test(..))
import Data.Edison.Prelude
import Data.Edison.Coll
import Data.Edison.Test.Utils
import qualified Data.Edison.Seq.ListSeq as L
import Data.Edison.Seq.JoinList (Seq)
import qualified Data.Edison.Seq.JoinList as S
----------------------------------------------------
-- Set implementations to test
import qualified Data.Edison.Coll.UnbalancedSet as US
import qualified Data.Edison.Coll.StandardSet as SS
import qualified Data.Edison.Coll.EnumSet as ES
-------------------------------------------------------
-- A utility class to propagate class contexts down
-- to the quick check properties
-- | Bundles every constraint the QuickCheck properties below need: the set
--   type must be comparable, showable, and generatable by QuickCheck, its
--   elements numeric and ordered, and the whole thing an 'OrdSet'.  The
--   functional dependency @set -> a@ fixes one element type per
--   implementation under test.
class (Eq (set a), Arbitrary (set a),
       Show (set a),
       Eq a, Ord a, Num a, Integral a, Real a,
       OrdSet (set a) a) => SetTest a set | set -> a

instance SetTest Int US.Set
instance SetTest Int SS.Set

-- | Element type used for 'ES.Set'.  The generator below keeps values in
--   @[0 .. finiteBitSize(Word) - 2]@ -- presumably because EnumSet is
--   backed by a single machine word and larger elements would not be
--   representable (TODO confirm against EnumSet's documentation).
newtype SmallInt = SI Int deriving (Show,Read,Eq,Ord,Enum,Num,Integral,Real)

instance Arbitrary SmallInt where
   -- Clamp arbitrary Ints into the representable range.
   arbitrary = arbitrary >>= \x -> return (SI $ abs x `mod` (finiteBitSize (0::Word) - 1))

instance CoArbitrary SmallInt where
   coarbitrary (SI x) = coarbitrary x

instance SetTest SmallInt ES.Set
--------------------------------------------------------
-- List all permutations of set types to test
-- | Top-level test group: the full property suite for each set
--   implementation, plus show/read round-trip checks for the
--   implementations that support 'Read'.
allSetTests :: Test
allSetTests = TestList
   [ setTests (empty :: US.Set Int)
   , setTests (empty :: SS.Set Int)
   , setTests (empty :: ES.Set SmallInt)
   , qcTest $ prop_show_read (empty :: US.Set Int)
   , qcTest $ prop_show_read (empty :: ES.Set SmallInt)
   ]
---------------------------------------------------------
-- List all the tests to run for each type
-- | Build the labelled property suite for one set implementation.  The
--   @set@ argument is a phantom value (typically 'empty') used only to pin
--   down the implementation type; it is never inspected.  The trailing
--   numeric comments count properties so a failure index in the runner's
--   output is easy to locate.
--
--   Fix: 'prop_single' was listed twice; the duplicate entry is removed.
--   (Evidence: with the duplicate, the "-- 10"/"-- 20"/"-- 30" counters
--   were each off by one.)
setTests :: SetTest a set => set a -> Test
setTests set = TestLabel ("Set Test "++(instanceName set)) . TestList $
   [ qcTest $ prop_single set
   , qcTest $ prop_fromSeq set
   , qcTest $ prop_insert set
   , qcTest $ prop_insertSeq set
   , qcTest $ prop_union set
   , qcTest $ prop_unionSeq set
   , qcTest $ prop_delete set
   , qcTest $ prop_deleteAll set
   , qcTest $ prop_deleteSeq set
   , qcTest $ prop_null_size set            -- 10
   , qcTest $ prop_member_count set
   , qcTest $ prop_toSeq set
   , qcTest $ prop_lookup set
   , qcTest $ prop_fold set
   , qcTest $ prop_strict_fold set
   , qcTest $ prop_filter_partition set
   , qcTest $ prop_deleteMin_Max set
   , qcTest $ prop_unsafeInsertMin_Max set
   , qcTest $ prop_unsafeFromOrdSeq set
   , qcTest $ prop_unsafeAppend set         -- 20
   , qcTest $ prop_filter set
   , qcTest $ prop_partition set
   , qcTest $ prop_minView_maxView set
   , qcTest $ prop_minElem_maxElem set
   , qcTest $ prop_foldr_foldl set
   , qcTest $ prop_strict_foldr_foldl set
   , qcTest $ prop_foldr1_foldl1 set
   , qcTest $ prop_strict_foldr1_foldl1 set
   , qcTest $ prop_toOrdSeq set
   , qcTest $ prop_intersect_difference set -- 30
   , qcTest $ prop_subset_subsetEq set
   , qcTest $ prop_fromSeqWith set
   , qcTest $ prop_insertWith set
   , qcTest $ prop_insertSeqWith set
   , qcTest $ prop_unionl_unionr_unionWith set
   , qcTest $ prop_unionSeqWith set
   , qcTest $ prop_intersectWith set
   , qcTest $ prop_unsafeMapMonotonic set
   , qcTest $ prop_symmetricDifference set
   , qcTest $ prop_strict set
   ]
-----------------------------------------------------
-- Utility operations
-- | Merge two sorted, duplicate-free lists into one sorted list, keeping a
--   single copy of any element present in both inputs.
lmerge :: Ord a => [a] -> [a] -> [a]
lmerge xs [] = xs
lmerge [] ys = ys
lmerge xs@(x:xs') ys@(y:ys') =
   case compare x y of
      LT -> x : lmerge xs' ys
      GT -> y : lmerge xs  ys'
      EQ -> x : lmerge xs' ys'
-- | Collapse each run of adjacent equal elements to one element.  Only a
--   true "nub" on sorted input, where equal elements are adjacent.
nub :: Eq a => [a] -> [a]
nub (x : rest@(y : _))
   | x == y    = nub rest
   | otherwise = x : nub rest
nub short = short

-- | Sort a list and drop duplicates.
sort :: Ord a => [a] -> [a]
sort xs = nub (List.sort xs)
-- | Equality used by the properties: both operands must satisfy the
--   implementation's structural invariant and compare equal.
(===) :: (Eq (set a),CollX (set a) a) => set a -> set a -> Bool
(===) s1 s2 = and
   [ structuralInvariant s1
   , structuralInvariant s2
   , s1 == s2
   ]

-- | Short alias for the structural-invariant check.
si :: CollX (set a) a => set a -> Bool
si = structuralInvariant
---------------------------------------------------------------
-- CollX operations
-- | 'singleton' produces an invariant-respecting set whose only element is
--   the given value.
prop_single :: SetTest a set => set a -> a -> Bool
prop_single set x =
   let xs = singleton x `asTypeOf` set
   in si xs
      &&
      toOrdList xs == [x]

-- | 'fromSeq' equals sort-and-deduplicate of the input sequence.
prop_fromSeq :: SetTest a set => set a -> Seq a -> Bool
prop_fromSeq set xs =
   let s = fromSeq xs `asTypeOf` set
   in si s
      &&
      toOrdList s == sort (S.toList xs)

-- | 'insert' is a no-op on existing members and otherwise matches
--   'List.insert' on the ordered element list.
prop_insert :: SetTest a set => set a -> a -> set a -> Bool
prop_insert set x xs =
   let insert_x_xs = insert x xs
   in si insert_x_xs
      &&
      if member x xs then
         toOrdList insert_x_xs == toOrdList xs
      else
         toOrdList insert_x_xs == List.insert x (toOrdList xs)

-- | 'insertSeq' equals union with the set built from the sequence.
prop_insertSeq :: SetTest a set => set a -> Seq a -> set a -> Bool
prop_insertSeq set xs ys =
   insertSeq xs ys === union (fromSeq xs) ys

-- | 'union' corresponds to merging the two ordered element lists.
prop_union :: SetTest a set => set a -> set a -> set a -> Bool
prop_union set xs ys =
   let xys = union xs ys
   in si xys
      &&
      toOrdList xys == lmerge (toOrdList xs) (toOrdList ys)

-- | 'unionSeq' equals folding 'union' over the sequence of sets.
prop_unionSeq :: SetTest a set => set a -> Seq (set a) -> Bool
prop_unionSeq set xss =
   unionSeq xss === S.foldr union empty xss

-- | 'delete' matches 'List.delete' on the ordered element list.
prop_delete :: SetTest a set => set a -> a -> set a -> Bool
prop_delete set x xs =
   let delete_x_xs = delete x xs
   in si delete_x_xs
      &&
      toOrdList delete_x_xs == List.delete x (toOrdList xs)

-- | In a set (no duplicates) 'deleteAll' behaves exactly like 'delete'.
prop_deleteAll :: SetTest a set => set a -> a -> set a -> Bool
prop_deleteAll set x xs =
   deleteAll x xs === delete x xs

-- | 'deleteSeq' equals folding 'delete' over the sequence.
prop_deleteSeq :: SetTest a set => set a -> Seq a -> set a -> Bool
prop_deleteSeq set xs ys =
   deleteSeq xs ys === S.foldr delete ys xs

-- | 'null' agrees with a zero 'size', and 'size' counts the elements.
prop_null_size :: SetTest a set => set a -> set a -> Bool
prop_null_size set xs =
   null xs == (size xs == 0)
   &&
   size xs == Prelude.length (toOrdList xs)

-- | 'member' agrees with list membership; in a set, 'count' is 0 or 1.
prop_member_count :: SetTest a set => set a -> a -> set a -> Bool
prop_member_count set x xs =
   mem == not (Prelude.null (Prelude.filter (== x) (toOrdList xs)))
   &&
   count x xs == (if mem then 1 else 0)
  where mem = member x xs
---------------------------------------------------------------
-- Coll operations
-- | 'toSeq' lists exactly the set's elements (in unspecified order).
prop_toSeq :: SetTest a set => set a -> set a -> Bool
prop_toSeq set xs =
   List.sort (S.toList (toSeq xs)) == toOrdList xs

-- | The lookup family agrees with 'member'/'count'.  999 is a sentinel
--   default used to detect that the default path was (not) taken.
prop_lookup :: SetTest a set => set a -> a -> set a -> Bool
prop_lookup set x xs =
   if member x xs then
      lookup x xs == x
      &&
      lookupM x xs == Just x
      &&
      lookupWithDefault 999 x xs == x
      &&
      lookupAll x xs == Prelude.take (count x xs) (repeat x)
   else
      lookupM x xs == Nothing
      &&
      lookupWithDefault 999 x xs == 999
      &&
      lookupAll x xs == []

-- | 'fold' visits each element exactly once; 'fold1' (non-empty only)
--   combines them all.
prop_fold :: SetTest a set => set a -> set a -> Bool
prop_fold set xs =
   List.sort (fold (:) [] xs) == toOrdList xs
   &&
   (null xs || fold1 (+) xs == sum (toOrdList xs))

-- | The strict folds agree with their lazy counterparts.
prop_strict_fold :: SetTest a set => set a -> set a -> Bool
prop_strict_fold set xs =
   fold' (+) 0 xs == fold (+) 0 xs
   &&
   (null xs || fold1' (+) xs == fold1 (+) xs)

-- | 'filter' matches the list filter, and 'partition' splits into the
--   filters of @p@ and @not . p@.
prop_filter_partition :: SetTest a set => set a -> set a -> Bool
prop_filter_partition set xs =
   let filter_p_xs = filter p xs
       filter_not_p_xs = filter (not . p) xs
   in si filter_p_xs
      &&
      si filter_not_p_xs
      &&
      toOrdList filter_p_xs == Prelude.filter p (toOrdList xs)
      &&
      partition p xs == (filter_p_xs, filter_not_p_xs)
  where p x = x `mod` 3 == 2
------------------------------------------------------------------
-- OrdCollX operations
-- | 'deleteMin'/'deleteMax' drop the head/last of the ordered element
--   list, and are the identity on an empty set.
prop_deleteMin_Max :: SetTest a set => set a -> set a -> Bool
prop_deleteMin_Max set xs =
   let deleteMin_xs = deleteMin xs
       deleteMax_xs = deleteMax xs
   in si deleteMin_xs
      &&
      si deleteMax_xs
      &&
      toOrdList (deleteMin xs) ==
         (let l = toOrdList xs
          in if L.null l then L.empty else L.ltail l)
      &&
      toOrdList (deleteMax xs) ==
         (let l = toOrdList xs
          in if L.null l then L.empty else L.rtail l)

-- | Reinserting an extracted extremum with the unsafe variants restores
--   the original set; on an empty set they build singletons.
prop_unsafeInsertMin_Max :: SetTest a set =>
        set a -> a -> set a -> Bool
prop_unsafeInsertMin_Max set i xs =
   if null xs then
      unsafeInsertMin 0 xs === singleton 0
      &&
      unsafeInsertMax 0 xs === singleton 0
   else
      unsafeInsertMin lo (delete lo xs) === xs
      &&
      unsafeInsertMax hi (delete hi xs) === xs
  where lo = minElem xs
        hi = maxElem xs

-- | 'unsafeFromOrdSeq' on sorted, deduplicated input equals 'fromSeq'.
prop_unsafeFromOrdSeq :: SetTest a set => set a -> [a] -> Bool
prop_unsafeFromOrdSeq set xs =
   unsafeFromOrdSeq (sort xs) === (fromSeq xs `asTypeOf` set)

-- | 'unsafeAppend' of an ordered split (all of @ys@ <= all of @zs@)
--   equals 'union'.
prop_unsafeAppend :: SetTest a set =>
        set a -> a -> set a -> Bool
prop_unsafeAppend set i xs =
   union ys zs === unsafeAppend ys zs
  where (ys,zs) = partitionLE_GT i xs

-- | The four range filters agree with the corresponding list filters.
prop_filter :: SetTest a set => set a -> a -> set a -> Bool
prop_filter set x xs =
   si setLT && si setLE && si setGT && si setGE
   &&
   toOrdList setLT == Prelude.filter (< x) (toOrdList xs)
   &&
   toOrdList setLE == Prelude.filter (<= x) (toOrdList xs)
   &&
   toOrdList setGT == Prelude.filter (> x) (toOrdList xs)
   &&
   toOrdList setGE == Prelude.filter (>= x) (toOrdList xs)
  where setLT = filterLT x xs
        setLE = filterLE x xs
        setGT = filterGT x xs
        setGE = filterGE x xs

-- | Each range partition is the pair of its two corresponding filters.
prop_partition :: SetTest a set => set a -> a -> set a -> Bool
prop_partition set x xs =
   partitionLT_GE x xs == (filterLT x xs, filterGE x xs)
   &&
   partitionLE_GT x xs == (filterLE x xs, filterGT x xs)
   &&
   partitionLT_GT x xs == (filterLT x xs, filterGT x xs)
-- OrdColl operations
-- | 'minView'/'maxView' are Nothing on empty sets and otherwise pair the
--   extremum with the remaining set.
prop_minView_maxView :: SetTest a set => set a -> set a -> Bool
prop_minView_maxView set xs =
   minView xs == (if null xs then Nothing
                  else Just (minElem xs, deleteMin xs))
   &&
   maxView xs == (if null xs then Nothing
                  else Just (maxElem xs, deleteMax xs))

-- | On non-empty sets the extrema are the ends of the ordered list.
prop_minElem_maxElem :: SetTest a set => set a -> set a -> Property
prop_minElem_maxElem set xs =
   not (null xs) ==>
      minElem xs == Prelude.head (toOrdList xs)
      &&
      maxElem xs == Prelude.last (toOrdList xs)

-- | 'foldr' builds the ascending list; 'foldl' with a flipped cons builds
--   its reverse.
prop_foldr_foldl :: SetTest a set => set a -> set a -> Bool
prop_foldr_foldl set xs =
   foldr (:) [] xs == toOrdList xs
   &&
   foldl (flip (:)) [] xs == Prelude.reverse (toOrdList xs)

-- | The strict directional folds agree with the lazy ones.
prop_strict_foldr_foldl :: SetTest a set => set a -> set a -> Bool
prop_strict_foldr_foldl set xs =
   foldr' (+) 0 xs == foldr (+) 0 xs
   &&
   foldl' (+) 0 xs == foldl (+) 0 xs

-- | foldr1/foldl1 agree with the seeded folds; 1333 is a sentinel seed
--   that @f@ pattern-matches as "no accumulated value yet".
prop_foldr1_foldl1 :: SetTest a set => set a -> set a -> Property
prop_foldr1_foldl1 set xs =
   not (null xs) ==>
      foldr1 f xs == foldr f 1333 xs
      &&
      foldl1 (flip f) xs == foldl (flip f) 1333 xs
  where f x 1333 = x
        f x y = 3*x - 7*y

-- | The strict seedless folds agree with the lazy ones.
prop_strict_foldr1_foldl1 :: SetTest a set => set a -> set a -> Property
prop_strict_foldr1_foldl1 set xs =
   not (null xs) ==>
      foldr1' (+) xs == foldr1 (+) xs
      &&
      foldl1' (+) xs == foldl1 (+) xs

-- | 'toOrdSeq' lists the elements in ascending order.
prop_toOrdSeq :: SetTest a set => set a -> set a -> Bool
prop_toOrdSeq set xs =
   S.toList (toOrdSeq xs) == toOrdList xs
-----------------------------------------------------------------------
-- SetX operations
-- | Intersection and difference agree with membership-based filters.
prop_intersect_difference :: SetTest a set =>
        set a -> set a -> set a -> Bool
prop_intersect_difference set xs ys =
   intersection xs ys === filter (\x -> member x xs) ys
   &&
   difference xs ys === filter (\x -> not (member x ys)) xs

-- | 'properSubset' is subset-and-not-equal; @xs@ is a subset of @ys@ iff
--   intersecting with @ys@ leaves @xs@ unchanged.
prop_subset_subsetEq :: SetTest a set =>
        set a -> set a -> set a -> Bool
prop_subset_subsetEq set xs ys =
   properSubset xs ys == (subset xs ys && xs /= ys)
   &&
   subset xs ys == (intersection xs ys == xs)
--------------------------------------------------------------------------
-- Set operations
-- | With 'const' as the combining function the "With" variants must
--   coincide with the plain operations.  NOTE(review): as the module
--   header warns, these do not verify which argument the combining
--   function receives; that would need a type with distinguishable
--   "equal" values.
prop_fromSeqWith :: SetTest a set => set a -> Seq a -> Bool
prop_fromSeqWith set xs =
   fromSeqWith const xs === (fromSeq xs `asTypeOf` set)

-- | 'insertWith' 'const' behaves like 'insert'.
prop_insertWith :: SetTest a set => set a -> a -> set a -> Bool
prop_insertWith set x xs =
   insertWith const x xs === insert x xs

-- | 'insertSeqWith' 'const' behaves like 'insertSeq'.
prop_insertSeqWith :: SetTest a set => set a -> Seq a -> set a -> Bool
prop_insertSeqWith set xs ys =
   insertSeqWith const xs ys === insertSeq xs ys

-- | The biased unions and 'unionWith' 'const' all equal plain 'union'.
prop_unionl_unionr_unionWith :: SetTest a set =>
        set a -> set a -> set a -> Bool
prop_unionl_unionr_unionWith set xs ys =
   unionl xs ys === u
   &&
   unionr xs ys === u
   &&
   unionWith const xs ys === u
  where u = union xs ys

-- | 'unionSeqWith' 'const' behaves like 'unionSeq'.
prop_unionSeqWith :: SetTest a set => set a -> Seq (set a) -> Bool
prop_unionSeqWith set xss =
   unionSeqWith const xss === unionSeq xss

-- | 'intersectionWith' 'const' behaves like 'intersection'.
prop_intersectWith :: SetTest a set => set a -> set a -> set a -> Bool
prop_intersectWith set xs ys =
   intersectionWith const xs ys === intersection xs ys
-- | 'unsafeMapMonotonic' with a monotone function maps the ordered element
--   list pointwise.  The maximum element is removed first -- presumably so
--   that @(+1)@ cannot push an element past the representable range of the
--   bounded 'SmallInt'/'ES.Set' elements (TODO confirm).
--
--   Fix: replaced the @if null xs then True else ...@ anti-idiom with the
--   equivalent @null xs || ...@.
prop_unsafeMapMonotonic :: SetTest a set => set a -> set a -> Bool
prop_unsafeMapMonotonic set xs =
   null xs ||
      (let xs' = deleteMax xs
       in toOrdList (unsafeMapMonotonic (+1) xs') == Prelude.map (+1) (toOrdList xs'))
-- | 'show' followed by 'read' is the identity (only run for set types
--   with a 'Read' instance; see 'allSetTests').
prop_show_read :: (SetTest a set,Read (set a),Show (set a))
        => set a -> set a -> Bool
prop_show_read set xs = xs === read (show xs)

-- | Symmetric difference equals the union of the two directed
--   differences.
prop_symmetricDifference :: SetTest a set => set a -> set a -> set a -> Bool
prop_symmetricDifference set xs ys =
   union (difference xs ys) (difference ys xs) === symmetricDifference xs ys

-- | Forcing a set ('strict', 'strictWith') must not change its value.
prop_strict :: SetTest a set => set a -> set a -> Bool
prop_strict set xs =
   strict xs === xs
   &&
   strictWith (+1) xs === xs
|
/**
* Utility for reflect.
*/
public class ReflectUtil {
/**
 * Finds and returns the Class object associated with the class or interface
 * with the given string name.
 *
 * @param <T> the type of the returned class
 * @param className the fully qualified name of the desired class
 * @return an {@code Optional<Class<T>>} holding the loaded class, or an
 *         empty {@code Optional} when no class with that name can be found
 */
public static final <T> Optional<Class<T>> findForName(String className) {
    try {
        @SuppressWarnings("unchecked")
        Class<T> found = (Class<T>) Class.forName(className);
        return Optional.of(found);
    } catch (ClassNotFoundException notFound) {
        // Absence is an expected outcome, not an error.
        return Optional.empty();
    }
}
/**
 * Returns {@code true} if the class or interface with the given string
 * name exists (is loadable), {@code false} otherwise.
 * <p>
 * This method is equivalent to:
 *
 * <pre>
 * {@code findForName(className).isPresent()};
 * </pre>
 *
 * @param className the fully qualified name of the desired class
 * @return {@code true} if the class or interface with the given string
 *         name exists, {@code false} otherwise
 * @see #findForName(String)
 */
public static final boolean hasClassForName(String className) {
    return findForName(className).isPresent();
}
/**
 * Constructs a new instance for the specified class with the given string
 * name, using a matching <em>public</em> constructor.
 *
 * <p>NOTE(review): constructor lookup uses the exact runtime classes of
 * {@code initargs} (see {@code toTypes}), so a constructor declared with a
 * supertype, interface, or primitive parameter will not be matched, and a
 * {@code null} argument causes a {@code NullPointerException} -- confirm
 * callers never pass null. Reflective failures are rethrown wrapped in
 * {@code RuntimeException}.
 *
 * @param <T> the type of the returned instance
 * @param className the fully qualified name of the desired class
 * @param initargs array of objects to be passed as arguments to the
 * constructor call
 * @return an {@code Optional<T>}; empty when the class cannot be found
 */
public static final <T> Optional<T> constructForClassName(String className, Object... initargs) {
    Optional<Class<T>> oclass = findForName(className);
    if (initargs.length == 0) {
        // No-argument case: delegate to the zero-arg constructor helper.
        return oclass.map(ReflectUtil::constructForClass);
    }
    return oclass.map(clazz -> {
        try {
            return clazz.getConstructor(toTypes(initargs)).newInstance(initargs);
        } catch (Exception e) {
            // Broad catch: any reflective failure surfaces unchecked.
            throw new RuntimeException(e);
        }
    });
}
// Invokes the public zero-argument constructor of the given class,
// wrapping any reflective failure in an unchecked RuntimeException.
private static final <T> T constructForClass(Class<T> clazz) {
    try {
        return clazz.getConstructor().newInstance();
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}
/**
 * Maps each argument to its runtime class, for reflective member lookup.
 * A {@code null} element triggers a {@code NullPointerException}, exactly
 * as the stream-based original did.
 */
private static final Class<?>[] toTypes(Object... args) {
    final Class<?>[] types = new Class<?>[args.length];
    for (int i = 0; i < args.length; i++) {
        types[i] = args[i].getClass();
    }
    return types;
}
/**
 * Constructs a new instance for the specified class with the given string
 * name, using a matching <em>declared</em> constructor -- including
 * non-public ones, which are made accessible via
 * {@code setAccessible(true)}.
 *
 * <p>NOTE(review): as with {@link #constructForClassName}, lookup uses the
 * exact runtime classes of {@code initargs}; null arguments and
 * primitive/supertype parameters are not supported. Reflective failures
 * are rethrown wrapped in {@code RuntimeException}.
 *
 * @param <T> the type of the returned instance
 * @param className the fully qualified name of the desired class
 * @param initargs array of objects to be passed as arguments to the
 * constructor call
 * @return an {@code Optional<T>}; empty when the class cannot be found
 */
public static final <T> Optional<T> constructDeclaredForClassName(String className, Object... initargs) {
    Optional<Class<T>> oclass = findForName(className);
    if (initargs.length == 0) {
        // No-argument case: delegate to the zero-arg declared-constructor helper.
        return oclass.map(ReflectUtil::constructDeclaredForClass);
    }
    return oclass.map(clazz -> {
        try {
            var constructor = clazz.getDeclaredConstructor(toTypes(initargs));
            constructor.setAccessible(true);
            return constructor.newInstance(initargs);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    });
}
private static final <T> T constructDeclaredForClass(Class<T> clazz) {
try {
var constructor = clazz.getDeclaredConstructor();
constructor.setAccessible(true);
return constructor.newInstance();
} catch (Exception e) {
throw new RuntimeException(e);
}
}
    /**
     * Invokes the underlying method represented by the {@code Method} object, that
     * reflects the specified public member method of the class or interface
     * represented by the specified {@code Class} object, on the specified object
     * with the specified parameters.
     * <p>
     * NOTE(review): parameter types are derived from the runtime classes of
     * {@code args}, so overloads with primitive parameters are not matched and
     * {@code null} arguments are not supported — confirm callers.
     *
     * @param <T>        the type of the class
     * @param <R>        the type of the result
     * @param clazz      the class
     * @param methodName the name of the method
     * @param obj        the object the underlying method is invoked from
     *                   ({@code null} for static methods)
     * @param args       the arguments used for the method call
     * @return the result of the method on {@code obj} with parameters {@code args}
     */
    @SuppressWarnings("unchecked")
    public static final <T, R> R callMethod(Class<T> clazz, String methodName, Object obj, Object... args) {
        try {
            Method method;
            if (args.length == 0) {
                method = clazz.getMethod(methodName);
            } else {
                method = clazz.getMethod(methodName, toTypes(args));
            }
            // Unchecked cast: the caller asserts the result type via <R>.
            return (R) method.invoke(obj, args);
        } catch (NoSuchMethodException | IllegalAccessException | IllegalArgumentException
                | InvocationTargetException e) {
            throw new RuntimeException(e);
        }
    }
    /**
     * Invokes the underlying method represented by the {@code Method} object, that
     * reflects the specified public static member method of the class or interface
     * represented by the specified {@code Class} object, with the specified
     * parameters.
     * <p>
     * Convenience wrapper around {@link #callMethod} with a {@code null}
     * receiver, as required for static methods.
     *
     * @param <T>        the type of the class
     * @param <R>        the type of the result
     * @param clazz      the class
     * @param methodName the name of the method
     * @param args       the arguments used for the method call
     * @return the result of the static method with parameters {@code args}
     */
    public static final <T, R> R callStaticMethod(Class<T> clazz, String methodName, Object... args) {
        return callMethod(clazz, methodName, null, args);
    }
/**
* Invokes the underlying method represented by the {@code Method} object, that
* reflects the specified declared member method of the class or interface
* represented by the specified {@code Class} object, on the specified object
* with the specified parameters.
*
* @param <T> the type of the class
* @param <R> the type of the result
* @param clazz the class
* @param methodName the name of the method
* @param obj the object the underlying method is invoked from
* @param args the arguments used for the method call
* @return the result of the declared method on {@code obj} with parameters
* {@code args}
*/
@SuppressWarnings("unchecked")
public static final <T, R> R callDeclaredMethod(Class<T> clazz, String methodName, Object obj, Object... args) {
try {
Method method;
if (args.length == 0) {
method = clazz.getDeclaredMethod(methodName);
} else {
method = clazz.getDeclaredMethod(methodName, toTypes(args));
}
method.setAccessible(true);
return (R) method.invoke(obj, args);
} catch (NoSuchMethodException | IllegalAccessException | IllegalArgumentException
| InvocationTargetException e) {
throw new RuntimeException(e);
}
}
    /**
     * Invokes the underlying method represented by the {@code Method} object, that
     * reflects the specified declared static member method of the class or
     * interface represented by the specified {@code Class} object, with the
     * specified parameters.
     * <p>
     * Convenience wrapper around {@link #callDeclaredMethod} with a
     * {@code null} receiver, as required for static methods.
     *
     * @param <T>        the type of the class
     * @param <R>        the type of the result
     * @param clazz      the class
     * @param methodName the name of the method
     * @param args       the arguments used for the method call
     * @return the result of the declared static method with parameters {@code args}
     */
    public static final <T, R> R callDeclaredStaticMethod(Class<T> clazz, String methodName, Object... args) {
        return callDeclaredMethod(clazz, methodName, null, args);
    }
/**
* Returns the actual type argument to the specified {@code parameterizedType}.
*
* @param <R> type of the argument type
* @param parameterizedType must be {@link ParameterizedType}
* @param index the index of the type arguments
* @return the actual type argument
*/
@SuppressWarnings("unchecked")
public static final <R extends Type> R getActualTypeArgument(Type parameterizedType, int index) {
return (R) ((ParameterizedType) parameterizedType).getActualTypeArguments()[index];
}
    /** Non-instantiable utility class. */
    private ReflectUtil() {
    }
} |
// VC2012Samples - Windows 8 app samples - "Association launching sample" - App.xaml.cpp
// THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF
// ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A
// PARTICULAR PURPOSE.
//
// Copyright (c) Microsoft Corporation. All rights reserved
//
// App.xaml.cpp
// Implementation of the App.xaml class.
//
#include "pch.h"
#include "App.xaml.h"
#include "MainPage.xaml.h"
using namespace Windows::UI::Xaml;
using namespace Windows::Foundation;
using namespace Windows::ApplicationModel;
using namespace Windows::ApplicationModel::Activation;
using namespace CppSamplesUtils;
using namespace AssociationLaunching;
using namespace Windows::UI::Xaml::Interop;
// Initializes the singleton application object. Registers the Suspending
// handler so session state can be saved when the app is suspended.
App::App()
{
    InitializeComponent();
    this->Suspending += ref new SuspendingEventHandler(this, &AssociationLaunching::App::OnSuspending);
}
// No explicit cleanup required; members are reference counted.
App::~App()
{
}
// Invoked when the application is being suspended. Takes a deferral so the
// asynchronous save task can finish before suspension completes.
void App::OnSuspending(Platform::Object^ sender, Windows::ApplicationModel::SuspendingEventArgs^ args)
{
    auto deferral = args->SuspendingOperation->GetDeferral();
    // Complete the deferral only after session state has been persisted.
    SuspensionManager::StartSaveTask().then([=]() {
        deferral->Complete();
    });
}
// Invoked when the application is launched normally by the end user.
// When resuming from termination, restores saved session state first;
// in both cases navigates to MainPage with no pending activation events.
void App::OnLaunched(Windows::ApplicationModel::Activation::LaunchActivatedEventArgs^ args)
{
    // Shared setup (previously duplicated in both branches): create the
    // root frame, navigate to MainPage, clear both activation events
    // (a normal launch carries neither), and activate the window.
    auto navigateToMainPage = []()
    {
        auto rootFrame = ref new Frame();
        TypeName pageType = { "AssociationLaunching.MainPage", TypeKind::Custom };
        rootFrame->Navigate(pageType);
        Window::Current->Content = rootFrame;
        auto rootPage = safe_cast<MainPage^>(rootFrame->Content);
        rootPage->FileEvent = nullptr;
        rootPage->ProtocolEvent = nullptr;
        Window::Current->Activate();
    };

    if (args->PreviousExecutionState == ApplicationExecutionState::Terminated)
    {
        // Restore saved state asynchronously before showing the UI.
        SuspensionManager::StartRestoreTask().then(navigateToMainPage);
    }
    else
    {
        navigateToMainPage();
    }
}
// Handle file activations: show MainPage, hand it the file-activation
// event args, clear any stale protocol event, and activate the window.
void App::OnFileActivated(Windows::ApplicationModel::Activation::FileActivatedEventArgs^ args)
{
    TypeName mainPageType = { "AssociationLaunching.MainPage", TypeKind::Custom };
    auto frame = ref new Frame();
    frame->Navigate(mainPageType);
    Window::Current->Content = frame;
    auto page = safe_cast<MainPage^>(frame->Content);
    page->FileEvent = args;
    page->ProtocolEvent = nullptr;
    Window::Current->Activate();
}
// Handle protocol activations: show MainPage and, when the activation kind
// is Protocol, hand the protocol event args to the page (clearing any stale
// file event) before activating the window.
void App::OnActivated(Windows::ApplicationModel::Activation::IActivatedEventArgs^ args)
{
    auto rootFrame = ref new Frame();
    TypeName pageType = { "AssociationLaunching.MainPage", TypeKind::Custom };
    rootFrame->Navigate(pageType);
    Window::Current->Content = rootFrame;
    if (args->Kind == Windows::ApplicationModel::Activation::ActivationKind::Protocol)
    {
        auto rootPage = safe_cast<MainPage^>(rootFrame->Content);
        // Downcast is safe inside this branch: Kind == Protocol.
        rootPage->ProtocolEvent = safe_cast<Windows::ApplicationModel::Activation::ProtocolActivatedEventArgs^>(args);
        rootPage->FileEvent = nullptr;
    }
    Window::Current->Activate();
}
Improving Efficiency in Iron Ore Mining Facilities
A cost-effective energy optimization project for a 35 MW iron ore pellet plant is presented. Applications involving large centrifugal loads and changing flow rates were prime targets for significant energy savings. Input-output power balances of these units provide significant data on energy-improvement possibilities. On-site high-power fans (representing 35% of plant load) feature long annual operating times and thus account for a relatively large proportion of plant energy costs. A number of corrective actions, including fan impeller redesign, proper selection of AC frequency converters, and inclusion of passive filters, were decided upon. Results show a reduction in plant energy consumption of 16 GWh/year with an operational cost reduction of 2.4 MUS$/year.
# DSP Lab 2/Assignment3_1.py
"""Print PyAudio sample-format constants and a packed signed int.

DSP Lab 2, assignment 3.1: shows that an 8-bit sample width maps to the
``paInt8`` format constant, and shows the byte representation of -1.

Fixes: the original used Python 2 ``print`` statements and the
deprecated backtick repr syntax; rewritten as Python 3.
"""
import struct

import pyaudio

# Sample width in bytes (8-bit audio).
WIDTH = 1

# Both lines should print the same PortAudio format constant (paInt8).
print(pyaudio.paInt8)
print(pyaudio.get_format_from_width(WIDTH))

# Pack -1 as a native signed 32-bit int: two's complement, all bits set,
# so the result is b'\xff\xff\xff\xff'.
sl = struct.pack('i', -1)
print(repr(sl))
/**
 *  Extends SchemEditValidator to provide validation services for collection
 *  configuration records. Specifically, ensures that status flags are not
 *  duplicated, nor do they redefine reserved flags, and that the record's
 *  idPrefix is not already assigned to another collection.<p>
 *
 *@author    ostwald <p>
 *
 * $Id: CollectionConfigValidator.java,v 1.10 2005/06/02 18:45:40 ostwald
 * Exp $
 */
public class CollectionConfigValidator extends SchemEditValidator {
	// When true, prtln() writes diagnostics to standard out.
	private static boolean debug = false;
	// Source of collection configurations and idPrefix-uniqueness checks.
	private CollectionRegistry collectionRegistry = null;
	/**
	 *  Constructor for the CollectionConfigValidator object
	 *
	 *@param  sef                 form bean holding the record being validated
	 *@param  framework           metadata framework describing the record's schema
	 *@param  mapping             Struts action mapping for the editor
	 *@param  request             current HTTP request
	 *@param  collectionRegistry  registry of all collection configurations
	 */
	public CollectionConfigValidator(CollectionRegistry collectionRegistry,
			SchemEditForm sef,
			MetaDataFramework framework,
			ActionMapping mapping,
			HttpServletRequest request) {
		super(sef, framework, mapping, request);
		this.collectionRegistry = collectionRegistry;
	}
	/**
	 *  In addition to validating against the schema, checks that the idPrefix is
	 *  unique and that statusFlags neither duplicate each other nor redefine
	 *  reserved flags; creates SchemEditErrors for each violation found.
	 *
	 *@return    the accumulated validation errors (schema errors plus the
	 *           idPrefix and status-flag errors detected here)
	 */
	public SchemEditActionErrors validateForm() {
		prtln("validateForm");
		// im.displayAttributeFields();
		// im.displayElementFields();
		SchemEditActionErrors errors = super.validateForm();
		// make a list of reserved status flag labels for this collection
		String collection = sef.getRecId();
		prtln("collection: " + collection);
		CollectionConfig collectionConfig = collectionRegistry.getCollectionConfig(collection);
		if (collectionConfig == null) {
			prtln("collectionConfig not found");
		}
		// Labels are compared case-insensitively, so everything is lower-cased
		// before being added to this list.
		List unavailableFlags = new ArrayList();
		for (Iterator i = StatusFlags.reservedStatusLabels().iterator(); i.hasNext(); ) {
			String label = (String) i.next();
			unavailableFlags.add(label.toLowerCase());
		}
		/* prtln("\n reserved status labels");
		for (Iterator i = unavailableFlags.iterator(); i.hasNext(); ) {
			prtln("\t" + (String) i.next());
		} */
		// ensure the idPrefix is not already assigned
		InputField idPrefixField = this.getIdPrefixField();
		// Debugging
		if (idPrefixField != null) {
			prtln ("\nidPrefixField.getValue(): " + idPrefixField.getValue());
			prtln ("dup? " + collectionRegistry.isDuplicateIdPrefix(collection, idPrefixField.getValue()));
		}
		else
			prtln ("idPrefixField is NULL");
		// don't flag dup error if the idPrefix is null or empty - it will be caught as an empty required field.
		if (idPrefixField != null &&
			idPrefixField.getValue().trim().length() > 0 &&
			collectionRegistry.isDuplicateIdPrefix(collection, idPrefixField.getValue())) {
			SchemEditErrors.addError(errors, idPrefixField, "dup.idPrefix.error");
			exposeField(idPrefixField);
		}
		// check the finalStatusLabel
		// if it's okay, then add it to flaglist
		// if not, create error
		InputField finalStatusField = getFinalStatusFlagField();
		if (finalStatusField == null) {
			// prtln(" .... not found");
		}
		else {
			String finalStatusLabel = finalStatusField.getValue().toLowerCase();
			// prtln("finalStatusLabel: " + finalStatusLabel);
			// The default final-status label is exempt from the reserved check.
			if (!finalStatusLabel.equals(StatusFlags.DEFAULT_FINAL_STATUS.toLowerCase()) &&
				unavailableFlags.contains(finalStatusLabel)) {
				SchemEditErrors.addError(errors, finalStatusField, "status.flag.error");
				exposeField(finalStatusField);
			}
			else {
				unavailableFlags.add(finalStatusLabel);
			}
		}
		// check each statusFlag element and if it is on the list, create error
		// (each accepted label is added to the list, which also catches
		// duplicates among the statusFlag elements themselves)
		for (Iterator i = getStatusFlagFields().iterator(); i.hasNext(); ) {
			InputField field = (InputField) i.next();
			String statusLabel = field.getValue().toLowerCase();
			// prtln(statusLabel);
			if (unavailableFlags.contains(statusLabel)) {
				// prtln(" ... is illegal");
				SchemEditErrors.addError(errors, field, "status.flag.error");
				exposeField(field);
			}
			else {
				unavailableFlags.add(statusLabel);
			}
		}
		return errors;
	}
	// Returns the InputField whose xpath exactly matches the given path, or
	// null when no such field is managed by the InputManager.
	private InputField getInputField (String xpath) {
		for (Iterator i = im.getInputFields().iterator(); i.hasNext(); ) {
			InputField field = (InputField) i.next();
			if (field.getXPath().equals(xpath)) {
				return field;
			}
		}
		return null;
	}
	/**
	 *  Gets the finalStatusFlag InputField from the InputManager
	 *
	 *@return    The finalStatusFlagField value, or null when absent
	 */
	private InputField getFinalStatusFlagField() {
		String finalStatusLabelPath = "/collectionConfigRecord/statusFlags/@finalStatusLabel";
		/* for (Iterator i = im.getInputFields().iterator(); i.hasNext(); ) {
			InputField field = (InputField) i.next();
			// prtln (field.toString());
			if (field.getXPath().equals(finalStatusLabelPath)) {
				return field;
			}
		}
		return null; */
		return this.getInputField(finalStatusLabelPath);
	}
	// Returns the idPrefix InputField, or null when absent.
	private InputField getIdPrefixField () {
		String idPrefixPath = "/collectionConfigRecord/idPrefix";
		return this.getInputField(idPrefixPath);
	}
	/**
	 *  Gets the statusFlag InputFields from the InputManager
	 *
	 *@return    The statusFlagFields value (possibly empty, never null)
	 */
	private List getStatusFlagFields() {
		List list = new ArrayList();
		String statusFlagPath = "/collectionConfigRecord/statusFlags/statusFlag/status";
		for (Iterator i = im.getInputFields().iterator(); i.hasNext(); ) {
			InputField field = (InputField) i.next();
			// prtln (field.toString());
			// startsWith: indexed occurrences of the repeating element all
			// share this normalized path prefix.
			if (field.getNormalizedXPath().startsWith(statusFlagPath)) {
				list.add(field);
			}
		}
		return list;
	}
	/**
	 *  Print a line to standard out (only when debug is enabled).
	 *
	 *@param  s  The String to print.
	 */
	private static void prtln(String s) {
		if (debug) {
			// System.out.println("CollectionConfigValidator: " + s);
			System.out.println(s);
		}
	}
}
/**
 * JAX-RS resource exposing CRUD operations for lab appointments
 * (CitaLaboratorio) under the "/citaLaboratorio" path.
 *
 * @author Santiago Rojas
 */
@Path ("/citaLaboratorio")
@Produces("application/json")
@Consumes("application/json")
@RequestScoped
public class CitaLaboratorioResource {
    // Logger used for request/response tracing on every endpoint.
    private static final Logger LOGGER = Logger.getLogger(CitaLaboratorioResource.class.getName());
    // Business-logic bean handling persistence of lab appointments.
    @Inject
    private CitaLaboratorioLogic citaLogic;
    /**
     * Creates a new lab appointment (CitaLaboratorio) from the request body
     * and returns an identical object whose id has been auto-generated by
     * the database.
     *
     * @param pCitaLaboratorio {@link CitaLaboratorioDTO} - the lab appointment
     *        to save.
     * @return JSON {@link CitaLaboratorioDTO} - the saved appointment with its
     *         auto-generated id attribute.
     * @throws BusinessLogicException {@link BusinessLogicExceptionMapper} -
     *         business-logic error thrown when the appointment already exists.
     */
    @POST
    public CitaLaboratorioDTO createCitaLaboratorio (CitaLaboratorioDTO pCitaLaboratorio) throws BusinessLogicException
    {
        LOGGER.log(Level.INFO, "CitaLaboratorioResource createCitaLaboratorio: input: {0}", pCitaLaboratorio.toString());
        CitaLaboratorioDTO nuevoCitaLabDTO = new CitaLaboratorioDTO(citaLogic.createCitaLaboratorio(pCitaLaboratorio.toEntity()));
        LOGGER.log(Level.INFO, "CitaLaboratorioResource createCitaLaboratorio: output: {0}", nuevoCitaLabDTO.toString());
        return nuevoCitaLabDTO;
    }
    /**
     * Looks up the lab appointment with the id received in the URL and
     * returns it.
     *
     * @param pCitaLaboratorioId identifier of the appointment being looked up;
     *        must be a string of digits.
     * @return JSON {@link CitaLaboratorioDTO} - the requested appointment.
     * @throws WebApplicationException {@link WebApplicationExceptionMapper} -
     *         404 error thrown when the appointment does not exist.
     */
    @GET
    @Path("{CitaLaboratorioId: \\d+}")
    public CitaLaboratorioDTO getCitaLaboratorio (@PathParam ("CitaLaboratorioId") Long pCitaLaboratorioId )
    {
        LOGGER.log(Level.INFO, "CitaLaboratorioResource getCitaLaboratorio: input: {0}", pCitaLaboratorioId);
        CitaLaboratorioEntity entity = citaLogic.getCita(pCitaLaboratorioId);
        if(entity == null)
        {
            throw new WebApplicationException("El recurso /citaLaboratorio/" + pCitaLaboratorioId + " no existe .", 404);
        }
        CitaLaboratorioDTO citaLabDTO = new CitaLaboratorioDTO(entity);
        LOGGER.log(Level.INFO, "CitaLaboratorioResource getCitaLaboratorio: output: {0}", citaLabDTO);
        return citaLabDTO;
    }
    /**
     * Finds and returns all lab appointments that exist in the application.
     *
     * @return JSONArray {@link CitaLaboratorioDTO} - the lab appointments found
     *         in the application; an empty list when there are none.
     */
    @GET
    public List <CitaLaboratorioDTO> getCitasLaboratorio ()
    {
        LOGGER.info("CitaLaboratorioResource getCitasLaboratorio: input: void");
        List<CitaLaboratorioDTO> listaCitasLab = listEntityDTO(citaLogic.getCitasLab()) ;
        LOGGER.log(Level.INFO, "CitaLaboratorioResource getCitasLaboratorio: output: {0}", listaCitasLab.toString());
        return listaCitasLab;
    }
    /**
     * Returns the laboratory associated with the given lab appointment.
     *
     * @param pCitaLaboratorioId identifier of the appointment whose laboratory
     *        is requested.
     * @return JSON {@link LaboratorioDTO} - the associated laboratory.
     * @throws BusinessLogicException when the association cannot be resolved.
     */
    @GET
    @Path ("{CitaLaboratorioId:\\d+}/laboratorio")
    public LaboratorioDTO getLaboratorioFromCita (@PathParam ("CitaLaboratorioId") Long pCitaLaboratorioId) throws BusinessLogicException
    {
        return new LaboratorioDTO(citaLogic.getLaboratorioFromCita(pCitaLaboratorioId));
    }
    /**
     * Deletes the lab appointment with the given id.
     *
     * @param pCitaLaboratorioId identifier of the appointment to delete.
     * @throws BusinessLogicException when business rules forbid the deletion.
     * @throws WebApplicationException 404 error when the appointment does not exist.
     */
    @DELETE
    @Path("{CitaLaboratorioId:\\d+}")
    public void deleteCitaLaboratorio (@PathParam ("CitaLaboratorioId") Long pCitaLaboratorioId) throws BusinessLogicException
    {
        LOGGER.log(Level.INFO, "CitaLaboratorioDTO deleteCitaLaboratorio:input: {0}", pCitaLaboratorioId);
        if (citaLogic.getCita(pCitaLaboratorioId) == null)
        {
            throw new WebApplicationException("El recurso /citaLaboratorio/ que desea eliminar" + pCitaLaboratorioId + " no existe.", 404);
        }
        citaLogic.deleteCitaLab(pCitaLaboratorioId);
        LOGGER.info("CitaLaboratorioDTO deleteCitaLaboratorio: output: void");
    }
    /**
     * Replaces the lab appointment with the given id with the one received in
     * the request body.
     *
     * @param pCitaLaboratorioId identifier of the appointment to update.
     * @param pCitaD {@link CitaLaboratorioDTO} - the new appointment data.
     * @return JSON {@link CitaLaboratorioDTO} - the updated appointment.
     * @throws BusinessLogicException when business rules reject the update.
     * @throws WebApplicationException 404 error when the appointment does not exist.
     */
    @PUT
    @Path("{CitaLaboratorioId:\\d+}")
    public CitaLaboratorioDTO updateCitaLaboratorio (@PathParam ("CitaLaboratorioId") Long pCitaLaboratorioId, CitaLaboratorioDTO pCitaD) throws BusinessLogicException,WebApplicationException
    {
        LOGGER.log(Level.INFO, "CitaLaboratorioResource modificarCitaLaboratorio: input: {0}, citaLaboratorio {1}", new Object[]{pCitaLaboratorioId, pCitaD.toString()});
        pCitaD.setId(pCitaLaboratorioId);
        if (citaLogic.getCita(pCitaLaboratorioId) == null)
        {
            throw new WebApplicationException("El recurso /citaLaboratorio/ que quiere editar con id" + pCitaLaboratorioId + " no existe.", 404);
        }
        CitaLaboratorioDTO nuevoDTO = new CitaLaboratorioDTO(citaLogic.updateCitaLaboratorio(pCitaLaboratorioId, pCitaD.toEntity()));
        LOGGER.log(Level.INFO,"CitaLaboratorioResource modificarCitaLaboratorio: output: {0}", nuevoDTO.toString());
        return nuevoDTO;
    }
    // Converts a list of entities into the corresponding list of DTOs,
    // preserving order.
    private List<CitaLaboratorioDTO> listEntityDTO(List<CitaLaboratorioEntity> entityList)
    {
        List<CitaLaboratorioDTO> list = new ArrayList<>();
        for (CitaLaboratorioEntity entity : entityList)
        {
            list.add(new CitaLaboratorioDTO(entity));
        }
        return list;
    }
}
// app/index/controller.go
package index
import (
"github.com/gin-gonic/gin"
"time"
)
// Controller handles HTTP requests for the index module, delegating
// business logic to Service.
type Controller struct {
	Service *Service
}
// Index returns a simple payload containing the current server time.
func (x *Controller) Index(c *gin.Context) interface{} {
	payload := gin.H{"time": time.Now()}
	return payload
}
// IpQuery binds the "value" query parameter, which must be present and a
// valid IPv4 address.
type IpQuery struct {
	Value string `form:"value" binding:"required,ip4_addr"`
}
// Ip validates the "value" query parameter as an IPv4 address and looks it
// up through the service layer. Binding and lookup errors are returned
// unchanged so the framework's error handling can render them.
func (x *Controller) Ip(c *gin.Context) interface{} {
	query := IpQuery{}
	if err := c.ShouldBindQuery(&query); err != nil {
		return err
	}
	result, err := x.Service.FindIp(c.Request.Context(), query.Value)
	if err != nil {
		return err
	}
	return result
}
|
-- Advent of Code 2020, day 17 (Conway Cubes), with Criterion benchmarks.
{-# LANGUAGE BangPatterns #-}
{-# LANGUAGE ScopedTypeVariables #-}
import Criterion.Main
import Data.Functor
import Data.Map (Map)
import qualified Data.Map as M
import Data.Set (Set)
import qualified Data.Set as S
-- | Apply a function to a value a given number of times, forcing each
-- intermediate result to WHNF so thunks do not pile up.
-- Note: a negative count never terminates (callers pass non-negative).
iter :: (a -> a) -> Int -> a -> a
iter f = loop
  where
    loop k acc
      | k == 0 = acc
      | otherwise = loop (k - 1) $! f acc
-- | A cell position in 3-D space.
type Coord = (Int, Int, Int)

-- | A cell position in 4-D space.
type Coord4 = (Int, Int, Int, Int)

-- | All 26 cells adjacent to a 3-D coordinate (the coordinate itself is
-- excluded by the @(x, y, z) \/= (0, 0, 0)@ guard).
neighbors :: Coord -> Set Coord
neighbors (a, b, c) = S.fromList [(a + x, b + y, c + z) | x <- l, y <- l, z <- l, (x, y, z) /= (0, 0, 0)]
  where
    l = [-1 .. 1]

-- | All 80 cells adjacent to a 4-D coordinate (the coordinate itself is
-- excluded).
neighbors4 :: Coord4 -> Set Coord4
neighbors4 (a, b, c, d) = S.fromList [(a + x, b + y, c + z, d + w) | x <- l, y <- l, z <- l, w <- l, (x, y, z, w) /= (0, 0, 0, 0)]
  where
    l = [-1 .. 1]

-- | The set of currently-active 3-D cells.
type PC = Set Coord

-- | The set of currently-active 4-D cells.
type PC4 = Set Coord4
-- | One generation of the cellular automaton, generic in the coordinate
-- type. @ns@ computes a cell's neighbour set, @aliveCond@ decides survival
-- from a live cell's live-neighbour count, and @birthCond@ decides birth
-- from a dead cell's live-neighbour count.
--
-- Cells with zero live neighbours never appear in @ncs@, so an isolated
-- live cell is dropped implicitly — consistent here, since the
-- @aliveCond@ used by part1/part2 is never satisfied by 0.
step :: forall k. Ord k => (k -> Set k) -> (Int -> Bool) -> (Int -> Bool) -> Set k -> Set k
step ns aliveCond birthCond ps = keep <> birth
  where
    -- Live-neighbour count for every cell adjacent to at least one live cell.
    ncs :: Map k Int
    ncs =
      M.unionsWith (+) $
        S.toList ps <&> \p ->
          M.fromSet (const 1) (ns p)
    -- Live cells that survive this generation.
    keep = M.keysSet (M.filter aliveCond (ncs `M.restrictKeys` ps))
    -- Dead cells that are born this generation.
    birth = M.keysSet (M.filter birthCond (ncs `M.withoutKeys` ps))
-- | Run six generations of the 3-D automaton (survive on 2 or 3 live
-- neighbours, birth on exactly 3) starting from the 2-D input grid placed
-- on the z = 0 plane, and count the live cells.
part1 :: [[Bool]] -> Int
part1 grid = S.size (iter evolve 6 initial)
  where
    evolve = step neighbors (\n -> n == 2 || n == 3) (== 3)
    cells = concat (zipWith (\y row -> concatMap (\(x, alive) -> [(x, y, 0) | alive]) row) [0 ..] (zip [0 ..] <$> grid))
    initial :: PC
    initial = S.fromList cells
-- | Same automaton as 'part1' but in four dimensions: the input grid is
-- placed on the z = 0, w = 0 plane, evolved six times, then counted.
part2 :: [[Bool]] -> Int
part2 grid = S.size (iter evolve 6 initial)
  where
    evolve = step neighbors4 (\n -> n == 2 || n == 3) (== 3)
    cells = concat (zipWith (\y row -> concatMap (\(x, alive) -> [(x, y, 0, 0) | alive]) row) [0 ..] (zip [0 ..] <$> grid))
    initial :: PC4
    initial = S.fromList cells
-- | Read the day-17 input grid ('#' = alive, '.' = dead) from "day17.txt",
-- print both puzzle answers, then run Criterion benchmarks for each part.
main = do
  let dayNumber = 17
  let dayString = "day" <> show dayNumber
  let dayFilename = dayString <> ".txt"
  inpf <- lines <$> readFile dayFilename
  -- Translate one character cell into a Bool, rejecting anything else.
  let conv c = case c of
        '#' -> True
        '.' -> False
        _ -> error $ "invalid cell: " ++ show c
  let inp = map (map conv) inpf
  print (part1 inp)
  print (part2 inp)
  defaultMain
    [ bgroup
        dayString
        [ bench "part1" $ whnf part1 inp,
          bench "part2" $ whnf part2 inp
        ]
    ]
/**
* Sql body utility
*/
public class SqlBodyUtil {
private static final String EMPTY_STRING = "";
/**
* Limit sql body size to specify {@code JDBCPluginConfig.Plugin.JDBC.SQL_BODY_MAX_LENGTH}
* @param sql Sql to limit
*/
public static String limitSqlBodySize(String sql) {
if (sql == null) {
return EMPTY_STRING;
}
if (JDBCPluginConfig.Plugin.JDBC.SQL_BODY_MAX_LENGTH > 0 && sql.length() > JDBCPluginConfig.Plugin.JDBC.SQL_BODY_MAX_LENGTH) {
return sql.substring(0, JDBCPluginConfig.Plugin.JDBC.SQL_BODY_MAX_LENGTH) + "...";
}
return sql;
}
} |
#ifndef COLOURREPLACEMENTRULE_H
#define COLOURREPLACEMENTRULE_H
#include "Vision/visionblackboard.h"
#include "Vision/VisionTools/classificationcolours.h"
#include "Vision/VisionTypes/coloursegment.h"
/*!
  A rule describing how a triplet of adjacent colour segments should be
  re-coloured: when a middle segment (bounded by matching before/after
  segments) satisfies the length and colour constraints, it is replaced
  according to the rule's ReplacementMethod.
 */
class ColourReplacementRule
{
    //METHOD DEFINITION
public:
    //! How the matched middle segment is re-coloured.
    enum ReplacementMethod {
        BEFORE,
        AFTER,
        SPLIT,
        INVALID
    };

    /*!
      Gets the name of the given method.
      @param method The method name desired.
      @return String name of the method.
      */
    static std::string getMethodName(ReplacementMethod method);

    /*!
      Gets the method matching the given string.
      @param name String name of the method.
      @return The method desired (INVALID when the name is not recognised).
      */
    static ReplacementMethod getMethodFromName(const std::string& name);

public:
    static ColourSegment nomatch;  //! @variable a static segment used to represent one that cannot be matched to any rule.

    ColourReplacementRule();

    /*!
      Checks if the given segment triplet matches this rule (in either
      scan order).
      @param before the first segment.
      @param middle the second segment.
      @param after the last segment.
      @return Whether it is a match.
      */
    bool match(const ColourSegment& before, const ColourSegment& middle, const ColourSegment& after) const;

    /*!
      Checks the triplet against this rule in the given order only.
      @param before the first segment.
      @param middle the second segment.
      @param after the last segment.
      @return Whether it is a match.
      */
    bool oneWayMatch(const ColourSegment& before, const ColourSegment& middle, const ColourSegment& after) const;

    /*!
      Returns the replacement method (before, after or split) for this rule.
        - before - the middle segment is given the colour of the first.
        - after - the middle segment is given the colour of the last.
        - split - the middle segment is split into two, each given the colour of the adjacent segment.
      @return An enum for the method.
      */
    ReplacementMethod getMethod() const;

    //! output stream operator.
    friend std::ostream& operator<< (std::ostream& output, const ColourReplacementRule& c);
    //! output stream operator for a vector of rules.
    friend std::ostream& operator<< (std::ostream& output, const std::vector<ColourReplacementRule>& v);

    //! input stream operator.
    friend std::istream& operator>> (std::istream& input, ColourReplacementRule& c);
    //! input stream operator for a vector of rules.
    friend std::istream& operator>> (std::istream& input, std::vector<ColourReplacementRule>& v);

private:
    std::string m_name;  //! @variable the name of the rule.

    unsigned int m_middle_min,   //! @variable the minimum length of the middle segment for a match.
                 m_middle_max,   //! @variable the maximum length of the middle segment for a match.
                 m_before_min,   //! @variable the minimum length of the first segment for a match.
                 m_before_max,   //! @variable the maximum length of the first segment for a match.
                 m_after_min,    //! @variable the minimum length of the last segment for a match.
                 m_after_max;    //! @variable the maximum length of the last segment for a match.

    std::vector<Colour> m_before,   //! @variable The colour that the first segment must be.
                        m_middle,   //! @variable The colour that the middle segment must be.
                        m_after;    //! @variable The colour that the last segment must be.

    ReplacementMethod m_method;  //! @variable The replacement method for this rule.
};
#endif // COLOURREPLACEMENTRULE_H
|
package ch05;
/**
 * Example 5-2: member function that counts the nodes of a binary tree.
 */
public class Example5_2 {

	/**
	 * Counts the nodes in the tree via a pre-order traversal.
	 *
	 * @param T root of the (sub)tree; may be {@code null}
	 * @return number of nodes in the subtree rooted at {@code T}
	 */
	public int countNode(BiTreeNode T) {
		if (T == null) {
			return 0; // an empty subtree has no nodes
		}
		// this node, plus the node counts of the left and right subtrees
		return 1 + countNode(T.getLchild()) + countNode(T.getRchild());
	}

	public static void main(String[] args) {
		// Build the sample tree and count its nodes from the root.
		BiTree biTree = new BiTreeCreator().createBiTree();
		BiTreeNode root = biTree.getRoot();
		Example5_2 e = new Example5_2();
		System.out.println("树中的结点个数为: " + e.countNode(root));
	}
}
// Expected output:
// 树中的结点个数为: 13  (i.e. "Number of nodes in the tree: 13")
/**
* onCreate method that instantiates the activity and associated the proper layout.
* In FinishActivity, it sets the View, and checks the intent of the PlayActivity.
*
* Sets the internal variables to the values from intent, then changes the display text based on the data
* received.
*
*/
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.finish_activity);
maze = MazeHolder.getData();
Bundle extras = getIntent().getExtras();
if (extras != null) {
win = extras.getBoolean("win");
}
TextView textView = (TextView) findViewById(R.id.textView1);
TextView textView2 = (TextView) findViewById(R.id.textView2);
TextView textView3 = (TextView) findViewById(R.id.textView3);
textView2.setText("Path Length: " + maze.getPathSteps());
textView3.setText("Battery: " + maze.getBattery());
if(win==true){
textView.setText("YOU WIN!");
}else{
textView.setText("YOU LOSE!");
MediaPlayer mediaPlayer = MediaPlayer.create(getApplicationContext(), R.raw.i_cant_lose);
mediaPlayer.start();
}
} |
On this day 12 months ago, Moscow held off from retaliating to Barack Obama's diplomatic parting shot, in hope of a new start. Instead, 2017 has been the worst year of US-Russia relations since the fall of the Berlin Wall.
Three weeks before leaving the Oval Office, Obama expelled 35 Russian diplomats and closed two Moscow-owned properties in the US over claims of election meddling. Counter-measures were expected.
Read more
“I invite all children of US diplomats accredited in Russia to the New Year and Christmas children’s parties in the Kremlin,” Russian President Vladimir Putin announced instead, capitalizing on the moment to show unexpected festive magnanimity.
Yet by that point Russia’s bogeyman role as the explanation of the Democrats’ loss in the 2016 election, and the weapon with which to delegitimize the incoming President Donald Trump, was set. ‘Shattered’, the sympathetic chronicle of Hillary Clinton’s doomed presidential run, details how “less than 24 hours after the concession speech” her campaign manager John Podesta and his team gathered at their emptying headquarters to forge the narrative for the coming months in which “Russian hacking was the centerpiece.”
On January 10, 2017 BuzzFeed published the DNC-funded Christopher Steele dossier – a historic document not for the revelations contained therein, but as a marker for the strength of sentiments against Donald Trump, and a watershed for the deliberate dropping of US journalistic standards for a political cause.
Tit-for-tat
An alliance between the bitter Republican establishment and vengeful Democrats has since formed in Congress, driving the anti-Russian agenda throughout the year. The sanctions bill overwhelmingly passed in July designated Russia alongside Iran and North Korea as America’s official “adversaries.” It also charged Moscow with such a wide-ranging list of offenses – encompassing Ukraine, Syria, hacking, energy security threats – and a similarly lengthy list of targets – from politicians, to oligarchs, to companies, to banks – that even with the best will in the world, it is not clear that the Kremlin could fulfil any specific criteria to have them removed.
Read more
Congress also did not pass up the opportunity to humiliate its president like an untrustworthy child, by placing severe restrictions on Trump’s ability to loosen any of the measures regardless of international dynamics, thus rendering all future diplomatic efforts likely futile. Hence the pitiful sight of Trump signing the bill into action even as he denigrated it as “deeply flawed.”
Unsurprisingly, Moscow said that the measures would result in an “out-and-out trade war” while Putin called them “illegal” and bemoaned that “US-Russian relations are being sacrificed to resolve internal policy issues.”
The act set off the most unedifying tit-for-tat saga of the year, in which both sides tasked themselves with calculating exactly how many consular staff they needed to expel to reach “parity” in the stand-off, and arguing over whether each step was an escalation or merely the just response in the dispute.
In response to Congress and those initially unanswered expulsions, Putin ordered US missions to reduce their payroll by 755 employees. One month later, in late August, the White House gave Moscow 72 hours to shutter three of its diplomatic buildings, including its oldest consulate in San Francisco, which in turn led to threats of lawsuits from the Russian side, unhappy about being forced out of properties it owns.
The drip-drip of hostilities has carried on through the past three months, with many of the announcements centered on international media. Following the US order to single out RT America as a “foreign agent” – a designation originally created to root out Nazi propagandists before World War II – Washington and Moscow have engaged in one-upmanship, with the Russian Duma passing a mirror bill, which reduced what was already a nebulous term into a playground insult. In an example of the pettiness and the low stakes, RT has been stripped of its congressional credentials and US reporters can no longer visit the Duma.
Containing Russia?
None of the above measures will pave the way for a détente, but what if this is not the American aim? After all, the White House's national security strategy published earlier this month talks of “preserving peace through strength, and advancing American influence in the world” all while promoting “our way of life.” In turn, Russia is described as “challenging American power, influence, and interests, attempting to erode American security and prosperity.”
If the US yardstick of success is not peace but domination, then punishing the Kremlin is a success, and the question becomes, “is the US treatment of Russia effective in asserting its superiority and changing Moscow’s behavior?”
But on this score 2017 has also been a failure for the US, due to a lack of a coherent approach, as even John McCain would admit. Trump has spoken of “healing” relations with Moscow – that talk has been echoed by his lieutenants – and does not come across as an obvious figurehead in a new Cold War. However, he hasn’t been so much as able to exchange several words with Putin at an official dinner without being branded a traitor by large swathes of the establishment. On the other hand, no one appears to have been pushing Trump to order the consulate closures, which aggravated the Kremlin more than the more substantial but less arbitrary moves. And yet when Trump made the decision, was he really motivated by a desire to achieve geopolitical results, or as a means of deflecting accusations about being friendly to Russia? Or did he do it at the behest of security and military officials – the same ones whose hand can be seen in the latest doctrine?
This is chaos.
From the angle of effectiveness, if anyone has gained from the erratic US policy-making it is Putin, who looks measured, consistent and statesmanlike in his pronouncements, while his officials speak with one voice, as Russia has clearly pursued a number of international policies over a period of several years.
Lose-lose
While Putin can take solace from his increasingly prominent portrait as an international diplomatic mastermind outwitting the hapless Trump, it is obvious that the last 12 months have been a lose-lose proposition for both Washington and Moscow. The simmering morass leaves Russia cut off from the West, in a situation where international sanctions become the 'new normal.' Meanwhile, the White House misses out on a potential economic and diplomatic partner on issues from the Middle East to North Korea to the Arctic, and retains a regional rival forever capable of throwing around its international weight or scuttling a UN Security Council resolution.
Read more
Ironically, 2017 was geopolitically an opportune year for the two countries to mend relations. Both countries can take credit for defeating ISIS – and not catching each other in the crossfire. The Syrian conflict has swung decisively to President Bashar Assad’s government, with the White House fully aware that from now on deal-making could bring greater dividends than pouring more money and goodwill in the black hole of supporting the opposition. The Ukrainian conflict remains intractable, but has receded in diplomatic importance, while accepting Crimean secession as a fait accompli has become plausible enough if the issue is not to poison Moscow-Washington relations for decades to come.
Even if circumstances continue to be favorable in 2018, prospects for a thaw remain thin. The Democrats will not let go of the Russia-Trump story even if the investigation by Special Counsel Robert Mueller yields no smoking gun, while specific sanctions against Russia as a result of July’s law will likely power a new spiral of strife. Putin is the overwhelming favorite for re-election, while Trump’s political ratings don’t give him much leeway. Nor is his policy-making growing more structured. Over the next 12 months, in the face of inexorable inertia pulling apart Russia and the United States, a stagnant lull would likely count as progress.
Igor Ogorodnev for RT |
Image caption Some legal aid protesters made their opposition to price competitive tendering (PCT) clear in May
Plans to cut the legal aid bill by awarding contracts to the lowest bidder have been dropped, Justice Secretary Chris Grayling has announced.
In an interview with the Times newspaper, Mr Grayling said the move was part of a deal he had reached with the Law Society for England and Wales.
Opponents had warned the policy would "irrevocably damage the criminal justice system".
But other cuts to legal aid are expected to go ahead.
In a statement in the Commons Mr Grayling said prisoners and households with more than £3,000 per month of disposable income would no longer be able to access legal aid.
And immigrants who had been in the country less than a year would be unable to access aid in civil cases, he said.
Analysis If not a complete U-turn, today's revised plans are a major change of direction. So, what happened? Price Competitive Tendering - bidding for contracts for legal aid work - was central to government plans to reform criminal legal aid. Ministers believed it would have consolidated a fractured and inefficient market with fewer, bigger suppliers. But it was hugely contentious. Law firms painted a picture of a cut- price service provided by the lowest bidder. They feared underbidding each other to win a contract, and losing out to the commercial muscle of new entrants to the legal services market such as the Stobart group. The Law Society argued that "quality" rather than a low bid should be what permits a lawyer to undertake legal aid work. They successfully moved the perennial debate about fat cat lawyers to one about protecting individual rights. Conservative newspapers rallied to their cause, and critically the justice secretary listened.
Ministers had intended to introduce price competitive tendering (PCT) as part of a string of reforms aiming to cut the £2bn annual legal aid bill in England and Wales by £350m a year.
BBC legal correspondent Clive Coleman said the proposal was very controversial, with concerns that the lowest bid would win in a "race to the bottom" which could impact quality.
He said that while the bidding proposal had been scrapped other savings would go ahead as the government remained committed to saving money on the legal aid system - said to be one of the most expensive in the world.
'Factory mentality'
There are currently 1,600 legal aid providers and there is potential for that number to be expanded if firms meet minimum quality standards.
The government plans to put a cap on contracts for duty solicitor work at police stations and to reduce legal aid fees by 17.5% across the board.
Legal aid cuts in the pipeline Legal aid fees will be cut by 17.5% across the board
Residency tests to be introduced for civil legal aid - only those who have lived in the UK for more than 12 months will be eligible
Cap on contracts for duty solicitor work at police stations
Income restrictions will be put in place - those with more than £3,000 per month after mortgage, tax and other "essential outgoings" will not be entitled to aid
11,000 cases brought by prisoners will no longer be eligible
It also plans to set up a working party to look at how thousands of short hearings can be avoided, or dealt with by email or video link.
Its final proposals on legal aid reform will be subject to a six-week consultation, our correspondent added.
Mr Grayling said: "We will introduce a new residency test that will prevent most people who have only just arrived in the UK from accessing civil legal aid until a year after they had arrived.
"We will limit criminal legal aid for prisoners so that it is not available unnecessarily. There will be no more legal aid available because you don't like your prison.
"We will set out new rules that will mean the wealthiest Crown Court defendants - those in households with more than £3,000 in disposable income left after tax, housing costs and other essential outgoings - will have to fund their own legal costs."
He said that when the government set out its plans in April "I was clear that they were for consultation. I have kept that promise".
He said that the agreement is "a sensible proposal which is tough but realistic".
The shadow justice secretary, Labour's Sadiq Khan, described plans to drop plans to award contracts to the lowest bidder as "a humiliating climb-down" for the government.
He added: "If the government had their way, access to justice for many people would have been threatened, with the very real prospect of increased numbers of miscarriages of justice."
The charity Reprieve said the blocking of legal aid to immigrants who have been in the country less than a year would "deny justice to a wide range of people wronged by the UK government - from victims of torture and rendition to Gurkhas and Afghan interpreters denied the right to settle in Britain".
The organisation's legal director, Kat Craig, said: "The reality is that the residence test is the latest in a long line of attempts by the government to silence its critics in the courts.
"David Cameron and Nick Clegg once thought that torture victims and Gurkhas denied the right to live in Britain deserved their day in court - why are they now backing plans which would shut them out?"
Correction 17 September 2013: This story has been amended to clarify that it is the legal aid system - rather than the legal system - that is said to be one of the most expensive in the world.
import { HttpParameterCodec, HttpParams } from '@angular/common/http';
import { TranslateService } from '@ngx-translate/core';
import { Observable } from 'rxjs';
import { ApiInterface } from '../abstract-services/api-interface';
import { Category } from '../model/dataset-api/category';
import { Data } from '../model/dataset-api/data';
import { Dataset, Timeseries, TimeseriesData, TimeseriesExtras } from '../model/dataset-api/dataset';
import { Feature } from '../model/dataset-api/feature';
import { Offering } from '../model/dataset-api/offering';
import { Phenomenon } from '../model/dataset-api/phenomenon';
import { Platform } from '../model/dataset-api/platform';
import { Procedure } from '../model/dataset-api/procedure';
import { Service } from '../model/dataset-api/service';
import { Station } from '../model/dataset-api/station';
import { DataParameterFilter, HttpRequestOptions, ParameterFilter } from '../model/internal/http-requests';
import { Timespan } from '../model/internal/timeInterval';
import { HttpService } from './http.service';
import { DatasetApiV2 } from './interfaces/api-v2.interface';
/**
 * HttpParameterCodec that percent-encodes parameter names and values on the
 * way out (via encodeURIComponent) and passes them through unchanged on the
 * way in.
 */
export class UriParameterCoder implements HttpParameterCodec {

  public encodeKey(key: string): string {
    return encodeURIComponent(key);
  }

  public encodeValue(value: string): string {
    return encodeURIComponent(value);
  }

  // Decoding is intentionally a no-op: values are consumed as-is.
  public decodeKey(key: string): string {
    return key;
  }

  public decodeValue(value: string): string {
    return value;
  }
}
export abstract class DatasetApiInterface extends ApiInterface implements DatasetApiV2 {
constructor(
protected httpService: HttpService,
protected translate: TranslateService
) { super(); }
public abstract getPlatforms(apiUrl: string, params?: ParameterFilter, options?: HttpRequestOptions): Observable<Platform[]>;
public abstract getPlatform(id: string, apiUrl: string, params?: ParameterFilter, options?: HttpRequestOptions): Observable<Platform>;
public abstract getDatasets(apiUrl: string, params?: ParameterFilter, options?: HttpRequestOptions): Observable<Dataset[]>;
public abstract getDataset(id: string, apiUrl: string, params?: ParameterFilter, options?: HttpRequestOptions): Observable<Dataset>;
public abstract getDatasetByInternalId(internalId: string, params?: ParameterFilter, options?: HttpRequestOptions): Observable<Dataset>;
public abstract getData<T>(id: string, apiUrl: string, timespan: Timespan, params?: DataParameterFilter, options?: HttpRequestOptions): Observable<Data<T>>;
public abstract getServices(apiUrl: string, params?: ParameterFilter, options?: HttpRequestOptions): Observable<Service[]>;
public abstract getService(id: string, apiUrl: string, params?: ParameterFilter, options?: HttpRequestOptions): Observable<Service>;
public abstract getStations(apiUrl: string, params?: ParameterFilter, options?: HttpRequestOptions): Observable<Station[]>;
public abstract getStation(id: string, apiUrl: string, params?: ParameterFilter, options?: HttpRequestOptions): Observable<Station>;
public abstract getTimeseries(apiUrl: string, params?: ParameterFilter, options?: HttpRequestOptions): Observable<Timeseries[]>;
public abstract getTimeseriesData(apiUrl: string, ids: string[], timespan: Timespan, options?: HttpRequestOptions): Observable<TimeseriesData[]>;
public abstract getSingleTimeseries(id: string, apiUrl: string, params?: ParameterFilter, options?: HttpRequestOptions): Observable<Timeseries>;
public abstract getSingleTimeseriesByInternalId(internalId: string, params?: ParameterFilter, options?: HttpRequestOptions): Observable<Timeseries>;
public abstract getTimeseriesExtras(id: string, apiUrl: string): Observable<TimeseriesExtras>;
public abstract getTsData<T>(id: string, apiUrl: string, timespan: Timespan, params?: DataParameterFilter, options?: HttpRequestOptions): Observable<Data<T>>;
public abstract getCategories(apiUrl: string, params?: ParameterFilter, options?: HttpRequestOptions): Observable<Category[]>;
public abstract getCategory(id: string, apiUrl: string, params?: ParameterFilter, options?: HttpRequestOptions): Observable<Category>;
public abstract getPhenomena(apiUrl: string, params?: ParameterFilter, options?: HttpRequestOptions): Observable<Phenomenon[]>;
public abstract getPhenomenon(id: string, apiUrl: string, params?: ParameterFilter, options?: HttpRequestOptions): Observable<Phenomenon>;
public abstract getOfferings(apiUrl: string, params?: ParameterFilter, options?: HttpRequestOptions): Observable<Offering[]>;
public abstract getOffering(id: string, apiUrl: string, params?: ParameterFilter, options?: HttpRequestOptions): Observable<Offering>;
public abstract getFeatures(apiUrl: string, params?: ParameterFilter, options?: HttpRequestOptions): Observable<Feature[]>;
public abstract getFeature(id: string, apiUrl: string, params?: ParameterFilter, options?: HttpRequestOptions): Observable<Feature>;
public abstract getProcedures(apiUrl: string, params?: ParameterFilter, options?: HttpRequestOptions): Observable<Procedure[]>;
public abstract getProcedure(id: string, apiUrl: string, params?: ParameterFilter, options?: HttpRequestOptions): Observable<Procedure>;
protected requestApi<T>(
url: string, params: ParameterFilter = {}, options: HttpRequestOptions = {}
): Observable<T> {
return this.httpService.client(options).get<T>(url,
{
params: this.prepareParams(params),
headers: this.createBasicAuthHeader(options.basicAuthToken)
}
);
}
protected prepareParams(params: ParameterFilter): HttpParams {
if (this.translate && this.translate.currentLang) {
params.locale = this.translate.currentLang;
}
let httpParams = new HttpParams({
encoder: new UriParameterCoder()
});
Object.getOwnPropertyNames(params)
.forEach((key) => httpParams = httpParams.set(key, params[key]));
return httpParams;
}
}
|
/**
 * svc_rdma_send - Post a single Send WR
 * @rdma: transport on which to post the WR
 * @wr: prepared Send WR to post
 *
 * Returns zero when the Send WR was posted successfully. Otherwise, a
 * negative errno is returned.
 */
int svc_rdma_send(struct svcxprt_rdma *rdma, struct ib_send_wr *wr)
{
	int ret;

	/* wait_event() below may block. */
	might_sleep();

	while (1) {
		/* Try to reserve an SQ slot. A negative result means the
		 * queue was already exhausted.
		 */
		if ((atomic_dec_return(&rdma->sc_sq_avail) < 0)) {
			atomic_inc(&rdma_stat_sq_starve);
			trace_svcrdma_sq_full(rdma);
			/* Return the slot we failed to claim before
			 * sleeping, so the counter stays balanced.
			 */
			atomic_inc(&rdma->sc_sq_avail);
			/* Sleep until a Send completion frees capacity. */
			wait_event(rdma->sc_send_wait,
				   atomic_read(&rdma->sc_sq_avail) > 1);
			/* Transport was torn down while waiting. */
			if (test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags))
				return -ENOTCONN;
			trace_svcrdma_sq_retry(rdma);
			continue;
		}

		/* Pin the transport until the Send completes; the
		 * completion handler is expected to drop this reference.
		 */
		svc_xprt_get(&rdma->sc_xprt);
		ret = ib_post_send(rdma->sc_qp, wr, NULL);
		trace_svcrdma_post_send(wr, ret);
		if (ret) {
			/* Post failed: flag the transport for closure,
			 * drop the reference taken above, and wake any
			 * senders parked on the SQ.
			 */
			set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
			svc_xprt_put(&rdma->sc_xprt);
			wake_up(&rdma->sc_send_wait);
		}
		break;
	}
	return ret;
}
/**
* A bean to memorize the treatement of the application.
*/
public class ManagedAccess implements Resettable, InitializingBean, Serializable {
/**
* The serialization id.
*/
private static final long serialVersionUID = -2380329125800067454L;
/*
******************* PROPERTIES ******************* */
/**
* The current treatement.
*/
private Traitement currentTraitement;
/**
* see {@link ParameterService}.
*/
private ParameterService parameterService;
/**
* The SessionController.
*/
private SessionController sessionController;
private MenuModel menuModel;
private Boolean readAuthorized = null;
private Boolean addAuthorized = null;
private Boolean updateAuthorized = null;
private Boolean deleteAuthorized = null;
/**
* private field with no direct mutator to drive additional menu visibility if current user is a gestionnaire.
* getter at {@link #shouldShowSearch()}
* indirect mutator at {@link #getMenuGestionnaire()}
* applied in view /stylesheets/gestionnaire/user/_student/_lookForIndividu.xhtml
*/
private boolean showSearch = false;
/**
* A logger.
*/
private final Logger log = new LoggerImpl(getClass());
/*
******************* INIT ************************* */
/**
* Constructor.
*/
public ManagedAccess() {
super();
}
/**
* @see org.springframework.beans.factory.InitializingBean#afterPropertiesSet()
*/
@Override
public void afterPropertiesSet() throws Exception {
Assert.notNull(this.parameterService,
"property parameterService of class " + this.getClass().getName() + " can not be null");
Assert.notNull(this.sessionController, "property sessionController of class "
+ this.getClass().getName() + " can not be null");
reset();
}
@Override
public void reset() {
}
/**
* Permet de definir si un user a le droit sur la fonctionnalite courante.
*
* @param codAccess String
* @param gest The userCurrent
* @return boolean
*/
private boolean ctrlAccess(final String codAccess,
final Gestionnaire gest) {
try {
if (gest != null) {
Set<AccessRight> accessRights = gest.getProfile().getAccessRight();
for (AccessRight a : accessRights) {
if (a.getTraitement().getCode().equals(getCurrentTraitement().getCode())) {
if (codAccess.equals(a.getCodeAccessType())) {
return true;
}
}
}
} else {
log.error("Le profil du Current User est null donc pas de droit ");
throw new WebFlowException("L'utilisateur n'est pas autorise");
}
} catch (Exception e) {
throw new WebFlowException("L'utilisateur n'est pas autorise", e);
}
return false;
}
/**
* @return Boolean true if user has the read right.
*/
public Boolean getReadAuthorized() {
if (readAuthorized == null) {
User u = sessionController.getCurrentUser();
if (u != null && u instanceof Gestionnaire) {
Gestionnaire g = (Gestionnaire) u;
readAuthorized = ctrlAccess(AccessType.COD_READ, g);
} else
readAuthorized = false;
}
return readAuthorized;
}
/**
* @return Boolean true if user has the add right.
*/
public Boolean getAddAuthorized() {
if (addAuthorized == null) {
User u = sessionController.getCurrentUser();
if (u != null && u instanceof Gestionnaire) {
Gestionnaire g = (Gestionnaire) u;
addAuthorized = ctrlAccess(AccessType.COD_ADD, g);
} else
addAuthorized = false;
}
return addAuthorized;
}
/**
* @return Boolean true if user has the update right.
*/
public Boolean getUpdateAuthorized() {
if (updateAuthorized == null) {
User u = sessionController.getCurrentUser();
if (u != null && u instanceof Gestionnaire) {
Gestionnaire g = (Gestionnaire) u;
updateAuthorized = ctrlAccess(AccessType.COD_UPDATE, g);
} else
updateAuthorized = false;
}
return updateAuthorized;
}
/**
* @return Boolean true if user has the delete right.
*/
public Boolean getDeleteAuthorized() {
if (deleteAuthorized == null) {
User u = sessionController.getCurrentUser();
if (u != null && u instanceof Gestionnaire) {
Gestionnaire g = (Gestionnaire) u;
deleteAuthorized = ctrlAccess(AccessType.COD_DELETE, g);
} else
deleteAuthorized = false;
}
return deleteAuthorized;
}
public MenuModel getMenuGestionnaire() {
if (menuModel == null) {
menuModel = new DefaultMenuModel();
I18nService i18nService = I18nUtils.createI18nService();
FacesContext fc = FacesContext.getCurrentInstance();
ExpressionFactory factory = fc.getApplication().getExpressionFactory();
MenuItem accueil = new MenuItem();
accueil.setAjax(false);
accueil.setValue(i18nService.getString("NAVIGATION.TEXT.WELCOME"));
accueil.setActionExpression(factory.createMethodExpression(fc.getELContext(), "#{welcomeController.goWelcomeManager}", String.class, new Class[]{}));
accueil.setAjax(false);
menuModel.addMenuItem(accueil);
User u = sessionController.getCurrentUser();
if (u != null) {
if (u instanceof Gestionnaire) {
showSearch = true;
Gestionnaire g = (Gestionnaire) u;
Set<Traitement> domains =
new TreeSet<>(new ComparatorInteger(Traitement.class));
domains.addAll(
parameterService.getTraitements(g.getProfile(), Traitement.TYPE_DOMAIN, null));
for (Traitement d : domains) {
Set<Traitement> dFunctions =
parameterService.getTraitements(g.getProfile(), Traitement.TYPE_FUNCTION, (Domain) d);
if (dFunctions.isEmpty()) {
final MethodExpression me = factory
.createMethodExpression(
fc.getELContext(),
"#{managedAccess.callFunction(" + d.getId() + ")}",
String.class,
new Class[]{Integer.class});
MenuItem sub = new MenuItem();
sub.setValue(d.getLibelle());
sub.setActionExpression(me);
sub.setAjax(false);
menuModel.addMenuItem(sub);
} else {
Submenu sub = new Submenu();
sub.setLabel(d.getLibelle());
Set<Traitement> functions =
new TreeSet<>(new ComparatorInteger(Traitement.class));
functions.addAll(dFunctions);
for (final Traitement f : functions) {
final MethodExpression me = factory
.createMethodExpression(
fc.getELContext(),
"#{managedAccess.callFunction(" + f.getId() + ")}",
String.class,
new Class[]{Integer.class});
MenuItem item = new MenuItem();
item.setValue(f.getLibelle());
item.setActionExpression(me);
item.setAjax(false);
sub.getChildren().add(item);
}
menuModel.addSubmenu(sub);
}
}
}
}
MenuItem logout = new MenuItem();
logout.setRendered(sessionController.getIsServlet());
logout.setValue(i18nService.getString("NAVIGATION.TEXT.LOGOUT"));
logout.setActionExpression(factory.createMethodExpression(fc.getELContext(), "#{sessionController.logoutGest}", String.class, new Class[]{}));
logout.setAjax(false);
menuModel.addMenuItem(logout);
}
return menuModel;
}
/**
* Only show additional menu if user is Gestionnaire
* @return true if search menu should be shown false otherwise
*/
public boolean shouldShowSearch() {
return showSearch;
}
public void setMenuGestionnaire(final MenuModel menuModel) {
this.menuModel = menuModel;
}
public String callFunction(final Integer codTrt) {
Traitement trt = parameterService.getTraitement(codTrt);
setCurrentTraitement(trt);
final FacesContext fc = FacesContext.getCurrentInstance();
final ExpressionFactory factory = fc.getApplication().getExpressionFactory();
final MethodExpression me = factory.createMethodExpression(fc.getELContext(), trt.getAction(), String.class, new Class[]{});
return (String) me.invoke(fc.getELContext(), null);
}
/*
******************* ACCESSORS ******************** */
public MenuModel getMenuModel() {
return menuModel;
}
/**
* @return the currentTraitement
*/
public Traitement getCurrentTraitement() {
return currentTraitement;
}
/**
* @param currentTraitement the currentTraitement to set
*/
public void setCurrentTraitement(final Traitement currentTraitement) {
this.currentTraitement = currentTraitement;
}
/**
* @param parameterService the parameterService to set
*/
public void setParameterService(final ParameterService parameterService) {
this.parameterService = parameterService;
}
/**
* @param sessionController the sessionController to set
*/
public void setSessionController(final SessionController sessionController) {
this.sessionController = sessionController;
}
} |
// GetWord will retrieve a word for the given key
func (c *MemoryClient) GetWord(ctx context.Context, key string) (string, error) {
if c.Data == nil {
return "", errors.New("running in local mode. Data has not been retrieved")
}
if val, ok := c.Data[key]; ok {
return val, nil
}
return "", fmt.Errorf("key not found")
} |
def mutateRow(self, table, trowMutations):
    """Apply a batch of mutations to a single row (thrift-style RPC stub).

    Sends the mutateRow request and blocks until the server's reply is
    received; any error carried in the reply is raised by recv_mutateRow.

    Parameters:
     - table: target table -- presumably the table name; confirm against
       the service's .thrift definition
     - trowMutations: the row mutations to apply
    """
    self.send_mutateRow(table, trowMutations)
    self.recv_mutateRow()
import pytest
def pytest_addoption(parser):
    """Register the ``--integration`` CLI flag for live-server test runs."""
    parser.addoption(
        "--integration",
        metavar="LIVY_URL",
        nargs="?",
        const="http://localhost:8998",
        help=(
            "Run integration tests against the specified Livy server URL "
            "(default: http://localhost:8998)"
        ),
    )
def pytest_configure(config):
    """Declare the ``integration`` marker so pytest does not warn about it."""
    marker_line = "integration: mark test as integration test"
    config.addinivalue_line("markers", marker_line)
def pytest_collection_modifyitems(config, items):
    """Skip integration-marked tests unless ``--integration`` was supplied."""
    if config.getoption("--integration") is not None:
        # A Livy URL was given on the command line: run everything.
        return
    skip_marker = pytest.mark.skip(reason="Add --integration option to run")
    for test_item in items:
        if "integration" in test_item.keywords:
            test_item.add_marker(skip_marker)
@pytest.fixture
def integration_url(request):
    """Livy server URL passed via ``--integration``, or None when absent."""
    return request.config.getoption("--integration")
|
def flatten_data(data):
    """Recursively flatten nested lists and Structs into scrypt values.

    Lists are flattened element-wise, Structs are expanded member-by-member
    (in the order reported by get_members), and any other value is wrapped
    in a single-element list after conversion by primitives_to_scrypt_types.
    """
    if isinstance(data, list):
        return [flatten_data(element) for element in data]
    if isinstance(data, types.Struct):
        return [
            flatten_data(data.member_by_key(member_key))
            for member_key in data.get_members()
        ]
    # Leaf value: convert the primitive and wrap it.
    return [primitives_to_scrypt_types(data)]
// ListApplicationTypesWithResponse request returning *ListApplicationTypesResponse
func (c *ClientWithResponses) ListApplicationTypesWithResponse(ctx context.Context, params *ListApplicationTypesParams) (*ListApplicationTypesResponse, error) {
rsp, err := c.ListApplicationTypes(ctx, params)
if err != nil {
return nil, err
}
return ParseListApplicationTypesResponse(rsp)
} |
// checkBufferWillFull check whether the send buffer will be full.
func (t *TCPConnection) checkBufferWillFull() {
if len(t.sendBuffer) >= t.MaxSendBufferSize {
if t.OnBufferFull != nil {
t.OnBufferFull(t)
}
}
} |
/**
* EncryptionKeyInfo contains the encryption key and corresponding metadata which contains additional information about
* the key such as version, timestamp.
*/
public class EncryptionKeyInfo {
private Map<String, String> metadata = null;
private byte[] key = null;
public EncryptionKeyInfo() {
this.key = null;
this.metadata = null;
}
public EncryptionKeyInfo(byte[] key, Map<String, String> metadata) {
this.key = key;
this.metadata = metadata;
}
public byte[] getKey() {
return key;
}
public void setKey(byte[] key) {
this.key = key;
}
public Map<String, String> getMetadata() {
return metadata;
}
public void setMetadata(Map<String, String> metadata) {
this.metadata = metadata;
}
} |
/**
* Main service shell Allows 'subroutines' in Java may be invoked from the primary code of qcert, written in OCaml or extracted to OCaml. This
* service can be obtained either by fork/exec or by running with the -server option.
*/
public class Main extends NanoHTTPD {
/** Constructor; passes through to NanoHTTPD constructor.
 * @param port the local TCP port the embedded HTTP server listens on
 * @throws Exception when initialization fails in whisk mode
 */
private Main(int port) throws Exception {
    super(port);
}
/* (non-Javadoc)
* @see fi.iki.elonen.NanoHTTPD#serve(fi.iki.elonen.NanoHTTPD.IHTTPSession)
*/
@Override
public Response serve(IHTTPSession session) {
Method method = session.getMethod();
if (Method.POST.equals(method)) {
List<String> verb = session.getParameters().get("verb");
Map<String, String> files = new HashMap<String, String>();
try {
session.parseBody(files);
} catch (IOException ioe) {
System.out.println("I/O Exception parsing body");
return respond(Response.Status.INTERNAL_ERROR, "SERVER INTERNAL ERROR: IOException: " + ioe.getMessage());
} catch (ResponseException re) {
System.out.println("Response Exception parsing body");
return respond(re.getStatus(), re.getMessage());
}
String arg = files.get("postData");
String response = Dispatcher.dispatch(verb.get(0), arg);
return respond(Response.Status.OK, response);
} else if (Method.OPTIONS.equals(method)) {
Response response = respond(Response.Status.OK, "");
response.addHeader("Access-Control-Allow-Methods", "POST");
response.addHeader("Access-Control-Allow-Headers", "Content-Type");
return response;
} else {
System.out.println("Rejecting non-post request");
return respond(Response.Status.BAD_REQUEST, "Only POST requests accepted");
}
}
/** Issue a response from server mode */
private Response respond(Response.Status status, String content) {
Response response = newFixedLengthResponse(status, NanoHTTPD.MIME_PLAINTEXT, content);
response.addHeader("Access-Control-Allow-Origin", "*");
return response;
}
/**
* Main.
* <p>Command line must conform to one of the following templates.
* <ol>
* <li><em>verb</em>
* <li><b>-server</b> <em>portnumber</em>
* </ol>
* <p>In the first template, the verb must be one recognized by the Java service dispatcher. The argument is read from stdin and
* the result posted to stdout.
* <p>In the second template, the server is started on the given port. It then responds to "old-style" Java service requests via
* http Post (verb passed in the URL query and argument passed in the POST body).
*/
public static void main(String[] args) {
String portString = null;
if (args.length < 1 || args.length > 3)
error("Improperly invoked via command line with " + args.length + " arguments");
else if (args[0].equals("-server")) {
if (args.length != 2)
error("Port number (only) required with -server option");
portString = args[1];
} else if (args.length != 1)
error("Unless -server is specified, there must be exactly one (method name) argument");
else
runAsCmdline(args[0]);
/* Server modes */
int port = -1;
try {
port = Integer.parseInt(portString);
} catch (NumberFormatException e) {}
if (port < 1 || port > Character.MAX_VALUE)
error("Invalid port number " + portString);
runAsServer(port);
}
/**
* Print a message and exit. The message is printed to stdout, not stderr, and is prepended with the ERROR: token in case
* the invokation came from qcert.
* @param msg the message
*/
private static void error(String msg) {
System.out.println("ERROR: " + msg);
System.exit(-1);
}
/** Read stdin into a String until eos (in a pipeline)
* @throws IOException */
private static String readStdin() throws IOException {
InputStreamReader srdr = new InputStreamReader(System.in);
StringWriter swtr = new StringWriter();
PrintWriter wtr = new PrintWriter(swtr);
BufferedReader rdr = new BufferedReader(srdr);
String line = rdr.readLine();
while (line != null) {
wtr.println(line);
line = rdr.readLine();
}
rdr.close();
wtr.close();
return swtr.toString();
}
/**
* Run a single verb and set of arguments through the dispatcher in a single invocation from the command line
* @param cmdargs the command line arguments (at least one, and not "-server")
*/
private static void runAsCmdline(String verb) {
String arg = null;
try {
arg = readStdin();
} catch (Exception e) {
error("Problem reading stdin");
return; // not reached
}
System.out.println(Dispatcher.dispatch(verb, arg));
}
/**
* Run as an http service.
* @param port the port to listen on for http post requests
* @param mode either -server or -whiskserver
*/
private static void runAsServer(int port) {
try {
Main svc = new Main(port);
svc.start(NanoHTTPD.SOCKET_READ_TIMEOUT, false);
} catch (Exception e) {
error("Could not start: " + e.getMessage());
}
System.out.println("Java service started on port " + port);
}
} |
import React from 'react'
import AvatarImage from '../../../../../../components/AvatarImage'
import { Contact } from '../../../../../../interfaces/contact'
import SeenMessageIcon from '../../../../../../assets/icons/seen_message.svg'
import { formatDateNumberToDayMonthYear } from '../../../../../../utils/format_date_utils'
interface Props {
isCurrentCard?: boolean
contact: Contact
onClick(): void
}
const contactCard: React.FC<Props> = ({ isCurrentCard = false, contact, onClick }) => {
return (
<div className="w-full py-4 ">
<div className={`flex rounded-2xl p-4 ${isCurrentCard && 'bg-dark-blue'} cursor-pointer`} onClick={onClick}>
<div className="pr-4">
<AvatarImage height="h-55" width="w-55" status={contact.status} src={'https://image.gala.de/22089206/t/pA/v6/w1440/r0/-/style-heidi-klum-1.jpg'} alt="Contact Picture" />
</div>
<div className="flex-1 flex flex-col overflow-hidden">
<div className="flex justify-between">
<h2 className={`text-xl font-poppins font-semibold ${isCurrentCard ? 'text-white' : 'text-black'}`}>{contact.name}</h2>
<p className={`${isCurrentCard ? 'text-white' : 'text-gray'}`}>{formatDateNumberToDayMonthYear(contact.lastMessageDateNumber)}</p>
</div>
<div className="flex items-center justify-between">
<div className={`overflow-hidden overflow-ellipsis whitespace-nowrap ${isCurrentCard ? 'text-white' : 'text-gray'}`}>{contact.lastMessage}</div>
<div style={{ minWidth: '30px', height: 'auto' }} className="pl-4">
{contact.lastMessageSeen && <SeenMessageIcon />}
</div>
</div>
</div>
</div>
</div>
)
}
export default contactCard
|
<reponame>beamjs/erlv8
#include "erlv8.hh"
// Signature shared by all tick handlers: given the VM, the tick's atom name,
// the tick term, the tick's ref, the current ticker's ref, and the tick
// tuple's arity/element array, a handler decides how the ticker loop proceeds.
typedef TickHandlerResolution (*TickHandler)(VM *, char *, ERL_NIF_TERM, ERL_NIF_TERM, ERL_NIF_TERM, int, const ERL_NIF_TERM*);
// One entry of the tick-name -> handler dispatch table.
struct ErlV8TickHandler {
	const char * name;
	TickHandler handler;
};
// Dispatch table scanned linearly by VM::ticker.  The NULL-name sentinel at
// the end routes unrecognized tick names to UnknownTickHandler.
static ErlV8TickHandler tick_handlers[] =
{
	{"stop", StopTickHandler},
	{"result", ResultTickHandler},
	{"call", CallTickHandler},
	{"inst", InstantiateTickHandler},
	{"delete", DeleteTickHandler},
	{"taint", TaintTickHandler},
	{"equals", EqualsTickHandler},
	{"strict_equals", StrictEqualsTickHandler},
	{"get", GetTickHandler},
	{"get_proto", GetProtoTickHandler},
	{"get_hidden", GetHiddenTickHandler},
	{"set", SetTickHandler},
	{"set_proto", SetProtoTickHandler},
	{"set_hidden", SetHiddenTickHandler},
	{"set_accessor", SetAccessorTickHandler},
	{"proplist", ProplistTickHandler},
	{"list", ListTickHandler},
	{"script", ScriptTickHandler},
	{"gc", GCTickHandler},
	{"to_string", ToStringTickHandler},
	{"to_detail_string", ToDetailStringTickHandler},
	{"extern_proto", ExternProtoTickHandler},
	{"externalize", ExternalizeTickHandler},
	{"internal_count", InternalCountTickHandler},
	{"set_internal", SetInternalTickHandler},
	// NOTE(review): "set_internal_extern" maps to the same handler as
	// "set_internal" — confirm this is intentional and not a copy-paste slip.
	{"set_internal_extern", SetInternalTickHandler},
	{"get_internal", GetInternalTickHandler},
	{NULL, UnknownTickHandler}
};
// Construct a VM: allocate its NIF env and mutex, create a dedicated V8
// isolate with its own context/templates, and wire up the in-process ZeroMQ
// sockets used to deliver ticks to the VM thread.
VM::VM() {
	env = enif_alloc_env();
	mutex = enif_mutex_create((char*)"erlv8_vm_mutex");
	isolate = v8::Isolate::New();
	v8::Isolate::Scope iscope(isolate);
	v8::Locker locker(isolate);
	v8::HandleScope handle_scope;
	// Moved into the VM object since we have a own isolate for each VM
	global_template = v8::Persistent<v8::ObjectTemplate>::New(v8::ObjectTemplate::New());
	external_template = v8::Persistent<v8::ObjectTemplate>::New(v8::ObjectTemplate::New());
	empty_constructor = v8::Persistent<v8::FunctionTemplate>::New(v8::FunctionTemplate::New(EmptyFun));
	string__erlv8__ = v8::Persistent<v8::String>::New(v8::String::New("__erlv8__"));
	context = v8::Context::New(NULL, global_template);
	v8::Context::Scope context_scope(context);
	tid = enif_thread_self();
	// Stash a back-pointer to this VM on the JS global so callbacks (e.g.
	// WrapFun) can recover the VM from the current context.
	context->Global()->SetHiddenValue(string__erlv8__,v8::External::New(this));
	ctx_res_t *ptr = (ctx_res_t *)enif_alloc_resource(ctx_resource, sizeof(ctx_res_t));
	ptr->ctx = v8::Persistent<v8::Context>::New(context);
	ERL_NIF_TERM resource_term = enif_make_resource(env, ptr);
	enif_release_resource(ptr);
	// Also stash the context's Erlang resource term so JS-side invocations can
	// report which context they ran in.
	context->Global()->SetHiddenValue(v8::String::New("__erlv8__ctx__"),term_to_external(resource_term));
	// One prototype object per Erlang external type (number, atom, binary, ...).
	v8::Local<v8::Object> tmp = external_template->NewInstance();
	external_proto_num = v8::Persistent<v8::Object>::New(tmp);
	external_proto_atom = v8::Persistent<v8::Object>::New(external_template->NewInstance());
	external_proto_bin = v8::Persistent<v8::Object>::New(external_template->NewInstance());
	external_proto_ref = v8::Persistent<v8::Object>::New(external_template->NewInstance());
	external_proto_fun = v8::Persistent<v8::Object>::New(external_template->NewInstance());
	external_proto_port = v8::Persistent<v8::Object>::New(external_template->NewInstance());
	external_proto_pid = v8::Persistent<v8::Object>::New(external_template->NewInstance());
	external_proto_tuple = v8::Persistent<v8::Object>::New(external_template->NewInstance());
	external_proto_list = v8::Persistent<v8::Object>::New(external_template->NewInstance());
	// inproc PUSH/PULL pair carrying Tick structs; the socket names embed this
	// VM's address to keep them unique within the shared zmq_context.
	push_socket = zmq_socket(zmq_context, ZMQ_PUSH);
	ticker_push_socket = zmq_socket(zmq_context, ZMQ_PUSH);
	pull_socket = zmq_socket(zmq_context, ZMQ_PULL);
	char socket_id[64];
	sprintf(socket_id, "inproc://tick-publisher-%ld", (long int) this);
	char ticker_socket_id[64];
	sprintf(ticker_socket_id, "inproc://tick-publisher-ticker-%ld", (long int) this);
	zmq_bind(push_socket, socket_id);
	zmq_bind(ticker_push_socket, ticker_socket_id);
	zmq_connect(pull_socket, socket_id);
	zmq_connect(pull_socket, ticker_socket_id);
};
// Tear down a VM: release every persistent V8 handle it owns, dispose the
// isolate, and close its ZeroMQ sockets.  Each handle is Dispose()d (drops
// the V8 object) and then Clear()ed (nulls the handle itself).
VM::~VM() {
	// v8::Isolate::Scope iscope(isolate);
	// v8::Locker locker(isolate);
	// v8::HandleScope handle_scope;
	isolate->Enter();
	TRACE("(%p) VM::~VM - 1\n", isolate);
	enif_mutex_destroy(mutex);
	TRACE("(%p) VM::~VM - 2\n", isolate);
	TRACE("(%p) VM::~VM - 3\n", isolate);
	external_proto_bin.Dispose();
	external_proto_bin.Clear();
	TRACE("(%p) VM::~VM - 4\n", isolate);
	external_proto_ref.Dispose();
	external_proto_ref.Clear();
	external_proto_fun.Dispose();
	external_proto_fun.Clear();
	external_proto_port.Dispose();
	external_proto_port.Clear();
	// Bug fix: external_proto_pid was Dispose()d twice and never Clear()ed,
	// and external_proto_tuple was Clear()ed without ever being Dispose()d
	// (leaking its persistent handle).
	external_proto_pid.Dispose();
	external_proto_pid.Clear();
	external_proto_tuple.Dispose();
	external_proto_tuple.Clear();
	external_proto_list.Dispose();
	external_proto_list.Clear();
	TRACE("(%p) VM::~VM - 4\n", isolate);
	global_template.Dispose();
	global_template.Clear();
	TRACE("(%p) VM::~VM - 5\n", isolate);
	external_template.Dispose();
	external_template.Clear();
	TRACE("(%p) VM::~VM - 6\n", isolate);
	empty_constructor.Dispose();
	empty_constructor.Clear();
	TRACE("(%p) VM::~VM - 7\n", isolate);
	string__erlv8__.Dispose();
	string__erlv8__.Clear();
	TRACE("(%p) VM::~VM - 8\n", isolate);
	external_proto_num.Dispose();
	external_proto_num.Clear();
	TRACE("(%p) VM::~VM - 9\n", isolate);
	external_proto_atom.Dispose();
	external_proto_atom.Clear();
	TRACE("(%p) VM::~VM - 10\n", isolate);
	enif_free_env(env);
	TRACE("(%p) VM::~VM - 11\n", isolate);
	context.Dispose();
	context.Clear();
	// Fully unwind any nested Enter()s before disposing the isolate.
	while (v8::Isolate::GetCurrent() == isolate) {
		isolate->Exit();
	}
	// this should dispose everything created in the isolate:
	// http://markmail.org/message/mcn27ibuijhgkehl
	isolate->Dispose();
	zmq_close(push_socket);
	zmq_close(ticker_push_socket);
	zmq_close(pull_socket);
};
// Entry point of the VM thread's lifetime inside V8: acquires the isolate
// and lock (RAII; the declaration order matters), opens the top-level handle
// scope, and runs the top-level ticker loop (ref 0) until it is told to stop.
void VM::run() {
	v8::Isolate::Scope iscope(isolate);
	v8::Locker locker(isolate);
	v8::HandleScope handle_scope; // the very top level handle scope
	ticker(0);
};
// Request that V8 abort any JavaScript currently executing in this VM's
// isolate (safe to call from another thread).
void VM::terminate() {
	TRACE("(%p) VM::terminate - 1\n", isolate);
	v8::V8::TerminateExecution(isolate);
}
// Core receive/dispatch loop of the VM.  Blocks on the ZeroMQ pull socket for
// Tick messages, decodes each tick tuple, and scans tick_handlers for a
// matching handler.  A handler may end one iteration (DONE), fall through to
// the next table entry (NEXT), or finish this ticker invocation (RETURN),
// whose value becomes this call's result.  ref0 is the ref of the tick being
// serviced — 0 for the top-level loop started from VM::run.
v8::Handle<v8::Value> VM::ticker(ERL_NIF_TERM ref0) {
	TRACE("(%p) VM::ticker - 0\n", isolate);
	LHCS(isolate, context);
	isolate->Enter();
	TRACE("(%p) VM::ticker - 1\n", isolate);
	char name[MAX_ATOM_LEN];
	unsigned len;
	ErlNifEnv * ref_env = enif_alloc_env();
	ERL_NIF_TERM ref;
	TRACE("(%p) VM::ticker - 2\n", isolate);
	if ((unsigned long) ref0 == 0) {
		ref = ref0;
		DEBUG(server, enif_make_atom(env, "current_ticker"), enif_make_atom(env, "top"));
	} else {
		// Copy the ref into our own env so it survives across iterations.
		ref = enif_make_copy(ref_env, ref0);
		DEBUG(server, enif_make_atom(env, "current_ticker"), enif_make_copy(env, ref));
	}
	TRACE("(%p) VM::ticker - 3\n", isolate);
	zmq_msg_t msg;
	Tick tick_s;
	ERL_NIF_TERM tick, tick_ref;
	while (1) {
		{
			// Leave the isolate and drop the V8 lock (Unlocker) while blocking
			// on zmq_recv so other threads may use V8 in the meantime.
			isolate->Exit();
			TRACE("(%p) VM::ticker - 3.1\n", isolate);
			v8::Unlocker unlocker(isolate);
			TRACE("(%p) VM::ticker - 3.2\n", isolate);
			zmq_msg_init (&msg);
			TRACE("(%p) VM::ticker - 3.3\n", isolate);
			zmq_recv (pull_socket, &msg, 0);
			TRACE("(%p) VM::ticker - 3.4\n", isolate);
			memcpy(&tick_s, zmq_msg_data(&msg), sizeof(Tick));
			TRACE("(%p) VM::ticker - 3.5\n", isolate);
			// Copy the tick's terms into our long-lived env, then free the
			// sender's per-tick env.
			tick = enif_make_copy(env, tick_s.tick);
			TRACE("(%p) VM::ticker - 3.6\n", isolate);
			tick_ref = enif_make_copy(env, tick_s.ref);
			TRACE("(%p) VM::ticker - 3.7\n", isolate);
			enif_free_env(tick_s.env);
			TRACE("(%p) VM::ticker - 3.8\n", isolate);
			zmq_msg_close(&msg);
			TRACE("(%p) VM::ticker - 3.9\n", isolate);
		}
		isolate->Enter();
		TRACE("(%p) VM::ticker - 4\n", isolate);
		DEBUG(server,
			  enif_make_tuple2(env,
							   enif_make_atom(env, "last_tick"),
							   (unsigned long) ref == 0 ? enif_make_atom(env,"top") : enif_make_copy(env, ref)),
			  enif_make_copy(env, tick));
		if (enif_is_tuple(env, tick)) { // should be always true, just a sanity check
			TRACE("(%p) VM::ticker - 5\n", isolate);
			ERL_NIF_TERM *array;
			int arity;
			enif_get_tuple(env,tick,&arity,(const ERL_NIF_TERM **)&array);
			// First tuple element is the tick's atom name.
			enif_get_atom_length(env, array[0], &len, ERL_NIF_LATIN1);
			enif_get_atom(env,array[0],(char *)&name,len + 1, ERL_NIF_LATIN1);
			// lookup the matrix
			unsigned int i = 0;
			bool stop_flag = false;
			TRACE("(%p) VM::ticker - 6 (%s)\n", isolate, name);
			while (!stop_flag) {
				if ((!tick_handlers[i].name) ||
					(!strcmp(name,tick_handlers[i].name))) { // handler has been located
					TRACE("(%p) VM::ticker - 7\n", isolate);
					TickHandlerResolution resolution = (tick_handlers[i].handler(this, name, tick, tick_ref, ref, arity, array));
					TRACE("(%p) VM::ticker - 8\n", isolate);
					switch (resolution.type) {
					case DONE:
						stop_flag = true;
						break;
					case NEXT:
						break;
					case RETURN:
						TRACE("(%p) VM::ticker - 9\n", isolate);
						enif_free_env(ref_env);
						TRACE("(%p) VM::ticker - 10\n", isolate);
						enif_clear_env(env);
						TRACE("(%p) VM::ticker - 11\n", isolate);
						zmq_msg_t tick_msg;
						int e;
						TRACE("(%p) VM::ticker - 12\n", isolate);
						// Re-queue any ticks the handler deferred (pop_ticks)
						// onto the ticker socket before returning.
						while (!pop_ticks.empty()) {
							TRACE("(%p) VM::ticker - 12.1\n", isolate);
							Tick newtick = pop_ticks.front();
							TRACE("(%p) VM::ticker - 12.2\n", isolate);
							pop_ticks.pop();
							TRACE("(%p) VM::ticker - 12.3\n", isolate);
							zmq_msg_init_size(&tick_msg, sizeof(Tick));
							TRACE("(%p) VM::ticker - 12.4\n", isolate);
							memcpy(zmq_msg_data(&tick_msg), &newtick, sizeof(Tick));
							TRACE("(%p) VM::ticker - 12.5\n", isolate);
							do {
								e = zmq_send(ticker_push_socket, &tick_msg, ZMQ_NOBLOCK);
								TRACE("(%p) VM::ticker - 12.6\n", isolate);
							} while (e == EAGAIN);
							zmq_msg_close(&tick_msg);
						}
						TRACE("(%p) VM::ticker - 13\n", isolate);
						return handle_scope.Close(resolution.value);
						break;
					}
				}
				i++;
			}
		}
		enif_clear_env(env);
	}
};
// Thread entry point for a VM (spawned from set_server).  Runs the VM loop to
// completion, then deletes the VM.
void * start_vm(void *data) {
	VM *vm = reinterpret_cast<VM *>(data);
	vm->run();
	// Synchronize on the VM mutex so a NIF call still holding it (e.g. stop)
	// finishes before the VM is torn down.
	enif_mutex_lock(vm->mutex);
	enif_mutex_unlock(vm->mutex);
	delete vm;
	return NULL;
};
// NIF: create a fresh VM and hand it back to Erlang wrapped in a vm_resource.
// The VM and its resource keep back-pointers to each other.
static ERL_NIF_TERM new_vm(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[])
{
	VM *vm = new VM();
	vm_res_t *res = (vm_res_t *)enif_alloc_resource(vm_resource, sizeof(vm_res_t));
	res->vm = vm;
	vm->resource = res;
	ERL_NIF_TERM term = enif_make_resource(env, res);
	// Drop our reference; the Erlang term now keeps the resource alive.
	enif_release_resource(res);
	return term;
};
// NIF: record the Erlang server pid that owns the VM (argv[0]) and spawn the
// OS thread that runs the VM loop.  Returns 'ok', or badarg for a bad resource.
static ERL_NIF_TERM set_server(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[])
{
	vm_res_t *res;
	if (enif_get_resource(env,argv[0],vm_resource,(void **)(&res))) {
		// NOTE(review): this malloc'd pid does not appear to be freed anywhere
		// in this file — confirm ownership/cleanup elsewhere.
		res->vm->server = (ErlNifPid *) malloc(sizeof(ErlNifPid));
		enif_get_local_pid(env, argv[1], res->vm->server);
		enif_thread_create((char *)"erlv8", &res->vm->tid, start_vm, res->vm, NULL);
		return enif_make_atom(env,"ok");
	};
	return enif_make_badarg(env);
};
// NIF: return the VM's default context wrapped as a new ctx_resource term.
static ERL_NIF_TERM context(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[])
{
	vm_res_t *res;
	if (enif_get_resource(env,argv[0],vm_resource,(void **)(&res))) {
		LHCS(res->vm->isolate, res->vm->context);
		ctx_res_t *ptr = (ctx_res_t *)enif_alloc_resource(ctx_resource, sizeof(ctx_res_t));
		// A new persistent handle to the (current) context is created per call.
		ptr->ctx = v8::Persistent<v8::Context>::New(v8::Context::GetCurrent());
		ERL_NIF_TERM term = enif_make_resource(env, ptr);
		enif_release_resource(ptr);
		return term;
	};
	return enif_make_badarg(env);
};
// NIF: force-terminate any JavaScript running in the given VM's isolate.
// Returns 'ok', or badarg when argv[0] is not a vm_resource.
static ERL_NIF_TERM kill(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
	vm_res_t *res;
	TRACE("kill - 1\n");
	if (!enif_get_resource(env,argv[0],vm_resource,(void **)(&res))) {
		TRACE("kill - 3\n");
		return enif_make_badarg(env);
	}
	TRACE("kill - 2\n");
	res->vm->terminate();
	TRACE("kill - 3\n");
	return enif_make_atom(env,"ok");
}
// NIF: post a {stop} tick (tagged with the caller's ref, argv[1]) to the VM's
// push socket, asking its ticker loop to shut down.  The VM mutex is held
// around the send so start_vm cannot delete the VM mid-send.
static ERL_NIF_TERM stop(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
	vm_res_t *res;
	int e;
	if (enif_get_resource(env,argv[0],vm_resource,(void **)(&res))) {
		if ((!enif_is_ref(env, argv[1])))
			return enif_make_badarg(env);
		TRACE("(%p) stop\n", res->vm->isolate);
		zmq_msg_t tick_msg;
		Tick tick;
		// Per-tick env; ownership passes to the ticker, which frees it.
		tick.env = enif_alloc_env();
		tick.tick = enif_make_tuple1(tick.env, enif_make_atom(tick.env, "stop"));
		tick.ref = enif_make_copy(tick.env, argv[1]);
		zmq_msg_init_size(&tick_msg, sizeof(Tick));
		memcpy(zmq_msg_data(&tick_msg), &tick, sizeof(Tick));
		enif_mutex_lock(res->vm->mutex);
		do {
			e = zmq_send(res->vm->push_socket, &tick_msg, ZMQ_NOBLOCK);
		} while (e == EAGAIN);
		zmq_msg_close(&tick_msg);
		enif_mutex_unlock(res->vm->mutex);
		return enif_make_atom(env,"ok");
	} else {
		return enif_make_badarg(env);
	};
};
// NIF: post an arbitrary tick term (argv[2], tagged with ref argv[1]) to the
// VM's push socket for the ticker loop to dispatch.  Returns 'tack'.
// NOTE(review): unlike stop(), this sends without taking the VM mutex —
// confirm whether that is safe against concurrent VM teardown.
static ERL_NIF_TERM tick(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
	vm_res_t *res;
	int e;
	if (enif_get_resource(env,argv[0],vm_resource,(void **)(&res))) {
		if ((!enif_is_ref(env, argv[1])))
			return enif_make_badarg(env);
		zmq_msg_t tick_msg;
		Tick tick;
		// Per-tick env; ownership passes to the ticker, which frees it.
		tick.env = enif_alloc_env();
		tick.tick = enif_make_copy(tick.env, argv[2]);
		tick.ref = enif_make_copy(tick.env, argv[1]);
		zmq_msg_init_size(&tick_msg, sizeof(Tick));
		memcpy(zmq_msg_data(&tick_msg), &tick, sizeof(Tick));
		do {
			e = zmq_send(res->vm->push_socket, &tick_msg, ZMQ_NOBLOCK);
		} while (e == EAGAIN);
		zmq_msg_close(&tick_msg);
		return enif_make_atom(env,"tack");
	} else {
		return enif_make_badarg(env);
	};
};
// NIF: given a VM resource (argv[0]) and a context resource (argv[1]), return
// the context's JS global object converted to an Erlang term.
static ERL_NIF_TERM global(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
	ctx_res_t *res;
	vm_res_t *vm_res;
	if (
		enif_get_resource(env, argv[0], vm_resource, (void **)(&vm_res))
		&& enif_get_resource(env,argv[1],ctx_resource,(void **)(&res))) {
		LHCS(vm_res->vm->isolate, res->ctx);
		v8::Handle<v8::Object> global = res->ctx->Global();
		return js_to_term(res->ctx, vm_res->vm->isolate, env,global);
	} else {
		return enif_make_badarg(env);
	};
};
// NIF: create a fresh JS context in the VM's isolate (sharing its global
// template), tag its global with the VM back-pointer and the context's own
// resource term, and return that resource term.
static ERL_NIF_TERM new_context(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
	vm_res_t *res;
	if (enif_get_resource(env, argv[0], vm_resource, (void **)(&res))) {
		LHCS(res->vm->isolate, res->vm->context);
		v8::Persistent<v8::Context> context = v8::Context::New(NULL, res->vm->global_template);
		context->Global()->SetHiddenValue(res->vm->string__erlv8__, v8::External::New(res->vm));
		ctx_res_t *ptr = (ctx_res_t *)enif_alloc_resource(ctx_resource, sizeof(ctx_res_t));
		// NOTE(review): `context` is already a persistent handle; wrapping it in
		// a second Persistent::New without disposing the first looks like a
		// handle leak — confirm against ctx_resource_destroy's behavior.
		ptr->ctx = v8::Persistent<v8::Context>::New(context);
		ERL_NIF_TERM resource_term = enif_make_resource(env, ptr);
		enif_release_resource(ptr);
		context->Global()->SetHiddenValue(v8::String::New("__erlv8__ctx__"), term_to_external(resource_term));
		return resource_term;
	} else {
		return enif_make_badarg(env);
	};
};
// Table of {erlang_name, arity, c_implementation} exported by this NIF module.
static ErlNifFunc nif_funcs[] =
{
	{"kill", 1, kill},
	{"new_vm", 0, new_vm},
	{"set_server", 2, set_server},
	{"context", 1, context},
	{"new_context", 1, new_context},
	{"global", 2, global},
	{"tick", 3, tick},
	{"stop", 2, stop},
};
// Do-nothing JS callback backing the VM's empty_constructor template; always
// yields `undefined`.
v8::Handle<v8::Value> EmptyFun(const v8::Arguments &arguments) {
	v8::HandleScope handle_scope;
	return v8::Undefined();
}
// JS-side trampoline for Erlang funs: when JS calls a wrapped fun, convert the
// JS arguments to Erlang terms, send an {erlv8_fun_invocation, ...} message to
// the VM's server process, then block in a nested ticker loop until a handler
// returns the invocation's result for this call's unique ref.
v8::Handle<v8::Value> WrapFun(const v8::Arguments &arguments) {
	v8::HandleScope handle_scope;
	// Recover the owning VM from the hidden __erlv8__ back-pointer on the global.
	VM * vm = (VM *)__ERLV8__(v8::Context::GetCurrent()->Global());
	// each call gets a unique ref
	ERL_NIF_TERM ref = enif_make_ref(vm->env);
	// prepare arguments
	ERL_NIF_TERM *arr = (ERL_NIF_TERM *) malloc(sizeof(ERL_NIF_TERM) * arguments.Length());
	for (int i=0;i<arguments.Length();i++) {
		arr[i] = js_to_term(vm->context, vm->isolate, vm->env,arguments[i]);
	}
	ERL_NIF_TERM arglist = enif_make_list_from_array(vm->env,arr,arguments.Length());
	free(arr);
	// send invocation request
	// NOTE(review): `env` inside SEND is presumably bound by the SEND macro
	// itself (erlv8.hh) — it is not a local here; confirm.
	SEND(vm->server,
		 enif_make_tuple3(env,
						  enif_make_copy(env,external_to_term(arguments.Data())),
						  enif_make_tuple7(env,
										   enif_make_atom(env,"erlv8_fun_invocation"),
										   enif_make_atom(env,arguments.IsConstructCall() ? "true" : "false"),
										   js_to_term(vm->context, vm->isolate, env, arguments.Holder()),
										   js_to_term(vm->context, vm->isolate, env, arguments.This()),
										   enif_make_copy(env, ref),
										   enif_make_pid(env, vm->server),
										   enif_make_copy(env, external_to_term(v8::Context::GetCurrent()->Global()->GetHiddenValue(v8::String::New("__erlv8__ctx__"))))),
						  enif_make_copy(env,arglist)));
	return handle_scope.Close(vm->ticker(ref));
};
// Resource destructors registered with enif_open_resource_type below.
// All intentionally empty: the wrapped objects' lifetimes are managed
// elsewhere (e.g. the VM is deleted by its own thread in start_vm).
static void vm_resource_destroy(ErlNifEnv* env, void* obj) {
};
static void val_resource_destroy(ErlNifEnv* env, void* obj) {
};
static void ctx_resource_destroy(ErlNifEnv* env, void* obj) {
};
// NIF load hook: set up the process-wide ZeroMQ context, register the three
// resource types, initialize V8, and start V8 preemption.  load_info may carry
// an integer preemption interval (defaults to 100 when absent/not an int).
int load(ErlNifEnv *env, void** priv_data, ERL_NIF_TERM load_info)
{
	TRACE("load\n");
	zmq_context = zmq_init(0); // we are using inproc only, so no I/O threads
	vm_resource = enif_open_resource_type(env, NULL, "erlv8_vm_resource", vm_resource_destroy, (ErlNifResourceFlags) (ERL_NIF_RT_CREATE | ERL_NIF_RT_TAKEOVER), NULL);
	val_resource = enif_open_resource_type(env, NULL, "erlv8_val_resource", val_resource_destroy, (ErlNifResourceFlags) (ERL_NIF_RT_CREATE | ERL_NIF_RT_TAKEOVER), NULL);
	ctx_resource = enif_open_resource_type(env, NULL, "erlv8_ctx_resource", ctx_resource_destroy, (ErlNifResourceFlags) (ERL_NIF_RT_CREATE | ERL_NIF_RT_TAKEOVER), NULL);
	v8::V8::Initialize();
	int preemption = 100; // default value
	enif_get_int(env, load_info, &preemption);
	v8::Locker locker;
	v8::Locker::StartPreemption(preemption);
	v8::HandleScope handle_scope;
	return 0;
};
// NIF unload hook: stop V8 preemption and tear down the shared ZeroMQ context.
void unload(ErlNifEnv *env, void* priv_data)
{
	TRACE("unload\n");
	v8::Locker::StopPreemption();
	zmq_term(zmq_context);
};
// NIF reload hook: nothing to do; report success.
int reload(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info) {
	return 0;
}
// NIF upgrade hook: nothing to migrate; report success.
int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info) {
	return 0;
}
// Definitions of the resource-type handles and the shared ZeroMQ context
// (declared in erlv8.hh); all are initialized in load().
ErlNifResourceType * ctx_resource;
ErlNifResourceType * vm_resource;
ErlNifResourceType * val_resource;
void *zmq_context;
// Entry-point table binding the Erlang module erlv8_nif to this NIF library.
ERL_NIF_INIT(erlv8_nif,nif_funcs,load,reload,upgrade,unload)
|
A Case of Esophagojejunal Variceal Rupture after Total Gastrectomy and Esophagojejunostomy Successfully Treated with Percutaneous Transhepatic Obliteration under Dual-balloon Occlusion of Feeding and Draining Veins
We present the case of a man in his 60s with bleeding esophagojejunal varices occurring after gastrectomy for gastric carcinoma. Percutaneous transhepatic portography depicted the esophagojejunal varices originated from the jejunal vein and drained into the azygos vein. A 5-French occlusion balloon catheter was wedged into the jejunal vein and a 3-French occlusion balloon catheter into one drainage channel of the esophagojejunal varices via the azygos vein. Selective antegrade jejunal venography under dual-balloon occlusion revealed entire esophagojejunal varices with good stagnated and well-opacified contrast medium. Subsequently, 12 mL of 5% ethanolamine oleate-contrast medium mixture was slowly injected into the esophagojejunal varices. He was discharged without complications one week after the procedure, and abdominal computed tomography demonstrated the disappearance of the esophagojejunal varices six months after the procedure.
Introduction
Esophagojejunal varices occurring after gastrectomy are uncommon due to the resection of supplying vessels. Endoscopic treatment can become challenging due to the intricate configuration of the alimentary tract after surgical reconstruction. Therefore, there is no widely accepted standard strategy for treating esophagojejunal varices. We present a case of esophagojejunal variceal rupture after total gastrectomy and esophagojejunostomy that was successfully treated with percutaneous transhepatic obliteration (PTO) under dual-balloon occlusion of feeding and draining veins.
Case Report
Eleven years ago, a man in his 60s underwent total gastrectomy and esophagojejunostomy for gastric carcinoma in our hospital. He presented to our hospital with massive hematemesis and melena. His systolic blood pressure was 60 mmHg with a pulse rate of 120 bpm at admission. He had no symptoms of ascites, jaundice, or hepatic encephalopathy. Laboratory examination demonstrated white blood cells of 16,600/μL; red blood cells of 247×10⁴/μL; hemoglobin, 8.1 g/dL; platelet count, 14×10⁴/μL; total-bilirubin, 0.49 mg/dL; albumin, 2.75 g/dL; prothrombin time, 52%; international normalized ratio of prothrombin time, 1.40; and ammonia, 81 μg/dL. He had a history of alcoholic liver cirrhosis, and his liver function was Child-Pugh B. An emergent upper gastrointestinal endoscopy revealed esophageal varices with exposed blood vessels (LmF2Cb without RC sign; Terms and conditions of the Japan Society for Portal Hypertension) and hemorrhage originating from the anal side of the esophagojejunal anastomosis. Endoscopic variceal ligation (EVL) for the bleeding varices was successful, with temporary hemostasis and stable hemodynamics. However, additional endoscopic treatment, such as endoscopic injection sclerotherapy (EIS), could not be performed to prevent rebleeding due to its difficulty in reaching and isolating the bleeding source. Abdominal contrast-enhanced computed tomography (CT) 3 days after admission revealed a dilated jejunal vein connected to the ascending jejunal limb. A part of the esophagojejunal varices was contrasted (Fig. 1). Therefore, he was diagnosed with esophagojejunal varices. Additionally, the azygos vein was suspected to be the drainage vein.
PTO was planned to prevent rebleeding from the varices 2 w after admission. Written informed consent was obtained from the patient and his family. The procedure was performed under local anesthesia. The left branch of the portal vein was punctured with a 21-gauge coaxial needle using a micro-puncture kit (Merit Mini Access Kit ; Merit Medical, Tokyo, Japan) under ultrasonographic guidance. A 5-French sheath (Super sheath ; MEDIKIT, Tokyo, Japan) was introduced into the portal vein. Subsequently, superior mesenteric venography identified the feeding and draining veins of the esophagojejunal varices (Fig. 2). The flow direction in the portal vein was hepatopetal, whereas that in the dilated jejunal vein was hepatofugal. The esophagojejunal varices originated from the dilated jejunal vein and drained into the azygos vein, which flowed into the superior vena cava. A 5-French occlusion balloon catheter with a 9-mm diameter balloon (Selecon MP catheter ; Terumo, Tokyo, Japan) was wedged into the dilated jejunal vein to control the blood flow into the varices. Subsequently, a 2.2-French tip microcatheter (Progreat β 3 ; Terumo, Tokyo, Japan) was introduced adjacent to the esophagojejunal varices via a 5-French balloon catheter. However, selective jejunal venography un-
Figure 4. Selective antegrade jejunal venography under dual-balloon occlusion reveals the entire esophagojejunal varices with a good stagnated and well-opacified contrast medium. A 5-French occlusion balloon catheter is wedged into the jejunal vein (arrow), and a 3-French occlusion balloon catheter into the outflow of esophagojejunal varices via the azygos vein (arrowhead).
der balloon occlusion revealed the contrast medium injected from the jejunal vein was washed out through the azygos vein (Fig. 3). We thought that occlusion of the outflow vessels might be necessary to eradicate varices due to the possible migration of sclerosant from the varices into the systemic venous circulation. Therefore, a PTO approach with balloon occlusion of the outflow vessels was planned. Another 5-French sheath was inserted into the right femoral vein. A 5-French catheter (Hanaco Excellent EN catheter ; Hanaco Medical, Saitama, Japan) was introduced into the azygos vein via a 5-French sheath. A 3-French occlusion balloon catheter with a 5-mm diameter balloon (LOGOS ; Piolax, Yokohama, Japan) was wedged into one drainage channel of the esophagojejunal varices using 5-French catheter at the azygos vein. Subsequently, selective antegrade jejunal venography under dual-balloon occlusion revealed the entire esophagojejunal varices with a good stagnated and well-opacified contrast medium (Fig. 4). Further, 4000 units of human haptoglobin (Haptoglobin I.V. 2000 units JB , Japan Blood Products of Organization, Tokyo, Japan) was intravenously delivered to prevent hemolysis and subsequent renal failure. Then, 10 mL of 50% glucose solution was slowly injected into the esophagojejunal varices via a 2.2-French tip microcatheter at the jejunal vein under dualballoon occlusion. Subsequently, 12 mL of 5% ethanolamine oleate (Oldamin ; Takeda Pharmaceutical, Osaka, Japan)contrast medium (Iopamidol 300; Fuji Pharma Corporation, Tokyo, Japan) mixture (EOI) was slowly injected into the esophagojejunal varices under fluoroscopy. Fluoroscopy performed 30 min after the injection demonstrated that the entire esophagojejunal varices were filled with sclerosing agents. Test injection of the contrast medium showed clot formation in the varices. 
Finally, coil embolization of the jejunal veins with their blood supply routes associated with the esophagojejunal varices was performed using microcoils (Interlock ; Boston Scientific, MA, USA). The 5-French balloon catheter at jejunal vein was withdrawn. The esophagojejunal varices disappeared on superior mesenteric venography after the procedure (Fig. 5). The 5-French sheath at the portal vein was withdrawn after placing a 2.0 mm diameter microcoil (C-stopper ; Piolax, Yokohama, Japan) in the needle tract of the liver parenchyma. Balloon occlusion of the outflow of esophagojejunal varices was pursued until the next day. The following day, 3-French balloon catheter at the azygos vein and 5-French sheath in the right femoral vein was withdrawn. One week after the procedure, the patient was discharged without complications such as fever or abdominal pain.
Endoscopy performed 2 months after the procedure showed a decrease in the size of the esophagojejunal varices and the appearance of bronze color varices, which suggested thrombosis. Six months after the procedure, abdominal contrast-enhanced CT images demonstrated the disappearance of the esophagojejunal varices (Fig. 6). There were no major complications or recurrence of varices, and his liver and renal functions did not significantly change during the 2-year follow-up period.
Discussion
Most variceal bleeding occurs in the gastroesophageal area, and the incidence of ectopic varices is reported to be less than 5% in all variceal bleeding cases . According to a current survey of ectopic varices in Japan, the rectum (44.5%) is the most common site of ectopic varices, followed by the duodenum (32.9%) . The endoscopic treatment for ectopic variceal bleeding is often difficult, and the mortality rate has been reported as high as 40% . Ectopic varices after surgical reconstruction are supplied and drained via a special route. In cases with a history of proximal or total gastrectomy, esophagojejunal varices infrequently develop because the feeding veins, such as the left gastric vein, posterior gastric vein, or short gastric vein, are all resected in operation . An increase in the venous outflow in the anastomotic region after total gastrectomy and esophagojejunostomy and neovascularization due to adhesion and inflammation in the peritoneal cavity cause variceal formation . Particularly in patients with portal hypertension, the esophagojejunal varices are commonly supplied by the dilated jejunal veins, which acquire hepatofugal blood flow in association with resecting the other hepatofugal collaterals in the operation. In this case, the varices were directly supplied by the jejunal vein and almost completely drained through the azygos vein.
Emergent endoscopic treatment is generally performed to achieve hemostasis in variceal bleeding. EIS and EVL are currently the mainstream treatments for hemostasis of esophagogastric varices . However, endoscopic approaches are occasionally unsuccessful due to the complicated configuration of the alimentary tract after surgical reconstruction. In these cases, intraperitoneal tissue adhesion and intricate anatomical reconstruction make reoperation difficult. Several case reports have described the successful treatment of esophagojejunal varices using an endoscopic approach, PTO, or PTO combined with transjugular intrahepatic portosystemic shunt (TIPS) . Initially, we also attempted the PTO technique, and we later combined a transfemoral retrograde approach. We consider that the use of balloon catheters enabled the successful treatment of the esophagojejunal varices entirely filled with the sclerosing agents. Yune et al. reported that using an occlusion balloon catheter could achieve a temporary flow reduction and prevent reflux of the sclerosant back into the portal system. In addition, the transvenous retrograde approach might prevent the sclerosant from treating the large-capacity varices from leaking into the pulmonary circulation. Occlusion of the outflow vessels may be necessary to eradicate varices effectively. We consider that occlusion of the draining vein combined with antegrade approach is useful for the reduction of sclerosant leakage because many variceal systems have multiple feeding and draining pathways. The esophagojejunal varices were trapped through dual-balloon occlusion of the feeding and draining vessels, and the blood flow was almost completely stagnated. This technique allowed the sclerosing agent to be distributed into the entire esophagojejunal varices and stagnate in the varices sufficiently. Coil embolization of the feeding vessels might also contribute to the stagnation of the sclerosant inside the varices.
The possibility of new ectopic varices must be considered, as portal blood pressure is expected to rise after variceal embolization. The shunt occlusion therapy for varices with portal hypertension might risk worsening ascites or portal hypertensive gastropathy. TIPS is another treatment of choice for ectopic variceal bleedings. Wu S et al. reported a case of bleeding esophagojejunal varices after total gastrectomy treated with TIPS plus antegrade embolization; they achieved adequate hemostasis and reduction of portal pressure after the procedure. TIPS plus antegrade embolization, portal pressure reduction therapy and variceal embolization therapy, may be a useful method to achieve hemostasis and improve the complications of portal hypertension, such as ascites, portal vein thrombosis, or new formation of ectopic varices.
Conclusion
PTO under dual-balloon occlusion of the feeding and draining veins was effective for esophagojejunal varices after total gastrectomy and esophagojejunostomy.
Conflict of Interest: None
Author Contribution: Tsuyoshi Kawai is the main operator and lead author and decided final approval of the manuscript.
Shinsaku Yata and Shinya Fujii participated in the editing of the manuscript.
IRB:
The ethics committee at our institution approved the interventional case report. |
“Buddha was asked, ‘What have you gained from meditation?’ He replied, ‘Nothing.’ ‘However,’ Buddha said, ‘let me tell you what I lost: anger, anxiety, depression, insecurity, fear of old age, and death.’”
“I never get stressed.”
I used to say and think this all the time when I saw someone freaking out about an upcoming test, a bad grade, relationship problems, or a boss or coworker.
I had a false sense of being “carefree” because I wouldn’t get stressed over the trivial things that most people did.
I was a “battle hardened” soldier recently back from a deployment in Afghanistan. When I saw people worry about those inconsequential things, I would think to myself, “Please, you have no idea what it means to be stressed.”
As it turns out, my understanding of stress was wrong. It’s also wrong for a lot of people who believe they aren’t stressed.
It wasn’t until I started meditating three years after my deployment that I started to realize that I was stressed—just in a different way and from different things than most people.
After meditating every day for a couple months, my “ah-ha” moment finally hit me.
I was sitting in traffic, late for an appointment (I hate being late), watching all the people around me freaking out. For once, I was calm and collected sitting in that traffic, thinking, “Why freak out about something I can’t change?”
That was when I really started to see the benefits and began reflecting on my past.
I realized that since returning from my deployment, I had become very irritable, not a great people person, and had very little patience.
The reaction time between something happening and my response was almost immediate.
If my girlfriend confronted me about a problem, I would immediately either get defensive and blame her or just shut down and ignore her.
Literally all of this started to change, just from consistently meditating for eight minutes a day!
My life has been drastically different since then. I am much more calm and collected. I don’t get upset over little things, especially if they’re out of my control.
My response time to a stimulus has greatly increased so I can choose the type of reaction I have and think about what to say.
My relationship with my wife (the same girlfriend from before) is incredible, and we know how to communicate like mature adults by allowing time to see the reality of a situation and choose how we respond to it.
I’ve brought about an awareness that allows me to continually grow as a person and manage the hidden stressors that often go by unnoticed.
This is just part of a long list of benefits from meditation, and I could go on and on… like how nice it is to be able to travel in third world countries without constantly keeping an eye out for ambushes or looking for my next piece of cover (a habit I had from deployment).
Although it’s great to talk about meditation and its benefits, what I really want people to understand is that there may be a lot more stress in your life than you realize, and when you meditate you become aware of that stress and are able to shift how you respond to it.
When it comes to this type of stress, the older you are, the worse it gets.
If you have ten, twenty, thirty-plus years of having negative experiences without intentionally prioritizing positive ones, you are much more likely to easily become stressed and have a negative view of the world.
The more hidden stress you experience, the more efficient your body gets at activating your physiological stress response, commonly known as “fight or flight” mode.
Ask yourself this: Were you, or someone you know, once “carefree” but are now afraid of heights, flying, and think natural disasters and shootings are about to happen whenever you leave home?
Well, you can thank your body’s efficient adaptability for that. The more stressful situations you have (and yes, watching all the negative things on the news is stressful), the more your body thinks it needs to switch into the fight or flight response to keep you safe.
That means your brain becomes more efficient at recognizing even the smallest of stressors, and less efficient at calming down or noticing positive things.
For me, it was a condensed time period that required a lot of worst-case scenario thinking. When you are constantly exposed to driving on roads with IEDs (improvised explosive devices), that stress response will condition your physiology to tell you that roads are a very dangerous place.
The same thing happens if you only watch the news; you’ll have a very misconstrued perception of the world, and you’ll be constantly feeding the bias your brain has for negative experiences.
Evolutionarily, your brain has needed to remember negative experiences to protect you much more than it needed to remember positive experiences. It takes time to undo this wiring of neural pathways that your brain has put in place. But it can be done, and meditation is a great way to build new “positive pathways” in your brain.
There’s an enormous amount of ways to meditate so I’ll share what I’ve personally done and am still doing, in the hopes that it will help you as well.
1. Basic mindfulness meditation
I started my practice with a book called 8 Minute Meditation. It takes you through a series of different styles, most of which I liked. But from this I continued to do a simple meditation every morning, focusing on my breath. Just doing this led me to the “ah-ha moment” I mentioned earlier.
2. Meditation apps
I also use a couple different apps now that I like to use mid-day or at night. In particular, I like the “loving kindness” options, also known as “focus on positive”. This is perfect for trying to counteract the negativity bias and rebuild positive neural pathways. There are a lot of options out there, including Calm, which is free.
3. Reading
This may not be thought of as meditation, but if meditation can be doing one task effortlessly with focused concentration on that one particular task, then reading is a type of meditation for me.
I easily enter what’s called “flow state” when I read. Not only that, I’m reading positive things which helps shape the way I think. The other end of this could also be “not watching the news”, just like I don’t like putting junk in my body by eating it, I don’t like putting junk in my body by watching/hearing it.
If reading isn’t quite your thing, then try listening to podcasts. Preferably podcasts that lift you up and feed your brain with positivity and learning. These can be easily listened to on your way to work, at the gym, cooking, walking, or you can just sit down and listen.
4. Walking
Walking is such an undervalued way to de-stress. I love walking for a lot of reasons; pretty much any major life decision my wife and I have made in the past few years has been made while walking.
In terms of meditation, walking meditation is an awesome practice. It’s a great way to bring about your awareness while getting the benefits of moving your body. Odds are, you walk at some point in your day. So if you’re strapped for time, use walking from the car to work as time to practice mindfulness.
After hating being late to the point of stressing out, I now tell myself, “I’m exactly where I’m supposed to be. In the here, in the now.” This has helped me drastically. Check out Thich Nhat Hanh’s How to Walk for more.
There are a number of other ways to help you de-stress and become a more relaxed, positive person. These are just some ways to get started and feel less anxious, worried, and negative.
Start to use some of these strategies and it’ll feel like a weight has been lifted off your shoulders that you didn’t even know was there.
Meditation vector image via Shutterstock |
/**
* Created by wawe on 17/6/4.
*/
@Component
public class Global implements StringConstant {
/**
* 保存全局属性值
*/
private static Map<String, String> map = Maps.newHashMap();
/**
* 属性文件加载对象
*/
private static PropertiesLoader loader = new PropertiesLoader("application.properties");
/**
* 获取配置
*
* @param key
* @return
*/
public static String getConfig(String key) {
String value = map.get(key);
if (value == null) {
value = loader.getProperty(key);
map.put(key, value != null ? value : StringUtils.EMPTY);
}
return value;
}
/**
* 获取管理端根路径
*
* @return
*/
public String getAdminPath() {
return getConfig("adminPath");
}
/**
* 获取API路径
*
* @return
*/
public String getApiPath() {
return getConfig("apiPath");
}
/**
* 是否演示模式,演示模式下不能修改用户、角色、密码、菜单、授权
*
* @return
*/
public boolean isDemoMode() {
String dm = getConfig("demoMode");
return "true".equals(dm) || "1".equals(dm);
}
} |
package com.xiaojukeji.kafka.manager.common.utils;
/**
* @author zhongyuankai
* @date 2020/6/8
*/
public class NumberUtils {
public static Long string2Long(String s) {
if (ValidateUtils.isNull(s)) {
return null;
}
try {
return Long.parseLong(s);
} catch (Exception e) {
}
return null;
}
public static Integer string2Integer(String s) {
if (ValidateUtils.isNull(s)) {
return null;
}
try {
return Integer.parseInt(s);
} catch (Exception e) {
}
return null;
}
}
|
from sys import stdin
from collections import Counter
input = stdin.readline
def main() -> None:
    """Read two lines and report whether their letter-frequency profiles match.

    Two strings are equivalent under some character relabeling iff the sorted
    multisets of their character counts are identical.
    """
    # rstrip('\n') because `input` is bound to stdin.readline above, which
    # keeps the trailing newline. Previously the newline was counted as a
    # character, giving a wrong answer when exactly one of the two lines
    # (e.g. the last line of the file) lacked a trailing newline.
    S = input().rstrip('\n')
    T = input().rstrip('\n')
    C1 = sorted(Counter(S).values())
    C2 = sorted(Counter(T).values())
    print("Yes" if C1 == C2 else "No")


if __name__ == "__main__":
    main()
|
<filename>netty-spring-websocket/src/main/java/com/github/berrywang1996/netty/spring/web/websocket/bind/annotation/AutowiredMessageSender.java<gh_stars>0
package com.github.berrywang1996.netty.spring.web.websocket.bind.annotation;
import java.lang.annotation.*;
/**
 * Field-level marker annotation, retained at runtime.
 *
 * <p>NOTE(review): based on its name and package, annotated fields are
 * presumably populated with a WebSocket message sender by the netty-spring
 * framework's injection logic -- confirm against the framework code that
 * scans for this annotation.
 *
 * @author berrywang1996
 * @version V1.0.0
 */
@Target(ElementType.FIELD)
@Retention(RetentionPolicy.RUNTIME)
@Documented
public @interface AutowiredMessageSender {
}
|
// Load reads the configuration from a YAML file structure. If path is empty
// this method reads from the configuration file specified by the '-c' command
// line flag.
func Load(path string) (*common.Config, error) {
var config *common.Config
var err error
cfgpath := GetPathConfig()
if path == "" {
list := []string{}
for _, cfg := range configfiles.list {
if !filepath.IsAbs(cfg) {
list = append(list, filepath.Join(cfgpath, cfg))
} else {
list = append(list, cfg)
}
}
config, err = common.LoadFiles(list...)
} else {
if !filepath.IsAbs(path) {
path = filepath.Join(cfgpath, path)
}
config, err = common.LoadFile(path)
}
if err != nil {
return nil, err
}
config, err = common.MergeConfigs(
defaults,
config,
overwrites,
)
if err != nil {
return nil, err
}
config.PrintDebugf("Complete configuration loaded:")
return config, nil
} |
import { ColProps } from 'components/atoms/Grid/Col/Col';
import { AlignItems, JustifyContent } from 'types';
// Form type
export type FormType = 'vertical' | 'horizontal';
export interface LabelOptions extends React.HTMLAttributes<HTMLDivElement> {
align?: AlignItems;
justify?: JustifyContent;
col?: ColProps; // The layout for input label
}
export interface ControlOptions extends React.HTMLAttributes<HTMLDivElement> {
col?: ColProps; // The layout for input control
}
|
/**
 * author: WentaoKing
 * created on: 2020/3/20
 * description: Demonstrates JVM shutdown hooks: a Thread registered via
 * Runtime.addShutdownHook is started by the JVM when it begins shutting down.
 */
public class ShutDownHookDemo {

    /** Hook thread whose run() executes during JVM shutdown. */
    static class ShutdownHook extends Thread {
        @Override
        public void run() {
            System.out.println("I am called during shutting down");
        }
    }

    public static void main(String[] args) {
        // Register the hook; it fires once main returns and the JVM exits.
        Runtime runtime = Runtime.getRuntime();
        runtime.addShutdownHook(new ShutdownHook());
    }
}
<reponame>empun/imgbox-js
import axios from 'axios';
import util from 'util';
// Route table for the imgbox.com endpoints used by this client.
const URL = {
    baseURL: 'https://imgbox.com',
    root: '/',
    token: 'ajax/token/generate',
    upload: 'upload/process',
    gallery_edit: 'gallery/edit',
    images: 'api/v1/images',
    gallery: 'api/v1/galleries',
    delete_images: 'api/v1/images/delete'
};
// Fallback token value ('<KEY>' is a redacted placeholder in this source).
const default_token = '<KEY>';
// Shared axios instance bound to the imgbox base URL.
const REQUEST = axios.create({ baseURL: URL.baseURL });
// Request-logging interceptor. For upload POSTs it additionally extracts the
// uploaded filename from the multipart body.
// NOTE(review): reads form-data internals via `config.data._streams`;
// confirm this survives axios/form-data version upgrades.
const interceptors = REQUEST.interceptors.request.use(config => {
    try {
        const filename = config.data && config.method == 'post' && config.url === 'upload/process'
            ? config.data._streams.toString().split('filename="')[1].split('"')[0] : '';
        console.log(util.inspect(
            `${config.method?.toUpperCase()} request to ` +
            `${config.baseURL}${config.url !== '/' ? '/' : ''}${config.url}` +
            `${filename !== '' ? ' with ' + filename : ''}`,
            false, null, true
        ));
        return config;
    } catch (error) {
        // Logging is best-effort: never let it break the actual request.
        return config;
    }
});
// Detaches the logging interceptor so subsequent requests are not logged.
const DisableLogger = () => {
    REQUEST.interceptors.request.eject(interceptors);
};
export {
    REQUEST,
    DisableLogger,
    URL,
    default_token
};
|
RIO DE JANEIRO (AP) — It was a golden goodbye for Usain Bolt who led his Jamaican teammates to victory in the men’s 4×100 relay. It was the last race of his Olympic career and his ninth Olympic medal. The U.S. men finished third but were disqualified for exchanging illegally outside the zone at the first change.
The U.S. women have retained the 4×100-meter relay title and helped Allyson Felix win her record fifth Olympic gold medal.
The Americans, who needed to set a qualifying time in a solo rerun hours after dropping the baton in the preliminaries and getting a second chance on protest, won the final in 41.01 seconds.
Australian athletes detained by police in Rio de Janeiro after a flap over credentials at an Olympic basketball game have been released and a team official says they will pay a fine. Australian Olympic Committee secretary-general Fiona de Jong says the athletes apologized to a Brazilian judge and prosecutor after accessing a venue without the right credentials.
The Australia Associated Press had reported that 10 athletes were detained for allegedly altering their credentials to improve their seats.
Seven track and field golds involving both men and women are up for grabs on Day 15 of the Rio Olympics.
Copyright 2016 The Associated Press. All rights reserved. This material may not be published, broadcast, rewritten or redistributed. |
<filename>src/main/java/org/hutrace/handy/authority/impl/code/Salt.java
package org.hutrace.handy.authority.impl.code;
/**
* 创建salt(盐)的接口
* <p>所有创建salt(盐)都通过此接口调用
* <p>你可以自定义实现它
* @author hu trace
*/
public interface Salt {
/**
* 创建盐
* @return 盐
*/
String create();
}
|
<filename>src/app/v1/api/user/event/event.go
package event
import (
"github.com/google/uuid"
"github.com/sofyan48/nemo/src/app/v1/api/user/entity"
"github.com/sofyan48/nemo/src/app/v1/utility/kafka"
"github.com/sofyan48/nemo/src/app/v1/utility/mongodb"
)
// USEREVENT is the Kafka topic name that user events are published to.
const USEREVENT = "user_event"

// UserEvent bundles the messaging (Kafka) and persistence (MongoDB)
// dependencies used when emitting user-related events.
type UserEvent struct {
	Kafka kafka.KafkaLibraryInterface
	Mongo mongodb.MongoDBInterface
}

// UserEventHandler constructs a UserEvent wired to the default Kafka and
// MongoDB implementations.
func UserEventHandler() *UserEvent {
	return &UserEvent{
		Kafka: kafka.KafkaLibraryHandler(),
		Mongo: mongodb.MongoDBHandler(),
	}
}

// UserEventInterface abstracts user-event emission for consumers and tests.
type UserEventInterface interface {
	UserCreateEvent(data *entity.UserEvent) (*entity.UserEvent, error)
}

// UserCreateEvent copies the event payload into the Kafka state-full format,
// stamps it (and the input) with a fresh UUID, and publishes it to the
// USEREVENT topic asynchronously.
//
// NOTE(review): the publish runs in a goroutine, so send failures cannot be
// observed here and the returned error is always nil -- confirm that
// fire-and-forget delivery is intended.
func (event *UserEvent) UserCreateEvent(data *entity.UserEvent) (*entity.UserEvent, error) {
	format := event.Kafka.GetStateFull()
	format.Action = data.Action
	format.CreatedAt = data.CreatedAt
	format.Data = data.Data
	// Mirror the generated correlation UUID back onto the caller's event.
	format.UUID = uuid.New().String()
	data.UUID = format.UUID
	go event.Kafka.SendEvent(USEREVENT, format)
	return data, nil
}
from setuptools import setup

# Minimal packaging metadata for the "redbull" distribution.
# zip_safe=False forces installation as a real directory (not a zipped egg),
# keeping package files accessible through normal filesystem paths.
setup(name='redbull',
      version='0.81',
      packages=['redbull'],
      python_requires=">=3.6",
      zip_safe=False)
|
def update_hand(hand: Dict[str, int], word: str) -> Dict[str, int]:
    """Return a copy of ``hand`` with one count consumed per letter of ``word``.

    ``word`` is lowercased first. Letters absent from the hand are ignored,
    and a letter whose count is already 0 stays at 0 (counts never go
    negative). The input ``hand`` is not mutated.
    """
    remaining = dict(hand)
    for ch in word.lower():
        count = remaining.get(ch)
        if count is None:
            # Letter was never part of the hand; nothing to consume.
            continue
        if count > 0:
            remaining[ch] = count - 1
    return remaining
Floquet Analysis of Space-Time Modulated Huygens' Metasurfaces with Lorentz Dispersion
A rigorous semi-analytical Floquet analysis is proposed for a zero-thickness space-time modulated Huygens' metasurface to model and determine the strengths of the new harmonic components of the scattered fields. The proposed method is based on Generalized Sheet Transition Conditions (GSTCs) treating a metasurface as a spatial discontinuity. The metasurface is described in terms of Lorentzian electric and magnetic surface susceptibilities, $\chi_\text{e}$ and $\chi_\text{m}$, respectively, with parameters (e.g. resonant frequency) that are periodically modulated in both space and time. The unknown scattered fields are expressed in terms of Floquet harmonics, for which the amplitudes can be found by numerically solving a set of linear equations, leading to the total scattered fields. Using existing computational techniques, the method is validated using several examples of pure-space and pure-time modulation with different modulation strengths and pumping frequencies. Finally, two cases of spacetime modulation (standing wave perturbation and a traveling wave perturbation) are presented to demonstrate the breaking of Lorentz reciprocity. The proposed method is simple and versatile and able to determine the steady-state response of a space-time modulated Huygen's metasurface that is excited with an oblique plane wave, or a general incident field such as a Gaussian beam.
Floquet Analysis of Space-Time Modulated Huygens' Metasurfaces with Lorentz Dispersion
Ville Tiukuvaara, Student Member, IEEE, Tom J. Smy, and Shulabh Gupta, Senior Member, IEEE Abstract-A rigorous semi-analytical Floquet analysis is proposed for a zero-thickness space-time modulated Huygens' metasurface to model and determine the strengths of the new harmonic components of the scattered fields. The proposed method is based on Generalized Sheet Transition Conditions (GSTCs) treating a metasurface as a spatial discontinuity. The metasurface is described in terms of Lorentzian electric and magnetic surface susceptibilities, χe and χm, respectively, with parameters (e.g. resonant frequency) that are periodically modulated in both space and time. The unknown scattered fields are expressed in terms of Floquet harmonics, for which the amplitudes can be found by numerically solving a set of linear equations, leading to the total scattered fields. Using existing computational techniques, the method is validated using several examples of pure-space and pure-time modulation with different modulation strengths and pumping frequencies. Finally, two cases of spacetime modulation (standing wave perturbation and a travelling wave perturbation) are presented to demonstrate the breaking of Lorentz reciprocity. The proposed method is simple and versatile and able to determine the steady-state response of a space-time modulated Huygen's metasurface that is excited with an oblique plane wave, or a general incident field such as a Gaussian beam.
I. INTRODUCTION
Space-time modulated materials were studied in the context of parametric amplification in the 1950s, and have recently received renewed interest in the context of metamaterials. While static metamaterials have provided a plethora of wave manipulation devices, they are typically limited by Lorentz reciprocity. While this can be overcome through the use of magneto-optic materials or nonlinear materials, these methods require bulky implementations or provide weak non-reciprocity, respectively. For this reason, space-time modulated metamaterials have emerged as an appealing alternative: by modulating the constitutive parameters of a linear medium in space and time, it is possible to achieve strong non-reciprocity. This has been explored in bulk metamaterials and metasurfaces with applications including isolators, circulators, and frequency mixers, and the possibility of using space-time diffraction patterns as channels for wireless communications.
At the same time, there is a strong interest in Huygen's metasurfaces due to their impedance matching capabilities with free-space and their versatile applications in wavefront Ville Tiukuvaara, Tom J. Smy, and Shulabh Gupta are with Carleton University, Ottawa, Canada (e-mail: [email protected]).
Consequently, a combination of the wave-shaping capabilities of Huygens' metasurfaces with space-time modulation principles, is an interesting avenue to explore for advanced electromagnetic wave control, in both space and time.
To investigate into the properties of space-time modulated Huygens' metasurfaces, finite-difference time-domain (FDTD) techniques have recently been proposed to analyze a zero thickness model of Huygens' metasurfaces - , based on the the generalized sheet transition conditions (GSTCs) . Unlike frequency domain techniques that are typically used for static metasurfaces like the finite-difference frequency-domain (FDFD) method and the boundary element method (BEM) , FDTD lends itself naturally when the surface is timevarying. However, if the modulation and incident field are periodic, then a steady-state will be achieved that is inefficient to compute with FDTD. For such space-time periodic metasurfaces, it is desirable to have an efficient method for computing the steady-state scattered fields, which are expressed in the form of space-time Floquet harmonics.
An important aspect to consider in the analysis of spacetime modulated metasurfaces is that of temporal dispersion. Given metasurfaces are constructed using sub-wavelength resonators which are inherently dispersive, their corresponding surface susceptibilities are naturally frequency dependent. Moreover, quite often the surface is operated near resonance for maximal interaction of the waves with the surface. Since time modulation leads to generation of new temporal frequency components different from those of the excitation time-domain signal, incorporating metasurface dispersion in the space-time analysis is important for obtaining correct field solutions. Keeping causality of the surface in mind, frequency dependent surface susceptibility distribution may not be arbitrarily applied and must be chosen with care.
Several methods have recently been presented. It is possible to model a metasurface using surface impedances as in , but these works concentrate on "travelling wave" spacetime modulations exclusively. Several other methods have been shown that treat the metasurface as a finite-thickness medium , but the modelling of a bulk medium adds unnecessary computational burden if the metasurface can be treated as a zero-thickness sheet. In this work, we treat the surface as such, using surface susceptibilities following a physically motivated Lorentzian profile to account for temporal dispersion, whose parameters (e.g. resonant frequency) are parametrized to emulate a space-time modulation of the metasurface. Combined with GSTCs, the Floquet harmonic amplitudes are computed by solving a set of linear equations. The proposed semi-analytical method thus efficiently computes the steady-state response of a zero-thickness space-time modulated Huygens' metasurface that is excited by a plane wave. We also show how the method can be extended to arbitrary excitations, such as Gaussian beams, by decomposing such fields into plane waves using Fourier decomposition.
The paper is structured as follows. Section II describes the problem statement of this work, and provides background on time-varying metasurfaces and how these can be modelled with Lorentzian susceptibilities. Section III presents the proposed method based on an expansion using Floquet harmonics, forming a set of linear equations that can be solved for the harmonic amplitudes and used to construct the fields. Examples are provided in Section IV for cases of pure-space and pure-time modulation with comparison to FDFD and FDTD to validate the method, and followed by two types of space-time modulation to demonstrate violating Lorentz reciprocity. Finally, conclusions are provided in Section V.
II. SPACE-TIME MODULATED METASURFACES A. Problem Statement
Consider the problem in Fig. 1, where a metasurface placed at z = 0 acts as a scatterer, producing reflected and transmitted fields, E r and E t , when a incident field E i is present 1 . The local electric field at the metasurface induces electric and magnetic polarizations, P and M . If there is no time modulation, the constitutive relations can be written in the frequency domain as where the average indicates the average of the total fields at z = 0 in terms of fields at z = 0 − and z = 0 + .
The normal component of the susceptibility tensors χ are often zero or negligible , and are discarded in this work, for simplicity. Furthermore, we will simplify to a uniaxial surface with no cross-coupling, so the equations simplify and can be written in the time domain as where the electric polarization has been normalized so Q y = P y / 0 . The averages of the fields at z = 0 is 1 In the absence of a metasurface, the incident field defined to be present everywhere, so the total field with the metasurface is χe(x, t, Ω), which in the TE case simplify to Together, (2) and (5) are sufficient to uniquely solve for the fields and polarizations, given χ e , χ m , and an incident field. We are interested in solving this problem, in the case that χ e and χ m are modulated in time in addition to space. Specifically, we will consider the form of the fields when the spatial variation is periodic (with spatial angular frequency β p as in Fig. 1) and the time variation is periodic (with "pumping" frequency ω p ). The resulting fields will be described as a summation of infinite space-time harmonics assuming a temporally dispersive metasurface, which we wish to determine.
B. Linear Time-Variant (LTV) Systems
We now consider how (2) generalizes for time-dependent susceptibilties. This convolution can be viewed as an input (field)-output (polarization) system, which is linear and timeinvariant (LTI). A time-varying susceptibility on the other hand represents a linear time-variant (LTV) system, where the electric polarization density can be written as This is a generalized convolution, where the impulse response χ e (t, τ ) gives the response that is probed at time t due to an input that is applied t time units earlier. It is worth emphasizing that although there is time variation, this should not be confused with a nonlinear system; χ e does not depend on the magnitude of E av . Using the Fourier transform of χ e with respect to τ , this can be written for an arbitrary signal E av in an equivalent relation We observe that if the surface is time-invariant (no depenency on t), then neglecting the temporal dispersion is reasonable if E av (Ω) is monochromatic. However, even with a monochromatic incident field E i , any time-dependence of the susceptibility will result in Q y (x, t) that is not monochromatic, and subsequently fields that are not monochromatic via (5). Thus, the frequency dispersion inherent to a static metasurface must in general be considered when time modulation is added.
C. Periodic Lorentzian Susceptibilities
To model the temporal dispersion inherent to a static metasurface, a Lorentzian distribution provides a physicallymotivated response that can be used to model metasurfaces, such as Huygens' metasurfaces . In the time-domain, this is a damped oscillator model that can be expressed where ω a0 (t) is the resonant frequency of the oscillator, α a (t) corresponds to damping (loss), and ω ap (t) is the plasma frequency (a = e,m). Notice that the resonator is driven by the average fields at the surface (3) as in the constitutive relation (6). We could solve the differential equation to obtain the impulse response for use in (6) . However, we will use (8) directly, which takes into account the temporally dispersive nature of the surface . All six parameters in (8) are time-variant in general for a time-varying surface. Since the Lorentizan parameters are periodic in space and time, they can be written as Fourier series: where a = (e,m) for the electric and magnetic parameters, ω p = 2π/T is the temporal "pumping frequency" of the modulation, and β p = 2π/p is the spatial frequency of the modulation. Note that in general, several resonators governed by (8) may be required to have an accurate model of the metasurface, in which case responses of each of the resonators can be summed following the superposition principle, as the system is linear (i.e. M x = M x1 +M x2 +· · · where each M xn is due to a resonator with unique parameters given by (9)). We also note that while we consider TE fields in our analysis to demonstrate the method, for conciseness and simplicity, it can be straightforwardly extended to TM fields as well.
III. BLOCH-FLOQUET EXPANSION OF FIELDS
When a metasurface is periodic, the fields also become periodic, following Floquet's theorem. By expanding the fields in terms of space (and time) harmonics, we can produce a matrix equation to solve for the fields.
A. Expansion of Fields
Applying Floquet's theorem, the electric fields can be expressed as a sum of space-time harmonics, (10) where a = i, r, t for the incident, reflected, and transmitted fields 2 , respectively, and where Θ mn = (ω n t − k x,mn x). Only a single harmonic is present for the incident field, (m, n) = (m i , n i ), corresponding to a plane wave. Floquet's theorem prescribes that the transverse part of the wavevector (k x ) takes on discrete values determined by spatial periodicity, and the normal component (k z ) then follows from having a total magnitude k n : This idea can be extended for the time harmonics, where the frequency can also only change by multiples of the pumping frequency, and the wavenumber changes accordingly: Note that this allows for harmonics with negative frequencies, as well as potentially a dc "harmonic" if ω 0 is an integer multiple of ω p . The harmonics with ω n < 0 are not a cause for alarm, as k n < 0 for these harmonics and so the direction of propagation is physical (e.g. still +z in the transmission region). The special case of ω n = 0 is more questionable, but in the results, we show that this peculiarity also poses no problem.
where θ i = θ 00 is the angle of incidence. Each harmonic (m, n) represents either an oblique propagating plane wave (k z ∈ R) or a surface wave (k z ∈ I), as illustrated in Figure 2, where the real parts of the corresponding wavevectors are plotted. Using (13), we plot circles with constant k n (and ω n ), while (11a) yields the horizontal lines that represent the allowed values k x,m (which is always purely real). The intersection of the circles and lines represents possible propagating space-time harmonics. Surface waves on the other hand lie on the vertical axis and not necessarily on a k n circle. Finally, the magnetic field can also be expanded, along with the polarization densities, which are
B. Matrix Formulation
We begin by substituting E a (x, 0, t) from (10) into (3) for the incident and scattered fields, and the resulting average field into (8a). Similarly, the expansion of Q y (x, t) from (16a) is substituted into (8a), providing a set of infinite equations (17a). This procedure is repeated with the magnetic field and (8b), producing (17b).
Next, the expansions of the fields and polarization densities are also substituted into the GSTC equations (5), producing (18a) and (19a). This leaves us with four sets of infinite equations for the four sets of harmonics, E r,mn , E t,mn , Q mn , and M mn . To make the problem tractable, the harmonics can be truncated to −M < m < M and −N < n < N , which corresponds to (2M + 1) space harmonics, each of which has (2N + 1) time harmonics. This truncation assumes that the selected number of harmonics is sufficient, and this assumption must be verified after computation in the form of a series convergence. Furthermore, the finite system of equations can be written in matrix form for implementation in code, as described in Appendix A. This allows solving for the four sets of (2N + 1) · (2M + 1) unknown harmonics.
C. Extension to an Arbitrary Incident Field
The method presented in Section III-B allows solving the scattering due to a plane wave excitation at an angle θ i , but it can be extended to arbitrary excitations, such as a single Gaussian beam (spatial distribution) or a Gaussian pulse (temporal shape). Let us denote the arbitrary incident field as E i,tot (x, z, t). The metasurface responds to the field at z = 0, where we can decompose E i,tot into plane waves, using a Fourier transform, which is densely sampled (small ω s and k x,s ) to yield a good approximation of the finite signal. Following this, (17) can be solved for each of the plane waves with ω 0 = pω s and θ i = m,n jω n c Q mn + (E r0,mn + E t0,mn ) cos θ mn e jΘmn = 0 (19a) sin −1 (qk xs c 0 /ω 0 ), yielding E a,pq (x, z, t). These are simply summed to produce the scattered fields: where a = (t, r). Of course, the 2D Fourier transform (18) can be simplified to a 1D transform if the input signal is monochromatic or spatially uniform.
IV. RESULTS
To demonstrate the proposed method, we consider three cases of periodically modulated surfaces: space-only modulation (ω p = 0), time-only modulation (β p = 0), and general space-time modulation.
A. Space-Only Modulation
First, we consider a spatial modulation of the electric and magnetic resonant frequencies, ω e0 and ω m0 , using a cosine profile (see inset in Fig. 3a). The modulation harmonics are calculated using (9a) and the system of equations (17) are solved for a normally-incident plane wave with M = 100 and N = 0, for a total of 201 harmonics, producing the magnitude plotted in Fig. 3a. Since the scattered fields are monochromatic, a frequency domain simulator can be used to verify the Floquet result; a finite-difference frequency-domain (FDFD) simulation was run, producing a field magnitude in agreement with the Floquet solution (Fig. 3b). The spatial harmonics were also compared with a discrete Fourier transform (DFT) of both fields at z = ±λ/10 on both sides of the surface, with good agreement for both propagating harmonics (|k x | < k 0 , highlighted in blue) and evanescent harmonics (|k x | > k 0 ). Next, a more complex asymmetrical profile was used for the modulation. Using the same procedure, the fields and harmonics are plotted in Fig. 4. Even though θ i = 0°, the scattering primarily occurs towards +x with k x ≥ 0 harmonics being dominant, which can be expected for this surface which imparts an asymmetric phase variation .
Finally, Fig. 5 shows an example of a Gaussian beam incident on a surface with a cosine modulation profile. In this case, the angle of incidence is θ i = −10°, and the surface is designed so that the m = −1 harmonic is scattered normally (we find β p = k 0 /5.76 from (14)). We use a beam waist of 10λ and decompose the field using (18) into 23 plane waves (found to be sufficient in representing the spatial Gaussian profile). After computing the fields for each of these plane waves and summing the total fields, the Floquet method shows good agreement with the FDFD result. The slight discrepency between the two methods becomes smaller as the number of harmonics is increased for the Floquet method, and the FDFD mesh is made more dense. Note that while one harmonic is scattered towards θ −1,0 = 0°, harmonics are also scattered in other directions. We will show in Section IV-C that if the spatial modulation is coupled with a time modulation, the harmonic at 0°can be converted to a different frequency to isolate it from the other spatial harmonics. 4 The sawtooth function from MATLAB was used with a period 2π and peaks −1 and 1.
B. Time-Only Modulation
Now, we turn to the temporal modulation of the metasurface. Since the surface is uniform and the incident field is a normal plane wave, this reduces to a 1D problem. To validate the Floquet solution, we use a FDTD technique where the susceptibility is time-variant , and run the simulation until a steady-state is achieved. Then a Fourier transform yields the time harmonics that are generated. Fig. 6 shows for instance, the time-domain waveforms obtained using Floquet and FDTD method, for an example of a weak modulation (cosine profile with ∆ e,n = 0.2), where the waveforms are recorded once the steady-state is reached in FDTD. Fig. 7a(a) further shows the corresponding space harmonics showing a good agreement between both Floquet and FDTD solutions.
Next, we consider a stronger modulation (∆ e,m = 0.5) while also increasing the pumping frequency to ω p = ω 0 /2. In this case, harmonics at negative frequencies are excited in the Floquet solution (Fig. 7b). By taking a Fourier transform of the time-domain Floquet waveform (orange diamonds), these can be "flipped" to positive frequencies; in this case, they combine with positive frequency harmonics because ω p is an integer multiple of ω 0 . However, even with this taken into account, there is a discrepancy that is observed with the FDTD result.
To determine which result is more accurate, we consider the equations they should satisfy, i.e. (5) and (8). We can numerically compute the derivatives $dQ_y/dt$ and $d^2Q_y/dt^2$ using the time-domain waveforms of FDTD and Floquet. Similarly, (5a) yields a new expression $\Delta E_y$. Finally, we solve to find new values $E_r'$ and $E_t'$. If the solution is exact, then we should have $E_r' = E_r$ and $E_t' = E_t$. We carry out this procedure for both the Floquet and FDTD methods, with the discrepancy $|E_r - E_r'|$ shown in Fig. 8. While the FDTD discrepancy changes slowly as the time-stepping becomes finer, the Floquet solution shows convergence as the number of harmonics increases. Furthermore, the Floquet solution has a much smaller discrepancy, indicating that among the two it is the more accurate solution of the original field equations. Also, we see that for both methods, the DC electric field harmonics at $\omega_n = 0$ are zero (the magnetic field, not shown, is likewise zero). From a physical perspective, a DC H field (or E field) difference can be generated across a boundary due to a static electric current (magnetic current), with the well-known boundary condition derived from Ampere's law (Faraday's law). In the right hand side of (5), this corresponds to polarizations that are linearly changing over time, which is in contradiction to a periodic solution. Mathematically, this manifests itself in (18a), which requires $E_{r0,mn} - E_{t0,mn} = 0$ for $\omega_n = 0$, while (19a) requires $E_{r0,mn} + E_{t0,mn} = 0$. The solution, of course, is that the DC fields are zero.
C. Space-Time Modulation
Finally, we consider the general case of space-time modulation. Here, it is convenient to use generalized S-parameters to describe the system, where each harmonic in the transmission and reflection regions can be considered a port, for a total of 2(2N + 1)(2M + 1) when the fundamental harmonic (m, n) = (0, 0) is normally incident 5 . We will label Fig. 7b, the consistency of the solutions was considered. The FDTD discrepancy is larger than the discrepancy of the Floquet result, while the latter improves as the number of harmonics (2N + 1) is increased.
the reflection parameter which is measured by evaluating (17) with the harmonic (m i n i ) = (−m 1 , n 1 ) excited 6 with a plane wave and the (m, n) port probed. That is, this represents the scattering from port (m 1 , n 1 ) to (m 2 , n 2 ), with m as a spatial index and n as a frequency index that can be used in (14) and (12) to find the direction and frequency, respectively. Similarly, the transmission parameter is With this convention, Fig. 9 shows a case where the space and time dependencies are decoupled, and the modulation resembles a standing wave. Each pixel represents a scattering parameter with port (m 1 , n 1 ) = (1, 0) excited in (a) and (−1, 0) excited in (b). One primary interest is whether or not this represents a reciprocal system. One way to approach this is to evaluate if R m2n2 m1n1 = R m1n1 m2n2 and T m2n2 m1n1 = T m1n1 m2n2 for all ports combinations . For example, we see that |T −1,0 1,0 | = |T 1,0 −1,0 | = 0.24 from Fig. 9, so these ports are reciprocal. lternatively, we can consider the Onsager-Casimir relations, which place conditions on the constitutive relations of LTV systems for reciprocity . In the case at hand, these require χ e (v p ) = χ e (−v p ) and χ m (v p ) = χ m (−v p ) in order for the system to be reciprocal, where the susceptibilities are a function of v p , which is the velocity of the modulation. Hence, the modulated parameter must also be identical when the direction of modulation is reversed. This is indeed the case for the standing wave modulation, which can be written as the sum of two waves travelling in opposite directions, where switching this sign of velocity is inconsequential (ω e0,m0 (x, t, v) = ω e0,m0 (x, t, −v)). This can be intuitively understood as follows: the surface "appears" the same to an incident wave regardless of which side of the surface it approaches from.
To break reciprocity, we consider a case where the space and time modulations are coupled in the form of a wave travelling along the surface in the +x direction. The scattering parameters are in Fig. 10, where in (a), we see that exciting the (1, 0) port we observe an up-converted transmitted harmonic at (0, 1) with |T 0,1 1,0 | = 0.47. Exciting this port in turn, we find |T 1,0 0,1 | ≈ 0! (Instead port (−1, 0) is excited.) Thus the system is non-reciprocal. Of course, the Onsager-Casimir relations are not satisfied in this case, since the direction of modulation is critical. Extending the analysis from a plane wave to a more general incident field, and at the same time visually demonstrating the non-reciprocity, Fig. 11 shows the fields of several frequency harmonics when a Gaussian beam is launched at the metasurface in the same two experiments. In the first case (a), a beam incident on the (1, 0) port (10° at 1.0ω 0 ) has a normally transmitted harmonic that is up-converted (1.1ω 0 ), corresponding to port (0, 1). If we in turn excite this port in (b), we do not find a transmitted harmonic in the direction of the first incident beam at ω 0 . This harmonic at 1.0ω 0 is instead directed at an angle −10°, clearly demonstrating the nonreciprocal nature of the surface.
V. CONCLUSIONS
A rigorous semi-analytical Floquet analysis has been presented for a zero-thickness space-time modulated Huygens' metasurface using GSTCs to model and determine the strengths of the new harmonic components of the scattered fields. We have accounted for the dispersion inherent to the static metasurface using physically-motived Lorentzian susceptibilities, with parameters that are modulated in space and time. These parameters (ω 0 , ω p , and α) can take on arbitrary periodic profiles in space and time, for both the electric and magnetic susceptibilities. The validity of the method has been established with comparison to FDFD simulations for pure-space modulation and FDTD simulations for pure-time modulation. Finally, two cases of space time modulation were presented: a standing wave perturbation which was found to be reciprocal and a traveling wave perturbation that breaks Lorentz reciprocity. The proposed method is fast, simple, and versatile, and is expected to be a useful tool for designing general periodic and non-reciprocal metasurfaces. |
'''
Handles every piece of hardware that comes to our attention.
''' |
Online Assignment of Heterogeneous Tasks in Crowdsourcing Markets
We investigate the problem of heterogeneous task assignment in crowdsourcing markets from the point of view of the requester, who has a collection of tasks. Workers arrive online one by one, and each declare a set of feasible tasks they can solve, and desired payment for each feasible task. The requester must decide on the fly which task (if any) to assign to the worker, while assigning workers only to feasible tasks. The goal is to maximize the number of assigned tasks with a fixed overall budget. We provide an online algorithm for this problem and prove an upper bound on the competitive ratio of this algorithm against an arbitrary (possibly worst-case) sequence of workers who want small payments relative to the requester’s total budget. We further show an almost matching lower bound on the competitive ratio of any algorithm in this setting. Finally, we propose a different algorithm that achieves an improved competitive ratio in the random permutation model, where the order of arrival of the workers is chosen uniformly at random. Apart from these strong theoretical guarantees, we carry out experiments on simulated data which demonstrates the practical applicability of our algorithms.
|
package com.ukefu.util.task.process;
/**
 * Callback contract for JPA batch processing: a per-item {@code process}
 * hook followed by a terminal {@code end} hook.
 */
public interface JPAProcess {

    /** Consume one item of data. */
    void process(Object data);

    /** Signal that processing is complete; no further data will follow. */
    void end();
}
|
/*
count the numbers of various types of structures under our control

Walks every unit in the current observation, tallies the ones we own,
and caches the totals in the member counters. Probes are counted before
the structure filter: a probe is a mobile worker unit, not a structure,
so the original placement of the probe branch inside the isStructure()
check could never match and left currentWorkers at 0.
*/
void BuildOrderManager::updateStructureCounts()
{
    int bases = 0;
    int productionFacilities = 0;
    int gases = 0;
    int cannons = 0;
    int workers = 0;

    for (auto unit : blinkerBot.Observation()->GetUnits())
    {
        if (!UnitData::isOurs(unit))
        {
            continue;
        }
        // Workers are units, not structures, so count them before the
        // structure filter below.
        if (unit->unit_type == UNIT_TYPEID::PROTOSS_PROBE)
        {
            workers++;
            continue;
        }
        if (!UnitData::isStructure(unit))
        {
            continue;
        }
        if (unit->unit_type == UNIT_TYPEID::PROTOSS_NEXUS)
        {
            bases++;
        }
        else if (unit->unit_type == UNIT_TYPEID::PROTOSS_GATEWAY ||
            unit->unit_type == UNIT_TYPEID::PROTOSS_WARPGATE ||
            unit->unit_type == UNIT_TYPEID::PROTOSS_ROBOTICSFACILITY)
        {
            productionFacilities++;
        }
        else if (unit->unit_type == UNIT_TYPEID::PROTOSS_ASSIMILATOR)
        {
            gases++;
        }
        else if (unit->unit_type == UNIT_TYPEID::PROTOSS_PHOTONCANNON)
        {
            cannons++;
        }
    }

    currentBases = bases;
    currentGases = gases;
    currentProductionFacilities = productionFacilities;
    currentCannons = cannons;
    currentWorkers = workers;
}
#define _POSIX_C_SOURCE 200809L
#include "bml.h"
#include "../macros.h"
#include "../typed.h"
#include <complex.h>
#include <stdlib.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <math.h>
#include <string.h>
#include <unistd.h>
// print content of file in stdout
// Streams the named file to stdout one character at a time; prints a
// diagnostic to stderr and returns if the file cannot be opened.
void TYPED_FUNC(
    filecontent) (
    char *fname)
{
    FILE *fp = fopen(fname, "r");
    if (fp == NULL)
    {
        fprintf(stderr, "File %s doesn't exist\n", fname);
        return;
    }
    // Copy every character until end-of-file is reached.
    for (int ch = fgetc(fp); ch != EOF; ch = fgetc(fp))
    {
        fputc(ch, stdout);
    }
    fclose(fp);
}
/**
 * Test printing of a bml matrix.
 *
 * Redirects stdout of rank 0 into a temporary file, prints the top-left
 * corner of a random matrix, reads the file back, and verifies the parsed
 * values match the original dense data within a tolerance.
 *
 * Fixes over the previous version: the racy tmpnam()/open() pair is
 * replaced by atomic mkstemp(); the saved stdout descriptor is closed
 * after restoration; heap buffers are freed on error paths; fscanf
 * return values are checked.
 *
 * \param N The matrix size
 * \param matrix_type The matrix type
 * \param matrix_precision The precision of the matrix
 * \param M The max number of non-zeroes per row
 * \return 0 on success, -1 on failure
 */
int TYPED_FUNC(
    test_print) (
    const int N,
    const bml_matrix_type_t matrix_type,
    const bml_matrix_precision_t matrix_precision,
    const int M)
{
#ifndef BML_COMPLEX
    if (matrix_precision == single_complex
        || matrix_precision == double_complex)
    {
        printf("[FIXME] Skipping unsupported test\n");
        return 0;
    }
#endif
    bml_distribution_mode_t distrib_mode = sequential;
#ifdef BML_USE_MPI
    if (bml_getNRanks() > 1)
    {
        LOG_INFO("Use distributed matrix\n");
        distrib_mode = distributed;
    }
#endif

    /* Generate a random matrix and export it to dense row-major storage. */
    bml_matrix_t *A = NULL;
    A = bml_random_matrix(matrix_type, matrix_precision, N, M, distrib_mode);
    REAL_T *A_dense = bml_export_to_dense(A, dense_row_major);

    int fd;
    char *filename;
    int original_stdout;
    if (bml_getMyRank() == 0)
    {
        bml_print_dense_matrix(N, matrix_precision, dense_row_major, A_dense,
                               0, N, 0, N);
        /* Create a unique temporary file (in case we run tests in
         * parallel). mkstemp() creates and opens the file atomically,
         * avoiding the create/open race of the former tmpnam()+open(). */
        char tmpl[] = "/tmp/bml_test_print_XXXXXX";
        fd = mkstemp(tmpl);
        if (fd < 0)
        {
            fprintf(stderr, "Failed to create temporary file\n");
            return -1;
        }
        filename = strdup(tmpl);
        fprintf(stdout, "Filename used for this test: %s\n", filename);
        /* Flush stdout before redirecting to file. */
        fflush(stdout);
        original_stdout = dup(fileno(stdout));
        if (dup2(fd, fileno(stdout)) < 0)
        {
            fprintf(stderr, "Failed to duplicate stdout\n");
            return -1;
        }
    }

    /* Assumes matrix is at least 2x2. On rank 0 this output goes into
     * the temporary file. */
    const int up = 2;
    bml_print_bml_matrix(A, 0, up, 0, up);

    /* Flush stdout before switching back. */
    fflush(stdout);
    if (bml_getMyRank() == 0)
    {
        /* Close file and re-instate stdout. */
        close(fd);
        if (dup2(original_stdout, fileno(stdout)) < 0)
        {
            fprintf(stderr, "Failed to re-activate stdout\n");
            return -1;
        }
        /* Release the saved descriptor (previously leaked). */
        close(original_stdout);

        printf("FILE content:\n");
        TYPED_FUNC(filecontent) (filename);

        /* Now read the file just written. */
        REAL_T *data = calloc(up * up, sizeof(REAL_T));
        if (data == NULL)
        {
            fprintf(stderr, "calloc failed!\n");
            return -1;
        }
        FILE *fp2 = fopen(filename, "r");
        if (fp2 == NULL)
        {
            fprintf(stderr, "Failed to open %s\n", filename);
            free(data);
            return -1;
        }
        float realp;
        float imagp;
        char sign;
        for (int i = 0; i < up; i++)
        {
            for (int j = 0; j < up; j++)
            {
                switch (matrix_precision)
                {
                    case single_real:
                    case double_real:
                        if (fscanf(fp2, "%f", &realp) != 1)
                            fprintf(stderr, "Failed to parse value\n");
                        printf("Read: %f\n", realp);
                        data[ROWMAJOR(i, j, up, up)] = realp;
                        break;
#ifdef BML_COMPLEX
                    case single_complex:
                    case double_complex:
                        /* Read complex number in 3 parts, discarding 'i'. */
                        if (fscanf(fp2, "%f %c %fi", &realp, &sign, &imagp) != 3)
                            fprintf(stderr, "Failed to parse complex value\n");
                        if (sign == '-')
                            imagp *= -1;
                        REAL_T tmp = realp + imagp * _Complex_I;
                        printf("realp %f\n", REAL_PART(tmp));
                        printf("imagp %f\n", IMAGINARY_PART(tmp));
                        data[ROWMAJOR(i, j, up, up)] = tmp;
                        break;
#endif
                    default:
                        fprintf(stderr, "Unknown precision\n");
                        break;
                }
            }
        }
        if (fclose(fp2) == EOF)
        {
            fprintf(stderr, "ERROR closing file2\n");
            free(data);
            return -1;
        }

        /* Compare data just read with the original data. */
        const double tol = 1.e-3;
        for (int i = 0; i < up; i++)
        {
            for (int j = 0; j < up; j++)
            {
                REAL_T val1 = data[ROWMAJOR(i, j, up, up)];
                REAL_T val2 = A_dense[ROWMAJOR(i, j, N, M)];
                double diff1 = REAL_PART(val2 - val1);
                double diff2 = IMAGINARY_PART(val2 - val1);
                double diff = sqrt(diff1 * diff1 + diff2 * diff2);
                printf("real parts= %f and %f\n", REAL_PART(val1),
                       REAL_PART(val2));
                if (matrix_precision == single_complex
                    || matrix_precision == double_complex)
                {
                    printf("imag part= %f and %f\n",
                           IMAGINARY_PART(val1), IMAGINARY_PART(val2));
                }
                printf("i=%d, j=%d, diff=%lf\n", i, j, diff);
                if (diff > tol)
                {
                    fprintf(stderr, "test failed!!!\n");
                    free(data);
                    return -1;
                }
            }
        }
        free(data);
        if (remove(filename) != 0)
        {
            fprintf(stderr, "Failed removing file %s\n", filename);
            free(filename);
            return -1;
        }
        free(filename);
        bml_free_memory(A_dense);
    }
    bml_deallocate(&A);
    return 0;
}
|
<reponame>nickitalebed1/ticketHunter
package com.ticket.hunter.client;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.http.HttpEntity;
import org.springframework.http.HttpHeaders;
import org.springframework.http.MediaType;
import org.springframework.http.ResponseEntity;
import org.springframework.stereotype.Component;
import org.springframework.util.LinkedMultiValueMap;
import org.springframework.util.MultiValueMap;
import org.springframework.web.client.RestTemplate;
/**
 * Retrieves train ticket information from the booking.uz.gov.ua
 * train-search endpoint.
 */
@Component
public class TicketInfoRetriever {

    @Autowired
    private RestTemplate restTemplate;

    /** Train-search endpoint of booking.uz.gov.ua. */
    private String resourceUrl = "https://booking.uz.gov.ua/en/train_search/";

    /**
     * Searches for train tickets using the previously hard-coded default
     * query. Kept for backward compatibility; prefer the parameterized
     * overload.
     *
     * @return the raw response returned by the booking service
     */
    public ResponseEntity<String> retrieveTrainTickets() {
        // Values preserved byte-for-byte from the original implementation.
        return retrieveTrainTickets("2204001", "2218000", "2018-08-03", "00:00");
    }

    /**
     * Searches for train tickets between two stations.
     *
     * @param from station code of the departure station
     * @param to   station code of the destination station
     * @param date travel date in yyyy-MM-dd format
     * @param time earliest departure time in HH:mm format
     * @return the raw response returned by the booking service
     */
    public ResponseEntity<String> retrieveTrainTickets(String from, String to, String date, String time) {
        MultiValueMap<String, String> params = new LinkedMultiValueMap<>();
        params.add("from", from);
        params.add("to", to);
        params.add("date", date);
        params.add("time", time);
        // get_tpl=1 preserved from the original request parameters.
        params.add("get_tpl", "1");

        HttpHeaders headers = new HttpHeaders();
        headers.setContentType(MediaType.APPLICATION_FORM_URLENCODED);

        HttpEntity<MultiValueMap<String, String>> request = new HttpEntity<>(params, headers);
        return restTemplate.postForEntity(resourceUrl, request, String.class);
    }
}
|
Combined suppression of the intrarenal and circulating vasoconstrictor renin-ACE-ANG II axis and augmentation of the vasodilator ACE2-ANG 1-7-Mas axis attenuates the systemic hypertension in Ren-2 transgenic rats exposed to chronic hypoxia.
The aim of the present study was to test the hypothesis that chronic hypoxia would aggravate hypertension in Ren-2 transgenic rats (TGR), a well-defined monogenetic model of hypertension with increased activity of the endogenous renin-angiotensin system (RAS). Systolic blood pressure (SBP) in conscious rats and mean arterial pressure (MAP) in anesthetized TGR and normotensive Hannover Sprague-Dawley (HanSD) rats were determined under normoxia that was either continuous or interrupted by two weeks' hypoxia. Expression, activities and concentrations of individual components of the RAS were studied in the plasma and kidney of TGR and HanSD rats under normoxic conditions and after exposure to chronic hypoxia. In HanSD rats two weeks' exposure to chronic hypoxia did not alter SBP and MAP. Surprisingly, in TGR it markedly decreased SBP and MAP; this was associated with substantial reductions in plasma and kidney renin activities and also in angiotensin II (ANG II) levels, without altering angiotensin-converting enzyme (ACE) activities. Simultaneously, in TGR the exposure to hypoxia increased kidney ACE type 2 (ACE2) activity and angiotensin 1-7 (ANG 1-7) concentrations as compared with TGR under continuous normoxia. Based on these results, we propose that suppression of the hypertensiogenic ACE-ANG II axis in the circulation and kidney tissue, combined with augmentation of the intrarenal vasodilator ACE2-ANG 1-7 axis, is the main mechanism responsible for the blood pressure-lowering effect of chronic hypoxia in TGR.
/**
* Represents the percentage of the total grade that this assignment takes up.
*/
public class AssignmentPercentage {
public static final String MESSAGE_CONSTRAINTS =
"Assignment percentages should be a value between 0 to 100 and it should not be blank.";
public final double assignmentPercentage;
/**
* Constructs a {@code assignmentPercentage}.
*
* @param assignmentPercentage A valid assignment percentage.
*/
public AssignmentPercentage(double assignmentPercentage) {
requireNonNull(assignmentPercentage);
checkArgument(isValidAssignmentPercentage(assignmentPercentage), MESSAGE_CONSTRAINTS);
this.assignmentPercentage = assignmentPercentage;
}
/**
* Returns true if a given double is a valid assignment percentage.
*/
public static boolean isValidAssignmentPercentage(double test) {
if (test <= 100 && test >= 0) {
return true;
} else {
return false;
}
}
@Override
public String toString() {
return String.valueOf(assignmentPercentage);
}
@Override
public boolean equals(Object other) {
return other == this // short circuit if same object
|| (other instanceof AssignmentPercentage // instanceof handles nulls
&& assignmentPercentage == ((AssignmentPercentage) other).assignmentPercentage); // state check
}
} |
// Let's assume that our HTTP proxy is placed on 127.0.0.1:3128 address.
// Then we have to build the correct URL with HTTP scheme. Also, please
// remember that this executor will support both HTTP and HTTP proxies
// (i.e, will do CONNECT method for HTTPS).
func ExampleMakeProxyChainExecutor_http() {
httpURL := &url.URL{
Scheme: "http",
Host: "127.0.0.1:3128",
}
state := &LayerState{}
executor, _ := MakeProxyChainExecutor(httpURL)
executor(state)
} |
<reponame>angcyo/CalendarView<filename>calendarview/src/main/java/com/haibin/calendarview/VerticalCalendarView.java
package com.haibin.calendarview;
import android.animation.Animator;
import android.animation.AnimatorListenerAdapter;
import android.content.Context;
import android.content.res.TypedArray;
import android.util.AttributeSet;
import android.view.LayoutInflater;
import android.view.animation.LinearInterpolator;
import android.widget.FrameLayout;
import androidx.annotation.NonNull;
import androidx.annotation.Nullable;
import androidx.viewpager.widget.ViewPager;
import java.lang.reflect.Constructor;
import java.util.Map;
/**
 * A calendar whose months are laid out in a vertically scrolling list
 * (RecyclerView) instead of the horizontal month ViewPager.
 * Email:<EMAIL>
 *
 * @author angcyo
 * @date 2021/10/21
 * Copyright (c) 2020 ShenZhen Wayto Ltd. All rights reserved.
 */
public class VerticalCalendarView extends CalendarView {

    /** Vertically scrolling list of month views; replaces the month ViewPager. */
    public VerticalMonthRecyclerView monthRecyclerView;

    public VerticalCalendarView(@NonNull Context context) {
        super(context);
    }

    public VerticalCalendarView(@NonNull Context context, @Nullable AttributeSet attrs) {
        super(context, attrs);
    }

    /**
     * Replaces the base-class initialization: inflates a vertical layout,
     * wires up the week bar / week pager / divider / month list / year view,
     * and installs the inner date-selected listener on the delegate.
     */
    @Override
    protected void init(Context context, @Nullable AttributeSet attrs) {
        //mDelegate = new VerticalCalendarViewDelegate(context, attrs);
        //super.init(context, attrs);
        // Read the custom item layout used for each month row.
        TypedArray array = context.obtainStyledAttributes(attrs, R.styleable.VerticalCalendarView);
        int verticalMonthItemLayoutId = array.getResourceId(R.styleable.VerticalCalendarView_vertical_month_item_layout_id, R.layout.cv_layout_vertical_month_view);
        array.recycle();
        calendarLayoutId = R.layout.cv_layout_vertical_calendar_view;
        // Inflate the layout.
        LayoutInflater.from(context).inflate(calendarLayoutId, this, true);
        // Week view.
        FrameLayout frameContent = findViewById(R.id.frameContent);
        this.mWeekPager = findViewById(R.id.vp_week);
        this.mWeekPager.setup(mDelegate);
        try {
            // The week bar class is configurable; instantiate it reflectively.
            Constructor constructor = mDelegate.getWeekBarClass().getConstructor(Context.class);
            mWeekBar = (WeekBar) constructor.newInstance(getContext());
        } catch (Exception e) {
            e.printStackTrace();
        }
        frameContent.addView(mWeekBar, 2);
        mWeekBar.setup(mDelegate);
        mWeekBar.onWeekStartChange(mDelegate.getWeekStart());
        CalendarView.OnClassInitializeListener listener = mDelegate.mClassInitializeListener;
        if (listener != null) {
            listener.onClassInitialize(mDelegate.getWeekBarClass(), mWeekBar);
        }
        // Horizontal divider line under the week bar.
        this.mWeekLine = findViewById(R.id.line);
        this.mWeekLine.setBackgroundColor(mDelegate.getWeekLineBackground());
        LayoutParams lineParams = (LayoutParams) this.mWeekLine.getLayoutParams();
        lineParams.setMargins(mDelegate.getWeekLineMargin(),
                mDelegate.getWeekBarHeight(),
                mDelegate.getWeekLineMargin(),
                0);
        this.mWeekLine.setLayoutParams(lineParams);
        // Month view.
        mMonthPager = new MonthViewPager(context); // placeholder view to avoid NPEs inside the library
        this.monthRecyclerView = findViewById(R.id.rv_month);
        monthRecyclerView.verticalMonthItemLayoutId = verticalMonthItemLayoutId;
        //this.mMonthPager.mWeekPager = mWeekPager;
        //this.mMonthPager.mWeekBar = mWeekBar;
        //mMonthPager.setup(mDelegate);
        // NOTE(review): these layout params are read from monthRecyclerView
        // but applied to mWeekPager below — confirm this is intentional.
        LayoutParams params = (LayoutParams) this.monthRecyclerView.getLayoutParams();
        params.setMargins(0, mDelegate.getWeekBarHeight() + CalendarUtil.dipToPx(context, 1), 0, 0);
        mWeekPager.setLayoutParams(params);
        // Year view.
        mYearViewPager = findViewById(R.id.selectLayout);
        mYearViewPager.setPadding(mDelegate.getYearViewPaddingLeft(), 0, mDelegate.getYearViewPaddingRight(), 0);
        mYearViewPager.setBackgroundColor(mDelegate.getYearViewBackground());
        mYearViewPager.addOnPageChangeListener(new ViewPager.OnPageChangeListener() {
            @Override
            public void onPageScrolled(int position, float positionOffset, int positionOffsetPixels) {
            }

            @Override
            public void onPageSelected(int position) {
                // Only propagate year changes while the year view is showing.
                if (mWeekPager.getVisibility() == VISIBLE) {
                    return;
                }
                if (mDelegate.mYearChangeListener != null) {
                    mDelegate.mYearChangeListener.onYearChange(position + mDelegate.getMinYear());
                }
            }

            @Override
            public void onPageScrollStateChanged(int state) {
            }
        });
        mDelegate.mInnerListener = new OnInnerDateSelectedListener() {
            /**
             * Month view date-selected event.
             * @param calendar calendar
             * @param isClick whether triggered by a user click
             */
            @Override
            public void onMonthDateSelected(Calendar calendar, boolean isClick) {
                /*if (calendar.getYear() == mDelegate.getCurrentDay().getYear() &&
                        calendar.getMonth() == mDelegate.getCurrentDay().getMonth()
                        *//*&& mMonthPager.getCurrentItem() != mDelegate.mCurrentMonthViewItem*//*) {
                    return;
                }*/
                mDelegate.mIndexCalendar = calendar;
                if (mDelegate.getSelectMode() == CalendarViewDelegate.SELECT_MODE_DEFAULT || isClick) {
                    mDelegate.mSelectedCalendar = calendar;
                }
                mWeekPager.updateSelected(mDelegate.mIndexCalendar, false);
                //mMonthPager.updateSelected();
                monthRecyclerView.updateSelected();
                if (mWeekBar != null &&
                        (mDelegate.getSelectMode() == CalendarViewDelegate.SELECT_MODE_DEFAULT || isClick)) {
                    mWeekBar.onDateSelected(calendar, mDelegate.getWeekStart(), isClick);
                }
            }

            /**
             * Week view date-selected event.
             * @param calendar calendar
             * @param isClick whether triggered by a user click
             */
            @Override
            public void onWeekDateSelected(Calendar calendar, boolean isClick) {
                mDelegate.mIndexCalendar = calendar;
                if (mDelegate.getSelectMode() == CalendarViewDelegate.SELECT_MODE_DEFAULT || isClick
                        || mDelegate.mIndexCalendar.equals(mDelegate.mSelectedCalendar)) {
                    mDelegate.mSelectedCalendar = calendar;
                }
                int y = calendar.getYear() - mDelegate.getMinYear();
                int position = 12 * y + mDelegate.mIndexCalendar.getMonth() - mDelegate.getMinYearMonth();
                mWeekPager.updateSingleSelect();
                //mMonthPager.setCurrentItem(position, false);
                //mMonthPager.updateSelected();
                monthRecyclerView.updateSelected();
                if (mWeekBar != null &&
                        (mDelegate.getSelectMode() == CalendarViewDelegate.SELECT_MODE_DEFAULT
                                || isClick
                                || mDelegate.mIndexCalendar.equals(mDelegate.mSelectedCalendar))) {
                    mWeekBar.onDateSelected(calendar, mDelegate.getWeekStart(), isClick);
                }
            }
        };
        // Seed the initial selection: today if it is inside the valid range,
        // otherwise the minimum range date (or an empty Calendar in
        // non-default selection modes).
        if (mDelegate.getSelectMode() == CalendarViewDelegate.SELECT_MODE_DEFAULT) {
            if (isInRange(mDelegate.getCurrentDay())) {
                mDelegate.mSelectedCalendar = mDelegate.createCurrentDate();
            } else {
                mDelegate.mSelectedCalendar = mDelegate.getMinRangeCalendar();
            }
        } else {
            mDelegate.mSelectedCalendar = new Calendar();
        }
        mDelegate.mIndexCalendar = mDelegate.mSelectedCalendar;
        mWeekBar.onDateSelected(mDelegate.mSelectedCalendar, mDelegate.getWeekStart(), false);
        monthRecyclerView.setup(mDelegate);
        //mMonthPager.setCurrentItem(mDelegate.mCurrentMonthViewItem);
        monthRecyclerView.setCurrentItem(mDelegate.mCurrentMonthViewItem, false);
        mYearViewPager.setOnMonthSelectedListener(new YearRecyclerView.OnMonthSelectedListener() {
            @Override
            public void onMonthSelected(int year, int month) {
                int position = 12 * (year - mDelegate.getMinYear()) + month - mDelegate.getMinYearMonth();
                closeSelectLayout(position);
                mDelegate.isShowYearSelectedLayout = false;
            }
        });
        mYearViewPager.setup(mDelegate);
        mWeekPager.updateSelected(mDelegate.createCurrentDate(), false);
    }

    /** Forwards scheme (marker) data changes to the vertical month list. */
    @Override
    public void setSchemeDate(Map<String, Calendar> mSchemeDates) {
        super.setSchemeDate(mSchemeDates);
        monthRecyclerView.update();
    }

    /** Clears scheme data and refreshes the vertical month list. */
    @Override
    public void clearSchemeDate() {
        super.clearSchemeDate();
        monthRecyclerView.update();
    }

    /** Propagates a new valid date range to the vertical month list. */
    @Override
    public void setRange(int minYear, int minYearMonth, int minYearDay, int maxYear, int maxYearMonth, int maxYearDay) {
        super.setRange(minYear, minYearMonth, minYearDay, maxYear, maxYearMonth, maxYearDay);
        monthRecyclerView.updateRange();
    }

    /** Shows the year-selection layout, shrinking the month list away. */
    @Override
    protected void showSelectLayout(int year) {
        super.showSelectLayout(year);
        monthRecyclerView.animate()
                .scaleX(0)
                .scaleY(0)
                .setDuration(260)
                .setInterpolator(new LinearInterpolator())
                .setListener(new AnimatorListenerAdapter() {
                    @Override
                    public void onAnimationEnd(Animator animation) {
                        super.onAnimationEnd(animation);
                        if (mDelegate.mYearViewChangeListener != null) {
                            mDelegate.mYearViewChangeListener.onYearViewChange(false);
                        }
                    }
                });
    }

    /**
     * Closes the year-selection layout, scrolls the month list to the chosen
     * month position, and scales the month list back in.
     */
    @Override
    protected void closeSelectLayout(int position) {
        super.closeSelectLayout(position);
        monthRecyclerView.setCurrentItem(position, false);
        monthRecyclerView.animate()
                .scaleX(1)
                .scaleY(1)
                .setDuration(180)
                .setInterpolator(new LinearInterpolator())
                .setListener(new AnimatorListenerAdapter() {
                    @Override
                    public void onAnimationEnd(Animator animation) {
                        super.onAnimationEnd(animation);
                        if (mDelegate.mYearViewChangeListener != null) {
                            mDelegate.mYearViewChangeListener.onYearViewChange(true);
                        }
                        if (mParentLayout != null) {
                            mParentLayout.showContentView();
                            if (mParentLayout.isExpand()) {
                                mMonthPager.setVisibility(VISIBLE);
                            } else {
                                mWeekPager.setVisibility(VISIBLE);
                                mParentLayout.shrink();
                            }
                        } else {
                            mMonthPager.setVisibility(VISIBLE);
                        }
                        mMonthPager.clearAnimation();
                    }
                });
    }

    /** Scrolls the vertical month list to today. */
    @Override
    public void scrollToCurrent(boolean smoothScroll) {
        super.scrollToCurrent(smoothScroll);
        monthRecyclerView.scrollToCurrent(smoothScroll);
    }

    /** Scrolls the vertical month list to the next month. */
    @Override
    public void scrollToNext(boolean smoothScroll) {
        super.scrollToNext(smoothScroll);
        monthRecyclerView.scrollToNext(smoothScroll);
    }

    /** Scrolls the vertical month list to the previous month. */
    @Override
    public void scrollToPre(boolean smoothScroll) {
        super.scrollToPre(smoothScroll);
        monthRecyclerView.scrollToPre(smoothScroll);
    }

    /** Scrolls the vertical month list to a specific date. */
    @Override
    public void scrollToCalendar(int year, int month, int day, boolean smoothScroll, boolean invokeListener) {
        super.scrollToCalendar(year, month, day, smoothScroll, invokeListener);
        monthRecyclerView.scrollToCalendar(year, month, day, smoothScroll, invokeListener);
    }

    /** Swaps the month-cell view class and rebuilds the month list items. */
    @Override
    public void setMonthView(Class<?> cls) {
        super.setMonthView(cls);
        monthRecyclerView.updateMonthViewClass();
    }
}
|
<filename>modules/mysqlapi/restful/graph/index_it_test.go
package graph
import (
"net/http"
oClient "github.com/Cepave/open-falcon-backend/common/http/client"
json "github.com/Cepave/open-falcon-backend/common/json"
tg "github.com/Cepave/open-falcon-backend/common/testing/ginkgo"
tc "github.com/Cepave/open-falcon-backend/common/testing/http"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// Integration test for [POST] /api/v1/graph/endpoint-index/vacuum.
//
// The test seeds the endpoint/counter/tag index tables with rows whose
// timestamps are either stale (ts = 1001) or current (UNIX_TIMESTAMP()),
// then calls the vacuum API twice: the first call must remove exactly the
// stale rows, the second must find nothing left to remove.
var _ = Describe("[POST] /api/v1/graph/endpoint-index/vacuum", itSkip.PrependBeforeEach(func() {
	client := (&tc.GentlemanClientConf{tc.NewHttpClientConfigByFlag()}).NewClient()

	// Seed 2 stale + 1 fresh endpoint and the matching counters/tags
	// before each spec.
	BeforeEach(func() {
		inTx(
			`
			INSERT INTO endpoint(id, endpoint, ts, t_create)
			VALUES
				(30501, 'cmb-js-183-213-022-038', 1001, NOW()),
				(30502, 'cmb-sd-223-099-243-148', 1001, NOW()),
				(30503, 'cnc-gz-058-016-043-037', UNIX_TIMESTAMP(), NOW())
			`,
			`
			INSERT INTO endpoint_counter(
				id, endpoint_id, counter, step, type, ts, t_create
			)
			VALUES
				(20711, 30501, 'disk.io.msec_write/device=sdm', 60, 'GAUGE', 1001, NOW()),
				(20712, 30501, 'disk.io.await/device=sdd', 60, 'DERIVE', 1001, NOW()),
				(20713, 30502, 'net.if.in.multicast/iface=eth4', 60, 'GAUGE', 1001, NOW()),
				(20714, 30503, 'disk.io.read_merged/device=sde', 60, 'GAUGE', UNIX_TIMESTAMP(), NOW()),
				(20715, 30503, 'disk.io.read_sectors/device=sdf', 60, 'DERIVE', UNIX_TIMESTAMP(), NOW())
			`,
			`
			INSERT INTO tag_endpoint(
				id, endpoint_id, tag, ts, t_create
			)
			VALUES
				(23031, 30501, 'device=sds', 1001, NOW()),
				(23032, 30501, 'device=sdl', 1001, NOW()),
				(23033, 30502, 'iface=eth1', 1001, NOW()),
				(23034, 30503, 'iface=eth1', UNIX_TIMESTAMP(), NOW()),
				(23035, 30503, 'iface=eth3', UNIX_TIMESTAMP(), NOW())
			`,
		)
	})

	// Remove all seeded rows so specs stay independent.
	AfterEach(func() {
		inTx(
			"DELETE FROM tag_endpoint WHERE id >= 23031 AND id <= 23035",
			"DELETE FROM endpoint_counter WHERE id >= 20711 AND id <= 20715",
			"DELETE FROM endpoint WHERE id >= 30501 AND id <= 30503",
		)
	})

	// sendVacuumRequest calls the vacuum API (keeping the last 1000 days of
	// data) and asserts the removed endpoint/counter/tag counts reported in
	// the JSON body under "affected_rows".
	sendVacuumRequest := func(
		expectedAffectedEndpoints, expectedAffectedCounters,
		expectedAffectedTags int,
	) {
		resp, err := client.Post().AddPath("/api/v1/graph/endpoint-index/vacuum").
			AddQuery("for_days", "1000").
			Send()

		Expect(err).To(Succeed())
		Expect(resp).To(tg.MatchHttpStatus(http.StatusOK))

		jsonBody := oClient.ToGentlemanResp(resp).MustGetJson()
		GinkgoT().Logf("[/graph/endpoint-index/vacuum] JSON Result:\n%s", json.MarshalPrettyJSON(jsonBody))

		Expect(jsonBody.GetPath("affected_rows", "endpoints").MustInt()).To(Equal(expectedAffectedEndpoints))
		Expect(jsonBody.GetPath("affected_rows", "counters").MustInt()).To(Equal(expectedAffectedCounters))
		Expect(jsonBody.GetPath("affected_rows", "tags").MustInt()).To(Equal(expectedAffectedTags))
	}

	It("Send vacuum request", func() {
		By("1st vacuum(something should be vacuumed)")
		sendVacuumRequest(2, 3, 3)

		By("2nd vacuum(nothing to be vacuumed)")
		sendVacuumRequest(0, 0, 0)
	})
}))
|
import numpy as np
import os
import torch.nn.functional as F
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.distributions.categorical import Categorical
from torch.multiprocessing import Pipe
from model_resize import CnnActorCriticNetwork2, RNDModel2
from envs_resize import *
from utils import *
from arguments import get_args
from tensorboardX import SummaryWriter
def get_action(model, device, state):
    """Run a policy forward pass and sample an action per worker.

    Returns a tuple of (sampled actions, extrinsic values, intrinsic
    values) as numpy arrays, plus the action probabilities as a detached
    CPU tensor.
    """
    obs = torch.Tensor(state).to(device).float()
    probs, ext_value, int_value = model(obs)
    sampled = Categorical(probs).sample()
    return (
        sampled.data.cpu().numpy().squeeze(),
        ext_value.data.cpu().numpy().squeeze(),
        int_value.data.cpu().numpy().squeeze(),
        probs.detach().cpu(),
    )
def compute_intrinsic_reward(rnd, device, next_obs):
    """RND intrinsic reward: half the squared prediction error per sample.

    The reward is the per-sample sum over feature dimensions of
    (target(obs) - predictor(obs))**2, divided by 2, as a numpy array.
    """
    obs = torch.FloatTensor(next_obs).to(device)
    prediction_error = rnd.target(obs) - rnd.predictor(obs)
    reward = prediction_error.pow(2).sum(1) / 2
    return reward.data.cpu().numpy()
def make_train_data(reward, done, value, gamma, gae_lambda, num_step, num_worker, use_gae):
    """Build flattened return targets and advantages for a rollout.

    Args:
        reward: array of shape (num_worker, num_step).
        done: episode-termination flags, shape (num_worker, num_step).
        value: value estimates incl. bootstrap, shape (num_worker, num_step + 1).
        gamma: discount factor.
        gae_lambda: GAE smoothing coefficient (only used when use_gae).
        num_step: rollout length.
        num_worker: number of parallel environments.
        use_gae: if True, use Generalized Advantage Estimation; otherwise
            plain discounted n-step returns.

    Returns:
        (returns, advantages): both flattened to shape (num_worker * num_step,).
    """
    discounted_return = np.empty([num_worker, num_step])

    if use_gae:
        # Bug fix: the original `np.zeros_like([num_worker, ])` built a
        # shape-(1,) integer array from the *list* [num_worker]; use a float
        # accumulator of the proper shape instead (numerically equivalent
        # via broadcasting, but correct by construction).
        gae = np.zeros(num_worker)
        for t in range(num_step - 1, -1, -1):
            delta = reward[:, t] + gamma * value[:, t + 1] * (1 - done[:, t]) - value[:, t]
            gae = delta + gamma * gae_lambda * (1 - done[:, t]) * gae
            discounted_return[:, t] = gae + value[:, t]
    else:
        # Plain discounted return bootstrapped from the final value estimate.
        running_add = value[:, -1]
        for t in range(num_step - 1, -1, -1):
            running_add = reward[:, t] + gamma * running_add * (1 - done[:, t])
            discounted_return[:, t] = running_add

    # Advantage for the actor: return minus the value baseline. Identical in
    # both branches, so hoisted out of the if/else.
    adv = discounted_return - value[:, :-1]
    return discounted_return.reshape([-1]), adv.reshape([-1])
def train_model(args, device, output_size, model, rnd, optimizer, s_batch, target_ext_batch, target_int_batch, y_batch, adv_batch, next_obs_batch, old_action_probs):
    """Run PPO updates (clipped surrogate objective) on the actor-critic
    together with the RND predictor loss over the collected rollout batch.

    Args:
        args: parsed CLI arguments (uses epoch, batch_size, eps, entropy_coef).
        device: torch device to run the update on.
        output_size: number of discrete actions.
        model: actor-critic producing (action_probs, value_ext, value_int).
        rnd: RND module with a fixed `target` and trainable `predictor` net.
        optimizer: optimizer over model + rnd.predictor parameters.
        s_batch: flattened rollout states.
        target_ext_batch: extrinsic value targets.
        target_int_batch: intrinsic value targets.
        y_batch: actions taken during the rollout.
        adv_batch: combined (ext + int) advantages.
        next_obs_batch: whitened next observations for the RND loss.
        old_action_probs: per-step action probabilities from rollout time.
    """
    #epoch = 3
    # Only this fraction of each minibatch contributes to the RND
    # predictor update.
    update_proportion = 0.25
    s_batch = torch.FloatTensor(s_batch).to(device)
    target_ext_batch = torch.FloatTensor(target_ext_batch).to(device)
    target_int_batch = torch.FloatTensor(target_int_batch).to(device)
    y_batch = torch.LongTensor(y_batch).to(device)
    adv_batch = torch.FloatTensor(adv_batch).to(device)
    next_obs_batch = torch.FloatTensor(next_obs_batch).to(device)

    sample_range = np.arange(len(s_batch))
    forward_mse = nn.MSELoss(reduction='none')

    # Log-probabilities of the taken actions under the rollout-time policy;
    # these stay constant during the update, hence no_grad.
    with torch.no_grad():
        action_probs_old_list = torch.stack(old_action_probs).permute(1, 0, 2).contiguous().view(-1, output_size).to(device)

        m_old = Categorical(action_probs_old_list)
        log_prob_old = m_old.log_prob(y_batch)
        # ------------------------------------------------------------

    for i in range(args.epoch):
        # Reshuffle minibatch order each epoch.
        np.random.shuffle(sample_range)
        for j in range(int(len(s_batch) / args.batch_size)):
            sample_idx = sample_range[args.batch_size * j:args.batch_size * (j + 1)]

            # --------------------------------------------------------------------------------
            # for Curiosity-driven(Random Network Distillation)
            predict_next_state_feature, target_next_state_feature = rnd(next_obs_batch[sample_idx])

            forward_loss = forward_mse(predict_next_state_feature, target_next_state_feature.detach()).mean(-1)

            # Proportion of exp used for predictor update: random mask keeps
            # ~update_proportion of the samples (denominator floored at 1).
            mask = torch.rand(len(forward_loss)).to(device)
            mask = (mask < update_proportion).type(torch.FloatTensor).to(device)
            forward_loss = (forward_loss * mask).sum() / torch.max(mask.sum(), torch.Tensor([1]).to(device))
            # ---------------------------------------------------------------------------------

            action_probs, value_ext, value_int = model(s_batch[sample_idx])
            m = Categorical(action_probs)
            log_prob = m.log_prob(y_batch[sample_idx])

            # PPO clipped surrogate objective.
            ratio = torch.exp(log_prob - log_prob_old[sample_idx])

            surr1 = ratio * adv_batch[sample_idx]
            surr2 = torch.clamp(
                ratio,
                1.0 - args.eps,
                1.0 + args.eps) * adv_batch[sample_idx]

            actor_loss = -torch.min(surr1, surr2).mean()
            critic_ext_loss = F.mse_loss(value_ext.sum(1), target_ext_batch[sample_idx])
            critic_int_loss = F.mse_loss(value_int.sum(1), target_int_batch[sample_idx])

            critic_loss = critic_ext_loss + critic_int_loss

            entropy = m.entropy().mean()

            # Joint update of policy, both value heads, and RND predictor.
            optimizer.zero_grad()
            loss = actor_loss + 0.5 * critic_loss - args.entropy_coef * entropy + forward_loss
            loss.backward()
            global_grad_norm_(list(model.parameters())+list(rnd.predictor.parameters()))
            optimizer.step()
def main():
    """Entry point: train a PPO agent with an RND intrinsic-reward bonus on
    an Atari environment, using multiple worker processes connected via
    pipes.
    """
    args = get_args()
    device = torch.device('cuda' if args.cuda else 'cpu')

    # Query the environment once for observation/action space sizes.
    env = gym.make(args.env_name)
    input_size = env.observation_space.shape  # 4
    output_size = env.action_space.n  # 2
    if 'Breakout' in args.env_name:
        output_size -= 1
    env.close()

    is_render = False
    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)
    model_path = os.path.join(args.save_dir, args.env_name + '.model')
    predictor_path = os.path.join(args.save_dir, args.env_name + '.pred')
    target_path = os.path.join(args.save_dir, args.env_name + '.target')

    writer = SummaryWriter(log_dir=args.log_dir)

    # Running statistics for intrinsic-reward and observation normalization.
    reward_rms = RunningMeanStd()
    obs_rms = RunningMeanStd(shape=(1, 1, 52, 52))
    discounted_reward = RewardForwardFilter(args.ext_gamma)

    model = CnnActorCriticNetwork2(input_size, output_size, args.use_noisy_net)
    rnd = RNDModel2(input_size, output_size)
    model = model.to(device)
    rnd = rnd.to(device)
    optimizer = optim.Adam(list(model.parameters()) + list(rnd.predictor.parameters()), lr=args.lr)

    # Optionally resume from checkpoints (CPU map_location when not on CUDA).
    if args.load_model:
        if args.cuda:
            model.load_state_dict(torch.load(model_path))
            rnd.predictor.load_state_dict(torch.load(predictor_path))
            rnd.target.load_state_dict(torch.load(target_path))
            print('Load models successfully.')
        else:
            model.load_state_dict(torch.load(model_path, map_location='cpu'))
            rnd.predictor.load_state_dict(torch.load(predictor_path, map_location='cpu'))
            rnd.target.load_state_dict(torch.load(target_path, map_location='cpu'))

    # Spawn one environment worker process per parallel environment.
    works = []
    parent_conns = []
    child_conns = []
    for idx in range(args.num_worker):
        parent_conn, child_conn = Pipe()
        work = AtariEnvironment(
            args.env_name,
            args.output_path,
            is_render,
            idx,
            child_conn,
            sticky_action=args.sticky_action,
            p=args.sticky_action_prob,
            max_episode_steps=args.max_episode_steps)
        work.start()
        works.append(work)
        parent_conns.append(parent_conn)
        child_conns.append(child_conn)

    states = np.zeros([args.num_worker, 4, 52, 52])

    sample_env_index = 0  # Environment index used for logging
    sample_episode = 0
    sample_rall = 0
    sample_step = 0
    sample_i_rall = 0
    global_update = 0
    global_step = 0

    # normalize observation: drive the workers with random actions for a
    # while to collect observation statistics before training starts.
    print('Initializes observation normalization...')
    next_obs = []
    for step in range(args.num_step * args.pre_obs_norm_steps):
        actions = np.random.randint(0, output_size, size=(args.num_worker,))

        for parent_conn, action in zip(parent_conns, actions):
            parent_conn.send(action)

        for parent_conn in parent_conns:
            next_state, reward, done, realdone, log_reward = parent_conn.recv()
            next_obs.append(next_state[3, :, :].reshape([1, 52, 52]))

        if len(next_obs) % (args.num_step * args.num_worker) == 0:
            next_obs = np.stack(next_obs)
            obs_rms.update(next_obs)
            next_obs = []

    print('Training...')
    while True:
        total_state, total_reward, total_done, total_next_state, total_action, total_int_reward, total_next_obs, total_ext_values, total_int_values, total_action_probs = [], [], [], [], [], [], [], [], [], []
        global_step += (args.num_worker * args.num_step)
        global_update += 1

        # Step 1. n-step rollout
        #print('Start getting samples')
        for _ in range(args.num_step):
            actions, value_ext, value_int, action_probs = get_action(model, device, np.float32(states) / 255.)
            #print('get actions')

            for parent_conn, action in zip(parent_conns, actions):
                parent_conn.send(action)

            next_states, rewards, dones, real_dones, log_rewards, next_obs = [], [], [], [], [], []
            for parent_conn in parent_conns:
                next_state, reward, done, real_done, log_reward = parent_conn.recv()
                next_states.append(next_state)
                rewards.append(reward)
                dones.append(done)
                real_dones.append(real_done)
                log_rewards.append(log_reward)
                # Only the newest frame of the 4-frame stack feeds the RND nets.
                next_obs.append(next_state[3, :, :].reshape([1, 52, 52]))

            next_states = np.stack(next_states)
            rewards = np.hstack(rewards)
            dones = np.hstack(dones)
            real_dones = np.hstack(real_dones)
            next_obs = np.stack(next_obs)

            # total reward = int reward + ext Reward
            intrinsic_reward = compute_intrinsic_reward(rnd, device,
                                                        ((next_obs - obs_rms.mean) / np.sqrt(obs_rms.var)).clip(-5, 5))
            intrinsic_reward = np.hstack(intrinsic_reward)
            sample_i_rall += intrinsic_reward[sample_env_index]

            total_next_obs.append(next_obs)
            total_int_reward.append(intrinsic_reward)
            total_state.append(states)
            total_reward.append(rewards)
            total_done.append(dones)
            total_action.append(actions)
            total_ext_values.append(value_ext)
            total_int_values.append(value_int)
            total_action_probs.append(action_probs)

            states = next_states[:, :, :, :]

            sample_rall += log_rewards[sample_env_index]
            sample_step += 1
            # Log per-episode stats for the monitored environment.
            if real_dones[sample_env_index]:
                sample_episode += 1
                writer.add_scalar('data/reward_per_epi', sample_rall, sample_episode)
                writer.add_scalar('data/reward_per_rollout', sample_rall, global_update)
                writer.add_scalar('data/step', sample_step, sample_episode)
                sample_rall = 0
                sample_step = 0
                sample_i_rall = 0

        # calculate last next value (bootstrap value for the n-step return)
        _, value_ext, value_int, _ = get_action(model, device, np.float32(states) / 255.)
        total_ext_values.append(value_ext)
        total_int_values.append(value_int)
        # --------------------------------------------------

        # Reshape rollout buffers from [step, worker, ...] to flat batches.
        total_state = np.stack(total_state).transpose([1, 0, 2, 3, 4]).reshape([-1, 4, 52, 52])
        total_reward = np.stack(total_reward).transpose().clip(-1, 1)
        total_action = np.stack(total_action).transpose().reshape([-1])
        total_done = np.stack(total_done).transpose()
        total_next_obs = np.stack(total_next_obs).transpose([1, 0, 2, 3, 4]).reshape([-1, 1, 52, 52])
        total_ext_values = np.stack(total_ext_values).transpose()
        total_int_values = np.stack(total_int_values).transpose()
        total_logging_action_probs = np.vstack(total_action_probs)

        # Step 2. calculate intrinsic reward
        # running mean intrinsic reward
        total_int_reward = np.stack(total_int_reward).transpose()
        total_reward_per_env = np.array([discounted_reward.update(reward_per_step) for reward_per_step in total_int_reward.T])
        mean, std, count = np.mean(total_reward_per_env), np.std(total_reward_per_env), len(total_reward_per_env)
        reward_rms.update_from_moments(mean, std ** 2, count)

        # normalize intrinsic reward
        total_int_reward /= np.sqrt(reward_rms.var)
        writer.add_scalar('data/int_reward_per_epi', np.sum(total_int_reward) / args.num_worker, sample_episode)
        writer.add_scalar('data/int_reward_per_rollout', np.sum(total_int_reward) / args.num_worker, global_update)
        # -------------------------------------------------------------------------------------------

        # logging Max action probability
        writer.add_scalar('data/max_prob', total_logging_action_probs.max(1).mean(), sample_episode)

        # Step 3. make target and advantage
        # extrinsic reward calculate
        ext_target, ext_adv = make_train_data(total_reward,
                                              total_done,
                                              total_ext_values,
                                              args.ext_gamma,
                                              args.gae_lambda,
                                              args.num_step,
                                              args.num_worker,
                                              args.use_gae)

        # intrinsic reward calculate
        # None Episodic (intrinsic returns ignore episode boundaries: zero dones)
        int_target, int_adv = make_train_data(total_int_reward,
                                              np.zeros_like(total_int_reward),
                                              total_int_values,
                                              args.int_gamma,
                                              args.gae_lambda,
                                              args.num_step,
                                              args.num_worker,
                                              args.use_gae)

        # add ext adv and int adv
        total_adv = int_adv * args.int_coef + ext_adv * args.ext_coef
        # -----------------------------------------------

        # Step 4. update obs normalize param
        obs_rms.update(total_next_obs)
        # -----------------------------------------------

        # Whiten next observations in place for the RND networks.
        total_next_obs -= obs_rms.mean
        total_next_obs /= np.sqrt(obs_rms.var)

        # Step 5. Training!
        #print('Start training')
        train_model(args, device, output_size, model, rnd, optimizer,
                    np.float32(total_state) / 255., ext_target, int_target, total_action,
                    total_adv, total_next_obs.clip(-5, 5),
                    total_action_probs)

        # Periodically checkpoint all networks.
        if global_step % (args.num_worker * args.num_step * args.save_interval) == 0:
            print('Now Global Step :{}'.format(global_step))
            torch.save(model.state_dict(), model_path)
            torch.save(rnd.predictor.state_dict(), predictor_path)
            torch.save(rnd.target.state_dict(), target_path)
# Script entry point.
if __name__ == '__main__':
    main()
|
/**
 * Splits a full name composed according to the Chinese tradition:
 * <pre>
 * [family name [middle name]] given name
 * </pre>
 *
 * Two strategies are applied in order: if the name contains whitespace,
 * tokens are shifted so the first becomes the family name, middle tokens
 * the middle name, and the last the given name. If no whitespace was found
 * (only givenNames got set), the name is split positionally by character
 * count (2, 3, or 4 characters).
 */
private void splitChineseName(Name name, String fullName) {
    StringTokenizer tokenizer = new StringTokenizer(fullName);
    while (tokenizer.hasMoreTokens()) {
        String token = tokenizer.nextToken();
        if (name.givenNames == null) {
            // First token: provisionally treat it as the given name.
            name.givenNames = token;
        } else if (name.familyName == null) {
            // Second token: previous token shifts to family name.
            name.familyName = name.givenNames;
            name.givenNames = token;
        } else if (name.middleName == null) {
            // Third token: previous token shifts to middle name.
            name.middleName = name.givenNames;
            name.givenNames = token;
        } else {
            // Further tokens: accumulate into the middle name.
            // NOTE(review): tokens are concatenated without a separator —
            // confirm this is intended for multi-token middle names.
            name.middleName = name.middleName + name.givenNames;
            name.givenNames = token;
        }
    }
    // No whitespace in the input (only givenNames set): fall back to a
    // positional split. Family name is 1 character for 2-3 character
    // names and 2 characters for 4 character names.
    if (name.givenNames != null && name.familyName == null && name.middleName == null) {
        int length = fullName.length();
        if (length == 2) {
            name.familyName = fullName.substring(0, 1);
            name.givenNames = fullName.substring(1);
        } else if (length == 3) {
            name.familyName = fullName.substring(0, 1);
            name.middleName = fullName.substring(1, 2);
            name.givenNames = fullName.substring(2);
        } else if (length == 4) {
            name.familyName = fullName.substring(0, 2);
            name.middleName = fullName.substring(2, 3);
            name.givenNames = fullName.substring(3);
        }
    }
}
Adaptively Center-Shape Sensitive Sample Selection for Ship Detection in SAR Images
With the wide application of synthetic aperture radar in maritime surveillance, ship detection methods have developed rapidly. However, there is still a key problem common to most methods, i.e., how to select positive and negative samples. The mainstream MaxIoUAssign has inherent problems, such as a fixed threshold and rough classification, resulting in the low quality of the positive samples. To solve these problems, we propose a new sample selection method called adaptively center-shape sensitive sample selection. The proposed method introduces shape similarity between proposal boxes and ground truth as one of the evaluation criteria and combines it with intersection over union (IoU) to measure the quality of the proposal boxes. Meanwhile, the center distance between proposal boxes and ground truth is used to control the relative influence of IoU and shape similarity. In this way, the quality score of the proposal boxes can be determined through IoU, shape similarity, and center position, making sample selection more comprehensive. Additionally, to avoid a fixed threshold, the standard deviation of the quality score is used as a variable to form the adaptive threshold. Finally, we conducted extensive experiments on the benchmark SAR ship detection dataset (SSDD) and the high-resolution SAR images dataset (HRSID). The experimental results demonstrated the superiority of our method.
I. INTRODUCTION
S YNTHETIC aperture radar (SAR) is a high-resolution image radar. As an active microwave imaging sensor, its microwave imaging process has a certain penetration effect on ground targets and is less affected by the environment. Thus, it can effectively detect various hidden targets. At the same time, its all-weather advantages enable it to complete exploration missions in all extreme conditions. Because of these characteristics, SAR has been widely used in ship detection , , , , , .
Traditional SAR image ship detection methods mainly infer the ship's location and classification by observing the difference between the hull and background. There are three methods based on: 1) statistical features; 2) threshold; 3) transformation. For example, Iervolino and Guida considered the marine clutter and signal backscattering in SAR images and proposed a generalized likelihood ratio test detector. Lang et al. proposed a spatial enhanced pixel descriptor to realize the spatial structure information of the ship target and improve the separability between the ship target and ocean clutter. Leng et al. defined the area ratio invariant feature group to modify the traditional detector. Among them, the constant false alarm rate , , detection method and its improved version are the most widely studied. However, the traditional SAR ship detection method is not very reliable, and it is difficult to achieve accurate detection based on the difference between the hull and background.
Recently, convolutional neural networks (CNNs) have also been developed in object detection owing to the enhancement of deep learning and graphic processing unit (GPU) computing capability. Meanwhile, the detection performance of the SAR ship based on deep CNNs has been significantly improved. In particular, an accurate location is of great significance to SAR ship detection.
Currently, the precise location work mainly focuses on improving the network model, such as proposing a better network architecture or better strategy to extract reliable local features to obtain more accurate boundary regression. Specifically, these works are reflected in the category of the object detection algorithm. The first work divides the algorithm into anchor-based and anchor-free algorithms, which improve detection performance by constantly improving the design of the framework. The second work divides the algorithm into one-stage and two-stage by adjusting the training strategy.
The difference between the anchor-based and anchor-free algorithms lies in the generation method of the proposal boxes. The former generates some proposal boxes based on the anchor. The anchor needs to be manually designed according to the statistical characteristics of the datasets. Current mainstream anchor-based object detection algorithms include Faster R-CNN , RetinaNet , and you only look once (YOLO) , which search proposal boxes through the anchor and finally determine the target position. Then, the latter generates proposal boxes based on key or central points, which tries to eliminate an artificial anchor setting to reduce artificial interference. Current mainstream anchor-free algorithms include fully convolutional one-stage object detector (FCOS) , CornerNet , and CenterNet . In addition to the above differences, their training strategies are pretty much the same, i.e., the proposal boxes will be divided into positive and negative samples using the sample selection method. Finally, positive and negative samples are used for the regression of ground truth. This work is licensed under a Creative Commons Attribution 4.0 License. For more information, see https://creativecommons.org/licenses/by/4.0/ Fig. 1. Proposal boxes around ship targets. The red and blue rectangles are large and small ship targets, respectively. The orange and green rectangles represent proposal boxes and ground truth, respectively. Small targets correspond to fewer proposal boxes, which will be difficult to detect.
The above process demonstrates that if the selected positive samples are very close to ground truth in center distance and shape, boundary regression will converge faster and the prediction accuracy will be higher. For example, the anchor-free FCOS algorithm distributes anchor points evenly in the image according to CNN's downsample rate, and each anchor point predicts ground truth within a certain range. Once the target center point is close enough to an anchor point, the anchor point generates proposal boxes. The advantage of the FCOS algorithm is that the proposal box is closer to ground truth in center distance, but its shape is not accurate. Additionally, the anchor-based RetinaNet algorithm uses an artificial anchor as a proposal box to obtain positive samples with good position and shape. However, the anchor does not necessarily cover targets well, and many small targets correspond to fewer proposal boxes, which will be difficult to detect, as shown in Fig. 1.
The main difference between one-stage and two-stage algorithms is whether proposal boxes carry out subsequent second processing. In the one-stage algorithm, the proposal boxes do not perform preliminary screening, but they are directly used for selecting the sample, leading to the low quality of the positive samples in location and shape. Current mainstream one-stage object detection algorithms include single shot multibox detector (SSD) , RetinaNet, and YOLO. By contrast, the two-stage algorithm first filters out some proposal boxes without appropriate positions and shapes and then uses the remaining proposal box for sample selection. Current mainstream two-stage object detection algorithms include Cascade Region-CNN (Cascade RCNN) , Libra RCNN , and region-based fully convolutional networks (R-FCN) . The former training speed is relatively fast. Meanwhile, the latter has a slow speed but relatively high detection accuracy. This is because, after two-stage, the positive sample is closer to ground truth in position and shape, making up for the low quality of the positive sample. However, the influence of different sample selection methods on detection performance is almost not discussed in either single-stage or two-stage detection algorithms. Our experimental results showed that different sample selection methods influenced the model to select the best-quality positive samples.
In this article, we analyze the anchor-based and anchor-free algorithms, and one-stage and two-stage algorithms. Consequently, we conclude that each algorithm focuses on how to acquire high-quality proposal boxes. Thus, assuming that each algorithm can obtain high-quality proposal boxes, the performance gap between different algorithms will be reduced. However, although improving the network model can improve the quality of the proposal boxes, it will also bring problems, such as the network architecture being difficult to unify and a complex model. Additionally, more precise predictions generally require more model parameters and training time. Thus, it is not the most economical.
A widely overlooked improvement is how to effectively select the positive and negative samples from the mixed proposal box. As long as a remarkable selection strategy is used to select high-quality proposals, it is not necessary to make laborious changes to the network structure. Currently, the mainstream sample selection method is MaxIoUAssign, but this method can only roughly evaluate the quality of the proposal boxes. Max-IoUAssign is not fully competent because of the fixed threshold value and complex proposal box distribution. In view of these situations, Zhang et al. proposed an adaptive training sample selection (ATSS) method to investigate the differences between anchor-based and anchor-free algorithms. It adaptively adjusts the threshold according to the statistical characteristics of the proposal box intersection of union (IoU). Additionally, Zhu et al. proposed the auto-assign that adopts a confidence weighting module to modify the positive and negative confidences of the locations in the spatial and scale dimensions. Zhang et al. proposed a free anchor that adopts a learning-to-match approach and selects positive and negative samples through network training, thus eliminating manual design. Kim and Lee proposed probabilistic anchor assignment that fits a Gaussian mixed distribution according to the training state of the model and uses the distribution to adaptively separate proposal boxes into positive and negative samples. However, these methods do not consider the validity of the IoU-based evaluation criteria, which is the problem identified in this article.
We found that using IoU to evaluate proposal boxes is very rough. So, IoU does not describe the importance of a proposal box uniquely and does not effectively describe some of the situations that often occur in a sample selection, as shown in Fig. 2(a). Intuitively, because B is more like the ground truth, we should choose B over A. However, their IoU is near, which means that they are the same. Fig. 2(b) shows that A is the proposal box completely covered by ground truth. Because A only contains a part of the object, it is not easy to predict the entire target. However, the proposal box B consists of part of ground truth and background. It is difficult to predict the whole object based on part of ground truth, but background information can assist the network model to accurately predict the coordinates. Therefore, A should be abandoned, and B should be selected as a positive sample. However, A will have a larger IoU than B. These situations result in the suboptimal performance of the model. To select high-quality positive samples from the proposal box, we proposes a novel sample selection strategy called adaptively center-shape sensitive sample selection (AC4S). Compared with ATSS, autoassign, and other methods, our method not only relieves the disadvantages of the conventional MaxIoUAssign method but also does not add new hyperparameters and does not need to modify the network structure. First, it uses the shape similarity and IoU between the proposal boxes and the ground truth as the evaluation criteria of sample quality. Compared with the MaxIoUAssign method, our method can refine the evaluation of sample quality, thus improving the quality of positive samples. Second, in order to balance the influence of shape similarity and IoU, we introduce the center distance between the proposal box and the ground truth as the weight factor. 
Additionally, owing to the few positive samples of small targets, we adopted adaptive thresholds to increase the number of positive samples of small targets and reduced the number of positive samples of large targets. Furthermore, we conducted extensive experiments on the benchmark SAR ship detection dataset (SSDD) and high-resolution SAR images datasets (HRSID) datasets. The experimental results verified the effectiveness of the proposed method.
The main contributions of our work can be summarized as follows.
1) By observing the experimental phenomena of the current mainstream sample selection methods, we conducted a detailed analysis and found that the IoU-based evaluation criteria in sample selection are rough and the samples corresponding to different sizes of targets are unbalanced. 2) To solve the common problems in the current mainstream positive and negative sample selections, we propose the AC4S method. By using basic data from datasets, such as center location and shape similarity, the proposed method can select high-quality positive samples from a large number of proposal boxes without increasing model parameters. At the same time, fixed thresholds were replaced with adaptive ones to balance the samples of different targets.
3) We conducted extensive experiments on the benchmark SSDD and HRSID datasets to prove the effectiveness of the proposed method. The experimental results confirmed that the proposed method is effective. The rest of this article is organized as follows. Section II illustrates the proposed method in detail. Next, the experimental results on several dataset and the corresponding analysis are provided in Section III. Finally, Section IV concludes this article.
II. METHODOLOGY
This section introduces the proposed AC4S method, which is divided into three components: 1) center-distance evaluation criteria; 2) shape-similarity evaluation criteria; 3) adaptive threshold. First, we introduce the current mainstream MaxIoUAssign method and the method proposed. Second, we analyze IoU. Next, we introduce the construction of the centerdistance evaluation criteria. Then, we present the shapesimilarity evaluation criteria. Finally, we introduce the structure of the adaptive threshold.
A. MaxIoUAssign
MaxIoUAssign method is one of the most widely used positive and negative sample selection methods. It is based on a fixed threshold, i.e., the IoU threshold between proposal boxes and ground truth. First, IoU between proposal boxes and ground truth is calculated one by one, and ground truth corresponding to the maximum IoU is taken as the corresponding target of the proposal box. Once the maximum IoU is greater than the fixed IoU threshold, it is regarded as a positive sample of the target; otherwise, it is a negative sample.
This method is generally suitable for most methods, including Faster-RCNN, YOLO and RetinaNet. However, this approach also has some inherent shortcomings. First, the quality of proposal boxes is not solely determined by IoU, which leads to the fact that even if some proposal boxes have the same IoU, it does not mean that they all have the same quality. Therefore, we should consider the quality of proposal box from many aspects.
Additionally, the proposal box corresponding to a small target is less than that of a large target. Thus, the IoU of the corresponding proposal box is inevitably low, so the fixed IoU threshold is not very friendly to a small target, and it may even have no positive samples. In this case, some small targets will not be detected because they cannot participate in the training, directly leading to the algorithm being not sensitive to small targets. Therefore, an appropriate sampling method should be adopted to compensate for the imbalance between large and small targets.
In this article, we used a Faster RCNN as the baseline method to verify the effectiveness of the proposed method, and its structure is shown in Fig. 3. As a two-stage target detection method, we first use the MaxIoUAssign method through RPN to extract RoI and obtain more accurate proposal boxes in the first stage. Next, RoI uses MaxIoUAssign again to extract positive and negative samples to calculate the loss function in the second stage. It is worth noting that the target of MaxIoUAssign in the first stage is an anchor, which is manually set. Because the anchor in different positions has the same aspect ratio, it leads to failure to reflect the role of shape similarity. In view of this phenomenon, we do not improve MaxIoUAssign in the first stage, but we focus on the second stage. In the second stage, RoI will have an irregular position and shape similarity after the adjustment in the first stage. Therefore, the proposed method can be used to replace MaxIoUAssign in the second stage.
B. Analysis of IoU
To illustrate the problems with MaxIoUAssign, we explain how it works and why it is important. IoU is calculated according to the location information of the proposal box (x 1 , y 1 , x 2 , y 2 ) and ground-truth(x 1 , y 1 , x 2 , y 2 ). Its calculation formula is as follows: where min(·) and max(·) represent the maximum and minimum values, respectively. ↑ indicates copying the numerator above. To explore how the IoU function is affected by center distance and shape, ( where the results of the transformation represent the central x, y coordinates, width, and height of a box. We continue to transform the components in formula (1) as shown There are two possible values of min(x + w, x + w) and max(x − w, x − w). We present all possible situations to analyze how the center distance and shape similarity affect this formula. All possible situations are as follows: In the case of formulas (3) and (6), the central x, y coordinates of the proposal box and ground truth will not participate in calculating IoU. This detailed analysis shows that in the case of formulas (3) and (6), the center points of the proposal box and ground truth have no obvious positional relationship. At this time, IoU can no longer meet the needs of positive and negative sample selections. Therefore, the proposed method adds an evaluation criterion to compensate for the above vacancy. It is obvious that the smaller the center distance L cen is, the faster the boundary regression will converge.
We continue to study the influence of shape on IoU. For the convenience of our calculation, by assuming that L cen has reached the optimal: (x − x ) = (y − y ) = 0, then formula (1) will be converted to formula (7). The results in the following: Formula (7) shows that IoU is determined by w w and h h , namely, shape similarity L shape , indicating that L shape is an important criterion for evaluating the quality of proposal boxes. Therefore, to obtain higher quality positive samples, L shape is taken as an important evaluation criterion.
We comprehensively evaluate the quality of the proposal box from three aspects: L cen, L shape, and IoU.
For large targets, there are many proposal boxes that meet the requirements of the IoU threshold, and their center point and shapes are rich. In general, the larger the L cen is, the more attention is paid to L shape . On the contrary, when L cen is small, comprehensive consideration should be given to the IoU of the proposal boxes because shape similarity will be less important. For small targets, owing to the small number of its proposal boxes, the method of using a fixed threshold will lead to the imbalance of samples corresponding to small targets, affecting the training of small targets. Therefore, a method to balance the size of the target sample should be considered.
To solve the above problem, we propose the AC4S method, and its process is shown in Algorithm 1. We inherited and transcended MaxIoUAssign method. We also investigated the influence of center distance and shape similarity on experimental results.
C. Center Distance
Center distance is a measure of the difference between the positions of two boxes. Considering the boundary regression task of target detection, the closer the center point of the proposal boxes is to the center point of ground truth, the closer the predicted value is to 0, making it easier for the boundary regression to converge to the label. Therefore, when selecting positive and negative samples, center distance is an important criterion for evaluating positive and negative samples. In particular, the proposal boxes around ground truth must be considered. We designed an evaluation function as a criterion to calculate the distance between two center points. Its form is shown in formula (8). Intuitive understanding is shown in Fig. 4.
Here, L cen represents center distance. We use the L cen evaluation criteria to select proposal boxes. Fig. 5 shows that the selected proposal boxes are concentrated near the label.
The value of the evaluation function is always greater than or equal to 0 and less than 1, which meets our basic properties for an evaluation function. Fig. 5 shows that the L cen is closer to 1 when the center point is closer to the label. It is worth noting that when the center point of the proposal box is not in ground truth, L cen is set to 0. We make up for the situation in formulas (3) and (6) by setting the center-distance evaluation criteria, which play an important role in selecting high-quality positive and negative samples.
D. Shape Similarity
To determine the shape distribution of the proposal boxes, we selected three images with different target characteristics from the SSDD dataset to observe their distribution, including small, large, and dense targets. Their statistical characteristics regarding L shape are shown in Fig. 6. Faster RCNN collected a total of 600 RoIs, and it can be seen from Fig. 6 that different targets vary greatly. In A, RoI's L shape is mainly concentrated in 0.2-0.5. In B, RoI's L shape is mainly concentrated in 0-0.3, and in C, RoI's L shape is mainly concentrated in 0.6-1. These results showed that the shape of different targets has different influences on sample selection. Therefore, it is necessary to take L shape as a separate evaluation criterion for the sample selection strategy.
An anchor can roughly cover all targets in the image by setting different positions and aspect ratios, and each target can usually find the anchor with a close distance. Therefore, even if the center point is very close to the center of ground truth, it cannot be directly regarded as a positive sample. We also need to pay attention to another important factor, shape similarity, which refers to height and width ratios between the proposal box and ground truth.
Shape similarity is also important for selecting positive and negative samples. From boundary regression loss function (9), it can be found that the model tries to predict the ratio of height Δh and width Δw between the proposal boxes and ground truth. As Δh and Δw usually carry out zero initialization, it can be determined from formula (9) that if log w w and log h h are small, they converge very quickly and are more stable after convergence.
Loss wh reg = smooth_l 1 Δw, log (9) It should be noted that in order to make the loss function converge faster, we must make L shape consistent with Loss wh reg . Therefore, referring to the structure of the boundary regression loss function, the evaluation function we designed is shown Here, we use sqrt to slow down the drastic changes caused by the product. This evaluation method can limit L shape within the range of 0 to 1. As shown in formula (10), when the shape between the proposal boxes and ground truth is similar, the L shape value will approach 1; conversely, it will be near 0. Therefore, the performance of the proposal boxes in shape similarity can be evaluated using this method.
E. Quality Score
We study the performance of L shape and L cen on different targets. We conducted experiments on small, large, and dense targets on the SSDD dataset, and the experimental results are shown in Fig. 7. A total of 600 RoIs were collected using Faster RCNN. L shape and L cen of each target were used as coordinate axis labels. Mask area is used as the proposal region for positive samples because the algorithm usually adopts the targets with large L shape and L cen as positive samples. However, the RoI of different targets in this region varies greatly, which is not conducive to the balance of training samples. Therefore, it is not enough to select samples only from L shape and L cen . Thus, we evaluate the factors influencing the IoU, L shape and L cen functions. Specifically, when the center point of the proposal box is close to ground truth, a large weight is added to L shape . When the center distance between the two boxes is far, we assign a large weight to IoU to comprehensively consider the position and shape of the proposal box. Therefore, we directly take L cen as the weight. Then, our quality score (QS) evaluation function is shown as follows: To further explore the difference between the proposed method and MaxIoUAssign, we conducted an experiment on the SSDD dataset, and the results are shown in Fig. 8. This figure shows the results obtained using QS and IoU for the same L shape and L cen , respectively. When L shape and L cen are small, the difference between IoU and QS is not large because when L cen is 0, QS will degenerate into IoU. With the increase in L shape and L cen , the difference between IoU and QS gradually becomes larger. The gap boundary between high-quality and low-quality RoIs becomes clearer by replacing IoU with QS, facilitating the separation of positive and negative samples.
F. Adaptive Threshold
To eliminate artificial interference and balance the positive and negative samples of different sizes, we adopt an adaptive threshold to replace the fixed threshold. Fig. 9 shows that the median of QS is generally within the range of 0 to 0.4. The score distribution of large targets is scattered, and the standard deviation is large. Thus, there are more proposal boxes with a high score, and the threshold value should be appropriately increased to obtain fewer positive samples. Small targets have a small QS, a concentrated distribution, and a small standard deviation, so proposal boxes with a high score will become fewer, and the threshold value should be appropriately reduced to obtain more positive samples. Therefore, we use the standard deviation of QS as the adaptive factor of the threshold, and the details are shown in formula (12). The threshold will be adjusted adaptively according to the differences of the target proposal boxes so that small targets can also select high-quality positive samples: $\mathrm{thre} = \alpha + \mathrm{std}(\mathrm{score})$. (12) Here, $\alpha$ represents a hyperparameter, and $\mathrm{std}(\cdot)$ represents the standard deviation. (Fig. 9. Distribution of QS on small, large, and dense targets in the image. Black dots represent RoI data. The black line in each box represents the median of QS; the 25%-75% band marks the RoIs whose scores fall in the 25%-75% range.)
III. EXPERIMENT
In this section, to verify the validity of the proposed method, we conducted extensive experiments on SSDD and HRSID datasets. First, we introduced the dataset, evaluation criteria, and experimental environment. Then, to compare the differences between MaxIoUAssign and our method, we conducted experiments on the SSDD dataset and analyzed their differences. Next, we performed ablation experiments to explore the setting of the hyperparameters in the evaluation criteria. Finally, the proposed method was compared with several state-of-the-art methods on the SSDD and HRSID datasets.
A. Dataset
To prove the superiority of this method, we conducted extensive experiments on the SSDD and HRSID datasets.
SSDD is the first SAR ship dataset established in 2017. It has been widely used by many researchers since its publication and has become the baseline dataset for SAR ship detection. The SSDD dataset contains many scenarios and ships and involves various sensors, resolutions, polarization modes, and working modes. Additionally, the label file settings of this dataset are the same as those of the mainstream PASCAL visual object classes (VOC) dataset, so training of the algorithms is convenient.
In using the SSDD dataset, researchers used to randomly divide training, validation, and test datasets. These inconsistent divisions often result in the absence of common evaluation criteria. As researchers gradually discovered this problem, they began to establish uniform training and test datasets. Currently, 80% of the total dataset are training datasets, and the remaining 20% are test datasets. There are 1160 images in the SSDD dataset. Therefore, the number of images in the training dataset is 921, and the number of images in the test dataset is 239. For further refinement, images whose names end with digits one and nine are set as test datasets. In this way, the performance of various detection algorithms can be evaluated in a targeted way.
The HRSID dataset is a dataset released by University of Electronic Science and Technology of China UESTC in January 2020. HRSID is used for ship detection, semantic segmentation, and instance segmentation tasks in high-resolution SAR images. The dataset contains 5604 high-resolution SAR images and 16 951 ship instances. Its label file settings are the same as those of the mainstream of the Microsoft common objects in context (MS COCO) dataset.
B. Evaluation Criteria
To evaluate the detection performance of the algorithm model, we adopted the evaluation criteria $AP$, $AP_{50}$, $AP_{75}$, $AP_s$, $AP_m$, and $AP_l$ from the MS COCO dataset. Average precision ($AP$) is the area under the precision-recall curve. $AP$ is calculated from precision and recall, which are shown in formula (13). It is important to note that $AP$ is the mean value over IoU = 0.50 : 0.05 : 0.95 (primary challenge measure), $AP_{50}$ is the $AP$ with IoU = 0.5 (PASCAL VOC measure), and $AP_{75}$ is the $AP$ with IoU = 0.75 (strict measure). $AP_s$, $AP_m$, and $AP_l$ represent the $AP$ of small, medium, and large targets, respectively, where a small target has an area less than $32^2$ pixels, a medium target has an area between $32^2$ and $96^2$ pixels, and a large target has an area greater than $96^2$ pixels: $P = \frac{TP}{TP + FP} \times 100\%$, $R = \frac{TP}{TP + FN} \times 100\%$. (13) Here, $TP$ (true positive) is the number of ships correctly detected, $FP$ (false positive) is the number of detections incorrectly classified as ships, and $FN$ (false negative) is the number of ships that were missed, i.e., incorrectly classified as negative. $AP$ is defined as the area under the precision-recall curve, where $P$ represents precision and $R$ represents recall. In addition, floating point operations (FLOPs) and Params are adopted in this article to evaluate the computational performance and the training parameters. FLOPs can be used to measure the complexity of the model. At the same time, frames per second (FPS) is adopted in this article to evaluate the running speed. FPS measures the number of images processed per second, or equivalently the time required to process one image, to assess the detection speed. The shorter the time, the faster the speed.
C. Experimental Settings
All experiments were implemented in PyTorch 1.6.0, CUDA 11.2, and cuDNN 7.4.2 with an Intel(R) Xeon(R) Silver 4110 CPU and an NVIDIA GeForce TITAN RTX GPU. The PC operating system is Ubuntu 18.04. Table I presents the computer and deep learning environment configuration for our experiments.
The algorithm model in this article is based on the MMDetection framework. We trained the proposed method based on Faster RCNN using the stochastic gradient descent algorithm for 12 epochs, with a total of two images per small batch.
The initial learning rate was set to 0.01, the weight decay was 0.0001, and the momentum was 0.9. Our code is available at https://github.com/LITTERWWE/AC4S.
D. Ablation
After the analysis in Section II, we determined three influencing factors that distinguish positive and negative samples: 1) IoU ; 2) L cen ; 3) L shape . In this section, we will study the influence of different influencing factors on the experimental results.
1) Selection of Weight: To verify the influence of different parameters on function construction, we set different weights for formula (16) on the SSDD dataset for the ablation experiment. Additionally, instead of using the adaptive threshold, we fixed the threshold at 0.5. The detection performance of the algorithm is presented in Table II score First, the fifth row of Table II shows the experimental results of the original algorithm Faster RCNN. It can be clearly seen from Table II that after adding L shape , a part of the hyperparameter setting can achieve better results than the original Faster RCNN algorithm in AP 75 , AP s , AP m , AP l , and Recall. We can also see that the detection performance of L cen is better than manual settings and the original Faster RCNN algorithm. Finally, to further illustrate the superiority of the L cen method to manual settings, we draw a P R curve, as shown in the Fig. 10. L cen TABLE IV COMPARISON OF TRAINING AND INFERENCE TIME TABLE V DETECTION RESULTS (black line) is superior to other manual setting methods and the original Faster RCNN algorithm at different recall rates. Meanwhile, when recall is greater than 0.8, our curve will decline more smoothly. These show that L cen can replace the manual setting and original Faster RCNN algorithm.
2) Effect of Adaptive Threshold: Because the fixed threshold is not friendly to the small target, adaptive positive and negative sample selections are performed in this article. Our adaptive threshold is similar to the method in ATSS. However, the difference is that ATSS takes the sum of the mean and standard deviation of IoU as the adaptive threshold. Meanwhile, the proposed method only uses standard deviation because the mean score is very small, leading to a small adaptive threshold and an excessive number of positive samples, thus affecting the training results.
We conducted extensive experiments on the SSDD dataset. The experiment was divided into two parts: 1) The first part did not use standard deviation; 2) the second part used standard deviation. The experimental results are shown in Table III. In the first part, the threshold was set from 0.6 to 0.8 due to the large mean value of L shape . In the second part, we lowered α after using the variance because the variance was in the range of 0∼0.4.
By comparing the effects of the two parts, we found that AP 50 was higher when variance was used as the adaptive threshold. In particular, the method achieved its best improvement when α was equal to 0.5. We performed a detailed analysis of the experimental results. Because a small target has a small standard deviation, the proposed adaptive threshold can relatively reduce the threshold of the small target and can increase the number of positive samples with the small target. Additionally, with a large standard deviation for a large target, increasing the threshold of the large target can help the large target select positive samples of higher quality. Therefore, the detection performance can be improved through an adaptive threshold. According to the above analysis, our adaptive threshold method can play an effective role in sample selection. In the following work, thre is set to 0.55 and variance is used. Using this configuration, we achieved 96.3% AP on the SSDD dataset.
E. MaxIoUAssign versus Our Method
We selected images from three scenarios, as shown in Fig. 11, where Figs. 11(1), (2), and (3) represent small, large, and dense targets, respectively. We observe the differences between the proposed method and MaxIoUAssign of RoIs on these images in three forms, as follows. Fig. 11(b) represents the scatter diagram of QS (red sphere) and IoU (blue sphere) at different positions of RoI in Faster RCNN. X and Y represent the coordinates of RoIs on the image, and Z represents the QS or IoU value. As the figure shows, the red sphere's maximum value is larger than that of the blue sphere, while the minimum value is almost the same. This phenomenon shows that, on the one hand, high-quality RoIs show higher QS scores than IoU. On the other hand, low-quality RoIs performed almost identically on QS and IoU. It will lead to some high-quality RoIs standing out when QS is used because the gap between them and low-quality RoIs becomes larger, thus facilitating the screening of high-quality RoIs. Then, our method creates a clear dividing line between RoI QSs, which makes it easier for the model to select high-quality RoIs.
The phenomenon mentioned above may be difficult to see in scatter plots, so we smoothed the scatter diagram, and the result is shown in Fig. 11(c). In the figure, the colored surfaces are the IoU distribution surfaces of all RoIs. In contrast, the gray surfaces are the QS distribution surfaces of RoI. It is evident from the figure that the QS surface is significantly higher than the IoU surface at the center point. In addition, the closer to the central point, the more significant the gap between QS and IoU. However, the farther away from the center, the smaller the gap between QS and IoU, and finally almost overlapped.
It should be noted that the value range of both QS and IoU is [0, 1]. Although the difference between QS and IoU seems not evident in Fig. 11(c), taking Fig. 11(1)(c) as an example, the difference between QS and IoU at the central point is between 0.15 and 0.2. This gap may seem small visually, but it is enough for the neural network to reliably select high-quality RoIs, thus making the model training more effective.
Finally, look at the heat map. The darker the color, the larger the QS or IoU. Take Fig. 11(1)(d) and (1)(e) as an example. Fig. 11(1)(d) is the proposed method. The color at the center point is red, while the position away from the center point is white. Fig. 11(1)(e) is the MaxIoUAssign method. The color at the center point is red, and the position away from the center point is light red. The contrast of color shows that our method will make the sample quality appear more transparent.
F. Training and Inference Times
In order to prove that the proposed method hardly reduces the training speed and inference time while improving the detection performance, two recognized indicators, FLOP and Params, are adopted to evaluate the computational performance and complexity of the model. As for the training time, we counted the training time of our method and the baseline method Faster RCNN on SSDD and HRSID. For the inference time, FPS was used as the evaluation criterion. The results are shown in the Table IV. As seen from the table, Flops and Params corresponding to the two methods are the same. Meanwhile, on the SSDD and HRSID datasets, the training time of our method is approximately 30 s longer than that of the baseline method, which is almost negligible. Experimental results show that the proposed method has little effect on the training time and does not increase the model's computational load and training parameters.
In addition, there is almost no difference in FPS between the two methods, which indicates that the inference time of the two methods is almost the same, proving that the proposed method hardly affects the inference time.
G. Experiment on SSDD
To prove the advancement of our method, we conducted extensive experiments on SSDD dataset. In this section, we still Table V shows the test results on SSDD. As can be seen from the table, compared with the original Faster RCNN algorithm, our method improves recall by 1.5%, AP 50 by 1.7%, AP s , AP m , and AP l by 1.1%, 1.7%, and 17.3%, respectively. In addition, by comparing our method with the current mainstream algorithms in target detection, we find that our method is almost superior to other algorithms. Specifically, compared with dynamic RCNN, our method improves recall by 2.1% for AP 50 , 0.8% for AP s , and 0.4% for AP l . Compared with Cascade RCNN, our method improved recall by 2.9%, AP 50 by 5.2%, AP s , AP m and AP l by 1.7%, 3.6% and 8.2%, respectively. Compared with NAS FCOS, our method improved recall by 9.0%, AP 50 and AP 75 by 11.5% and 1.7%, AP s , AP m and AP l by 3.0%, 3.6% and 8.2%, respectively. Fig. 12 also shows that the proposed method is superior to the other algorithms under different recalls. When a recall is greater than 0.8, the decline in the proposed method is more stable than that of dynamic RCNN and NAS FCOS. The first row in Fig. 13 shows that the proposed method can detect the target accurately and does not have the problem of repeated detection compared with Faster RCNN and NAS FCOS. Meanwhile, the second row in the figure shows that the proposed method avoids repeated detection and error detection compared with Faster RCNN and Dynamic RCNN. The third row shows that although none of the three methods can completely detect the ship, the error rate of the proposed method is lower than that of other algorithms. The above analysis demonstrates that all algorithms have problems of repeated, error, and missed detections, but the detection accuracy of the proposed method is relatively high. Therefore, the proposed method can achieve a better detection effect than the dynamic RCNN and NAS FCOS algorithms.
H. Contrast Experiment With Other Sample Selection Methods
To comprehensively evaluate the performance of the method in sample selections, we compared it with other sample selection methods on the HRSID dataset, including ATSS and AutoAssign algorithms. The results are shown in Table VI. Because AutoAssign is based on FCOS, we did not migrate it to Faster RCNN. ATSS and the proposed method were applied to the Faster RCNN algorithm. These methods focus on sample selection, but their operation and ideas are different, so it has the significance of comparison.
As presented in Table VI, compared with the Faster RCNN algorithm, our AP 50 and AP 75 improved by 0.9% and 0.4%, respectively. Although the effect on HRSID was not as obvious as that on SSDD, it still improved the original algorithm. Additionally, compared with the mainstream ATSS method, the proposed method improved AP 50 , AP 75 , AP s , and AP m by 8.1%, 0.9%, 6.9%, and 3.4%, respectively, on the HRSID dataset. However, compared with the current mainstream AutoAssign, although our method was 1.3% lower in AP 50 , its AP 75 was 5.8% higher, AP s was 5.2% higher, and AP m was 0.3% higher. Moreover, the algorithm complexity of AutoAssign is much higher than that of the proposed method. To sum up, our proposed method is advanced in sample selection.
I. Experiment on HRSID
To verify the robustness of the proposed algorithm, we conducted extensive experiments on the HRSID dataset. In this section, we still take anchor-based Faster RCNN as the baseline and compare it with two other CNN-based methods: 1) Dynamic RCNN; 2) NAS FCOS. Table VII presents the test results on the HRSID dataset. As presented in Table VII, compared with the original Faster RCNN algorithm, our method improved recall, AP 50 , AP s , AP m , and AP l by 1.5%, 1.7%, 1.1%, 1.7%, and 17.3%, respectively. Additionally, our method is superior to current mainstream algorithms in target detection. Specifically, compared with Dynamic RCNN, our method improved recall by 2.1% for AP 50 , 0.8% for AP s , and 0.4% for AP l . Compared with NAS FCOS, our method improved recall, AP 50 , AP 75 , AP s , AP m , and AP l by 9.0%, 11.5%, 1.7%, 3.0%, 3.6%, and 8.2%, respectively.
To intuitively observe the effect of the proposed method, we marked the detection results in the image, as shown in Fig. 14. HRSID is larger than the SSDD dataset, and there are more dense small targets, so the detection is more difficult, and the detection effect is worse. However, the proposed method is still superior to the other three detection algorithms. Fig. 14 shows that other algorithms often have an error and repeated detections for dense small targets, but the proposed method performs better than them. Fig. 15 shows that the results of the proposed method are better than those of other algorithms at different recall rates, and our curves can almost cover other curves. Therefore, the above analysis demonstrates that the proposed method can also show relatively good effects on the HRSID dataset.
IV. CONCLUSION
In this article, we proposed a new sample selection algorithm for SAR ship detection. To select high-quality proposal boxes whose shape is similar to ground truth, we retained IoU and introduced shape similarity as the evaluation criterion of sample quality. Center distance was used as a weight to balance IoU and shape similarity, which was conducive to obtaining proposal boxes of higher quality. Furthermore, to avoid the fixed threshold, the standard deviation of QS was taken as the variable to regulate the threshold, which promoted the balance of samples. The experimental results showed that the proposed AC4S can effectively improve the performance of target detection and is better than other algorithms. |
<reponame>electro89/frontend
import { Component, OnInit } from "@angular/core";
import { GlobalVarsService } from "../../../global-vars.service";
import { BackendApiService, ProfileEntryResponse, TutorialStatus } from "../../../backend-api.service";
import { AppRoutingModule } from "../../../app-routing.module";
import { Title } from "@angular/platform-browser";
@Component({
  selector: "buy-creator-coins-tutorial",
  templateUrl: "./buy-creator-coins-tutorial.component.html",
  styleUrls: ["./buy-creator-coins-tutorial.component.scss"],
})
export class BuyCreatorCoinsTutorialComponent implements OnInit {
  static PAGE_SIZE = 100;
  static WINDOW_VIEWPORT = true;
  static BUFFER_SIZE = 5;

  AppRoutingModule = AppRoutingModule;

  // True while the creator lists (or the user's own profile) are being resolved.
  loading: boolean = true;

  // Well-known creators surfaced in the "invest in others" step.
  topCreatorsToHighlight: ProfileEntryResponse[];
  // Up-and-coming creators surfaced in the "invest in others" step.
  upAndComingCreatorsToHighlight: ProfileEntryResponse[];
  // The logged-in user's own profile, used when investInYourself is true.
  loggedInUserProfile: ProfileEntryResponse;
  // When true, the tutorial instructs the user to buy their own coin.
  investInYourself: boolean = false;

  constructor(
    public globalVars: GlobalVarsService,
    private backendApi: BackendApiService,
    private titleService: Title
  ) {}

  ngOnInit() {
    this.titleService.setTitle("Buy Creator Coins Tutorial - DeSo");

    // If the user just completed their profile, we instruct them to buy their own coin.
    if (this.globalVars.loggedInUser?.TutorialStatus === TutorialStatus.CREATE_PROFILE) {
      this.loggedInUserProfile = this.globalVars.loggedInUser?.ProfileEntryResponse;
      this.investInYourself = true;
      this.loading = false;
      return;
    }

    this.backendApi
      .GetTutorialCreators(this.globalVars.localNode, this.globalVars.loggedInUser.PublicKeyBase58Check, 3)
      .subscribe(
        (res: {
          WellKnownProfileEntryResponses: ProfileEntryResponse[];
          UpAndComingProfileEntryResponses: ProfileEntryResponse[];
        }) => {
          // Do not let users select themselves in the "Invest In Others" step.
          if (res.WellKnownProfileEntryResponses?.length) {
            this.topCreatorsToHighlight = res.WellKnownProfileEntryResponses.filter(
              (profile) => profile.PublicKeyBase58Check !== this.globalVars.loggedInUser?.PublicKeyBase58Check
            );
          }
          if (res.UpAndComingProfileEntryResponses?.length) {
            this.upAndComingCreatorsToHighlight = res.UpAndComingProfileEntryResponses.filter(
              (profile) => profile.PublicKeyBase58Check !== this.globalVars.loggedInUser?.PublicKeyBase58Check
            );
          }
          this.loading = false;
        },
        (err) => {
          console.error(err);
          // BUGFIX: the loading flag was previously never cleared on error,
          // leaving the tutorial stuck in its loading state forever.
          this.loading = false;
        }
      );
  }
}
|
/**
* Document view exporter that does not export virtual nodes.
*/
public class HippoDocumentViewExporter extends DocumentViewExporter {
public HippoDocumentViewExporter(Session session, ContentHandler handler, boolean recurse, boolean binary) {
super(session, handler, recurse, binary);
}
@Override
protected void exportNodes(Node node) throws RepositoryException, SAXException {
if (!JcrUtils.isVirtual(node)) {
super.exportNodes(node);
}
}
} |
package owolabi.tobiloba.measurementrecorder;
import android.content.Context;
import android.database.Cursor;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.CursorAdapter;
import android.widget.ImageView;
import android.widget.TextView;
import owolabi.tobiloba.measurementrecorder.database.RecordContract.RecordEntry;
/**
 * Cursor adapter that renders one measurement record per row of a list view,
 * showing the client's name and a gender-dependent image.
 *
 * Created by TOBILOBA on 10/10/2017.
 */
public class RecordCursorAdapter extends CursorAdapter {

    // Flags are unused (0); the adapter relies on the caller to swap cursors.
    public RecordCursorAdapter(Context context, Cursor c) {
        super(context, c, 0 /* flags */);
    }

    @Override
    public View newView(Context context, Cursor cursor, ViewGroup parent) {
        // Inflate a list item view using the layout specified in list_item.xml
        return LayoutInflater.from(context).inflate(R.layout.list_item, parent, false);
    }

    @Override
    public void bindView(View view, Context context, Cursor cursor) {
        // Find individual views that we want to modify in the list item layout
        TextView nameTextView = view.findViewById(R.id.name);
        ImageView displayImage = view.findViewById(R.id.image);

        // Find the columns of record attributes that we're interested in
        int nameColumnIndex = cursor.getColumnIndex(RecordEntry.COLUMN_CLIENT_NAME);
        int genderColumnIndex = cursor.getColumnIndex(RecordEntry.COLUMN_CLIENT_GENDER);

        // Read the client attributes from the Cursor for the current record
        String clientName = cursor.getString(nameColumnIndex);
        int clientGender = cursor.getInt(genderColumnIndex);

        // Check the client gender and set the appropriate image
        // (any value other than FEMALE/UNKNOWN falls through to the male image).
        if (clientGender == RecordEntry.GENDER_FEMALE){
            displayImage.setImageResource(R.drawable.family_mother);
        } else if (clientGender == RecordEntry.GENDER_UNKNOWN){
            displayImage.setImageResource(R.drawable.color_black);
        } else {
            displayImage.setImageResource(R.drawable.family_father);
        }

        // Update the TextViews with the attributes for the current record
        nameTextView.setText(clientName);
    }
}
|
// NOTE: set the variable indicators and set terminal derivatives in _backward_info
// before calling backward(); this routine validates those buffers and then
// dispatches to the integrator-specific adjoint pass (BDF1 or BDF2).
void Simulation::backward() {
    // forward() records the initial state plus one entry per step in _q_his;
    // with <= 1 entries there are no steps to differentiate.
    if (_q_his.size() <= 1) {
        std::cerr << "[Error] Please call simulation.forward() before simulation.backward()." << std::endl;
        throw "error";
    }

    // Number of simulated time steps (excluding the initial state).
    int T = _q_his.size() - 1;

    // Each optional seed gradient (enabled via its _flag_*) must be sized to
    // the matching number of degrees of freedom.
    if (_backward_info._flag_q0) {
        if (_backward_info._df_dq0.size() != _ndof_r) {
            throw_error("_backward_info._df_dq0.size != _ndof_r");
        }
    }
    if (_backward_info._flag_qdot0) {
        if (_backward_info._df_dqdot0.size() != _ndof_r) {
            throw_error("_backward_info._df_dqdot0.size != _ndof_r");
        }
    }
    if (_backward_info._flag_p) {
        if (_backward_info._df_dp.size() != _ndof_p) {
            throw_error("_backward_info._df_dp.size != _ndof_p");
        }
    }
    if (_backward_info._flag_u) {
        // Controls exist at every step, hence _ndof_u * T entries.
        if (_backward_info._df_du.size() != _ndof_u * T) {
            throw_error("_backward_info._df_du.size != _ndof_u * T");
        }
    }

    // The per-step state and variable gradients are always required.
    if (_backward_info._df_dq.size() != _ndof_r * T) {
        throw_error("_backward_info._df_dq.size != _ndof_r * T");
    }
    if (_backward_info._df_dvar.size() != _ndof_var * T) {
        throw_error("_backward_info._df_dvar.size != _ndof_var * T");
    }

    // Time the integrator-specific adjoint sweep and accumulate it into the report.
    auto t_backward_start = clock();
    if (_options->_integrator == "BDF1") {
        backward_BDF1();
    } else if (_options->_integrator == "BDF2") {
        backward_BDF2();
    }
    _time_report._time_backward += clock() - t_backward_start;
}
<gh_stars>0
package clog
import (
"flag"
"fmt"
"os"
"github.com/google/logger"
"github.com/dakraid/LooM/logview"
)
const logPath = "output.log"
var verbose = flag.Bool("verbose", false, "print info level logs to stdout")
// InitLogger initializes the underlying google/logger backend. It must be
// called once before any of the logging wrappers in this package are used:
// this package is just a proxy to google/logger.
func InitLogger() {
	flag.Parse()

	// Start each run with a fresh log file; a file that does not exist yet
	// is not an error worth reporting.
	if err := os.Remove(logPath); err != nil && !os.IsNotExist(err) {
		logger.Errorf("Failed to clear log file: %v", err)
	}

	lf, err := os.OpenFile(logPath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0660)
	if err != nil {
		logger.Fatalf("Failed to open log file: %v", err)
	}
	// BUGFIX: the original code deferred lf.Close(), which closed the file as
	// soon as InitLogger returned — every subsequent log call then wrote to a
	// closed handle. The logger keeps the file open for the process lifetime.

	logger.Init("OutputLog", *verbose, true, lf)
}
// Info logs at info level and mirrors the message into the log view.
func Info(v ...interface{}) {
	logview.AddEntry(fmt.Sprint(v...))
	// BUGFIX: re-spread the variadic args; passing the slice `v` as a single
	// argument logged it as one []interface{} value (e.g. "[a b]").
	logger.Info(v...)
}
// Warning logs at warning level and mirrors the message into the log view.
func Warning(v ...interface{}) {
	logview.AddEntry(fmt.Sprint(v...))
	// BUGFIX: re-spread the variadic args instead of passing the slice itself.
	logger.Warning(v...)
}
// Error logs at error level and mirrors the message into the log view.
func Error(v ...interface{}) {
	logview.AddEntry(fmt.Sprint(v...))
	// BUGFIX: re-spread the variadic args instead of passing the slice itself.
	logger.Error(v...)
}
// Fatal logs at fatal level (terminating the process) and mirrors the
// message into the log view first.
func Fatal(v ...interface{}) {
	logview.AddEntry(fmt.Sprint(v...))
	// BUGFIX: re-spread the variadic args instead of passing the slice itself.
	logger.Fatal(v...)
}
// Infof is the formatted variant of Info.
func Infof(format string, v ...interface{}) {
	logview.AddEntry(fmt.Sprintf(format, v...))
	// BUGFIX: re-spread the args; passing the slice gave the format verbs a
	// single []interface{} argument, producing wrong output like "[a b]%!(EXTRA...)".
	logger.Infof(format, v...)
}
// Warningf is the formatted variant of Warning.
func Warningf(format string, v ...interface{}) {
	logview.AddEntry(fmt.Sprintf(format, v...))
	// BUGFIX: re-spread the args so the format verbs bind to the intended values.
	logger.Warningf(format, v...)
}
// Errorf is the formatted variant of Error.
func Errorf(format string, v ...interface{}) {
	logview.AddEntry(fmt.Sprintf(format, v...))
	// BUGFIX: re-spread the args so the format verbs bind to the intended values.
	logger.Errorf(format, v...)
}
// Fatalf is the formatted variant of Fatal (terminates the process).
func Fatalf(format string, v ...interface{}) {
	logview.AddEntry(fmt.Sprintf(format, v...))
	// BUGFIX: re-spread the args so the format verbs bind to the intended values.
	logger.Fatalf(format, v...)
}
|
import { maximumProduct, maximumProduct2 } from './maximumProduct';
describe('maximumProduct function', () => {
it('should work', () => {
expect(maximumProduct([-4, -4, 2, 8])).toBe(128);
expect(maximumProduct2([-4, -4, 2, 8])).toBe(128);
});
});
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.