content
stringlengths 10
4.9M
|
---|
<gh_stars>0
module Evaluator
( evaluate,
evaluateRepl,
evaluateDefines,
Value (..),
Context,
) where
import Control.Exception (throw)
import Data.List (foldl')
import Data.Maybe (fromMaybe)
import qualified Data.Map.Strict as Map
import Text.Read (readMaybe)

import Exception (HExceptions (EvaluationException))
import Parser (Expression (..))
-- | Variable environment: maps symbol names to evaluated values.
type Context = Map.Map String Value

-- | A callable: user-defined ('Defined' params + body), builtin over
-- evaluated values ('Builtin'), or special form receiving its arguments
-- unevaluated ('Spe').
data Function = Defined [String] Expression | Builtin ([Value] -> Value) | Spe (Context -> [Expression] -> Value)

-- | Runtime values produced by the evaluator.
data Value = Function Function | Number Int | String String | List [Value] | Nil
-- | Values render in a Lisp-like textual form.
instance Show Value where
  show (Function _) = "#<procedure>"
  show (Number n) = show n
  show (String s) = s
  show (List l) = Evaluator.showList l
  show Nil = "()"
-- | Render a list value. Proper lists carry a trailing 'Nil' sentinel
-- (see 'evaluateQuotedSeq'), which is hidden from the printed form.
showList :: [Value] -> String
showList [] = "()"
showList [x, Nil] = '(' : show x ++ ")"
showList (first:xs) = '(' : show first ++ showList' xs

-- | Render the tail of a list; a final element with no 'Nil' sentinel
-- prints in dotted-pair notation.
showList' :: [Value] -> String
showList' [v, Nil] = (' ': show v) ++ ")"
showList' [v] = (" . " ++ show v) ++ ")"
showList' (v:xs) = (' ' : show v) ++ showList' xs
showList' [] = ")"
-- | Evaluate only the @define@ forms of a program (starting from
-- 'baseContext'), yielding the resulting context.
evaluateDefines :: [Expression] -> Context
evaluateDefines = evaluateDefines' baseContext

-- | Worker: folds @define@ forms into the context, ignoring every other
-- top-level expression.
evaluateDefines' :: Context -> [Expression] -> Context
evaluateDefines' c [] = c
evaluateDefines' c (Seq (Atom "define" : define) : xs) = evaluateDefines' (fst $ evaluateDefine c define) xs
evaluateDefines' c (_ : xs) = evaluateDefines' c xs
-- | Evaluate a whole program with the builtin context; @define@ forms
-- extend the context and produce no value of their own.
evaluate :: [Expression] -> [Value]
evaluate = evaluate' baseContext

-- | Worker threading the (possibly extended) context left-to-right.
evaluate' :: Context -> [Expression] -> [Value]
evaluate' _ [] = []
evaluate' c (Seq (Atom "define" : define) : xs) = evaluate' (fst $ evaluateDefine c define) xs
evaluate' c (expr:xs) = evaluateExpr c expr : evaluate' c xs
-- | REPL entry point: evaluates forms in an existing context and
-- returns the updated context plus one result per form.
evaluateRepl :: Context -> [Expression] -> (Context, [Value])
evaluateRepl = evaluateRepl' []

-- | Worker accumulating results in reverse (reversed once at the end).
evaluateRepl' :: [Value] -> Context -> [Expression] -> (Context, [Value])
evaluateRepl' v c [] = (c, reverse v)
evaluateRepl' v c (Seq (Atom "define" : define) : xs) = evaluateRepl'' v xs $ evaluateDefine c define
evaluateRepl' v c (expr:xs) = evaluateRepl' (evaluateExpr c expr : v) c xs

-- | After a define, its name is echoed as a 'String' result before
-- evaluation resumes with the extended context.
evaluateRepl'' :: [Value] -> [Expression] -> (Context, String) -> (Context, [Value])
evaluateRepl'' v (expr:xs) (c, name) = evaluateRepl' (evaluateExpr c expr : String name : v) c xs
evaluateRepl'' v [] (c, name) = (c, reverse $ String name : v)
-- | Handle the body of a @define@: either @(define name expr)@ or the
-- function shorthand @(define (name params...) body)@. Returns the
-- extended context together with the defined name.
evaluateDefine :: Context -> [Expression] -> (Context, String)
evaluateDefine c [Atom symbol, expr] = (Map.insert symbol (evaluateExpr c expr) c, symbol)
evaluateDefine c [Seq (Atom symbol : args), func] = (Map.insert symbol (createFunction args func) c, symbol)
evaluateDefine _ _ = throw $ EvaluationException "define : Invalid arguments"
-- | Build a user-defined function value from its parameter atoms and body.
createFunction :: [Expression] -> Expression -> Value
createFunction args func = Function $ Defined (map asAtom args) func
-- | Evaluate a single expression according to its syntactic shape.
evaluateExpr :: Context -> Expression -> Value
evaluateExpr _ (Quoted expr) = evaluateQuoted expr
evaluateExpr c (Seq exprs) = evaluateSeq c exprs
evaluateExpr c (Atom a) = evaluateAtom c a
-- | Resolve a bare atom: bound variable first, then numeric literal;
-- anything else is an evaluation error.
evaluateAtom :: Context -> String -> Value
evaluateAtom c s = Map.lookup s c
  ?: ((Number <$> readMaybe s)
  ?: throw (EvaluationException (show s ++ " is not a variable")))
-- | Evaluate a procedure call: the evaluated head decides how the rest
-- of the sequence is handled.
evaluateSeq :: Context -> [Expression] -> Value
evaluateSeq _ [] = Nil
evaluateSeq c (expr:xs) = evaluateSeq' c (evaluateExpr c expr) xs

-- | Special forms receive their arguments unevaluated; everything else
-- gets applicative-order evaluation.
evaluateSeq' :: Context -> Value -> [Expression] -> Value
evaluateSeq' c (Function (Spe s)) exprs = s c exprs
evaluateSeq' c v exprs = evaluateSeq'' c $ v:map (evaluateExpr c) exprs

-- | Apply the (now evaluated) head to the evaluated arguments.
evaluateSeq'' :: Context -> [Value] -> Value
evaluateSeq'' c (Function f : xs) = invokeFunction c f xs
evaluateSeq'' _ [] = Nil
evaluateSeq'' _ _ = throw $ EvaluationException "Sequence is not a procedure"
-- | Turn a quoted expression into a datum: no variable lookup, no
-- application.
evaluateQuoted :: Expression -> Value
evaluateQuoted (Atom a) = evaluateQuotedAtom a
evaluateQuoted (Seq []) = Nil
evaluateQuoted (Seq q) = List $ evaluateQuotedSeq q
evaluateQuoted (Quoted q) = evaluateQuoted q

-- | A quoted atom is a number when it parses as one, otherwise a symbol
-- kept as a string.
evaluateQuotedAtom :: String -> Value
evaluateQuotedAtom s = (Number <$> readMaybe s) ?: String s

-- | Quoted sequences become lists terminated with the 'Nil' sentinel
-- expected by the printer and by 'cons'.
evaluateQuotedSeq :: [Expression] -> [Value]
evaluateQuotedSeq = foldr ((:) . evaluateQuoted) [Nil]
-- | Apply a function to already-evaluated arguments. Special forms are
-- dispatched earlier (see 'evaluateSeq''); reaching one here is a bug.
invokeFunction :: Context -> Function -> [Value] -> Value
invokeFunction _ (Builtin b) args = b args
invokeFunction c (Defined symbols func) args = evaluateExpr (functionContext c symbols args) func
invokeFunction _ (Spe _) _ = throw $ EvaluationException "The impossible has happened"

-- | Bind parameter names to argument values on top of the caller's
-- context (dynamic scoping); arities must match exactly.
functionContext :: Context -> [String] -> [Value] -> Context
functionContext c (symbol:sxs) (value:vxs) = functionContext (Map.insert symbol value c) sxs vxs
functionContext c [] [] = c
functionContext _ _ _ = throw $ EvaluationException "Invalid number of arguments"
-- | Initial evaluation context containing every builtin binding.
baseContext :: Context
baseContext = Map.fromList builtins

-- | Builtin procedures and special forms, plus the boolean constants
-- @#t@ / @#f@ (represented as strings, see 'fromBool').
builtins :: [(String, Value)]
builtins = [("+", Function $ Builtin add),
            ("-", Function $ Builtin sub),
            ("*", Function $ Builtin mult),
            ("div", Function $ Builtin division),
            ("mod", Function $ Builtin modulo),
            ("<", Function $ Builtin inferior),
            ("eq?", Function $ Builtin eq),
            ("atom?", Function $ Builtin atom),
            ("cons", Function $ Builtin cons),
            ("car", Function $ Builtin car),
            ("cdr", Function $ Builtin cdr),
            ("cond", Function $ Spe cond),
            ("lambda", Function $ Spe lambda),
            ("let" , Function $ Spe slet),
            ("quote" , Function $ Spe quote),
            ("#t" , String "#t"),
            ("#f" , String "#f")
           ]
-- | Sum builtin @(+ ...)@; every argument must be a number.
add :: [Value] -> Value
add = Number . sum . map asNumber

-- | Subtraction builtin @(- ...)@: the unary form negates, otherwise
-- subtraction folds left-to-right. The strict 'foldl'' replaces the
-- original lazy 'foldl' to avoid building a thunk chain on long
-- argument lists.
sub :: [Value] -> Value
sub [Number n] = Number $ -n
sub (Number n:xs) = Number $ foldl' (-) n $ map asNumber xs
sub _ = throw $ EvaluationException "- : Invalid arguments"

-- | Product builtin @(* ...)@.
mult :: [Value] -> Value
mult = Number . product . map asNumber
-- | Integer division builtin @(div lhs rhs)@, truncating toward zero.
-- A zero divisor now raises a proper 'EvaluationException' instead of
-- crashing with an uncatchable arithmetic exception.
division :: [Value] -> Value
division [Number _, Number 0] = throw $ EvaluationException "div : Division by zero"
division [Number lhs, Number rhs] = Number $ quot lhs rhs
division [_ , _] = throw $ EvaluationException "div : Invalid arguments"
division _ = throw $ EvaluationException "div : Invalid number of arguments"

-- | Modulo builtin @(mod lhs rhs)@; zero divisor is reported the same
-- way as for @div@.
modulo :: [Value] -> Value
modulo [Number _, Number 0] = throw $ EvaluationException "mod : Division by zero"
modulo [Number lhs, Number rhs] = Number $ mod lhs rhs
modulo [_ , _] = throw $ EvaluationException "mod : Invalid arguments"
modulo _ = throw $ EvaluationException "mod : Invalid number of arguments"
-- | Builtin @<@ on two numbers, returning @#t@ / @#f@.
inferior :: [Value] -> Value
inferior [Number lhs, Number rhs] = fromBool $ (<) lhs rhs
inferior [_ , _] = throw $ EvaluationException "< : Invalid arguments"
inferior _ = throw $ EvaluationException "< : Invalid number of arguments"
-- | Builtin @cons@. Consing onto @()@ keeps the list unchanged, consing
-- onto a list prepends; any other pair forms a two-element list (the
-- printer renders a tail with no 'Nil' sentinel in dotted notation).
cons :: [Value] -> Value
cons [List l, Nil] = List l
cons [lhs, List l] = List $ lhs:l
cons [lhs, rhs] = List [lhs, rhs]
cons _ = throw $ EvaluationException "cons : Invalid number of arguments"

-- | Builtin @car@: first element of a non-empty list.
car :: [Value] -> Value
car [List (f : _)] = f
car _ = throw $ EvaluationException "car : Invalid arguments"

-- | Builtin @cdr@: tail of a list; a two-element list yields its second
-- element directly (dotted-pair convention, mirrors 'cons').
cdr :: [Value] -> Value
cdr [List [_, v]] = v
cdr [List (_ : l)] = List l
cdr _ = throw $ EvaluationException "cdr : Invalid arguments"
-- | Special form @cond@: branches are @(test result)@ pairs, tried in
-- order; throws when no branch matches.
cond :: Context -> [Expression] -> Value
cond c (Seq [expr, ret] : xs) = cond' c (evaluateExpr c expr) ret xs
cond _ _ = throw $ EvaluationException "cond : invalid branch"

-- | Only the string @#f@ counts as false; any other test value selects
-- the branch.
cond' :: Context -> Value -> Expression -> [Expression] -> Value
cond' c (String "#f") _ xs = cond c xs
cond' c _ ret _ = evaluateExpr c ret
-- | Builtin @eq?@: numbers and strings compare by value, @()@ equals
-- @()@; every other combination (including lists) is @#f@.
eq :: [Value] -> Value
eq [Number lhs, Number rhs] | lhs == rhs = fromBool True
eq [String lhs, String rhs] | lhs == rhs = fromBool True
eq [Nil , Nil ] = fromBool True
eq [_ , _ ] = fromBool False
eq _ = throw $ EvaluationException "eq? : Invalid number of arguments"

-- | Builtin @atom?@: true for every non-list value.
atom :: [Value] -> Value
atom [] = throw $ EvaluationException "atom? : no argument"
atom [List _] = fromBool False
atom _ = fromBool True
-- | Special form @lambda@: @(lambda (params...) body)@. The body is
-- evaluated in the caller's context extended with the parameters
-- (dynamic scoping, see 'invokeFunction').
lambda :: Context -> [Expression] -> Value
lambda _ [args, func] = lambda' args func
lambda _ _ = throw $ EvaluationException "lambda : Invalid number of arguments"

-- | Validates that the parameter list really is a sequence of atoms.
lambda' :: Expression -> Expression -> Value
lambda' (Seq args) func = Function $ Defined (map asAtom args) func
lambda' _ _ = throw $ EvaluationException "lambda : Invalid arguments"

-- | Special form @let@: evaluates @expr@ in the context extended with
-- the given bindings.
slet :: Context -> [Expression] -> Value
slet c [Seq defs, expr] = evaluateExpr (letContext c defs) expr
slet _ _ = throw $ EvaluationException "let : Invalid number of arguments"
-- | Folds @(name value)@ binding pairs into the context; each value is
-- evaluated in the context built so far, so later bindings can use
-- earlier ones.
letContext :: Context -> [Expression] -> Context
letContext c (Seq [Atom key, value] : xs) = letContext (Map.insert key (evaluateExpr c value) c) xs
letContext c [] = c
letContext _ _ = throw $ EvaluationException "let : Invalid variable declaration"

-- | Special form @quote@: returns its single argument unevaluated.
quote :: Context -> [Expression] -> Value
quote _ [expr] = evaluateQuoted expr
quote _ _ = throw $ EvaluationException "quote : Invalid arguments"
-- | Encode a Haskell 'Bool' in the interpreter's convention: the
-- strings @#t@ and @#f@.
fromBool :: Bool -> Value
fromBool b
  | b = String "#t"
  | otherwise = String "#f"
-- | Expect an atom expression, returning its name; used for parameter
-- lists.
asAtom :: Expression -> String
asAtom (Atom a) = a
asAtom _ = throw $ EvaluationException "Invalid atom"

-- | Expect a numeric value, returning the wrapped 'Int'.
asNumber :: Value -> Int
asNumber (Number n) = n
asNumber v = throw $ EvaluationException $ show v ++ " is not a number"
-- | @m ?: def@ unwraps @m@, falling back to @def@ when it is 'Nothing'
-- (a flipped 'fromMaybe').
(?:) :: Maybe a -> a -> a
m ?: def = fromMaybe def m
|
<reponame>adessoSE/budgeteer
package de.adesso.budgeteer.rest.project.model;
import com.fasterxml.jackson.annotation.JsonProperty;
import lombok.Getter;
import javax.validation.constraints.Positive;
/**
 * Request body for updating the caller's default project.
 */
@Getter
public class UpdateDefaultProjectModel {
    // Id of the project to make the default; validation requires it to be positive.
    @Positive private final long newDefaultProjectId;

    public UpdateDefaultProjectModel(@JsonProperty("newDefaultProjectId") long newDefaultProjectId) {
        this.newDefaultProjectId = newDefaultProjectId;
    }
}
|
Lisa Bloomquist
Activist Post
A U.S. military publication, The Air Force Times, made the connection that victims of Fluoroquinolone Toxicity Syndrome (“Floxies”) have been screaming about for years – that Gulf War Illness is tied to Cipro. In an article entitled, “New FDA warnings on Cipro may tie into Gulf War Illness,” it was noted that the August, 2013 update to the warning labels of all fluoroquinolone antibiotics stating that PERMANENT peripheral neuropathy is a possible adverse effect, prompted The Air Force Times to make the connection.
Civilians suffering from Fluoroquinolone Toxicity Syndrome (an adverse reaction to a fluoroquinolone – Cipro/Ciprofloxacin, Levaquin/Levofloxacin, Avelox/Moxifloxacin, Floxin/Ofloxacin and others) have noted the similarities between Gulf War illness and Fluoroquinolone Toxicity Syndrome for years. It is beyond likely, it is probable, that they are one and the same.
The Symptoms
The VA defines Gulf War Illness as “chronic, unexplained symptoms existing for 6 months or more” that are at least ten percent disabling. The CDC case definition of Gulf War Illness “requires chronic symptoms in two of three domains of fatigue, cognitive-mood, and musculoskeletal.”
Fluoroquinolone Toxicity Syndrome is a chronic, unexplained illness with symptoms lasting for months, years, or, as the updated warning label notes, permanently. The symptoms of Fluoroquinolone Toxicity Syndrome are too numerous to list, but a cursory glance at the warning label for Cipro/Ciprofloxacin will tell you that the effects include musculoskeletal problems and central nervous system issues. Additionally, as pharmaceuticals that damage mitochondria, the energy centers of cells, severe fatigue is often induced by Fluoroquinolones.
A 1998 study entitled, “Chronic Multisymptom Illness Affecting Air Force Veterans of the Gulf War,” found that the most commonly reported symptoms of Gulf War Illness are sinus congestion, headache, fatigue, joint pain, difficulty remembering or concentrating, joint stiffness, difficulty sleeping, abdominal pain, trouble finding words, (feeling) moody or irritable, rash or sores, numbness or tingling and muscle pain.
A 2011 study conducted by the Quinolone Vigilance Foundation found that the most commonly reported symptoms of Fluoroquinolone Toxicity Syndrome are tendon, joint, and muscle pain, fatigue, popping/cracking joints, weakness, neuropathic pain, paresthesia (tingling), muscle twitching, depression, anxiety, insomnia, back pain, memory loss, tinnitus, muscle wasting.
The symptoms are similar enough to raise a few eyebrows. It should be noted that when a chronic, multi-symptom illness suddenly sickens a patient or a soldier, and he or she goes from being healthy and active to suddenly being exhausted and unable to move or think, it is difficult to pinpoint and describe exactly what is going wrong in his or her body. Thus, even if the symptoms are identical, they may not be described in an identical way because of context and differing areas of focus.
For victims of fluoroquinolones, it is as if a bomb went off in the body of the victim, yet all tests come back “normal” so in addition to physical pain and suffering that the soldier/patient is going through, he or she has to suffer through dismissal and denial from medical professionals as well. Neither Gulf War Illness nor Fluoroquinolone Toxicity Syndrome are detected by traditional medical tests and thus both diseases are systematically denied. All blood and urine markers come back within the normal ranges, yet the patient or soldier is suddenly incapable of 90% of what he or she used to be able to do. When a large number of patients or soldiers (nearly 30% of the soldiers serving in the Gulf reported symptoms. Exact numbers of civilian patients suffering from Fluoroquinolone Toxicity Syndrome are unknown because of delayed reactions, misdiagnosing the illness, tolerance thresholds, etc.) experience adverse reactions that are undetectable using the tests available, there is something wrong with the tests. The patients and soldiers aren’t lying and their loss of abilities isn’t “in their heads.”
Exposure to the same Poison
Another glaring similarity between Gulf War Illness and Fluoroquinolone Toxicity Syndrome is that everyone with either syndrome took a Fluoroquinolone.
Per a Veteran of the Marines who commented on healthboards.com about the use of Ciprofloxacin by soldiers in the Gulf:
The Ciprofloxacin 500 mg were ordered to be taken twice a day. The Marines were the only service that I know for sure were given these orders. We were ordered to start them before the air war, and the order to stop taking them was giver at 0645 Feb 28th 1991 by General Myatt 1st Marine div commander. We were forced to take Cipro 500mg twice a day for 40 plus days. so the Marines were given NAPP (nerve agent protection pills) or pyridiostigmine bromide to protect us from nerve agent, and We were ordered to take the Cipro to protect from anthrax. We were part of the human research trial conducted by the Bayer corporation in the creation of their new anthrax pills. At that time they had no idea of the side effects of flouroquinolones. That’s the class of medications that Cipro falls into. After the Gulf War the FDA and Bayer co. started releasing the list of side effects. You do need to know what was done to you so you will have to do your own research. Good luck to all of you and Semper Fi.
Download Your First Issue Free! Do You Want to Learn How to Become Financially Independent, Make a Living Without a Traditional Job & Finally Live Free?
Download Your Free Copy of Counter Markets
By definition, everyone who suffers from Fluoroquinolone Toxicity Syndrome has taken a fluoroquinolone – Cipro/Ciprofloxacin, Levaquin/Levofloxacin, Avelox/Moxifloxacin or Floxin/Ofloxacin. Civilians are also part of the “human research trial conducted by the Bayer corporation” as well as Johnson & Johnson, Merck and multiple generic drug manufacturers who peddle fluoroquinolones as “safe” antibiotics.
The Case Against Fluoroquinolones
Of course, there were multiple chemicals and poisons that Gulf War Veterans were exposed to in the 1990-91 Persian Gulf War and thus it has been difficult to pinpoint an exact cause of Gulf War Illness. The ruling out of the following possible causes should certainly be questioned thoroughly, but “depleted uranium, anthrax vaccine, fuels, solvents, sand and particulates, infectious diseases, and chemical agent resistant coating” have been found not to cause Gulf War Illness. Other potential causes of Gulf War Illness include oil fires, multiple vaccines, pesticides, and, of course, fluoroquinolone antibiotics (Cipro). (It should be noted that non-deployed military personnel who served during the Gulf War period, but who were not deployed in the Middle East, have also been afflicted with Gulf War Illness and thus toxins that both deployed and non-deployed personnel have been exposed to should be the focus of investigations into the causes of Gulf War Illness.)
The Air Force Times article is one of the first official mentions of the relationship between Cipro and Gulf War Illness. Officially, the link hasn’t been examined (though some very smart researchers are building a case as you read this). Why Cipro hasn’t been looked at as a potential cause of Gulf War Illness is a question that I don’t know the answer to. Perhaps it’s because most people think that all antibiotics are as safe as penicillin. Perhaps it’s because most people have a tolerance threshold for fluoroquinolones and don’t react negatively to the first prescription that they receive. Perhaps it’s because even today, more than 30 years after Cipro was patented by Bayer, the exact mechanism by which fluoroquinolones operate is still officially unknown (1). Perhaps it’s because it is unthinkable that a commonly used antibiotic could cause a chronic syndrome of pain and suffering. Perhaps it’s because the tests that show the damage done by fluoroquinolones aren’t used by the VA or civilian doctors’ offices. Perhaps it’s because fluoroquinolones are the perfect drug – they take an acute problem – an infection, and convert it into a chronic disease-state that is systematically misdiagnosed as fibromyalgia, chronic fatigue syndrome, an autoimmune disease, leaky gut syndrome, insomnia, anxiety, depression, etc. and turns formerly healthy people into lifetime customers of the medical establishment / pharmaceutical companies. Perhaps it is simply widespread ignorance about the way these dangerous drugs work.
The Cliff’s Notes version of how fluoroquinolones work is as follows:
The fluoroquinolone depletes liver enzymes that metabolize drugs (CYP450) (2). When the enzymes are depleted sufficiently, the fluoroquinolone forms a poisonous adduct to mitochondrial DNA (mtDNA) (3, 4), which destroys and depletes mtDNA (5). While the mtDNA is being destroyed, the fluoroquinolone is also binding to cellular magnesium. (6, 7) The mitochondria reacts to being assaulted by producing reactive oxygen species (ROS) (8, 9). Some of the ROS, specifically hydrogen peroxide, combines with the excess calcium (there is a balance in cells of magnesium and calcium and the binding of the magnesium results in an excess of calcium) to induce the expression of CD95L/Fas Ligand (5) which then causes cell death (apoptosis) and immune system dysfunction (10) which leads the body to attack itself – like an autoimmune disease.
Damage is caused by every single step in the process. Additional damage may be done by the fluorine atom that is added to fluoroquinolones to make them more potent. It should be noted that the complexity of these cellular interactions is too vast to write up in this article.
Every symptom of Gulf War Illness is consistent with mitochondrial damage and oxidative stress (11), both of which have been shown to be brought on by fluoroquinolones. |
/*
* Copyright 2021, Yahoo Inc.
* Licensed under the Apache License, Version 2.0
* See LICENSE file in project root for terms.
*/
package com.yahoo.elide.datastores.aggregation.queryengines.sql.metadata;
import com.yahoo.elide.core.type.Type;
import com.yahoo.elide.datastores.aggregation.annotation.JoinType;
import lombok.Builder;
import lombok.Value;
/**
 * Forms relationships between two SQLTables.
 *
 * <p>Immutable value object (lombok {@code @Value}) constructed via
 * {@code @Builder}. The doc comment was moved above the annotations so
 * that javadoc actually attaches it to the class — a doc comment placed
 * between annotations and the {@code class} keyword is ignored.
 */
@Value
@Builder
public class SQLJoin {
    /** Logical name of the join (the join field on the owning table). */
    private String name;

    /** Kind of join to emit (see {@link JoinType}). */
    private JoinType joinType;

    /** Whether the relationship resolves to at most one row. */
    private boolean toOne;

    /** Type of the table being joined to. */
    private Type<?> joinTableType;

    /** SQL expression used to join the two tables — confirm templating rules with SQLTable. */
    private String joinExpression;
}
|
////////* ********************* IN_THE_NAME_OF_ALLAH *******************///////
#include<bits/stdc++.h>
#include<iostream>
#define endl '\n'
#define Faltu_code() return 0
#define sp setprecision
#define ll long long
using namespace std;
long long mn =1e12 ;
// Untie and unsync the standard C++ streams from C stdio for faster I/O.
void fast()
{
    ios::sync_with_stdio(0);
    cin.tie(0);
    cout.tie(0);
}
int main ()
{
fast();
ll te;
ll n,a,b,x,y;
cin>>n>>a;
if(a==2)
{
for(auto i=0;i<n-1;i++)
{
cout<< 2;
}
cout<< 2<< endl;
return 0;
}
else if(a==3)
{
if(n==1)
{
cout<< 9<< endl;
return 0;
}
cout<< 1;
for(auto i=1;i<n-1;i++)
{
cout<< 0;
}
cout<< 2 << endl;
return 0;
}
else if(a==4)
{
for(auto i=0;i<n;i++)
{
cout<< 4;
}
cout<< endl;
return 0;
}
else if(a==5)
{
cout<< 5;
for(auto i=1;i<n;i++)
{
cout<<0;
}
cout<< endl;
return 0;
}
else if(a==6)
{
for(auto i=0;i<n;i++)
{
cout<< 6;
}
cout<< endl;
return 0;
}
else if(a==7)
{
for(auto i=0;i<n;i++)
{
cout<< 7;
}
cout<< endl;
return 0;
}
else if(a==8)
{
for(auto i=0;i<n;i++)
{
cout<< 8;
}
cout<< endl;
return 0;
}
else if(a==9)
{
for(auto i=0;i<n;i++)
{
cout<< 9;
}
cout<< endl;
return 0;
}
else if(a==10)
{
if(n>1)
{
cout<< 1;
for(auto i=0;i<n-1;i++)
{
cout<< 0;
}
cout<< endl;
return 0;
}
}
cout<< -1 << endl;
}
//*************************************AL_HAMDULI_ALLAH***************************************** |
def parse_json_site_response(text):
    """Extract station names from a site-lookup JSON response.

    Returns a list of ``{'name': <site name>}`` dicts, one for each entry
    of type ``'Station'`` in the response's ``ResponseData`` list.
    """
    sites = json.loads(text).get(u'ResponseData', {})
    return [{u'name': site['Name']}
            for site in sites
            if site.get(u'Type') == 'Station']
#ifndef HTTP_H
#define HTTP_H
#include <stdio.h>
#include <osi.h>
#include <termcaps.h>
#include <app.h>
#include "http-parser.h"
/**
 * \brief Decorated banner printed above a dumped HTTP header.
 */
#define HTTP_OUTPUT_HEADER \
  COLOR_BEGIN(LIGHT_MAGENTA, \
    "-------------------------------------------------------\n" \
    "-------------------- " BOLD("HTTP Header") " --------------------\n" \
    "-------------------------------------------------------\n" \
  )

/**
 * \brief Closing ruler printed below a dumped HTTP header.
 */
#define HTTP_OUTPUT_FOOTER \
  "-------------------------------------------------------\n" \
  COLOR_END

/**
 * \brief Associates an HTTP method identifier with its textual name.
 * \note NOTE(review): `id` presumably mirrors the http-parser method
 * enumeration — confirm against http-parser.h.
 */
typedef struct http_method_map
{
  uint8_t id;
  const char* value;
} http_method_map;

/**
 * \brief Creates a new HTTP dissector.
 * \return an instance of a dissector
 */
const osi_dissector_t* http_dissector_new();

/**
 * \return whether the dissector can handle the
 * given packet.
 * \see osi_dissector_t
 */
int http_dissector_handles(const packet_t* packet);

/**
 * \brief Dumps the given packet on the standard output.
 * \see osi_dissector_t
 */
void http_dissector_dump(const packet_t* packet);

/**
 * \brief Removes the bytes related to HTTP in the given packet.
 * \return a pointer to a packet with every bytes related to
 * the HTTP segment removed
 * \note In this implementation, an HTTP packet is not decapsulated.
 */
packet_t* http_dissector_decapsulate(const packet_t* packet);

#endif
|
<filename>internal/mail/smtp_test.go
package mail
import (
"reflect"
"testing"
)
// TestSmtpMail checks that NewMailProvider recognises an smtp:// URL:
// it must return a *mail.smtpProvider, and sending through it (the host
// is a placeholder with no reachable server) must fail.
func TestSmtpMail(t *testing.T) {
	provider, err := NewMailProvider("smtp://<EMAIL>:587")
	if err != nil {
		t.Errorf("can't get smtp provider")
	}
	// The concrete provider type is asserted via reflection.
	providerType := reflect.TypeOf(provider).String()
	if "*mail.smtpProvider" != providerType {
		t.Errorf("provider is not smtp struct type : " + providerType)
	}
	msg := []byte("To: <EMAIL>\r\n" +
		"Subject: discount Gophers!\r\n" +
		"\r\n" +
		"This is the email body.\r\n")
	// Send is expected to fail against the placeholder address.
	err = provider.Send("<EMAIL>", "test", "test", msg)
	if err == nil {
		t.Errorf("send must fail")
	}
}
|
<reponame>sgholamian/log-aware-clone-detection
//,temp,StorageManagerImpl.java,2227,2292,temp,StorageManagerImpl.java,2016,2111
//,3
public class xxx {
    /**
     * Registers a zone-wide secondary staging (image cache) store.
     *
     * NOTE(review): this is a clone-detection corpus sample extracted from
     * StorageManagerImpl; surrounding fields (_dcDao, _accountMgr, ...) are
     * not visible here.
     */
    @Override
    public ImageStore createSecondaryStagingStore(CreateSecondaryStagingStoreCmd cmd) {
        String providerName = cmd.getProviderName();
        DataStoreProvider storeProvider = _dataStoreProviderMgr.getDataStoreProvider(providerName);
        // Fall back to the default cache provider when none matches the name.
        if (storeProvider == null) {
            storeProvider = _dataStoreProviderMgr.getDefaultCacheDataStoreProvider();
            if (storeProvider == null) {
                throw new InvalidParameterValueException("can't find cache store provider: " + providerName);
            }
        }
        Long dcId = cmd.getZoneId();
        ScopeType scopeType = null;
        String scope = cmd.getScope();
        if (scope != null) {
            try {
                scopeType = Enum.valueOf(ScopeType.class, scope.toUpperCase());
            } catch (Exception e) {
                throw new InvalidParameterValueException("invalid scope for cache store " + scope);
            }
            // Only zone-wide cache storage is accepted.
            if (scopeType != ScopeType.ZONE) {
                throw new InvalidParameterValueException("Only zone wide cache storage is supported");
            }
        }
        if (scopeType == ScopeType.ZONE && dcId == null) {
            throw new InvalidParameterValueException("zone id can't be null, if scope is zone");
        }
        // Check if the zone exists in the system
        DataCenterVO zone = _dcDao.findById(dcId);
        if (zone == null) {
            throw new InvalidParameterValueException("Can't find zone by id " + dcId);
        }
        // Disabled zones accept this operation from root admins only.
        Account account = CallContext.current().getCallingAccount();
        if (Grouping.AllocationState.Disabled == zone.getAllocationState() && !_accountMgr.isRootAdmin(account.getId())) {
            PermissionDeniedException ex = new PermissionDeniedException("Cannot perform this operation, Zone with specified id is currently disabled");
            ex.addProxyObject(zone.getUuid(), "dcId");
            throw ex;
        }
        // Hand the raw settings to the provider's lifecycle to create the store.
        Map<String, Object> params = new HashMap<String, Object>();
        params.put("zoneId", dcId);
        params.put("url", cmd.getUrl());
        params.put("name", cmd.getUrl());
        params.put("details", cmd.getDetails());
        params.put("scope", scopeType);
        params.put("providerName", storeProvider.getName());
        params.put("role", DataStoreRole.ImageCache);
        DataStoreLifeCycle lifeCycle = storeProvider.getDataStoreLifeCycle();
        DataStore store = null;
        try {
            store = lifeCycle.initialize(params);
        } catch (Exception e) {
            s_logger.debug("Failed to add data store: " + e.getMessage(), e);
            throw new CloudRuntimeException("Failed to add data store: " + e.getMessage(), e);
        }
        return (ImageStore)_dataStoreMgr.getDataStore(store.getId(), DataStoreRole.ImageCache);
    }
};
<filename>frontend/src/components/icons/index.ts
export * from './vega'
|
Computer-assisted Children Physical Fitness Detection and Exercise Intervention Evaluation based on Artificial Intelligence Model
Computer-assisted children physical fitness detection and exercise intervention evaluation based on artificial intelligence model is implemented in this research. Big data is usually a modern cost-free product of digital interactions. The increasingly mature concept more clearly describes the difference between the big data and artificial intelligence, which uses descriptive statistics of data with high information density to measure things. In our designed model, the data analytic framework is optimized through the clustering analysis. The main research contents include basic theory, discovery algorithm, data warehouse, visualization technology, qualitative and quantitative exchange model, knowledge representation method, maintenance and reuse of the discovery knowledge, based on these theory, the proposed model are analyzed. The experimental results have proven the effectiveness of the method. |
//#region imports
import * as React from 'react';
import { RouteHandler } from 'components';
import { routes } from './routes';
//#endregion
/**
 * Common route handler for all `/algo` paths.
 *
 * Stateless functional component: simply delegates rendering to the
 * shared RouteHandler with this section's route table.
 */
const Algorithms: React.SFC<{}> = () => (
  <RouteHandler routes={routes} />
);

export default Algorithms;
|
/**
 * Setting up the View and initializing the Google Map.
 * Filling the tables for Mode and Purpose Icons.
 * Setting default STATE of the LocationService to OFF.
 *
 * @param savedInstanceState previously saved state, or null on a fresh start
 */
@Override
protected void onCreate(Bundle savedInstanceState) {
    super.onCreate(savedInstanceState);
    setContentView(R.layout.activity_homescreen);
    // Lock the activity to portrait orientation.
    setRequestedOrientation(ActivityInfo.SCREEN_ORIENTATION_PORTRAIT);
    // Request the map asynchronously; this activity receives the callback.
    FragmentManager myFragmentManager = getSupportFragmentManager();
    SupportMapFragment mySupportMapFragment
            = (SupportMapFragment) myFragmentManager.findFragmentById(R.id.map2);
    mySupportMapFragment.getMapAsync(this);
    // Wire up the icon views and the two action buttons.
    ivPurpose = (ImageView) findViewById(R.id.purposeIV);
    ivMode = (ImageView) findViewById(R.id.modeIV);
    btnStartTrack = (Button) findViewById(R.id.new_Track_btn);
    btnStartTrack.setOnClickListener(this);
    btnStartService = (Button) findViewById(R.id.start_Service_btn);
    btnStartService.setOnClickListener(this);
    // Navigation drawer setup.
    myDrawerList = (ListView) findViewById(R.id.navList);
    myDrawerList.setOnItemClickListener(this);
    activityTitleArray = getResources().getStringArray(R.array.activity_names);
    getSupportActionBar().setDisplayHomeAsUpEnabled(true);
    getSupportActionBar().setHomeButtonEnabled(true);
    addDrawerItems();
    setupDrawer();
    // Populate the mode/purpose lookup tables.
    trackHandler = new TrackHandler(this);
    trackHandler.fillModeTable();
    trackHandler.fillPurposeTable();
    // Tracking starts switched off; the service button stays disabled for now.
    STATE = State.TRACKING_OFF;
    btnStartService.setClickable(false);
    btnStartService.setAlpha(0.5F);
    // Prompt the user to enable GPS if it is off, then request permission.
    manager = (LocationManager) getSystemService( Context.LOCATION_SERVICE );
    if ( !manager.isProviderEnabled( LocationManager.GPS_PROVIDER ) ) {
        showGpsConfirmationDialog();
    }
    requestLocationPermission();
}
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef SHAKA_EMBEDDED_SDL_FRAME_DRAWER_H_
#define SHAKA_EMBEDDED_SDL_FRAME_DRAWER_H_
#include <SDL2/SDL.h>
#include <memory>
#include "frame.h"
#include "macros.h"
namespace shaka {
/**
 * A helper class that is used to convert Shaka Embedded Frame objects into an
 * SDL texture.
 *
 * @ingroup utils
 */
class SHAKA_EXPORT SdlFrameDrawer final {
 public:
  SdlFrameDrawer();
  // Move-only: copying is disallowed, only ownership transfer is supported.
  SdlFrameDrawer(const SdlFrameDrawer&) = delete;
  SdlFrameDrawer(SdlFrameDrawer&&);
  ~SdlFrameDrawer();

  SdlFrameDrawer& operator=(const SdlFrameDrawer&) = delete;
  SdlFrameDrawer& operator=(SdlFrameDrawer&&);

  /**
   * Sets the renderer used to create textures.  This MUST be called at least
   * once before calling Draw.  This can be changed at any time, but will
   * invalidate any existing textures.
   */
  void SetRenderer(SDL_Renderer* renderer);

  /**
   * Draws the given frame onto a texture.  This may invalidate any existing
   * textures.
   *
   * @param frame The frame to draw.
   * @return The created texture, or nullptr on error.
   */
  SDL_Texture* Draw(Frame* frame);

 private:
  // Pimpl: hides implementation details behind an opaque pointer.
  class Impl;
  std::unique_ptr<Impl> impl_;
};
} // namespace shaka
#endif // SHAKA_EMBEDDED_SDL_FRAME_DRAWER_H_
|
/**
 * Terminate our worker thread, either immediately or after waiting
 * for it to return to idle state.
 *
 * @param waitForIdle Whether to wait until the thread is not busy
 * to notify it.
 */
private void terminate(boolean waitForIdle) {
    shutdown = true;
    if (queuedThread != null) {
        String threadName = threadName();
        synchronized (queuedThread) {
            if (waitForIdle) {
                if (queuedThread.isBusy()) {
                    logger.debug("%1$s is busy, waiting to finish...", threadName);
                    try {
                        // Wait for the worker to notify() when it goes idle; an
                        // interrupt here just proceeds with termination early.
                        queuedThread.wait();
                    }
                    catch (InterruptedException ie) {
                        logger.debug("Interrupted waiting on %1$s to finish.", threadName);
                    }
                }
            }
            // Flag the thread as terminated, then interrupt it so any
            // blocking wait inside the worker is broken immediately.
            logger.debug("Set terminated flag for %1$s.", threadName);
            queuedThread.setTerminated();
            logger.debug("Interrupting %1$s.", threadName);
            queuedThread.interrupt();
        }
    }
    // NOTE(review): the reference is only cleared on immediate shutdown —
    // presumably deliberate so the idle path can still observe the thread;
    // confirm with callers.
    if (!waitForIdle) {
        queuedThread = null;
    }
}
import { Future } from '@ephox/katamari';
import { btoa } from '@ephox/dom-globals';
import { ImageDialogInfo } from './DialogTypes';
import Settings from '../api/Settings';
import Imgproxy from '../core/Imgproxy';
/**
 * Gathers everything the image dialog needs: base URLs, an encoded query
 * of upload endpoints/tokens, and a window-message handler that inserts
 * the chosen images into the editor.
 */
const collect = (editor, url): Future<ImageDialogInfo> => {
  // Imgproxy is only used when url, key and salt are all configured.
  const imgproxySettings = {
    url: Settings.getImgproxyUrl(editor),
    key: Settings.getImgproxyKey(editor),
    salt: Settings.getImgproxySalt(editor)
  };
  // App base URL falls back to the caller-supplied url when unset.
  const getAppBaseUrl = () => {
    const appBaseUrl = Settings.getAppBaseUrl(editor);
    return (typeof appBaseUrl !== 'undefined') ? appBaseUrl : url;
  };
  // Serializes the upload endpoints/tokens as a base64-encoded,
  // URI-escaped query string handed to the dialog.
  const getQuery = () => {
    const query = {
      ppu: Settings.getPresignedPutUrl(editor),
      ppt: Settings.getPresignedPutToken(editor),
      ifuu: Settings.getImageFromUploadUrl(editor),
      ifut: Settings.getImageFromUploadToken(editor),
      iu: Settings.getImagesUrl(editor),
      it: Settings.getImagesToken(editor)
    };
    const queryString = Object.keys(query).map((key) => key + '=' + query[key]).join('&');
    return encodeURIComponent(btoa(queryString));
  };
  // Returns a resized imgproxy URL when imgproxy is fully configured,
  // otherwise the original source unchanged.
  const getImageUrl = async function (src: string) {
    let imageUrl = src;
    if (imgproxySettings.url !== undefined && imgproxySettings.key !== undefined && imgproxySettings.salt !== undefined) {
      imageUrl = await Imgproxy.createImgproxySignatureUrl('fit', 320, 320, 'ce', 0, src, 'png', imgproxySettings);
    }
    return imageUrl;
  };
  // Handles the 'get-image-src-list' message posted by the dialog window:
  // inserts one <img> per selected source, then closes the dialog.
  const windowMessageEvent = async function (e) {
    if (e.data.event === 'get-image-src-list') {
      const dom = editor.dom;
      const items = e.data.data;
      const itemsLength = items.length;
      for (let i = 0; i < itemsLength; i++) {
        const imgSrc = await getImageUrl(items[i]);
        const imgElmt = dom.createHTML('img', { src: imgSrc, border: '0' });
        editor.insertContent(imgElmt);
      }
    }
    editor.windowManager.close();
  };
  return Future.pure<ImageDialogInfo>({
    baseUrl: url,
    appBaseUrl: getAppBaseUrl(),
    query: getQuery(),
    windowEvent: {
      message: windowMessageEvent
    }
  });
};
export {
collect
};
|
class Preprocessor:
    """
    Performs additional transformations that can't be performed, or would be
    too complicated for the Transformer, before the tree is compiled.
    """

    @staticmethod
    def fake_tree(block):
        """
        Creates a fake tree that can insert synthetic assignments into the
        given block.
        """
        return FakeTree(block)

    @staticmethod
    def replace_expression(fake_tree, parent, inline_expression):
        """
        Replaces an inline expression with a fake assignment: the service is
        assigned to a generated name and the expression's path is rewritten
        to reference that name.
        """
        assignment = fake_tree.add_assignment(inline_expression.service)
        entity = parent.entity
        if parent.expression:
            # For full expressions the entity sits deeper in the tree.
            entity = parent.expression.multiplication.exponential.factor.entity
        entity.path.replace(0, assignment.path.child(0))

    @classmethod
    def replace_in_entity(cls, block, statement, entity):
        """
        Replaces an inline expression inside an entity branch, preserving the
        original statement's line number on the rewritten path.
        """
        fake_tree = cls.fake_tree(block)
        line = statement.line()
        service = entity.path.inline_expression.service
        assignment = fake_tree.add_assignment(service)
        entity.replace(0, assignment.path)
        entity.path.children[0].line = line

    @classmethod
    def service_arguments(cls, block, service):
        """
        Processes the arguments of a service, replacing inline expressions.
        """
        fake_tree = cls.fake_tree(block)
        for argument in service.find_data('arguments'):
            expression = argument.node('entity.path.inline_expression')
            if expression:
                cls.replace_expression(fake_tree, argument, expression)

    @classmethod
    def assignment_expression(cls, block, tree):
        """
        Processes an assignment to an expression, replacing it.
        """
        fake_tree = cls.fake_tree(block)
        parent = block.rules.assignment.assignment_fragment
        cls.replace_expression(fake_tree, parent, tree.inline_expression)

    @classmethod
    def assignments(cls, block):
        """
        Process assignments, looking for inline expressions, for example:
        a = alpine echo text:(random value) or a = (alpine echo message:'text')
        """
        for assignment in block.find_data('assignment'):
            fragment = assignment.assignment_fragment
            if fragment.service:
                cls.service_arguments(block, fragment.service)
            elif fragment.expression:
                factor = fragment.expression.multiplication.exponential.factor
                if factor.entity.path:
                    if factor.entity.path.inline_expression:
                        cls.assignment_expression(block, factor.entity.path)

    @classmethod
    def service(cls, tree):
        """
        Processes services, looking for inline expressions, for example:
        alpine echo text:(random value)
        """
        service = tree.node('service_block.service')
        if service:
            cls.service_arguments(tree, service)

    @classmethod
    def flow_statement(cls, name, block):
        """
        Processes flow statements (if/elseif), replacing inline expressions
        in the statement's entity and in its third child.
        """
        for statement in block.find_data(name):
            if statement.node('entity.path.inline_expression'):
                cls.replace_in_entity(block, statement, statement.entity)

            if statement.child(2):
                # Bug fix: this node path was 'entitypath.inline_expression',
                # which never matched (the sibling check above uses
                # 'entity.path.inline_expression'), so inline expressions in
                # the statement's third child were silently skipped.
                if statement.child(2).node('entity.path.inline_expression'):
                    cls.replace_in_entity(block, statement, statement.child(2))

    @classmethod
    def process(cls, tree):
        """
        Entry point: preprocesses every block in the tree and returns it.
        """
        for block in tree.find_data('block'):
            cls.assignments(block)
            cls.service(block)
            cls.flow_statement('if_statement', block)
            cls.flow_statement('elseif_statement', block)
        return tree
Around 4,000 people participated in the demonstration, the largest since South Korea and the United States agreed to deploy the system, known as Terminal High-Altitude Area Defence (THAAD)
ADVERTISING Read more
Seoul (AFP)
Thousands of protesters marched near the US embassy in Seoul on Saturday, accusing President Donald Trump of "forcing" South Korea to deploy a controversial American missile defence system opposed by China.
The protest came as South Korea's new president Moon Jae-In heads to Washington next week for his first summit with Trump amid soaring tensions over Pyongyang's nuclear ambitions.
Around 4,000 people participated in the demonstration, the largest since South Korea and the United States agreed to deploy the system, known as Terminal High-Altitude Area Defence (THAAD).
Protesters carried placards that read: "Trump stop forcing (South Korea) to deploy THAAD" and "No THAAD, No Trump".
The crowd included residents from the southeastern county of Seongju where the system is being deployed who say it poses health and environmental hazards and argue that its presence could make them a priority target for North Korea.
THAAD was approved by Moon's ousted predecessor, conservative president Park Geun-Hye, who then steamrollered the project through a hasty environmental review during her last months in office as she became ensnared in a massive corruption scandal.
The deployment has also been opposed by Beijing, which fears it could undermine its own nuclear deterrent and has reacted with fury, imposing a series of measures seen as economic retaliation on the South.
Though parts of the system are already in place, Moon this month suspended further deployment.
Officially, the delay is to allow for a new, comprehensive environmental impact assessment, but analysts say the move is a strategic delay by Moon to handle the tricky diplomatic situation he inherited.
© 2017 AFP |
<gh_stars>10-100
package org.onebillion.onecourse.mainui.oc_addtakeaway;
import android.graphics.Color;
import android.graphics.PointF;
import org.onebillion.onecourse.controls.OBControl;
import org.onebillion.onecourse.controls.OBGroup;
import org.onebillion.onecourse.controls.OBLabel;
import org.onebillion.onecourse.controls.OBPath;
import org.onebillion.onecourse.mainui.generic.OC_Generic;
import org.onebillion.onecourse.mainui.generic.OC_Generic_SelectCorrectObject;
import org.onebillion.onecourse.utils.OBUtils;
import java.util.ArrayList;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * Created by pedroloureiro on 11/07/16.
 *
 * Scene 3 of the Add/Take-Away activity. Groups of objects are revealed and
 * combined on screen while the corresponding equation labels ("2 + 1 = 3")
 * appear; the child must touch the number matching the total. Demo variants
 * (demo3a/3b/3j/3k) narrate the reveal step by step.
 */
public class OC_AddTakeAway_S3 extends OC_Generic_SelectCorrectObject
{
    // Touchable number labels, keyed by their control id (e.g. "number_3").
    Map<String, OBLabel> numbers;

    // Labels making up the equation, in left-to-right display order.
    List<OBLabel> equation;

    /**
     * Returns the number label named by the event's "correctAnswer" attribute.
     */
    @Override
    public OBControl action_getCorrectAnswer ()
    {
        String correctString = action_getObjectPrefix() + "_" + eventAttributes.get("correctAnswer");
        return numbers.get(correctString);
    }

    /**
     * Builds the scene: replaces the placeholder number and equation controls
     * with labels (numbers normalised to the smallest computed font size),
     * resizes the loop outlines, and hides groups/loops until revealed.
     */
    @Override
    public void action_prepareScene (String scene, Boolean redraw)
    {
        super.action_prepareScene(scene, redraw);
        //
        if (redraw)
        {
            List<OBControl> controls = filterControls("number.*");
            //
            float smallestFontSize = 1000000000;
            numbers = new HashMap<String, OBLabel>();
            for (OBControl control : controls)
            {
                OBLabel label = action_createLabelForControl(control, 1.2f, false);
                control.hide();
                numbers.put((String) control.attributes().get("id"), label);
                if (label.fontSize() < smallestFontSize) smallestFontSize = label.fontSize();
            }
            // Normalise every number label to the smallest font size so the
            // choices all look the same.
            for (OBLabel label : numbers.values())
            {
                label.setFontSize(smallestFontSize);
                label.sizeToBoundingBox();
            }
            //
            // Equation labels carry their text and colour in the placeholder
            // control's attributes; keep the original scale for resizing.
            controls = sortedFilteredControls("label.*");
            equation = new ArrayList<>();
            for (OBControl control : controls)
            {
                Map attributes = control.attributes();
                if (attributes != null)
                {
                    String text = (String) attributes.get("text");
                    if (text != null)
                    {
                        OBLabel label = action_createLabelForControl(control, 1.0f, false);
                        String colour = (String) control.attributes().get("colour");
                        label.setColour(OBUtils.colorFromRGBString(colour));
                        label.setProperty("colour", colour);
                        equation.add(label);
                        //
                        objectDict.put(String.format("%s_label", control.attributes().get("id")), label);
                        label.setProperty("originalScale", label.scale());
                        label.hide();
                    }
                }
                control.hide();
            }
            //
            List<OBPath> loops = (List<OBPath>) (Object) filterControls("loop.*");
            for (OBPath loop : loops)
            {
                loop.sizeToBoundingBoxIncludingStroke();
            }
        }
        hideControls("group.*");
        hideControls("loop.*");
    }

    /**
     * Scales an equation label up (highlight red) or back to its original
     * size and colour, keeping it anchored at the same position.
     */
    public void action_resizeLabel(OBLabel label, Boolean increase)
    {
        lockScreen();
        float resizeFactor = (increase) ? 1.5f : 1.0f;
        PointF position = OC_Generic.copyPoint(label.position());
        float scale = (float) label.propertyValue("originalScale");
        label.setScale(scale * resizeFactor);
        label.setPosition(position);
        label.setColour((increase) ? OBUtils.colorFromRGBString("225,0,0") : OBUtils.colorFromRGBString((String) label.propertyValue("colour")));
        unlockScreen();
    }

    /** Highlights a number label in red. */
    @Override
    public void action_highlight (OBControl control) throws Exception
    {
        OBLabel label = (OBLabel) control;
        label.setColour(OBUtils.colorFromRGBString("225,0,0"));
    }

    /** Restores a number label to black. */
    @Override
    public void action_lowlight (OBControl control) throws Exception
    {
        OBLabel label = (OBLabel) control;
        label.setColour(Color.BLACK);
    }

    /**
     * Correct-answer sequence: tick, reveal the full equation inside the big
     * loop, read each equation label aloud while enlarging it, then move on.
     */
    @Override
    public void action_answerIsCorrect (OBControl target) throws Exception
    {
        gotItRightBigTick(true);
        waitForSecs(0.3);
        //
        playSfxAudio("add_object", false);
        lockScreen();
        for (OBControl item : equation)
        {
            item.show();
        }
        for (OBControl loop : filterControls("loop.*"))
        {
            loop.hide();
        }
        objectDict.get("loop_big").show();
        unlockScreen();
        waitForSecs(0.3);
        //
        // One audio clip per equation label; enlarge the label being read.
        for (int i = 0; i < getAudioForScene(currentEvent(), "CORRECT").size(); i++)
        {
            playSceneAudioIndex("CORRECT", i, false);
            action_resizeLabel(equation.get(i), true);
            waitAudio();
            waitForSecs(0.3);
            //
            action_resizeLabel(equation.get(i), false);
        }
        //
        if (audioSceneExists("FINAL"))
        {
            waitForSecs(0.3);
            playSceneAudio("FINAL", true);
        }
        else
        {
            waitForSecs(0.7);
        }
        //
        nextScene();
    }

    /** Prefix used to form the ids of the touchable answer controls. */
    @Override
    public String action_getObjectPrefix ()
    {
        return "number";
    }

    /**
     * Default demo: reveals the two groups with their loops and the first
     * equation labels, narrating each step, then waits for the child's touch.
     */
    @Override
    public void doMainXX () throws Exception
    {
        setStatus(STATUS_DOING_DEMO);
        //
        action_playNextDemoSentence(false);
        playSfxAudio("add_object", false);
        lockScreen();
        objectDict.get("group_1").show();
        objectDict.get("loop_1").show();
        equation.get(0).show();
        unlockScreen();
        //
        waitAudio();
        waitForSecs(0.3);
        //
        action_playNextDemoSentence(false);
        playSfxAudio("add_object", false);
        lockScreen();
        objectDict.get("group_2").show();
        objectDict.get("loop_2").show();
        equation.get(1).show();
        equation.get(2).show();
        unlockScreen();
        //
        waitAudio();
        waitForSecs(0.3);
        //
        doAudio(currentEvent());
        setStatus(STATUS_AWAITING_CLICK);
    }

    /**
     * Full guided demo: a pointer walks through the groups, counts the
     * objects, touches the correct number, then explains each equation
     * symbol ("+", "=") before moving to the next scene.
     */
    public void demo3a() throws Exception
    {
        setStatus(STATUS_DOING_DEMO);
        loadPointer(POINTER_MIDDLE);
        //
        action_playNextDemoSentence(false); // Look.
        OC_Generic.pointer_moveToRelativePointOnScreen(0.5f, 0.7f, 0.0f, 0.4f, true, this);
        waitAudio();
        //
        action_playNextDemoSentence(false); // Two cakes
        playSfxAudio("add_object", false);
        lockScreen();
        showControls("group_1");
        showControls("loop_1");
        equation.get(0).show();
        unlockScreen();
        //
        OC_Generic.pointer_moveToObjectByName("loop_1", -20, 0.6f, EnumSet.of(OC_Generic.Anchor.ANCHOR_BOTTOM), true, this);
        waitAudio();
        waitForSecs(0.3);
        //
        action_playNextDemoSentence(false); // add one cake.
        playSfxAudio("add_object", false);
        lockScreen();
        showControls("group_2");
        showControls("loop_2");
        equation.get(1).show();
        equation.get(2).show();
        unlockScreen();
        //
        OC_Generic.pointer_moveToObjectByName("loop_2", -10, 0.6f, EnumSet.of(OC_Generic.Anchor.ANCHOR_BOTTOM), true, this);
        waitAudio();
        waitForSecs(0.3);
        //
        action_playNextDemoSentence(false); // Now there are three cakes.
        OC_Generic.pointer_moveToObjectByName("loop_big", -15, 0.3f, EnumSet.of(OC_Generic.Anchor.ANCHOR_BOTTOM), true, this);
        waitAudio();
        waitForSecs(0.3);
        //
        OBGroup group1 = (OBGroup) objectDict.get("group_1");
        OBGroup group2 = (OBGroup) objectDict.get("group_2");
        //
        // Count the objects one by one with the pointer.
        action_playNextDemoSentence(false); // One.
        OC_Generic.pointer_moveToObject(group1.objectDict.get("obj_2"), -20, 0.6f, EnumSet.of(OC_Generic.Anchor.ANCHOR_BOTTOM), true, this);
        waitAudio();
        waitForSecs(0.3);
        //
        action_playNextDemoSentence(false); // Two.
        OC_Generic.pointer_moveToObject(group1.objectDict.get("obj_1"), -15, 0.3f, EnumSet.of(OC_Generic.Anchor.ANCHOR_BOTTOM), true, this);
        waitAudio();
        waitForSecs(0.3);
        //
        action_playNextDemoSentence(false); // Three.
        OC_Generic.pointer_moveToObject(group2, -10, 0.3f, EnumSet.of(OC_Generic.Anchor.ANCHOR_BOTTOM), true, this);
        waitAudio();
        waitForSecs(0.3);
        //
        OBControl number = numbers.get("number_3");
        action_playNextDemoSentence(false); // Touch three.
        OC_Generic.pointer_moveToObject(number, -20, 0.6f, EnumSet.of(OC_Generic.Anchor.ANCHOR_BOTTOM), true, this);
        waitAudio();
        waitForSecs(0.3);
        //
        OC_Generic.pointer_moveToObject(number, -20, 0.6f, EnumSet.of(OC_Generic.Anchor.ANCHOR_MIDDLE), true, this);
        playSfxAudio("correct", false);
        //
        lockScreen();
        action_highlight(number);
        hideControls("loop.*");
        showControls("loop_big");
        equation.get(3).show();
        equation.get(4).show();
        unlockScreen();
        //
        waitForSecs(0.3);
        //
        action_playNextDemoSentence(false); // Look how we show it
        OC_Generic.pointer_moveToObject(equation.get(2), 0, 0.6f, EnumSet.of(OC_Generic.Anchor.ANCHOR_BOTTOM), true, this);
        waitAudio();
        waitForSecs(0.3);
        //
        action_playNextDemoSentence(false); // This means ADD
        OC_Generic.pointer_moveToObject(equation.get(1), -5, 0.3f, EnumSet.of(OC_Generic.Anchor.ANCHOR_BOTTOM), true, this);
        waitAudio();
        waitForSecs(0.3);
        //
        action_playNextDemoSentence(false); // This means GIVES or EQUALS
        OC_Generic.pointer_moveToObject(equation.get(3), -5, 0.3f, EnumSet.of(OC_Generic.Anchor.ANCHOR_BOTTOM), true, this);
        waitAudio();
        waitForSecs(0.3);
        //
        // Read the whole equation, enlarging each label as it is spoken.
        for (int i = 0; i < 5; i++)
        {
            OC_Generic.pointer_moveToObject(equation.get(i), -20+i*5, (i == 0 ? 0.6f : 0.3f), EnumSet.of(OC_Generic.Anchor.ANCHOR_BOTTOM), true, this);
            action_playNextDemoSentence(false);
            action_resizeLabel(equation.get(i), true);
            waitAudio();
            action_resizeLabel(equation.get(i), false);
        }
        //
        thePointer.hide();
        waitForSecs(0.7);
        //
        nextScene();
    }

    /**
     * Short demo ("now your turn"): reveals both groups with narration and
     * then waits for the child's touch.
     */
    public void demo3b() throws Exception
    {
        setStatus(STATUS_DOING_DEMO);
        //
        action_playNextDemoSentence(true); // Now your turn.
        waitForSecs(0.3);
        //
        action_playNextDemoSentence(false); // Two cakes
        playSfxAudio("add_object", false);
        lockScreen();
        showControls("group_1");
        showControls("loop_1");
        equation.get(0).show();
        unlockScreen();
        //
        waitAudio();
        waitForSecs(0.3);
        //
        action_playNextDemoSentence(false); // add two cakes
        playSfxAudio("add_object", false);
        lockScreen();
        showControls("group_2");
        showControls("loop_2");
        equation.get(1).show();
        equation.get(2).show();
        unlockScreen();
        //
        waitAudio();
        waitForSecs(0.3);
        //
        doAudio(currentEvent());
        //
        setStatus(STATUS_AWAITING_CLICK);
    }

    /**
     * Three-group demo variant: reveals groups 1-3 with their loops and the
     * first five equation labels, then waits for the child's touch.
     */
    public void demo3j() throws Exception
    {
        setStatus(STATUS_DOING_DEMO);
        //
        action_playNextDemoSentence(false);
        playSfxAudio("add_object", false);
        lockScreen();
        showControls("group_1");
        showControls("loop_1");
        equation.get(0).show();
        unlockScreen();
        //
        waitAudio();
        waitForSecs(0.3);
        //
        action_playNextDemoSentence(false);
        playSfxAudio("add_object", false);
        lockScreen();
        showControls("group_2");
        showControls("loop_2");
        equation.get(1).show();
        equation.get(2).show();
        unlockScreen();
        //
        waitAudio();
        waitForSecs(0.3);
        //
        action_playNextDemoSentence(false);
        playSfxAudio("add_object", false);
        lockScreen();
        showControls("group_3");
        showControls("loop_3");
        equation.get(3).show();
        equation.get(4).show();
        unlockScreen();
        //
        waitAudio();
        waitForSecs(0.3);
        //
        doAudio(currentEvent());
        //
        setStatus(STATUS_AWAITING_CLICK);
    }

    /** Identical to demo3j (kept as a separate entry point for the event map). */
    public void demo3k() throws Exception
    {
        demo3j();
    }

    /**
     * Hit-tests the number labels under the given point.
     */
    public OBControl findTarget (PointF pt)
    {
        // Fixed mis-typed generics: the map holds OBLabel (an OBControl),
        // not OBPath, and the raw ArrayList plus double cast were unchecked.
        List<OBControl> values = new ArrayList<>(numbers.values());
        return finger(0, 2, values, pt);
    }
}
|
def device_serial(self):
if not self.default_serial:
devices = self.devices()
if devices:
if len(devices) == 1:
self.default_serial = list(devices.keys())[0]
else:
raise EnvironmentError(
"Multiple devices attached but default android serial not set.")
else:
raise EnvironmentError("Device not attached.")
return self.default_serial |
<filename>src/main/java/net/algorithm/answer/SumofIntegers.java<gh_stars>0
package net.algorithm.answer;
public class SumofIntegers {
    public static void main(String[] args) {
        System.out.println(new SumofIntegers().getSum(1, 99));
    }

    /**
     * Adds two integers without using the + operator.
     *
     * XOR yields the bitwise sum ignoring carries, while AND shifted left
     * yields the carries. Folding the carry back in can create new carries,
     * so the step repeats until no carry remains. Works for negative values
     * too, thanks to two's-complement wrap-around.
     */
    public int getSum(int a, int b) {
        int partialSum = a;
        int carry = b;
        while (carry != 0) {
            int nextCarry = (partialSum & carry) << 1;
            partialSum = partialSum ^ carry;
            carry = nextCarry;
        }
        return partialSum;
    }
}
|
Israeli policemen suspected of shooting dead a 10-year-old Palestinian schoolgirl in 2007 will escape prosecution after a court said that too much time had elapsed to allow a re-examination of the case.
The decision will come as a blow for the girl's parents, who have campaigned for justice for their daughter, Abir Aramin, who died after being struck in the head during a school break.
In a highly critical ruling, Israel's High Court described the police investigation into the girl's death as a "sordid affair" that had been both "sloppy" and "negligent" and ordered the state to pay the family's legal costs.
We’ll tell you what’s true. You can form your own view. From 15p (€0.18 / $0.27) a day — more exclusives, analysis and extras.
But it backed an earlier decision not to charge two border policemen allegedly involved in her death, in part because of the difficulty of conducting an investigation so long after the incident in the absence of fresh evidence.
Human rights organisation Yesh Din, which had petitioned the court to indict the policemen, expressed its dismay. "An innocent girl was shot and somebody has to take responsibility," said Haim Erlich, the NGO's director. "No justice was done."
Abir was fatally wounded in January 2007 after buying snacks with her sister and two friends during a school break in the West Bank town of Anata.
Eyewitnesses claimed that border guards, who had clashed with stone-throwing Palestinian rioters nearby, fired at the girl from a passing jeep. The police opened an investigation but closed it a short while later, arguing it was possible she had been killed by a rock and that there was insufficient evidence to proceed.
Residents said there were no clashes in that particular street, and a parallel investigation and autopsy carried out by Israeli NGOs concluded that she had been killed by a rubber bullet fired by police in the jeep.
An Israeli civil court last year agreed with the girl's family, saying there was "no debate" that Abir was shot by border guards.
We’ll tell you what’s true. You can form your own view.
At The Independent, no one tells us what to write. That’s why, in an era of political lies and Brexit bias, more readers are turning to an independent source. Subscribe from just 15p a day for extra exclusives, events and ebooks – all with no ads.
Subscribe now |
Photo: Stina Järperud
She fled for her life after getting death threats in Syria. Now journalist Rowa Alkhatib has landed a plum job as a talk-show host with Sweden’s national radio broadcaster. She tells The Local Voices how she’s now helping quench Arabic speakers’ thirst for knowledge about their new home.
Back in Syria Rowa Alkhatib worked as a presenter for a range of television and radio networks, but threats from radicals and the Assad regime forced her to pack her bags, she says.
Like so many others in Syria, she made her way to Turkey, where she found radio work and lived for almost a year and a half. She didn’t feel safe there, however.
“After a few Syrian Journalists were assassinated in Turkey, I thought it was better to leave. So I fled to Sweden.”
Arrival in Sweden - what next?
She was housed at an asylum centre near Gothenburg in August 2014. The journey sapped her strength and her nerves were shot, but she got a lot of help.
“I am very grateful to this country,” she says.
Once she was back on an even keel she started looking for work with Swedish media companies - without any immediate success.
“The answers were shocking. They told me it wouldn’t be possible to get a job before mastering Swedish, despite my advanced skills in English.”
But she was equal to the challenge. She moved to Katrineholm to be closer to the capital’s media hub, and she set about learning the language.
Back in journalism - from springboard to exciting new job
Soon she was taken on as a language trainee with the local Katrineholms-Kuriren newspaper, “mostly translating news from Swedish and presenting it in Arabic”. This helped put her career back on track.
A production company learned of her presenting prowess and asked if she would be interested in co-hosting an Arabic radio talk show with the Syrian comedian and YouTube star Mahmoud Bitar. She jumped at the chance.
Mahmoud Bitar and Rowa Alkhatib. Photo: Helene Almqvist/Sveriges Radio
The twenty-minute show starts by giving listeners an insight into the lives and challenges faced by newcomers to Sweden.
It also features a comedy sketch. In the first show Alkhatib hosted, the comic Bitar “was looking for his dad’s buddy, only to discover he was the Swedish king.”
With just two shows under her belt she says it’s too early to gauge its success.
Better integration through more media in Arabic
“But in general I know that the Arabic community in Sweden is always thirsting for news about the country,” she says.
“People want to stay up to date, especially those who are not yet highly skilled in Swedish.”
Helping to bridge Arabic speakers’ knowledge gap in Sweden is one of the things that motivates her most in her professional life, says Alkhatib.
“I think it’s my role and my duty to help people with their need for ‘uplifting’ news, and by that I help with integration.”
She’s pleased too that Swedish media are reaching out to the country’s minorities, “and I deeply believe there has to be more.”
Far from having an alienating effect, Arabic language programming can help newcomers quickly get up to speed with their surroundings while they are learning Swedish, she says.
“Most international media publish in Arabic nowadays, and in Sweden it has become almost the second spoken language nationwide, so why not?
“We really wish to be part of this society, a positive, sparkling force that adds more than it takes – we don’t want to be a negative group that the Swedes regret welcoming.”
Photo: Helene Almqvist/Sveriges Radio |
def number_of_states_in_band(self, band=1, vibgen_approx=None,
Nvib=None, vibenergy_cutoff=None):
nret = 0
for state in self.allstates(mult=band, mode="EQ", save_indices=False,
vibgen_approx=vibgen_approx, Nvib=Nvib,
vibenergy_cutoff=vibenergy_cutoff):
nret += 1
return nret |
/**
 * Created by vincent on 16-7-20.
 * Copyright @ 2013-2016 Platon AI. All rights reserved
 *
 * Tests for command line parsing: JCommander's handling of quoted, repeated
 * options, and the WeightedKeywordsConverter's "keyword^weight" syntax.
 */
public class TestJCommander {
    // NOTE(review): unused in the tests below — confirm it is still needed.
    private ImmutableConfig conf = new ImmutableConfig();

    /**
     * Verifies that quoted arguments survive both direct JCommander parsing
     * and PulsarOptions' own splitting of a single command line string.
     */
    @Test
    public void quoted() {
        // Local command object: collects repeated -instance/-ins values.
        class Cmd {
            @Parameter(names = {"-instance", "-ins"}, required = true, description = "Instance ID")
            private List<String> instances = new LinkedList<>();
        }
        Cmd cmd = new Cmd();
        // build() parses the args supplied to the builder into cmd.
        JCommander.newBuilder().addObject(cmd)
                .args(new String[] {"-ins", "\"string one\"","-ins", "\"string two\""})
                .build();
        assertEquals(cmd.instances.size(), 2);

        // The same command line as a single string, split by PulsarOptions;
        // the quoted phrases must stay intact as single tokens.
        String args = "-ins \"string one\" -ins \"string two\"";
        String[] argv = PulsarOptions.Companion.split(args);
        Cmd cmd2 = new Cmd();
        JCommander.newBuilder().addObject(cmd2).args(argv).build();
        assertEquals(cmd2.instances.size(), 2);
        // System.out.println(String.join(" | ", argv));
    }

    /**
     * Verifies parsing of "keyword^weight" lists, including an empty weight
     * ("e^"), a bare "^" and a trailing comma (all defaulting to 1.0).
     */
    @Test
    public void testWeightedKeywordsConverter() {
        WeightedKeywordsConverter converter = new WeightedKeywordsConverter();
        Map<String, Double> answer = new HashMap<>();
        answer.put("a", 1.1);
        answer.put("b", 2.0);
        answer.put("c", 0.2);
        answer.put("d", 1.0);
        answer.put("e^", 1.0);
        answer.put("^1", 1.0);
        answer.put("^", 1.0);
        assertEquals("Not match", answer, converter.convert("a^1.1, b^2.0,c^0.2,d,e^,^1,^,"));
    }
}
Brexit campaign leader Nigel Farage has called out billionaire open borders campaigner George Soros and his “reliable friends” for collusion on a grand scale.
George Soros has spent billions in the EU to undermine the nation state. This is where the real international political collusion is. pic.twitter.com/ANXOII7SFY — Nigel Farage (@Nigel_Farage) November 14, 2017
Speaking in a debate on the Paradise Papers in the European Parliament, the UKIP MEP pointed out that the financier had funded the exposé.
“I say this at a time when the use of money and the influence it may have had on the Brexit result or the Trump election has reached a level of virtual hysteria,” he said.
“Just last week the Electoral Commission in the UK launched an investigation to find out whether the Leave campaign took offshore money or Russian money.
Read more |
/* eslint-disable @typescript-eslint/no-empty-interface */
/**
 * Server packet schemas.
 *
 * Maps each SERVER_PACKETS id to an ordered list of [fieldName, dataType]
 * pairs; array-valued fields use [fieldName, arrayType, subSchema] with a
 * nested field list. Field ORDER matters: it is the binary wire order used
 * when encoding/decoding packets, so entries must not be reordered.
 */
import SERVER_PACKETS from '../packets/server';
import { DATA_TYPES } from '../types/data';

export default {
  [SERVER_PACKETS.LOGIN]: [
    ['success', DATA_TYPES.boolean],
    ['id', DATA_TYPES.uint16],
    ['team', DATA_TYPES.uint16],
    ['clock', DATA_TYPES.uint32],
    ['token', DATA_TYPES.text],
    ['type', DATA_TYPES.uint8],
    ['room', DATA_TYPES.text],
    [
      'players',
      DATA_TYPES.array,
      [
        ['id', DATA_TYPES.uint16],
        ['status', DATA_TYPES.uint8],
        ['level', DATA_TYPES.uint8],
        ['name', DATA_TYPES.text],
        ['type', DATA_TYPES.uint8],
        ['team', DATA_TYPES.uint16],
        ['posX', DATA_TYPES.coordx],
        ['posY', DATA_TYPES.coordy],
        ['rot', DATA_TYPES.rotation],
        ['flag', DATA_TYPES.uint16],
        ['upgrades', DATA_TYPES.uint8],
      ],
    ],
    ['serverConfiguration', DATA_TYPES.textbig],
    ['bots', DATA_TYPES.array, [['id', DATA_TYPES.uint16]]],
  ],
  [SERVER_PACKETS.BACKUP]: [],
  [SERVER_PACKETS.PING]: [
    ['clock', DATA_TYPES.uint32],
    ['num', DATA_TYPES.uint32],
  ],
  [SERVER_PACKETS.PING_RESULT]: [
    ['ping', DATA_TYPES.uint16],
    ['playerstotal', DATA_TYPES.uint32],
    ['playersgame', DATA_TYPES.uint32],
  ],
  [SERVER_PACKETS.ACK]: [],
  [SERVER_PACKETS.ERROR]: [['error', DATA_TYPES.uint8]],
  [SERVER_PACKETS.COMMAND_REPLY]: [
    ['type', DATA_TYPES.uint8],
    ['text', DATA_TYPES.textbig],
  ],
  // Player lifecycle and state updates.
  [SERVER_PACKETS.PLAYER_NEW]: [
    ['id', DATA_TYPES.uint16],
    ['status', DATA_TYPES.uint8],
    ['name', DATA_TYPES.text],
    ['type', DATA_TYPES.uint8],
    ['team', DATA_TYPES.uint16],
    ['posX', DATA_TYPES.coordx],
    ['posY', DATA_TYPES.coordy],
    ['rot', DATA_TYPES.rotation],
    ['flag', DATA_TYPES.uint16],
    ['upgrades', DATA_TYPES.uint8],
    ['isBot', DATA_TYPES.boolean],
  ],
  [SERVER_PACKETS.PLAYER_LEAVE]: [['id', DATA_TYPES.uint16]],
  [SERVER_PACKETS.PLAYER_UPDATE]: [
    ['clock', DATA_TYPES.uint32],
    ['id', DATA_TYPES.uint16],
    ['keystate', DATA_TYPES.uint8],
    ['upgrades', DATA_TYPES.uint8],
    ['posX', DATA_TYPES.coord24],
    ['posY', DATA_TYPES.coord24],
    ['rot', DATA_TYPES.rotation],
    ['speedX', DATA_TYPES.speed],
    ['speedY', DATA_TYPES.speed],
  ],
  [SERVER_PACKETS.PLAYER_FIRE]: [
    ['clock', DATA_TYPES.uint32],
    ['id', DATA_TYPES.uint16],
    ['energy', DATA_TYPES.healthenergy],
    ['energyRegen', DATA_TYPES.regen],
    [
      'projectiles',
      DATA_TYPES.arraysmall,
      [
        ['id', DATA_TYPES.uint16],
        ['type', DATA_TYPES.uint8],
        ['posX', DATA_TYPES.coordx],
        ['posY', DATA_TYPES.coordy],
        ['speedX', DATA_TYPES.speed],
        ['speedY', DATA_TYPES.speed],
        ['accelX', DATA_TYPES.accel],
        ['accelY', DATA_TYPES.accel],
        ['maxSpeed', DATA_TYPES.speed],
      ],
    ],
  ],
  // [packet.PLAYER_SAY]: [['id', types.uint16], ['text', types.text]],
  [SERVER_PACKETS.PLAYER_RESPAWN]: [
    ['id', DATA_TYPES.uint16],
    ['posX', DATA_TYPES.coord24],
    ['posY', DATA_TYPES.coord24],
    ['rot', DATA_TYPES.rotation],
    ['upgrades', DATA_TYPES.uint8],
  ],
  [SERVER_PACKETS.PLAYER_FLAG]: [
    ['id', DATA_TYPES.uint16],
    ['flag', DATA_TYPES.uint16],
  ],
  [SERVER_PACKETS.PLAYER_HIT]: [
    ['id', DATA_TYPES.uint16],
    ['type', DATA_TYPES.uint8],
    ['posX', DATA_TYPES.coordx],
    ['posY', DATA_TYPES.coordy],
    ['owner', DATA_TYPES.uint16],
    [
      'players',
      DATA_TYPES.arraysmall,
      [
        ['id', DATA_TYPES.uint16],
        ['health', DATA_TYPES.healthenergy],
        ['healthRegen', DATA_TYPES.regen],
      ],
    ],
  ],
  [SERVER_PACKETS.PLAYER_KILL]: [
    ['id', DATA_TYPES.uint16],
    ['killer', DATA_TYPES.uint16],
    ['posX', DATA_TYPES.coordx],
    ['posY', DATA_TYPES.coordy],
  ],
  [SERVER_PACKETS.PLAYER_UPGRADE]: [
    ['upgrades', DATA_TYPES.uint16],
    ['type', DATA_TYPES.uint8],
    ['speed', DATA_TYPES.uint8],
    ['defense', DATA_TYPES.uint8],
    ['energy', DATA_TYPES.uint8],
    ['missile', DATA_TYPES.uint8],
  ],
  [SERVER_PACKETS.PLAYER_TYPE]: [
    ['id', DATA_TYPES.uint16],
    ['type', DATA_TYPES.uint8],
  ],
  [SERVER_PACKETS.PLAYER_POWERUP]: [
    ['type', DATA_TYPES.uint8],
    ['duration', DATA_TYPES.uint32],
  ],
  [SERVER_PACKETS.PLAYER_LEVEL]: [
    ['id', DATA_TYPES.uint16],
    ['type', DATA_TYPES.uint8],
    ['level', DATA_TYPES.uint8],
  ],
  [SERVER_PACKETS.PLAYER_RETEAM]: [
    [
      'players',
      DATA_TYPES.array,
      [
        ['id', DATA_TYPES.uint16],
        ['team', DATA_TYPES.uint16],
      ],
    ],
  ],
  // Game-wide state.
  [SERVER_PACKETS.GAME_FLAG]: [
    ['type', DATA_TYPES.uint8],
    ['flag', DATA_TYPES.uint8],
    ['id', DATA_TYPES.uint16],
    ['posX', DATA_TYPES.coord24],
    ['posY', DATA_TYPES.coord24],
    ['blueteam', DATA_TYPES.uint8],
    ['redteam', DATA_TYPES.uint8],
  ],
  [SERVER_PACKETS.GAME_SPECTATE]: [['id', DATA_TYPES.uint16]],
  [SERVER_PACKETS.GAME_PLAYERSALIVE]: [['players', DATA_TYPES.uint16]],
  [SERVER_PACKETS.GAME_FIREWALL]: [
    ['type', DATA_TYPES.uint8],
    ['status', DATA_TYPES.uint8],
    ['posX', DATA_TYPES.coordx],
    ['posY', DATA_TYPES.coordy],
    ['radius', DATA_TYPES.float32],
    ['speed', DATA_TYPES.float32],
  ],
  // In-game events.
  [SERVER_PACKETS.EVENT_REPEL]: [
    ['clock', DATA_TYPES.uint32],
    ['id', DATA_TYPES.uint16],
    ['posX', DATA_TYPES.coordx],
    ['posY', DATA_TYPES.coordy],
    ['rot', DATA_TYPES.rotation],
    ['speedX', DATA_TYPES.speed],
    ['speedY', DATA_TYPES.speed],
    ['energy', DATA_TYPES.healthenergy],
    ['energyRegen', DATA_TYPES.regen],
    [
      'players',
      DATA_TYPES.arraysmall,
      [
        ['id', DATA_TYPES.uint16],
        ['keystate', DATA_TYPES.uint8],
        ['posX', DATA_TYPES.coordx],
        ['posY', DATA_TYPES.coordy],
        ['rot', DATA_TYPES.rotation],
        ['speedX', DATA_TYPES.speed],
        ['speedY', DATA_TYPES.speed],
        ['energy', DATA_TYPES.healthenergy],
        ['energyRegen', DATA_TYPES.regen],
        ['playerHealth', DATA_TYPES.healthenergy],
        ['playerHealthRegen', DATA_TYPES.regen],
      ],
    ],
    [
      'mobs',
      DATA_TYPES.arraysmall,
      [
        ['id', DATA_TYPES.uint16],
        ['type', DATA_TYPES.uint8],
        ['posX', DATA_TYPES.coordx],
        ['posY', DATA_TYPES.coordy],
        ['speedX', DATA_TYPES.speed],
        ['speedY', DATA_TYPES.speed],
        ['accelX', DATA_TYPES.accel],
        ['accelY', DATA_TYPES.accel],
        ['maxSpeed', DATA_TYPES.speed],
      ],
    ],
  ],
  [SERVER_PACKETS.EVENT_BOOST]: [
    ['clock', DATA_TYPES.uint32],
    ['id', DATA_TYPES.uint16],
    ['boost', DATA_TYPES.boolean],
    ['posX', DATA_TYPES.coord24],
    ['posY', DATA_TYPES.coord24],
    ['rot', DATA_TYPES.rotation],
    ['speedX', DATA_TYPES.speed],
    ['speedY', DATA_TYPES.speed],
    ['energy', DATA_TYPES.healthenergy],
    ['energyRegen', DATA_TYPES.regen],
  ],
  [SERVER_PACKETS.EVENT_BOUNCE]: [
    ['clock', DATA_TYPES.uint32],
    ['id', DATA_TYPES.uint16],
    ['keystate', DATA_TYPES.uint8],
    ['posX', DATA_TYPES.coord24],
    ['posY', DATA_TYPES.coord24],
    ['rot', DATA_TYPES.rotation],
    ['speedX', DATA_TYPES.speed],
    ['speedY', DATA_TYPES.speed],
  ],
  [SERVER_PACKETS.EVENT_STEALTH]: [
    ['id', DATA_TYPES.uint16],
    ['state', DATA_TYPES.boolean],
    ['energy', DATA_TYPES.healthenergy],
    ['energyRegen', DATA_TYPES.regen],
  ],
  [SERVER_PACKETS.EVENT_LEAVEHORIZON]: [
    ['type', DATA_TYPES.uint8],
    ['id', DATA_TYPES.uint16],
  ],
  // Mobs (projectiles, powerups, etc.).
  [SERVER_PACKETS.MOB_UPDATE]: [
    ['clock', DATA_TYPES.uint32],
    ['id', DATA_TYPES.uint16],
    ['type', DATA_TYPES.uint8],
    ['posX', DATA_TYPES.coordx],
    ['posY', DATA_TYPES.coordy],
    ['speedX', DATA_TYPES.speed],
    ['speedY', DATA_TYPES.speed],
    ['accelX', DATA_TYPES.accel],
    ['accelY', DATA_TYPES.accel],
    ['maxSpeed', DATA_TYPES.speed],
    ['ownerId', DATA_TYPES.uint16],
  ],
  [SERVER_PACKETS.MOB_UPDATE_STATIONARY]: [
    ['id', DATA_TYPES.uint16],
    ['type', DATA_TYPES.uint8],
    ['posX', DATA_TYPES.float32],
    ['posY', DATA_TYPES.float32],
  ],
  [SERVER_PACKETS.MOB_DESPAWN]: [
    ['id', DATA_TYPES.uint16],
    ['type', DATA_TYPES.uint8],
  ],
  [SERVER_PACKETS.MOB_DESPAWN_COORDS]: [
    ['id', DATA_TYPES.uint16],
    ['type', DATA_TYPES.uint8],
    ['posX', DATA_TYPES.coordx],
    ['posY', DATA_TYPES.coordy],
  ],
  // Score updates and leaderboards.
  [SERVER_PACKETS.SCORE_UPDATE]: [
    ['id', DATA_TYPES.uint16],
    ['score', DATA_TYPES.uint32],
    ['earnings', DATA_TYPES.uint32],
    ['upgrades', DATA_TYPES.uint16],
    ['totalkills', DATA_TYPES.uint32],
    ['totaldeaths', DATA_TYPES.uint32],
  ],
  [SERVER_PACKETS.SCORE_BOARD]: [
    [
      'data',
      DATA_TYPES.array,
      [
        ['id', DATA_TYPES.uint16],
        ['score', DATA_TYPES.uint32],
        ['level', DATA_TYPES.uint8],
      ],
    ],
    [
      'rankings',
      DATA_TYPES.array,
      [
        ['id', DATA_TYPES.uint16],
        ['x', DATA_TYPES.uint8],
        ['y', DATA_TYPES.uint8],
      ],
    ],
  ],
  [SERVER_PACKETS.SCORE_DETAILED]: [
    [
      'scores',
      DATA_TYPES.array,
      [
        ['id', DATA_TYPES.uint16],
        ['level', DATA_TYPES.uint8],
        ['score', DATA_TYPES.uint32],
        ['kills', DATA_TYPES.uint16],
        ['deaths', DATA_TYPES.uint16],
        ['damage', DATA_TYPES.float32],
        ['ping', DATA_TYPES.uint16],
      ],
    ],
  ],
  [SERVER_PACKETS.SCORE_DETAILED_CTF]: [
    [
      'scores',
      DATA_TYPES.array,
      [
        ['id', DATA_TYPES.uint16],
        ['level', DATA_TYPES.uint8],
        ['captures', DATA_TYPES.uint16],
        ['score', DATA_TYPES.uint32],
        ['kills', DATA_TYPES.uint16],
        ['deaths', DATA_TYPES.uint16],
        ['damage', DATA_TYPES.float32],
        ['ping', DATA_TYPES.uint16],
      ],
    ],
  ],
  [SERVER_PACKETS.SCORE_DETAILED_BTR]: [
    [
      'scores',
      DATA_TYPES.array,
      [
        ['id', DATA_TYPES.uint16],
        ['level', DATA_TYPES.uint8],
        ['alive', DATA_TYPES.boolean],
        ['wins', DATA_TYPES.uint16],
        ['score', DATA_TYPES.uint32],
        ['kills', DATA_TYPES.uint16],
        ['deaths', DATA_TYPES.uint16],
        ['damage', DATA_TYPES.float32],
        ['ping', DATA_TYPES.uint16],
      ],
    ],
  ],
  // Chat.
  [SERVER_PACKETS.CHAT_TEAM]: [
    ['id', DATA_TYPES.uint16],
    ['text', DATA_TYPES.text],
  ],
  [SERVER_PACKETS.CHAT_PUBLIC]: [
    ['id', DATA_TYPES.uint16],
    ['text', DATA_TYPES.text],
  ],
  [SERVER_PACKETS.CHAT_SAY]: [
    ['id', DATA_TYPES.uint16],
    ['text', DATA_TYPES.text],
  ],
  [SERVER_PACKETS.CHAT_WHISPER]: [
    ['from', DATA_TYPES.uint16],
    ['to', DATA_TYPES.uint16],
    ['text', DATA_TYPES.text],
  ],
  [SERVER_PACKETS.CHAT_VOTEMUTEPASSED]: [['id', DATA_TYPES.uint16]],
  [SERVER_PACKETS.CHAT_VOTEMUTED]: [],
  [SERVER_PACKETS.SERVER_MESSAGE]: [
    ['type', DATA_TYPES.uint8],
    ['duration', DATA_TYPES.uint32],
    ['text', DATA_TYPES.textbig],
  ],
  [SERVER_PACKETS.SERVER_CUSTOM]: [
    ['type', DATA_TYPES.uint8],
    ['data', DATA_TYPES.textbig],
  ],
  // Server-to-server sync.
  [SERVER_PACKETS.SYNC_AUTH]: [['challenge', DATA_TYPES.text]],
  [SERVER_PACKETS.SYNC_INIT]: [
    ['sequence', DATA_TYPES.uint32],
    ['timestamp', DATA_TYPES.float64],
  ],
  [SERVER_PACKETS.SYNC_SUBSCRIBE]: [
    ['active', DATA_TYPES.boolean],
    ['type', DATA_TYPES.text],
    ['id', DATA_TYPES.text],
  ],
  [SERVER_PACKETS.SYNC_UPDATE]: [
    ['sequence', DATA_TYPES.uint32],
    ['type', DATA_TYPES.text],
    ['id', DATA_TYPES.text],
    ['data', DATA_TYPES.textbig],
    ['timestamp', DATA_TYPES.float64],
    ['event', DATA_TYPES.textbig],
  ],
};
<gh_stars>10-100
#!/usr/bin/env python
#
# Copyright (C) 2018 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Retrieves the counts of how many objects have a particular field null on all running processes.
Prints a json map from pid -> (log-tag, field-name, null-count, total-count).
"""
import adb
import argparse
import concurrent.futures
import itertools
import json
import logging
import os
import os.path
import signal
import subprocess
import time
def main():
  """Push the libfieldnull agent to a device, attach it to running dalvik
  processes, and print a JSON map of
  pid -> (log-tag, field-name, null-count, total-count).
  """
  parser = argparse.ArgumentParser(description="Get counts of null fields from a device.")
  parser.add_argument("-S", "--serial", metavar="SERIAL", type=str,
                      required=False,
                      default=os.environ.get("ANDROID_SERIAL", None),
                      help="Android serial to use. Defaults to ANDROID_SERIAL")
  parser.add_argument("-p", "--pid", required=False,
                      default=[], action="append",
                      help="Specific pids to check. By default checks all running dalvik processes")
  # Default agent locations inside an Android build output tree ($OUT).
  has_out = "OUT" in os.environ
  def_32 = os.path.join(os.environ.get("OUT", ""), "system", "lib", "libfieldnull.so")
  def_64 = os.path.join(os.environ.get("OUT", ""), "system", "lib64", "libfieldnull.so")
  has_32 = has_out and os.path.exists(def_32)
  has_64 = has_out and os.path.exists(def_64)
  def pushable_lib(name):
    # argparse type-validator: an agent path must be an existing file.
    if os.path.isfile(name):
      return name
    else:
      raise argparse.ArgumentTypeError(name + " is not a file!")
  parser.add_argument('--lib32', type=pushable_lib,
                      required=not has_32,
                      action='store',
                      default=def_32,
                      help="Location of 32 bit agent to push")
  parser.add_argument('--lib64', type=pushable_lib,
                      required=not has_64,
                      action='store',
                      default=def_64 if has_64 else None,
                      help="Location of 64 bit agent to push")
  parser.add_argument("fields", nargs="+",
                      help="fields to check")
  out = parser.parse_args()
  device = adb.device.get_device(out.serial)
  print("getting root")
  device.root()
  print("Disabling selinux")
  device.shell("setenforce 0".split())
  print("Pushing libraries")
  # Push each agent under a fresh mktemp-generated name on the device.
  lib32 = device.shell("mktemp".split())[0].strip()
  lib64 = device.shell("mktemp".split())[0].strip()
  print(out.lib32 + " -> " + lib32)
  device.push(out.lib32, lib32)
  print(out.lib64 + " -> " + lib64)
  device.push(out.lib64, lib64)
  # Agent argument format passed to attach-agent: '<agent-path>=<field,field,...>'.
  cmd32 = "'{}={}'".format(lib32, ','.join(out.fields))
  cmd64 = "'{}={}'".format(lib64, ','.join(out.fields))
  if len(out.pid) == 0:
    # No explicit pids: discover all debuggable (dalvik) processes via adb jdwp.
    print("Getting jdwp pids")
    new_env = dict(os.environ)
    new_env["ANDROID_SERIAL"] = device.serial
    p = subprocess.Popen([device.adb_path, "jdwp"], env=new_env, stdout=subprocess.PIPE)
    # ADB jdwp doesn't ever exit so just kill it after 1 second to get a list of pids.
    with concurrent.futures.ProcessPoolExecutor() as ppe:
      ppe.submit(kill_it, p.pid).result()
    out.pid = p.communicate()[0].strip().split()
    p.wait()
    print(out.pid)
  print("Clearing logcat")
  device.shell("logcat -c".split())
  final = {}
  print("Getting info from every process dumped to logcat")
  for p in out.pid:
    res = check_single_process(p, device, cmd32, cmd64);
    if res is not None:
      final[p] = res
  # Clean up the agents pushed earlier.
  device.shell('rm {}'.format(lib32).split())
  device.shell('rm {}'.format(lib64).split())
  print(json.dumps(final, indent=2))
def kill_it(p):
  """Send SIGINT to process `p` after a one-second grace period.

  Used to stop the never-terminating `adb jdwp` listener so its buffered
  output (the pid list) can be collected by the caller.
  """
  time.sleep(1)
  os.kill(p, signal.SIGINT)
def check_single_process(pid, device, bit32, bit64):
  """Attach the agent to one process and collect its field-null counts.

  Attaches both agents (the wrong bitness fails silently), triggers a dump
  via SIGQUIT (kill -3), then scans logcat for the agent's tab-separated
  output lines. Returns a list of (log-tag, field-name, null-count,
  total-count) tuples, or None if nothing was found or the shell failed.
  """
  try:
    # Just try attaching both 32 and 64 bit. Wrong one will fail silently.
    device.shell(['am', 'attach-agent', str(pid), bit32])
    device.shell(['am', 'attach-agent', str(pid), bit64])
    time.sleep(0.5)
    device.shell('kill -3 {}'.format(pid).split())
    time.sleep(0.5)
    out = []
    all_fields = []
    # Filter logcat to lines matching the agent's "\ttag\tfield\tnull\ttotal" shape.
    lc_cmd = "logcat -d -b main --pid={} -e '^\\t.*\\t[0-9]*\\t[0-9]*$'".format(pid).split(' ')
    for l in device.shell(lc_cmd)[0].strip().split('\n'):
      # first 4 are just date and other useless data.
      # NOTE(review): the slice below drops 5 tokens, not 4 (date, time,
      # pid, tid, level) -- the comment above undercounts; verify against
      # actual logcat -v output format.
      data = l.strip().split()[5:]
      if len(data) < 4:
        continue
      # If we run multiple times many copies of the agent will be attached. Just choose one of any
      # copies for each field.
      field = data[1]
      if field not in all_fields:
        out.append((str(data[0]), str(data[1]), int(data[2]), int(data[3])))
        all_fields.append(field)
    if len(out) != 0:
      print("pid: " + pid + " -> " + str(out))
      return out
    else:
      return None
  except adb.device.ShellError as e:
    # Process may have died or rejected the shell command; skip it.
    print("failed on pid " + repr(pid) + " because " + repr(e))
    return None
# Entry point when executed as a script (not when imported).
if __name__ == '__main__':
  main()
|
/**
 * @brief Constructor.
 *
 * @param line_number The line number of "PARTITION CLAUSE" in the SQL statement.
 * @param column_number The column number of "PARTITION CLAUSE" in the SQL statement.
 * @param partition_type The type of partitioning to be made - Hash or Range
 * @param attribute_name_list A list of attributes of the relation based on which the partitioning has to be done.
 * @param num_partitions The number of partitions to be created.
 */
ParsePartitionClause(const int line_number,
                     const int column_number,
                     ParseString *partition_type,
                     PtrList<ParseString> *attribute_name_list,
                     NumericParseLiteralValue *num_partitions)
    : ParseTreeNode(line_number, column_number),
      partition_type_(partition_type),
      attribute_name_list_(attribute_name_list),
      num_partitions_(num_partitions) {
  // NOTE(review): raw pointers are stored as-is; presumably this node takes
  // ownership of them (common for parse-tree nodes) -- confirm against the
  // member declarations / ParseTreeNode conventions.
}
    /// Read File To Buffer, Return File Length
    ///
    /// Walks the file's FAT cluster chain and copies each cluster into the
    /// matching slice of `buf`. Returns `FileError::BufTooSmall` when `buf`
    /// cannot hold the whole file; otherwise returns the file length.
    pub fn read(&self, buf: &mut [u8]) -> Result<usize, FileError> {
        // NOTE(review): unwrap assumes the directory entry always carries a
        // length -- confirm this holds for every entry kind reaching here.
        let length = self.detail.length().unwrap();
        let spc = self.bpb.sector_per_cluster_usize();
        let cluster_size = spc * BUFFER_SIZE;
        let mut number_of_blocks = spc;
        if buf.len() < length { return Err(FileError::BufTooSmall); }
        let mut index = 0;
        // Iterator::map is lazy; the trailing `.last()` drives the whole walk.
        self.fat.map(|f| {
            let offset = self.bpb.offset(f.current_cluster);
            // Final (partial) cluster: shrink the read to the sectors needed.
            let end = if (length - index) < cluster_size {
                let bytes_left = length % cluster_size;
                number_of_blocks = get_needed_sector(bytes_left);
                index + bytes_left
            } else {
                index + cluster_size
            };
            self.device.read(&mut buf[index..end],
                             offset,
                             number_of_blocks).unwrap();
            index += cluster_size;
        }).last();
        Ok(length)
    }
<filename>src/org/usfirst/frc/team3539/robot/commands/DisableSoftLimits.java
package org.usfirst.frc.team3539.robot.commands;
import org.usfirst.frc.team3539.robot.Robot;
import edu.wpi.first.wpilibj.command.Command;
/**
 * Disables softlimits when a button on the dashboard is pressed
 */
public class DisableSoftLimits extends Command
{
	public DisableSoftLimits()
	{
	}
	// Turn soft limits off for both subsystems while this command is active.
	protected void initialize()
	{
		Robot.pitch.shouldSoftLimit(false);
		Robot.elevator.shouldSoftLimit(false);
	}
	protected void execute()
	{
	}
	// Never finishes on its own; runs until interrupted or cancelled.
	protected boolean isFinished()
	{
		return false;
	}
	// Re-enable the elevator soft limit and re-zero encoders at the current position.
	protected void end()
	{
		Robot.elevator.shouldSoftLimit(true);
		Robot.elevator.zeroEncoders();
		Robot.pitch.zeroEncoder();
		Robot.pitch.enc = 0;
		Robot.pitch.setSetpointPitch(Robot.pitch.enc);
		// NOTE(review): zeroEncoder() is called a second time here; this may be
		// redundant -- confirm whether setSetpointPitch moves the encoder value.
		Robot.pitch.zeroEncoder();
	}
	protected void interrupted()
	{
		end();
	}
}
|
def query(self, _id, conditions={}):
j = self.get_newest(_id)
if j == None:
return json.dumps("Model not found")
try:
domains=j.domains
return json_convert(j.query(**conditions))
except:
return json.dumps("You have not yet fully specified your model"); |
    def _start(self) -> None:
        """Arm the underlying one-shot timer and record the start time.

        Returns immediately after starting the timer; the callback fires
        later on the timer's own thread. (Annotation corrected from
        ``NoReturn``: this method does return normally.)
        """
        self._timer = threading.Timer(self.period, self._run_callback)
        self._timer.name = self.name
        self._timer.start()
        # NOTE(review): assumes `time` is `time.time` imported at module
        # level (`from time import time`) -- confirm.
        self._timestamp = time()
<gh_stars>0
extern crate cast;
#[macro_use]
extern crate criterion;
extern crate criterion_stats as stats;
extern crate rand;
mod common_bench;
// Generates one Criterion benchmark per listed statistic: each builds a
// Sample over a shared random vector and times `s.$stat()`.
macro_rules! stat {
    ($ty:ident <- $($stat:ident),+) => {
        $(
            fn $stat(c: &mut Criterion) {
                let v = ::common_bench::vec::<$ty>();
                c.bench_function(
                    &format!("stat_{}_{}", stringify!($ty), stringify!($stat)),
                    move |b| {
                        let s = ::stats::univariate::Sample::new(&v);
                        b.iter(|| s.$stat())
                    });
            }
        )+
    }
}
// Same as `stat!`, but for statistics taking an optional auxiliary value;
// benchmarks the slow path by passing `None`.
macro_rules! stat_none {
    ($ty:ident <- $($stat:ident),+) => {
        $(
            fn $stat(c: &mut Criterion) {
                let v = ::common_bench::vec::<$ty>();
                c.bench_function(
                    &format!("stat_none_{}_{}", stringify!($ty), stringify!($stat)),
                    move |b| {
                        let s = ::stats::univariate::Sample::new(&v);
                        b.iter(|| s.$stat(None))
                    });
            }
        )+
    }
}
// Benchmarks the fast path: precomputes the auxiliary statistic once
// (outside the timed loop) and passes it as `Some(..)`.
macro_rules! fast_stat {
    ($ty:ident <- $(($stat:ident, $aux_stat:ident)),+) => {
        $(
            fn $stat(c: &mut Criterion) {
                let v = ::common_bench::vec::<$ty>();
                c.bench_function(
                    &format!("fast_stat_{}_{}", stringify!($ty), stringify!($stat)),
                    move |b| {
                        let s = ::stats::univariate::Sample::new(&v);
                        let aux = Some(s.$aux_stat());
                        b.iter(|| s.$stat(aux))
                    });
            }
        )+
    }
}
// Instantiates a full benchmark module for one numeric type: an extension
// trait for percentile-derived stats, the plain/none/fast benchmark groups,
// and the criterion_group! registrations.
macro_rules! bench {
    ($ty:ident) => {
        pub mod $ty {
            // Derives iqr/median from a single percentiles computation.
            pub trait SampleExt {
                fn base_percentiles(&self) -> ::stats::univariate::Percentiles<$ty>
                where
                    usize: ::cast::From<$ty, Output = Result<usize, ::cast::Error>>;
                fn iqr(&self) -> $ty
                where
                    usize: ::cast::From<$ty, Output = Result<usize, ::cast::Error>>,
                {
                    self.base_percentiles().iqr()
                }
                fn median(&self) -> $ty
                where
                    usize: ::cast::From<$ty, Output = Result<usize, ::cast::Error>>,
                {
                    self.base_percentiles().median()
                }
            }
            impl SampleExt for ::stats::univariate::Sample<$ty> {
                fn base_percentiles(&self) -> ::stats::univariate::Percentiles<$ty>
                where
                    usize: ::cast::From<$ty, Output = Result<usize, ::cast::Error>>,
                {
                    self.percentiles()
                }
            }
            use criterion::Criterion;
            stat!(
                $ty <- iqr,
                max,
                mean,
                median,
                median_abs_dev_pct,
                min,
                std_dev_pct,
                sum
            );
            stat_none!($ty <- median_abs_dev, std_dev, var);
            criterion_group! {
                name = benches;
                config = Criterion::default();
                targets = iqr, max, mean, median, median_abs_dev_pct, min,
                    std_dev_pct, sum, median_abs_dev, std_dev, var
            }
            pub mod fast {
                use super::SampleExt;
                use criterion::Criterion;
                fast_stat!(
                    $ty <- (median_abs_dev, median),
                    (std_dev, mean),
                    (var, mean)
                );
                criterion_group! {
                    name = benches;
                    config = Criterion::default();
                    targets = median_abs_dev, std_dev, var
                }
            }
        }
    };
}
// Only f64 is benchmarked at present.
bench!(f64);
criterion_main!(f64::benches, f64::fast::benches);
|
/**
 * Functional-view inspector for the psychic apparatus: renders the module
 * graph with JGraph and opens per-module detail windows on double-click.
 *
 * @author langr
 * 18.10.2009, 15:08:13
 *
 */
public class clsPAInspectorFunctional extends Inspector implements ActionListener {
	private static final long serialVersionUID = -1191073481242249784L;
	// Display-only graph of the functional module layout.
	private JGraph moGraph;
	private ArrayList<clsNode> moRootNodes;
	private JButton moBtnUpdate;
	// Compact mode draws small nodes without connection labels; see setDisplayValues().
	boolean mnCompact;
	// Node geometry and grid spacing used by createNode().
	int w;
	int h;
	int x_mult;
	int x_offset;
	int y_mult;
	int y_offset;
	private clsPsychicApparatus moPA;
	// Child detail windows opened on double-click, plus their tabbed inspectors.
	private ArrayList<JFrame> moContentWindows = new ArrayList<JFrame>();
	private ArrayList<TabbedInspector> moContents= new ArrayList<TabbedInspector>();
	public clsPAInspectorFunctional(JTree poTree, boolean pnCompact, clsPsychicApparatus poPA)
	{
		moPA=poPA;
		moRootNodes = clsGenerateFunctionalModel.getRootNodes(moPA);
		mnCompact = pnCompact;
		setDisplayValues();
		moGraph = new JGraph();
		moGraph.addMouseListener(new MyMouseListener(poTree, moGraph));
		// The graph is read-only: no editing or re-wiring by the user.
		moGraph.setEditable(false);
		moGraph.setConnectable(false);
		moGraph.setDisconnectable(false);
		updateControl(); //loading data into the graph
		moBtnUpdate = new JButton("Update graph..."); //create an update-button
		moBtnUpdate.addActionListener(this);
		ScrollPane moScrollPane = new ScrollPane();
		moScrollPane.add(moGraph);
		setLayout(new BorderLayout());
		add(moBtnUpdate, BorderLayout.NORTH);
		//add(moGraph, BorderLayout.WEST);
		add(moScrollPane, BorderLayout.CENTER);
	}
	// Chooses node size and grid spacing depending on compact mode.
	private void setDisplayValues() {
		if (mnCompact) {
			w = 30;
			h = 30;
			x_mult = w + w/2;
			x_offset = 10;
			y_mult = h + h/2;
			y_offset = 10;
		} else {
			w = 120;
			h = 65;
			x_mult = w + w/2;
			x_offset = 10;
			y_mult = h + h/2;
			y_offset = 10;
		}
	}
	/* (non-Javadoc)
	 *
	 * @author langr
	 * 13.08.2009, 01:46:51
	 *
	 * @see sim.portrayal.Inspector#updateInspector()
	 */
	@Override
	public void updateInspector() {
		// Forward the update to every inspector in every open detail window.
		for(TabbedInspector moContent : moContents){
			for(Object oInsp : moContent.inspectors) {
				if( oInsp instanceof Inspector ) {
					((Inspector) oInsp).updateInspector();
				}
			}
		}
	}
	// Clears the "already added" flag on all nodes before regenerating cells.
	private void resetAdded() {
		for (clsNode oNode:moRootNodes) {
			oNode.resetAdded();
		}
	}
	// Rebuilds the whole graph model from the current module tree and lays it out.
	@SuppressWarnings({ "serial" })
	public void updateControl() {
		GraphModel model = new RichTextGraphModel();
		GraphLayoutCache cache = new DataGraphLayoutCache(model,
				new DefaultCellViewFactory() {
			// Multi-line views allow line breaks in node labels.
			@Override
			protected VertexView createVertexView(Object cell) {
				return new MultiLineVertexView(cell);
			}
		}, true);
		ArrayList<DefaultGraphCell> oCells = new ArrayList<DefaultGraphCell>();
		ArrayList<DefaultGraphCell> oRootCells = new ArrayList<DefaultGraphCell>();
		generateAllCells(oCells, oRootCells);
		DefaultGraphCell[] cells = new DefaultGraphCell[oCells.size()];
		for (int i=0; i<oCells.size(); ++i) {
			cells[i]=oCells.get(i);
		}
		Object[] roots = oRootCells.toArray();
		JGraphGraphFactory.insert(model, cells);
		JGraphFacade facade = new JGraphModelFacade(model, roots);
		Map<?,?> nested = facade.createNestedMap(true, true);
		cache.edit(nested);
		moGraph.setModel(model);
		moGraph.getGraphLayoutCache().edit(nested); // Apply the results to the actual graph
		moGraph.updateUI();
	}
	// Produces all vertex and edge cells; root cells are returned separately
	// so the layout facade knows where to start.
	private void generateAllCells(ArrayList<DefaultGraphCell> poCells, ArrayList<DefaultGraphCell> poRootCells) {
		resetAdded();
		HashMap<Integer, DefaultGraphCell> oNodes = new HashMap<Integer, DefaultGraphCell>();
		ArrayList<DefaultGraphCell> oConnections = new ArrayList<DefaultGraphCell>();
		for (clsNode oNode:moRootNodes) {
			oNode.resetAdded();
		}
		for (clsNode oNode:moRootNodes) {
			generateCells(oNode, oNodes, oConnections);
			poRootCells.add(oNodes.get(oNode.moId));
		}
		poCells.addAll(oNodes.values());
		poCells.addAll(oConnections);
	}
	// Depth-first walk over the module graph; mnAdded guards against cycles
	// and duplicate visits.
	private void generateCells(clsNode poNode, HashMap<Integer, DefaultGraphCell> poNodes, ArrayList<DefaultGraphCell> poConnections) {
		if (poNode.mnAdded == false) {
			poNode.mnAdded = true;
			DefaultGraphCell oThis = createNode(poNode);
			poNodes.put(poNode.moId, oThis);
			for (clsConnection oC:poNode.getNextModules()) {
				generateCells(oC.moTarget, poNodes, poConnections);
				DefaultGraphCell oOther = poNodes.get(oC.moTarget.moId);
				if (oOther == null) {
					throw new java.lang.NullPointerException("node requested, but not found. "+poNode.moId+" -> "+oC.moTarget.moId);
				}
				poConnections.add( (DefaultGraphCell)createConnection(oC, oThis, oOther));
			}
		}
	}
	// Builds one styled vertex cell for a module node, positioned on the
	// row/column grid computed by clsGenerateFunctionalModel.
	private DefaultGraphCell createNode(clsNode poNode) {
		//Richtext to enable linebreaks
		RichTextBusinessObject userObject = new RichTextBusinessObject();
		RichTextValue textValue = new RichTextValue(poNode.getName(mnCompact));
		userObject.setValue(textValue);
		// Create vertex with the given name
		NodeCell cell = new NodeCell(userObject, poNode.moId);
		// Set bounds
		int x = poNode.mnCol*x_mult + x_offset;
		int y = poNode.mnRow*y_mult + y_offset;
		GraphConstants.setBounds(cell.getAttributes(), new Rectangle2D.Double(x, y, w, h));
		GraphConstants.setGradientColor( cell.getAttributes(), poNode.mePsychicInstance.getColor());
		//GraphConstants.setInset(cell.getAttributes(), 10);
		// Make sure the cell is resized on insert
		GraphConstants.setResize(cell.getAttributes(), true);
		GraphConstants.setAutoSize(cell.getAttributes(), true);
		GraphConstants.setOpaque(cell.getAttributes(), true);
		GraphConstants.setBorderColor(cell.getAttributes(), Color.black);
		GraphConstants.setBackground(cell.getAttributes(), new Color(240,240,240));
		// Add a Port
		cell.addPort();
		return cell;
	}
	// Builds a directed edge between two module cells; labels are suppressed
	// in compact mode.
	private DefaultEdge createConnection(clsConnection poConnection, DefaultGraphCell poSource, DefaultGraphCell poTarget) {
		String name = "";
		if (!mnCompact) {
			name = poConnection.toString();
		}
		Edge edge = new Edge( name, poConnection.toString() );
		edge.setSource(poSource.getChildAt(0));
		edge.setTarget(poTarget.getChildAt(0));
		GraphConstants.setLineEnd(edge.getAttributes(), GraphConstants.ARROW_CLASSIC);
		GraphConstants.setEndFill(edge.getAttributes(), true);
		return edge;
	}
	/* this is the update button - if pressed-->reload & redraw
	 *
	 * @author langr
	 * 15.10.2009, 22:44:13
	 *
	 * @see java.awt.event.ActionListener#actionPerformed(java.awt.event.ActionEvent)
	 */
	@Override
	public void actionPerformed(ActionEvent e) {
		Object source = e.getSource();
		if( source == moBtnUpdate) {
			updateControl();
		}
	}
	/**
	 * Prevent from loosing the synchronisation after a graph element is dragged
	 */
	public class MyMouseListener extends MouseInputAdapter {
		protected JTree moMyTree;
		private JGraph moMyGraph;
		public MyMouseListener(JTree poTree, JGraph poGraph) {
			moMyTree = poTree;
			moMyGraph = poGraph;
		}
		// Double-click on a module cell opens its detail window; Alt forces a
		// brand-new window instead of reusing the last one.
		@Override
		public void mouseReleased(MouseEvent e) {
			if (e.getClickCount() == 2) {
				if (e.getSource() instanceof JGraph) {
					Object[] selection = moMyGraph.getSelectionModel().getSelectionCells();
					if (selection != null) {
						for (Object s:selection) {
							if (s instanceof itfMouseClick ) {
								//select node in Tree
								//selectNodeInTree( ((itfMouseClick)s).getId() );
								//open Node Details in new Window
								if(e.isAltDown()){
									openNewWindow(((itfMouseClick)s).toString(),true);
								}
								else{
									openNewWindow(((itfMouseClick)s).toString(),false);
								}
							}
						}
					}
				}
			}
		}
		// NOTE(review): only referenced from commented-out code above;
		// kept for the alternative tree-sync behaviour.
		private void selectNodeInTree(String poId) {
			TreePath oPath = findNode(poId);
			moMyTree.setSelectionPath(oPath);
			moMyTree.expandPath(oPath);
			moMyTree.makeVisible(oPath);
		}
		private TreePath findNode( String nodeName ) {
			TreeNode[] oPath = findNodeRecursive( (DefaultMutableTreeNode) moMyTree.getModel().getRoot(), nodeName );
			return new TreePath(oPath);
		}
		// Depth-first search for the first tree node whose label starts with nodeName.
		private TreeNode[] findNodeRecursive( DefaultMutableTreeNode node, String nodeName ) {
			TreeNode[] result = null;
			if ( node.getUserObject().toString().startsWith( nodeName ) ) {
				result = node.getPath();
			}
			for ( int i=0; i<node.getChildCount(); i++ ) {
				DefaultMutableTreeNode child = (DefaultMutableTreeNode)node.getChildAt( i );
				result = findNodeRecursive( child, nodeName );
				if (result != null) {
					break;
				}
			}
			return result;
		}
	}
	/*
	 * shows the internals of the selected Module in a new Frame
	 */
	private void openNewWindow(String id, boolean newWindow){
		if(id.length()!=0){
			JFrame moContentWindow;
			if(newWindow){
				moContentWindow = new clsModuleInspectorWindow(this);
				moContentWindows.add(moContentWindow);
			}
			else{
				if(moContentWindows.size()==0){
					moContentWindow = new clsModuleInspectorWindow(this);
					moContentWindows.add(moContentWindow);
				}
				else{
					//if min 1 frame is open the new frame will be loaded in the last opened frame
					moContentWindow = moContentWindows.get(moContentWindows.size()-1);
					moContentWindow.getContentPane().removeAll();
				}
			}
			//moContentWindow.setSize(750,550);
			// Normalize the id: strip any ":..." suffix, pad to two digits.
			if(id.contains(":")){
				id= id.substring(0, id.indexOf(':'));
			}
			if(id.length()<3){
				id = id.charAt(0) +"0"+id.charAt(1);
			}
			moContentWindow.setTitle("Module" + id + " - Agent " + moPA.getUid());
			TabbedInspector moContent = new TabbedInspector();
			moContents.add(moContent);
			Field oFields[] =moPA.getClass().getFields();
			//Search through all Fields of poPA to get the full name of the selected Module
			String fullId="";
			for(int i = 0; i<oFields.length; i++){
				if(oFields[i].getName().contains("_")){
					if(oFields[i].getName().substring(0, oFields[i].getName().indexOf("_")).equals("mo"+id)){
						fullId=oFields[i].getName().substring(2);
					}
				}
			}
			clsInspectorTab_Modules.addAutocreatedInspectors(moContent, moPA, fullId);
			clsInspectorTab_Modules.addHandCraftedInspectors(moContent, moPA, fullId);
			moContentWindow.add(moContent);
			moContentWindow.repaint();
			moContentWindow.setVisible(true);
		}
	}
	// Callback from a detail window when it closes itself.
	public void childWindowClosed(JFrame poWindow){
		moContentWindows.remove(poWindow);
	}
	// Disposes every open detail window (e.g. when the inspector is torn down).
	public void closeAllChildWindows(){
		for( JFrame oWindow : moContentWindows) {
			oWindow.dispose();
		}
	}
}
import { CircularProgress, IconButton, List, ListItem, ListItemIcon, ListItemText, TextField } from '@material-ui/core';
import { useEffect, useState } from 'react';
import AddIcon from '@material-ui/icons/Add';
import DeleteIcon from '@material-ui/icons/Delete';
import "./TokBrowser.css";
import { createURL } from '../utils/url';
import { RouteName } from '../constants/routes';
import TokBrowser from './TokBrowser';
import { Favorite, Tiktok } from '../types/tok.interface';
import FavoriteList from './FavoriteList';
// Displays the user's favorite lists, lets them select/create one, and shows
// the Tiktoks of the selected list via <FavoriteList>/<TokBrowser>.
function FavoriteToks(props: { favorites: any[], userId: number, onDeleteList: Function, onEditList: Function }) {
    const [selectedList, setSelectedList] = useState<Favorite>();
    const [toks, setToks] = useState<Tiktok[]>([]);
    const [loading, setLoading] = useState<boolean>(false);
    // Fetches the toks of one list and stores them in state.
    // NOTE(review): fetch/JSON errors are not handled -- a failed request
    // leaves the spinner showing; confirm whether that is acceptable.
    const getList = (listId: number) => {
        setLoading(true);
        fetch(createURL(RouteName.FAVORITES_LIST, [listId]), { method: 'GET' })
            .then(resp => resp.json()
                .then(res => {
                    setToks(res);
                    setLoading(false);
                }));
    };
    const handleDelete = () => {
        props.onDeleteList(selectedList);
        setSelectedList(undefined);
    }
    // Clicking the already-selected list toggles the selection off.
    const handleListItemClick = (event: any, fav: Favorite) => {
        if (selectedList && selectedList.list_id === fav.list_id) {
            setSelectedList(undefined);
        } else {
            setSelectedList(fav);
        }
    };
    // Creates a client-side placeholder list (list_id -1 marks "unsaved").
    // NOTE(review): '<NAME>' looks like an anonymization placeholder for the
    // default list name -- confirm the intended default.
    const createFavoritesList = () => {
        const newList = {
            list_id: -1,
            user_id: props.userId,
            list_name: '<NAME>'
        };
        setSelectedList(newList);
    };
    // Reload the toks whenever a different list is selected.
    useEffect(() => {
        if (props.favorites && selectedList) {
            getList(selectedList.list_id);
        }
    }, [selectedList])
    const noFavorites = (<div>
        <h4>No Favorites</h4>
    </div>);
    const favoriteLists =
        (<List component="nav" aria-label="">
            {props.favorites && props.favorites.map((favList: Favorite, idx) =>
                <ListItem key={idx} button selected={selectedList && selectedList.list_id === favList.list_id} onClick={(event) => handleListItemClick(event, favList)}>
                    <ListItemText primary={favList.list_name} />
                </ListItem>
            )}
        </List>)
    const loadingBar = <div className="flex flex-1 flex-align-center pad-5">
        <CircularProgress />
    </div>;
    const tokBrowser = loading ? loadingBar : (<TokBrowser toks={toks} title=""></TokBrowser>);
    return (
        <div className='flex flex-column tok-favorites overflow-auto'>
            <div>
                <div className="flex">
                    <h5>Favorites List</h5>
                    <IconButton color="primary" aria-label="Add new favorites list" onClick={() => { createFavoritesList() }}>
                        <AddIcon />
                    </IconButton>
                </div>
                {props.favorites ? favoriteLists : noFavorites}
            </div>
            {selectedList ? <FavoriteList list={selectedList} userId={props.userId} onDeleteList={handleDelete} onEditList={props.onEditList} toks={toks}></FavoriteList> : <h5>Select a list to view Tiktoks</h5>}
        </div>
    );
}
export default FavoriteToks;
Real-world object categories and scene contexts conjointly structure statistical learning for the guidance of visual search
We examined how object categories and scene contexts act in conjunction to structure the acquisition and use of statistical regularities to guide visual search. In an exposure session, participants viewed five object exemplars in each of two colors in each of 42 real-world categories. Objects were presented individually against scene context backgrounds. Exemplars within a category were presented with different contexts as a function of color (e.g., the five red staplers were presented with a classroom scene, and the five blue staplers with an office scene). Participants then completed a visual search task, in which they searched for novel exemplars matching a category label cue among arrays of eight objects superimposed over a scene background. In the context-match condition, the color of the target exemplar was consistent with the color associated with that combination of category and scene context from the exposure phase (e.g., a red stapler in a classroom scene). In the context-mismatch condition, the color of the target was not consistent with that association (e.g., a red stapler in an office scene). In two experiments, search response time was reliably lower in the context-match than in the context-mismatch condition, demonstrating that the learning of category-specific color regularities was itself structured by scene context. The results indicate that categorical templates retrieved from long-term memory are biased toward the properties of recent exemplars and that this learning is organized in a scene-specific manner.
Introduction
To perform most real-world activities, people must find and attend to objects that match current goals. Over the last 20 years or so, it has become clear that the guidance of attention to relevant objects is driven not only by stimulus salience and top-down templates, but also by the history of previous selective actions, i.e., selection history (Awh et al., 2012;Failing & Theeuwes, 2018;Le Pelley et al., 2016). Core phenomena of this type include inter-trial effects (Kristjansson et al., 2002;Li & Theeuwes, 2020;Talcott & Gaspelin, 2020), reward learning (Anderson et al., 2011;Hickey et al., 2010), learned distractor rejection (Gaspelin et al., 2015;Stilwell et al., 2019;Wang & Theeuwes, 2018), and target probability cuing (Geng & Behrmann, 2005;Jiang et al., 2013).
These phenomena show that the human visual system tracks recent statistical regularities predicting the properties that are likely to be associated with task-relevant objects, and that this learning can play a major role in where, and to what objects, attention is directed. However, to be of any practical use in real-world visual search, such learning must be structured, because the visual world is itself structured by elements such as scene context and object category. As an example of contextual structure, learning that targets in a kitchen have tended to appear near the sink may predict the location of the next target in the kitchen, but it does not provide much information about the likely location of targets when the context changes to a park. Similarly, for target category structure, learning that recent car targets have tended to be red may help predict the color of the next car, but it does not provide much predictive value when the target category changes to a shoe or a cat.
In the literature on attention guidance by learning and history, there has been extensive work on the structural role of scene context in statistical learning of target properties, broadly collected under the term "contextual cuing" (for a review, see Sisk et al., 2019). Most of this work has focused on contextual structure in the learning of target position regularities (e.g., Chun & Jiang, 1998), though a smaller group of studies has focused on the learning of surface feature properties, such as object shape (Chun & Jiang, 1999) or rewarded color (Anderson, 2015).
In contrast with this extensive literature, there has been relatively little work conducted to understand how target object category structures the acquisition of recent statistical properties to guide visual search. Zelinsky and colleagues pioneered work on the role of object category in visual search, but this has tended to focus on the role of mature category representations rather than on the learning of recent statistical regularities. Using real-world images of teddy bears as targets, Yang and Zelinsky (2009) showed that visual search could be guided, visually, to targets that were defined only by their category "teddy bear." One plausible mechanism by which this occurs is through retrieval of long-term visual representations of teddy bears (either as individual exemplars or as a category prototype), which then functions as a template to guide attention towards targets with similar visual properties in the search display. Consistent with this view, further work on categorical search has shown that attention is guided toward objects in the search array that share visual features with the target category (Alexander & Zelinsky, 2011), especially typical features of that category (Maxfield et al., 2014), and that attention is guided best to the target when it is cued at the basic level, presumably because visual variability increases at the superordinate levels (e.g., all chairs have legs but not all furniture has legs) (Yu et al., 2016).
Recently, Bahle et al. (2021) examined how the learning of new statistical regularities biases the expression of this type of category-specific template representation. The experiments were divided into two sessions, an exposure session and a visual search session. In the former, participants viewed six photographs of objects from each of 40 familiar real-world categories (e.g., "cat," "chair"). The objects were presented individually, and participants simply categorized each as "natural" or "man-made." Critically, the exemplars from a category had a similar color (e.g., all six chairs were black). In the search session, participants completed a categorical search task (Yang & Zelinsky, 2009). They were shown a category label cue on each trial (e.g., "chair") and searched through an object array for any category member. Critically, the color of the category member in the search array either matched (e.g., black chair) or mismatched (e.g., brown chair) the color of the category exemplars from the exposure session. Search was reliably faster in the match condition, indicating that participants had acquired color regularities from the exposure session, that these regularities were organized by object category, and that category-specific learning influenced the formation of the visual template guiding search. In analogy to the term "contextual cuing," where recent statistical regularities are organized by context, Bahle et al. (2021) termed these processes "categorical cuing," because category-specific learning cued the probable features of the target object, facilitating search. In general, the results indicate that the long-term category representations guiding visual search are surprisingly malleable and sensitive to recent statistics. Such sensitivity could be implemented either by preferential retrieval of recent exemplars (in an exemplar-based model of category structure) or by modification of a summary representation of the category (in a prototype model).
The effects in Bahle et al. (2021) were further notable because: (1) the bias toward the properties of recent exemplars was observed for highly familiar, over-learned categories; (2) there was a relatively large set of structural units over which learning occurred (40 categories and 40 colors); (3) the learning specifically influenced the guidance of attention, with the effect primarily attributable to differences in the time required to orient attention and gaze to the target; and (4) learning transferred across tasks, from a superordinate-level classification task to a visual search task. Furthermore, category-specific learning was extended to multiple recent colors within each category. That is, match effects were observed when participants were exposed to exemplars of two different colors in each category; search was more rapid for either exposed color relative to a third, novel color.
Categorical and contextual structure in the learning of recent statistical regularities have been thus far studied separately, but it is plausible that they will interact in visual search: the learning of category-specific regularities could itself be structured by search context. For example, one might observe that highlighters in Clyde's office tend to be green, whereas highlighters in Jenn's office tend to be yellow, leading to the formation of search templates that differ on the dimension of color when searching for a highlighter in one office versus the other. Addressing this issue is theoretically important, because it helps distinguish between an account of statistical learning effects on visual search in which different sources of learning are applied independently versus an account in which they are dependent. Moreover, evidence for dependency would illuminate the nature of the memory representations' function in generating learning and selection history effects, indicating that information about recent contexts and target features are stored in a bound, episodic format. Consistent with this possibility is evidence that reward learning effects in visual search are applied in a scene-specific manner (Anderson, 2015). In sum, the present research question advances understanding of how the multiple structural constraints inherent in real-world environments are combined to guide visual search.
Experiment 1
In Experiment 1, we investigated the possible joint constraint of context and category in the learning and application of statistical regularities guiding visual search (Fig. 1). In an exposure session, participants viewed 420 object exemplars: Fig. 1 Overview of method and design of Experiment 1. a Participants first completed an exposure session, in which they viewed 420 objects: five object exemplars in each of two colors in each of 42 different categories. The objects were presented against scene backgrounds for 2 s each. The participants completed a Plausibility-Rating task, in which they rated how likely it would be to encounter an object of that type in a scene of that type on a scale of 1 (extremely likely) to 6 (extremely unlikely). b In the exposure session, two categories were paired that had exemplars with the same two possible colors (e.g., red or blue staplers or pencil sharpeners). These two categories were paired with two different scene background photographs in which each object type might plausibly appear (e.g., classroom and office). The assignment of object colors to scene backgrounds was complementary. For example, in the exposure session red staplers appeared against the classroom background and blue staplers against the office background. This assignment was reversed for sharpeners: blue against the classroom and red against the office. c Participants then completed a visual search session. On each trial, they first saw a scene background for 500 ms, then a text cue describing the target category for 800 ms, followed by a 1 s delay and a search array of eight objects. They searched for the object that matched the category label and reported the orientation of a superimposed letter "F". The target object in the search array either matched or mismatched the category-specific color of exemplars associated with that background during the exposure session.
Note that the category label was always presented in red font color and did not cue the color of the target object. scene background photograph. To ensure that participants attended to the relationship between object and scene, their task in the exposure session was to rate the plausibility that an object of that type would be found in a scene of that type.
The associations between category-specific colors and scenes in the exposure session were structured as follows. Two categories were paired that had exemplars with the same two possible colors (e.g., red or blue staplers and red or blue pencil sharpeners). These two categories were matched with two different scene background photographs in which each object type might plausibly appear (e.g., classroom and office). The assignment of object colors to scene backgrounds was complementary. For example, red staplers appeared against the classroom background and blue staplers against the office background. This assignment was reversed for sharpeners: blue against the classroom and red against the office. Thus, each scene background was associated with exemplars of both colors, but from different categories.
Participants then completed a visual search session, in which the targets were new exemplars from the object categories used in the exposure phase. They were cued with a category label (e.g., "stapler") displayed against a scene context background. Then, they searched through an array of eight objects to find the target and report the orientation of a superimposed letter. We manipulated the consistency between the scene background and the target color. In the context-match condition, the target color was consistent with the color associated with that combination of category and scene background from the exposure session (e.g., a red stapler target presented against the classroom background). In the context-mismatch condition, the color of the target was not consistent with that association (e.g., a red stapler target presented against the office background).
If the statistical learning of recent, category-specific color regularities is organized by scene context, when participants view the search target label presented against a scene background, they should tend to instantiate a search template that is biased toward the color of items from that category previously associated with that context, leading to more efficient guidance, and thus lower RT, in the context-match condition than in the context-mismatch condition.
Method
Participants Participants (18-30 years old) were recruited from the University of Iowa undergraduate subject pool and received course credit. All participants reported normal or corrected-to-normal vision. Human subjects' procedures were approved by the University of Iowa Institutional Review Board. We collected data from 60 participants to ensure sufficient power to detect a small-to-medium-sized effect in the central contrast of interest. Seven participants were replaced for failing to meet an a priori criterion of 85% accuracy in the search task. Participant gender was not collected.
Apparatus Due to novel coronavirus restrictions, the experiment was conducted online. It was programmed with OpenSesame software (Mathôt et al., 2012) and converted to Javascript for web-based delivery on a JATOS server maintained by the University of Iowa. Because participants completed the experiment using their own computers, we report stimulus size in absolute pixel values.
Stimuli. The stimulus set comprised 504 object images and 42 scene backgrounds. In addition, there were 150 distractor objects (75 artifact, 75 natural) for the search session that did not overlap with the experimental categories. Most stimuli were adapted from the set used in Bahle et al. (2021). Additional object and scene background images were acquired using Google image search and existing photo databases, such as Adobe Stock images. Each object image was sized to fit within a 150 × 150 pixel square and was presented against a white background within that square region. There were 42 object categories (22 natural and 20 artifact) and 12 exemplars in each category, six in each of the two colors per category (see Appendix Tables 2 and 3 for a complete list of categories, colors, and scene contexts). The colors for each category were chosen so that there was significant color variability across categories. For each participant, five of the six exemplars from each color in each category were randomly chosen for the exposure session. The final exemplar was assigned to the search session.
Exposure session. For the exposure session, object categories were paired, and each category within a pair had the same possible two colors. Colors were then assigned in a complementary fashion to two scene backgrounds (e.g., red staplers and blue sharpeners against the classroom background; blue staplers and red sharpeners against the office background). There were two possible configurations of this type for each pair of categories, and this was chosen randomly for each pair for each participant. In this design, since each scene was associated with the two possible colors, any effect of color match in the search session must have been mediated by object category. Scene context backgrounds (1,024 × 768 pixels) were presented in grayscale to avoid interactions with the target color manipulation. The object exemplar was presented centrally, superimposed over the background image.
Search session. For the search session, eight objects were presented on a virtual circle (radius of 300 pixels), again superimposed over a scene context background. The location of the first object was selected randomly within a range of 1° to 45°, with the remaining objects each offset by 45° around the virtual circle. All arrays contained one target item matching the category label cue. Seven distractor objects were chosen randomly without replacement from the set of 150 distractors. Each search array contained a total of four artifacts and four natural objects. For example, if the target was an artifact, three artifacts and four natural objects were chosen from the set of distractors. Target and distractor locations were also chosen randomly. A small, black letter "F" on a white background (Arabic font, approximately 16 × 22 pixels) was superimposed centrally on each object. The orientation of the "F" (facing left or facing right) was chosen randomly for each object. The target F was quite small, typically requiring fixation of the target object to discriminate its orientation. This was designed so that the guidance of attention would be implemented with overt shifts of gaze, which has been demonstrated to increase sensitivity to differences in attention guidance (Hollingworth & Bahle, 2020). The cue that appeared before each search array described the category of the target object (e.g., "stapler") and was presented in red, Arabic font.
Procedure. Upon initiating the experiment, participants provided informed consent and received instructions. They were told that they would complete two sub-experiments. They then received instructions for the exposure session. Note that they did not receive instructions for the search session until after completing the exposure session. Thus, during the exposure session, they were not aware that they would subsequently perform a search task.
For the exposure session, the trial began with a screen instructing the participant to "Press Spacebar" to start the trial. After doing so, there was a 200-ms delay, followed by the object stimulus displayed against the scene background for 2,000 ms. Participants then saw a response screen asking them to rate how likely it would be to encounter an object of that type in a scene of that type on a scale of 1 (extremely likely) to 6 (extremely unlikely). (Note that, although each background was chosen as a plausible context for the object category, it was not necessarily the case that there would be a high probability of encountering the object there. For example, a bear could plausibly appear in a forest scene, but encountering a bear in any given forest is unlikely. In contrast, encountering a chair in a living room scene is very likely.) They entered the corresponding number on the keyboard.
In the exposure session, participants completed five blocks of 84 trials. In each block, they viewed one exemplar in each of the two colors for each of the 42 categories. Trials in a block were randomly intermixed. In total, there were ten exposures per category (five for each of the two colors per category). For the plausibility-rating task, mean plausibility across the categories was 2.64 (SD = 0.31).
Participants then completed the search session. Each trial began with a centrally presented "Press Spacebar" screen. Once pressed, there was a 200-ms delay before a scene background was presented for 500 ms. Then, a category label cue was centrally presented over the scene background (e.g., "stapler") in red font for 800 ms, which indicated the category of the search target in the upcoming search display. The use of a category label cue required participants to retrieve a representation of the target category from memory as a template to guide visual search. Once the cue was removed, the scene background was presented alone for 1,000 ms. Finally, the search display was presented over the scene background. Participants were instructed to find the cued object and report the orientation of the "F" superimposed on it, and to do so as quickly and as accurately as possible. Participants pressed the "P" key to indicate a right-facing "F" (normal) and the "Q" key to indicate a left-facing "F" (mirror reversed).
Response terminated the search display. A smiley emoticon was displayed for 200 ms following a correct response, and a frowny emoticon was displayed for 500 ms following an incorrect response.
The search session began with instructions indicating the change in task. Participants first completed ten trials of practice using target object categories and scene backgrounds not used in the exposure session. Then, they completed one experimental block of 168 search trials. Each of the 42 categories was the target of search four times. Two trials per category were in the context-match condition, in which the color-category-background association from the exposure session was retained (e.g., a red stapler against the classroom and a blue sharpener against the office). Two other trials were in the context-mismatch condition, in which the color-background associations were reversed. Trials in the block were randomly intermixed. Each of the exemplars in the search phase was repeated once (e.g., the same red stapler exemplar was the target against the classroom in the context-match condition and against the office in the context-mismatch condition). This reduced possible variability across conditions, potentially increasing sensitivity to the effect of context match. The entire experiment lasted approximately 1 h. Participants were encouraged to take short breaks between exposure blocks and between the exposure and search sessions.
Results
Search accuracy For the visual search task, mean accuracy was 95.36% correct. The arcsine square root transformed values did not differ as a function of context match, F(1, 59) = 1.06, p = .308, adj ƞ p 2 = .001.
Manual response time (RT) The critical measure was mean RT in the search task as a function of context match condition. The analysis was limited to correct search trials. We also used a two-step RT trimming procedure. First, RTs shorter than 250 ms (not plausibly based on target discrimination) or longer than 6,000 ms were eliminated. Next, RTs more than 2.5 standard deviations from the participant's mean in each condition were eliminated. A total of 8.02% of trials was eliminated. The results are reported in Fig. 2, collapsing across object type. The full set of marginal means is reported in Table 1.
Analysis 1 ANOVA. We analyzed the RT data with a 2 (context match: match, mismatch) × 2 (object type: artifact, natural) repeated-measures ANOVA, treating participant as a random effect. We included object type as a factor to examine potential differences in learning and context as a function of superordinate category, though we did not develop predictions for this factor, as previous work has shown equivalent categorical cuing for artifacts and natural objects (Bahle et al., 2021). Adjusted ƞ p 2 values accompany each test (Mordkoff, 2019), correcting for the positive bias inherent in standard ƞ p 2 . There was a reliable main effect of context match, with lower mean RT on context-match (1,372 ms) compared with context-mismatch (1,405 ms) trials, F(1, 59) = 6.48, p = .014, adj ƞ p 2 = .084. There was also a reliable effect of object type, with lower mean RT for natural objects (1,371 ms) than for artifacts (1,412 ms), F(1, 59) = 10.1, p = .002, adj ƞ p 2 = .132. These factors did not interact, F(1, 59) = 0.48, p = .492, adj ƞ p 2 = -0.009. Analysis 2 Mixed effects. In a complementary analysis of the RT data, we sought to draw both population inferences (from the participant sample) and inferences about the population of real-world categories (from the sample of categories). Thus, we employed a linear mixed-effects approach with a cross-classified random-effects structure, simultaneously treating participant and category item as random effects (Baayen et al., 2008). In addition, treating category item as a random effect increased our confidence that the observed results were robust not only across the set of participants but also across the set of categories. The fixed-effects structure included context match condition and object type (natural, artifact). We then determined the random-effects structure best supported by the data. We began with the maximal random-effects structure and then simplified the model in the manner recommended by Matuschek et al. 
(2017), removing random-effects components that did not significantly improve model fit (via likelihood ratio test) or that produced critical failures in model convergence. The final randomeffects structure included an intercept for participant, an intercept for category, and a slope for object type by participant.
Analyses were implemented with the lme4 package (version 1.1-26) in R (version 4.0.3). Degrees of freedom for the statistical tests were estimated using the lmerTest package (version 3.1-3).
There was a reliable main effect of context match condition, with lower RT on context match compared with context mismatch trials, F(1, 9,116) = 9.13, p = .003. There was no reliable main effect of object type, F(1, 42.4) = 0.99, p = .326, and no reliable interaction between object type and context match, F(1, 9,114) = 0.48, p = .491. Thus, the mixed-effects results support those from the ANOVA with respect to the context-match effect, and allow inferences from this sample of categories to the population of categories.
Discussion
In Experiment 1, we demonstrated that the learning of category-specific color regularities was itself structured by scene context. When searching for an object type in a scene, participants selectively retrieved, and instantiated as a template, properties of recent exemplars from that category which had appeared in that particular scene. Thus, the two sources of structure in the learning of object regularities, scene contexts and object categories, are dependent.
Experiment 2
The design of Experiment 1 meant that the two colors within a category were associated with backgrounds from different scene categories (e.g., red staplers with a classroom and blue staplers with an office). In Experiment 2, we sought to associate the colors with different exemplars within a scene category. For example, red staplers in the exposure session appeared against classroom 1 and blue staplers against classroom 2. In the search session, the target object color either matched (e.g., red staplers against office 1) or mismatched (e.g., red staplers against office 2) the color-scene association. This allowed us to examine whether the structure imposed by scene context operates at the level of scene exemplars or at the level of scene categories. If the former, then we should replicate the results of Experiment 1. If the latter, then no match effect should be observed, as both colors within an object category were associated with the same scene category.
In addition to this primary goal, we sought to examine the effect of attention in the learning of object-category-to-scene associations. In the exposure session, one group of participants completed the plausibility-rating task used in Experiment 1, which required attending to the relationship between object and scene. A second group of participants simply classified each object as "man-made" or "natural," which did not require attention to the background or to the relationship between object and background. Previous work has shown that attention to the relationship between two entities is often required to form an association (Gwinn et al., 2019;Rosas et al., 2013;Sisk et al., 2019). Method. Participants. We collected data from 120 participants, 60 in each exposure session task. Twelve participants were replaced for failing to meet an a priori criterion of 85% accuracy in the search task.
Apparatus Experiment 2 was also conducted online using the same apparatus.
Stimuli. The object stimulus set was comprised of 504 object images, 84 scene backgrounds, and the same set of 150 distractors as used in Experiment 1. Additional scene context images were acquired so that each category was assigned to one type of scene context (e.g., staplers to offices, sharpeners to classrooms), and each color was assigned to a different scene context exemplar (e.g., red staplers to office 1 and blue staplers to office 2). The viewpoints and general composition of the two backgrounds were chosen to be quite similar. Finally, some category colors were replaced to increase color variability. The complete set of object categories, colors, and backgrounds is listed in the Appendix Tables 2 and 3. Note that, unlike Experiment 1, each scene background was associated with only one color. Thus, this design cannot eliminate the possibility that, during search, scene context facilitated search for a particular color in general (rather than in a category-specific manner). However, the results of Experiment 1 render this possibility unlikely.
Procedure. For the exposure session, the plausibility-rating task was the same as in Experiment 1. For the classification task, participants were asked to classify the exemplar as either "Man-made" or "Natural." They viewed a response screen similar to that for the plausibility-rating task, but with the options "1" for man-made and "6" for natural. For the plausibility-rating task, mean plausibility across the categories was 2.11 (SD = 0.48). For the classification task, mean accuracy was 96% (SD = 0.09).
Next, participants completed one experimental block of 168 search trials with the same trial structure as in Experiment 1.
In planned follow-up analyses, we examined the effect of context match separately for the plausibility-rating and classification tasks. There was a reliable main effect of context match in the former, F(1, 9,263) = 11.42, p < .001, but not in the latter, F(1, 9,229) = 1.40, p = .236.
Discussion
In Experiment 2, we replicated the context-match effect when the two colors within an object category were associated with different exemplars from the same scene category (rather than from different scene categories, as in Experiment 1). Thus, the results confirm that individual scene exemplars structure the acquisition of statistical regularities within object categories and that this structure influences the feature values instantiated in a categorical search template. The secondary goal of Experiment 2 was to examine the role of attention during exposure in the learning of structured statistical regularities. The "classification task" did not require attention to the relationship between object and scene background. There was no reliable context match effect in this condition, but there was a numerical trend, and there was no reliable interaction between exposure task and context match. Thus, although the results are broadly consistent with a role for attention in learning, they do not support strong conclusions on this specific question.
General discussion
Our previous work has shown that statistical learning of the surface feature properties of recently observed objects is organized by real-world object categories, influencing visual search in a category-specific manner (Bahle et al., 2021). Such learning is also structured by scene and array context (Anderson, 2015; Chun & Jiang, 1999), consistent with the larger literature on contextual cuing. In two experiments, we demonstrated that these two forms of structure operate in a dependent manner. Visual search was influenced by within-category color regularities, and this category-level learning was contingent on the scene context in which the exemplars appeared.
The first key finding was that object category templates were biased toward the properties of recently viewed exemplars rather than depending solely on more generalized knowledge acquired over extensive experience. That is, although red may not be a frequent color for cars given one's overall experience with cars, it is possible to quickly set up a bias toward red items when searching for a car if the last few car exemplars have been red (see also Bahle et al., 2021). Note that, unlike Bahle et al., there was no baseline condition in which the target color matched neither of the exposed colors. However, the context effects observed here allow the same inference: It would not have been possible to observe a context effect if search were not guided by the color of the recent exemplars observed in that context. In addition, since the learning effects in Bahle et al. specifically influenced the guidance of attention (as assessed by eye movement measures), we can be confident that the present differences in RT were largely attributable to differences in the guidance of attention and gaze (rather than to other processes, such as postselection target confirmation or response execution).
The second key finding was that category-specific biases were episodic in the sense of being structured by scene context. That is, the structures imposed by object category and scene context are not independent of each other; rather, category-level learning is organized by scene context. This dependency in category learning likely reflects the fact that the properties of real-world category members often vary systematically as a function of context (e.g., yellow taxis are typical in New York, whereas black taxis are typical in London). Of course, categorical search for real-world, overlearned categories will depend heavily on relatively stable representations acquired over a lifetime of experience (Yang & Zelinsky, 2009). However, the functional expression of the category representation is biased by local changes in the statistical distribution of features and to changes in context.
The incorporation of both category and contextual constraints may arise through the underlying format of the memory representation. The properties of category exemplars are likely to be stored as part of a bound, episodic representation of a scene (e.g., Hollingworth, 2006). Exemplar retrieval would then depend on the scene context that cues the previous episode (Anderson, 2015;Anderson & Britton, 2019;Bramao et al., 2017;Godden & Baddeley, 1975;Hardt et al., 2010;Richardson & Spivey, 2000). In turn, a bias to retrieve exemplars associated with the current scene would, in the present design, tend to lead to retrieval of exemplars of one color and not the other, producing the present effects. Although this account places exemplar retrieval at the heart of the observed results, we do not consider the data as mediating between competing exemplar (e.g., Medin & Schaffer, 1978;Nosofsky, 1987) and prototype (e.g., Minda & Smith, 2001;Rosch, 1975) theories of categorization. For example, the results could be accommodated by a prototype model assuming that retrieval of a small number of highly accessible exemplars can influence the use of the category in addition to that derived from a more stable summary representation (e.g., Allen & Brooks, 1991).
Currently, there is conflicting evidence concerning whether learning of and guidance by statistical regularities is driven by implicit or explicit memory. In the contextual cuing literature, learning was initially thought to be implicit, but there is evidence that the magnitude of the effect correlates positively with explicit awareness (Annac et al., 2019;Vadillo et al., 2016), although this correlation is not always observed (Colagiuri & Livesey, 2016). In addition, contextually specific guidance effects are observed both when participants are aware of the associations (e.g., and when awareness is much more limited (e.g., Chun & Jiang, 1998). Here, we focused on the guidance process itself rather than on questions of implicit versus explicit memory, and thus we did not include a test probing explicit memory. Moreover, such a test would have needed to have been administered between the exposure and search sessions, because the associations changed in the search session. This would have delayed and potentially contaminated the transfer of learning across tasks, because test items instantiating different associations would have been necessary. The issue of awareness could be addressed more directly in a modified version of the categorical cuing paradigm that implements a repeated search design (similar to contextual cuing), where explicit memory for category-color consistencies could be assessed at the end of the experiment. The advantage of the current, two-session design is that it demonstrates cross-task transfer that is often absent in other forms of statistical learning.
Finally, we observed a reliable context match effect in the plausibility-rating task, when participants needed to attend to the association between the scene context and the category color during the exposure phase. No reliable context match effect was observed in the classification task, when attending to the relationship was not required to complete the exposure task. The between-task interaction did not reach reliability, limiting our ability to draw strong conclusions about a difference in the context match effect as a function of attention. In the reward learning literature, there is some evidence that context-specific learning depends on attending to the association between context and reward value (Gwinn et al., 2019). Our results are suggestive that attention may play a role in the context-specific learning of category-specific regularities, but this remains an open question.
Declarations
Conflicts of interest There are no conflicts of interest to report.
Ethics approval Approval was granted by the University of Iowa Institutional Review Board.
Consent to participate All participants provided informed consent before participating.
Consent for publication Not applicable. |
<gh_stars>1-10
{- |
Module "Database.PostgreSQL.PQTypes.SQL.Builder" offers a nice
monadic DSL for building SQL statements on the fly. Some examples:
>>> :{
sqlSelect "documents" $ do
sqlResult "id"
sqlResult "title"
sqlResult "mtime"
sqlOrderBy "documents.mtime DESC"
sqlWhereILike "documents.title" "%pattern%"
:}
SQL " SELECT id, title, mtime FROM documents WHERE (documents.title ILIKE <\"%pattern%\">) ORDER BY documents.mtime DESC "
@SQL.Builder@ supports SELECT as 'sqlSelect' and data manipulation using
'sqlInsert', 'sqlInsertSelect', 'sqlDelete' and 'sqlUpdate'.
>>> import Data.Time
>>> let title = "title" :: String
>>> let ctime = read "2020-01-01 00:00:00 UTC" :: UTCTime
>>> :{
sqlInsert "documents" $ do
sqlSet "title" title
sqlSet "ctime" ctime
sqlResult "id"
:}
SQL " INSERT INTO documents (title, ctime) VALUES (<\"title\">, <2020-01-01 00:00:00 UTC>) RETURNING id"
The 'sqlInsertSelect' is particularly interesting as it supports INSERT
of values taken from a SELECT clause from same or even different
tables.
There is a possibility to do multiple inserts at once. Data given by
'sqlSetList' will be inserted multiple times, data given by 'sqlSet'
will be multiplied as many times as needed to cover all inserted rows
(it is common to all rows). If you use multiple 'sqlSetList' then
lists will be made equal in length by appending @DEFAULT@ as fill
element.
>>> :{
sqlInsert "documents" $ do
sqlSet "ctime" ctime
sqlSetList "title" ["title1", "title2", "title3"]
sqlResult "id"
:}
SQL " INSERT INTO documents (ctime, title) VALUES (<2020-01-01 00:00:00 UTC>, <\"title1\">) , (<2020-01-01 00:00:00 UTC>, <\"title2\">) , (<2020-01-01 00:00:00 UTC>, <\"title3\">) RETURNING id"
The above will insert 3 new documents.
@SQL.Builder@ provides quite a lot of SQL magic, including @ORDER BY@ as
'sqlOrderBy', @GROUP BY@ as 'sqlGroupBy'.
>>> :{
sqlSelect "documents" $ do
sqlResult "id"
sqlResult "title"
sqlResult "mtime"
sqlOrderBy "documents.mtime DESC"
sqlOrderBy "documents.title"
sqlGroupBy "documents.status"
sqlJoinOn "users" "documents.user_id = users.id"
sqlWhere $ mkSQL "documents.title ILIKE" <?> "%pattern%"
:}
SQL " SELECT id, title, mtime FROM documents JOIN users ON documents.user_id = users.id WHERE (documents.title ILIKE <\"%pattern%\">) GROUP BY documents.status ORDER BY documents.mtime DESC, documents.title "
Joins are done by 'sqlJoinOn', 'sqlLeftJoinOn', 'sqlRightJoinOn',
'sqlJoinOn', 'sqlFullJoinOn'. If everything fails use 'sqlJoin' and
'sqlFrom' to set join clause as string. Support for a join grammars as
some kind of abstract syntax data type is lacking.
>>> :{
sqlDelete "mails" $ do
sqlWhere "id > 67"
:}
SQL " DELETE FROM mails WHERE (id > 67) "
>>> :{
sqlUpdate "document_tags" $ do
sqlSet "value" (123 :: Int)
sqlWhere "name = 'abc'"
:}
SQL " UPDATE document_tags SET value=<123> WHERE (name = 'abc') "
Exception returning and 'kWhyNot' are a subsystem for querying why a
query did not provide expected results. For example:
> let query = sqlUpdate "documents" $ do
> sqlSet "deleted" True
> sqlWhereEq "documents.id" 12345
> sqlWhereEqE DocumentDeleteFlagMustBe "documents.deleted" False
> sqlWhereILikeE DocumentTitleMustContain "documents.title" "%important%"
> result <- kRun query
If the result is zero then no document was updated. We would like to
know what happened. In query we have three filtering clauses. One is a
baseline: the one mentioning @documents.id@. Baseline clauses define
what objects we are talking about. Other clauses are correctness
checks and may fail if status of on object is unexpected. Using
'kWhyNot' we can see what is wrong with an object:
> problems <- kWhyNot query
Now @problems@ should contain a list of issues with rows that could
possibly be affected but weren't due to correctness clauses. For
example it may state:
> problems = [[ DocumentDeleteFlagMustBe { documentDeleteFlagMustBe = False
> , documentDeleteFlagReallyIs = True
> }
> , DocumentTitleMustContain { documentTitleMustContain = "%important%"
> , documentTitleReallyIs = "Some contract v2"
> }
> ]]
Note: problems is a nested array, for each object we get a list of
issues pertaining to that object. If that list is empty, then it means
that baseline conditions failed or there is no such object that
fulfills all conditions at the same time although there are some that
fulfill each one separately.
Note: @kWhyNot@ is currently disabled. Use 'kWhyNot1' instead, which
returns a single exception.
-}
-- TODO: clean this up, add more documentation.
module Database.PostgreSQL.PQTypes.SQL.Builder
( sqlWhere
, sqlWhereE
, sqlWhereEV
, sqlWhereEVV
, sqlWhereEVVV
, sqlWhereEVVVV
, sqlWhereEq
, sqlWhereEqE
, sqlWhereEqSql
, sqlWhereNotEq
, sqlWhereNotEqE
, sqlWhereIn
, sqlWhereInSql
, sqlWhereInE
, sqlWhereNotIn
, sqlWhereNotInSql
, sqlWhereNotInE
, sqlWhereExists
, sqlWhereNotExists
, sqlWhereLike
, sqlWhereLikeE
, sqlWhereILike
, sqlWhereILikeE
, sqlWhereIsNULL
, sqlWhereIsNotNULL
, sqlWhereIsNULLE
, sqlIgnore
, sqlFrom
, sqlJoin
, sqlJoinOn
, sqlLeftJoinOn
, sqlRightJoinOn
, sqlFullJoinOn
, sqlOnConflictDoNothing
, sqlOnConflictOnColumns
, sqlOnConflictOnColumnsDoNothing
, sqlSet
, sqlSetInc
, sqlSetList
, sqlSetListWithDefaults
, sqlSetCmd
, sqlSetCmdList
, sqlCopyColumn
, sqlResult
, sqlOrderBy
, sqlGroupBy
, sqlHaving
, sqlOffset
, sqlLimit
, sqlDistinct
, sqlWith
, sqlUnion
, SqlTurnIntoSelect
, sqlTurnIntoSelect
, sqlTurnIntoWhyNotSelect
, sqlSelect
, sqlSelect2
, SqlSelect(..)
, sqlInsert
, SqlInsert(..)
, sqlInsertSelect
, SqlInsertSelect(..)
, sqlUpdate
, SqlUpdate(..)
, sqlDelete
, SqlDelete(..)
, sqlWhereAny
, sqlWhereAnyE
, SqlResult
, SqlSet
, SqlFrom
, SqlWhere
, SqlOrderBy
, SqlGroupByHaving
, SqlOffsetLimit
, SqlDistinct
, SqlCondition(..)
, sqlGetWhereConditions
, SqlWhyNot(..)
, DBBaseLineConditionIsFalse(..)
, kWhyNot1
, kWhyNot1Ex
--, DBExceptionCouldNotParseValues(..)
, kRun1OrThrowWhyNot
, kRun1OrThrowWhyNotAllowIgnore
, kRunManyOrThrowWhyNot
, kRunAndFetch1OrThrowWhyNot
, Sqlable(..)
, sqlOR
, sqlConcatComma
, sqlConcatAND
, sqlConcatOR
, parenthesize
, AscDesc(..)
)
where
import Control.Exception.Lifted as E
import Control.Monad.Catch
import Control.Monad.State
import Control.Monad.Trans.Control
import Data.List
import Data.Maybe
import Data.Monoid
import Data.Monoid.Utils
import Data.String
import Data.Typeable
import Database.PostgreSQL.PQTypes
import Prelude
import Safe (atMay)
import qualified Text.JSON.Gen as JSON
-- | Things that can be rendered to a raw 'SQL' fragment.
class Sqlable a where
  toSQLCommand :: a -> SQL

-- | A raw 'SQL' fragment is trivially its own rendering.
instance Sqlable SQL where
  toSQLCommand = id
-- | Join pieces with the given separator, padded with a single space
-- on each side (e.g. @smintercalate "AND" xs@ yields @x1 AND x2 ...@).
smintercalate :: (IsString m, Monoid m) => m -> [m] -> m
smintercalate sep = mintercalate (mspace <> sep <> mspace)

-- | Combine two conditions with @OR@.
sqlOR :: SQL -> SQL -> SQL
sqlOR lhs rhs = sqlConcatOR [lhs, rhs]

-- | Join fragments with @\", \"@.
sqlConcatComma :: [SQL] -> SQL
sqlConcatComma = mintercalate ", "

-- | Parenthesize every fragment and join them with @AND@.
sqlConcatAND :: [SQL] -> SQL
sqlConcatAND = smintercalate "AND" . map parenthesize

-- | Parenthesize every fragment and join them with @OR@.
sqlConcatOR :: [SQL] -> SQL
sqlConcatOR = smintercalate "OR" . map parenthesize

-- | Wrap a fragment in parentheses.
parenthesize :: SQL -> SQL
parenthesize sql = "(" <> sql <> ")"
-- | 'AscDesc' marks ORDER BY order as ascending or descending.
-- Conversion to SQL adds DESC marker to descending and no marker
-- to ascending order.
data AscDesc a = Asc a | Desc a
  deriving (Eq, Show)

-- | Either one shared value ('Single', repeated for every inserted
-- row) or an explicit per-row list of values ('Many'); used for
-- multi-row INSERTs.
data Multiplicity a = Single a | Many [a]
  deriving (Eq, Ord, Show, Typeable)
-- | 'SqlCondition' are clauses that are part of the WHERE block in
-- SQL statements. Each statement has a list of conditions, all of
-- them must be fulfilled. Sometimes we need to inspect internal
-- structure of a condition. For now it seems that the only
-- interesting case is EXISTS (SELECT ...), because that internal
-- SELECT can have explainable clauses.
data SqlCondition = SqlPlainCondition SQL SqlWhyNot
                  | SqlExistsCondition SqlSelect
  deriving (Typeable, Show)

-- | 'SqlWhyNot' contains a recipe for how to query the database for
-- some values we're interested in and construct a proper exception
-- object using that information. For @SqlWhyNot mkException queries@
-- the @mkException@ should take as input a list of the same length
-- list as there are queries. Each query will be run in a JOIN context
-- with all referenced tables, so it can extract values from there.
--
-- NOTE(review): the Bool appears to flag the condition as significant
-- for WHY-NOT analysis ('sqlIgnore' clears it via 'ignoreWhereClause');
-- confirm against the kWhyNot1 implementation.
data SqlWhyNot =
  forall e row. (FromRow row, Exception e) =>
  SqlWhyNot Bool (row -> e) [SQL]
{-
instance Eq SqlCondition where
(SqlPlainCondition a _) == (SqlPlainCondition b _) = a == b
(SqlExistsCondition a) == (SqlExistsCondition b) = a == b
_ == _ = False
-}
-- | Shows the exception's type name and the value queries. 'typeOf'
-- never evaluates its argument, so feeding 'undefined' to the
-- exception constructor is safe here.
instance Show SqlWhyNot where
  show (SqlWhyNot _important mkExc queries) =
    "SqlWhyNot " ++ show (typeOf (mkExc undefined)) ++ " " ++ show queries
instance Sqlable SqlCondition where
  toSQLCommand (SqlPlainCondition a _) = a
  -- An EXISTS condition only cares about row existence, so the
  -- subquery's result columns are replaced with TRUE.
  toSQLCommand (SqlExistsCondition a) =
    "EXISTS (" <> toSQLCommand (a { sqlSelectResult = ["TRUE"] }) <> ")"
-- | Description of a SELECT statement, assembled with the 'sqlSelect'
-- DSL and rendered by its 'Sqlable' instance.
data SqlSelect = SqlSelect
  { sqlSelectFrom     :: SQL            -- ^ FROM clause (may include joins)
  , sqlSelectUnion    :: [SQL]          -- ^ additional UNION'ed selects
  , sqlSelectDistinct :: Bool           -- ^ SELECT DISTINCT?
  , sqlSelectResult   :: [SQL]          -- ^ result columns
  , sqlSelectWhere    :: [SqlCondition] -- ^ WHERE conditions (ANDed)
  , sqlSelectOrderBy  :: [SQL]          -- ^ ORDER BY expressions
  , sqlSelectGroupBy  :: [SQL]          -- ^ GROUP BY expressions
  , sqlSelectHaving   :: [SQL]          -- ^ HAVING conditions (ANDed)
  , sqlSelectOffset   :: Integer        -- ^ OFFSET (0 means none emitted)
  , sqlSelectLimit    :: Integer        -- ^ LIMIT (negative means none emitted)
  , sqlSelectWith     :: [(SQL, SQL)]   -- ^ WITH (name, definition) pairs
  }
-- | Description of an UPDATE statement.
data SqlUpdate = SqlUpdate
  { sqlUpdateWhat   :: SQL            -- ^ target table
  , sqlUpdateFrom   :: SQL            -- ^ additional FROM tables
  , sqlUpdateWhere  :: [SqlCondition] -- ^ WHERE conditions (ANDed)
  , sqlUpdateSet    :: [(SQL, SQL)]   -- ^ (column, value) assignments
  , sqlUpdateResult :: [SQL]          -- ^ RETURNING columns
  , sqlUpdateWith   :: [(SQL, SQL)]   -- ^ WITH (name, definition) pairs
  }

-- | Description of an INSERT statement.
data SqlInsert = SqlInsert
  { sqlInsertWhat       :: SQL                       -- ^ target table
  , sqlInsertOnConflict :: Maybe (SQL, Maybe SQL)    -- ^ ON CONFLICT (target, action); Nothing action means DO NOTHING
  , sqlInsertSet        :: [(SQL, Multiplicity SQL)] -- ^ (column, value(s)) pairs
  , sqlInsertResult     :: [SQL]                     -- ^ RETURNING columns
  , sqlInsertWith       :: [(SQL, SQL)]              -- ^ WITH (name, definition) pairs
  }

-- | Description of an INSERT ... SELECT statement.
data SqlInsertSelect = SqlInsertSelect
  { sqlInsertSelectWhat     :: SQL            -- ^ target table
  , sqlInsertSelectDistinct :: Bool           -- ^ SELECT DISTINCT in the source query?
  , sqlInsertSelectSet      :: [(SQL, SQL)]   -- ^ (target column, source expression) pairs
  , sqlInsertSelectResult   :: [SQL]          -- ^ RETURNING columns
  , sqlInsertSelectFrom     :: SQL            -- ^ FROM clause of the source query
  , sqlInsertSelectWhere    :: [SqlCondition] -- ^ WHERE conditions of the source query
  , sqlInsertSelectOrderBy  :: [SQL]          -- ^ ORDER BY of the source query
  , sqlInsertSelectGroupBy  :: [SQL]          -- ^ GROUP BY of the source query
  , sqlInsertSelectHaving   :: [SQL]          -- ^ HAVING of the source query
  , sqlInsertSelectOffset   :: Integer        -- ^ OFFSET of the source query
  , sqlInsertSelectLimit    :: Integer        -- ^ LIMIT of the source query
  , sqlInsertSelectWith     :: [(SQL, SQL)]   -- ^ WITH (name, definition) pairs
  }

-- | Description of a DELETE statement.
data SqlDelete = SqlDelete
  { sqlDeleteFrom   :: SQL            -- ^ target table
  , sqlDeleteUsing  :: SQL            -- ^ USING tables
  , sqlDeleteWhere  :: [SqlCondition] -- ^ WHERE conditions (ANDed)
  , sqlDeleteResult :: [SQL]          -- ^ RETURNING columns
  , sqlDeleteWith   :: [(SQL, SQL)]   -- ^ WITH (name, definition) pairs
  }

-- | This is not exported and is used as an implementation detail in
-- 'sqlWhereAll'.
data SqlAll = SqlAll
  { sqlAllWhere :: [SqlCondition] -- ^ conditions joined with AND
  }
-- Statements are shown by rendering them to their SQL form first.
instance Show SqlSelect where
  show = show . toSQLCommand

instance Show SqlInsert where
  show = show . toSQLCommand

instance Show SqlInsertSelect where
  show = show . toSQLCommand

instance Show SqlUpdate where
  show = show . toSQLCommand

instance Show SqlDelete where
  show = show . toSQLCommand

instance Show SqlAll where
  show = show . toSQLCommand
-- | Emit @name body@ unless the rendered body is empty, in which case
-- emit nothing at all.
emitClause :: Sqlable sql => SQL -> sql -> SQL
emitClause name s
  | isSqlEmpty body = ""
  | otherwise       = name <+> body
  where
    body = toSQLCommand s

-- | Emit a named clause whose parts are parenthesized and joined with
-- the given separator; an empty part list produces nothing.
emitClausesSep :: SQL -> SQL -> [SQL] -> SQL
emitClausesSep _name _sep [] = mempty
emitClausesSep name sep sqls =
  name <+> smintercalate sep (filter (not . isSqlEmpty) (map parenthesize sqls))

-- | Like 'emitClausesSep' with a comma separator, but without
-- parenthesizing the individual parts.
emitClausesSepComma :: SQL -> [SQL] -> SQL
emitClausesSepComma _name [] = mempty
emitClausesSepComma name sqls =
  name <+> sqlConcatComma (filter (not . isSqlEmpty) sqls)
-- Each statement type can be executed directly by rendering it to SQL.
instance IsSQL SqlSelect where
  withSQL = withSQL . toSQLCommand

instance IsSQL SqlInsert where
  withSQL = withSQL . toSQLCommand

instance IsSQL SqlInsertSelect where
  withSQL = withSQL . toSQLCommand

instance IsSQL SqlUpdate where
  withSQL = withSQL . toSQLCommand

instance IsSQL SqlDelete where
  withSQL = withSQL . toSQLCommand
-- | Render a SELECT statement: WITH, the (possibly UNION'ed) select,
-- GROUP BY, HAVING, ORDER BY, OFFSET and LIMIT, in that order.
instance Sqlable SqlSelect where
  toSQLCommand cmd = smconcat
    [ emitClausesSepComma "WITH" $
        map (\(name,command) -> name <+> "AS" <+> parenthesize command) (sqlSelectWith cmd)
    , if hasUnion
      then emitClausesSep "" "UNION" (mainSelectClause : sqlSelectUnion cmd)
      else mainSelectClause
    , emitClausesSepComma "GROUP BY" (sqlSelectGroupBy cmd)
    , emitClausesSep "HAVING" "AND" (sqlSelectHaving cmd)
    , orderByClause
      -- OFFSET 0 is the default, so it is omitted.
    , if sqlSelectOffset cmd > 0
      then unsafeSQL ("OFFSET " ++ show (sqlSelectOffset cmd))
      else ""
      -- A negative limit means "no LIMIT clause".
    , if sqlSelectLimit cmd >= 0
      then limitClause
      else ""
    ]
    where
      mainSelectClause = smconcat
        [ "SELECT" <+> (if sqlSelectDistinct cmd then "DISTINCT" else mempty)
        , sqlConcatComma (sqlSelectResult cmd)
        , emitClause "FROM" (sqlSelectFrom cmd)
        , emitClausesSep "WHERE" "AND" (map toSQLCommand $ sqlSelectWhere cmd)
          -- If there's a union, the result is sorted and has a limit, applying
          -- the order and limit to the main subquery won't reduce the overall
          -- query result, but might reduce its processing time.
        , if hasUnion && not (null $ sqlSelectOrderBy cmd) && sqlSelectLimit cmd >= 0
          then smconcat [orderByClause, limitClause]
          else ""
        ]
      hasUnion = not . null $ sqlSelectUnion cmd
      orderByClause = emitClausesSepComma "ORDER BY" $ sqlSelectOrderBy cmd
      limitClause = unsafeSQL $ "LIMIT" <+> show (sqlSelectLimit cmd)
-- | Render an INSERT statement: WITH, INSERT INTO, column list,
-- VALUES rows, optional ON CONFLICT and RETURNING. All columns are
-- padded to the same number of rows (see 'makeLongEnough').
instance Sqlable SqlInsert where
  toSQLCommand cmd =
    emitClausesSepComma "WITH" (map (\(name,command) -> name <+> "AS" <+> parenthesize command) (sqlInsertWith cmd)) <+>
    "INSERT INTO" <+> sqlInsertWhat cmd <+>
    parenthesize (sqlConcatComma (map fst (sqlInsertSet cmd))) <+>
    emitClausesSep "VALUES" "," (map sqlConcatComma (transpose (map (makeLongEnough . snd) (sqlInsertSet cmd)))) <+>
    emitClauseOnConflict (sqlInsertOnConflict cmd) <+>
    emitClausesSepComma "RETURNING" (sqlInsertResult cmd)
    where
      emitClauseOnConflict = \case
        Nothing -> ""
        Just (condition, maction) -> emitClause "ON CONFLICT" $
          condition <+> "DO" <+> fromMaybe "NOTHING" maction
      -- Number of rows to insert: the longest 'Many' list, at least 1.
      longest = maximum (1 : (map (lengthOfEither . snd) (sqlInsertSet cmd)))
      lengthOfEither (Single _) = 1
      lengthOfEither (Many x) = length x
      -- Pad every column to 'longest' values: a 'Single' value is
      -- repeated for every row, a short 'Many' list is filled up
      -- with DEFAULT.
      makeLongEnough (Single x) = replicate longest x
      makeLongEnough (Many x) = take longest (x ++ repeat "DEFAULT")
-- | Render an INSERT ... SELECT statement by delegating the source
-- query to a temporary 'SqlSelect'.
instance Sqlable SqlInsertSelect where
  toSQLCommand cmd = smconcat
    -- WITH clause needs to be at the top level, so we emit it here and not
    -- include it in the SqlSelect below.
    [ emitClausesSepComma "WITH" $
        map (\(name,command) -> name <+> "AS" <+> parenthesize command) (sqlInsertSelectWith cmd)
    , "INSERT INTO" <+> sqlInsertSelectWhat cmd
    , parenthesize . sqlConcatComma . map fst $ sqlInsertSelectSet cmd
    , parenthesize . toSQLCommand $ SqlSelect
        { sqlSelectFrom = sqlInsertSelectFrom cmd
        , sqlSelectUnion = []
        , sqlSelectDistinct = sqlInsertSelectDistinct cmd
        , sqlSelectResult = fmap snd $ sqlInsertSelectSet cmd
        , sqlSelectWhere = sqlInsertSelectWhere cmd
        , sqlSelectOrderBy = sqlInsertSelectOrderBy cmd
        , sqlSelectGroupBy = sqlInsertSelectGroupBy cmd
        , sqlSelectHaving = sqlInsertSelectHaving cmd
        , sqlSelectOffset = sqlInsertSelectOffset cmd
        , sqlSelectLimit = sqlInsertSelectLimit cmd
        , sqlSelectWith = []
        }
    , emitClausesSepComma "RETURNING" $ sqlInsertSelectResult cmd
    ]
-- | Render an UPDATE statement: WITH, UPDATE ... SET, FROM, WHERE,
-- RETURNING.
instance Sqlable SqlUpdate where
  toSQLCommand cmd =
    emitClausesSepComma "WITH" (map (\(name,command) -> name <+> "AS" <+> parenthesize command) (sqlUpdateWith cmd)) <+>
    "UPDATE" <+> sqlUpdateWhat cmd <+> "SET" <+>
    sqlConcatComma (map (\(name, command) -> name <> "=" <> command) (sqlUpdateSet cmd)) <+>
    emitClause "FROM" (sqlUpdateFrom cmd) <+>
    emitClausesSep "WHERE" "AND" (map toSQLCommand $ sqlUpdateWhere cmd) <+>
    emitClausesSepComma "RETURNING" (sqlUpdateResult cmd)

-- | Render a DELETE statement: WITH, DELETE FROM, USING, WHERE,
-- RETURNING.
instance Sqlable SqlDelete where
  toSQLCommand cmd =
    emitClausesSepComma "WITH" (map (\(name,command) -> name <+> "AS" <+> parenthesize command) (sqlDeleteWith cmd)) <+>
    "DELETE FROM" <+> sqlDeleteFrom cmd <+>
    emitClause "USING" (sqlDeleteUsing cmd) <+>
    emitClausesSep "WHERE" "AND" (map toSQLCommand $ sqlDeleteWhere cmd) <+>
    emitClausesSepComma "RETURNING" (sqlDeleteResult cmd)
-- | Render the conjunction of all collected conditions; an empty
-- condition list renders as @TRUE@.
instance Sqlable SqlAll where
  toSQLCommand cmd
    | null conds = "TRUE"
    | otherwise =
        "(" <+> smintercalate "AND" (map (parenthesize . toSQLCommand) conds) <+> ")"
    where
      conds = sqlAllWhere cmd
-- | A 'SqlSelect' with the given FROM clause and every other field at
-- its default (no LIMIT is encoded as -1, no OFFSET as 0). Record
-- syntax is used instead of an 11-argument positional application so
-- field order changes cannot silently mix up defaults.
emptySelect :: SQL -> SqlSelect
emptySelect from = SqlSelect
  { sqlSelectFrom     = from
  , sqlSelectUnion    = []
  , sqlSelectDistinct = False
  , sqlSelectResult   = []
  , sqlSelectWhere    = []
  , sqlSelectOrderBy  = []
  , sqlSelectGroupBy  = []
  , sqlSelectHaving   = []
  , sqlSelectOffset   = 0
  , sqlSelectLimit    = -1
  , sqlSelectWith     = []
  }

-- | Build a SELECT statement for the given table, refined by the
-- supplied builder actions ('sqlResult', 'sqlWhere', ...).
sqlSelect :: SQL -> State SqlSelect () -> SqlSelect
sqlSelect table refine = execState refine (emptySelect table)

-- | Like 'sqlSelect', but the first argument is an arbitrary FROM
-- clause rather than a single table name.
sqlSelect2 :: SQL -> State SqlSelect () -> SqlSelect
sqlSelect2 from refine = execState refine (emptySelect from)
-- | Build an INSERT statement for the given table.
sqlInsert :: SQL -> State SqlInsert () -> SqlInsert
sqlInsert table refine =
  execState refine (SqlInsert table Nothing mempty [] [])

-- | Build an INSERT ... SELECT statement for the given target table
-- and source FROM clause.
sqlInsertSelect :: SQL -> SQL -> State SqlInsertSelect () -> SqlInsertSelect
sqlInsertSelect table from refine =
  execState refine (SqlInsertSelect
    { sqlInsertSelectWhat = table
    , sqlInsertSelectDistinct = False
    , sqlInsertSelectSet = []
    , sqlInsertSelectResult = []
    , sqlInsertSelectFrom = from
    , sqlInsertSelectWhere = []
    , sqlInsertSelectOrderBy = []
    , sqlInsertSelectGroupBy = []
    , sqlInsertSelectHaving = []
    , sqlInsertSelectOffset = 0
    , sqlInsertSelectLimit = -1
    , sqlInsertSelectWith = []
    })

-- | Build an UPDATE statement for the given table.
sqlUpdate :: SQL -> State SqlUpdate () -> SqlUpdate
sqlUpdate table refine =
  execState refine (SqlUpdate table mempty [] [] [] [])

-- | Build a DELETE statement for the given table.
sqlDelete :: SQL -> State SqlDelete () -> SqlDelete
sqlDelete table refine =
  execState refine (SqlDelete
    { sqlDeleteFrom = table
    , sqlDeleteUsing = mempty
    , sqlDeleteWhere = []
    , sqlDeleteResult = []
    , sqlDeleteWith = []
    })
-- | Statements that support WITH (common table expression) clauses.
class SqlWith a where
  sqlWith1 :: a -> SQL -> SQL -> a

instance SqlWith SqlSelect where
  sqlWith1 cmd name sql = cmd { sqlSelectWith = sqlSelectWith cmd ++ [(name,sql)] }

instance SqlWith SqlInsertSelect where
  sqlWith1 cmd name sql = cmd { sqlInsertSelectWith = sqlInsertSelectWith cmd ++ [(name,sql)] }

instance SqlWith SqlUpdate where
  sqlWith1 cmd name sql = cmd { sqlUpdateWith = sqlUpdateWith cmd ++ [(name,sql)] }

instance SqlWith SqlDelete where
  sqlWith1 cmd name sql = cmd { sqlDeleteWith = sqlDeleteWith cmd ++ [(name,sql)] }

-- | Attach @WITH name AS (sql)@ to the statement being built.
sqlWith :: (MonadState v m, SqlWith v, Sqlable s) => SQL -> s -> m ()
sqlWith name sql = modify (\cmd -> sqlWith1 cmd name (toSQLCommand sql))
-- | Set the UNION'ed selects of the statement being built.
--
-- Note: WHERE clause of the main SELECT is treated specially, i.e. it only
-- applies to the main SELECT, not the whole union.
sqlUnion :: (MonadState SqlSelect m, Sqlable sql) => [sql] -> m ()
sqlUnion sqls = modify (\cmd -> cmd { sqlSelectUnion = map toSQLCommand sqls })
-- | Statements that carry a WHERE clause.
class SqlWhere a where
  -- | Append one condition to the statement's WHERE list.
  sqlWhere1 :: a -> SqlCondition -> a
  -- | Read back the collected WHERE conditions.
  sqlGetWhereConditions :: a -> [SqlCondition]

instance SqlWhere SqlSelect where
  sqlWhere1 cmd cond = cmd { sqlSelectWhere = sqlSelectWhere cmd ++ [cond] }
  sqlGetWhereConditions = sqlSelectWhere

instance SqlWhere SqlInsertSelect where
  sqlWhere1 cmd cond = cmd { sqlInsertSelectWhere = sqlInsertSelectWhere cmd ++ [cond] }
  sqlGetWhereConditions = sqlInsertSelectWhere

instance SqlWhere SqlUpdate where
  sqlWhere1 cmd cond = cmd { sqlUpdateWhere = sqlUpdateWhere cmd ++ [cond] }
  sqlGetWhereConditions = sqlUpdateWhere

instance SqlWhere SqlDelete where
  sqlWhere1 cmd cond = cmd { sqlDeleteWhere = sqlDeleteWhere cmd ++ [cond] }
  sqlGetWhereConditions = sqlDeleteWhere

instance SqlWhere SqlAll where
  sqlWhere1 cmd cond = cmd { sqlAllWhere = sqlAllWhere cmd ++ [cond] }
  sqlGetWhereConditions = sqlAllWhere
-- | Wrapper whose 'SqlWhere' instance marks every condition added
-- through it as non-critical (see 'ignoreWhereClause').
newtype SqlWhereIgnore a = SqlWhereIgnore { unSqlWhereIgnore :: a }

-- Clear the Bool flag of a condition (recursively for EXISTS
-- subqueries); presumably this excludes it from WHY-NOT reporting —
-- confirm against kWhyNot1.
ignoreWhereClause :: SqlCondition -> SqlCondition
ignoreWhereClause (SqlPlainCondition sql (SqlWhyNot _b f s)) =
  SqlPlainCondition sql (SqlWhyNot False f s)
ignoreWhereClause (SqlExistsCondition sql) =
  SqlExistsCondition (sql { sqlSelectWhere = map ignoreWhereClause (sqlSelectWhere sql)})

instance (SqlWhere a) => SqlWhere (SqlWhereIgnore a) where
  sqlWhere1 (SqlWhereIgnore cmd) cond =
    SqlWhereIgnore (sqlWhere1 cmd (ignoreWhereClause cond))
  sqlGetWhereConditions (SqlWhereIgnore cmd) = sqlGetWhereConditions cmd

-- | Run the given WHERE-building actions with all their conditions
-- marked as ignorable.
sqlIgnore :: (MonadState s m)
  => State (SqlWhereIgnore s) a
  -> m ()
sqlIgnore clauses =
  modify (\cmd -> unSqlWhereIgnore (execState clauses (SqlWhereIgnore cmd)))
-- | The @WHERE@ part of an SQL query. See above for a usage
-- example. See also 'SqlCondition'.
sqlWhere :: (MonadState v m, SqlWhere v) => SQL -> m ()
sqlWhere sql = sqlWhereE (DBBaseLineConditionIsFalse sql) sql

-- | Like 'sqlWhere', but also takes an exception value that is thrown
-- in case of error. See 'SqlCondition' and 'SqlWhyNot'.
sqlWhereE :: (MonadState v m, SqlWhere v, Exception e) => e -> SQL -> m ()
sqlWhereE exc sql = modify (\cmd -> sqlWhere1 cmd (SqlPlainCondition sql (SqlWhyNot True exc2 [])))
  where
    -- The exception takes no queried values, hence the unit row.
    exc2 (_::()) = exc
-- | Like 'sqlWhereE', but takes a one-argument function that
-- constructs an exception value plus an SQL fragment for querying the
-- database for the argument that is fed into the exception
-- constructor function. See 'SqlCondition' and 'SqlWhyNot'.
--
-- The SQL fragment should be of form @TABLENAME.COLUMNAME@, as it is
-- executed as part of a @SELECT@ query involving all referenced
-- tables.
sqlWhereEV :: (MonadState v m, SqlWhere v, Exception e, FromSQL a) => (a -> e, SQL) -> SQL -> m ()
sqlWhereEV (exc, vsql) sql = modify (\cmd -> sqlWhere1 cmd (SqlPlainCondition sql (SqlWhyNot True exc2 [vsql])))
  where
    exc2 (Identity v1) = exc v1

-- | Like 'sqlWhereEV', but the exception constructor function takes
-- two arguments.
sqlWhereEVV :: (MonadState v m, SqlWhere v, Exception e, FromSQL a, FromSQL b) => (a -> b -> e, SQL, SQL) -> SQL -> m ()
sqlWhereEVV (exc, vsql1, vsql2) sql = modify (\cmd -> sqlWhere1 cmd (SqlPlainCondition sql (SqlWhyNot True exc2 [vsql1, vsql2])))
  where
    exc2 (v1, v2) = exc v1 v2

-- | Like 'sqlWhereEV', but the exception constructor function takes
-- three arguments.
sqlWhereEVVV :: (MonadState v m, SqlWhere v, Exception e, FromSQL a, FromSQL b, FromSQL c) => (a -> b -> c -> e, SQL, SQL, SQL) -> SQL -> m ()
sqlWhereEVVV (exc, vsql1, vsql2, vsql3) sql = modify (\cmd -> sqlWhere1 cmd (SqlPlainCondition sql (SqlWhyNot True exc2 [vsql1, vsql2, vsql3])))
  where
    exc2 (v1, v2, v3) = exc v1 v2 v3

-- | Like 'sqlWhereEV', but the exception constructor function takes
-- four arguments.
sqlWhereEVVVV :: (MonadState v m, SqlWhere v, Exception e, FromSQL a, FromSQL b, FromSQL c, FromSQL d) => (a -> b -> c -> d -> e, SQL, SQL, SQL, SQL) -> SQL -> m ()
sqlWhereEVVVV (exc, vsql1, vsql2, vsql3, vsql4) sql = modify (\cmd -> sqlWhere1 cmd (SqlPlainCondition sql (SqlWhyNot True exc2 [vsql1, vsql2, vsql3, vsql4])))
  where
    exc2 (v1, v2, v3, v4) = exc v1 v2 v3 v4
-- | Require @column = value@.
sqlWhereEq :: (MonadState v m, SqlWhere v, Show a, ToSQL a) => SQL -> a -> m ()
sqlWhereEq name value = sqlWhere $ name <+> "=" <?> value

-- | Like 'sqlWhereEq', but throws the exception built from the
-- expected and actual values if the condition fails.
sqlWhereEqE :: (MonadState v m, SqlWhere v, Exception e, Show a, FromSQL a, ToSQL a)
  => (a -> a -> e) -> SQL -> a -> m ()
sqlWhereEqE exc name value = sqlWhereEV (exc value, name) $ name <+> "=" <?> value

-- | Require two SQL expressions to be equal.
sqlWhereEqSql :: (MonadState v m, SqlWhere v, Sqlable sql) => SQL -> sql -> m ()
sqlWhereEqSql name1 name2 = sqlWhere $ name1 <+> "=" <+> toSQLCommand name2

-- | Require @column <> value@.
sqlWhereNotEq :: (MonadState v m, SqlWhere v, Show a, ToSQL a) => SQL -> a -> m ()
sqlWhereNotEq name value = sqlWhere $ name <+> "<>" <?> value

-- | Like 'sqlWhereNotEq' with an exception on failure.
sqlWhereNotEqE :: (MonadState v m, SqlWhere v, Exception e, Show a, ToSQL a, FromSQL a)
  => (a -> a -> e) -> SQL -> a -> m ()
sqlWhereNotEqE exc name value = sqlWhereEV (exc value, name) $ name <+> "<>" <?> value

-- | Require @column LIKE pattern@.
sqlWhereLike :: (MonadState v m, SqlWhere v, Show a, ToSQL a) => SQL -> a -> m ()
sqlWhereLike name value = sqlWhere $ name <+> "LIKE" <?> value

-- | Like 'sqlWhereLike' with an exception on failure.
sqlWhereLikeE :: (MonadState v m, SqlWhere v, Exception e, Show a, ToSQL a, FromSQL a)
  => (a -> a -> e) -> SQL -> a -> m ()
sqlWhereLikeE exc name value = sqlWhereEV (exc value, name) $ name <+> "LIKE" <?> value

-- | Require @column ILIKE pattern@ (case-insensitive LIKE).
sqlWhereILike :: (MonadState v m, SqlWhere v, Show a, ToSQL a) => SQL -> a -> m ()
sqlWhereILike name value = sqlWhere $ name <+> "ILIKE" <?> value

-- | Like 'sqlWhereILike' with an exception on failure.
sqlWhereILikeE :: (MonadState v m, SqlWhere v, Exception e, Show a, ToSQL a, FromSQL a)
  => (a -> a -> e) -> SQL -> a -> m ()
sqlWhereILikeE exc name value = sqlWhereEV (exc value, name) $ name <+> "ILIKE" <?> value
-- | Require the column's value to be one of the given values. An
-- empty list renders as FALSE (never matches), a singleton list as
-- plain equality.
sqlWhereIn :: (MonadState v m, SqlWhere v, Show a, ToSQL a) => SQL -> [a] -> m ()
sqlWhereIn _name [] = sqlWhere "FALSE"
sqlWhereIn name [value] = sqlWhereEq name value
sqlWhereIn name values = do
  -- Unpack the array to give query optimizer more options.
  sqlWhere $ name <+> "IN (SELECT UNNEST(" <?> Array1 values <+> "))"

-- | Require the column's value to be in the result of a subquery.
sqlWhereInSql :: (MonadState v m, Sqlable a, SqlWhere v) => SQL -> a -> m ()
sqlWhereInSql name sql = sqlWhere $ name <+> "IN" <+> parenthesize (toSQLCommand sql)

-- | Like 'sqlWhereIn' with an exception on failure.
sqlWhereInE :: (MonadState v m, SqlWhere v, Exception e, Show a, ToSQL a, FromSQL a)
  => ([a] -> a -> e) -> SQL -> [a] -> m ()
sqlWhereInE exc name [] = sqlWhereEV (exc [], name) "FALSE"
sqlWhereInE exc name [value] = sqlWhereEqE (exc . (\x -> [x])) name value
sqlWhereInE exc name values =
  sqlWhereEV (exc values, name) $ name <+> "IN (SELECT UNNEST(" <?> Array1 values <+> "))"

-- | Require the column's value to be none of the given values. An
-- empty list renders as TRUE (always matches).
sqlWhereNotIn :: (MonadState v m, SqlWhere v, Show a, ToSQL a) => SQL -> [a] -> m ()
sqlWhereNotIn _name [] = sqlWhere "TRUE"
sqlWhereNotIn name [value] = sqlWhereNotEq name value
sqlWhereNotIn name values = sqlWhere $ name <+> "NOT IN (SELECT UNNEST(" <?> Array1 values <+> "))"

-- | Require the column's value to be outside a subquery's results.
sqlWhereNotInSql :: (MonadState v m, Sqlable a, SqlWhere v) => SQL -> a -> m ()
sqlWhereNotInSql name sql = sqlWhere $ name <+> "NOT IN" <+> parenthesize (toSQLCommand sql)

-- | Like 'sqlWhereNotIn' with an exception on failure.
sqlWhereNotInE :: (MonadState v m, SqlWhere v, Exception e, Show a, ToSQL a, FromSQL a)
  => ([a] -> a -> e) -> SQL -> [a] -> m ()
sqlWhereNotInE exc name [] = sqlWhereEV (exc [], name) "TRUE"
sqlWhereNotInE exc name [value] = sqlWhereNotEqE (exc . (\x -> [x])) name value
sqlWhereNotInE exc name values =
  sqlWhereEV (exc values, name) $ name <+> "NOT IN (SELECT UNNEST(" <?> Array1 values <+> "))"
-- | Require that the given subquery returns at least one row. Stored
-- as an 'SqlExistsCondition' so the subquery stays inspectable.
sqlWhereExists :: (MonadState v m, SqlWhere v) => SqlSelect -> m ()
sqlWhereExists subquery = modify (\cmd -> sqlWhere1 cmd (SqlExistsCondition subquery))

-- | Require that the given subquery returns no rows.
sqlWhereNotExists :: (MonadState v m, SqlWhere v) => SqlSelect -> m ()
sqlWhereNotExists subquery =
  sqlWhere ("NOT EXISTS (" <+> toSQLCommand (subquery { sqlSelectResult = ["TRUE"] }) <+> ")")

-- | Require that a column is NULL.
sqlWhereIsNULL :: (MonadState v m, SqlWhere v) => SQL -> m ()
sqlWhereIsNULL column = sqlWhere (column <+> "IS NULL")

-- | Require that a column is not NULL.
sqlWhereIsNotNULL :: (MonadState v m, SqlWhere v) => SQL -> m ()
sqlWhereIsNotNULL column = sqlWhere (column <+> "IS NOT NULL")

-- | Like 'sqlWhereIsNULL', but on failure throws the exception built
-- from the column's actual value.
sqlWhereIsNULLE :: (MonadState v m, SqlWhere v, Exception e, FromSQL a)
  => (a -> e) -> SQL -> m ()
sqlWhereIsNULLE exc column = sqlWhereEV (exc, column) (column <+> "IS NULL")
-- | Add a condition in the WHERE statement that holds if any of the given
-- condition holds.
sqlWhereAny :: (MonadState v m, SqlWhere v) => [State SqlAll ()] -> m ()
sqlWhereAny = sqlWhere . sqlWhereAnyImpl

-- | Add a condition just like 'sqlWhereAny' but throw the given exception if
-- none of the given conditions hold.
sqlWhereAnyE :: (Exception e, MonadState v m, SqlWhere v)
  => e -> [State SqlAll ()] -> m ()
sqlWhereAnyE e = sqlWhereE e . sqlWhereAnyImpl

-- Render the disjunction of the given 'SqlAll' blocks; an empty list
-- of alternatives renders as FALSE.
sqlWhereAnyImpl :: [State SqlAll ()] -> SQL
sqlWhereAnyImpl [] = "FALSE"
sqlWhereAnyImpl l =
  "(" <+> smintercalate "OR" (map (parenthesize . toSQLCommand
                                    . flip execState (SqlAll [])) l) <+> ")"
-- | Commands with a FROM-like clause that extra tables can be appended to.
class SqlFrom a where
  sqlFrom1 :: a -> SQL -> a

instance SqlFrom SqlSelect where
  sqlFrom1 cmd sql = cmd { sqlSelectFrom = sqlSelectFrom cmd <+> sql }

instance SqlFrom SqlInsertSelect where
  sqlFrom1 cmd sql = cmd { sqlInsertSelectFrom = sqlInsertSelectFrom cmd <+> sql }

instance SqlFrom SqlUpdate where
  sqlFrom1 cmd sql = cmd { sqlUpdateFrom = sqlUpdateFrom cmd <+> sql }

instance SqlFrom SqlDelete where
  -- DELETE has no FROM list for joins; extra tables go into USING.
  sqlFrom1 cmd sql = cmd { sqlDeleteUsing = sqlDeleteUsing cmd <+> sql }

-- | Append a table (or arbitrary SQL) to the command's FROM clause.
sqlFrom :: (MonadState v m, SqlFrom v) => SQL -> m ()
sqlFrom sql = modify (\cmd -> sqlFrom1 cmd sql)

-- | Add a table with implicit cross-join (comma) syntax.
sqlJoin :: (MonadState v m, SqlFrom v) => SQL -> m ()
sqlJoin table = sqlFrom (", " <+> table)

-- | Add an inner @JOIN ... ON ...@ clause.
sqlJoinOn :: (MonadState v m, SqlFrom v) => SQL -> SQL -> m ()
sqlJoinOn table condition = sqlFrom (" JOIN " <+>
                                     table <+>
                                     " ON " <+>
                                     condition)

-- | Add a @LEFT JOIN ... ON ...@ clause.
sqlLeftJoinOn :: (MonadState v m, SqlFrom v) => SQL -> SQL -> m ()
sqlLeftJoinOn table condition = sqlFrom (" LEFT JOIN " <+>
                                         table <+>
                                         " ON " <+>
                                         condition)

-- | Add a @RIGHT JOIN ... ON ...@ clause.
sqlRightJoinOn :: (MonadState v m, SqlFrom v) => SQL -> SQL -> m ()
sqlRightJoinOn table condition = sqlFrom (" RIGHT JOIN " <+>
                                          table <+>
                                          " ON " <+>
                                          condition)

-- | Add a @FULL JOIN ... ON ...@ clause.
sqlFullJoinOn :: (MonadState v m, SqlFrom v) => SQL -> SQL -> m ()
sqlFullJoinOn table condition = sqlFrom (" FULL JOIN " <+>
                                         table <+>
                                         " ON " <+>
                                         condition)
-- | Commands that support SET-style column assignments.
class SqlSet a where
  sqlSet1 :: a -> SQL -> SQL -> a

instance SqlSet SqlUpdate where
  sqlSet1 cmd name v = cmd { sqlUpdateSet = sqlUpdateSet cmd ++ [(name, v)] }

instance SqlSet SqlInsert where
  -- INSERT distinguishes single values from multi-row lists; see 'sqlSetList'.
  sqlSet1 cmd name v = cmd { sqlInsertSet = sqlInsertSet cmd ++ [(name, Single v)] }

instance SqlSet SqlInsertSelect where
  sqlSet1 cmd name v = cmd { sqlInsertSelectSet = sqlInsertSelectSet cmd ++ [(name, v)] }

-- | Add @ON CONFLICT DO NOTHING@ with no explicit conflict target.
sqlOnConflictDoNothing :: MonadState SqlInsert m => m ()
sqlOnConflictDoNothing = modify $ \cmd -> cmd
  { sqlInsertOnConflict = Just ("", Nothing)
  }

-- | Add @ON CONFLICT (columns) DO ...@ with the given action.
sqlOnConflictOnColumns
  :: Sqlable sql => MonadState SqlInsert m => [SQL] -> sql -> m ()
sqlOnConflictOnColumns columns sql = modify $ \cmd -> cmd
  { sqlInsertOnConflict = Just (parenthesize $ sqlConcatComma columns, Just $ toSQLCommand sql)
  }

-- | Add @ON CONFLICT (columns) DO NOTHING@.
sqlOnConflictOnColumnsDoNothing :: MonadState SqlInsert m => [SQL] -> m ()
sqlOnConflictOnColumnsDoNothing columns = modify $ \cmd -> cmd
  { sqlInsertOnConflict = Just (parenthesize $ sqlConcatComma columns, Nothing)
  }

-- | Set a column to a raw SQL expression.
sqlSetCmd :: (MonadState v m, SqlSet v) => SQL -> SQL -> m ()
sqlSetCmd name sql = modify (\cmd -> sqlSet1 cmd name sql)

-- | Set a column to many values at once (multi-row INSERT).
sqlSetCmdList :: (MonadState SqlInsert m) => SQL -> [SQL] -> m ()
sqlSetCmdList name as = modify (\cmd -> cmd { sqlInsertSet = sqlInsertSet cmd ++ [(name, Many as)] })

-- | Set a column to a parameterized Haskell value.
sqlSet :: (MonadState v m, SqlSet v, Show a, ToSQL a) => SQL -> a -> m ()
sqlSet name a = sqlSetCmd name (sqlParam a)

-- | Increment a column by one: @SET name = name + 1@.
sqlSetInc :: (MonadState v m, SqlSet v) => SQL -> m ()
sqlSetInc name = sqlSetCmd name $ name <+> "+ 1"

-- | Multi-row INSERT of parameterized values for one column.
sqlSetList :: (MonadState SqlInsert m, Show a, ToSQL a) => SQL -> [a] -> m ()
sqlSetList name as = sqlSetCmdList name (map sqlParam as)

-- | Like 'sqlSetList', but 'Nothing' inserts the column's DEFAULT.
sqlSetListWithDefaults :: (MonadState SqlInsert m, Show a, ToSQL a) => SQL -> [Maybe a] -> m ()
sqlSetListWithDefaults name as = sqlSetCmdList name (map (maybe "DEFAULT" sqlParam) as)

-- | Set a column to itself, e.g. to copy it in an INSERT ... SELECT.
sqlCopyColumn :: (MonadState v m, SqlSet v) => SQL -> m ()
sqlCopyColumn column = sqlSetCmd column column
-- | Commands that can return rows (select list / RETURNING).
class SqlResult a where
  sqlResult1 :: a -> SQL -> a

instance SqlResult SqlSelect where
  sqlResult1 cmd sql = cmd { sqlSelectResult = sqlSelectResult cmd ++ [sql] }

instance SqlResult SqlInsert where
  sqlResult1 cmd sql = cmd { sqlInsertResult = sqlInsertResult cmd ++ [sql] }

instance SqlResult SqlInsertSelect where
  sqlResult1 cmd sql = cmd { sqlInsertSelectResult = sqlInsertSelectResult cmd ++ [sql] }

instance SqlResult SqlUpdate where
  sqlResult1 cmd sql = cmd { sqlUpdateResult = sqlUpdateResult cmd ++ [sql] }

-- | Append an expression to the command's result list.
sqlResult :: (MonadState v m, SqlResult v) => SQL -> m ()
sqlResult sql = modify (\cmd -> sqlResult1 cmd sql)

-- | Commands that support ORDER BY.
class SqlOrderBy a where
  sqlOrderBy1 :: a -> SQL -> a

instance SqlOrderBy SqlSelect where
  sqlOrderBy1 cmd sql = cmd { sqlSelectOrderBy = sqlSelectOrderBy cmd ++ [sql] }

instance SqlOrderBy SqlInsertSelect where
  sqlOrderBy1 cmd sql = cmd { sqlInsertSelectOrderBy = sqlInsertSelectOrderBy cmd ++ [sql] }

-- | Append an ORDER BY expression.
sqlOrderBy :: (MonadState v m, SqlOrderBy v) => SQL -> m ()
sqlOrderBy sql = modify (\cmd -> sqlOrderBy1 cmd sql)

-- | Commands that support GROUP BY and HAVING.
class SqlGroupByHaving a where
  sqlGroupBy1 :: a -> SQL -> a
  sqlHaving1 :: a -> SQL -> a

instance SqlGroupByHaving SqlSelect where
  sqlGroupBy1 cmd sql = cmd { sqlSelectGroupBy = sqlSelectGroupBy cmd ++ [sql] }
  sqlHaving1 cmd sql = cmd { sqlSelectHaving = sqlSelectHaving cmd ++ [sql] }

instance SqlGroupByHaving SqlInsertSelect where
  sqlGroupBy1 cmd sql = cmd { sqlInsertSelectGroupBy = sqlInsertSelectGroupBy cmd ++ [sql] }
  sqlHaving1 cmd sql = cmd { sqlInsertSelectHaving = sqlInsertSelectHaving cmd ++ [sql] }

-- | Append a GROUP BY expression.
sqlGroupBy :: (MonadState v m, SqlGroupByHaving v) => SQL -> m ()
sqlGroupBy sql = modify (\cmd -> sqlGroupBy1 cmd sql)

-- | Append a HAVING condition.
sqlHaving :: (MonadState v m, SqlGroupByHaving v) => SQL -> m ()
sqlHaving sql = modify (\cmd -> sqlHaving1 cmd sql)

-- | Commands that support OFFSET and LIMIT.
class SqlOffsetLimit a where
  sqlOffset1 :: a -> Integer -> a
  sqlLimit1 :: a -> Integer -> a

instance SqlOffsetLimit SqlSelect where
  sqlOffset1 cmd num = cmd { sqlSelectOffset = num }
  sqlLimit1 cmd num = cmd { sqlSelectLimit = num }

instance SqlOffsetLimit SqlInsertSelect where
  sqlOffset1 cmd num = cmd { sqlInsertSelectOffset = num }
  sqlLimit1 cmd num = cmd { sqlInsertSelectLimit = num }

-- | Set the query's OFFSET.
sqlOffset :: (MonadState v m, SqlOffsetLimit v, Integral int) => int -> m ()
sqlOffset val = modify (\cmd -> sqlOffset1 cmd $ toInteger val)

-- | Set the query's LIMIT.
sqlLimit :: (MonadState v m, SqlOffsetLimit v, Integral int) => int -> m ()
sqlLimit val = modify (\cmd -> sqlLimit1 cmd $ toInteger val)
-- | Commands that support the DISTINCT modifier.
class SqlDistinct a where
  sqlDistinct1 :: a -> a

instance SqlDistinct SqlSelect where
  sqlDistinct1 cmd = cmd { sqlSelectDistinct = True }

instance SqlDistinct SqlInsertSelect where
  sqlDistinct1 cmd = cmd { sqlInsertSelectDistinct = True }

-- | Mark the current command as DISTINCT.
-- (Eta-reduced: @modify (\cmd -> sqlDistinct1 cmd)@ is just @modify sqlDistinct1@.)
sqlDistinct :: (MonadState v m, SqlDistinct v) => m ()
sqlDistinct = modify sqlDistinct1
-- | Commands whose WHERE part can be re-expressed as a plain SELECT, used to
-- explain why a command affected zero rows (see 'sqlTurnIntoWhyNotSelect').
class (SqlWhere a, Sqlable a) => SqlTurnIntoSelect a where
  sqlTurnIntoSelect :: a -> SqlSelect

instance SqlTurnIntoSelect SqlSelect where
  sqlTurnIntoSelect = id
-- | The 'sqlTurnIntoWhyNotSelect' turns a failed query into a
-- why-not-query that can explain why query altered zero rows or
-- returned zero results.
--
-- Let's consider an example of an explanation:
--
-- > UPDATE t1
-- > SET a = 1
-- > WHERE cond1
-- > AND cond2 -- with value2
-- > AND EXISTS (SELECT TRUE
-- > FROM t2
-- > WHERE cond3 -- with value3a and value3b
-- > AND EXISTS (SELECT TRUE
-- > FROM t3
-- > WHERE cond4))
--
-- 'sqlTurnIntoWhyNotSelect' will produce a @SELECT@ of the form:
--
-- > SELECT
-- > EXISTS (SELECT TRUE ... WHERE cond1),
-- > EXISTS (SELECT TRUE ... WHERE cond1 AND cond2),
-- > EXISTS (SELECT TRUE ... WHERE cond1 AND cond2 AND cond3),
-- > EXISTS (SELECT TRUE ... WHERE cond1 AND cond2 AND cond3 AND cond4);
--
-- Now, after this statement is executed we see which of these
-- returned @FALSE@ as the first one. This is the condition that failed
-- the whole query.
--
-- We can get more information at this point. If failed condition was
-- @cond2@, then @value2@ can be extracted by this statement:
--
-- > SELECT value2 ... WHERE cond1;
--
-- If failed condition was @cond3@, then statement executed can be:
--
-- > SELECT value3a, value3b ... WHERE cond1 AND cond2;
--
-- Rationale: @EXISTS@ clauses should pinpoint which @condX@ was the first
-- one to produce zero rows. @SELECT@ clauses after @EXISTS@ should
-- explain why condX filtered out all rows.
--
-- 'DB.WhyNot.kWhyNot1' looks for first @EXISTS@ clause that is @FALSE@
-- and then tries to construct an @Exception@ object with values that come
-- after. If the values that come after cannot be sensibly parsed
-- (usually they are @NULL@ when a value is expected), this exception is
-- skipped and next one is tried.
--
-- If first @EXISTS@ clause is @TRUE@ but no other exception was properly
-- generated then @DBExceptionCouldNotParseValues@ is thrown with pair
-- of 'typeRef' of first exception that could not be parsed and with
-- list of SqlValues that it could not parse.
--
-- The 'DB.WhyNot.kRun1OrThrowWhyNot' throws first exception on the
-- list.
--
-- We have a theorem to use in this transformation:
--
-- > EXISTS (SELECT t1 WHERE cond1 AND EXISTS (SELECT t2 WHERE cond2))
--
-- is equivalent to
--
-- > EXISTS (SELECT t1, t2 WHERE cond1 AND cond2)
--
-- and it can be used recursively.
-- | Build the why-not SELECT described in the long comment above: one
-- boolean per plain condition, each checking whether the query still
-- returns rows when only the conditions up to (and including) that one
-- are applied.
sqlTurnIntoWhyNotSelect :: (SqlTurnIntoSelect a) => a -> SqlSelect
sqlTurnIntoWhyNotSelect command =
  sqlSelect "" . sqlResult $ mconcat [
    "ARRAY["
    , mintercalate ", " $ map emitExists [0..(count-1)]
    , "]::boolean[]"
    ]
  where select = sqlTurnIntoSelect command
        -- Total number of plain conditions, counted recursively through
        -- nested EXISTS conditions.
        count :: Int
        count = sum (map count' (sqlSelectWhere select))
        count' (SqlPlainCondition {}) = 1
        count' (SqlExistsCondition select') = sum (map count' (sqlSelectWhere select'))
        -- EXISTS clause keeping only the first (current + 1) plain conditions.
        emitExists :: Int -> SQL
        emitExists current =
          case runState (run current select) 0 of
            (s, _) -> if null (sqlSelectWhere s)
                      then "TRUE"
                      else "EXISTS (" <> (toSQLCommand $ s { sqlSelectResult = [ "TRUE" ]}) <> ")"
        -- Walk the condition tree with a running plain-condition index in
        -- State; drop every plain condition whose index exceeds 'current'.
        run :: (MonadState Int m) => Int -> SqlSelect -> m SqlSelect
        run current select' = do
          new <- mapM (around current) (sqlSelectWhere select')
          return (select' { sqlSelectWhere = concat new })
        around :: (MonadState Int m) => Int -> SqlCondition -> m [SqlCondition]
        around current cond@(SqlPlainCondition{}) = do
          index <- get
          modify (+1)
          if current >= index
            then return [cond]
            else return []
        around current (SqlExistsCondition subSelect) = do
          subSelect' <- run current subSelect
          return [SqlExistsCondition subSelect']
instance SqlTurnIntoSelect SqlUpdate where
  -- The updated table plus any extra FROM tables become the SELECT's FROM;
  -- the WHERE part is carried over unchanged.  With no explicit result
  -- list, TRUE stands in (only existence matters).
  sqlTurnIntoSelect s = SqlSelect
    { sqlSelectFrom = sqlUpdateWhat s <>
                      if isSqlEmpty (sqlUpdateFrom s)
                      then ""
                      else "," <+> sqlUpdateFrom s
    , sqlSelectUnion = []
    , sqlSelectDistinct = False
    , sqlSelectResult = if null (sqlUpdateResult s)
                        then ["TRUE"]
                        else sqlUpdateResult s
    , sqlSelectWhere = sqlUpdateWhere s
    , sqlSelectOrderBy = []
    , sqlSelectGroupBy = []
    , sqlSelectHaving = []
    , sqlSelectOffset = 0
    , sqlSelectLimit = -1
    , sqlSelectWith = sqlUpdateWith s -- this is a bit dangerous because it can contain nested DELETE/UPDATE
    }

instance SqlTurnIntoSelect SqlDelete where
  -- Mirrors the SqlUpdate case, with FROM/USING in place of WHAT/FROM.
  sqlTurnIntoSelect s = SqlSelect
    { sqlSelectFrom = sqlDeleteFrom s <>
                      if isSqlEmpty (sqlDeleteUsing s)
                      then ""
                      else "," <+> sqlDeleteUsing s
    , sqlSelectUnion = []
    , sqlSelectDistinct = False
    , sqlSelectResult = if null (sqlDeleteResult s)
                        then ["TRUE"]
                        else sqlDeleteResult s
    , sqlSelectWhere = sqlDeleteWhere s
    , sqlSelectOrderBy = []
    , sqlSelectGroupBy = []
    , sqlSelectHaving = []
    , sqlSelectOffset = 0
    , sqlSelectLimit = -1
    , sqlSelectWith = sqlDeleteWith s -- this is a bit dangerous because it can contain nested DELETE/UPDATE
    }

instance SqlTurnIntoSelect SqlInsertSelect where
  -- INSERT ... SELECT already carries a full select; copy it field by field.
  sqlTurnIntoSelect s = SqlSelect
    { sqlSelectFrom = sqlInsertSelectFrom s
    , sqlSelectUnion = []
    , sqlSelectDistinct = False
    , sqlSelectResult = sqlInsertSelectResult s
    , sqlSelectWhere = sqlInsertSelectWhere s
    , sqlSelectOrderBy = sqlInsertSelectOrderBy s
    , sqlSelectGroupBy = sqlInsertSelectGroupBy s
    , sqlSelectHaving = sqlInsertSelectHaving s
    , sqlSelectOffset = sqlInsertSelectOffset s
    , sqlSelectLimit = sqlInsertSelectLimit s
    , sqlSelectWith = sqlInsertSelectWith s -- this is a bit dangerous because it can contain nested DELETE/UPDATE
    }
{- Warning: use kWhyNot1 for now as kWhyNot does not work in expected way.
kWhyNot should return a list of rows, where each row is a list of
exceptions. Right now we are not able to differentiate between rows
because we do not support a concept of a row identity. kWhyNot can
return rows in any order, returns empty rows for successful hits, does
not return a row if baseline conditions weren't met. This effectively
renders it useless.
kWhyNot will be resurrected when we get a row identity concept.
-}
{-
-- | If 'kWhyNot1' returns an empty list of exceptions when none of
-- @EXISTS@ clauses generated by 'sqlTurnIntoWhyNotSelect' was
-- @FALSE@. Should not happen in real life, file a bug report if you see
-- such a case.
kWhyNot :: (SqlTurnIntoSelect s, MonadDB m) => s -> m [[SomeException]]
kWhyNot cmd = do
let newSelect = sqlTurnIntoWhyNotSelect cmd
if null (sqlSelectResult newSelect)
then return [[]]
else do
kRun_ newSelect
kFold2 (decodeListOfExceptionsFromWhere (sqlGetWhereConditions cmd)) []
-}
-- | Existentially wraps a row-decoding function that turns a fetched row
-- into an exception describing the failed condition.
data ExceptionMaker = forall row. FromRow row => ExceptionMaker (row -> SomeException)

-- | Internal error raised when the why-not machinery itself misbehaves.
newtype DBKwhyNotInternalError = DBKwhyNotInternalError String
  deriving (Show, Typeable)

instance Exception DBKwhyNotInternalError
-- | Run the why-not analysis for a failed command.  Returns whether the
-- failed condition is "important" and an exception explaining the failure.
kWhyNot1Ex :: forall m s. (SqlTurnIntoSelect s, MonadDB m, MonadThrow m)
           => s -> m (Bool, SomeException)
kWhyNot1Ex cmd = do
  let newSelect = sqlTurnIntoSelect cmd
      newWhyNotSelect = sqlTurnIntoWhyNotSelect newSelect
  -- The why-not select yields one boolean per condition; the first FALSE
  -- marks the condition that filtered out all rows (index 0 if none).
  let findFirstFalse :: Identity (Array1 Bool) -> Int
      findFirstFalse (Identity (Array1 row)) = fromMaybe 0 (findIndex (== False) row)
  runQuery_ (newWhyNotSelect { sqlSelectLimit = 1 })
  indexOfFirstFailedCondition <- fetchOne findFirstFalse
  -- Flattened explanations in the same order the EXISTS clauses were emitted.
  let logics = enumerateWhyNotExceptions ((sqlSelectFrom newSelect),[]) (sqlGetWhereConditions newSelect)
  let mcondition = logics `atMay` indexOfFirstFailedCondition
  case mcondition of
    Nothing -> return
      (True, toException . DBKwhyNotInternalError $
             "list of failed conditions is empty")
    -- No explanation SQL: the exception needs no fetched values.
    Just (important, ExceptionMaker exception, _from, []) ->
      return (important, exception $ error "this argument should've been ignored")
    -- Otherwise run a secondary query fetching the values needed to build
    -- a meaningful exception for the failed condition.
    Just (important, ExceptionMaker exception, (from, conds), sqls) -> do
      let statement' = sqlSelect2 from $ do
            mapM_ sqlResult sqls
            sqlLimit (1::Int)
            sqlOffset (0::Int)
          statement = statement' { sqlSelectWhere = conds }
      --Log.debug $ "Explanation SQL:\n" ++ show statement
      runQuery_ statement
      result <- fetchOne exception
      return (important, result)
-- | Function 'kWhyNot1' is a workhorse for explainable SQL
-- failures. SQL fails if it did not affect any rows or did not return
-- any rows. When that happens 'kWhyNot1' should be called. 'kWhyNot1'
-- returns an exception describing why a row could not be
-- returned or affected by a query.
kWhyNot1 :: (SqlTurnIntoSelect s, MonadDB m, MonadThrow m)
         => s -> m SomeException
kWhyNot1 cmd = do
  -- Discard the importance flag; callers only need the explanation itself.
  (_important, exception) <- kWhyNot1Ex cmd
  return exception
-- | Flatten a condition tree into the ordered list of explanations matching
-- the EXISTS clauses of 'sqlTurnIntoWhyNotSelect'.  Each entry carries the
-- importance flag, the exception maker, the (FROM, preceding conditions)
-- context needed to evaluate the explanation, and the explanation SQLs.
enumerateWhyNotExceptions :: (SQL, [SqlCondition])
                          -> [SqlCondition]
                          -> [( Bool
                              , ExceptionMaker
                              , (SQL, [SqlCondition])
                              , [SQL]
                              )]
enumerateWhyNotExceptions (from,condsUpTillNow) conds = concatMap worker (zip conds (inits conds))
  where
    -- Plain condition: its context is every condition before it.
    worker (SqlPlainCondition _ (SqlWhyNot b f s), condsUpTillNow2) =
      [(b, ExceptionMaker (SomeException . f), (from, condsUpTillNow ++ condsUpTillNow2), s)]
    -- EXISTS: recurse, merging the subquery's FROM into the context.
    worker (SqlExistsCondition s, condsUpTillNow2) =
      enumerateWhyNotExceptions (newFrom, condsUpTillNow ++ condsUpTillNow2)
                                (sqlGetWhereConditions s)
      where
        newFrom = if isSqlEmpty from
                  then sqlSelectFrom s
                  else if isSqlEmpty (sqlSelectFrom s)
                       then from
                       else from <> ", " <> sqlSelectFrom s
-- | Implicit exception for `sqlWhere` combinator family.
newtype DBBaseLineConditionIsFalse = DBBaseLineConditionIsFalse SQL
  deriving (Show, Typeable)

instance Exception DBBaseLineConditionIsFalse where
  -- Accept either a directly thrown value or one wrapped inside a
  -- 'DBException', unwrapping the latter via its 'dbeError' field.
  fromException se@(SomeException e) = msum
    [ cast e
    , do
        DBException {..} <- fromException se
        fromException . toException $ dbeError
    ]
-- | Run a query that may affect many rows; if it affected none, throw the
-- explanation produced by 'kWhyNot1'.
kRunManyOrThrowWhyNot :: (SqlTurnIntoSelect s, MonadDB m, MonadThrow m)
                      => s -> m ()
kRunManyOrThrowWhyNot sqlable = do
  success <- runQuery $ toSQLCommand sqlable
  when (success == 0) $ do
    SomeException exception <- kWhyNot1 sqlable
    throwDB exception

-- | Run a query that must affect/return exactly one row; otherwise throw
-- the why-not explanation.
kRun1OrThrowWhyNot :: (SqlTurnIntoSelect s, MonadDB m, MonadThrow m)
                   => s -> m ()
kRun1OrThrowWhyNot sqlable = do
  success <- runQuery01 $ toSQLCommand sqlable
  when (not success) $ do
    SomeException exception <- kWhyNot1 sqlable
    throwDB exception

-- | Like 'kRun1OrThrowWhyNot', but only throw when the failed condition is
-- flagged important; unimportant failures are silently ignored.
kRun1OrThrowWhyNotAllowIgnore :: (SqlTurnIntoSelect s, MonadDB m, MonadThrow m)
                              => s -> m ()
kRun1OrThrowWhyNotAllowIgnore sqlable = do
  success <- runQuery01 $ toSQLCommand sqlable
  when (not success) $ do
    (important, SomeException exception) <- kWhyNot1Ex sqlable
    when (important) $
      throwDB exception

-- | Run a query expected to deliver exactly one row and decode it.  Zero
-- rows triggers the why-not explanation; more than one row raises
-- 'AffectedRowsMismatch'.
kRunAndFetch1OrThrowWhyNot :: (IsSQL s, FromRow row, MonadDB m, MonadThrow m, SqlTurnIntoSelect s)
                           => (row -> a) -> s -> m a
kRunAndFetch1OrThrowWhyNot decoder sqlcommand = do
  runQuery_ sqlcommand
  results <- fetchMany decoder
  case results of
    [] -> do
      SomeException exception <- kWhyNot1 sqlcommand
      throwDB exception
    [r] -> return r
    _ -> throwDB AffectedRowsMismatch {
        rowsExpected = [(1, 1)]
      , rowsDelivered = length results
      }
|
BOCA RATON – Between garage sales at his home in the Charlestown neighborhood of Boston and fleeing South Florida ahead of the arrival of Hurricane Irma, Kelly Olynyk spent plenty of time in Miami this summer getting to know his new teammates.
And Olynyk, who signed a four-year, $50 million contract in July, sees a parallel between this Heat team and his early years with the Celtics.
Olynyk, 26, was the 13th overall pick in 2013 by the Mavericks before being traded to Boston on draft night. The next couple of years the Celtics started to build a team that eventually would reach the Eastern Conference finals last May and now has championship aspirations.
[Report: Dwyane Wade has reached an agreement to sign with the Cavaliers]
[Here’s how you can buy tickets for Saturday’s Miami Heat open scrimmage at FAU]
In 2015-16, the Celtics entered the season with one player, David Lee, who played in an All-Star game, and several who were developing. Isaiah Thomas had joined Boston the previous year and that season started to turn into the star he has become.
That team improved from 40 wins the previous year to 48.
This year, Olynyk joins a Heat team in which nobody has ever been an All-Star but the majority of the roster is on the upswing.
“That’s how we were in Boston until Isaiah became an All-Star,” the 7-foot center/forward said. “We were just a group of guys playing hard every single night together with a great coach.
“That’s what we have here. I kind of see the similarities and the parallels and the same formula. I think the opportunities and the ceiling is limitless.”
Olynyk jumped into camp this week at Florida Atlantic University already having learned a lot about his new teammates. He was part of a core group of players – along with Hassan Whiteside, Justise Winslow, Tyler Johnson, James Johnson, Josh Richardson, Udonis Haslem, Dion Waiters and Rodney McGruder – that spent a lot time this summer at the team’s practice facility.
“You got a great group of guys who are really hard workers,” he said. “It’s kind of like the group we had in Boston that kind of grew together. It’s a close group. … It’s a gritty group of guys, tough nosed, chip on your shoulder, coming to work every day to prove something. That’s half the battle sometimes.”
[Want more Heat news sent directly to your Facebook feed? Make sure to like our Heat Facebook page] |
import {test} from "./test.ts"
/**
 * Eliminator for optional values: returns `z` when `x` is undefined,
 * otherwise applies `f` to `x`.
 *
 * Fix: a required parameter may not follow an optional one (`x?: A, z: B`
 * is TypeScript error TS1016). Typing `x` as `A | undefined` keeps every
 * existing call site valid while making the signature compile.
 */
export function maybe<A, B>(x: A | undefined, z: B, f: (a: A) => B): B {
  return x === undefined ? z : f(x)
}
test(maybe, undefined, -1, x => x + ' ok').is = -1
test(maybe, 'seems', -1, x => x + ' ok').is = 'seems ok'
/** Remove duplicate elements, keeping the first occurrence of each. */
export function nub<A>(xs: A[]): A[] {
  const seen = new Set<A>()
  const out: A[] = []
  for (const x of xs) {
    if (!seen.has(x)) {
      seen.add(x)
      out.push(x)
    }
  }
  return out
}
test(nub, [1, 2, 2, 3, 4, 3, 1]).is = [1, 2, 3, 4]
/** Split a string on runs of whitespace, ignoring leading/trailing space. */
export function words(s: string): string[] {
  const trimmed = s.replace(/^\s+|\s+$/g, '')
  return trimmed.split(/\s+/)
}
test(words, ' apa bepa cepa ').is = ['apa', 'bepa', 'cepa']
/**
 * Group consecutive elements of xs that share the same key f(x).
 * Elements for which `free` holds are "floating": a run of free elements is
 * merged into the current group if the next keyed element continues the same
 * key, otherwise the run is emitted as its own group.
 * NOTE(review): keys are compared via JSON.stringify of [fx === undefined, fx],
 * so keys should be JSON-serializable for grouping to be reliable — confirm.
 */
export function group<A, B>(xs: A[], f: (a: A) => B, free?: (a: A) => boolean): A[][] {
  if (xs.length == 0) {
    return []
  }
  const out = [] as A[][]
  let free_cursor: A[] = []
  let cursor: A[]
  let last: string
  xs.forEach(x => {
    const fx = f(x)
    if (free && free(x)) {
      free_cursor.push(x)
    } else {
      // Encode undefined explicitly: JSON.stringify(undefined) is not a string.
      const now = JSON.stringify([fx === undefined, fx])
      if (now !== last) {
        // Key changed: flush pending free elements as their own group,
        // then start a new group.
        if (free_cursor.length) {
          out.push(free_cursor)
          free_cursor = []
        }
        cursor = []
        out.push(cursor)
      } else {
        // Same key: pending free elements join the current group.
        cursor.push(...free_cursor)
        free_cursor = []
      }
      last = now
      cursor.push(x)
    }
  })
  // Trailing free elements form a final group of their own.
  if (free_cursor.length) {
    out.push(free_cursor)
  }
  return out
}
test(group, [], (x: string) => x).is = []
test(group, [1, 2, 3, 4, 5, 6], (x: number) => Math.floor(x / 2)).is = [[1], [2, 3], [4, 5], [6]]
test(group, [0, 1, 2, 3, 4, 5, 6, 7, 8], (x: number) => x < 3.5, (x: number) => x % 2 == 0).is = [
[0],
[1, 2, 3],
[4],
[5, 6, 7],
[8],
]
/** Map a function over an object's entries, collecting results into an array. */
export function mapObject<K extends string, A, B>
  (obj: Record<K, A>, f: (k: K, v: A, i: number) => B): B[] {
  const out: B[] = []
  let i = 0
  for (const [k, v] of Object.entries(obj)) {
    out.push(f(k as K, v as A, i++))
  }
  return out
}
/** Map a function over an object's values, keeping the same keys. */
export function mapEntries<K extends string, A, B>
  (obj: Record<K, A>, f: (k: K, v: A, i: number) => B): Record<K, B> {
  const out = {} as Record<K, B>
  Object.entries(obj).forEach(([k, v], i) => {
    out[k as K] = f(k as K, v as A, i)
  })
  return out
}
/** Inclusive integer range [from, to]; empty when from > to. */
export function range(from: number, to: number) {
  if (to < from) {
    return [] as number[]
  }
  return Array.from({length: to - from + 1}, (_, i) => from + i)
}
test(range, 2, 4).is = [2,3,4]
test(range, 2, 2).is = [2]
test(range, 2, 1).is = []
/**
 * Render rows of strings as a right-aligned table, each column padded to its
 * widest cell. Assumes every row has at least as many columns as the first
 * row — TODO confirm with callers.
 *
 * Idiom: the hand-rolled leftpad (`new Array(w - x.length).fill(' ').join('')`)
 * is replaced with the standard `String.prototype.padStart`, which also avoids
 * the negative-length `Array(...)` throw if a cell ever exceeded its width.
 */
export const show_table = (xss: string[][]) => {
  const widths = xss[0].map((_, i) => Math.max(...xss.map(xs => xs[i].length)))
  return xss.map(xs => xs.map((x, i) => x.padStart(widths[i])).join(' ')).join('\n')
}
test(show_table, [['apa', '1'], ['2', 'bepa']]).is =
'apa 1' + '\n' +
' 2 bepa'
/** Tagged-template helper: split the literal into lines, each split into words. */
export function lines(xs: TemplateStringsArray) {
  return xs[0].trim().split(/\n/mg).map(words)
}
/**
 * All permutations of xs, generated by inserting the head element into
 * every position of each permutation of the tail.
 */
export function perms<A>(xs: A[]): A[][] {
  if (xs.length === 0) {
    return [[]]
  }
  const [head, ...tail] = xs
  const out: A[][] = []
  for (const ys of perms(tail)) {
    for (let i = 0; i <= ys.length; i++) {
      out.push([...ys.slice(0, i), head, ...ys.slice(i)])
    }
  }
  return out
}
test(perms, [1, 2]).is = [[1, 2], [2, 1]]
test(() => perms([1, 2, 3]).length).is = 6
/**
 * Topologically sort spec so that every element appears after all of its
 * children. Elements whose children are not yet placed wait in a queue keyed
 * by the first missing child id and are retried once that child is placed.
 * NOTE(review): ids referenced in `children` are assumed to exist in `spec`;
 * an element waiting on an id that never appears is silently dropped — confirm.
 */
export function toposort<A extends {id: string, children?: string[]}>(spec: A[]): A[] {
  const placed: Record<string, boolean> = {}
  const queue: Record<string, A[]> = {}
  const out: A[] = []
  function place(e: A) {
    for (const ch of e.children || []) {
      if (!placed[ch]) {
        // Park e until its first missing child gets placed.
        queue[ch] = [...(queue[ch] || []), e]
        return
      }
    }
    const q = queue[e.id] || []
    delete queue[e.id]
    out.push(e)
    placed[e.id] = true
    // Retry everything that was waiting on e.
    q.forEach(place)
  }
  spec.forEach(place)
  return out
}
{
const subject = [{id: 1}, {id: 2, children: [1]}, {id: 3, children: [2]}]
perms(subject).forEach(ys => test(toposort, ys).is = subject)
}
/** Drop the longest prefix of xs whose elements all satisfy p. */
export function drop_while<A>(xs: A[], p: (a: A) => boolean) {
  const firstKept = xs.findIndex(x => !p(x))
  return firstKept === -1 ? [] : xs.slice(firstKept)
}
/** Drop the longest suffix of xs whose elements all satisfy p. */
export function drop_while_end<A>(xs: A[], p: (a: A) => boolean) {
  let end = xs.length
  while (end > 0 && p(xs[end - 1])) {
    end--
  }
  return xs.slice(0, end)
}
test(drop_while, [1,2,3], i => true).is = []
test(drop_while, [1,2,3], i => i < 2).is = [2, 3]
test(drop_while, [1,2,3], i => false).is = [1, 2, 3]
test(drop_while_end, [1,2,3], i => true).is = []
test(drop_while_end, [1,2,3], i => i > 2).is = [1, 2]
test(drop_while_end, [1,2,3], i => false).is = [1, 2, 3]
/** Drop matching elements from both the front and the back of xs. */
export function drop_while_both_ends<A>(xs: A[], p: (a: A) => boolean) {
  const withoutEnd = drop_while_end(xs, p)
  return drop_while(withoutEnd, p)
}
test(drop_while_both_ends, [1,2,3], i => true).is = []
test(drop_while_both_ends, [1,2,3], i => i != 1).is = [1]
test(drop_while_both_ends, [1,2,3], i => i != 2).is = [2]
test(drop_while_both_ends, [1,2,3], i => i == 1).is = [2, 3]
test(drop_while_both_ends, [1,2,3], i => false).is = [1, 2, 3]
/** Split xs into [elements satisfying p, elements not satisfying p], in order. */
export function partition<A>(xs: A[], p: (a: A, i: number) => boolean) {
  const yes: A[] = []
  const no: A[] = []
  for (let i = 0; i < xs.length; i++) {
    (p(xs[i], i) ? yes : no).push(xs[i])
  }
  return [yes, no]
}
test(partition, [1,2,3,4,5], (x: number) => x % 2 == 0).is = [[2, 4], [1, 3, 5]]
/** Index xs by field k; later elements with the same key win. */
export function by<T extends Record<string, any>>(k: keyof T, xs: T[]): Record<string, T> {
  const out: Record<string, T> = {}
  xs.forEach(s => {
    out[s[k]] = s
  })
  return out
}
{
const a = {id: 'a', v: 1}
const b = {id: 'b', w: 2}
test(by, 'id', [a, b]).is = {a, b}
}
/**
 * Group elements by the value of field k, preserving first-seen key order.
 * Values are collected through a Set, so the exact same object reference
 * occurring twice is kept only once — TODO confirm this dedup is intended.
 */
export function by_many<T extends Record<string, any>>(k: keyof T, xs: T[]): Record<string, T[]> {
  const res: any = {}
  xs.forEach(s => {
    const sk = s[k]
    if (!(sk in res)) {
      res[sk] = new Set()
    }
    res[sk].add(s)
  })
  // Convert the temporary Sets into arrays for the public return type.
  for (const k in res) {
    res[k] = [...res[k].values()]
  }
  return res
}
{
const a = {id: 'a', v: 1}
const b = {id: 'a', w: 2}
test(by_many, 'id', [a, b]).is = {a: [a, b]}
}
/** Sum of a list of numbers; 0 for the empty list. */
export function sum(xs: number[]): number {
  let total = 0
  for (const x of xs) {
    total += x
  }
  return total
}
test(sum, [1, 2, 30]).is = 33
/** Yield at most the first n elements of an iterable. */
export function* take<A>(n: number, xs: Iterable<A>): Generator<A> {
  if (n <= 0) return
  let yielded = 0
  for (const x of xs) {
    yield x
    yielded++
    if (yielded >= n) return
  }
}
test(Array.from, take(2, [0, 1, 2, 3, 4])).is = [0, 1]
test(Array.from, take(2, [0])).is = [0]
/** Create a counter returning '0', '1', '2', ... as strings, one per call. */
export const id_supply = () => {
  let next = 0
  return () => String(next++)
}
{
const us = id_supply()
test(us).is = '0'
test(us).is = '1'
test(us).is = '2'
const us2 = id_supply()
test(us2).is = '0'
test(us).is = '3'
}
/**
 * Debounce: wrap f so calls are delayed until `ms` milliseconds pass without
 * a new call; only the most recent arguments are used when f finally runs.
 * NOTE(review): `timer: number | undefined` assumes browser setTimeout
 * typings; under Node typings setTimeout returns Timeout — confirm target.
 */
export const limit = <A extends Array<any>, B>(ms: number, f: (...args: A) => B) => {
  let timer: number | undefined
  let last_args: A
  return (...args: A) => {
    // Remember only the latest arguments and restart the countdown.
    last_args = args
    clearTimeout(timer)
    timer = setTimeout(() => {
      timer = undefined
      f(...last_args)
    }, ms)
  }
}
/** All ways to split xs into a prefix/suffix pair, including empty ends. */
export function splits<A>(xs: A[]): [A[], A[]][] {
  const out: [A[], A[]][] = []
  for (let i = 0; i <= xs.length; i++) {
    out.push([xs.slice(0, i), xs.slice(i)])
  }
  return out
}
test(splits, [1, 2, 3]).is = [
[[], [1, 2, 3]],
[[1], [2, 3]],
[[1, 2], [3]],
[[1, 2, 3], []],
]
/**
 * Cartesian product of a list of lists: all ways to pick one element from
 * each inner list, preserving order.
 *
 * Fix: the generic parameter `A` was never declared on the function
 * (`function sequence(xs: A[][])` references an undefined type name and
 * does not compile); it is now properly quantified.
 */
export function sequence<A>(xs: A[][]): A[][] {
  if (xs.length == 0) {
    return [[]]
  } else {
    const [h, ...t] = xs
    return h.flatMap(h => sequence(t).map(t => [h, ...t]))
  }
}
const io = [1, 0]
test(sequence, [io, io, io]).is = [
[1, 1, 1],
[1, 1, 0],
[1, 0, 1],
[1, 0, 0],
[0, 1, 1],
[0, 1, 0],
[0, 0, 1],
[0, 0, 0],
]
/** All lists obtained by inserting x at each possible position in xs. */
export function insertions<A>(x: A, xs: A[]): A[][] {
  const out: A[][] = []
  for (let i = 0; i <= xs.length; i++) {
    out.push([...xs.slice(0, i), x, ...xs.slice(i)])
  }
  return out
}
test(insertions, 'x', [1, 2, 3]).is = [
['x', 1, 2, 3],
[1, 'x', 2, 3],
[1, 2, 'x', 3],
[1, 2, 3, 'x'],
]
/**
 * All ways to place x among xs where each original element independently goes
 * to the left or right of x, keeping relative order within each side.
 * NOTE(review): the generated boolean vectors are reversed before use, which
 * fixes the enumeration order of the output — see the test below in the file.
 */
export function insertion_sides<A>(x: A, xs: A[]): A[][] {
  // One [false, true] choice per element: false = right of x, true = left.
  const to_left = xs.map(_ => [false, true])
  return sequence(to_left).map(w => w.reverse()).map(to_left => {
    const [l, r] = partition(xs, (_, i) => to_left[i])
    return [...l, x, ...r]
  })
}
test(insertion_sides, 'x', [1, 2]).is = [
['x', 1, 2],
[1, 'x', 2],
[2, 'x', 1],
[1, 2, 'x'],
]
/**
 * Run g once against a probing version of f in order to enumerate the results
 * g would produce for each value f returns. The probe hands g every non-final
 * result of f via extra invocations with a constant function, and returns the
 * final result for the main call.
 * NOTE(review): assumes g calls the supplied function exactly once and that
 * f returns a non-empty array — TODO confirm with callers.
 */
export function invert<A extends Array<any>, B, C>(
  f: (...args: A[]) => B[],
  g: (f: (...args: A[]) => B) => C,
): C[] {
  const out: C[] = []
  out.push(g((...args: A[]) => {
    // Copy before splicing so f's own return value is not mutated.
    const bs = f(...args).slice()
    const [b_last] = bs.splice(bs.length - 1, 1)
    // Each non-final result becomes its own invocation of g.
    bs.forEach(b => out.push(g(() => b)))
    return b_last
  }))
  return out
}
test(invert, insertions, k => `hello ${k(0, [8, 9])}!`).is = [
'hello 0,8,9!',
'hello 8,0,9!',
'hello 8,9,0!',
]
|
<reponame>snehalpersistent/snehalvgithub
import { createSelector } from "reselect";
import { IRootState } from "../reducers/rootReducer";
// Base selector: extract the database slice from the root state.
function getDatabaseState(state: IRootState) {
  return state.database;
}

// Memoized selector for the database `info` sub-state.
export const getInfo = createSelector(
  getDatabaseState,
  state => state.info
);
|
// newStreamWriter creates a streamer that will be used to stream the
// requests that occur within this session to the audit log.
func (s *Server) newStreamWriter(identity *tlsca.Identity, app types.Application) (events.StreamWriter, error) {
	// The session recording mode decides where (and whether) events are stored.
	recConfig, err := s.c.AccessPoint.GetSessionRecordingConfig(s.closeContext)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	clusterName, err := s.c.AccessPoint.GetClusterName()
	if err != nil {
		return nil, trace.Wrap(err)
	}
	// Each writer gets a fresh chunk ID, which also serves as the session ID
	// of the uploaded recording chunk.
	chunkID := uuid.New()
	streamer, err := s.newStreamer(s.closeContext, chunkID, recConfig)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	streamWriter, err := events.NewAuditWriter(events.AuditWriterConfig{
		Context:      s.closeContext,
		Streamer:     streamer,
		Clock:        s.c.Clock,
		SessionID:    session_pkg.ID(chunkID),
		Namespace:    apidefaults.Namespace,
		ServerID:     s.c.HostID,
		RecordOutput: recConfig.GetMode() != types.RecordOff,
		Component:    teleport.ComponentApp,
		ClusterName:  clusterName.GetClusterName(),
	})
	if err != nil {
		return nil, trace.Wrap(err)
	}
	// Emit a session-chunk event so the audit log links this chunk to the
	// user, app, and session it belongs to.
	appSessionChunkEvent := &apievents.AppSessionChunk{
		Metadata: apievents.Metadata{
			Type:        events.AppSessionChunkEvent,
			Code:        events.AppSessionChunkCode,
			ClusterName: identity.RouteToApp.ClusterName,
		},
		ServerMetadata: apievents.ServerMetadata{
			ServerID:        s.c.HostID,
			ServerNamespace: apidefaults.Namespace,
		},
		SessionMetadata: apievents.SessionMetadata{
			SessionID: identity.RouteToApp.SessionID,
			WithMFA:   identity.MFAVerified,
		},
		UserMetadata: apievents.UserMetadata{
			User:         identity.Username,
			Impersonator: identity.Impersonator,
		},
		AppMetadata: apievents.AppMetadata{
			AppURI:        app.GetURI(),
			AppPublicAddr: app.GetPublicAddr(),
			AppName:       app.GetName(),
		},
		SessionChunkID: chunkID,
	}
	if err := s.c.AuthClient.EmitAuditEvent(s.closeContext, appSessionChunkEvent); err != nil {
		return nil, trace.Wrap(err)
	}
	return streamWriter, nil
}
<reponame>stitchfix/librato
/*
Package librato is a pure go client for publishing metrics to Librato.
The package publishes metrics asynchronously, at a regular interval. If the package is unable to
log metrics (network issues, service outage, or the app being overloaded), it will drop metrics instead of degrading the
application's performance. The package allows some control over this behavior by allowing the developer the option of
configuring the queue size. Once this queue size is exceeded, messages will be dropped until the publisher catches up.
The package also provides an Aggregator struct which can be used to aggregate the gauge measurements on the client. For
applications that need to log a substantial number of metrics, this will be preferable to publishing the individual metrics.
*/
package librato
import (
"fmt"
"net/url"
"time"
)
const apiEndpoint = "https://metrics-api.librato.com"
// Librato is the client handle; all measures flow through its publisher.
type Librato struct {
	publisher *publisher
}

// Config holds the credentials and tuning knobs for a Librato client.
type Config struct {
	// Email used for logging into your librato account
	Email string
	// The Key used to access the librato api.
	APIKey string
	// An optional Queue size. By default, this will be 600
	QueueSize int
}
// New creates a new librato client. The client will harvest metrics and publish
// them every second. You can specify the QueueSize to control how many metrics
// the client will batch. If you exceed the queue size, the measures will be silently
// dropped.
func New(config Config, errCh chan<- error) *Librato {
	// apiEndpoint is a constant URL, so the parse error can safely be ignored.
	u, _ := url.Parse(apiEndpoint)
	u.User = url.UserPassword(config.Email, config.APIKey)
	u.Path = "/v1/metrics"
	// determine queue size (default 600 when unset or non-positive)
	queueSize := 600
	if config.QueueSize > 0 {
		queueSize = config.QueueSize
	}
	// start the publisher; it drains `measures` once per second.
	p := &publisher{
		metricsURL: u,
		queueSize:  queueSize,
		measures:   make(chan interface{}, queueSize),
		shutdown:   make(chan chan struct{}),
		errors:     errCh,
	}
	go p.run(time.Second * 1)
	return &Librato{publisher: p}
}
// AddGauge adds a Gauge measurement to librato. If the queue is full, the
// measure will be dropped, but an error will be published to the error
// channel if it was configured.
func (l *Librato) AddGauge(g Gauge) {
	select {
	case l.publisher.measures <- g:
	default:
		// Non-blocking send: dropping is preferred over stalling the caller.
		l.publisher.reportError(fmt.Errorf("gauge could not be added to the metrics queue"))
	}
}
// AddCounter adds a Counter measurement to librato. If the queue is full, the
// measure will be dropped, but an error will be published to the error
// channel if it was configured.
func (l *Librato) AddCounter(c Counter) {
	select {
	case l.publisher.measures <- c:
	default:
		// Non-blocking send: dropping is preferred over stalling the caller.
		l.publisher.reportError(fmt.Errorf("counter could not be added to the metrics queue"))
	}
}
// Shutdown stops the librato client. The operation is blocking, and will make one final attempt
// to harvest measures and send them to librato.
func (l *Librato) Shutdown() {
	// Closing the measures channel lets the publisher drain what remains.
	close(l.publisher.measures)
	s := make(chan struct{})
	l.publisher.shutdown <- s
	// Block until the publisher acknowledges the shutdown.
	<-s
}
|
<filename>src/client/containers/Header/index.tsx
import React, { useContext, useMemo } from 'react';
import { Dropdown, Menu } from 'antd';
import { AppContext } from '../../context/appContext';
import { logout } from '../../service/user';
import styles from './header.module.less';
const { Item } = Menu;
const Header: React.FC = () => {
const { userName } = useContext(AppContext);
const menu = useMemo(() => {
return (
<Menu>
<Item>
<div role="button" onClick={() => { logout() }}>退出登录</div>
</Item>
</Menu>
);
}, [userName]);
return (
<div className={styles.container}>
<div className={styles.header}>
<Dropdown overlay={menu}>
<p className={styles.name}>{userName}</p>
</Dropdown>
</div>
</div>
)
};
export default Header; |
/**
 * bam_issue_pending - starts pending transactions
 * @chan: dma channel
 *
 * Calls tasklet directly which in turn starts any pending transactions
 */
static void bam_issue_pending(struct dma_chan *chan)
{
	struct bam_chan *bchan = to_bam_chan(chan);
	unsigned long flags;

	/* vc.lock protects the issued list and curr_txd. */
	spin_lock_irqsave(&bchan->vc.lock, flags);

	/* Kick the hardware only when descriptors were actually issued and no
	 * transaction is currently in flight; if curr_txd is set, presumably
	 * the completion path starts the next descriptor — confirm. */
	if (vchan_issue_pending(&bchan->vc) && !bchan->curr_txd)
		bam_start_dma(bchan);
	spin_unlock_irqrestore(&bchan->vc.lock, flags);
}
/**
* Decides what to do based on the intent that started this activity
* @param intent Intent that started this activity
*/
private void processIntent(Intent intent) {
Serializable movie = intent.getSerializableExtra(Constants.EXTRA_MOVIE);
if (movie == null || !(movie instanceof Movie)) {
Log.d(TAG, "onCreate: Finishing activity");
finish();
} else {
movieResult = (Movie) movie;
api = new MoviesApiService();
api.getTrailerForMovie(movieResult.getId(), this);
}
} |
// Copyright 2019 The color Authors. All rights reserved.
// Use of this source code is governed by a MIT
// license that can be found in the LICENSE file.
package color_test
import (
"testing"
color "github.com/zchee/color/v2"
)
// TestAttributeString verifies that Attribute.String returns the numeric SGR
// code for every named attribute, and the raw decimal value for unknown ones.
func TestAttributeString(t *testing.T) {
	tests := []struct {
		attr color.Attribute
		want string
	}{
		{color.Reset, "0"},
		{color.Bold, "1"},
		{color.Faint, "2"},
		{color.Italic, "3"},
		{color.Underline, "4"},
		{color.BlinkSlow, "5"},
		{color.BlinkRapid, "6"},
		{color.ReverseVideo, "7"},
		{color.Concealed, "8"},
		{color.CrossedOut, "9"},
		{color.FgBlack, "30"},
		{color.FgRed, "31"},
		{color.FgGreen, "32"},
		{color.FgYellow, "33"},
		{color.FgBlue, "34"},
		{color.FgMagenta, "35"},
		{color.FgCyan, "36"},
		{color.FgWhite, "37"},
		{color.FgHiBlack, "90"},
		{color.FgHiRed, "91"},
		{color.FgHiGreen, "92"},
		{color.FgHiYellow, "93"},
		{color.FgHiBlue, "94"},
		{color.FgHiMagenta, "95"},
		{color.FgHiCyan, "96"},
		{color.FgHiWhite, "97"},
		{color.BgBlack, "40"},
		{color.BgRed, "41"},
		{color.BgGreen, "42"},
		{color.BgYellow, "43"},
		// BUG FIX: BgBlue is SGR code 44. The table previously expected
		// "46", which is BgCyan's code and could never match BgBlue.String.
		{color.BgBlue, "44"},
		{color.BgMagenta, "45"},
		{color.BgCyan, "46"},
		{color.BgWhite, "47"},
		{color.BgHiBlack, "100"},
		{color.BgHiRed, "101"},
		{color.BgHiGreen, "102"},
		{color.BgHiYellow, "103"},
		{color.BgHiBlue, "104"},
		{color.BgHiMagenta, "105"},
		{color.BgHiCyan, "106"},
		{color.BgHiWhite, "107"},
		// Unknown attributes fall back to their decimal representation.
		{color.Attribute(200), "200"},
	}
	for _, tt := range tests {
		tt := tt // capture for the parallel subtest
		t.Run(tt.attr.Name(), func(t *testing.T) {
			t.Parallel()
			if got := tt.attr.String(); got != tt.want {
				t.Errorf("got %q, want %q", got, tt.want)
			}
		})
	}
}
// TestAttributeName verifies that Attribute.Name returns the symbolic
// constant name for every known attribute, and the "Attribute(n)" fallback
// for unknown values.
func TestAttributeName(t *testing.T) {
	tests := []struct {
		attr color.Attribute
		want string
	}{
		{color.Reset, "Reset"},
		{color.Bold, "Bold"},
		{color.Faint, "Faint"},
		{color.Italic, "Italic"},
		{color.Underline, "Underline"},
		{color.BlinkSlow, "BlinkSlow"},
		{color.BlinkRapid, "BlinkRapid"},
		{color.ReverseVideo, "ReverseVideo"},
		{color.Concealed, "Concealed"},
		{color.CrossedOut, "CrossedOut"},
		{color.FgBlack, "FgBlack"},
		{color.FgRed, "FgRed"},
		{color.FgGreen, "FgGreen"},
		{color.FgYellow, "FgYellow"},
		{color.FgBlue, "FgBlue"},
		{color.FgMagenta, "FgMagenta"},
		{color.FgCyan, "FgCyan"},
		{color.FgWhite, "FgWhite"},
		{color.FgHiBlack, "FgHiBlack"},
		{color.FgHiRed, "FgHiRed"},
		{color.FgHiGreen, "FgHiGreen"},
		{color.FgHiYellow, "FgHiYellow"},
		{color.FgHiBlue, "FgHiBlue"},
		{color.FgHiMagenta, "FgHiMagenta"},
		{color.FgHiCyan, "FgHiCyan"},
		{color.FgHiWhite, "FgHiWhite"},
		{color.BgBlack, "BgBlack"},
		{color.BgRed, "BgRed"},
		{color.BgGreen, "BgGreen"},
		{color.BgYellow, "BgYellow"},
		{color.BgBlue, "BgBlue"},
		{color.BgMagenta, "BgMagenta"},
		{color.BgCyan, "BgCyan"},
		{color.BgWhite, "BgWhite"},
		{color.BgHiBlack, "BgHiBlack"},
		{color.BgHiRed, "BgHiRed"},
		{color.BgHiGreen, "BgHiGreen"},
		{color.BgHiYellow, "BgHiYellow"},
		{color.BgHiBlue, "BgHiBlue"},
		{color.BgHiMagenta, "BgHiMagenta"},
		{color.BgHiCyan, "BgHiCyan"},
		{color.BgHiWhite, "BgHiWhite"},
		{color.Attribute(200), "Attribute(200)"},
	}
	for _, tc := range tests {
		tc := tc // capture for the parallel subtest
		t.Run(tc.want, func(t *testing.T) {
			t.Parallel()
			if got := tc.attr.Name(); got != tc.want {
				t.Errorf("got %q, want %q", got, tc.want)
			}
		})
	}
}
|
import {Component, OnInit} from '@angular/core';
import {FormControl, FormGroup, Validators} from '@angular/forms';
import {ValidationService} from '../../services/validation.service';
import {GetApi} from '../../modules/GetApi';
import {ApiService} from '../../services/api/api.service';
import {Router} from '@angular/router';
@Component({
  selector: 'app-api-create',
  templateUrl: './api-create.component.html',
  styleUrls: ['./api-create.component.scss']
})
/** Form page for creating a new API entry (name, image, description). */
export class ApiCreateComponent implements OnInit {
  // Image file picked in the form's file input; appended to the upload payload.
  selectedFile: File;

  constructor(private validService: ValidationService, private apiService: ApiService, private router: Router) {
  }

  // Fresh model object used only to seed the form's initial values.
  api = new GetApi();

  // Reactive form backing the page: name and description require min length 3,
  // all three fields are mandatory.
  apiForm = new FormGroup({
    name: new FormControl(this.api.name, [
      Validators.required,
      Validators.minLength(3),
    ]),
    image: new FormControl(this.api.image, [
      Validators.required,
    ]),
    description: new FormControl(this.api.description, [
      Validators.required,
      Validators.minLength(3),
    ]),
  });

  // Validation messages keyed by control name, then by error code.
  messages = {
    name: {
      required: 'Name is required',
      minlength: 'Name min length 3',
    },
    image: {
      required: 'Image is required',
    },
    description: {
      required: 'Description is required',
      minlength: 'Description min length 3',
    },
  };

  // Delegates to ValidationService.validRequired for the given control.
  error(tagName: String): string {
    return this.validService.validRequired(tagName, this.apiForm);
  }

  // Delegates to ValidationService.validMessage to resolve the message text
  // for the given control from the `messages` table above.
  getMessageValid(tagName): string {
    return this.validService.validMessage(tagName, this.apiForm, this.messages);
  }

  // Stores the file selected in the <input type="file"> change event.
  onFileChanged(event): void {
    this.selectedFile = event.target.files[0];
  }

  // Builds a multipart payload from the form and submits it, then navigates
  // back to the API list.
  // NOTE(review): if ApiService.createApi returns an Observable, it will not
  // execute without a subscribe, and this try/catch cannot catch asynchronous
  // errors — confirm createApi's contract.
  onCreate(): void {
    try {
      const uploadData = new FormData();
      uploadData.append('name', this.apiForm.value.name);
      uploadData.append('image', this.selectedFile);
      uploadData.append('description', this.apiForm.value.description);
      this.apiService.createApi(uploadData);
      this.router.navigate(['/api']);
    } catch (e) {
      console.log(e);
    }
  }

  ngOnInit() {
  }
}
|
GRAND RAPIDS -- Business is booming in at least one sector of the local and national economy: bankruptcy. The
U.S. Bankruptcy Court for the Western District of Michigan saw its caseload jump nearly 58 percent in March as more individuals and businesses are bitten by the recession.
Bankruptcy filings are up nearly 40 percent through the first quarter of 2009, with 3,916
in the Lower Peninsula's western half. "Obviously, it's a difficult economic situation right now," said Dan LaVille, clerk of the court. "With each year, our numbers of cases continue to grow. The numbers we had in March were quite large." Kim Rockinger, a recently divorced Cedar Springs mother of two, is about to add to the statistics. Saddled with $50,000 in debt spread among 12 credit card issuers, plus medical bills and mortgage payments, she expects to file for bankruptcy soon. Rockinger, who also filed for bankruptcy in 1996, said she thought she had the debt under control. But the breakup of her marriage left too much debt for her to handle with her income from child support and her part-time job as an office assistant. She takes some personal responsibility for the problem, saying she needs to control impulse buying and resist the onslaught of credit-card offers. "I do like to shop," she said during a recent visit to Grand Rapids bankruptcy attorney David Andersen's office. "It's difficult. I have two girls, too, so we like to go shopping." She said she rarely answers the phone these days, afraid it will be another bill collector. Andersen said he encounters cases like Rockinger's all the time. "I see this a lot where people make really good money and they can probably make their payments, but what they're not thinking about is what happens if they're laid off, or there's a divorce." While Rockinger hasn't lost her job, the number of individual filings in recent months has been directly correlated with businesses closing or cutting back operations, Judge Jeffrey Hughes said. Banks are taking over assets or selling businesses before they hit bankruptcy court, which is why Hughes said there hasn't been a surge in business filings. "What we see is, all of a sudden, you have a lot of unemployed workers, and it's those individuals who have to seek relief in bankruptcy," Hughes said. 
The judge expects the impending closure of the General Motors plant in Wyoming to boost filings the same way closure of the Electrolux plant in Greenville drove many of its employees to his courtroom. "The bankruptcy court addresses the flotsam and the jetsam of a business closing," Hughes said. More and more companies are on the edge, giving Tom Sarb, a corporate-bankruptcy attorney for Miller Johnson in Grand Rapids, plenty to do even if his clients don't end up filing. "It's about as active and maybe a little bit more so than a couple of earlier downturns," Sarb said. "The 1980-81 downturn was fairly active, and then there has been a lot of activity in Michigan since the year 2000 related to tool and dies." Predictably, auto companies are among the hardest hit. Sarb's current clients include the creditors committee for Checker Motors, the Kalamazoo stamping operations known for the taxicabs it once made. "I think everybody is just struggling with the drastically reduced volumes of parts that GM, Ford and Chrysler and everyone else are ordering these days," he said. "I tell people that, unfortunately, I am very busy these days." Local trends are in line with national statistics showing bankruptcies surging despite a 3-year-old federal law that made it much tougher for Americans to escape their debts, an Associated Press analysis found. Christian Krupp II, a Grand Rapids attorney, said his business is up among insurance workers, mortgage brokers and sales people. "I had one client that commented this is the year of the white-collar bankruptcy," Krupp said. "These are people making good money, $60,000 to $70,000, but their lifestyle is at $85,000 because of bonuses that they're no longer getting."
-- Associated Press writer Mike Baker contributed to this story.
E-mail Chris Knape: |
def main(args):
    """Program entry point: configure logging/config, install signal handlers,
    then set up and run the application."""
    setupLogging(args)
    config.init()
    app = App()

    def _purge_on_signal(signum, frame):
        # Graceful teardown on SIGINT/SIGHUP.
        __logger.info("Signal handler triggered, purging application")
        app.purge()

    for _sig in (signal.SIGINT, signal.SIGHUP):
        signal.signal(_sig, _purge_on_signal)

    app.setup(args)
    app.run()
#include <iostream>
#include <string>
using namespace std;

// For each of t test cases: read a string over {'a','b','c','?'} and replace
// every '?' so that no two adjacent characters are equal. Prints -1 when the
// fixed (non-'?') characters already contain an adjacent equal pair.
//
// Changes vs. original: removed the unused variable `c` and a dead commented
// block, replaced the `goto h2` continue-emulation and cross-loop `goto h1`
// with `continue`/a flag, and tightened the first loop bound from
// k <= z.size() (which compared the terminating null) to k < z.size().
int main()
{
    int t;
    cin >> t;
    for (int x = 1; x <= t; x++) {
        string z;
        cin >> z;

        // Impossible if two equal, already-fixed characters are adjacent.
        bool possible = true;
        for (size_t k = 1; k < z.size(); k++) {
            if (z[k] == z[k - 1] && z[k] != '?') {
                possible = false;
                break;
            }
        }
        if (!possible) {
            cout << -1 << endl;
            continue;
        }

        // Build the answer left to right; j always has exactly k characters
        // when processing index k, so j[k - 1] is the previous output letter.
        string j = "";
        for (size_t k = 0; k < z.size(); k++) {
            if (z[k] != '?') {
                j = j + z[k];
                continue;
            }
            // NOTE: z[k + 1] at the last index reads the terminating null
            // character, which std::string::operator[] allows since C++11;
            // '\0' never equals 'a'/'b'/'c' or '?', so the checks below hold.
            if (k == 0) {
                // First character: pick any letter differing from the next.
                if (z[k + 1] != 'a') { j = j + 'a'; continue; }
                if (z[k + 1] != 'b') { j = j + 'b'; continue; }
                j = j + 'c';
                continue;
            }
            // Otherwise pick a letter differing from the previous output
            // letter, preferring not to constrain a following '?' and, when
            // the next character is fixed, differing from it as well. At
            // least one of the six branches always fires.
            if (j[k - 1] != 'a' && z[k + 1] == '?') { j = j + 'a'; continue; }
            if (j[k - 1] != 'b' && z[k + 1] == '?') { j = j + 'b'; continue; }
            if (j[k - 1] != 'c' && z[k + 1] == '?') { j = j + 'c'; continue; }
            if (j[k - 1] != 'a' && z[k + 1] != 'a') { j = j + 'a'; continue; }
            if (j[k - 1] != 'b' && z[k + 1] != 'b') { j = j + 'b'; continue; }
            if (j[k - 1] != 'c' && z[k + 1] != 'c') { j = j + 'c'; continue; }
        }
        cout << j << endl;
    }
    return 0;
}
|
/**
 * Will add an error situation and how to handle it to the response mapping.
 * If there has already been a mapping for the given status, it will be
 * overwritten.
 *
 * @param responseStatus Http status to react to.
 * @param errorHandling Object holding error handling information.
 * @return this builder, allowing further configuration calls to be chained.
 */
public ResponseMappingBuilder addErrorSituation(Status responseStatus, ErrorHandlingDescription errorHandling) {
    statusCodeErrorHandlingMapping.put(responseStatus, errorHandling);
    return this;
}
<gh_stars>1-10
package de.adito.git.impl.data;
import com.google.inject.AbstractModule;
import com.google.inject.assistedinject.FactoryModuleBuilder;
/**
 * Guice Module for the data package
 *
 * @author m.kaspera, 24.12.2018
 */
public class DataModule extends AbstractModule
{
  @Override
  protected void configure()
  {
    // Register the assisted-inject factory so IDataFactory instances can be
    // obtained from Guice without a hand-written factory implementation.
    install(new FactoryModuleBuilder().build(IDataFactory.class));
  }
}
|
package io.behindthemath.mjolnir.utils;
/**
* Created by <NAME> on 3/16/2017.
*/
public class Utils {
    /**
     * Searches the specified array of chars for the specified value.
     *
     * @param charArray The array to be searched
     * @param value The value to be searched for
     * @return The index of the first occurrence of the value, if it is found
     *         in the array; otherwise -1.
     */
    public static int arraySearch(char[] charArray, char value) {
        for (int i = 0; i < charArray.length; i++) {
            if (charArray[i] == value) return i;
        }
        return -1;
    }

    /**
     * Searches the specified array of {@link String}s for the specified value.
     * The comparison is null-safe: {@code null} elements no longer cause a
     * {@link NullPointerException}, and searching for {@code null} finds the
     * first {@code null} element.
     *
     * @param stringArray The array to be searched
     * @param value The value to be searched for (may be {@code null})
     * @return The index of the first occurrence of the value, if it is found
     *         in the array; otherwise -1.
     */
    public static int arraySearch(String[] stringArray, String value) {
        for (int i = 0; i < stringArray.length; i++) {
            String element = stringArray[i];
            // Null-safe equality (was element.equals(value), which threw an
            // NPE whenever the array contained a null entry).
            if (element == null ? value == null : element.equals(value)) return i;
        }
        return -1;
    }
}
|
/* Reset and restore all of the 3c574 registers. */
static void tc574_reset(struct net_device *dev)
{
	struct el3_private *lp = netdev_priv(dev);
	int i;
	kio_addr_t ioaddr = dev->base_addr;
	unsigned long flags;

	/* Issue a total reset command and wait for it to complete. */
	tc574_wait_for_completion(dev, TotalReset|0x10);

	spin_lock_irqsave(&lp->window_lock, flags);
	/* Clear the Runner read/write control registers. */
	outw(0, ioaddr + RunnerWrCtrl);
	outw(0, ioaddr + RunnerRdCtrl);

	/* Window 2: program the 6-byte station address, zero bytes 6-11. */
	EL3WINDOW(2);
	for (i = 0; i < 6; i++)
		outb(dev->dev_addr[i], ioaddr + i);
	for (; i < 12; i+=2)
		outw(0, ioaddr + i);

	/* Window 3: MAC control (large-frame bit for MTU > 1500) and config. */
	EL3WINDOW(3);
	outb((dev->mtu > 1500 ? 0x40 : 0), ioaddr + Wn3_MAC_Ctrl);
	outl((lp->autoselect ? 0x01000000 : 0) | 0x0062001b,
	ioaddr + Wn3_Config);
	/* NOTE(review): the 0x8040 -> delay -> 0xc040 sequence on Wn3_Options
	 * presumably resets then enables the transceiver — confirm against
	 * the 3Com documentation. */
	outw(0x8040, ioaddr + Wn3_Options);
	mdelay(1);
	outw(0xc040, ioaddr + Wn3_Options);
	EL3WINDOW(1);
	spin_unlock_irqrestore(&lp->window_lock, flags);

	/* Reset the Tx and Rx engines; these waits poll, so do them with the
	 * window lock dropped. */
	tc574_wait_for_completion(dev, TxReset);
	tc574_wait_for_completion(dev, RxReset);
	mdelay(1);

	spin_lock_irqsave(&lp->window_lock, flags);
	EL3WINDOW(3);
	outw(0x8040, ioaddr + Wn3_Options);

	/* Window 6: read out all statistics registers (presumably clears them
	 * — confirm), with stats collection disabled meanwhile. */
	outw(StatsDisable, ioaddr + EL3_CMD);
	EL3WINDOW(6);
	for (i = 0; i < 10; i++)
		inb(ioaddr + i);
	inw(ioaddr + 10);
	inw(ioaddr + 12);
	/* Window 4: read the extra stats bytes and set a NetDiag bit.
	 * NOTE(review): 0x0040 in Wn4_NetDiag is undocumented here — confirm
	 * its meaning against the datasheet. */
	EL3WINDOW(4);
	inb(ioaddr + 12);
	inb(ioaddr + 13);
	outw(0x0040, ioaddr + Wn4_NetDiag);
	EL3WINDOW(1);
	spin_unlock_irqrestore(&lp->window_lock, flags);

	/* Re-sync the MII interface and re-advertise our link capabilities
	 * (MII register 4 = autonegotiation advertisement). */
	mdio_sync(ioaddr, 32);
	mdio_write(ioaddr, lp->phys, 4, lp->advertising);
	if (!auto_polarity) {
		/* Set bit 0x20 in PHY register 16 when automatic polarity
		 * correction is disabled by the module parameter. */
		int i = mdio_read(ioaddr, lp->phys, 16) | 0x20;
		mdio_write(ioaddr, lp->phys, 16, i);
	}

	spin_lock_irqsave(&lp->window_lock, flags);
	/* Restore the Rx filter (multicast/promiscuous settings). */
	set_rx_mode(dev);
	spin_unlock_irqrestore(&lp->window_lock, flags);

	/* Re-enable statistics, receiver and transmitter, then unmask and ack
	 * the interrupt sources this driver handles. */
	outw(StatsEnable, ioaddr + EL3_CMD);
	outw(RxEnable, ioaddr + EL3_CMD);
	outw(TxEnable, ioaddr + EL3_CMD);
	outw(SetStatusEnb | 0xff, ioaddr + EL3_CMD);
	outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq,
	ioaddr + EL3_CMD);
	outw(SetIntrEnb | IntLatch | TxAvailable | RxComplete | StatsFull
	| AdapterFailure | RxEarly, ioaddr + EL3_CMD);
}
/*
Copyright 2018-2020 National Geographic Society
Use of this software does not constitute endorsement by National Geographic
Society (NGS). The NGS name and NGS logo may not be used for any purpose without
written permission from NGS.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use
this file except in compliance with the License. You may obtain a copy of the
License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed
under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
*/
import { BaseAPIService, metaDeserializer, RequestQuery } from './base/APIBase';
/** Fetch every user, optionally filtered/paged by `query`. */
const getAllUsers = async (query?: string | RequestQuery) =>
  BaseAPIService.request('/users', { query }, metaDeserializer);

/** Create users via a PUT to the collection endpoint. */
const addUsers = async (data: any, query?: RequestQuery) =>
  BaseAPIService.request('/users', { method: 'put', data, query }, metaDeserializer);

/** Fetch a single user by id. */
const getUser = (userId: string, query?: RequestQuery) =>
  BaseAPIService.request(`/users/${userId}`, { query }, metaDeserializer);

/** Fetch the groups a user can be assigned to. */
const getAvailableGroups = async (query?: RequestQuery) =>
  BaseAPIService.request('/users/groups', { query }, metaDeserializer);

/** Update an existing user via PUT. */
const updateUser = async (userId: string, data: any, query?: RequestQuery) =>
  BaseAPIService.request(`/users/${userId}`, { method: 'put', data, query }, metaDeserializer);

/** Delete a user by id. */
const deleteUser = async (userId: string, query?: RequestQuery) =>
  BaseAPIService.request(`/users/${userId}`, { method: 'delete', query }, metaDeserializer);

/** Dispatch a user form submission to create or update as appropriate. */
const handleUserForm = async (
  newUser: boolean,
  data: any,
  userId: string,
  query?: RequestQuery
) => (newUser ? addUsers(data, query) : updateUser(userId, data, query));

export default {
  getAllUsers,
  addUsers,
  getUser,
  getAvailableGroups,
  updateUser,
  deleteUser,
  handleUserForm,
};
|
def move_and_tuning(model, adversarial_samples, target_samples, init_preds, n_calls, device, parameter, move_type,
                    num_trial, step_max, reduce_threshold=0.2, increase_threshold=0.7,
                    increase=0.9, decrease=0.9):
    # Boundary-attack style move step: repeatedly perturbs each adversarial
    # sample (either an 'orthogonal' or a 'forward' step toward its target)
    # with `num_trial` random trials per step, keeps a successful trial per
    # sample, and tunes each sample's step-size `parameter` based on the
    # fraction of trials that kept the original prediction.
    #
    # assumes adversarial_samples/target_samples are 4-D image batches
    # (N, C, H, W) — the repeat(num_trial, 1, 1, 1) below relies on it;
    # TODO confirm with callers.
    #
    # Returns (parameter, adversarial_samples, n_calls); n_calls is advanced
    # by the number of model evaluations performed here.
    if move_type == 'forward':
        print("\tForward step...")
    if move_type == 'orthogonal':
        print("\tOrthogonal step...")
    step = 0
    while True:
        step += 1
        print("\t#{}".format(step))
        # Start with all samples pending; later iterations retry only the
        # samples that had no successful trial yet (the "zero" set).
        if step == 1:
            trial_indices = torch.arange(len(adversarial_samples)).to(device)
        # Tile each pending sample num_trial times so all trials for all
        # pending samples are evaluated in one batch.
        trial_samples = adversarial_samples[trial_indices].repeat(num_trial, 1, 1, 1).to(device)
        trial_target_samples = target_samples[trial_indices].repeat(num_trial, 1, 1, 1).to(device)
        trial_parameter = parameter[trial_indices].repeat(num_trial).to(device)
        if move_type == 'orthogonal':
            trial_samples += orthogonal_perturbation(trial_parameter, trial_samples, trial_target_samples, device)
        if move_type == 'forward':
            step_sizes = trial_parameter.unsqueeze(-1) * get_diff(trial_samples, trial_target_samples, device)
            trial_samples += forward_perturbation(step_sizes, trial_samples, trial_target_samples, device)
        trial_outputs = model(trial_samples)
        n_calls += num_trial * len(trial_indices)
        trial_preds = torch.max(trial_outputs, 1)[1]
        # Per pending sample: fraction of trials whose prediction still
        # matches the initial prediction (the attack's success criterion).
        d_scores = torch.mean((trial_preds.view(num_trial, -1) == init_preds[trial_indices]).type(torch.float32), dim=0)
        non_zero = d_scores > 0.0
        # case1: few successes -> shrink step; case2: many -> grow step;
        # zero: no success at all -> shrink and retry next iteration.
        case1 = non_zero * (d_scores < reduce_threshold)
        case2 = d_scores > increase_threshold
        zero = non_zero.bitwise_not()
        case1_indices = trial_indices[case1]
        case2_indices = trial_indices[case2]
        non_zero_indices = trial_indices[non_zero]
        zero_indices = trial_indices[zero]
        parameter[case1_indices] *= decrease
        parameter[case2_indices] /= increase
        parameter[zero_indices] *= decrease
        # For each sample with at least one successful trial, find the first
        # trial (row) whose prediction matched, and adopt that trial sample.
        non_zero_row_indices = []
        correct_pred_positions = torch.where(
            trial_preds.view(num_trial, -1)[:, non_zero] == init_preds[non_zero_indices])
        for index in range(non_zero.type(torch.int).sum()):
            first_col_to_be_index = torch.where(index == correct_pred_positions[1])[0][0]
            non_zero_row_indices.append(correct_pred_positions[0][first_col_to_be_index])
        if len(non_zero_row_indices) != 0:
            non_zero_row_indices = torch.stack(non_zero_row_indices)
            adversarial_samples[non_zero_indices] = torch.stack(trial_samples.chunk(num_trial, dim=0))[
                (non_zero_row_indices, torch.where(non_zero)[0])]
        # Only the still-unsuccessful samples remain pending.
        trial_indices = trial_indices[zero]
        # Stop when every pending sample succeeded this round, or the step
        # budget is exhausted.
        if non_zero.all() or step > step_max:
            return parameter, adversarial_samples, n_calls
/**
 * Adds a child to this submenu object. The child list is created lazily on
 * the first call, so submenus that never receive children do not allocate
 * a list.
 *
 * @param item The item to add
 */
public void addChild(MenuItem item){
    if(children == null){
        children = new ArrayList<MenuItem>();
    }
    children.add(item);
}
<gh_stars>0
/*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the Source EULA. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
import * as azdata from 'azdata';
import * as vscode from 'vscode';
import { StubComponent } from './stubComponent';
/** Test stub for azdata's RadioButtonComponent with a manual click() helper. */
export class StubRadioButton extends StubComponent implements azdata.RadioButtonComponent {
	// Helper functions

	/** Simulates a user click: marks the button checked and fires onDidClick. */
	click() {
		this.checked = true;
		this._onDidClickEmitter.fire(this);
	}

	// Radio Button implementation
	readonly id = 'radio-button';
	private _onDidClickEmitter = new vscode.EventEmitter<any>();
	// NOTE(review): nothing in this stub ever fires the checked-state emitter;
	// confirm whether click() should also fire it when `checked` changes.
	private _onDidChangeCheckedStateEmitter = new vscode.EventEmitter<boolean>();
	onDidClick = this._onDidClickEmitter.event;
	onDidChangeCheckedState = this._onDidChangeCheckedStateEmitter.event;
	label?: string;
	value?: string;
	checked?: boolean;
}
|
# Warning: this file is auto-generated. Do not edit.
from ctypes import POINTER, c_char, c_char_p, c_int64, c_uint32
"""
File xr.raw_functions.py
Defines low-level ctypes function definitions for use by
higher-level pythonic functions in pyopenxr.
"""
from .library import openxr_loader_library
from .enums import *
from .typedefs import *
# ctypes Function definitions
xrGetInstanceProcAddr = openxr_loader_library.xrGetInstanceProcAddr
xrGetInstanceProcAddr.restype = Result
xrGetInstanceProcAddr.argtypes = [
InstanceHandle, # instance
c_char_p, # name
POINTER(PFN_xrVoidFunction), # function
]
xrEnumerateApiLayerProperties = openxr_loader_library.xrEnumerateApiLayerProperties
xrEnumerateApiLayerProperties.restype = Result
xrEnumerateApiLayerProperties.argtypes = [
c_uint32, # property_capacity_input
POINTER(c_uint32), # property_count_output
POINTER(ApiLayerProperties), # properties
]
xrEnumerateInstanceExtensionProperties = openxr_loader_library.xrEnumerateInstanceExtensionProperties
xrEnumerateInstanceExtensionProperties.restype = Result
xrEnumerateInstanceExtensionProperties.argtypes = [
c_char_p, # layer_name
c_uint32, # property_capacity_input
POINTER(c_uint32), # property_count_output
POINTER(ExtensionProperties), # properties
]
xrCreateInstance = openxr_loader_library.xrCreateInstance
xrCreateInstance.restype = Result
xrCreateInstance.argtypes = [
POINTER(InstanceCreateInfo), # create_info
POINTER(InstanceHandle), # instance
]
xrDestroyInstance = openxr_loader_library.xrDestroyInstance
xrDestroyInstance.restype = Result
xrDestroyInstance.argtypes = [
InstanceHandle, # instance
]
xrGetInstanceProperties = openxr_loader_library.xrGetInstanceProperties
xrGetInstanceProperties.restype = Result
xrGetInstanceProperties.argtypes = [
InstanceHandle, # instance
POINTER(InstanceProperties), # instance_properties
]
xrPollEvent = openxr_loader_library.xrPollEvent
xrPollEvent.restype = Result
xrPollEvent.argtypes = [
InstanceHandle, # instance
POINTER(EventDataBuffer), # event_data
]
xrResultToString = openxr_loader_library.xrResultToString
xrResultToString.restype = Result
xrResultToString.argtypes = [
InstanceHandle, # instance
Result.ctype(), # value
(c_char * 64), # buffer
]
xrStructureTypeToString = openxr_loader_library.xrStructureTypeToString
xrStructureTypeToString.restype = Result
xrStructureTypeToString.argtypes = [
InstanceHandle, # instance
StructureType.ctype(), # value
(c_char * 64), # buffer
]
xrGetSystem = openxr_loader_library.xrGetSystem
xrGetSystem.restype = Result
xrGetSystem.argtypes = [
InstanceHandle, # instance
POINTER(SystemGetInfo), # get_info
POINTER(SystemId), # system_id
]
xrGetSystemProperties = openxr_loader_library.xrGetSystemProperties
xrGetSystemProperties.restype = Result
xrGetSystemProperties.argtypes = [
InstanceHandle, # instance
SystemId, # system_id
POINTER(SystemProperties), # properties
]
xrEnumerateEnvironmentBlendModes = openxr_loader_library.xrEnumerateEnvironmentBlendModes
xrEnumerateEnvironmentBlendModes.restype = Result
xrEnumerateEnvironmentBlendModes.argtypes = [
InstanceHandle, # instance
SystemId, # system_id
ViewConfigurationType.ctype(), # view_configuration_type
c_uint32, # environment_blend_mode_capacity_input
POINTER(c_uint32), # environment_blend_mode_count_output
POINTER(EnvironmentBlendMode.ctype()), # environment_blend_modes
]
xrCreateSession = openxr_loader_library.xrCreateSession
xrCreateSession.restype = Result
xrCreateSession.argtypes = [
InstanceHandle, # instance
POINTER(SessionCreateInfo), # create_info
POINTER(SessionHandle), # session
]
xrDestroySession = openxr_loader_library.xrDestroySession
xrDestroySession.restype = Result
xrDestroySession.argtypes = [
SessionHandle, # session
]
xrEnumerateReferenceSpaces = openxr_loader_library.xrEnumerateReferenceSpaces
xrEnumerateReferenceSpaces.restype = Result
xrEnumerateReferenceSpaces.argtypes = [
SessionHandle, # session
c_uint32, # space_capacity_input
POINTER(c_uint32), # space_count_output
POINTER(ReferenceSpaceType.ctype()), # spaces
]
xrCreateReferenceSpace = openxr_loader_library.xrCreateReferenceSpace
xrCreateReferenceSpace.restype = Result
xrCreateReferenceSpace.argtypes = [
SessionHandle, # session
POINTER(ReferenceSpaceCreateInfo), # create_info
POINTER(SpaceHandle), # space
]
xrGetReferenceSpaceBoundsRect = openxr_loader_library.xrGetReferenceSpaceBoundsRect
xrGetReferenceSpaceBoundsRect.restype = Result
xrGetReferenceSpaceBoundsRect.argtypes = [
SessionHandle, # session
ReferenceSpaceType.ctype(), # reference_space_type
POINTER(Extent2Df), # bounds
]
xrCreateActionSpace = openxr_loader_library.xrCreateActionSpace
xrCreateActionSpace.restype = Result
xrCreateActionSpace.argtypes = [
SessionHandle, # session
POINTER(ActionSpaceCreateInfo), # create_info
POINTER(SpaceHandle), # space
]
xrLocateSpace = openxr_loader_library.xrLocateSpace
xrLocateSpace.restype = Result
xrLocateSpace.argtypes = [
SpaceHandle, # space
SpaceHandle, # base_space
Time, # time
POINTER(SpaceLocation), # location
]
xrDestroySpace = openxr_loader_library.xrDestroySpace
xrDestroySpace.restype = Result
xrDestroySpace.argtypes = [
SpaceHandle, # space
]
xrEnumerateViewConfigurations = openxr_loader_library.xrEnumerateViewConfigurations
xrEnumerateViewConfigurations.restype = Result
xrEnumerateViewConfigurations.argtypes = [
InstanceHandle, # instance
SystemId, # system_id
c_uint32, # view_configuration_type_capacity_input
POINTER(c_uint32), # view_configuration_type_count_output
POINTER(ViewConfigurationType.ctype()), # view_configuration_types
]
xrGetViewConfigurationProperties = openxr_loader_library.xrGetViewConfigurationProperties
xrGetViewConfigurationProperties.restype = Result
xrGetViewConfigurationProperties.argtypes = [
InstanceHandle, # instance
SystemId, # system_id
ViewConfigurationType.ctype(), # view_configuration_type
POINTER(ViewConfigurationProperties), # configuration_properties
]
xrEnumerateViewConfigurationViews = openxr_loader_library.xrEnumerateViewConfigurationViews
xrEnumerateViewConfigurationViews.restype = Result
xrEnumerateViewConfigurationViews.argtypes = [
InstanceHandle, # instance
SystemId, # system_id
ViewConfigurationType.ctype(), # view_configuration_type
c_uint32, # view_capacity_input
POINTER(c_uint32), # view_count_output
POINTER(ViewConfigurationView), # views
]
xrEnumerateSwapchainFormats = openxr_loader_library.xrEnumerateSwapchainFormats
xrEnumerateSwapchainFormats.restype = Result
xrEnumerateSwapchainFormats.argtypes = [
SessionHandle, # session
c_uint32, # format_capacity_input
POINTER(c_uint32), # format_count_output
POINTER(c_int64), # formats
]
xrCreateSwapchain = openxr_loader_library.xrCreateSwapchain
xrCreateSwapchain.restype = Result
xrCreateSwapchain.argtypes = [
SessionHandle, # session
POINTER(SwapchainCreateInfo), # create_info
POINTER(SwapchainHandle), # swapchain
]
xrDestroySwapchain = openxr_loader_library.xrDestroySwapchain
xrDestroySwapchain.restype = Result
xrDestroySwapchain.argtypes = [
SwapchainHandle, # swapchain
]
xrEnumerateSwapchainImages = openxr_loader_library.xrEnumerateSwapchainImages
xrEnumerateSwapchainImages.restype = Result
xrEnumerateSwapchainImages.argtypes = [
SwapchainHandle, # swapchain
c_uint32, # image_capacity_input
POINTER(c_uint32), # image_count_output
POINTER(SwapchainImageBaseHeader), # images
]
xrAcquireSwapchainImage = openxr_loader_library.xrAcquireSwapchainImage
xrAcquireSwapchainImage.restype = Result
xrAcquireSwapchainImage.argtypes = [
SwapchainHandle, # swapchain
POINTER(SwapchainImageAcquireInfo), # acquire_info
POINTER(c_uint32), # index
]
xrWaitSwapchainImage = openxr_loader_library.xrWaitSwapchainImage
xrWaitSwapchainImage.restype = Result
xrWaitSwapchainImage.argtypes = [
SwapchainHandle, # swapchain
POINTER(SwapchainImageWaitInfo), # wait_info
]
# ctypes prototypes for the OpenXR loader entry points.  Every binding follows
# the same pattern: look the symbol up in the loader library, declare its
# return type (always Result) and its argtypes, with one "# name" comment per
# parameter mirroring the OpenXR specification parameter names.
xrReleaseSwapchainImage = openxr_loader_library.xrReleaseSwapchainImage
xrReleaseSwapchainImage.restype = Result
xrReleaseSwapchainImage.argtypes = [
    SwapchainHandle, # swapchain
    POINTER(SwapchainImageReleaseInfo), # release_info
]
xrBeginSession = openxr_loader_library.xrBeginSession
xrBeginSession.restype = Result
xrBeginSession.argtypes = [
    SessionHandle, # session
    POINTER(SessionBeginInfo), # begin_info
]
xrEndSession = openxr_loader_library.xrEndSession
xrEndSession.restype = Result
xrEndSession.argtypes = [
    SessionHandle, # session
]
xrRequestExitSession = openxr_loader_library.xrRequestExitSession
xrRequestExitSession.restype = Result
xrRequestExitSession.argtypes = [
    SessionHandle, # session
]
xrWaitFrame = openxr_loader_library.xrWaitFrame
xrWaitFrame.restype = Result
xrWaitFrame.argtypes = [
    SessionHandle, # session
    POINTER(FrameWaitInfo), # frame_wait_info
    POINTER(FrameState), # frame_state
]
xrBeginFrame = openxr_loader_library.xrBeginFrame
xrBeginFrame.restype = Result
xrBeginFrame.argtypes = [
    SessionHandle, # session
    POINTER(FrameBeginInfo), # frame_begin_info
]
xrEndFrame = openxr_loader_library.xrEndFrame
xrEndFrame.restype = Result
xrEndFrame.argtypes = [
    SessionHandle, # session
    POINTER(FrameEndInfo), # frame_end_info
]
xrLocateViews = openxr_loader_library.xrLocateViews
xrLocateViews.restype = Result
xrLocateViews.argtypes = [
    SessionHandle, # session
    POINTER(ViewLocateInfo), # view_locate_info
    POINTER(ViewState), # view_state
    c_uint32, # view_capacity_input
    POINTER(c_uint32), # view_count_output
    POINTER(View), # views
]
xrStringToPath = openxr_loader_library.xrStringToPath
xrStringToPath.restype = Result
xrStringToPath.argtypes = [
    InstanceHandle, # instance
    c_char_p, # path_string
    POINTER(Path), # path
]
xrPathToString = openxr_loader_library.xrPathToString
xrPathToString.restype = Result
xrPathToString.argtypes = [
    InstanceHandle, # instance
    Path, # path
    c_uint32, # buffer_capacity_input
    POINTER(c_uint32), # buffer_count_output
    c_char_p, # buffer
]
xrCreateActionSet = openxr_loader_library.xrCreateActionSet
xrCreateActionSet.restype = Result
xrCreateActionSet.argtypes = [
    InstanceHandle, # instance
    POINTER(ActionSetCreateInfo), # create_info
    POINTER(ActionSetHandle), # action_set
]
xrDestroyActionSet = openxr_loader_library.xrDestroyActionSet
xrDestroyActionSet.restype = Result
xrDestroyActionSet.argtypes = [
    ActionSetHandle, # action_set
]
xrCreateAction = openxr_loader_library.xrCreateAction
xrCreateAction.restype = Result
xrCreateAction.argtypes = [
    ActionSetHandle, # action_set
    POINTER(ActionCreateInfo), # create_info
    POINTER(ActionHandle), # action
]
xrDestroyAction = openxr_loader_library.xrDestroyAction
xrDestroyAction.restype = Result
xrDestroyAction.argtypes = [
    ActionHandle, # action
]
xrSuggestInteractionProfileBindings = openxr_loader_library.xrSuggestInteractionProfileBindings
xrSuggestInteractionProfileBindings.restype = Result
xrSuggestInteractionProfileBindings.argtypes = [
    InstanceHandle, # instance
    POINTER(InteractionProfileSuggestedBinding), # suggested_bindings
]
xrAttachSessionActionSets = openxr_loader_library.xrAttachSessionActionSets
xrAttachSessionActionSets.restype = Result
xrAttachSessionActionSets.argtypes = [
    SessionHandle, # session
    POINTER(SessionActionSetsAttachInfo), # attach_info
]
xrGetCurrentInteractionProfile = openxr_loader_library.xrGetCurrentInteractionProfile
xrGetCurrentInteractionProfile.restype = Result
xrGetCurrentInteractionProfile.argtypes = [
    SessionHandle, # session
    Path, # top_level_user_path
    POINTER(InteractionProfileState), # interaction_profile
]
xrGetActionStateBoolean = openxr_loader_library.xrGetActionStateBoolean
xrGetActionStateBoolean.restype = Result
xrGetActionStateBoolean.argtypes = [
    SessionHandle, # session
    POINTER(ActionStateGetInfo), # get_info
    POINTER(ActionStateBoolean), # state
]
xrGetActionStateFloat = openxr_loader_library.xrGetActionStateFloat
xrGetActionStateFloat.restype = Result
xrGetActionStateFloat.argtypes = [
    SessionHandle, # session
    POINTER(ActionStateGetInfo), # get_info
    POINTER(ActionStateFloat), # state
]
xrGetActionStateVector2f = openxr_loader_library.xrGetActionStateVector2f
xrGetActionStateVector2f.restype = Result
xrGetActionStateVector2f.argtypes = [
    SessionHandle, # session
    POINTER(ActionStateGetInfo), # get_info
    POINTER(ActionStateVector2f), # state
]
xrGetActionStatePose = openxr_loader_library.xrGetActionStatePose
xrGetActionStatePose.restype = Result
xrGetActionStatePose.argtypes = [
    SessionHandle, # session
    POINTER(ActionStateGetInfo), # get_info
    POINTER(ActionStatePose), # state
]
xrSyncActions = openxr_loader_library.xrSyncActions
xrSyncActions.restype = Result
xrSyncActions.argtypes = [
    SessionHandle, # session
    POINTER(ActionsSyncInfo), # sync_info
]
xrEnumerateBoundSourcesForAction = openxr_loader_library.xrEnumerateBoundSourcesForAction
xrEnumerateBoundSourcesForAction.restype = Result
xrEnumerateBoundSourcesForAction.argtypes = [
    SessionHandle, # session
    POINTER(BoundSourcesForActionEnumerateInfo), # enumerate_info
    c_uint32, # source_capacity_input
    POINTER(c_uint32), # source_count_output
    POINTER(Path), # sources
]
xrGetInputSourceLocalizedName = openxr_loader_library.xrGetInputSourceLocalizedName
xrGetInputSourceLocalizedName.restype = Result
xrGetInputSourceLocalizedName.argtypes = [
    SessionHandle, # session
    POINTER(InputSourceLocalizedNameGetInfo), # get_info
    c_uint32, # buffer_capacity_input
    POINTER(c_uint32), # buffer_count_output
    c_char_p, # buffer
]
xrApplyHapticFeedback = openxr_loader_library.xrApplyHapticFeedback
xrApplyHapticFeedback.restype = Result
xrApplyHapticFeedback.argtypes = [
    SessionHandle, # session
    POINTER(HapticActionInfo), # haptic_action_info
    POINTER(HapticBaseHeader), # haptic_feedback
]
xrStopHapticFeedback = openxr_loader_library.xrStopHapticFeedback
xrStopHapticFeedback.restype = Result
xrStopHapticFeedback.argtypes = [
    SessionHandle, # session
    POINTER(HapticActionInfo), # haptic_action_info
]
# Public re-export list, sorted alphabetically.  Includes the symbols bound
# above as well as ones bound earlier in this module.
__all__ = [
    "xrAcquireSwapchainImage",
    "xrApplyHapticFeedback",
    "xrAttachSessionActionSets",
    "xrBeginFrame",
    "xrBeginSession",
    "xrCreateAction",
    "xrCreateActionSet",
    "xrCreateActionSpace",
    "xrCreateInstance",
    "xrCreateReferenceSpace",
    "xrCreateSession",
    "xrCreateSwapchain",
    "xrDestroyAction",
    "xrDestroyActionSet",
    "xrDestroyInstance",
    "xrDestroySession",
    "xrDestroySpace",
    "xrDestroySwapchain",
    "xrEndFrame",
    "xrEndSession",
    "xrEnumerateApiLayerProperties",
    "xrEnumerateBoundSourcesForAction",
    "xrEnumerateEnvironmentBlendModes",
    "xrEnumerateInstanceExtensionProperties",
    "xrEnumerateReferenceSpaces",
    "xrEnumerateSwapchainFormats",
    "xrEnumerateSwapchainImages",
    "xrEnumerateViewConfigurationViews",
    "xrEnumerateViewConfigurations",
    "xrGetActionStateBoolean",
    "xrGetActionStateFloat",
    "xrGetActionStatePose",
    "xrGetActionStateVector2f",
    "xrGetCurrentInteractionProfile",
    "xrGetInputSourceLocalizedName",
    "xrGetInstanceProcAddr",
    "xrGetInstanceProperties",
    "xrGetReferenceSpaceBoundsRect",
    "xrGetSystem",
    "xrGetSystemProperties",
    "xrGetViewConfigurationProperties",
    "xrLocateSpace",
    "xrLocateViews",
    "xrPathToString",
    "xrPollEvent",
    "xrReleaseSwapchainImage",
    "xrRequestExitSession",
    "xrResultToString",
    "xrStopHapticFeedback",
    "xrStringToPath",
    "xrStructureTypeToString",
    "xrSuggestInteractionProfileBindings",
    "xrSyncActions",
    "xrWaitFrame",
    "xrWaitSwapchainImage",
]
|
/**
 * Action class for the Run -> Assemble menu item (and toolbar icon)
 */
public class RunAssembleAction extends GuiAction {
    // Programs assembled by the most recent assemble; cached so RunResetAction
    // can re-assemble under identical conditions.
    private static ArrayList MIPSprogramsToAssemble;
    // Assembler settings snapshotted at assembly time (see actionPerformed).
    private static boolean extendedAssemblerEnabled;
    private static boolean warningsAreErrors;
    // Threshold for adding filename to printed message of files being
    // assembled.
    private static final int LINE_LENGTH_LIMIT = 60;
    public RunAssembleAction(String name, Icon icon, String descrip, Integer mnemonic, KeyStroke accel, VenusUI gui) {
        super(name, icon, descrip, mnemonic, accel, gui);
    }
    // These are both used by RunResetAction to re-assemble under identical
    // conditions.
    static ArrayList getMIPSprogramsToAssemble() {
        return MIPSprogramsToAssemble;
    }
    static boolean getExtendedAssemblerEnabled() {
        return extendedAssemblerEnabled;
    }
    static boolean getWarningsAreErrors() {
        return warningsAreErrors;
    }
    /**
     * Assembles the current file (or every file in its directory when the
     * "assemble all" setting is on), resets simulator state and refreshes the
     * execute/register panes on success, or selects the first error in the
     * messages pane (and editor, when invoked from the GUI) on failure.
     */
    public void actionPerformed(ActionEvent e) {
        String name = this.getValue(Action.NAME).toString();
        // NOTE(review): editPane is fetched but never used in this method.
        Component editPane = mainUI.getMainPane().getEditPane();
        ExecutePane executePane = mainUI.getMainPane().getExecutePane();
        RegistersPane registersPane = mainUI.getRegistersPane();
        // Snapshot settings so RunResetAction can reuse them unchanged.
        extendedAssemblerEnabled = Globals.getSettings().getExtendedAssemblerEnabled();
        warningsAreErrors = Globals.getSettings().getWarningsAreErrors();
        if (FileStatus.getFile() != null) {
            if (FileStatus.get() == FileStatus.EDITED) {
                mainUI.editor.save();
            }
            try {
                Globals.program = new MIPSprogram();
                ArrayList filesToAssemble;
                if (Globals.getSettings().getAssembleAllEnabled()) {// setting
                    // calls for
                    // multiple
                    // file
                    // assembly
                    filesToAssemble = FilenameFinder.getFilenameList(new File(FileStatus.getName()).getParent(),
                            Globals.fileExtensions);
                } else {
                    filesToAssemble = new ArrayList();
                    filesToAssemble.add(FileStatus.getName());
                }
                // Optional exception handler file is included when configured.
                String exceptionHandler = null;
                if (Globals.getSettings().getExceptionHandlerEnabled()
                        && Globals.getSettings().getExceptionHandler() != null
                        && Globals.getSettings().getExceptionHandler().length() > 0) {
                    exceptionHandler = Globals.getSettings().getExceptionHandler();
                }
                MIPSprogramsToAssemble = Globals.program.prepareFilesForAssembly(filesToAssemble,
                        FileStatus.getFile().getPath(), exceptionHandler);
                mainUI.messagesPane.postMarsMessage(buildFileNameList(name + ": assembling ", MIPSprogramsToAssemble));
                // added logic to receive any warnings and output them.... DPS
                // 11/28/06
                ErrorList warnings = Globals.program.assemble(MIPSprogramsToAssemble, extendedAssemblerEnabled,
                        warningsAreErrors);
                if (warnings.warningsOccurred()) {
                    mainUI.messagesPane.postMarsMessage(warnings.generateWarningReport());
                }
                mainUI.messagesPane.postMarsMessage(name + ": operation completed successfully.\n\n");
                FileStatus.setAssembled(true);
                FileStatus.set(FileStatus.RUNNABLE);
                // Reset simulator state and refresh all execute-pane views.
                RegisterFile.resetRegisters();
                Coprocessor1.resetRegisters();
                Coprocessor0.resetRegisters();
                executePane.getTextSegmentWindow().setupTable();
                executePane.getDataSegmentWindow().setupTable();
                executePane.getDataSegmentWindow().highlightCellForAddress(Memory.dataBaseAddress);
                executePane.getDataSegmentWindow().clearHighlighting();
                executePane.getLabelsWindow().setupTable();
                executePane.getTextSegmentWindow().setCodeHighlighting(true);
                executePane.getTextSegmentWindow().highlightStepAtPC();
                registersPane.getRegistersWindow().clearWindow();
                registersPane.getCoprocessor1Window().clearWindow();
                registersPane.getCoprocessor0Window().clearWindow();
                mainUI.setReset(true);
                mainUI.setStarted(false);
                mainUI.getMainPane().setSelectedComponent(executePane);
                // Aug. 24, 2005 Ken Vollmar
                SystemIO.resetFiles(); // Ensure that I/O "file descriptors" are
                // initialized for a new program run
            } catch (ProcessingException pe) {
                String errorReport = pe.errors().generateErrorAndWarningReport();
                mainUI.messagesPane.postMarsMessage(errorReport);
                mainUI.messagesPane.postMarsMessage(name + ": operation completed with errors.\n\n");
                // Select editor line containing first error, and corresponding
                // error message.
                ArrayList errorMessages = pe.errors().getErrorMessages();
                for (int i = 0; i < errorMessages.size(); i++) {
                    ErrorMessage em = (ErrorMessage) errorMessages.get(i);
                    // No line or position may mean File Not Found (e.g.
                    // exception file). Don't try to open. DPS 3-Oct-2010
                    if (em.getLine() == 0 && em.getPosition() == 0) {
                        continue;
                    }
                    if (!em.isWarning() || warningsAreErrors) {
                        Globals.getGui().getMessagesPane().selectErrorMessage(em.getFilename(), em.getLine(),
                                em.getPosition());
                        // Bug workaround: Line selection does not work
                        // correctly for the JEditTextArea editor
                        // when the file is opened then automatically assembled
                        // (assemble-on-open setting).
                        // Automatic assemble happens in EditTabbedPane's
                        // openFile() method, by invoking
                        // this method (actionPerformed) explicitly with null
                        // argument. Thus e!=null test.
                        // DPS 9-Aug-2010
                        if (e != null) {
                            Globals.getGui().getMessagesPane().selectEditorTextLine(em.getFilename(), em.getLine(),
                                    em.getPosition());
                        }
                        break;
                    }
                }
                FileStatus.setAssembled(false);
                FileStatus.set(FileStatus.NOT_EDITED);
            }
        }
    }
    // Handy little utility for building comma-separated list of filenames
    // while not letting line length get out of hand.
    private String buildFileNameList(String preamble, ArrayList programList) {
        String result = preamble;
        int lineLength = result.length();
        for (int i = 0; i < programList.size(); i++) {
            String filename = ((MIPSprogram) programList.get(i)).getFilename();
            result += filename + ((i < programList.size() - 1) ? ", " : "");
            lineLength += filename.length();
            if (lineLength > LINE_LENGTH_LIMIT) {
                result += "\n";
                lineLength = 0;
            }
        }
        return result + ((lineLength == 0) ? "" : "\n") + "\n";
    }
}
/**
* Creates the cache if it does not exist.
* @param context the bundle context that will be used by the case
* @return the created cache (or the existing cache)
*/
private static ServiceRegistryCache createCache(BundleContext context) {
ServiceRegistryCache cache = null;
boolean open = false;
synchronized (ServiceRegistryCacheManager.class) {
if (m_cache == null) {
m_cache = new ServiceRegistryCache(context);
open = true;
}
cache = m_cache;
}
if (open) {
cache.open();
}
return cache;
} |
/**
* This method add this node into a cluster with given cluster ID. If
* such cluster doesn't exist in ClusterManager, it creates a new cluster.
*/
public void addCluster(int clusterID)
{
ClusterManager cm = this.graphManager.getClusterManager();
Cluster cluster = cm.getClusterByID(clusterID);
if (cluster == null)
{
cluster = new Cluster(cm, clusterID, "Cluster " + clusterID);
cm.addCluster(cluster);
}
this.addCluster(cluster);
} |
EFFICIENT PRODUCTION OF URIDINE 5′-DIPHOSPHO-N-ACETYLGLUCOSAMINE BY THE COMBINATION OF THREE RECOMBINANT ENZYMES AND YEAST CELLS
Uridine 5′-diphospho N-acetylglucosamine (UDP-GlcNAc) is an important nucleotide sugar in the biochemistry of all living organisms, and it is an important substrate in the synthesis of oligosaccharides. In the present work, three bioactive enzymes, namely, glucokinase (YqgR), GlcNAc-phosphate mutase (Agm1), and N-acetylglucosamine-1-phosphate uridyltransferase (GlmU), were produced effectively in soluble form in recombinant Escherichia coli. These three enzymes and dried yeast together were used to construct a multistep enzymatic system, which could produce UDP-GlcNAc efficiently with N-acetylglucosamine (GlcNAc) as the substrate. After the optimization of various reaction conditions, 31.5 mM UDP-GlcNAc was produced from 50 mM GlcNAc and 50 mM UMP.
def notifications(request):
    """Context processor exposing queued messages (and an optional count).

    Returns a dict with the message texts under 'notifications'; when the
    request carries a precomputed ``_notifications_count`` attribute it is
    passed through under 'notifications_count'.
    """
    result = {'notifications': [message.message for message in get_messages(request)]}
    if hasattr(request, '_notifications_count'):
        result['notifications_count'] = request._notifications_count
    return result
def IterOneAsync(inter_mat, vect, values):
    """Perform one asynchronous update step of a threshold network.

    A single unit is chosen uniformly at random and set to values[0] or
    values[1] depending on the sign of its weighted input; all other units
    are left untouched.  (The previous version ended with a dead
    ``else: True`` statement, which was a no-op and has been removed.)

    Args:
        inter_mat: square interaction/weight matrix (numpy array, one row
            per unit).
        vect: current state vector (list); not modified in place.
        values: two-element sequence (state for positive activation, state
            for negative activation).

    Returns:
        A new list: a copy of ``vect`` with at most one component updated.
    """
    new_vect = vect[:]  # work on a copy so the caller's state is preserved
    # Pick a unit uniformly at random; int(n * random()) is in [0, n).
    index = int(inter_mat.shape[0] * random.random())
    activation = np.dot(inter_mat[index], new_vect)
    if activation > 0:
        new_vect[index] = int(values[0])
    elif activation < 0:
        new_vect[index] = int(values[1])
    # activation == 0: the chosen unit keeps its current state
    return new_vect
import os
import time
import boto3
import constants
import utility as util
import logging
# Module-level logger configured once; Lambda reuses it across invocations.
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def lambda_export_rds_snapshot_to_s3(event, context):
    """start export task of RDS snapshot to S3 bucket

    Args:
        event: Lambda event; must carry the DB instance id under 'identifier'.
        context: Lambda context (unused).

    Returns:
        dict with 'taskname', 'identifier' and the export task 'status'.

    Raises:
        Propagates errors from the snapshot lookup or the RDS API unchanged.
        (The previous ``except Exception as error: raise Exception(error)``
        wrapper destroyed the original exception type and traceback without
        adding information, so it has been removed.)
    """
    region = os.environ['Region']
    rds = boto3.client('rds', region)
    result = {}
    instance_id = event['identifier']
    # Suffix the current epoch seconds to keep the export task id unique.
    epoch = int(time.time())
    export_id = instance_id + "-" + str(epoch)
    snapshot_id = instance_id + constants.SNAPSHOT_POSTFIX
    snapshot_arn = get_instance_snapshot_arn(snapshot_id)
    account_id = util.get_aws_account_id()
    bucket_name = constants.RDS_SNAPSHOTS_BUCKET_NAME_PREFIX + account_id
    response = rds.start_export_task(
        ExportTaskIdentifier=export_id,
        SourceArn=snapshot_arn,
        S3BucketName=bucket_name,
        IamRoleArn=os.environ['SNAPSHOT_EXPORT_TASK_ROLE'],
        KmsKeyId=os.environ['SNAPSHOT_EXPORT_TASK_KEY'],
    )
    logger.info("function start_export_task execution result: {}".format(response))
    result['taskname'] = constants.EXPORT_SNAPSHOT
    result['identifier'] = instance_id
    result['status'] = response['Status']
    logger.info("function lambda_export_rds_snapshot_to_s3 execution result: {}".format(result))
    return result
def get_instance_snapshot_arn(snapshot_name):
    """returns instance snapshot arn if in available state

    Raises:
        Exception: if the describe call fails, no single snapshot matches the
            name, or the snapshot is not yet in the 'available' state.
    """
    logger.info('starting function get_instance_snapshot_arn execution')
    region = os.environ['Region']
    rds = boto3.client('rds', region)
    snapshots_response = rds.describe_db_snapshots(DBSnapshotIdentifier=snapshot_name)
    logger.info("function describe_db_snapshots execution result: {}".format(snapshots_response))
    # Explicit checks instead of assert: assertions are stripped under
    # "python -O", which would silently skip this validation.
    if snapshots_response['ResponseMetadata']['HTTPStatusCode'] != 200:
        raise Exception(f"Error fetching DB snapshots: {snapshots_response}")
    snapshots = snapshots_response['DBSnapshots']
    if len(snapshots) != 1:
        raise Exception(f"No snapshot matches name {snapshot_name}")
    snap = snapshots[0]
    snap_status = snap.get('Status')
    if snap_status != 'available':
        raise Exception(f"Snapshot is not available yet, status is {snap_status}")
    logger.info("function get_instance_snapshot_arn execution result: {}".format(snap['DBSnapshotArn']))
    logger.info('ending function get_instance_snapshot_arn execution')
    return snap['DBSnapshotArn']
|
package com.randolltest.facerecognition.ui.home;
import android.Manifest;
import android.os.Bundle;
import android.view.View;
import com.arcsoft.face.ErrorInfo;
import com.blankj.utilcode.util.LogUtils;
import com.blankj.utilcode.util.SPUtils;
import com.blankj.utilcode.util.ToastUtils;
import com.ftd.livepermissions.LivePermissions;
import com.ftd.livepermissions.PermissionResult;
import com.randolltest.facerecognition.R;
import com.randolltest.facerecognition.data.Constants;
import com.randolltest.facerecognition.ui.base.BaseFragment;
import com.randolltest.facerecognition.ui.base.DataBindingConfig;
import com.randolltest.facerecognition.ui.recognize.RecognizeFragment;
import com.randolltest.facerecognition.util.NavigationUtils;
import androidx.annotation.NonNull;
import androidx.annotation.Nullable;
public class HomeFragment extends BaseFragment {
    /**
     * Permissions required by the application.
     */
    private static final String[] NEEDED_PERMISSIONS = new String[]{
            Manifest.permission.READ_PHONE_STATE,
            Manifest.permission.CAMERA,
            Manifest.permission.WRITE_EXTERNAL_STORAGE,
            Manifest.permission.READ_EXTERNAL_STORAGE
    };
    @Override
    protected void initViewModel() {
    }
    @Override
    protected DataBindingConfig getDataBindingConfig() {
        return new DataBindingConfig(R.layout.fragment_home, null);
    }
    @Override
    public void onViewCreated(@NonNull View view, @Nullable Bundle savedInstanceState) {
        super.onViewCreated(view, savedInstanceState);
        // Observe the face SDK activation result: on success persist the
        // state, publish it on the shared view model and navigate to the
        // recognize screen; otherwise show the error code in a toast.
        getSharedViewModel().mSdkActiveCode.observe(getViewLifecycleOwner(), activeCode -> {
            if (activeCode == ErrorInfo.MOK || activeCode == ErrorInfo.MERR_ASF_ALREADY_ACTIVATED) {
                SPUtils.getInstance().put(Constants.SP.KEY_ACTIVE_STATE, true);
                getSharedViewModel().mIsSdkActivated.setValue(true);
                nav().navigate(R.id.action_home_to_recognize);
            } else {
                ToastUtils.showShort(getString(R.string.active_failed, activeCode));
            }
        });
    }
    @Override
    public void onStart() {
        super.onStart();
        // Request the runtime permissions every time the fragment starts.
        LivePermissions livePermissions = new LivePermissions(this);
        livePermissions.request(NEEDED_PERMISSIONS)
                .observe(this, permissionResult -> {
                    if (permissionResult instanceof PermissionResult.Grant) {
                        // Permissions granted
                        if (SPUtils.getInstance().getBoolean(Constants.SP.KEY_ACTIVE_STATE, false)) {
                            getSharedViewModel().mIsSdkActivated.setValue(true);
                            boolean naviResult = NavigationUtils.navi2(nav(), R.id.homeFragment, R.id.action_home_to_recognize);
                            LogUtils.i(String.format("Navigate to %s %s", RecognizeFragment.class.getSimpleName(), naviResult ? "成功~" :
                                    "失败!"));
                        } else {
                            // Permissions granted but the SDK is not activated yet
                            getSharedViewModel().mIsSdkActivated.setValue(false);
                        }
                    } else if (permissionResult instanceof PermissionResult.Rationale) {
                        // Permissions denied
                        StringBuilder stringBuilder = new StringBuilder();
                        for (String permission : ((PermissionResult.Rationale) permissionResult).getPermissions()) {
                            stringBuilder.append(permission);
                        }
                        ToastUtils.showShort("拒绝了 " + stringBuilder.toString() + " 权限,部分功能将受影响");
                    } else {
                        // Permissions denied with "don't ask again" checked
                        StringBuilder stringBuilder = new StringBuilder();
                        for (String permission : ((PermissionResult.Deny) permissionResult).getPermissions()) {
                            stringBuilder.append(permission);
                        }
                        ToastUtils.showShort("永久拒绝了 " + stringBuilder.toString() + " 权限,部分功能将无法使用");
                    }
                });
    }
}
|
Detection of plasmacytoma of the nasal cavity by immunoelectrophoresis of nasal washing fluid
A patient with a solitary IgG-lambda plasmacytoma of the nasal cavity is described. Immunoelectrophoresis of the nasal washing fluid showed the presence of the same paraprotein. After radiotherapy the tumour and the paraprotein disappeared. Recurrence of the tumour after 6 months was again associated with the presence of the paraprotein in the nasal washing fluid. Immunoelectrophoresis of nasal washings may offer an easy method for the detection and follow-up of localized plasmacytomas of the upper respiratory tract.
// argumentValue returns a closure that yields the log argument used in a log
// message. Invoking the closure also records in f.usedArguments that the
// argument at this position has been consumed.
func (f *Formatter) argumentValue(position int, argument interface{}) func() interface{} {
	return func() interface{} {
		f.usedArguments[position] = true // mark the argument as used on access
		return argument
	}
}
def compute_stress_objective(self, xPhys, dobj, p=4):
rho = self.compute_young_moduli(xPhys)
EBu = sum([self.EB @ self.u[:, i][self.edofMat.T]
for i in range(self.nloads)])
s11, s22, s12 = numpy.hsplit((EBu * rho / float(self.nloads)).T, 3)
self.stress[:] = numpy.sqrt(
s11**2 - s11 * s22 + s22**2 + 3 * s12**2).squeeze()
obj = self.sigma_pow(s11, s22, s12, p).sum()
K = self.build_K(xPhys)
K = cvxopt.spmatrix(
K.data, K.row.astype(numpy.int), K.col.astype(numpy.int))
dK = self.build_dK(xPhys).tocsc()
U = numpy.tile(self.u[self.free, :], (self.nel, 1))
dKu = (dK @ U).reshape((-1, self.nel * self.nloads), order="F")
rhs = cvxopt.matrix(dKu)
cvxopt.cholmod.linsolve(K, rhs)
self.du[self.free, :] = -numpy.array(rhs)
du = self.du.reshape((self.ndof * self.nel, self.nloads), order="F")
rep_edofMat = (numpy.tile(self.edofMat.T, self.nel) + numpy.tile(
numpy.repeat(numpy.arange(self.nel) * self.ndof, self.nel),
(8, 1)))
dEBu = sum([self.EB @ du[:, j][rep_edofMat]
for j in range(self.nloads)])
rhodEBu = numpy.tile(rho, self.nel) * dEBu
drho = numpy.empty(xPhys.shape)
self.compute_young_moduli(xPhys, drho)
drhoEBu = numpy.diag(drho).flatten() * numpy.tile(EBu, self.nel)
ds11, ds22, ds12 = map(
lambda x: x.reshape(self.nel, self.nel).T,
numpy.hsplit(((drhoEBu + rhodEBu) / float(self.nloads)).T, 3))
dobj[:] = self.dstress[:] = self.dsigma_pow(
s11, s22, s12, ds11, ds22, ds12, p).sum(0)
return obj |
<filename>mini_dbms/mdb/src/mdb/OneRelClause.java
// Automatically generated code. Edit at your own risk!
// Generated by bali2jak v2002.09.03.
package mdb;
import Jakarta.util.*;
import java.io.*;
import java.util.*;
/**
 * AST node for a single relational clause of the form
 * {@code field_name rel literal}.  Children: 0 = field name, 1 = relational
 * operator, 2 = literal.  This class is bali2jak-generated; keep hand edits
 * minimal.
 */
public class OneRelClause extends One_rel_clause {
    final public static int ARG_LENGTH = 3 ;
    final public static int TOK_LENGTH = 1 /* Kludge! */ ;
    public void execute () {
        super.execute();
    }
    /** @return the field-name operand (child 0). */
    public Field_name getField_name () {
        return (Field_name) arg [0] ;
    }
    /** @return the literal operand (child 2). */
    public Literal getLiteral () {
        return (Literal) arg [2] ;
    }
    /** @return the relational operator (child 1). */
    public Rel getRel () {
        return (Rel) arg [1] ;
    }
    /** @return the print-order flags for the three children (all false). */
    public boolean[] printorder () {
        return new boolean[] {false, false, false} ;
    }
    /** Populates the child array (field, rel, literal) and initializes them. */
    public OneRelClause setParms (Field_name arg0, Rel arg1, Literal arg2) {
        arg = new AstNode [ARG_LENGTH] ;
        tok = new AstTokenInterface [TOK_LENGTH] ;
        arg [0] = arg0 ; /* Field_name */
        arg [1] = arg1 ; /* Rel */
        arg [2] = arg2 ; /* Literal */
        InitChildren () ;
        return (OneRelClause) this ;
    }
}
|
def bootstrap_chef():
    """Provision a host with chef over SSH (fabric task).

    As root: installs the ruby toolchain and the chef gem, rsyncs the local
    chef configuration to /etc/chef, clones or updates the cookbooks
    repository, then runs chef against this host's node JSON.
    """
    with settings(user='root'):
        # Base packages: git and ruby are needed to install the chef gem.
        run('apt-get update')
        run('apt-get -y dist-upgrade')
        run('apt-get install -y git-core rubygems ruby ruby-dev')
        run('gem install --no-rdoc --no-ri chef')
        # Push the local chef configuration tree to the host.
        rsync_project(remote_dir='/etc/chef/', local_dir=CONFIG_ROOT + sep)
        # Probe quietly whether the cookbooks checkout already exists.
        with settings(warn_only=True):
            with hide('everything'):
                test_cookbook_dir = run('test -d /etc/chef/cookbooks')
        # Update the existing checkout, or clone it on first run.
        if test_cookbook_dir.return_code == 0:
            with cd('/etc/chef/cookbooks'):
                sshagent_run('git reset --hard && git pull')
        else:
            sshagent_run('git clone %s /etc/chef/cookbooks'
                % env.cookbook_repo)
        run('%s -j /etc/chef/nodes/%s.json' % (env.chef_executable, env.host))
/**
* \file pcm/pcm_extplug.c
* \ingroup Plugin_SDK
* \brief External Filter Plugin SDK
* \author <NAME> <<EMAIL>>
* \date 2005
*/
/*
* PCM - External Filter Plugin SDK
* Copyright (c) 2005 by <NAME> <<EMAIL>>
*
*
* This library is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include "pcm_local.h"
#include "pcm_plugin.h"
#include "pcm_extplug.h"
#include "pcm_ext_parm.h"
#ifndef PIC
/* entry for static linking */
const char *_snd_module_pcm_extplug = "";
#endif
#ifndef DOC_HIDDEN
/* Private state of an extplug PCM: the generic plugin data plus the
 * user-visible extplug handle and the client/slave parameter constraints. */
typedef struct snd_pcm_extplug_priv {
	snd_pcm_plugin_t plug;
	snd_pcm_extplug_t *data;
	struct snd_ext_parm params[SND_PCM_EXTPLUG_HW_PARAMS];
	struct snd_ext_parm sparams[SND_PCM_EXTPLUG_HW_PARAMS];
} extplug_priv_t;
/* Map extplug parameter indices to the corresponding hw_params types. */
static const int hw_params_type[SND_PCM_EXTPLUG_HW_PARAMS] = {
	[SND_PCM_EXTPLUG_HW_FORMAT] = SND_PCM_HW_PARAM_FORMAT,
	[SND_PCM_EXTPLUG_HW_CHANNELS] = SND_PCM_HW_PARAM_CHANNELS
};
/* Mask parameters precede the interval parameters in the hw_param enum. */
#define is_mask_type(i) (hw_params_type[i] < SND_PCM_HW_PARAM_FIRST_INTERVAL)
/* hw_param bits that must not stay linked between client and slave when the
 * corresponding extplug constraint is active. */
static const unsigned int excl_parbits[SND_PCM_EXTPLUG_HW_PARAMS] = {
	[SND_PCM_EXTPLUG_HW_FORMAT] = (SND_PCM_HW_PARBIT_FORMAT|
				       SND_PCM_HW_PARBIT_SUBFORMAT |
				       SND_PCM_HW_PARBIT_SAMPLE_BITS),
	[SND_PCM_EXTPLUG_HW_CHANNELS] = (SND_PCM_HW_PARBIT_CHANNELS|
					 SND_PCM_HW_PARBIT_FRAME_BITS),
};
/*
* set min/max values for the given parameter
*/
/*
 * Set min/max bounds for the given parameter, discarding any previously
 * installed value list and marking the parameter active.  Always returns 0.
 */
int snd_ext_parm_set_minmax(struct snd_ext_parm *parm, unsigned int min, unsigned int max)
{
	free(parm->list);
	parm->list = NULL;
	parm->num_list = 0;
	parm->min = min;
	parm->max = max;
	parm->active = 1;
	return 0;
}
/*
* set the list of available values for the given parameter
*/
/*
 * qsort comparator for unsigned int values, ascending.  Uses explicit
 * comparisons rather than subtraction: the unsigned difference converted
 * to int yields the wrong sign when the values differ by more than
 * INT_MAX (e.g. comparing 1 against 4000000000).
 */
static int val_compar(const void *ap, const void *bp)
{
	unsigned int a = *(const unsigned int *)ap;
	unsigned int b = *(const unsigned int *)bp;
	return (a > b) - (a < b);
}
/*
 * Replace the parameter's value list with a sorted copy of the given list
 * and mark the parameter active.  Returns 0 on success, -ENOMEM if the copy
 * cannot be allocated (the parameter is then left untouched).
 */
int snd_ext_parm_set_list(struct snd_ext_parm *parm, unsigned int num_list, const unsigned int *list)
{
	unsigned int *new_list;
	/* copy and sort first so an allocation failure leaves *parm intact */
	new_list = malloc(sizeof(*new_list) * num_list);
	if (new_list == NULL)
		return -ENOMEM;
	memcpy(new_list, list, sizeof(*new_list) * num_list);
	qsort(new_list, num_list, sizeof(*new_list), val_compar);
	free(parm->list);
	parm->num_list = num_list;
	parm->list = new_list;
	parm->active = 1;
	return 0;
}
/* Release the value list and reset the parameter to its inactive state. */
void snd_ext_parm_clear(struct snd_ext_parm *parm)
{
	free(parm->list);
	memset(parm, 0, sizeof(*parm));
}
/*
* limit the interval to the given list
*/
/*
 * Restrict the interval to members of a value list.  Returns 1 if the
 * interval changed, 0 if it already conformed, -ENOENT for an empty
 * interval and -EINVAL when no list value fits.
 *
 * NOTE(review): assumes 'list' is sorted ascending (callers sort via
 * val_compar); the two scans below rely on that ordering.
 */
int snd_interval_list(snd_interval_t *ival, int num_list, unsigned int *list)
{
	int imin, imax;
	int changed = 0;
	if (snd_interval_empty(ival))
		return -ENOENT;
	/* raise the lower bound to the first list value >= min */
	for (imin = 0; imin < num_list; imin++) {
		if (ival->min == list[imin] && ! ival->openmin)
			break;
		if (ival->min <= list[imin]) {
			ival->min = list[imin];
			ival->openmin = 0;
			changed = 1;
			break;
		}
	}
	if (imin >= num_list)
		return -EINVAL;
	/* lower the upper bound to the last list value <= max */
	for (imax = num_list - 1; imax >= imin; imax--) {
		if (ival->max == list[imax] && ! ival->openmax)
			break;
		if (ival->max >= list[imax]) {
			ival->max = list[imax];
			ival->openmax = 0;
			changed = 1;
			break;
		}
	}
	if (imax < imin)
		return -EINVAL;
	return changed;
}
/*
* refine the interval parameter
*/
/*
 * Refine an interval hw parameter against the extplug constraint of the
 * given type: apply the value list when one is installed, otherwise the
 * min/max range.  An inactive constraint leaves the interval untouched.
 */
int snd_ext_parm_interval_refine(snd_interval_t *ival, struct snd_ext_parm *parm, int type)
{
	parm += type;
	if (! parm->active)
		return 0;
	ival->integer |= parm->integer;
	if (parm->num_list) {
		return snd_interval_list(ival, parm->num_list, parm->list);
	} else if (parm->min || parm->max) {
		snd_interval_t t;
		memset(&t, 0, sizeof(t));
		snd_interval_set_minmax(&t, parm->min, parm->max);
		t.integer = ival->integer;
		return snd_interval_refine(ival, &t);
	}
	return 0;
}
/*
* refine the mask parameter
*/
/*
 * Refine a mask hw parameter against the constraint's value list: build a
 * bitmask from the listed values and intersect it with the current mask.
 */
int snd_ext_parm_mask_refine(snd_mask_t *mask, struct snd_ext_parm *parm, int type)
{
	snd_mask_t bits;
	unsigned int i;
	parm += type;
	memset(&bits, 0, sizeof(bits));
	for (i = 0; i < parm->num_list; i++)
		bits.bits[parm->list[i] / 32] |= 1U << (parm->list[i] % 32);
	return snd_mask_refine(mask, &bits);
}
/*
* hw_refine callback
*/
/*
 * Apply all extplug constraints to the given hw_params, dispatching to the
 * mask or interval refiner depending on the parameter kind.  Returns a
 * negative error, or the changed flag OR-ed over all parameters.
 */
static int extplug_hw_refine(snd_pcm_hw_params_t *hw_params,
			     struct snd_ext_parm *parm)
{
	int i, err, change = 0;
	for (i = 0; i < SND_PCM_EXTPLUG_HW_PARAMS; i++) {
		int type = hw_params_type[i];
		if (is_mask_type(i))
			err = snd_ext_parm_mask_refine(hw_param_mask(hw_params, type),
						       parm, i);
		else
			err = snd_ext_parm_interval_refine(hw_param_interval(hw_params, type),
							   parm, i);
		if (err < 0)
			return err;
		change |= err;
	}
	return change;
}
/*
 * Client-side prepare step of the refinement: restrict access to the SHM
 * access types, apply the client-side extplug constraints and drop the
 * mmap info bits.
 */
static int snd_pcm_extplug_hw_refine_cprepare(snd_pcm_t *pcm,
					      snd_pcm_hw_params_t *params)
{
	extplug_priv_t *ext = pcm->private_data;
	int err;
	snd_pcm_access_mask_t access_mask = { SND_PCM_ACCBIT_SHM };
	err = _snd_pcm_hw_param_set_mask(params, SND_PCM_HW_PARAM_ACCESS,
					 &access_mask);
	if (err < 0)
		return err;
	err = extplug_hw_refine(params, ext->params);
	if (err < 0)
		return err;
	/* the client buffer goes through the plugin, so it is not mmap-able */
	params->info &= ~(SND_PCM_INFO_MMAP | SND_PCM_INFO_MMAP_VALID);
	return 0;
}
/*
 * Slave-side prepare step: start from unconstrained parameters, force mmap
 * access and apply the slave-side extplug constraints.
 */
static int snd_pcm_extplug_hw_refine_sprepare(snd_pcm_t *pcm,
					      snd_pcm_hw_params_t *sparams)
{
	extplug_priv_t *ext = pcm->private_data;
	snd_pcm_access_mask_t saccess_mask = { SND_PCM_ACCBIT_MMAP };
	_snd_pcm_hw_params_any(sparams);
	_snd_pcm_hw_param_set_mask(sparams, SND_PCM_HW_PARAM_ACCESS,
				   &saccess_mask);
	extplug_hw_refine(sparams, ext->sparams);
	return 0;
}
/*
 * Compute which hw_param bits stay linked between the client and slave
 * sides: start from the full set and drop the bits owned by any active
 * constraint (those parameters may legitimately differ across the plugin).
 */
static unsigned int get_links(struct snd_ext_parm *params)
{
	int i;
	unsigned int links = (SND_PCM_HW_PARBIT_FORMAT |
			      SND_PCM_HW_PARBIT_SUBFORMAT |
			      SND_PCM_HW_PARBIT_SAMPLE_BITS |
			      SND_PCM_HW_PARBIT_CHANNELS |
			      SND_PCM_HW_PARBIT_FRAME_BITS |
			      SND_PCM_HW_PARBIT_RATE |
			      SND_PCM_HW_PARBIT_PERIODS |
			      SND_PCM_HW_PARBIT_PERIOD_SIZE |
			      SND_PCM_HW_PARBIT_PERIOD_TIME |
			      SND_PCM_HW_PARBIT_BUFFER_SIZE |
			      SND_PCM_HW_PARBIT_BUFFER_TIME |
			      SND_PCM_HW_PARBIT_TICK_TIME);
	for (i = 0; i < SND_PCM_EXTPLUG_HW_PARAMS; i++) {
		if (params[i].active)
			links &= ~excl_parbits[i];
	}
	return links;
}
/* Propagate client params to the slave side over the linked param bits. */
static int snd_pcm_extplug_hw_refine_schange(snd_pcm_t *pcm,
					     snd_pcm_hw_params_t *params,
					     snd_pcm_hw_params_t *sparams)
{
	extplug_priv_t *ext = pcm->private_data;
	unsigned int links = get_links(ext->sparams);
	return _snd_pcm_hw_params_refine(sparams, links, params);
}
/* Propagate slave params back to the client side over the linked bits. */
static int snd_pcm_extplug_hw_refine_cchange(snd_pcm_t *pcm,
					     snd_pcm_hw_params_t *params,
					     snd_pcm_hw_params_t *sparams)
{
	extplug_priv_t *ext = pcm->private_data;
	unsigned int links = get_links(ext->params);
	return _snd_pcm_hw_params_refine(params, links, sparams);
}
/*
 * hw_refine callback: delegate to the generic slave refinement using the
 * extplug-specific prepare/change hooks defined above.
 */
static int snd_pcm_extplug_hw_refine(snd_pcm_t *pcm, snd_pcm_hw_params_t *params)
{
	return snd_pcm_hw_refine_slave(pcm, params,
				       snd_pcm_extplug_hw_refine_cprepare,
				       snd_pcm_extplug_hw_refine_cchange,
				       snd_pcm_extplug_hw_refine_sprepare,
				       snd_pcm_extplug_hw_refine_schange,
				       snd_pcm_generic_hw_refine);
}
/*
* hw_params callback
*/
/*
 * hw_params callback: configure the slave, publish the negotiated client
 * and slave formats into the extplug handle and invoke the plugin's
 * optional hw_params hook.
 */
static int snd_pcm_extplug_hw_params(snd_pcm_t *pcm, snd_pcm_hw_params_t *params)
{
	extplug_priv_t *ext = pcm->private_data;
	snd_pcm_t *slave = ext->plug.gen.slave;
	int err = snd_pcm_hw_params_slave(pcm, params,
					  snd_pcm_extplug_hw_refine_cchange,
					  snd_pcm_extplug_hw_refine_sprepare,
					  snd_pcm_extplug_hw_refine_schange,
					  snd_pcm_generic_hw_params);
	if (err < 0)
		return err;
	/* expose the negotiated slave and client setup to the plugin */
	ext->data->slave_format = slave->format;
	ext->data->slave_subformat = slave->subformat;
	ext->data->slave_channels = slave->channels;
	ext->data->rate = slave->rate;
	INTERNAL(snd_pcm_hw_params_get_format)(params, &ext->data->format);
	INTERNAL(snd_pcm_hw_params_get_subformat)(params, &ext->data->subformat);
	INTERNAL(snd_pcm_hw_params_get_channels)(params, &ext->data->channels);
	/* optional plugin hook, called after the slave is configured */
	if (ext->data->callback->hw_params) {
		err = ext->data->callback->hw_params(ext->data, params);
		if (err < 0)
			return err;
	}
	return 0;
}
/*
* hw_free callback
*/
/*
 * hw_free callback: free the slave's hw setup, then invoke the plugin's
 * optional hw_free hook.
 * NOTE(review): the slave's hw_free return value is ignored here; only the
 * plugin hook's status is propagated.
 */
static int snd_pcm_extplug_hw_free(snd_pcm_t *pcm)
{
	extplug_priv_t *ext = pcm->private_data;
	snd_pcm_hw_free(ext->plug.gen.slave);
	if (ext->data->callback->hw_free)
		return ext->data->callback->hw_free(ext->data);
	return 0;
}
/*
* write_areas skeleton - call transfer callback
*/
/*
 * write_areas skeleton - call transfer callback.  Clamps the request to
 * the slave's capacity, lets the plugin convert client areas into the
 * slave areas, and reports how many frames were actually transferred.
 */
static snd_pcm_uframes_t
snd_pcm_extplug_write_areas(snd_pcm_t *pcm,
			    const snd_pcm_channel_area_t *areas,
			    snd_pcm_uframes_t offset,
			    snd_pcm_uframes_t size,
			    const snd_pcm_channel_area_t *slave_areas,
			    snd_pcm_uframes_t slave_offset,
			    snd_pcm_uframes_t *slave_sizep)
{
	extplug_priv_t *ext = pcm->private_data;
	if (size > *slave_sizep)
		size = *slave_sizep;
	/* playback direction: source = client areas, destination = slave */
	size = ext->data->callback->transfer(ext->data, slave_areas, slave_offset,
					     areas, offset, size);
	*slave_sizep = size;
	return size;
}
/*
* read_areas skeleton - call transfer callback
*/
/*
 * read_areas skeleton - call transfer callback.  Same as write_areas but
 * with source and destination swapped: slave areas feed the client areas.
 */
static snd_pcm_uframes_t
snd_pcm_extplug_read_areas(snd_pcm_t *pcm,
			   const snd_pcm_channel_area_t *areas,
			   snd_pcm_uframes_t offset,
			   snd_pcm_uframes_t size,
			   const snd_pcm_channel_area_t *slave_areas,
			   snd_pcm_uframes_t slave_offset,
			   snd_pcm_uframes_t *slave_sizep)
{
	extplug_priv_t *ext = pcm->private_data;
	if (size > *slave_sizep)
		size = *slave_sizep;
	/* capture direction: source = slave areas, destination = client */
	size = ext->data->callback->transfer(ext->data, areas, offset,
					     slave_areas, slave_offset, size);
	*slave_sizep = size;
	return size;
}
/*
* call init callback
*/
/* init callback: forward to the plugin's init callback (only installed
 * when the plugin provides one — see snd_pcm_extplug_create) */
static int snd_pcm_extplug_init(snd_pcm_t *pcm)
{
	extplug_priv_t *ext = pcm->private_data;
	return ext->data->callback->init(ext->data);
}
/*
* dump setup
*/
/* dump callback: let the plugin print itself if it provides a dump
 * callback, otherwise print a generic header plus the PCM setup; the
 * slave's dump is always appended */
static void snd_pcm_extplug_dump(snd_pcm_t *pcm, snd_output_t *out)
{
	extplug_priv_t *ext = pcm->private_data;
	if (ext->data->callback->dump)
		ext->data->callback->dump(ext->data, out);
	else {
		/* fallback: plugin name (if any) and the current setup */
		if (ext->data->name)
			snd_output_printf(out, "%s\n", ext->data->name);
		else
			snd_output_printf(out, "External PCM Plugin\n");
		if (pcm->setup) {
			snd_output_printf(out, "Its setup is:\n");
			snd_pcm_dump_setup(pcm, out);
		}
	}
	snd_output_printf(out, "Slave: ");
	snd_pcm_dump(ext->plug.gen.slave, out);
}
/* release all hw_params constraints, both client-side (params) and
 * slave-side (sparams), for every parameter type */
static void clear_ext_params(extplug_priv_t *ext)
{
	int i;
	for (i = 0; i < SND_PCM_EXTPLUG_HW_PARAMS; i++) {
		snd_ext_parm_clear(&ext->params[i]);
		snd_ext_parm_clear(&ext->sparams[i]);
	}
}
/* close callback: close the slave, free constraint storage, notify the
 * plugin (optional close callback), then release the private data */
static int snd_pcm_extplug_close(snd_pcm_t *pcm)
{
	extplug_priv_t *ext = pcm->private_data;
	snd_pcm_close(ext->plug.gen.slave);
	clear_ext_params(ext);
	if (ext->data->callback->close)
		ext->data->callback->close(ext->data);
	free(ext);
	return 0;
}
/* query_chmaps: delegate to the plugin when it implements the callback
 * (chmap callbacks exist only since protocol version 1.0.2), otherwise
 * fall back to the generic pass-through implementation */
static snd_pcm_chmap_query_t **snd_pcm_extplug_query_chmaps(snd_pcm_t *pcm)
{
	extplug_priv_t *ext = pcm->private_data;
	if (ext->data->version >= 0x010002 &&
	    ext->data->callback->query_chmaps)
		return ext->data->callback->query_chmaps(ext->data);
	return snd_pcm_generic_query_chmaps(pcm);
}
/* get_chmap: plugin callback when available (protocol >= 1.0.2),
 * otherwise the generic pass-through */
static snd_pcm_chmap_t *snd_pcm_extplug_get_chmap(snd_pcm_t *pcm)
{
	extplug_priv_t *ext = pcm->private_data;
	if (ext->data->version >= 0x010002 &&
	    ext->data->callback->get_chmap)
		return ext->data->callback->get_chmap(ext->data);
	return snd_pcm_generic_get_chmap(pcm);
}
/* set_chmap: plugin callback when available (protocol >= 1.0.2),
 * otherwise the generic pass-through */
static int snd_pcm_extplug_set_chmap(snd_pcm_t *pcm, const snd_pcm_chmap_t *map)
{
	extplug_priv_t *ext = pcm->private_data;
	if (ext->data->version >= 0x010002 &&
	    ext->data->callback->set_chmap)
		return ext->data->callback->set_chmap(ext->data, map);
	return snd_pcm_generic_set_chmap(pcm, map);
}
/* PCM ops table: extplug-specific entry points mixed with the generic
 * plugin helpers for everything the extplug layer does not override */
static const snd_pcm_ops_t snd_pcm_extplug_ops = {
	.close = snd_pcm_extplug_close,
	.info = snd_pcm_generic_info,
	.hw_refine = snd_pcm_extplug_hw_refine,
	.hw_params = snd_pcm_extplug_hw_params,
	.hw_free = snd_pcm_extplug_hw_free,
	.sw_params = snd_pcm_generic_sw_params,
	.channel_info = snd_pcm_generic_channel_info,
	.dump = snd_pcm_extplug_dump,
	.nonblock = snd_pcm_generic_nonblock,
	.async = snd_pcm_generic_async,
	.mmap = snd_pcm_generic_mmap,
	.munmap = snd_pcm_generic_munmap,
	.query_chmaps = snd_pcm_extplug_query_chmaps,
	.get_chmap = snd_pcm_extplug_get_chmap,
	.set_chmap = snd_pcm_extplug_set_chmap,
};
#endif /* !DOC_HIDDEN */
/*
* Exported functions
*/
/*! \page pcm_external_plugins PCM External Plugin SDK
\section pcm_externals External Plugins
The external plugins are implemented in a shared object file located
at /usr/lib/alsa-lib (the exact location depends on the build option
and asoundrc configuration). It has to be the file like
libasound_module_pcm_MYPLUGIN.so, where MYPLUGIN corresponds to your
own plugin name.
The entry point of the plugin is defined via
#SND_PCM_PLUGIN_DEFINE_FUNC() macro. This macro defines the function
with a proper name to be referred from alsa-lib. The function takes
the following 6 arguments:
\code
int (snd_pcm_t **pcmp, const char *name, snd_config_t *root,
snd_config_t *conf, snd_pcm_stream_t stream, int mode)
\endcode
The first argument, pcmp, is the pointer to store the resultant PCM
handle. The arguments name, root, stream and mode are the parameters
to be passed to the plugin constructor. The conf is the configuration
tree for the plugin. The arguments above are defined in the macro
itself, so don't use variables with the same names to shadow
parameters.
After parsing the configuration parameters in the given conf tree,
usually you will call the external plugin API function,
#snd_pcm_extplug_create() or #snd_pcm_ioplug_create(), depending
on the plugin type. The PCM handle must be filled *pcmp in return.
Then this function must return either a value 0 when succeeded, or a
negative value as the error code.
Finally, add #SND_PCM_PLUGIN_SYMBOL() with the name of your
plugin as the argument at the end. This defines the proper versioned
symbol as the reference.
The typical code would look like below:
\code
struct myplug_info {
snd_pcm_extplug_t ext;
int my_own_data;
...
};
SND_PCM_PLUGIN_DEFINE_FUNC(myplug)
{
snd_config_iterator_t i, next;
snd_config_t *slave = NULL;
struct myplug_info *myplug;
int err;
snd_config_for_each(i, next, conf) {
snd_config_t *n = snd_config_iterator_entry(i);
const char *id;
if (snd_config_get_id(n, &id) < 0)
continue;
if (strcmp(id, "comment") == 0 || strcmp(id, "type") == 0)
continue;
if (strcmp(id, "slave") == 0) {
slave = n;
continue;
}
if (strcmp(id, "my_own_parameter") == 0) {
....
continue;
}
SNDERR("Unknown field %s", id);
return -EINVAL;
}
if (! slave) {
SNDERR("No slave defined for myplug");
return -EINVAL;
}
myplug = calloc(1, sizeof(*myplug));
if (myplug == NULL)
return -ENOMEM;
myplug->ext.version = SND_PCM_EXTPLUG_VERSION;
myplug->ext.name = "My Own Plugin";
myplug->ext.callback = &my_own_callback;
myplug->ext.private_data = myplug;
....
err = snd_pcm_extplug_create(&myplug->ext, name, root, conf, stream, mode);
if (err < 0) {
myplug_free(myplug);
return err;
}
*pcmp = myplug->ext.pcm;
return 0;
}
SND_PCM_PLUGIN_SYMBOL(myplug);
\endcode
Read the codes in alsa-plugins package for the real examples.
\section pcm_extplug External Plugin: Filter-Type Plugin
The filter-type plugin is a plugin to convert the PCM signals from the input
and feeds to the output. Thus, this plugin always needs a slave PCM as its output.
The plugin can modify the format and the channels of the input/output PCM.
It can <i>not</i> modify the sample rate (because of simplicity reason).
The following fields have to be filled in extplug record before calling
#snd_pcm_extplug_create() : version, name, callback.
Other fields are optional and should be initialized with zero.
The constant #SND_PCM_EXTPLUG_VERSION must be passed to the version
field for the version check in alsa-lib. A non-NULL ASCII string
has to be passed to the name field. The callback field contains the
table of callback functions for this plugin (defined as
#snd_pcm_extplug_callback_t).
The driver can set an arbitrary value (pointer) to private_data
field to refer its own data in the callbacks.
The rest fields are filled by #snd_pcm_extplug_create(). The pcm field
is the resultant PCM handle. The others are the current status of the
PCM.
The callback functions in #snd_pcm_extplug_callback_t define the real
behavior of the driver.
At least, the transfer callback must be given. This callback is called
each time a certain size of data block is transferred to the slave
PCM. Other callbacks are optional.
The close callback is called when the PCM is closed. If the plugin
allocates private resources, this is the place to release them
again. The hw_params and hw_free callbacks are called at
#snd_pcm_hw_params() and #snd_pcm_hw_free() API calls,
respectively. The last, dump callback, is called for printing the
information of the given plugin.
The init callback is called when the PCM is at prepare state or any
initialization is issued. Use this callback to reset the PCM instance
to a sane initial state.
The hw_params constraints can be defined via either
#snd_pcm_extplug_set_param_minmax() or #snd_pcm_extplug_set_param_list()
functions after calling #snd_pcm_extplug_create().
The former defines the minimal and maximal acceptable values for the
given hw_params parameter (SND_PCM_EXTPLUG_HW_XXX).
This function can't be used for the format parameter. The latter
function specifies the available parameter values as the list.
As mentioned above, the rate can't be changed. Only changeable
parameters are sample format and channels.
To define the constraints of the slave PCM configuration, use
either #snd_pcm_extplug_set_slave_param_minmax() or
#snd_pcm_extplug_set_slave_param_list(). The arguments are the same
as for the former functions.
To clear the parameter constraints, call #snd_pcm_extplug_params_reset()
function.
*/
/**
* \brief Create an extplug instance
* \param extplug the extplug handle
* \param name name of the PCM
* \param root configuration tree root
* \param slave_conf slave configuration root
* \param stream stream direction
* \param mode PCM open mode
* \return 0 if successful, or a negative error code
*
* Creates the extplug instance based on the given handle.
* The slave_conf argument is mandatory, and usually taken from the config tree of the
* PCM plugin as "slave" config value.
* name, root, stream and mode arguments are the values used for opening the PCM.
*
 * The callback is the mandatory field of the extplug handle. At least the
 * transfer callback must be set before calling this function.
*/
int snd_pcm_extplug_create(snd_pcm_extplug_t *extplug, const char *name,
			   snd_config_t *root, snd_config_t *slave_conf,
			   snd_pcm_stream_t stream, int mode)
{
	extplug_priv_t *ext;
	int err;
	snd_pcm_t *spcm, *pcm;
	snd_config_t *sconf;

	assert(root);
	assert(extplug && extplug->callback);
	/* the transfer callback is the only mandatory callback */
	assert(extplug->callback->transfer);
	assert(slave_conf);

	/* We support 1.0.0 to current */
	if (extplug->version < 0x010000 ||
	    extplug->version > SND_PCM_EXTPLUG_VERSION) {
		SNDERR("extplug: Plugin version mismatch: 0x%x\n",
		       extplug->version);
		return -ENXIO;
	}

	/* resolve and open the slave PCM from the "slave" config subtree */
	err = snd_pcm_slave_conf(root, slave_conf, &sconf, 0);
	if (err < 0)
		return err;
	err = snd_pcm_open_slave(&spcm, root, sconf, stream, mode, NULL);
	snd_config_delete(sconf);
	if (err < 0)
		return err;

	ext = calloc(1, sizeof(*ext));
	if (! ext) {
		/* BUGFIX: don't leak the already-opened slave PCM */
		snd_pcm_close(spcm);
		return -ENOMEM;
	}

	ext->data = extplug;
	extplug->stream = stream;

	/* hook the extplug read/write skeletons into the generic plugin core */
	snd_pcm_plugin_init(&ext->plug);
	ext->plug.read = snd_pcm_extplug_read_areas;
	ext->plug.write = snd_pcm_extplug_write_areas;
	ext->plug.undo_read = snd_pcm_plugin_undo_read_generic;
	ext->plug.undo_write = snd_pcm_plugin_undo_write_generic;
	ext->plug.gen.slave = spcm;
	ext->plug.gen.close_slave = 1;
	/* the init callback was introduced with protocol version 1.0.1 */
	if (extplug->version >= 0x010001 && extplug->callback->init)
		ext->plug.init = snd_pcm_extplug_init;

	err = snd_pcm_new(&pcm, SND_PCM_TYPE_EXTPLUG, name, stream, mode);
	if (err < 0) {
		/* BUGFIX: the slave PCM was leaked on this error path;
		 * close_slave hasn't taken effect yet since pcm creation failed */
		snd_pcm_close(spcm);
		free(ext);
		return err;
	}
	extplug->pcm = pcm;
	pcm->ops = &snd_pcm_extplug_ops;
	pcm->fast_ops = &snd_pcm_plugin_fast_ops;
	pcm->private_data = ext;
	/* mirror the slave's poll descriptor so the app polls on the slave */
	pcm->poll_fd = spcm->poll_fd;
	pcm->poll_events = spcm->poll_events;
	snd_pcm_set_hw_ptr(pcm, &ext->plug.hw_ptr, -1, 0);
	snd_pcm_set_appl_ptr(pcm, &ext->plug.appl_ptr, -1, 0);
	return 0;
}
/**
* \brief Delete the extplug instance
* \param extplug the extplug handle to delete
* \return 0 if successful, or a negative error code
*
* The destructor of extplug instance.
* Closes the PCM and deletes the associated resources.
*/
int snd_pcm_extplug_delete(snd_pcm_extplug_t *extplug)
{
	/* closing the PCM runs snd_pcm_extplug_close, which frees the
	 * private data and closes the slave as well */
	return snd_pcm_close(extplug->pcm);
}
/**
* \brief Reset extplug parameters
* \param extplug the extplug handle
*
* Resets the all parameters for the given extplug handle.
*/
void snd_pcm_extplug_params_reset(snd_pcm_extplug_t *extplug)
{
	extplug_priv_t *ext = extplug->pcm->private_data;
	/* drop every list/minmax constraint on both client and slave side */
	clear_ext_params(ext);
}
/**
* \brief Set slave parameter as the list
* \param extplug the extplug handle
* \param type parameter type
* \param num_list number of available values
* \param list the list of available values
* \return 0 if successful, or a negative error code
*
* Sets the slave parameter as the list.
* The available values of the given parameter type of the slave PCM is restricted
* to the ones of the given list.
*/
int snd_pcm_extplug_set_slave_param_list(snd_pcm_extplug_t *extplug, int type, unsigned int num_list, const unsigned int *list)
{
	extplug_priv_t *ext = extplug->pcm->private_data;
	/* BUGFIX: the range check used '&&', which can never be true, so an
	 * out-of-range type slipped through and indexed past sparams[] */
	if (type < 0 || type >= SND_PCM_EXTPLUG_HW_PARAMS) {
		SNDERR("EXTPLUG: invalid parameter type %d", type);
		return -EINVAL;
	}
	return snd_ext_parm_set_list(&ext->sparams[type], num_list, list);
}
/**
* \brief Set slave parameter as the min/max values
* \param extplug the extplug handle
* \param type parameter type
* \param min the minimum value
* \param max the maximum value
* \return 0 if successful, or a negative error code
*
* Sets the slave parameter as the min/max values.
* The available values of the given parameter type of the slave PCM is restricted
* between the given minimum and maximum values.
*/
int snd_pcm_extplug_set_slave_param_minmax(snd_pcm_extplug_t *extplug, int type, unsigned int min, unsigned int max)
{
	extplug_priv_t *ext = extplug->pcm->private_data;
	/* BUGFIX: '&&' made this range check a no-op ('||' is required),
	 * letting invalid types index past the sparams[] array */
	if (type < 0 || type >= SND_PCM_EXTPLUG_HW_PARAMS) {
		SNDERR("EXTPLUG: invalid parameter type %d", type);
		return -EINVAL;
	}
	/* mask-type parameters (e.g. format) take a list, not a range */
	if (is_mask_type(type)) {
		SNDERR("EXTPLUG: invalid parameter type %d", type);
		return -EINVAL;
	}
	return snd_ext_parm_set_minmax(&ext->sparams[type], min, max);
}
/**
* \brief Set master parameter as the list
* \param extplug the extplug handle
* \param type parameter type
* \param num_list number of available values
* \param list the list of available values
* \return 0 if successful, or a negative error code
*
* Sets the master parameter as the list.
* The available values of the given parameter type of this PCM (as input) is restricted
* to the ones of the given list.
*/
int snd_pcm_extplug_set_param_list(snd_pcm_extplug_t *extplug, int type, unsigned int num_list, const unsigned int *list)
{
	extplug_priv_t *ext = extplug->pcm->private_data;
	/* BUGFIX: '&&' made this range check a no-op ('||' is required),
	 * letting invalid types index past the params[] array */
	if (type < 0 || type >= SND_PCM_EXTPLUG_HW_PARAMS) {
		SNDERR("EXTPLUG: invalid parameter type %d", type);
		return -EINVAL;
	}
	return snd_ext_parm_set_list(&ext->params[type], num_list, list);
}
/**
* \brief Set master parameter as the min/max values
* \param extplug the extplug handle
* \param type parameter type
* \param min the minimum value
* \param max the maximum value
* \return 0 if successful, or a negative error code
*
* Sets the master parameter as the min/max values.
* The available values of the given parameter type of this PCM (as input) is restricted
* between the given minimum and maximum values.
*/
int snd_pcm_extplug_set_param_minmax(snd_pcm_extplug_t *extplug, int type, unsigned int min, unsigned int max)
{
	extplug_priv_t *ext = extplug->pcm->private_data;
	/* BUGFIX: '&&' made this range check a no-op ('||' is required),
	 * letting invalid types index past the params[] array */
	if (type < 0 || type >= SND_PCM_EXTPLUG_HW_PARAMS) {
		SNDERR("EXTPLUG: invalid parameter type %d", type);
		return -EINVAL;
	}
	/* mask-type parameters (e.g. format) take a list, not a range */
	if (is_mask_type(type)) {
		SNDERR("EXTPLUG: invalid parameter type %d", type);
		return -EINVAL;
	}
	return snd_ext_parm_set_minmax(&ext->params[type], min, max);
}
|
/**
 * Represents a game mode.
 *
 * Concrete modes must render the scene ({@link #display()}) and the
 * cylinders ({@link #drawCylinders()}); the mouse handlers are no-ops by
 * default and may be overridden per mode.
 */
abstract class Mode {
    // stage 6 ("tappa 6" in the original notes)
    boolean isPaused;   // whether the mode is currently paused
    boolean isPlayMode; // whether the mode is in play (vs. edit) state
    /**
     * Draws the plane and the ball.
     */
    public abstract void display();
    /**
     * Implements the mouse dragged action for the mode. No-op by default.
     */
    public void mouseDragged(){}
    /**
     * Implements the mouse pressed action for the mode. No-op by default.
     */
    public void mousePressed(){}
    /**
     * Implements the mouse wheel action for the mode. No-op by default.
     */
    public void mouseWheel(MouseEvent event){}
    /**
     * Draws the cylinders on the plane.
     */
    public abstract void drawCylinders();
}
package main
import (
"flag"
"math/rand"
"os"
"time"
"github.com/golang/glog"
"github.com/google/uuid"
"github.com/openshift/cluster-version-operator/pkg/autoupdate"
"github.com/openshift/cluster-version-operator/pkg/cvo"
clientset "github.com/openshift/cluster-version-operator/pkg/generated/clientset/versioned"
informers "github.com/openshift/cluster-version-operator/pkg/generated/informers/externalversions"
"github.com/openshift/cluster-version-operator/pkg/version"
"github.com/spf13/cobra"
"k8s.io/api/core/v1"
apiext "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
apiextinformers "k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
kubeinformers "k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/tools/leaderelection"
"k8s.io/client-go/tools/leaderelection/resourcelock"
"k8s.io/client-go/tools/record"
)
const (
	// minResyncPeriod is the lower bound for informer resync; a random
	// factor in [1, 2) is applied on top (see resyncPeriod).
	minResyncPeriod = 10 * time.Second
	// Leader-election timings.
	leaseDuration = 90 * time.Second
	renewDeadline = 45 * time.Second
	retryPeriod   = 30 * time.Second
)
var (
	// startCmd wires the "start" subcommand to runStartCmd.
	startCmd = &cobra.Command{
		Use:   "start",
		Short: "Starts Cluster Version Operator",
		Long:  "",
		Run:   runStartCmd,
	}
	// startOpts holds the flag values parsed for the start command.
	startOpts struct {
		kubeconfig       string
		nodeName         string
		enableAutoUpdate bool
	}
)
// init registers the start subcommand and its flags on the root command.
func init() {
	rootCmd.AddCommand(startCmd)
	startCmd.PersistentFlags().StringVar(&startOpts.kubeconfig, "kubeconfig", "", "Kubeconfig file to access a remote cluster (testing only)")
	startCmd.PersistentFlags().StringVar(&startOpts.nodeName, "node-name", "", "kubernetes node name CVO is scheduled on.")
	startCmd.PersistentFlags().BoolVar(&startOpts.enableAutoUpdate, "enable-auto-update", true, "Enables the autoupdate controller.")
}
// runStartCmd is the cobra Run handler for "start": it resolves the node
// name, builds API clients, and runs the CVO controllers under leader
// election. It never returns normally.
func runStartCmd(cmd *cobra.Command, args []string) {
	flag.Set("logtostderr", "true")
	flag.Parse()
	// To help debugging, immediately log version
	glog.Infof("%s", version.String)
	if startOpts.nodeName == "" {
		// Fall back to the NODE_NAME env var (downward API) when the
		// flag wasn't given.
		name, ok := os.LookupEnv("NODE_NAME")
		if !ok || name == "" {
			glog.Fatalf("node-name is required")
		}
		startOpts.nodeName = name
	}
	cb, err := newClientBuilder(startOpts.kubeconfig)
	if err != nil {
		glog.Fatalf("error creating clients: %v", err)
	}
	stopCh := make(chan struct{})
	// run starts all controllers and informer factories; it blocks forever
	// and is only invoked once this process becomes the leader.
	run := func(stop <-chan struct{}) {
		ctx := createControllerContext(cb, stopCh)
		if err := startControllers(ctx); err != nil {
			glog.Fatalf("error starting controllers: %v", err)
		}
		ctx.InformerFactory.Start(ctx.Stop)
		ctx.KubeInformerFactory.Start(ctx.Stop)
		ctx.APIExtInformerFactory.Start(ctx.Stop)
		close(ctx.InformersStarted)
		select {}
	}
	leaderelection.RunOrDie(leaderelection.LeaderElectionConfig{
		Lock:          createResourceLock(cb),
		LeaseDuration: leaseDuration,
		RenewDeadline: renewDeadline,
		RetryPeriod:   retryPeriod,
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: run,
			OnStoppedLeading: func() {
				// Losing the lease is fatal; process restart re-enters
				// the election.
				glog.Fatalf("leaderelection lost")
			},
		},
	})
	panic("unreachable")
}
// createResourceLock builds the ConfigMap-based leader-election lock.
// The lock identity is hostname plus a random UUID so two processes on the
// same host cannot both become active.
func createResourceLock(cb *clientBuilder) resourcelock.Interface {
	recorder := record.
		NewBroadcaster().
		NewRecorder(runtime.NewScheme(), v1.EventSource{Component: componentName})
	id, err := os.Hostname()
	if err != nil {
		glog.Fatalf("error creating lock: %v", err)
	}
	// NOTE(review): this local shadows the imported uuid package within the
	// rest of the function — harmless here, but worth renaming.
	uuid, err := uuid.NewRandom()
	if err != nil {
		glog.Fatalf("Failed to generate UUID: %v", err)
	}
	// add a uniquifier so that two processes on the same host don't accidentally both become active
	id = id + "_" + uuid.String()
	return &resourcelock.ConfigMapLock{
		ConfigMapMeta: metav1.ObjectMeta{
			Namespace: componentNamespace,
			Name:      componentName,
		},
		Client: cb.KubeClientOrDie("leader-election").CoreV1(),
		LockConfig: resourcelock.ResourceLockConfig{
			Identity:      id,
			EventRecorder: recorder,
		},
	}
}
// resyncPeriod returns a factory producing a jittered informer resync
// interval: minResyncPeriod scaled by a random factor in [1, 2).
func resyncPeriod() func() time.Duration {
	return func() time.Duration {
		jitter := 1.0 + rand.Float64()
		return time.Duration(jitter * float64(minResyncPeriod.Nanoseconds()))
	}
}
// clientBuilder constructs API clients from a single shared rest.Config,
// tagging each with a caller-supplied user agent.
type clientBuilder struct {
	config *rest.Config
}
// RestConfig returns a defensive copy of the underlying rest.Config so
// callers can mutate it freely.
func (cb *clientBuilder) RestConfig() *rest.Config {
	return rest.CopyConfig(cb.config)
}
// ClientOrDie returns a cluster-version clientset tagged with the given
// user-agent name, exiting the process on failure.
func (cb *clientBuilder) ClientOrDie(name string) clientset.Interface {
	return clientset.NewForConfigOrDie(rest.AddUserAgent(cb.config, name))
}

// KubeClientOrDie returns a core Kubernetes clientset tagged with the given
// user-agent name, exiting the process on failure.
func (cb *clientBuilder) KubeClientOrDie(name string) kubernetes.Interface {
	return kubernetes.NewForConfigOrDie(rest.AddUserAgent(cb.config, name))
}

// APIExtClientOrDie returns an apiextensions clientset tagged with the given
// user-agent name, exiting the process on failure.
func (cb *clientBuilder) APIExtClientOrDie(name string) apiext.Interface {
	return apiext.NewForConfigOrDie(rest.AddUserAgent(cb.config, name))
}
// newClientBuilder creates a clientBuilder from an explicit kubeconfig path
// (testing) or, when the path is empty, from in-cluster configuration.
func newClientBuilder(kubeconfig string) (*clientBuilder, error) {
	var config *rest.Config
	var err error
	if kubeconfig != "" {
		glog.V(4).Infof("Loading kube client config from path %q", kubeconfig)
		config, err = clientcmd.BuildConfigFromFlags("", kubeconfig)
	} else {
		glog.V(4).Infof("Using in-cluster kube client config")
		config, err = rest.InClusterConfig()
	}
	if err != nil {
		return nil, err
	}
	return &clientBuilder{
		config: config,
	}, nil
}
// controllerContext bundles everything the controllers need at startup:
// the client builder, the three shared informer factories, the stop channel,
// and a channel closed once all informers have been started.
type controllerContext struct {
	ClientBuilder         *clientBuilder
	InformerFactory       informers.SharedInformerFactory
	KubeInformerFactory   kubeinformers.SharedInformerFactory
	APIExtInformerFactory apiextinformers.SharedInformerFactory
	Stop                  <-chan struct{}
	InformersStarted      chan struct{}
	ResyncPeriod          func() time.Duration
}
// createControllerContext builds the shared informer factories (each with an
// independently jittered resync period) and wraps them in a controllerContext.
func createControllerContext(cb *clientBuilder, stop <-chan struct{}) *controllerContext {
	client := cb.ClientOrDie("shared-informer")
	kubeClient := cb.KubeClientOrDie("kube-shared-informer")
	apiExtClient := cb.APIExtClientOrDie("apiext-shared-informer")
	sharedInformers := informers.NewSharedInformerFactory(client, resyncPeriod()())
	kubeSharedInformer := kubeinformers.NewSharedInformerFactory(kubeClient, resyncPeriod()())
	apiExtSharedInformer := apiextinformers.NewSharedInformerFactory(apiExtClient, resyncPeriod()())
	return &controllerContext{
		ClientBuilder:         cb,
		InformerFactory:       sharedInformers,
		KubeInformerFactory:   kubeSharedInformer,
		APIExtInformerFactory: apiExtSharedInformer,
		Stop:                  stop,
		InformersStarted:      make(chan struct{}),
		ResyncPeriod:          resyncPeriod(),
	}
}
// startControllers launches the CVO controller and, when enabled via the
// --enable-auto-update flag, the autoupdate controller. Each runs with two
// workers until ctx.Stop is closed.
func startControllers(ctx *controllerContext) error {
	go cvo.New(
		startOpts.nodeName,
		componentNamespace, componentName,
		ctx.InformerFactory.Clusterversion().V1().CVOConfigs(),
		ctx.InformerFactory.Operatorstatus().V1().OperatorStatuses(),
		ctx.APIExtInformerFactory.Apiextensions().V1beta1().CustomResourceDefinitions(),
		ctx.KubeInformerFactory.Apps().V1().Deployments(),
		ctx.ClientBuilder.RestConfig(),
		ctx.ClientBuilder.ClientOrDie(componentName),
		ctx.ClientBuilder.KubeClientOrDie(componentName),
		ctx.ClientBuilder.APIExtClientOrDie(componentName),
	).Run(2, ctx.Stop)
	if startOpts.enableAutoUpdate {
		go autoupdate.New(
			componentNamespace, componentName,
			ctx.InformerFactory.Clusterversion().V1().CVOConfigs(),
			ctx.InformerFactory.Operatorstatus().V1().OperatorStatuses(),
			ctx.ClientBuilder.ClientOrDie(componentName),
			ctx.ClientBuilder.KubeClientOrDie(componentName),
		).Run(2, ctx.Stop)
	}
	return nil
}
|
// NewRowNumberOperator creates a new exec.Operator that computes window
// function ROW_NUMBER. outputColIdx specifies in which exec.Vec the operator
// should put its output (if there is no such column, a new column is
// appended).
func NewRowNumberOperator(input Operator, outputColIdx int, partitionColIdx int) Operator {
base := rowNumberBase{
OneInputNode: NewOneInputNode(input),
outputColIdx: outputColIdx,
partitionColIdx: partitionColIdx,
}
if partitionColIdx == -1 {
return &rowNumberNoPartitionOp{base}
}
return &rowNumberWithPartitionOp{base}
} |
package main

import (
	cmd "github.com/rafaelbreno/go-api-template/api/cmd"
)

// main delegates to the api package's bootstrap entry point.
// NOTE(review): "Boostrap" (sic) is the exported name in the cmd package;
// fixing the typo requires renaming it there first.
func main() {
	cmd.Boostrap()
}
|
import { NgModule } from '@angular/core';
import { PagesComponent } from './pages.component';
import { PagesRoutingModule } from './pages-routing.module';
import { ThemeModule } from '../@theme/theme.module';
import { BuddyComponent } from './buddy/buddy.component';
import { BuddyinfoComponent } from './buddyinfo/buddyinfo.component';
import { ChatComponent } from './chat/chat.component';
import { RoomComponent } from './room/room.component';
import { RoominfoComponent } from './roominfo/roominfo.component';
import { CallComponent } from './call/call.component';
import { SearchComponent } from './search/search.component';
import { SettingComponent } from './setting/setting.component';
import { Ng2SmartTableModule } from 'ng2-smart-table';
// Components declared directly by the pages module shell.
const PAGES_COMPONENTS = [
  PagesComponent,
];

/**
 * Feature module bundling all top-level page components (buddy, chat, room,
 * call, search, settings) together with their routing and the shared theme.
 */
@NgModule({
  imports: [
    PagesRoutingModule,
    ThemeModule,
    Ng2SmartTableModule,
  ],
  declarations: [
    ...PAGES_COMPONENTS,
    BuddyComponent,
    BuddyinfoComponent,
    ChatComponent,
    RoomComponent,
    RoominfoComponent,
    CallComponent,
    SearchComponent,
    SettingComponent,
  ],
  providers: [
  ],
})
export class PagesModule {
}
|
def resize_by_width(image: Image.Image, nw: int) -> Image.Image:
    """Scale *image* to width *nw*, preserving the aspect ratio.

    Returns the original image object unchanged when it is already *nw*
    pixels wide.
    """
    w, h = image.size
    if w == nw:
        return image
    w_ratio = float(nw) / float(w)
    new_size = (nw, round(float(h) * w_ratio))
    # BUGFIX/compat: Image.ANTIALIAS was removed in Pillow 10; LANCZOS has
    # been its exact alias since Pillow 2.7, so behavior is unchanged.
    return image.resize(new_size, Image.LANCZOS)
def clear_state(self, key_space):
    """Delete every child key under *key_space* in etcd.

    A missing key space (etcd.EtcdNotFile) is silently ignored, so the
    call is idempotent.
    """
    LOG.debug("Clearing key space: %s", key_space)
    try:
        for node in self.etcd_client.read(key_space).children:
            self.etcd_client.delete(node.key)
    except etcd.EtcdNotFile:
        pass
<gh_stars>1-10
#
# Top-level program for XML test suite
#
import regrtest

# Drop the standard CPython test list so only tests discovered in this
# directory are run.
del regrtest.STDTESTS[:]

def main():
    # Discover test_* modules in the current directory and run them there.
    tests = regrtest.findtests('.')
    regrtest.main( tests, testdir = '.' )

if __name__ == '__main__':
    main()
|
/**
 * Parses and sets a comma-delimited target list.
 *
 * Any previously stored targets are discarded before the new string is
 * parsed and appended via AddToTargetList().
 */
void tcAreaGoal::SetTargetList(const std::string& targets)
{
    targetList.clear();
    AddToTargetList(targets);
}
def patterns():
    """Build the pattern dictionary for the six study items.

    Returns a dict with the item labels, their localist ('loc') and
    category ('cat') vector representations, and the corresponding
    similarity matrices (identity for loc, cat @ cat.T for cat).
    """
    category = np.array([[1, 0, 1, 1, 0, 1], [0, 1, 0, 0, 1, 0]]).T
    item_names = np.array(
        ['absence', 'hollow', 'pupil', 'fountain', 'piano', 'pillow']
    )
    return {
        'items': item_names,
        'vector': {'loc': np.eye(6), 'cat': category},
        'similarity': {'loc': np.eye(6), 'cat': np.dot(category, category.T)},
    }
<reponame>ClintLawson/hiring-exercise
import React from "react";
import { CircularProgress, Modal } from "@mui/material";
// Props for LoadingPopup.
interface props {
  // When true, the modal overlay with the spinner is shown.
  loading: boolean;
}

// Full-screen MUI Modal overlay with a centered indeterminate spinner,
// displayed while `loading` is true.
const LoadingPopup: React.FC<props> = ({ loading }) => {
  return (
    <Modal open={loading} style={{ display: "flex" }}>
      <CircularProgress color="inherit" style={{ margin: "auto" }} />
    </Modal>
  );
};

export default LoadingPopup;
|
/**
* The implementation that is used to create transfer jobs that callout to the new globus-url-copy
* client, that support multiple file transfers
*
* <p>In order to use the transfer implementation implemented by this class,
*
* <pre>
* - the property pegasus.transfer.*.impl must be set to value TPTGUC.
* </pre>
*
* <p>There should be an entry in the transformation catalog with the fully qualified name as <code>
* globus::guc</code> for all the sites where workflow is run, or on the local site in case of third
* party transfers.
*
* <p>Pegasus can automatically construct the path to the globus-url-copy client, if the environment
* variable GLOBUS_LOCATION is specified in the site catalog for the site.
*
* <p>The arguments with which the client is invoked can be specified
*
* <pre>
* - by specifying the property pegasus.transfer.arguments
* - associating the Pegasus profile key transfer.arguments
* </pre>
*
* @author Karan Vahi
* @version $Revision$
*/
public class TPTGUC extends GUC {
/**
* The overloaded constructor, that is called by the Factory to load the class.
*
* @param bag the bag of Pegasus initialization objects.
*/
public TPTGUC(PegasusBag bag) {
super(bag);
}
/**
* Return a boolean indicating whether the transfers to be done always in a third party transfer
* mode. A value of false, results in the direct or peer to peer transfers being done.
*
* <p>A value of false does not preclude third party transfers. They still can be done, by
* setting the property "pegasus.transfer.*.thirdparty.sites".
*
* @return true always
*/
public boolean useThirdPartyTransferAlways() {
return true;
}
/**
* It constructs the arguments to the transfer executable that need to be passed to the
* executable referred to in this transfer mode.
*
* @param job the object containing the transfer node.
* @return the argument string
*/
protected String generateArgumentString(TransferJob job) {
StringBuffer sb = new StringBuffer();
if (job.vdsNS.containsKey(Pegasus.TRANSFER_ARGUMENTS_KEY)) {
sb.append(job.vdsNS.removeKey(Pegasus.TRANSFER_ARGUMENTS_KEY));
} else {
// just add the default -p option
sb.append(" -p ").append(mNumOfTXStreams);
}
// always append -cd option and verbose option
sb.append(" -cd -vb");
// specify the name of the stdin file on command line
// since this transfer mode only executes on submit node
// we can give full path to the stdin
File f = new File(mPOptions.getSubmitDirectory(), job.getStdIn());
sb.append(" -f ").append(f.getAbsolutePath());
return sb.toString();
}
/**
* Makes sure the stdin is transferred by the Condor File Transfer Mechanism. In addition, the
* stdin is set to null, after the file has been marked for transfer by Condor File Transfer
* Mechanism.
*
* @param job the <code>TransferJob</code> that has been created.
*/
public void postProcess(TransferJob job) {
super.postProcess(job);
job.setStdIn("");
}
} |
/**
* Checks whether the given lat and lon values are not null and within their
* allowed boundaries.
*
* @param lat
* @param lon
* @return
*/
public static boolean isValid(Double lat, Double lon) {
return lat != null && lon != null
&& lat >= -90.0 && lat <= 90.0
&& lon >= -180.0 && lon <= 180.0;
} |
def _cell_center_position(cell: Cell) -> Position:
    """Return the (x, y, z) world-space center of the die occupying *cell*."""
    half_grid = N_CELLS_PER_ROW / 2
    half_die = DIE_WIDTH / 2
    px = (cell[0] - half_grid) * DIE_WIDTH + half_die
    py = (cell[1] - half_grid) * DIE_WIDTH + half_die
    # the die rests on the plane, so its center sits half a width up
    return (px, py, half_die)
<reponame>nqn/kubernetes
/*
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apiserver
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"net/http/httptest"
"net/url"
"reflect"
"strings"
"sync"
"testing"
"time"
"code.google.com/p/go.net/websocket"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
"github.com/GoogleCloudPlatform/kubernetes/pkg/watch"
)
// convert is an identity conversion used to register pass-through
// external/internal conversions for the test types.
func convert(obj interface{}) (interface{}, error) {
	return obj, nil
}
// init registers the test types with both the internal ("") and v1beta1
// schemes and installs identity conversions for them.
func init() {
	api.AddKnownTypes("", Simple{}, SimpleList{})
	api.AddKnownTypes("v1beta1", Simple{}, SimpleList{})
	api.AddExternalConversion("Simple", convert)
	api.AddInternalConversion("Simple", convert)
	api.AddExternalConversion("SimpleList", convert)
	api.AddInternalConversion("SimpleList", convert)
}
// TODO: This doesn't reduce typing enough to make it worth the less readable errors. Remove.
// expectNoError fails the test (without stopping it) when err is non-nil.
func expectNoError(t *testing.T, err error) {
	if err != nil {
		t.Errorf("Unexpected error: %#v", err)
	}
}
// Simple is a minimal API object used to exercise the apiserver machinery.
type Simple struct {
	api.JSONBase `yaml:",inline" json:",inline"`
	Name         string `yaml:"name,omitempty" json:"name,omitempty"`
}

// SimpleList is the list counterpart of Simple.
type SimpleList struct {
	api.JSONBase `yaml:",inline" json:",inline"`
	Items        []Simple `yaml:"items,omitempty" json:"items,omitempty"`
}
// SimpleRESTStorage is a scriptable RESTStorage fake: per-verb errors are
// injected via the errors map, and the mutating verbs record their input
// on the struct for later assertions.
type SimpleRESTStorage struct {
	errors  map[string]error // per-verb injected errors, keyed by verb name
	list    []Simple         // canned response for List
	item    Simple           // canned response for Get
	deleted string           // id passed to the last Delete call
	updated Simple           // object passed to the last Update call
	created Simple           // object passed to the last Create call
	// Valid if WatchAll or WatchSingle is called
	fakeWatch *watch.FakeWatcher
	// Set if WatchSingle is called
	requestedID string
	// If non-nil, called inside the WorkFunc when answering update, delete, create.
	// obj receives the original input to the update, delete, or create call.
	injectedFunction func(obj interface{}) (returnObj interface{}, err error)
}
// List returns the canned item list (the selector is ignored) together with
// any injected "list" error.
func (storage *SimpleRESTStorage) List(labels.Selector) (interface{}, error) {
	result := &SimpleList{
		Items: storage.list,
	}
	return result, storage.errors["list"]
}
// Get returns the canned item (the id is ignored) plus any injected "get" error.
func (storage *SimpleRESTStorage) Get(id string) (interface{}, error) {
	return storage.item, storage.errors["get"]
}
// Delete records the deleted id, then resolves asynchronously to either the
// injected function's result or a success Status.
func (storage *SimpleRESTStorage) Delete(id string) (<-chan interface{}, error) {
	storage.deleted = id
	if err := storage.errors["delete"]; err != nil {
		return nil, err
	}
	return MakeAsync(func() (interface{}, error) {
		if storage.injectedFunction != nil {
			return storage.injectedFunction(id)
		}
		return api.Status{Status: api.StatusSuccess}, nil
	}), nil
}
// Extract decodes body into a Simple value.
// NOTE(review): the DecodeInto error is discarded — only the injected
// "extract" error is reported. Confirm that is intentional for this fake.
func (storage *SimpleRESTStorage) Extract(body []byte) (interface{}, error) {
	var item Simple
	api.DecodeInto(body, &item)
	return item, storage.errors["extract"]
}
// Create records the created object, then resolves asynchronously to the
// injected function's result or echoes the object back.
func (storage *SimpleRESTStorage) Create(obj interface{}) (<-chan interface{}, error) {
	storage.created = obj.(Simple)
	if err := storage.errors["create"]; err != nil {
		return nil, err
	}
	return MakeAsync(func() (interface{}, error) {
		if storage.injectedFunction != nil {
			return storage.injectedFunction(obj)
		}
		return obj, nil
	}), nil
}
// Update records the object, then either fails with the injected "update"
// error or answers asynchronously, echoing the object back.
func (storage *SimpleRESTStorage) Update(obj interface{}) (<-chan interface{}, error) {
	storage.updated = obj.(Simple)
	err := storage.errors["update"]
	if err != nil {
		return nil, err
	}
	work := func() (interface{}, error) {
		if fn := storage.injectedFunction; fn != nil {
			return fn(obj)
		}
		return obj, nil
	}
	return MakeAsync(work), nil
}
// Implement ResourceWatcher.
// WatchAll either fails with the injected "watchAll" error or hands back a
// fresh fake watcher, which it also stores for the test to drive.
func (storage *SimpleRESTStorage) WatchAll() (watch.Interface, error) {
	err := storage.errors["watchAll"]
	if err != nil {
		return nil, err
	}
	storage.fakeWatch = watch.NewFake()
	return storage.fakeWatch, nil
}
// Implement ResourceWatcher.
// WatchSingle records the requested id, then either fails with the injected
// "watchSingle" error or hands back a fresh fake watcher.
func (storage *SimpleRESTStorage) WatchSingle(id string) (watch.Interface, error) {
	storage.requestedID = id
	err := storage.errors["watchSingle"]
	if err != nil {
		return nil, err
	}
	storage.fakeWatch = watch.NewFake()
	return storage.fakeWatch, nil
}
// extractBody reads and closes the response body, decodes it into object, and
// returns the raw body text alongside any read/decode error.
func extractBody(response *http.Response, object interface{}) (string, error) {
	defer response.Body.Close()
	body, err := ioutil.ReadAll(response.Body)
	// NOTE(review): leftover debug logging — consider removing; kept here
	// because deleting the only use of "log" could orphan the import.
	log.Printf("FOO: %s", body)
	if err != nil {
		return string(body), err
	}
	err = api.DecodeInto(body, object)
	return string(body), err
}
func TestSimpleList(t *testing.T) {
storage := map[string]RESTStorage{}
simpleStorage := SimpleRESTStorage{}
storage["simple"] = &simpleStorage
handler := New(storage, "/prefix/version")
server := httptest.NewServer(handler)
resp, err := http.Get(server.URL + "/prefix/version/simple")
expectNoError(t, err)
if resp.StatusCode != http.StatusOK {
t.Errorf("Unexpected status: %d, Expected: %d, %#v", resp.StatusCode, http.StatusOK, resp)
}
}
// TestErrorList verifies that a storage-level "list" error surfaces as a 500.
func TestErrorList(t *testing.T) {
	storage := map[string]RESTStorage{}
	simpleStorage := SimpleRESTStorage{
		errors: map[string]error{"list": fmt.Errorf("test Error")},
	}
	storage["simple"] = &simpleStorage
	handler := New(storage, "/prefix/version")
	server := httptest.NewServer(handler)
	resp, err := http.Get(server.URL + "/prefix/version/simple")
	expectNoError(t, err)
	if resp.StatusCode != http.StatusInternalServerError {
		// Fixed: the message previously reported http.StatusOK as the
		// expected code even though the assertion checks for 500.
		t.Errorf("Unexpected status: %d, Expected: %d, %#v", resp.StatusCode, http.StatusInternalServerError, resp)
	}
}
func TestNonEmptyList(t *testing.T) {
storage := map[string]RESTStorage{}
simpleStorage := SimpleRESTStorage{
list: []Simple{
{
JSONBase: api.JSONBase{Kind: "Simple"},
Name: "foo",
},
},
}
storage["simple"] = &simpleStorage
handler := New(storage, "/prefix/version")
server := httptest.NewServer(handler)
resp, err := http.Get(server.URL + "/prefix/version/simple")
expectNoError(t, err)
if resp.StatusCode != http.StatusOK {
t.Errorf("Unexpected status: %d, Expected: %d, %#v", resp.StatusCode, http.StatusOK, resp)
}
var listOut SimpleList
body, err := extractBody(resp, &listOut)
expectNoError(t, err)
if len(listOut.Items) != 1 {
t.Errorf("Unexpected response: %#v", listOut)
return
}
if listOut.Items[0].Name != simpleStorage.list[0].Name {
t.Errorf("Unexpected data: %#v, %s", listOut.Items[0], string(body))
}
}
// TestGet verifies that GET /simple/{id} returns the stored item.
func TestGet(t *testing.T) {
	storage := map[string]RESTStorage{}
	simpleStorage := SimpleRESTStorage{
		item: Simple{
			Name: "foo",
		},
	}
	storage["simple"] = &simpleStorage
	handler := New(storage, "/prefix/version")
	server := httptest.NewServer(handler)
	resp, err := http.Get(server.URL + "/prefix/version/simple/id")
	// Fixed: the error from http.Get was previously overwritten by the
	// extractBody call below without ever being checked, and the status
	// code was never asserted.
	expectNoError(t, err)
	if resp.StatusCode != http.StatusOK {
		t.Errorf("Unexpected status: %d, Expected: %d, %#v", resp.StatusCode, http.StatusOK, resp)
	}
	var itemOut Simple
	body, err := extractBody(resp, &itemOut)
	expectNoError(t, err)
	if itemOut.Name != simpleStorage.item.Name {
		t.Errorf("Unexpected data: %#v, expected %#v (%s)", itemOut, simpleStorage.item, string(body))
	}
}
func TestGetMissing(t *testing.T) {
storage := map[string]RESTStorage{}
simpleStorage := SimpleRESTStorage{
errors: map[string]error{"get": NewNotFoundErr("simple", "id")},
}
storage["simple"] = &simpleStorage
handler := New(storage, "/prefix/version")
server := httptest.NewServer(handler)
resp, err := http.Get(server.URL + "/prefix/version/simple/id")
expectNoError(t, err)
if resp.StatusCode != http.StatusNotFound {
t.Errorf("Unexpected response %#v", resp)
}
}
// TestDelete verifies that DELETE /simple/{id} reaches storage with the id.
func TestDelete(t *testing.T) {
	storage := map[string]RESTStorage{}
	simpleStorage := SimpleRESTStorage{}
	ID := "id"
	storage["simple"] = &simpleStorage
	handler := New(storage, "/prefix/version")
	server := httptest.NewServer(handler)
	client := http.Client{}
	request, err := http.NewRequest("DELETE", server.URL+"/prefix/version/simple/"+ID, nil)
	// Fixed: the error from http.NewRequest was previously overwritten by
	// client.Do without being checked.
	expectNoError(t, err)
	_, err = client.Do(request)
	expectNoError(t, err)
	if simpleStorage.deleted != ID {
		t.Errorf("Unexpected delete: %s, expected %s", simpleStorage.deleted, ID)
	}
}
// TestDeleteMissing expects a 404 when storage reports NotFound for "delete".
func TestDeleteMissing(t *testing.T) {
	storage := map[string]RESTStorage{}
	ID := "id"
	simpleStorage := SimpleRESTStorage{
		errors: map[string]error{"delete": NewNotFoundErr("simple", ID)},
	}
	storage["simple"] = &simpleStorage
	handler := New(storage, "/prefix/version")
	server := httptest.NewServer(handler)
	client := http.Client{}
	request, err := http.NewRequest("DELETE", server.URL+"/prefix/version/simple/"+ID, nil)
	// Fixed: the error from http.NewRequest was previously overwritten by
	// client.Do without being checked.
	expectNoError(t, err)
	response, err := client.Do(request)
	expectNoError(t, err)
	if response.StatusCode != http.StatusNotFound {
		t.Errorf("Unexpected response %#v", response)
	}
}
// TestUpdate verifies that PUT /simple/{id} delivers the encoded body to
// storage.Update.
func TestUpdate(t *testing.T) {
	storage := map[string]RESTStorage{}
	simpleStorage := SimpleRESTStorage{}
	ID := "id"
	storage["simple"] = &simpleStorage
	handler := New(storage, "/prefix/version")
	server := httptest.NewServer(handler)
	item := Simple{
		Name: "bar",
	}
	body, err := api.Encode(item)
	expectNoError(t, err)
	client := http.Client{}
	request, err := http.NewRequest("PUT", server.URL+"/prefix/version/simple/"+ID, bytes.NewReader(body))
	// Fixed: the error from http.NewRequest was previously overwritten by
	// client.Do without being checked.
	expectNoError(t, err)
	_, err = client.Do(request)
	expectNoError(t, err)
	if simpleStorage.updated.Name != item.Name {
		t.Errorf("Unexpected update value %#v, expected %#v.", simpleStorage.updated, item)
	}
}
// TestUpdateMissing expects a 404 when storage reports NotFound for "update".
func TestUpdateMissing(t *testing.T) {
	storage := map[string]RESTStorage{}
	ID := "id"
	simpleStorage := SimpleRESTStorage{
		errors: map[string]error{"update": NewNotFoundErr("simple", ID)},
	}
	storage["simple"] = &simpleStorage
	handler := New(storage, "/prefix/version")
	server := httptest.NewServer(handler)
	item := Simple{
		Name: "bar",
	}
	body, err := api.Encode(item)
	expectNoError(t, err)
	client := http.Client{}
	request, err := http.NewRequest("PUT", server.URL+"/prefix/version/simple/"+ID, bytes.NewReader(body))
	// Fixed: the error from http.NewRequest was previously overwritten by
	// client.Do without being checked.
	expectNoError(t, err)
	response, err := client.Do(request)
	expectNoError(t, err)
	if response.StatusCode != http.StatusNotFound {
		t.Errorf("Unexpected response %#v", response)
	}
}
// TestBadPath expects a 404 for a URL outside the API prefix entirely.
func TestBadPath(t *testing.T) {
	server := httptest.NewServer(New(map[string]RESTStorage{}, "/prefix/version"))
	client := http.Client{}

	request, err := http.NewRequest("GET", server.URL+"/foobar", nil)
	expectNoError(t, err)
	response, err := client.Do(request)
	expectNoError(t, err)
	if response.StatusCode != http.StatusNotFound {
		t.Errorf("Unexpected response %#v", response)
	}
}
// TestMissingPath expects a 404 when only the bare prefix is requested.
func TestMissingPath(t *testing.T) {
	server := httptest.NewServer(New(map[string]RESTStorage{}, "/prefix/version"))
	client := http.Client{}

	request, err := http.NewRequest("GET", server.URL+"/prefix/version", nil)
	expectNoError(t, err)
	response, err := client.Do(request)
	expectNoError(t, err)
	if response.StatusCode != http.StatusNotFound {
		t.Errorf("Unexpected response %#v", response)
	}
}
// TestMissingStorage expects a 404 for a resource kind that has no storage.
func TestMissingStorage(t *testing.T) {
	server := httptest.NewServer(New(map[string]RESTStorage{
		"foo": &SimpleRESTStorage{},
	}, "/prefix/version"))
	client := http.Client{}

	request, err := http.NewRequest("GET", server.URL+"/prefix/version/foobar", nil)
	expectNoError(t, err)
	response, err := client.Do(request)
	expectNoError(t, err)
	if response.StatusCode != http.StatusNotFound {
		t.Errorf("Unexpected response %#v", response)
	}
}
// TestCreate posts an object and expects an async 202 with a Working status
// carrying an operation id in Details.
func TestCreate(t *testing.T) {
	simpleStorage := &SimpleRESTStorage{}
	handler := New(map[string]RESTStorage{
		"foo": simpleStorage,
	}, "/prefix/version")
	server := httptest.NewServer(handler)
	client := http.Client{}

	simple := Simple{
		Name: "foo",
	}
	// Fixed: the error from api.Encode was previously discarded.
	data, err := api.Encode(simple)
	expectNoError(t, err)
	request, err := http.NewRequest("POST", server.URL+"/prefix/version/foo", bytes.NewBuffer(data))
	expectNoError(t, err)
	response, err := client.Do(request)
	expectNoError(t, err)
	if response.StatusCode != http.StatusAccepted {
		t.Errorf("Unexpected response %#v", response)
	}

	var itemOut api.Status
	body, err := extractBody(response, &itemOut)
	expectNoError(t, err)
	if itemOut.Status != api.StatusWorking || itemOut.Details == "" {
		t.Errorf("Unexpected status: %#v (%s)", itemOut, string(body))
	}
}
// TestCreateNotFound verifies a NotFound error from storage.Create maps to 404.
func TestCreateNotFound(t *testing.T) {
	simpleStorage := &SimpleRESTStorage{
		// storage.Create can fail with not found error in theory.
		// See https://github.com/GoogleCloudPlatform/kubernetes/pull/486#discussion_r15037092.
		errors: map[string]error{"create": NewNotFoundErr("simple", "id")},
	}
	handler := New(map[string]RESTStorage{
		"foo": simpleStorage,
	}, "/prefix/version")
	server := httptest.NewServer(handler)
	client := http.Client{}

	simple := Simple{Name: "foo"}
	// Fixed: the error from api.Encode was previously discarded.
	data, err := api.Encode(simple)
	expectNoError(t, err)
	request, err := http.NewRequest("POST", server.URL+"/prefix/version/foo", bytes.NewBuffer(data))
	expectNoError(t, err)
	response, err := client.Do(request)
	expectNoError(t, err)
	if response.StatusCode != http.StatusNotFound {
		t.Errorf("Unexpected response %#v", response)
	}
}
// TestParseTimeout covers the default (blank/unparseable input) and an
// explicit duration.
func TestParseTimeout(t *testing.T) {
	check := func(input string, want time.Duration, msg string) {
		if d := parseTimeout(input); d != want {
			t.Errorf(msg, d)
		}
	}
	check("", 30*time.Second, "blank timeout produces %v")
	check("not a timeout", 30*time.Second, "bad timeout produces %v")
	check("10s", 10*time.Second, "10s timeout produced: %v")
}
// TestSyncCreate posts with ?sync=true and a slow (200ms) injected work
// function, expecting the handler to block and return the finished object
// with a 200.
func TestSyncCreate(t *testing.T) {
	storage := SimpleRESTStorage{
		injectedFunction: func(obj interface{}) (interface{}, error) {
			time.Sleep(200 * time.Millisecond)
			return obj, nil
		},
	}
	handler := New(map[string]RESTStorage{
		"foo": &storage,
	}, "/prefix/version")
	server := httptest.NewServer(handler)
	client := http.Client{}
	simple := Simple{
		Name: "foo",
	}
	data, _ := api.Encode(simple)
	request, err := http.NewRequest("POST", server.URL+"/prefix/version/foo?sync=true", bytes.NewBuffer(data))
	expectNoError(t, err)
	// NOTE(review): the goroutine + WaitGroup is equivalent to calling
	// client.Do directly since Wait() immediately blocks; left as-is to
	// preserve behavior (and the sync import, shared with the test below).
	wg := sync.WaitGroup{}
	wg.Add(1)
	var response *http.Response
	go func() {
		response, err = client.Do(request)
		wg.Done()
	}()
	wg.Wait()
	expectNoError(t, err)
	var itemOut Simple
	body, err := extractBody(response, &itemOut)
	expectNoError(t, err)
	if !reflect.DeepEqual(itemOut, simple) {
		t.Errorf("Unexpected data: %#v, expected %#v (%s)", itemOut, simple, string(body))
	}
	if response.StatusCode != http.StatusOK {
		t.Errorf("Unexpected status: %d, Expected: %d, %#v", response.StatusCode, http.StatusOK, response)
	}
}
// TestSyncCreateTimeout posts with ?sync=true&timeout=200ms while the work
// function takes 400ms, expecting the handler to give up and return a 202
// with a Working status instead of the finished object.
func TestSyncCreateTimeout(t *testing.T) {
	storage := SimpleRESTStorage{
		injectedFunction: func(obj interface{}) (interface{}, error) {
			time.Sleep(400 * time.Millisecond)
			return obj, nil
		},
	}
	handler := New(map[string]RESTStorage{
		"foo": &storage,
	}, "/prefix/version")
	server := httptest.NewServer(handler)
	client := http.Client{}
	simple := Simple{Name: "foo"}
	data, _ := api.Encode(simple)
	request, err := http.NewRequest("POST", server.URL+"/prefix/version/foo?sync=true&timeout=200ms", bytes.NewBuffer(data))
	expectNoError(t, err)
	// NOTE(review): as in TestSyncCreate, the goroutine + WaitGroup is
	// equivalent to a direct client.Do call; kept for behavioral parity.
	wg := sync.WaitGroup{}
	wg.Add(1)
	var response *http.Response
	go func() {
		response, err = client.Do(request)
		wg.Done()
	}()
	wg.Wait()
	expectNoError(t, err)
	var itemOut api.Status
	_, err = extractBody(response, &itemOut)
	expectNoError(t, err)
	if itemOut.Status != api.StatusWorking || itemOut.Details == "" {
		t.Errorf("Unexpected status %#v", itemOut)
	}
	if response.StatusCode != http.StatusAccepted {
		t.Errorf("Unexpected status: %d, Expected: %d, %#v", response.StatusCode, 202, response)
	}
}
// TestOpGet creates an object asynchronously and then fetches the resulting
// operation via /operations/{id}.
func TestOpGet(t *testing.T) {
	simpleStorage := &SimpleRESTStorage{}
	handler := New(map[string]RESTStorage{
		"foo": simpleStorage,
	}, "/prefix/version")
	server := httptest.NewServer(handler)
	client := http.Client{}
	simple := Simple{
		Name: "foo",
	}
	data, err := api.Encode(simple)
	t.Log(string(data))
	expectNoError(t, err)
	request, err := http.NewRequest("POST", server.URL+"/prefix/version/foo", bytes.NewBuffer(data))
	expectNoError(t, err)
	response, err := client.Do(request)
	expectNoError(t, err)
	if response.StatusCode != http.StatusAccepted {
		t.Errorf("Unexpected response %#v", response)
	}
	var itemOut api.Status
	body, err := extractBody(response, &itemOut)
	expectNoError(t, err)
	if itemOut.Status != api.StatusWorking || itemOut.Details == "" {
		t.Errorf("Unexpected status: %#v (%s)", itemOut, string(body))
	}
	req2, err := http.NewRequest("GET", server.URL+"/prefix/version/operations/"+itemOut.Details, nil)
	expectNoError(t, err)
	// Fixed: the second response was previously discarded and the FIRST
	// response's (already-verified) status was re-checked, so the operation
	// GET was never actually asserted. The operation may have completed
	// (200) or still be pending (202) by the time we poll it.
	response2, err := client.Do(req2)
	expectNoError(t, err)
	if response2.StatusCode != http.StatusOK && response2.StatusCode != http.StatusAccepted {
		t.Errorf("Unexpected response %#v", response2)
	}
}
// watchTestTable drives the watch tests below: each entry pairs an event type
// with the object payload the fake watcher will emit.
var watchTestTable = []struct {
	t watch.EventType
	obj interface{}
}{
	{watch.Added, &Simple{Name: "A Name"}},
	{watch.Modified, &Simple{Name: "Another Name"}},
	{watch.Deleted, &Simple{Name: "Another Name"}},
}
// TestWatchWebsocket connects to the watch endpoint over a websocket, drives
// events through the fake watcher, and verifies each arrives unchanged as an
// api.WatchEvent. After Stop, a further receive must fail.
func TestWatchWebsocket(t *testing.T) {
	simpleStorage := &SimpleRESTStorage{}
	handler := New(map[string]RESTStorage{
		"foo": simpleStorage,
	}, "/prefix/version")
	server := httptest.NewServer(handler)
	dest, _ := url.Parse(server.URL)
	dest.Scheme = "ws" // Required by websocket, though the server never sees it.
	dest.Path = "/prefix/version/watch/foo"
	dest.RawQuery = "id=myID"
	ws, err := websocket.Dial(dest.String(), "", "http://localhost")
	expectNoError(t, err)
	// The id query parameter should have been routed to WatchSingle.
	if a, e := simpleStorage.requestedID, "myID"; a != e {
		t.Fatalf("Expected %v, got %v", e, a)
	}
	// try pushes one event through the fake watcher and asserts it is
	// delivered on the websocket with the same type and payload.
	try := func(action watch.EventType, object interface{}) {
		// Send
		simpleStorage.fakeWatch.Action(action, object)
		// Test receive
		var got api.WatchEvent
		err := websocket.JSON.Receive(ws, &got)
		if err != nil {
			t.Fatalf("Unexpected error: %v", err)
		}
		if got.Type != action {
			t.Errorf("Unexpected type: %v", got.Type)
		}
		if e, a := object, got.Object.Object; !reflect.DeepEqual(e, a) {
			t.Errorf("Expected %v, got %v", e, a)
		}
	}
	for _, item := range watchTestTable {
		try(item.t, item.obj)
	}
	// Stopping the watch ends the stream, so the next receive must error.
	simpleStorage.fakeWatch.Stop()
	var got api.WatchEvent
	err = websocket.JSON.Receive(ws, &got)
	if err == nil {
		t.Errorf("Unexpected non-error")
	}
}
// TestWatchHTTP exercises the same watch flow as TestWatchWebsocket but over
// plain HTTP, decoding the streamed events with a JSON decoder.
func TestWatchHTTP(t *testing.T) {
	simpleStorage := &SimpleRESTStorage{}
	handler := New(map[string]RESTStorage{
		"foo": simpleStorage,
	}, "/prefix/version")
	server := httptest.NewServer(handler)
	client := http.Client{}
	dest, _ := url.Parse(server.URL)
	dest.Path = "/prefix/version/watch/foo"
	dest.RawQuery = "id=myID"
	request, err := http.NewRequest("GET", dest.String(), nil)
	expectNoError(t, err)
	response, err := client.Do(request)
	expectNoError(t, err)
	if response.StatusCode != http.StatusOK {
		t.Errorf("Unexpected response %#v", response)
	}
	// The id query parameter should have been routed to WatchSingle.
	if a, e := simpleStorage.requestedID, "myID"; a != e {
		t.Fatalf("Expected %v, got %v", e, a)
	}
	decoder := json.NewDecoder(response.Body)
	// try pushes one event through the fake watcher and asserts it is
	// decoded from the response stream with the same type and payload.
	try := func(action watch.EventType, object interface{}) {
		// Send
		simpleStorage.fakeWatch.Action(action, object)
		// Test receive
		var got api.WatchEvent
		err := decoder.Decode(&got)
		if err != nil {
			t.Fatalf("Unexpected error: %v", err)
		}
		if got.Type != action {
			t.Errorf("Unexpected type: %v", got.Type)
		}
		if e, a := object, got.Object.Object; !reflect.DeepEqual(e, a) {
			t.Errorf("Expected %v, got %v", e, a)
		}
	}
	for _, item := range watchTestTable {
		try(item.t, item.obj)
	}
	// Stopping the watch ends the stream, so the next decode must error.
	simpleStorage.fakeWatch.Stop()
	var got api.WatchEvent
	err = decoder.Decode(&got)
	if err == nil {
		t.Errorf("Unexpected non-error")
	}
}
// TestMinionTransport verifies that minionTransport rewrites relative hrefs in
// proxied minion HTML so links resolve through the apiserver proxy path.
func TestMinionTransport(t *testing.T) {
	content := string(`<pre><a href="kubelet.log">kubelet.log</a><a href="google.log">google.log</a></pre>`)
	transport := &minionTransport{}
	// Test /logs/
	request := &http.Request{
		Method: "GET",
		URL: &url.URL{
			Scheme: "http",
			Host:   "minion1:10250",
			Path:   "/logs/",
		},
	}
	response := &http.Response{
		Status:     "200 OK",
		StatusCode: http.StatusOK,
		Body:       ioutil.NopCloser(strings.NewReader(content)),
		Close:      true,
	}
	// Hrefs should be rewritten to the /proxy/minion/<host>/<path> form.
	updated_resp, _ := transport.ProcessResponse(request, response)
	body, _ := ioutil.ReadAll(updated_resp.Body)
	expected := string(`<pre><a href="/proxy/minion/minion1:10250/logs/kubelet.log">kubelet.log</a><a href="/proxy/minion/minion1:10250/logs/google.log">google.log</a></pre>`)
	if !strings.Contains(string(body), expected) {
		t.Errorf("Received wrong content: %s", string(body))
	}
	// Test subdir under /logs/
	request = &http.Request{
		Method: "GET",
		URL: &url.URL{
			Scheme: "http",
			Host:   "minion1:8080",
			Path:   "/whatever/apt/",
		},
	}
	response = &http.Response{
		Status:     "200 OK",
		StatusCode: http.StatusOK,
		Body:       ioutil.NopCloser(strings.NewReader(content)),
		Close:      true,
	}
	updated_resp, _ = transport.ProcessResponse(request, response)
	body, _ = ioutil.ReadAll(updated_resp.Body)
	expected = string(`<pre><a href="/proxy/minion/minion1:8080/whatever/apt/kubelet.log">kubelet.log</a><a href="/proxy/minion/minion1:8080/whatever/apt/google.log">google.log</a></pre>`)
	if !strings.Contains(string(body), expected) {
		t.Errorf("Received wrong content: %s", string(body))
	}
}
|
Crystallite size and strain calculations of hard particle reinforced composite powders (Cu/Ni/Fe–WC) synthesized via mechanical alloying
In this study, Cu–25WC, Ni–25WC, and Fe–25WC (all in wt%) composite powders were produced via mechanical alloying (MA) and characterized for their potential utilization in particulate materials based technologies. The changes in the crystallite size (D) and lattice strain (ε) during the production of WC particle reinforced Cu, Fe, and Ni composite powders via MA were investigated. The Williamson–Hall (W–H) plot analysis and fundamental parameters approach (FPA) applied with Lorentzian function were used to evaluate ε and D of matrix phases from XRD results. With increasing MA, ε values of all matrix phases showed an increase whereas D values showed a decrease. In addition to that, lattice parameters aCu and aNi changed linearly with time, and aFe displayed a slight decrease. The XRD peak belonging to the Cu (111) plane shifted towards larger 2-theta angles in the same direction. Contrary to Cu, the Fe (110) peak shifted to lower angles with MA time. However, the XRD peak belonging to the Ni (111) plane changed alternately. Similar results were obtained from both W–H plot analysis and the FPA calculations. Minimum crystallite size and maximum internal strain rates were estimated for 8 h MA’ed Cu25WC, Fe25WC, and Ni25WC composite powders as 14.63 nm and 1.39%, 7.60 nm and 1.23%, and 17.65 nm and 1.13%, respectively. Transmission electron microscope observations were found in good agreement with the crystallite size of XRD calculations. |
/**
* It listen to the input socket, when a message is received the proper
* communication method is invoked.
*
*
*/
public class SocketMessageHandlerIn implements Runnable {
private static final Logger LOGGER = Logger.getLogger(SocketMessageHandlerIn.class.getName());
private ObjectInputStream socketIn;
private final SocketCommunication comm;
public SocketMessageHandlerIn(SocketCommunication socketCommunication, ObjectInputStream socketIn) {
comm = socketCommunication;
this.socketIn = socketIn;
}
public void receiveMessage(Message message) throws IOException {
comm.receiveMessage(message);
};
@Override
public void run() {
while (true) {
try {
Object object = socketIn.readObject();
if (object instanceof Message) {
receiveMessage((Message) object);
} else {
LOGGER.warning(String.format(
"The socket received an object that is not a message. %n" + "Object received: %s",
object.toString()));
}
} catch (ClassNotFoundException | IOException e) {
LOGGER.log(Level.SEVERE, "Error reading from socket.", e);
return;
}
}
}
} |
from __future__ import unicode_literals
from django.db import models
# from django.utils import timezone
# from orca.scripts import self_voicing
# There is no actual application data stored through these models.
# They only serve as examples, holding metadata about an uploaded file.
# Create your models here.
class UploadedFile(models.Model):
    """Metadata for a single uploaded file."""
    title = models.CharField(max_length=255, blank=True)  # optional display title
    file = models.FileField(upload_to='uploads/')  # stored under MEDIA_ROOT/uploads/
    uploaded_at = models.DateTimeField(auto_now_add=True)  # set once, at creation
class apps(models.Model):
    """An application entry: display name, target URL, and its CSS icon class.

    NOTE(review): the class name violates PEP 8 CapWords, but renaming it would
    break existing references and migrations, so it is left unchanged.
    """
    name = models.CharField(max_length=255)
    url = models.CharField(max_length=255)
    icon = models.CharField(max_length=64) # expect icon from respostyle.css
|
<filename>pkg/commands/options/filename.go
package options
import (
"github.com/spf13/cobra"
)
// FilenameOptions holds the value of the --filename/-f flag: the path to the
// .toml file a command should operate on.
type FilenameOptions struct {
	Filename string
}
// AddFilenameArg registers the --filename/-f flag on cmd, binding its value to
// fo.Filename. When required is true the flag is marked mandatory;
// MarkFlagRequired only errors for a flag that was never registered, so a
// failure here is a programming error and panics.
func AddFilenameArg(cmd *cobra.Command, fo *FilenameOptions, required bool) {
	cmd.Flags().StringVarP(&fo.Filename, "filename", "f", "",
		"The path to the .toml file to use.")
	if required {
		if err := cmd.MarkFlagRequired("filename"); err != nil {
			panic(err)
		}
	}
}
|
package GoReactive
import (
"testing"
"time"
"github.com/stvp/assert"
)
/// Returns an observable which emits 1, 2, 3, 4, and 5 then completion
func oneToFiveObservable() Observable {
	values := []interface{}{1, 2, 3, 4, 5}
	return NewObservableSlice(values)
}
/// Returns an observable which emits 1, 1, 2, 3, 3, 4, 5 and 5 then completion
func oneishToFiveObservable() Observable {
	values := []interface{}{1, 1, 2, 3, 3, 4, 5, 5}
	return NewObservableSlice(values)
}
/// Subscribes to an observable returning once it completes or fails
//
// On failure the test is aborted via t.FailNow; otherwise the collected
// values are returned in emission order.
func wait(t *testing.T, observable Observable) []interface{} {
	values := []interface{}{}
	completed := false
	failed := false

	observable.Subscribe(
		func(value interface{}) { values = append(values, value) },
		func() { completed = true },
		func(err error) { failed = true })

	// Fixed: poll every 10ms instead of 500ms — the original added up to half
	// a second of dead time per test for no benefit.
	// NOTE(review): values/completed/failed are written by the subscription
	// callbacks and read here without synchronization; this is only safe if
	// Subscribe delivers events synchronously on this goroutine — confirm.
	for !completed && !failed {
		time.Sleep(10 * time.Millisecond)
	}

	if failed {
		t.FailNow()
	}

	return values
}
func TestStartWith(t *testing.T) {
observable := oneToFiveObservable()
values := wait(t, StartWith(observable, 100))
assert.Equal(t, values, []interface{}{100, 1, 2, 3, 4, 5})
}
func TestSkipSkipsValues(t *testing.T) {
observable := oneToFiveObservable()
values := wait(t, Skip(observable, 2))
assert.Equal(t, values, []interface{}{3, 4, 5})
}
func TestDistinctUntilChanged(t *testing.T) {
observable := oneishToFiveObservable()
values := wait(t, DistinctUntilChanged(observable))
assert.Equal(t, values, []interface{}{1, 2, 3, 4, 5})
}
func TestMap(t *testing.T) {
observable := oneToFiveObservable()
mappedObservable := Map(observable, func(value interface{}) interface{} {
return value.(int) * 2
})
values := wait(t, mappedObservable)
assert.Equal(t, values, []interface{}{2, 4, 6, 8, 10})
}
func TestFilter(t *testing.T) {
observable := oneToFiveObservable()
filteredObservable := Filter(observable, func(value interface{}) bool {
return (value.(int) % 2) == 0
})
values := wait(t, filteredObservable)
assert.Equal(t, values, []interface{}{2, 4})
}
func TestExclude(t *testing.T) {
observable := oneToFiveObservable()
filteredObservable := Exclude(observable, func(value interface{}) bool {
return (value.(int) % 2) == 0
})
values := wait(t, filteredObservable)
assert.Equal(t, values, []interface{}{1, 3, 5})
}
|
Association of serum folate levels during pregnancy and prenatal depression.
OBJECTIVE
To evaluate the association between serum folate levels during pregnancy and prenatal depression and the extent to which obesity may modify this relationship.
METHODS
This secondary data analysis leveraged data from a previous study of pregnant Kaiser Permanente Northern California participants who completed a survey and provided a serum sample between 2011 and 2013. Serum folate was assessed using the Center for Disease Control's Total Folate Serum/Whole Blood Microbiological Assay Method. A score of 15 or greater on the Center for Epidemiologic Studies Depression Scale was defined as prenatal depression. We used Poisson regression to estimate risk of prenatal depression given prenatal serum folate status (low/medium tertiles vs. high tertile) in the full sample and in subsamples of women with pre-pregnancy body mass index in the (a) normal range and (b) overweight/obese range.
RESULTS
Of the sample, 13% had prenatal depression. Combined low/medium folate tertiles were associated with prenatal depression (adjusted relative risk = 1.97, 95% confidence interval: 0.93-4.18), although results did not reach statistical significance. This relationship was stronger among women with overweight/obesity than among women with normal weight (aRR: 2.61, 95% CI: 1.01-6.71 and aRR: 1.50, 95% CI: 0.34-6.66, respectively).
CONCLUSION
Results suggest an association between lower pregnancy folate levels and prenatal depression that may be stronger among women with overweight or obesity. Future studies need to clarify the temporal sequence of these associations. |
/*
* Copyright (c) 2016 Cray Inc. All rights reserved.
* Copyright (c) 2017 Los Alamos National Security, LLC. All rights reserved.
* Copyright (c) 2019-2020 Triad National Security, LLC.
* All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _GNIX_CM_H_
#define _GNIX_CM_H_
#include "gnix.h"
/* Maximum user-supplied connection data carried in a connect/accept. */
#define GNIX_CM_DATA_MAX_SIZE 256
/* Size of the staged EQ entry buffer: the CM entry plus its user data. */
#define GNIX_CM_EQE_BUF_SIZE (sizeof(struct fi_eq_cm_entry) + \
GNIX_CM_DATA_MAX_SIZE)

/* Connection request sent from the active side to the passive endpoint.
 * Carries a full snapshot of the fi_info attributes plus the SMSG mailbox
 * parameters needed to wire up the VC. */
struct gnix_pep_sock_connreq {
	struct fi_info info;
	struct gnix_ep_name src_addr;
	struct gnix_ep_name dest_addr;
	struct fi_tx_attr tx_attr;
	struct fi_rx_attr rx_attr;
	struct fi_ep_attr ep_attr;
	struct fi_domain_attr domain_attr;
	struct fi_fabric_attr fabric_attr;
	int vc_id;
	gni_smsg_attr_t vc_mbox_attr;
	gni_mem_handle_t cq_irq_mdh;
	uint64_t peer_caps;
	size_t cm_data_len;
	char eqe_buf[GNIX_CM_EQE_BUF_SIZE];
	uint32_t key_offset;
};

/* Verdict returned by the passive side for a connection request. */
enum gnix_pep_sock_resp_cmd {
	GNIX_PEP_SOCK_RESP_ACCEPT,
	GNIX_PEP_SOCK_RESP_REJECT
};

/* Response to a connection request; on accept it carries the passive side's
 * SMSG mailbox parameters. */
struct gnix_pep_sock_connresp {
	enum gnix_pep_sock_resp_cmd cmd;
	int vc_id;
	gni_smsg_attr_t vc_mbox_attr;
	gni_mem_handle_t cq_irq_mdh;
	uint64_t peer_caps;
	size_t cm_data_len;
	char eqe_buf[GNIX_CM_EQE_BUF_SIZE];
	uint32_t key_offset;
};

/* Per-connection bookkeeping held by the passive endpoint while a request is
 * being read from its socket. */
struct gnix_pep_sock_conn {
	struct fid fid;
	struct dlist_entry list;
	int sock_fd;
	struct gnix_pep_sock_connreq req;
	int bytes_read;
	struct fi_info *info;
};
/* Progress connection management on a passive (listening) endpoint. */
int _gnix_pep_progress(struct gnix_fid_pep *pep);
/* Progress connection management on an active endpoint. */
int _gnix_ep_progress(struct gnix_fid_ep *ep);
/**
 * Convert a given address (of format FI_ADDR_GNI) into FI_ADDR_STR.
 * @param ep_name [IN] the FI_ADDR_GNI address.
 * @param out_buf [IN/OUT] the FI_ADDR_STR address.
 * @return either FI_SUCCESS or a negative integer on failure.
 */
int _gnix_ep_name_to_str(struct gnix_ep_name *ep_name, char **out_buf);

/**
 * Parse a given address (of format FI_ADDR_STR) into FI_ADDR_GNI.
 * @param addr[IN] the FI_ADDR_STR address.
 * @param resolved_addr[OUT] the FI_ADDR_GNI address.
 * @return either FI_SUCCESS or a negative integer on failure.
 */
int _gnix_ep_name_from_str(const char *addr,
			   struct gnix_ep_name *resolved_addr);
/**
 * Find a FI_ADDR_GNI.
 * @param ep_name [IN] the array of addresses.
 * @param idx [IN] the index of the desired address.
 * @param addr [OUT] the desired address.
 */
static inline int
_gnix_resolve_gni_ep_name(const char *ep_name, int idx,
			  struct gnix_ep_name *addr)
{
	const size_t addr_size = sizeof(struct gnix_ep_name);

	GNIX_TRACE(FI_LOG_EP_CTRL, "\n");

	/*TODO (optimization): Just return offset into ep_name */
	memcpy(addr, ep_name + (addr_size * idx), addr_size);
	return FI_SUCCESS;
}
/**
 * Find and convert a FI_ADDR_STR to FI_ADDR_GNI.
 * @param ep_name [IN] the FI_ADDR_STR address.
 * @param idx [IN] the index of the desired address.
 * @param addr [OUT] the desired address converted to FI_ADDR_GNI.
 * @return either FI_SUCCESS or a negative integer on failure.
 */
static inline int
_gnix_resolve_str_ep_name(const char *ep_name, int idx,
			  struct gnix_ep_name *addr)
{
	const size_t addr_size = GNIX_FI_ADDR_STR_LEN;

	GNIX_TRACE(FI_LOG_EP_CTRL, "\n");

	return _gnix_ep_name_from_str(ep_name + (addr_size * idx), addr);
}
/**
 * Find and resolve the given ep_name.
 *
 * @param ep_name [IN] the ep name to resolve.
 * @param idx [IN] the index of the desired address.
 * @param addr [OUT] the desired address.
 * @param domain [IN] the given domain.
 * @return either FI_SUCCESS or a negative integer on failure.
 */
static inline int
_gnix_get_ep_name(const char *ep_name, int idx, struct gnix_ep_name *addr,
		  struct gnix_fid_domain *domain)
{
	int ret = FI_SUCCESS;

	/* Dispatch on the domain's address format. (The old comment mentioned
	 * a function pointer, but a plain branch is used.) */
	if (domain->addr_format == FI_ADDR_STR) {
		ret = _gnix_resolve_str_ep_name(ep_name, idx, addr);
	} else {
		ret = _gnix_resolve_gni_ep_name(ep_name, idx, addr);
	}

	return ret;
}
#endif
|
<gh_stars>10-100
from pipeline.builder import * # noqa
def parallel_pipeline(branch_nums, node_count=10):
    """Build a tree for a pipeline with ``branch_nums`` parallel branches.

    Each branch is a linear chain of ``node_count`` debug_node activities; the
    branches fan out from a ParallelGateway and rejoin at a ConvergeGateway.

    :param branch_nums: number of parallel branches to create.
    :param node_count: activities per branch (generalized from the previous
        hard-coded 10; the default preserves the old behavior).
    :return: the pipeline tree produced by ``build_tree``.
    """
    print("prepare pipeline...")
    start = EmptyStartEvent()
    first_acts = []
    for _ in range(branch_nums):
        acts = [ServiceActivity(component_code="debug_node") for _ in range(node_count)]
        # Fixed: the original reused ``i`` for both the branch loop and this
        # chaining loop, shadowing the outer loop variable.
        for j in range(node_count - 1):
            acts[j].connect(acts[j + 1])
        first_acts.append(acts[0])
    pg = ParallelGateway()
    cg = ConvergeGateway()
    end = EmptyEndEvent()
    start.extend(pg).connect(*first_acts).converge(cg).extend(end)
    print("build pipeline...")
    pipeline = build_tree(start)
    print("build pipeline finished!")
    return pipeline
def one_node_pipeline():
    """Build the smallest possible pipeline: start -> one empty node -> end."""
    start = EmptyStartEvent()
    end = EmptyEndEvent()
    activity = ServiceActivity(component_code="empty_node")
    start.extend(activity).extend(end)
    return build_tree(start)
def normal_pipeline():
    """Build a pipeline exercising every major construct: plain activities,
    a subprocess with its own data and params, a parallel gateway with three
    branches, and an exclusive gateway with two conditional branches.

    All debug activities splice the same two pipeline constants into their
    inputs; the top-level data supplies the concrete values.
    """

    def splice_debug_act():
        # A "debug_node" activity wired to ${constant_1}/${constant_2}
        # through SPLICE variables.
        act = ServiceActivity(component_code="debug_node")
        act.component.inputs.param_1 = Var(type=Var.SPLICE, value="${constant_1}")
        act.component.inputs.param_2 = Var(type=Var.SPLICE, value="${constant_2}")
        return act

    start = EmptyStartEvent()
    act_1 = splice_debug_act()
    act_2 = splice_debug_act()

    # subprocess begin
    sub_start = EmptyStartEvent()
    sub_act_1 = splice_debug_act()
    sub_act_2 = splice_debug_act()
    sub_end = EmptyEndEvent()
    sub_pipeline_data = Data()
    sub_pipeline_data.inputs["${constant_1}"] = DataInput(type=Var.PLAIN, value="default_value")
    sub_pipeline_data.inputs["${constant_2}"] = DataInput(type=Var.PLAIN, value="default_value")
    # The subprocess receives the parent's constants under the same names.
    params = Params(
        {
            "${constant_1}": Var(type=Var.SPLICE, value="${constant_1}"),
            "${constant_2}": Var(type=Var.SPLICE, value="${constant_2}"),
        }
    )
    sub_start.extend(sub_act_1).extend(sub_act_2).extend(sub_end)
    # subprocess end

    subprocess = SubProcess(start=sub_start, data=sub_pipeline_data, params=params)
    pg = ParallelGateway()
    act_3 = splice_debug_act()
    act_4 = splice_debug_act()
    act_5 = splice_debug_act()
    cg_1 = ConvergeGateway()
    eg = ExclusiveGateway(conditions={0: '"${constant_1}" == "value_1"', 1: "True == False"})
    act_6 = splice_debug_act()
    act_7 = splice_debug_act()
    cg_2 = ConvergeGateway()
    end = EmptyEndEvent()

    pipeline_data = Data()
    pipeline_data.inputs["${constant_1}"] = Var(type=Var.PLAIN, value="value_1")
    pipeline_data.inputs["${constant_2}"] = Var(type=Var.PLAIN, value="value_2")

    start.extend(act_1).extend(act_2).extend(subprocess).extend(pg).connect(act_3, act_4, act_5).converge(cg_1).extend(
        eg
    ).connect(act_6, act_7).converge(cg_2).extend(end)
    return build_tree(start, data=pipeline_data)
|
/*
* Copyright 2019 The Kythe Authors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Package info provides utilities for summarizing the contents of a kzip.
package info // import "kythe.io/kythe/go/platform/kzip/info"
import (
"fmt"
"log"
"bitbucket.org/creachadair/stringset"
"kythe.io/kythe/go/platform/kzip"
apb "kythe.io/kythe/proto/analysis_go_proto"
)
// If the compilation unit doesn't set a corpus, use this corpus so we have somewhere to record the
// stats.
const unspecifiedCorpus = "__UNSPECIFIED_CORPUS__"
// KzipInfo scans the kzip in f and counts contained files and units, giving a breakdown by corpus
// and language. It also records the size (in bytes) of the kzip specified by fileSize in the
// returned KzipInfo. Per-unit problems that make the stats unreliable are accumulated in the
// returned proto's CriticalKzipErrors rather than aborting the scan; only a scan failure itself
// returns a non-nil error.
func KzipInfo(f kzip.File, fileSize int64, scanOpts ...kzip.ScanOption) (*apb.KzipInfo, error) {
	// Get file and unit counts broken down by corpus, language.
	kzipInfo := &apb.KzipInfo{
		Corpora: make(map[string]*apb.KzipInfo_CorpusInfo),
		Size:    fileSize,
	}
	err := kzip.Scan(f, func(rd *kzip.Reader, u *kzip.Unit) error {
		srcs := stringset.New(u.Proto.SourceFile...)
		// The corpus may be specified in the unit VName or in the source file
		// VNames. Record all values of corpus seen and afterwards check that a
		// single value is specified.
		cuCorpus := u.Proto.GetVName().GetCorpus()
		if cuCorpus == "" {
			// Attribute the unit to a sentinel corpus so its stats still land somewhere.
			log.Printf("Warning: Corpus not set for compilation unit %v", u.Proto.GetVName())
			cuCorpus = unspecifiedCorpus
		}
		cuLang := u.Proto.GetVName().GetLanguage()
		cuInfo := cuLangInfo(cuCorpus, cuLang, kzipInfo)
		cuInfo.Count++
		var srcCorpora stringset.Set
		srcsWithRI := stringset.New()
		// Count every required input; those that are also listed as source files
		// additionally count as sources, and their corpora are tracked.
		for _, ri := range u.Proto.RequiredInput {
			riCorpus := requiredInputCorpus(u, ri)
			requiredInputInfo(riCorpus, cuLang, kzipInfo).Count++
			if srcs.Contains(ri.Info.Path) {
				sourceInfo(riCorpus, cuLang, kzipInfo).Count++
				srcCorpora.Add(riCorpus)
				srcsWithRI.Add(ri.Info.Path)
			}
		}
		// A source file with no matching required_input entry is a critical error.
		srcsWithoutRI := srcs.Diff(srcsWithRI)
		for path := range srcsWithoutRI {
			msg := fmt.Sprintf("source %q in CU %v doesn't have a required_input entry", path, u.Proto.GetVName())
			kzipInfo.CriticalKzipErrors = append(kzipInfo.CriticalKzipErrors, msg)
		}
		// Note this also fires when srcCorpora is empty (zero sources matched).
		if srcCorpora.Len() != 1 {
			// This is a warning for now, but may become an error.
			log.Printf("Multiple corpora in unit. unit vname={%v}; src corpora=%v; srcs=%v", u.Proto.GetVName(), srcCorpora, u.Proto.SourceFile)
		}
		return nil
	}, scanOpts...)
	if err != nil {
		return nil, fmt.Errorf("scanning kzip: %v", err)
	}
	return kzipInfo, nil
}
// requiredInputCorpus computes the corpus for a required input. It follows the rules in the
// CompilationUnit proto comments in kythe/proto/analysis.proto that say that any
// required_input that does not set corpus in its VName should inherit corpus from the compilation
// unit's VName.
func requiredInputCorpus(u *kzip.Unit, ri *apb.CompilationUnit_FileInput) string {
	corpus := ri.GetVName().GetCorpus()
	if corpus == "" {
		// Fall back to the compilation unit's corpus.
		corpus = u.Proto.GetVName().GetCorpus()
	}
	return corpus
}
// KzipInfoTotalCount returns the total CompilationUnits counts for infos split apart by language.
// Counts are summed across every corpus of every input info.
func KzipInfoTotalCount(infos []*apb.KzipInfo) apb.KzipInfo_CorpusInfo {
	totals := apb.KzipInfo_CorpusInfo{
		LanguageCompilationUnits: make(map[string]*apb.KzipInfo_CorpusInfo_CompilationUnits),
		LanguageRequiredInputs:   make(map[string]*apb.KzipInfo_CorpusInfo_RequiredInputs),
		LanguageSources:          make(map[string]*apb.KzipInfo_CorpusInfo_RequiredInputs),
	}
	for _, info := range infos {
		for _, corpus := range info.GetCorpora() {
			// Compilation unit totals per language.
			for lang, stats := range corpus.GetLanguageCompilationUnits() {
				cuTotal, ok := totals.LanguageCompilationUnits[lang]
				if !ok {
					cuTotal = &apb.KzipInfo_CorpusInfo_CompilationUnits{}
					totals.LanguageCompilationUnits[lang] = cuTotal
				}
				cuTotal.Count += stats.GetCount()
			}
			// Required input totals per language.
			for lang, stats := range corpus.GetLanguageRequiredInputs() {
				riTotal, ok := totals.LanguageRequiredInputs[lang]
				if !ok {
					riTotal = &apb.KzipInfo_CorpusInfo_RequiredInputs{}
					totals.LanguageRequiredInputs[lang] = riTotal
				}
				riTotal.Count += stats.GetCount()
			}
			// Source file totals per language.
			for lang, stats := range corpus.GetLanguageSources() {
				srcTotal, ok := totals.LanguageSources[lang]
				if !ok {
					srcTotal = &apb.KzipInfo_CorpusInfo_RequiredInputs{}
					totals.LanguageSources[lang] = srcTotal
				}
				srcTotal.Count += stats.GetCount()
			}
		}
	}
	return totals
}
// MergeKzipInfo combines the counts from multiple KzipInfos. Per-corpus,
// per-language counts are summed, critical errors are concatenated, and
// sizes are added.
func MergeKzipInfo(infos []*apb.KzipInfo) *apb.KzipInfo {
	merged := &apb.KzipInfo{Corpora: make(map[string]*apb.KzipInfo_CorpusInfo)}
	for _, info := range infos {
		for corpus, cinfo := range info.GetCorpora() {
			// The lookup helpers create missing entries on demand.
			for lang, cu := range cinfo.GetLanguageCompilationUnits() {
				cuLangInfo(corpus, lang, merged).Count += cu.GetCount()
			}
			for lang, inputs := range cinfo.GetLanguageRequiredInputs() {
				requiredInputInfo(corpus, lang, merged).Count += inputs.GetCount()
			}
			for lang, sources := range cinfo.GetLanguageSources() {
				sourceInfo(corpus, lang, merged).Count += sources.GetCount()
			}
		}
		merged.CriticalKzipErrors = append(merged.GetCriticalKzipErrors(), info.GetCriticalKzipErrors()...)
		merged.Size += info.Size
	}
	return merged
}
// cuLangInfo returns the per-language compilation-unit counter for the given
// corpus, creating and registering an empty one on first use.
func cuLangInfo(corpus, lang string, kzipInfo *apb.KzipInfo) *apb.KzipInfo_CorpusInfo_CompilationUnits {
	langMap := corpusInfo(corpus, kzipInfo).LanguageCompilationUnits
	cu, ok := langMap[lang]
	if !ok {
		cu = &apb.KzipInfo_CorpusInfo_CompilationUnits{}
		langMap[lang] = cu
	}
	return cu
}
// requiredInputInfo returns the per-language required-input counter for the
// given corpus, creating and registering an empty one on first use.
func requiredInputInfo(corpus, lang string, kzipInfo *apb.KzipInfo) *apb.KzipInfo_CorpusInfo_RequiredInputs {
	langMap := corpusInfo(corpus, kzipInfo).LanguageRequiredInputs
	ri, ok := langMap[lang]
	if !ok {
		ri = &apb.KzipInfo_CorpusInfo_RequiredInputs{}
		langMap[lang] = ri
	}
	return ri
}
// sourceInfo returns the per-language source-file counter for the given
// corpus, creating and registering an empty one on first use.
func sourceInfo(corpus, lang string, kzipInfo *apb.KzipInfo) *apb.KzipInfo_CorpusInfo_RequiredInputs {
	langMap := corpusInfo(corpus, kzipInfo).LanguageSources
	src, ok := langMap[lang]
	if !ok {
		src = &apb.KzipInfo_CorpusInfo_RequiredInputs{}
		langMap[lang] = src
	}
	return src
}
// corpusInfo returns the CorpusInfo entry for corpus, creating and
// registering an entry with all three language maps initialized on first use.
func corpusInfo(corpus string, kzipInfo *apb.KzipInfo) *apb.KzipInfo_CorpusInfo {
	info, ok := kzipInfo.GetCorpora()[corpus]
	if !ok {
		info = &apb.KzipInfo_CorpusInfo{
			LanguageCompilationUnits: make(map[string]*apb.KzipInfo_CorpusInfo_CompilationUnits),
			LanguageRequiredInputs:   make(map[string]*apb.KzipInfo_CorpusInfo_RequiredInputs),
			LanguageSources:          make(map[string]*apb.KzipInfo_CorpusInfo_RequiredInputs),
		}
		kzipInfo.Corpora[corpus] = info
	}
	return info
}
|
// Render n as exactly three zero-padded decimal digits and set it as the
// window's text (assumes n is in [0, 999] -- TODO confirm callers; values
// outside that range yield the last three digits, and negatives produce
// characters below '0').
inline VOID DoSetWindowInt03(HWND hWnd, INT n)
{
    TCHAR szText[8];
    const INT hundreds = n / 100 % 10;
    const INT tens = n / 10 % 10;
    const INT ones = n % 10;
    szText[0] = TCHAR('0' + hundreds);
    szText[1] = TCHAR('0' + tens);
    szText[2] = TCHAR('0' + ones);
    szText[3] = 0;
    SetWindowText(hWnd, szText);
}
def _run(bot, inputs, logger, server_override=None, botengine_override=None):
    """Execute one bot invocation.

    Builds (or reuses) a BotEngine, fires any saved timers that have come
    due, runs the bot once per trigger in the payload, and flushes all
    buffered outputs back to the server.

    :param bot: bot module exposing run(botengine)
    :param inputs: decoded execution payload; 'inputs' holds per-trigger JSON
    :param logger: logger stored into the module-level _bot_logger
    :param server_override: optional server passed through to BotEngine
    :param botengine_override: pre-built BotEngine; when set, the
        'services'/'count'/'timer'/'lang'/'cloud' fields are NOT parsed
    """
    global _bot_logger
    _bot_logger = logger
    # Timestamp of the alarm the server already has scheduled, if any.
    next_timer_at_server = None
    if botengine_override is None:
        # Extract the optional top-level fields of the payload.
        services = None
        if 'services' in inputs:
            services = inputs['services']
        count = None
        if 'count' in inputs:
            count = int(inputs['count'])
        if 'timer' in inputs:
            next_timer_at_server = int(inputs['timer'])
        lang = None
        if 'lang' in inputs:
            lang = inputs['lang']
        cloud = None
        if 'cloud' in inputs:
            cloud = inputs['cloud']
        botengine = BotEngine(inputs, server_override=server_override, services=services, lang=lang, count=count, cloud=cloud)
    else:
        # NOTE(review): with an override, next_timer_at_server stays None even
        # when inputs contains 'timer' -- confirm this is intended.
        botengine = botengine_override
    botengine.start_time_sec = time.time()
    botengine._download_core_variables()
    botengine.load_variables_time_sec = time.time()
    # Servers whose address mentions 'sbox' (sandbox) get an extra
    # execution-count validation pass.
    for server in botengine._servers:
        if 'sbox' in server:
            botengine._validate_count()
            break
    # Collect every trigger type in this payload before executing any of them.
    all_triggers = []
    for i in inputs['inputs']:
        all_triggers.append(i['trigger'])
    botengine.all_trigger_types = all_triggers
    timers_existed = False
    botengine.triggers_total = len(all_triggers)
    for execution_json in inputs['inputs']:
        botengine.triggers_index += 1
        trigger = execution_json['trigger']
        if trigger > 0:
            botengine._set_inputs(execution_json)
            # Trigger 2048 skips timer processing entirely -- presumably a
            # trigger type with no timer semantics; TODO confirm.
            if trigger != 2048:
                saved_timers = botengine.load_variable(TIMERS_VARIABLE_NAME)
                if saved_timers is not None:
                    # NOTE(review): '> 1' (not '> 0') -- a single saved timer
                    # does not count as "timers existed"; verify intent.
                    timers_existed |= len(saved_timers) > 1
                    # Entries look like (fire_time, callback, argument); the
                    # loop assumes soonest-first ordering and fires everything
                    # that is now due.
                    for t in [ x[0] for x in saved_timers ]:
                        if t != MAXINT and t <= execution_json['time']:
                            focused_timer = saved_timers.pop(0)
                            # 64 marks a timer-fired trigger type.
                            botengine.all_trigger_types.append(64)
                            if callable(focused_timer[1]):
                                focused_timer[1](botengine, focused_timer[2])
                            else:
                                botengine.get_logger().error('BotEngine: Timer fired and popped, but cannot call the focused timer: ' + str(focused_timer))
                        else:
                            # First not-yet-due timer ends the scan.
                            break
                    # NOTE(review): re-saving the freshly loaded value is a
                    # no-op unless the timer callbacks mutated the stored
                    # variable -- verify against save/load semantics.
                    botengine.save_variable(TIMERS_VARIABLE_NAME, botengine.load_variable(TIMERS_VARIABLE_NAME))
            if trigger != 64:
                bot.run(botengine)
            elif saved_timers is not None and not timers_existed:
                # NOTE(review): saved_timers is unbound here when
                # trigger == 2048; that path would raise NameError -- confirm
                # trigger 64 and 2048 can never coincide.
                botengine.get_logger().error('BotEngine: Timer fired but no recollection as to why.')
                botengine.get_logger().error('Current timer variable is: ' + str(saved_timers))
            # Push this trigger's buffered outputs back to the server.
            botengine.flush_commands()
            botengine.flush_questions()
            botengine.flush_analytics()
            botengine.flush_binary_variables()
            if trigger != 2048:
                # Arm the next alarm on the server when the soonest remaining
                # timer differs from the one already scheduled there.
                saved_timers = botengine.load_variable(TIMERS_VARIABLE_NAME)
                if saved_timers is not None and len(saved_timers) > 0:
                    while True:
                        try:
                            if saved_timers[0][0] != MAXINT:
                                if saved_timers[0][0] != next_timer_at_server:
                                    botengine._execute_again_at_timestamp(saved_timers[0][0])
                                    botengine.get_logger().info(('< Set alarm: {}').format(saved_timers[0]))
                                else:
                                    botengine.get_logger().info(('| Alarm already set: {}').format(saved_timers[0]))
                            break
                        except Exception as e:
                            # NOTE(review): a persistent failure makes this
                            # retry loop spin forever -- consider a retry cap.
                            botengine.get_logger().error(('Could not _execute_again_at_timestamp to set timer: {}').format(str(e)))
                            continue
    # Final file-level flushes after all triggers have been processed.
    botengine.flush_rules()
    botengine.flush_tags()
    botengine.flush_asynchronous_requests()
    return
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.