/** * Represents a pattern for matching a {@link Node}. */ @Deprecated public class PatternNode extends AbstractPatternObject<Node> { /** * The default {@link PatternGroup}. */ // Should this really EVER be used? - mutable global state!!!! public static final PatternGroup DEFAULT_PATTERN_GROUP = new PatternGroup() /*{ @Override public void addFilter( FilterExpression regexRepression ) { throw new UnsupportedOperationException( "Cannot add filter to default pattern group." ); }; }*/; private LinkedList<PatternRelationship> relationships = new LinkedList<PatternRelationship>(); private LinkedList<PatternRelationship> optionalRelationships = new LinkedList<PatternRelationship>(); private final PatternGroup group; /** * Create a new pattern node in the default {@link PatternGroup} with a * blank label. */ public PatternNode() { this( DEFAULT_PATTERN_GROUP, "" ); } /** * Create a new pattern node in the default {@link PatternGroup} with the * specified label. * * @param label the label of this pattern node. */ public PatternNode( String label ) { this( DEFAULT_PATTERN_GROUP, label ); } /** * Create a new pattern node in the specified {@link PatternGroup} with a * blank label. * * @param group the {@link PatternGroup} of this pattern node. */ public PatternNode( PatternGroup group ) { this( group, "" ); } /** * Create a new pattern node in the specified {@link PatternGroup} with the * specified label. * * @param group the {@link PatternGroup} of this pattern node. * @param label the label of this pattern node. */ public PatternNode( PatternGroup group, String label ) { this.group = group; this.label = label; } /** * Get the {@link PatternGroup} of this pattern node. * * @return the {@link PatternGroup} this pattern node belongs to. */ public PatternGroup getGroup() { return this.group; } /** * Get all {@link PatternRelationship}s associated with this pattern node. * This includes both the required and the optional * {@link PatternRelationship}s. * * @return the {@link PatternRelationship}s associated with this pattern * node. */ public Iterable<PatternRelationship> getAllRelationships() { LinkedList<PatternRelationship> allRelationships = new LinkedList<PatternRelationship>(); allRelationships.addAll( relationships ); allRelationships.addAll( optionalRelationships ); return allRelationships; } /** * Get the optional or the required {@link PatternRelationship}s associated * with this pattern node. * * @param optional if <code>true</code> return only the optional * {@link PatternRelationship}s, else return only the required. * @return the set of optional or required {@link PatternRelationship}s. */ public Iterable<PatternRelationship> getRelationships( boolean optional ) { return optional ? optionalRelationships : relationships; } void addRelationship( PatternRelationship relationship, boolean optional ) { if ( optional ) { optionalRelationships.add( relationship ); } else { relationships.add( relationship ); } } void removeRelationship( PatternRelationship relationship, boolean optional ) { if ( optional ) { optionalRelationships.remove( relationship ); } else { relationships.remove( relationship ); } } /** * Create a directed, required {@link PatternRelationship} from this node, * to the specified other node. * * @param otherNode the node at the other end of the relationship. * @return the newly created {@link PatternRelationship}. 
*/ public PatternRelationship createRelationshipTo( PatternNode otherNode ) { return this.createRelationshipTo( otherNode, false, true ); } /** * Create a required {@link PatternRelationship} between this node and the * specified other node, with the specified direction. * * @param otherNode the node at the other end of the relationship. * @param dir the direction of the relationship. Use * {@link Direction#OUTGOING} to create a relationship from this * node to the other node. Use {@link Direction#INCOMING} to * create a relationship from the other node to this node. Use * {@link Direction#BOTH} to create a relationship where the * direction does not matter. * @return the newly created {@link PatternRelationship}. */ public PatternRelationship createRelationshipTo( PatternNode otherNode, Direction dir ) { if ( dir == Direction.INCOMING ) return otherNode.createRelationshipTo( this ); return this.createRelationshipTo( otherNode, false, dir == Direction.BOTH ? false : true ); } /** * Create a directed, required {@link PatternRelationship} of the specified * {@link RelationshipType} from this node to the specified other node. * * @param otherNode the node at the other end of the relationship. * @param type the {@link RelationshipType} of the relationship. * @return the newly created {@link PatternRelationship}. */ public PatternRelationship createRelationshipTo( PatternNode otherNode, RelationshipType type ) { return this.createRelationshipTo( otherNode, type, false, true ); } /** * Create a required {@link PatternRelationship} of the specified * {@link RelationshipType} between this node and the specified other node, * with the specified direction. * * @param otherNode the node at the other end of the relationship. * @param type the {@link RelationshipType} of the relationship. * @param dir the direction of the relationship. Use * {@link Direction#OUTGOING} to create a relationship from this * node to the other node. Use {@link Direction#INCOMING} to * create a relationship from the other node to this node. Use * {@link Direction#BOTH} to create a relationship where the * direction does not matter. * @return the newly created {@link PatternRelationship}. */ public PatternRelationship createRelationshipTo( PatternNode otherNode, RelationshipType type, Direction dir ) { if ( dir == Direction.INCOMING ) return otherNode.createRelationshipTo( this, type ); return this.createRelationshipTo( otherNode, type, false, dir == Direction.BOTH ? false : true ); } /** * Create a directed, optional {@link PatternRelationship} from this node, * to the specified other node. * * @param otherNode the node at the other end of the relationship. * @return the newly created {@link PatternRelationship}. */ public PatternRelationship createOptionalRelationshipTo( PatternNode otherNode ) { return this.createRelationshipTo( otherNode, true, true ); } /** * Create an optional {@link PatternRelationship} between this node and the * specified other node, with the specified direction. * * @param otherNode the node at the other end of the relationship. * @param dir the direction of the relationship. Use * {@link Direction#OUTGOING} to create a relationship from this * node to the other node. Use {@link Direction#INCOMING} to * create a relationship from the other node to this node. Use * {@link Direction#BOTH} to create a relationship where the * direction does not matter. * @return the newly created {@link PatternRelationship}. 
*/ public PatternRelationship createOptionalRelationshipTo( PatternNode otherNode, Direction dir ) { return this.createRelationshipTo( otherNode, true, dir == Direction.BOTH ? false : true ); } /** * Create a directed, optional {@link PatternRelationship} of the specified * {@link RelationshipType} from this node to the specified other node. * * @param otherNode the node at the other end of the relationship. * @param type the {@link RelationshipType} of the relationship. * @return the newly created {@link PatternRelationship}. */ public PatternRelationship createOptionalRelationshipTo( PatternNode otherNode, RelationshipType type ) { return this.createRelationshipTo( otherNode, type, true, true ); } /** * Create an optional {@link PatternRelationship} of the specified * {@link RelationshipType} between this node and the specified other node, * with the specified direction. * * @param otherNode the node at the other end of the relationship. * @param type the {@link RelationshipType} of the relationship. * @param dir the direction of the relationship. Use * {@link Direction#OUTGOING} to create a relationship from this * node to the other node. Use {@link Direction#INCOMING} to * create a relationship from the other node to this node. Use * {@link Direction#BOTH} to create a relationship where the * direction does not matter. * @return the newly created {@link PatternRelationship}. */ public PatternRelationship createOptionalRelationshipTo( PatternNode otherNode, RelationshipType type, Direction dir ) { return this.createRelationshipTo( otherNode, type, true, dir == Direction.BOTH ? false : true ); } PatternRelationship createRelationshipTo( PatternNode otherNode, boolean optional, boolean directed ) { PatternRelationship relationship = new PatternRelationship( this, otherNode, optional, directed ); addRelationship( relationship, optional ); otherNode.addRelationship( relationship, optional ); return relationship; } PatternRelationship createRelationshipTo( PatternNode otherNode, RelationshipType type, boolean optional, boolean directed ) { PatternRelationship relationship = new PatternRelationship( type, this, otherNode, optional, directed ); addRelationship( relationship, optional ); otherNode.addRelationship( relationship, optional ); return relationship; } @Override public String toString() { return this.label; } }
/**
 * In-place combine the right sides of the FDs that have the same left side.
 *
 * @param fds a set of FDs
 */
public static void combineRight(Set<Abhaengigkeit> fds) {
    Map<Set<Attribut>, Set<Attribut>> map = new HashMap<>();
    for (Abhaengigkeit fd : fds) {
        if (map.containsKey(fd.left)) {
            map.get(fd.left).addAll(fd.right);
        } else {
            map.put(fd.left, fd.getRight());
        }
    }
    fds.clear();
    for (Set<Attribut> left : map.keySet()) {
        fds.add(new Abhaengigkeit.Builder().left(left).right(map.get(left)).build());
    }
}
#include <bits/stdc++.h> using namespace std; typedef long long INT; typedef double DO; typedef pair<int, int> pii; typedef vector<int> VI; const int M = 26; const int NN = 100100; int cnt[NN<<2][M], ans[M], flag[NN<<2], tans[M]; char s[NN]; #define ls (u<<1) #define rs (ls|1) #define mid (L+R>>1) void push_up(int u) { for (int i=0; i<M; i++) cnt[u][i]=cnt[ls][i]+cnt[rs][i]; } void push_down(int u, int L, int R) { if (!flag[u]) return ; flag[ls]=flag[rs]=flag[u]; int S=0, E=M, stp=1; if (flag[u]==2) S=M-1, E=-1, stp=-1; int sz = mid-L; for (int i=S; i!=E; i+=stp) { if (sz<=cnt[u][i]) { cnt[ls][i]=sz; cnt[rs][i]=cnt[u][i]-sz; sz=0; } else { cnt[ls][i]=cnt[u][i]; cnt[rs][i]=0; sz-=cnt[u][i]; } } flag[u]=0; } void build(int u, int L, int R) { if (L+1==R) { cnt[u][s[L]]++; return ; } build(ls, L, mid); build(rs, mid, R); push_up(u); } void update(int u, int L, int R, int p, int x) { if (R<=p || p<L) return ; if (L+1==R) { memset(cnt[u], 0, sizeof cnt[u]); cnt[u][x]++; return ; } push_down(u, L, R); update(ls, L, mid, p, x); update(rs, mid, R, p, x); push_up(u); } void update(int u, int L, int R, int st, int ed, int ok) { if (R<=st || ed<=L) return ; if (st<=L && R<=ed) { int sz = R-L; flag[u]=ok; int S=0, E = M, stp=1; if (ok==2) S=M-1, E=-1, stp=-1; for (int i=S; i!=E; i+=stp) { if (sz<=ans[i]) { cnt[u][i]=sz; ans[i]-=sz; sz=0; } else { cnt[u][i]=ans[i]; sz-=ans[i]; ans[i]=0; } } return ; } push_down(u, L, R); update(ls, L, mid, st, ed, ok); update(rs, mid, R, st, ed, ok); push_up(u); } void calc(int u, int L, int R, int st, int ed) { if (R<=st || ed<=L) return ; if (st<=L && R<=ed) { for (int i=0; i<M; i++) ans[i]+=cnt[u][i]; return ; } push_down(u, L, R); calc(ls, L, mid, st, ed); calc(rs, mid, R, st, ed); } void calc_ans(int u, int L, int R) { if (L+1==R) { for (int i=0; i<M; i++) if (cnt[u][i]) s[L]='a'+i; return ; } push_down(u, L, R); calc_ans(ls, L, mid); calc_ans(rs, mid, R); } int main() { #ifndef ONLINE_JUDGE freopen("in.in", "r", stdin); freopen("out.out", "w", stdout); #else freopen("input.txt", "r", stdin); freopen("output.txt", "w", stdout); #endif int n, m, l, r; cin>>n>>m; scanf("%s", s+1); for (int i=1; i<=n; i++) s[i]-='a'; build(1, 1, n+1); while (m--) { scanf("%d%d", &l, &r); memset(ans, 0, sizeof ans); calc(1, 1, n+1, l, r+1); int cnt=0, id=-1; for (int i=0; i<M; i++) if (ans[i]&1) cnt++, id=i; if (cnt>1) continue; if (cnt) ans[id]--; for (int i=0; i<M; i++) ans[i]>>=1; int md=r-l+1>>1; memcpy(tans, ans, sizeof ans); update(1, 1, n+1, l, l+md, 1); memcpy(ans, tans, sizeof tans); update(1, 1, n+1, l+md+cnt, r+1, 2); if (~id) update(1, 1, n+1, l+md, id); } calc_ans(1, 1, n+1); puts(s+1); return 0; }
//
//  BRGapCell.h
//  TableViewDemo
//
//  Created by brant on 2017/12/7.
//  Copyright © 2017年 Brant. All rights reserved.
//

#import <UIKit/UIKit.h>
#import "BRCellProtocol.h"

/** A cell used as a separator. */
@interface BRGapCell : UITableViewCell <BRCellProtocol>

@end
The Mathematical Model and a Genetic Algorithm for the Two-Echelon Electric Vehicle Routing Problem

In order to cope with the challenges of high cargo load and high timeliness distribution in the logistics industry, as well as to alleviate the current situation of oil resource depletion and air pollution, this study established a mathematical model of the two-echelon electric vehicle routing problem (2E-EVRP) and designed a heuristic algorithm. The 2E-EVRP can be divided into the multiple depot vehicle routing problem (MDVRP) and the split delivery vehicle routing problem (SDVRP). The proposed genetic algorithm is used to solve the MDVRP, and the actual case of a logistics company in Beijing is taken as the computational experiment, so as to verify the feasibility of the proposed algorithm and provide a decision-making reference for the development of logistics enterprises. The results show that the total path length obtained by the proposed algorithm is 20.82 kilometers shorter than that of the traditional simulated annealing algorithm.

Introduction

The distribution mode of the logistics industry can be divided into direct distribution and multi-echelon distribution according to the different levels of the distribution system. Direct distribution refers to delivering goods directly from the distribution center to the customer by means of transportation, while multi-echelon distribution introduces transit nodes between the distribution center and the customer. With the expansion of city scale and increasing logistics demand, the distance between the distribution center and the customer points is growing, so establishing a multi-echelon distribution system is an inevitable trend in the development of the logistics industry. Dantzig and Ramser introduced the vehicle routing problem (VRP) for the first time in 1959. The VRP optimizes the routes of a fleet of vehicles from the depot to customers with different demands, with a certain aim such as the shortest distance, the smallest cost, or the least time consumed. Some well-known heuristic algorithms include simulated annealing, tabu search, genetic algorithms and ant colony algorithms. Vehicle routing problems have different extensions and variants in practical applications, which are mainly divided into the following categories: the capacitated vehicle routing problem (CVRP), which restricts the maximum vehicle load; the vehicle routing problem with time windows (VRPTW), which limits service to within the time required by the customer; the vehicle routing problem with multiple depots (MDVRP), in which goods can be transported from multiple depots; the vehicle routing problem with pick-up and delivery (VRPPD), which considers both the distribution of goods from the distribution center to the customer point and the goods delivered back from the customer point to the distribution center; the vehicle routing problem with split deliveries (SDVRP), in which multiple vehicles can serve one customer so as to make full use of the vehicles' load capacity; and the stochastic vehicle routing problem (SVRP), which appears when some elements of the problem are random, the stochastic factors including road conditions, periodic customer time windows, etc. Scholars initially studied the electric vehicle routing problem (EVRP) in 2012, then Afroditi et al. (2014) and Pelletier et al.
(2016) considered the technical limitations of electric vehicles, including load capacity, battery capacity, charging station location, charging policy, charging station available time and capacity restrictions, and electricity prices in different periods. Some studies have reviewed the green vehicle routing problem. There is less research on two-echelon vehicle routing problems compared with traditional vehicle routing problems. Feliu et al. and Perboli et al. initially proposed the two-echelon vehicle routing problem (2E-VRP) and conducted computational experiments on small-scale data. Hemmelmayr et al. proposed an adaptive large neighborhood search for the 2E-VRP and conducted experiments with 200 customers and 10 satellites. In this study, we introduce the two-echelon electric vehicle routing problem (2E-EVRP) and design a genetic algorithm for its application, which is a practice-based extension of the two-echelon vehicle routing problem (2E-VRP) and the electric vehicle routing problem (EVRP).

Problem Description

The proposed 2E-EVRP is composed of one depot, multiple satellites, charge stations and customers. In the first echelon, traditional vehicles are used to transport goods from the depot to the satellites. In the second echelon, electric vehicles are used to transport goods from the satellites to the customers, as shown in Figure 1. Each customer i requires q_i goods. There are m_1 traditional vehicles of capacity Q_1 in the first echelon and m_k electric vehicles of capacity Q_2 in the second echelon. In summary, the symbol definitions are shown in Table 1.

Mathematical Model

Since the model in this study was established based on the practical situation of the logistics distribution industry, reasonable assumptions help to simplify the complexity of the mathematical model and greatly improve the feasibility of the algorithm. Therefore, the following assumptions are made in this study:
- Traditional fuel vehicles (first echelon) can visit satellites more than once.
- Traditional fuel vehicles (first echelon) must depart from the depot and return back to the depot.
- The unloading time of fuel vehicles and electric vehicles is fixed.
- The charge time is fixed and each time the battery is charged to full capacity.
- Changes in road traffic conditions are not taken into account.
- The energy consumption of an electric vehicle is linearly related to travel distance.
- Given the coordinates of each node, the Euclidean distance is used to measure the distance.

Based on the above problem description and model assumptions, the objective function can be established as follows: the objective function is composed of four parts, namely, the distribution cost of fuel vehicles, the charge cost at charge stations, the energy consumption cost of electric vehicles and the charge cost at satellites. In the above model, Constraints (2) and (3) restrain the number of vehicles. Constraint (4) denotes the continuity of the number of goods at satellites. Constraints (5) and (6) limit the load of vehicles. Constraints (7) and (8)

Algorithm Design

Coding is a key step in designing a genetic algorithm, namely, the feasible solution of a problem is transformed from its solution space to the search space that can be handled by the genetic algorithm. For the MDVRP, using a binary coding mode to represent the chromosome does not easily reflect the structural features of the problem.
Therefore, our study applied a floating-point coding method to improve the genetic algorithm, which means that the length of the chromosome coding depends on the decision variables. The proposed coding mode is suitable for models with high accuracy requirements, and has higher search performance on large-scale datasets. The set of customers is coded as a one-dimensional array, and each customer node can only appear once. The order in the array describes a solution of the MDVRP, so each one-dimensional array denotes a chromosome. In the MDVRP model established in our study, each chromosome in the population corresponds to a solution route, and each solution route can be evaluated by its corresponding distance. Therefore, the objective function f(x) is the total distance of the solution route, where the Euclidean distance is used to measure the distance between nodes. In the process of population evolution, we hope that the fitness value of the population becomes larger, so the fitness function can be specified as the reciprocal of the objective function f(x). In order to avoid increasing the number of similar individuals in the population, which would lead to a halt in evolution, we exclude the roulette wheel method. In order to retain as many individuals with good fitness as possible in the next generation, the elitist preservation method is adopted to carry out the survival-of-the-fittest operation; that is, the individuals with the highest fitness in the current population do not participate in crossover and mutation operations, and they are used to replace the least fit individuals in the current generation after crossover, mutation and other operations. The crossover operator is the main method to generate new individuals, and involves determining the crossover position and performing partial gene exchange. In order to improve the search ability of the algorithm and make full use of the population characteristics and distribution, this paper uses the XO algorithm to perform crossover operations. Taking the generation process of p1 as an example, the schematic diagram of the XO algorithm is shown in Figure 2. From the perspective of the genetic algorithm framework, the mutation operation is random. However, when combined with the selection and crossover operators, the mutation operation can maintain the diversity of the population. In this study, simple mutation was adopted; that is, a mutation operation is performed on the individual code string with a certain mutation probability, and the value of a randomly chosen locus is changed within a certain range.

For the second echelon, composed of the satellites and the customers, M sub-paths starting from the satellites and passing through customers can be obtained by the proposed genetic algorithm. The specific steps to insert charge stations into each sub-path are described as follows (a code sketch of this repair step is given after the Conclusion):
Step 1: Insert the charge station starting from the first sub-path.
Step 2: Based on the energy consumption assumptions, traverse sequentially in the order of the customers in the current sub-path.
Step 3: Determine whether the remaining energy can reach the next customer; if the battery capacity requirement is met, continue to the next step, if not, skip to Step 5.
Step 4: Update the current node and determine whether the current node is a satellite; if it is, skip to Step 6, if not, return to Step 3.
Step 5: Calculate the sum of the distance from the current node to all charging stations and the distance from each charge station to the next node.
Similarly, calculate the sum of the distance from the previous node to all charging stations and the distance from each charging station to the next node. Update the node with the minimum total distance as the current node, update the remaining energy, then return to Step 3.
Step 6: Determine whether the current sub-path m is equal to M; if it is, the algorithm ends, if not, update m = m + 1 and return to Step 2.

Instance Description

The depot of a certain logistics company is located in the southeast of Beijing. In order to alleviate the pressure of warehousing costs in the urban area of Beijing, two satellites were set up to form a two-echelon electric vehicle routing delivery system. The experiment instance consists of 53 customers and 10 charge stations. Based on the actual locations of the customers, the satellites and the depot, we convert them to the coordinate system in a certain proportion, and part of the coordinate data after conversion is shown in Table 2. According to the field investigation, in the first echelon, the load of fuel vehicles Q_1 takes the value of 5000, the average unloading time is 0.5 hours, the average speed is 60 km/h, and the transportation cost per kilometer is 5 yuan. In the second echelon, the load of electric vehicles Q_2 takes the value of 1200, the average unloading time is 0.25 hours, the average speed is 44.7 km/h, the average charging time is 0.5 hours, the maximum travel distance of electric vehicles is 80 kilometers, the energy consumption rate is 0.4 kWh/km, and the average charge cost is 2 yuan/kWh.

Analysis of Results

According to the algorithm solution results, for the first echelon, 3 fuel vehicles are required and the total travel distance is 392.09 km. It takes 3.48 hours to complete the transportation task from the depot to the satellites. For the second echelon, 11 electric vehicles are required and the total travel distance is 885.21 km. It takes 4.49 hours to complete the transportation task from the satellites to the 53 customers. To sum up, the total length of the optimal routes obtained by the proposed algorithm for the 2E-EVRP model is 1,277.30 km, and the total time to realize customer coverage is 6.59 hours. The specific solution is shown in Table 3. The comparison between the traditional simulated annealing algorithm (SA) and our proposed genetic algorithm (GA) is shown in Table 4. Clustering is commonly used in traditional heuristic approaches to solve the MDVRP; that is, the MDVRP is transformed into the traditional VRP by first assigning customers to their corresponding satellites. The proposed genetic algorithm assigns customers to the corresponding satellites optimally according to the current load of the electric vehicles, which greatly improves the integrity of the algorithm and optimizes the solution results. The total travel distance is optimized by 20.82 km.

Conclusion

In view of the increasing freight demand and shopping experience requirements in the logistics industry, our study points out the necessity of establishing a two-echelon vehicle routing distribution system. Considering the current situation of oil resource depletion and air pollution, our study applied electric vehicles in the second echelon to solve the "last kilometer" problem in the urban center. In order to verify the feasibility and effectiveness of the proposed algorithm, an actual logistics company was taken as the experiment instance.
Based on the 2E-EVRP model established in this paper, a genetic algorithm was proposed to solve the instance, and the calculation results were analyzed and compared in terms of the total travel distance and the timeliness of the delivery. The main contributions are as follows:
- The two-echelon electric vehicle routing problem (2E-EVRP) was introduced and the mathematical model for the 2E-EVRP was established, in order to better meet the needs of today's logistics industry.
- A genetic algorithm was proposed and tested on the practical instance. The results showed that the integrity and efficiency of the algorithm are improved compared with traditional algorithms.
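To make the charge-station insertion step (Steps 1-6 in the Algorithm Design section) concrete, the following is a minimal Python sketch, not the authors' implementation. The data layout (node ids mapped to coordinates), the full-recharge assumption, and the simplification of Step 5 to a single detour from the current node are assumptions made here for illustration.

# Illustrative sketch of the second-echelon battery repair step.
# Node coordinates, the station list and the maximum range are assumed inputs,
# not values or identifiers taken from the paper.
import math

def dist(a, b):
    return math.hypot(a[0] - b[0], a[1] - b[1])

def insert_charge_stations(route, coords, stations, max_range):
    """route: node ids of one sub-path, starting and ending at a satellite.
    coords: mapping from node id to (x, y); stations: charge-station ids.
    Inserts a charge station whenever the remaining range cannot reach the
    next node, assuming the battery is fully recharged at every station."""
    repaired = [route[0]]
    remaining = max_range
    for nxt in route[1:]:
        cur = repaired[-1]
        leg = dist(coords[cur], coords[nxt])
        if leg <= remaining:
            repaired.append(nxt)
            remaining -= leg
            continue
        # Step 5 (simplified): detour via the station minimising
        # current -> station -> next; the paper also considers detours
        # starting from the previous node.
        best = min(stations,
                   key=lambda s: dist(coords[cur], coords[s]) + dist(coords[s], coords[nxt]))
        if dist(coords[cur], coords[best]) > remaining:
            raise ValueError(f"no reachable charge station from node {cur}")
        repaired.append(best)
        remaining = max_range - dist(coords[best], coords[nxt])
        repaired.append(nxt)
    return repaired

Applied to every sub-path produced by the genetic algorithm (Step 6), this yields second-echelon routes that respect the stated 80 km range of the electric vehicles.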
package net.sf.l2j.gameserver.network.serverpackets;

import net.sf.l2j.gameserver.model.pledge.ClanMember;

/**
 * Format : (ch) dSd
 * @author -Wooden-
 */
public class PledgeReceivePowerInfo extends L2GameServerPacket
{
	private final ClanMember _member;
	
	public PledgeReceivePowerInfo(ClanMember member)
	{
		_member = member;
	}
	
	@Override
	protected void writeImpl()
	{
		writeC(0xfe);
		writeH(0x3c);
		
		writeD(_member.getPowerGrade()); // power grade
		writeS(_member.getName());
		writeD(_member.getClan().getRankPrivs(_member.getPowerGrade())); // privileges
	}
}
More than 17,600 pets found new homes with loving families nationwide on Saturday for #ClearTheShelters. (Published Sunday, Aug. 16, 2015)

North Texans helped Clear the Shelters on Saturday, finding more than 2,900 pets new homes. Since the promotion of Clear the Shelters started on July 27, more than 3,100 animals were adopted, bringing the grand total to more than 6,000 pets placed in new homes. "It's a good cause, finding all of these animals good homes. It's just the right thing to do," said Ryan Carreker, who adopted a pet Saturday.

Eleven shelters in the Dallas-Fort Worth area reported having no pets left to adopt as of 5 p.m. -- Arlington Animal Services, the Bedford Animal Shelter, Burleson Animal Services, Dallas Animal Services, Animal Hope Pet Adoptions in Fort Worth, the Humane Society of North Texas' Keller adoption center, the Humane Society of Cedar Creek Lake, Terrell Animal Adoption Center, Watauga Animal Control, Weatherford Animal Shelter and Wylie Animal Services.

The day wasn't just about puppies and kittens. Michelle Lombardi and Whytney Blythe took home 12-year-old Diesel and 5-year-old Sissy. "Even though we won't get to spend as much time with them as we'd like, at least we can make the end of their lives as best as we can," they said.

Leona, a mixed-breed dog that had been the Irving shelter's longest resident, finally went home Saturday. Shelter workers cried and hugged Leona as she went to her forever home during NBCDFW.com live coverage. Her new owner, Pearson, said Leona was the first dog he saw and that he fell in love with her calm and laid-back demeanor. George, a pot-bellied pig, was also a superstar. He went home with the Kershners, where he'll live on a farm.

At the SPCA of Texas in Dallas, the last adoption may have been the happiest for shelter workers. The Bills family of Mesquite adopted Powder, a 5-year-old boxer. She had lived a life of abuse and was used for breeding. Shelter workers rooted for Powder to get adopted because she had been passed over so often. When the Bills family picked her she lit up and smothered her new family with hugs and kisses.

What started out as an NBCDFW partnership with local shelters to help animals find loving, forever homes in Dallas-Fort Worth has turned into a nationwide initiative that helped find homes for more than 17,300 animals. Thousands of animal shelters across the country, along with NBC Owned Television Stations, Telemundo Owned Television Stations and the New England Cable News, took part in Clear the Shelters, a nationwide project, and waived most adoption fees during the one-day event. NBC 5 started the animal adoption initiative in 2014, and NBC's commitment is set to continue next year as well.

If you adopted a pet on Saturday, email your photo to [email protected]. Include your name, your pet's name and the name of the shelter!
package com.gioov.behavioral.visitor; import java.util.ArrayList; import java.util.List; /** * @author godcheese [<EMAIL>] * @date 2020-02-12 */ public class Test { public static void main(String[] args) { Visitor visitor = new ConcreteVisitor(); StringElement stringElement = new StringElement("abc"); stringElement.accept(visitor); FloatElement floatElement = new FloatElement(5.5f); floatElement.accept(visitor); List<VisitableImpl> list = new ArrayList<>(); list.add(new StringElement("abc1")); list.add(new StringElement("abc2")); list.add(new StringElement("abc3")); list.add(new FloatElement(1.1f)); list.add(new FloatElement(2.2f)); list.add(new FloatElement(3.3f)); visitor.visitCollection(list); } }
package com.afei.texturedemo; import android.content.Context; import android.content.res.AssetManager; import android.opengl.GLSurfaceView; import javax.microedition.khronos.egl.EGLConfig; import javax.microedition.khronos.opengles.GL10; public class NativeRenderer implements GLSurfaceView.Renderer { private Context mContext; static { System.loadLibrary("native-renderer"); } public NativeRenderer(Context context) { mContext = context; } @Override public void onSurfaceCreated(GL10 gl, EGLConfig config) { registerAssetManager(mContext.getAssets()); glInit(); } @Override public void onSurfaceChanged(GL10 gl, int width, int height) { glResize(width, height); } @Override public void onDrawFrame(GL10 gl) { glDraw(); } public native void registerAssetManager(AssetManager assetManager); public native void glInit(); public native void glResize(int width, int height); public native void glDraw(); }
'use strict'; import { IListOption } from 'chord/music/api/listOption'; export const ARTIST_LIST_OPTIONS: Array<IListOption> = [ { type: 'category', name: '歌手', items: [ { id: null, name: '热门歌手' }, { id: '5001', name: '入驻歌手' }, { id: '1001', name: '华语男歌手' }, { id: '1002', name: '华语女歌手' }, { id: '1003', name: '华语组合/乐队' }, { id: '2001', name: '欧美男歌手' }, { id: '2002', name: '欧美女歌手' }, { id: '2003', name: '欧美组合/乐队' }, { id: '6001', name: '日本男歌手' }, { id: '6002', name: '日本女歌手' }, { id: '6003', name: '日本组合/乐队' }, { id: '7001', name: '韩国男歌手' }, { id: '7002', name: '韩国女歌手' }, { id: '7003', name: '韩国组合/乐队' }, { id: '4001', name: '其他男歌手' }, { id: '4002', name: '其他女歌手' }, { id: '4003', name: '其他组合/乐队' }, ], }, { type: 'initial', name: '索引', items: [ { id: '-1', name: '热门', }, ...Array.from('ABCDEFGHIJKLMNOPQRSTUVWXYZ').map((c) => ({ id: c.charCodeAt(0).toString(), name: c })), { id: '0', name: '其他', }, ], }, ];
//*****************************************************************************
//
//! Enables CPU timer interrupt.
//!
//! \param base is the base address of the timer module.
//!
//! This function enables the CPU timer interrupt.
//!
//! \return None.
//
//*****************************************************************************
static inline void CPUTimer_enableInterrupt(uint32_t base)
{
    ASSERT(CPUTimer_isBaseValid(base));

    //
    // Set TIE bit of TCR register
    //
    HWREGH(base + CPUTIMER_O_TCR) |= CPUTIMER_TCR_TIE;
}
// Put implements kv.CompositeMapUpdater.Put func (m *compositeMap) Put(key composite.Key, value []byte) error { if len(key) == 0 { return fmt.Errorf("key cannot be empty or nil") } if value == nil { return fmt.Errorf("value cannot be nil") } nodes, err := m.nodes(key, true, false) if err != nil { return err } v, err := kv.NamespaceMap(m.flatMap, nodes[len(key)-1].ns).Get(key[len(key)-1]) if err != nil { return fmt.Errorf("could not retrieve key %#v from node %#v: %s", key[len(key)-1], nodes[len(key)-1].ChildRef, err) } var n compositepb.Node if v != nil { if err := n.Unmarshal(v); err != nil { return fmt.Errorf("could not unmarshal key %#v from node %#v: %s", key[len(key)-1], nodes[len(key)-1].ChildRef, err) } } n.Value = value marshaledN, err := n.Marshal() if err != nil { return fmt.Errorf("could not marshal node %#v: %s", n, err) } return kv.NamespaceMap(m.flatMap, nodes[len(key)-1].ns).Put(key[len(key)-1], marshaledN) }
import Mobigen.Common.Log as Log; Log.Init() import pika import ssl import bson import time class DirectQueueClient: def __init__( self ): self.connection = None self.channel = None def disConnect( self ): if self.channel != None: self.channel.close() if self.connection != None: self.connection.close() def connect( self, mqUser, mqPass, mqHost, mqPort, mqVhost ): userInfo = pika.PlainCredentials( mqUser, mqPass ) hostInfo = pika.ConnectionParameters( host = mqHost, port = mqPort, virtual_host = mqVhost, credentials = userInfo ) self.connection = pika.BlockingConnection( hostInfo ) #self.channel = self.connection.channel() def connectSSL( self, mqUser, mqPass, mqHost, mqPort, mqVhost, mqCaCerts, mqCertFile, mqKeyFile ): try : userInfo = pika.PlainCredentials( mqUser, mqPass ) s_options = ({"ca_certs" : mqCaCerts, "certfile" : mqCertFile, "keyfile" : mqKeyFile, "cert_reqs" : ssl.CERT_REQUIRED, "server_side" : False}) hostInfo = pika.ConnectionParameters( host=mqHost, port=mqPort, virtual_host=mqVhost, credentials=userInfo, ssl=True, ssl_options = s_options) self.connection = pika.BlockingConnection(hostInfo) #self.channel = self.connection.channel() except Exception as e : __LOG__.Trace('ERROR : %s ' %e ) time.sleep(60) self.connectSSL(mqUser, mqPass, mqHost, mqPort, mqVhost, mqCaCerts, mqCertFile, mqKeyFile) def connectChannel (self) : try : self.channel = self.connection.channel() #__LOG__.Trace( 'Channel success' ) except Exception as e : __LOG__.Trace('Channel Connection Error : %s' %e ) time.sleep(60) self.connectChannel() def is_open( self ): if self.channel.is_open: if self.connection.is_open: return True return False def disconnect( self ): # try: # if self.channel != None: # __LOG__.Trace("channel dont close") # self.channel.close() # except: # pass # __LOG__.Trace( 'Channel close' ) try: if self.connection != None: #__LOG__.Trace("connection dont close") self.connection.close() except: pass __LOG__.Trace( 'Connect close' ) self.connection = None def disconnectChannel( self ) : try : if self.channel != None : #__LOG__.Trace('channel dont close') self.channel.close() except : pass __LOG__.Trace( 'Channel Close') self.channel = None def exchange_declare(self, exchangeName) : try: self.channel.exchange_declare(exchange = exchangeName, exchange_type= 'direct', durable = True) except Exception as e : __LOG__.Trace('ERROR : %s ' %e ) #except : # __LOG__.Exception() def queue_declare( self, queueName ): try: #autu-delete = True self.channel.queue_declare( queue = queueName, durable = True) except: __LOG__.Exception() def queue_delete( self, queueName ): self.channel.queue_delete( queue= queueName ) def put(self, queueName, message, use_bson=True ): if not queueName == None and not queueName == '' : self.queue_declare(queueName) try : __LOG__.Trace(queueName) tempBody = '' if isinstance(message, str) : tempBody=message else : tempBody=str(message) result = self.channel.basic_publish( exchange = '' , routing_key = queueName , body = tempBody , properties = pika.BasicProperties( delivery_mode = 2 ) , mandatory=True ) __LOG__.Trace(result) except : __LOG__.Exception() def get( self, queueName, use_bson=True ): method_frame, header_frame, message = self.channel.basic_get( queue=queue_name ) # message = message.decode('UTF-8') __LOG__.Trace("RECEIVE MESSGAGE: %s, %s" % (type(message), message)) #if method_frame.name == 'Basic.GetOk': if method_frame: try: self.channel.basic_ack( method_frame.delivery_tag ) if use_bson: # message = bson.decode_all(message) message = bson.BSON(message).decode() # 
message = message.decode() except: __LOG__.Exception("[ERROR] In Channel.basic_ack") else: method_frame = None return ( method_frame, header_frame, message ) #def main(): # import hashlib # import json # import random #if __name__ == '__main__': # main()
import { PendingPage } from '../client/components/pages/PendingPage';

export default PendingPage;
<reponame>anonl/nvlist package nl.weeaboo.vn.impl.render; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.Locale; import java.util.concurrent.TimeUnit; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.google.common.base.Joiner; import nl.weeaboo.common.StringUtil; /** * Rendering statistics. */ public class RenderStats { private static final Logger LOG = LoggerFactory.getLogger(RenderStats.class); private final CommandStats[] cmdStats; private final List<Integer> quadBatchSizes; private int framesRendered; public RenderStats() { cmdStats = new CommandStats[256]; //Command ID is a byte, so max 256 possibilities quadBatchSizes = new ArrayList<>(); } /** Called when rendering starts for a frame. */ public void startRender() { } /** Called when rendering ends for a frame. */ public void stopRender() { framesRendered++; if ((framesRendered & 0x3FF) == 0) { LOG.trace(toString()); } Arrays.fill(cmdStats, null); quadBatchSizes.clear(); } /** Called when a batch of quads is rendered. */ public void onRenderQuadBatch(int count) { quadBatchSizes.add(count); } /** Called when a render command is rendered. */ public void logCommand(RenderCommand cmd, long durationNanos) { CommandStats stats = cmdStats[cmd.id & 0xFF]; if (stats == null) { cmdStats[cmd.id & 0xFF] = stats = new CommandStats(cmd.getClass()); } stats.addRun(); stats.addTime(durationNanos); } /** Flexible method to log extra performance measurements. */ public void logExtra(Class<? extends RenderCommand> cmdClass, int cmdId, long durationNanos) { CommandStats stats = cmdStats[cmdId]; if (stats == null) { cmdStats[cmdId] = stats = new CommandStats(cmdClass); } stats.addTime(durationNanos); } @Override public String toString() { StringBuilder sb = new StringBuilder("[Render Stats]\n"); Joiner.on('\n').skipNulls().appendTo(sb, cmdStats); sb.append("\nQuad Render Batches:"); for (int i : quadBatchSizes) { sb.append(' ').append(i); } return sb.toString(); } private static class CommandStats { private final String label; private int count; private long durationNanos; public CommandStats(Class<?> cmdClass) { label = cmdClass.getSimpleName(); } public void addRun() { this.count++; } public void addTime(long durationNanos) { this.durationNanos += durationNanos; } @Override public String toString() { return String.format(Locale.ROOT, "%s[%03dx] %s", label, count, StringUtil.formatTime(durationNanos, TimeUnit.NANOSECONDS)); } } }
import datetime

def is_utc_today(utc):
    # unix_time() is an existing helper in this codebase that converts a
    # datetime to a Unix timestamp.
    current_time = datetime.datetime.utcnow()
    day_start = current_time - datetime.timedelta(hours=current_time.hour,
                                                  minutes=current_time.minute,
                                                  seconds=current_time.second)
    day_start_utc = unix_time(day_start)
    return (utc - day_start_utc) >= 0
/**
 * Creates a new department.
 * @param departmentName The department name to be persisted
 * @return Response payload based on operation success/failure.
 */
@RequestMapping(value = "/systemadmin/createdepartment/{departmentName}",
        method = { RequestMethod.GET, RequestMethod.POST })
public ResponseEntity<?> addDepartment(@PathVariable String departmentName) {
    Department department = new Department();
    department.setDepartmentName(departmentName);
    departmentRepository.save(department);
    return ResponseEntity.ok(new ApiResponse(true, "Department Added Successfully"));
}
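A minimal client-side sketch of calling the endpoint above follows; the host and port are assumptions, not taken from the project, and the exact response body depends on the project's ApiResponse class.

import requests

# Hypothetical call; the service location is an assumption made for illustration.
resp = requests.post("http://localhost:8080/systemadmin/createdepartment/Engineering")
print(resp.status_code)
print(resp.text)  # expected to contain a success flag and "Department Added Successfully"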
package seppe.alpaerts.eindproject_seppe_jochem.model.winkelmandje;

import seppe.alpaerts.eindproject_seppe_jochem.model.product.DierenProduct;

/**
 * Author: Seppe
 * Class that represents a single row (line item) in the shopping cart.
 */
public class Rij {

    private DierenProduct dierenProduct;
    private int aantal;

    public Rij(DierenProduct dierenProduct, int aantal) {
        this.dierenProduct = dierenProduct;
        this.aantal = aantal;
    }

    public Rij(int aantal) {
        // Only the quantity is known here; the product is set later via setDierenProduct().
        this.aantal = aantal;
    }

    public DierenProduct getDierenProduct() {
        return dierenProduct;
    }

    public void setDierenProduct(DierenProduct dierenProduct) {
        this.dierenProduct = dierenProduct;
    }

    public int getAantal() {
        return aantal;
    }

    public void setAantal(int aantal) {
        this.aantal = aantal;
    }
}
<gh_stars>100-1000 import { ModelCard } from '@allenai/tugboat/lib/ModelCard'; import { ModelInfo, ModelId } from './ModelInfo'; class NoModelCardIdError extends Error { constructor(info: ModelInfo) { super(`Unable to determine id to use for fetching a model card for model ${info.id}.`); } } /** * Returns the id that should be used when fetching a model's model card. * * AllenNLP's identifiers are currently in the process of being migrated to a new format, where * the ids include the task that a model was trained for. * * For some models the demo already has the new id, which is stored in the `pretrained_model_id` * field. For models that don't have that, we'll need a mapping like that shown below. * * See: https://github.com/allenai/allennlp-demo/issues/732 */ export function getModelCardId(info: ModelInfo): string { // When this id exists, we can use it. if (info.pretrained_model_id) { return info.pretrained_model_id; } // Otherwise try to map the model to the identifier it should be using. switch (info.id) { case ModelId.NMN: { return 'rc-nmn'; } case ModelId.LERC: { return 'evaluate_rc-lerc'; } case ModelId.VilbertVQA: { // The api/info is missing a pretrained_model_id for vqa return 'vqa-vilbert'; } case ModelId.ELMOSNLI: { return 'pair-classification-decomposable-attention-elmo'; } default: { throw new NoModelCardIdError(info); } } } export type ModelCardsById = { [id: string]: ModelCard }; export function fetchModelCards(): Promise<ModelCardsById> { return fetch('/api/model-cards/').then((r) => r.json()); }
#pragma once #include "AsyncSemaphore.h" #include "IAsyncProducerConsumerCollection.h" #include "../Collections/ThreadSafeMinimalisticQueue.h" #include "../Tasks/TaskCombinators.h" #include <stdexcept> namespace RStein::AsyncCpp::AsyncPrimitives { template <typename TItem> class SimpleAsyncProducerConsumerCollection : public IAsyncProducerConsumerCollection<TItem> { public: SimpleAsyncProducerConsumerCollection(); SimpleAsyncProducerConsumerCollection(const SimpleAsyncProducerConsumerCollection& other) = delete; SimpleAsyncProducerConsumerCollection(SimpleAsyncProducerConsumerCollection&& other) noexcept = delete; SimpleAsyncProducerConsumerCollection& operator=(const SimpleAsyncProducerConsumerCollection& other) = delete; SimpleAsyncProducerConsumerCollection& operator=(SimpleAsyncProducerConsumerCollection&& other) noexcept = delete; virtual ~SimpleAsyncProducerConsumerCollection() = default; void Add(TItem&& item) override; void Add(const TItem& item) override; Tasks::Task<void> AddAsync(const TItem& item) override; Tasks::Task<void> AddAsync(TItem&& item) override; Tasks::Task<TItem> TakeAsync() override; Tasks::Task<TItem> TakeAsync(CancellationToken cancellationToken) override; std::vector<TItem> TryTakeAll() override; private: Collections::ThreadSafeMinimalisticQueue<TItem> _innerCollection; AsyncSemaphore _asyncSemaphore; }; } template <typename TItem> RStein::AsyncCpp::AsyncPrimitives::SimpleAsyncProducerConsumerCollection<TItem>:: SimpleAsyncProducerConsumerCollection() : IAsyncProducerConsumerCollection<TItem>(), _innerCollection(), _asyncSemaphore(std::numeric_limits<int>::max(), 0) { } template <typename TItem> void RStein::AsyncCpp::AsyncPrimitives::SimpleAsyncProducerConsumerCollection<TItem>::Add(const TItem& item) { _innerCollection.Push(item); _asyncSemaphore.Release(); } template <typename TItem> void RStein::AsyncCpp::AsyncPrimitives::SimpleAsyncProducerConsumerCollection<TItem>::Add(TItem&& item) { _innerCollection.Push(std::forward<TItem>(item)); _asyncSemaphore.Release(); } template <typename TItem> RStein::AsyncCpp::Tasks::Task<void> RStein::AsyncCpp::AsyncPrimitives::SimpleAsyncProducerConsumerCollection<TItem>::AddAsync(const TItem& item) { Add(item); return Tasks::GetCompletedTask(); } template <typename TItem> RStein::AsyncCpp::Tasks::Task<void> RStein::AsyncCpp::AsyncPrimitives::SimpleAsyncProducerConsumerCollection<TItem>::AddAsync(TItem&& item) { Add(std::forward<TItem>(item)); return Tasks::GetCompletedTask(); } template <typename TItem> RStein::AsyncCpp::Tasks::Task<TItem> RStein::AsyncCpp::AsyncPrimitives::SimpleAsyncProducerConsumerCollection<TItem>::TakeAsync() { co_await _asyncSemaphore.WaitAsync().ConfigureAwait(false); auto retValue = _innerCollection.TryPop(); if (!retValue) { throw std::logic_error("Could not take item"); } co_return retValue.value(); } template <typename TItem> RStein::AsyncCpp::Tasks::Task<TItem> RStein::AsyncCpp::AsyncPrimitives::SimpleAsyncProducerConsumerCollection<TItem>::TakeAsync(CancellationToken cancellationToken) { co_await _asyncSemaphore.WaitAsync(cancellationToken).ConfigureAwait(false); auto retValue = _innerCollection.TryPop(); if (!retValue) { throw std::logic_error("Could not take item"); } co_return retValue.value(); } template <typename TItem> std::vector<TItem> RStein::AsyncCpp::AsyncPrimitives::SimpleAsyncProducerConsumerCollection<TItem>::TryTakeAll() { return _innerCollection.PopAll(); }
/** @jest-environment node */

import { handleErr } from '../handle-err';

describe('Nodejs error handler', () => {
  it('throws error when it exists', () => {
    expect.assertions(1);

    function wrapper(): void {
      handleErr(new Error('err'));
    }

    expect(wrapper).toThrow();
  });

  it("doesn't throw when error is missing", () => {
    expect.assertions(1);

    function wrapper(): void {
      handleErr();
    }

    expect(wrapper).not.toThrow();
  });

  it('throws on string input', () => {
    expect.assertions(1);

    function wrapper(): void {
      // eslint-disable-next-line @typescript-eslint/ban-ts-ignore
      // @ts-ignore - Wrong type on purpose
      handleErr('just a string');
    }

    expect(wrapper).toThrow();
  });
});
#include <iostream>
#include <algorithm>
#include <climits>
using namespace std;

int main() {
    // Remove one interior point so that the largest gap between consecutive
    // remaining points is as small as possible.
    int n, arr[1000], temp = 0, current;
    cin >> n;
    for (int i = 1; i <= n; i++)
        cin >> arr[i];
    current = INT_MAX;
    for (int i = 1; i <= n - 2; i++) {
        temp = arr[i + 2] - arr[i]; // gap created by removing point i + 1
        for (int j = 1; j <= n - 1; j++) {
            if (j != i && j != i + 1) // skip the two gaps destroyed by the removal
                temp = max(temp, arr[j + 1] - arr[j]);
        }
        current = min(temp, current);
    }
    cout << current << endl;
}
import React, { useState, useEffect } from 'react'; import { Chart, SmartBoardDashboardProps } from '../interfaces'; import { SmartBoardChartView as ChartView } from './defaultChartView'; export const SmartBoardDashboard = (props: SmartBoardDashboardProps) => { const { chartList, chartGraph, chartOrder, chartCluster } = props; const interactionMode = props.interactionMode ?? 'defaultMode'; const hasInsight = !!props.hasInsight; // when the interactionMode is connection mode and a chart was selected, filter and resort charts const sortedChartList = new Array<Chart>(chartList.length); chartGraph.nodes.forEach((d) => { sortedChartList[chartOrder[d.id]] = d; }); const [connectionID, changeConnectionID] = useState<string>(''); useEffect(() => { return () => {}; }, [connectionID]); let curChartList = sortedChartList; const chartID = sortedChartList.map((d) => d.id); if (chartID.includes(connectionID) && interactionMode === 'connectionMode') { const connectionLinks = chartGraph.links.filter( (d: { source: string; target: string }) => d.source === connectionID || d.target === connectionID ); const connectionNodes = connectionLinks.map((d) => (d.source === connectionID ? d.target : d.source)); connectionLinks.forEach((d, i) => { const id = connectionNodes[i]; const chart = sortedChartList[chartID.indexOf(id)]; chart.description = d.description; }); connectionNodes.unshift(connectionID); const filteredChartList: Chart[] = []; connectionNodes.forEach((d) => { const chart = sortedChartList[chartID.indexOf(d)]; filteredChartList.push(chart); }); curChartList = filteredChartList; } const quitResort = () => { curChartList = sortedChartList; changeConnectionID(''); }; return ( <div id="dashboard"> {curChartList.map((chart) => { const clusterIndex = chartCluster[chart.id]; return ( <ChartView key={chart.id} chartID={chart.id} chartInfo={chart} interactionMode={interactionMode} hasInsight={hasInsight} clusterID={`cluster_${clusterIndex}`} hasLocked={!!connectionID} // if there exist connectionID, it means the dashboard comes into connection view changeConnectionID={changeConnectionID} quitResort={quitResort} /> ); })} </div> ); };
Role of the Five RNA Helicases in the Adaptive Response of Bacillus cereus ATCC 14579 Cells to Temperature, pH, and Oxidative Stresses ABSTRACT In this study, growth rates and lag times of the five RNA helicase-deleted mutants of Bacillus cereus ATCC 14579 were compared to those of the wild-type strain under thermal, oxidative, and pH stresses. Deletion of cshD and cshE had no impact under any of the tested conditions. Deletion of cshA, cshB, and cshC abolished growth at 12°C, confirming previous results. In addition, we found that each RNA helicase had a role in a specific temperature range: deletion of cshA reduced growth at all the tested temperatures up to 45°C, deletion of cshB had impact below 30°C and over 37°C, and deletion of cshC led mainly to a cold-sensitive phenotype. Under oxidative conditions, deletion of cshB and cshC reduced growth rate and increased lag time, while deletion of cshA increased lag time only with H2O2 and reduced growth rate at a high diamide concentration. Growth of the ΔcshA strain was affected at a basic pH independently of the temperature, while these conditions had a limited effect on ΔcshB and ΔcshC strain growth. The RNA helicases CshA, CshB, and CshC could participate in a general adaptation pathway to stressful conditions, with a stronger impact at low temperature and a wider role of CshA.
import { META_KEY } from "./constants"

export const defineMetadata = function (target, key, prop) {
  if (!target[META_KEY]) target[META_KEY] = {}
  target[META_KEY][key] = prop
}

export const getMetadata = function (target, key) {
  return target[META_KEY] && target[META_KEY][key]
}
import { inject, injectable } from 'inversify'; import { pick } from 'lodash'; import { Arg, Args, Ctx, Mutation, Query, Resolver, UseMiddleware } from 'type-graphql'; import { ltAdmin } from '../../domain'; import * as errors from '../../errors'; import { TYPES } from '../../inversify.constants'; import { AuthRequirement, UserService } from '../../services'; import * as types from '../graphqlTypes'; import { NumberValue } from '../graphqlTypes/NumberValue.type'; import { WithAuthRequirement } from '../middlewares'; import { ResolverCtx } from '../types'; @Resolver() @injectable() export class UserResolver { constructor(@inject(TYPES.UserService) public userService: UserService) {} @Query((returns) => types.User, { nullable: true }) async user(@Args() args: types.UserArgs): Promise<types.User | null> { const user = await this.userService.find(args.id); return user ? types.User.of(user) : null; } @Query((returns) => types.UserConnection) @UseMiddleware(WithAuthRequirement(AuthRequirement.LOGGED_IN_AS_ADMIN)) async users(@Args() args: types.UserConnectionArgs): Promise<types.UserConnection> { const connection = await this.userService.findPage(args); return types.UserConnection.of(connection); } @Mutation((returns) => types.UpdateUserOutput) async updateUser( @Arg('input') input: types.UpdateUserInput, @Ctx() ctx: ResolverCtx ): Promise<typeof types.UpdateUserOutput> { const sessionUser = ctx.getSessionUser(); if (!sessionUser.isLoggedIn) { return types.ForbiddenError.of({ message: 'must be logged in' }); } try { await types.UpdateUserInput.validate(input, ctx); } catch (e) { if (e instanceof errors.ForbiddenError) { return types.ForbiddenError.of(e); } else { return types.UnknownError.of(e); } } const { id } = input; try { const attrs = pick(input, ['username', 'email', 'role']); const user = await this.userService.update(id, attrs); return types.User.of(user); } catch (e) { if (e instanceof errors.BadRequestError) { return types.BadRequestError.of(e); } else if (e instanceof errors.NotFoundError) { return types.NotFoundError.of(e); } else if (e instanceof errors.ValidationError) { return types.ValidationError.of(e); } else { return types.UnknownError.of(e); } } } @Query((returns) => types.UserCountOutput) async userCount(@Ctx() ctx: ResolverCtx): Promise<typeof types.UserCountOutput> { const sessionUser = ctx.getSessionUser(); if (!sessionUser.isLoggedIn || ltAdmin(sessionUser.role)) { return types.ForbiddenError.of({ message: 'must be logged in as admin' }); } const count = await this.userService.count(); return NumberValue.of(count); } }
/**
 * Transform the JSON returned by the call to the Flickr API into the JSON to return as a response
 * of our API.
 * TODO: isolate in its own class
 * TODO: finish implementation
 */
private JsonObject transformFlickrApiResponseToResult(JsonObject jsIn) {
    JsonObject jsOut = null;
    final String KEY_CODE = "code";
    final String KEY_STAT = "stat";
    final String KEY_MESSAGE = "message";
    final String JPG_URL_TEMPLATE = "https://farm%s.staticflickr.com/%s/%s_%s_b.jpg";
    final String KEY_FARM = "farm";
    final String KEY_SERVER = "server";
    final String KEY_ID = "id";
    final String KEY_SECRET = "secret";
    try {
        if (jsIn.containsKey(KEY_CODE) && jsIn.containsKey(KEY_STAT)) {
            if (jsIn.getString(KEY_STAT).equalsIgnoreCase("fail")) {
                JsonObject jsErr = new JsonObject();
                jsErr.put("content", "ERROR code:" + jsIn.getInteger(KEY_CODE)
                        + " message:" + jsIn.getString(KEY_MESSAGE));
                return jsErr;
            }
        }
        jsOut = new JsonObject();
        JsonArray jsArrayPhoto = jsIn.getJsonObject("photoset").getJsonArray("photo");
        if (jsArrayPhoto != null) {
            for (Map.Entry<String, String> entry : getMetadata().entrySet()) {
                JsonArray jsArrayPhotoOut = new JsonArray();
                for (Object photo : jsArrayPhoto) {
                    JsonObject metadata = new JsonObject();
                    JsonObject jsPhoto = (JsonObject) photo;
                    Integer nFarmId = jsPhoto.getInteger(KEY_FARM);
                    String sServerId = jsPhoto.getString(KEY_SERVER);
                    String sId = jsPhoto.getString(KEY_ID);
                    String sSecret = jsPhoto.getString(KEY_SECRET);
                    // https://farm{farm-id}.staticflickr.com/{server-id}/{id}_{secret}_[mstzb].jpg
                    String sFormattedURL = String.format(JPG_URL_TEMPLATE,
                            nFarmId.toString(), sServerId, sId, sSecret);
                    System.out.println(sFormattedURL);
                    metadata.put("url", sFormattedURL);
                    jsArrayPhotoOut.add(metadata);
                }
                jsOut.put("metadata", jsArrayPhotoOut);
            }
            jsOut.put("content", "OK");
        }
    } catch (Exception e) {
        JsonObject jsErr = new JsonObject();
        jsErr.put("content", e.getMessage());
        return jsErr;
    }
    return jsOut;
}
package ru.job4j.tracker; import org.junit.After; import org.junit.Before; import org.junit.Test; import java.io.ByteArrayOutputStream; import java.io.PrintStream; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.function.Consumer; import static org.hamcrest.core.Is.is; import static org.junit.Assert.assertThat; /** * Метод-тесты для операций в трекере * * @author <NAME> * @version $Id$ * @since 05.10.2018 */ public class StartUITest { private final PrintStream stdout = System.out; private final ByteArrayOutputStream out = new ByteArrayOutputStream(); private String ln = System.lineSeparator(); private final Consumer<String> consumer = new Consumer<String>() { private final PrintStream printStream = new PrintStream(stdout); @Override public void accept(String s) { System.out.println(s); } }; private StringBuilder menu = new StringBuilder("-----------MENU--------" + ln + "0: ADD" + ln + "1: SHOW_ALL" + ln + "2: EDIT" + ln + "3: DELETE" + ln + "4: FIND_BY_ID" + ln + "5: FIND_BY_NAME" + ln + "6: EXIT" + ln + "-----------------------" + ln); @Before public void loadOutput() { System.setOut(new PrintStream(this.out)); } @After public void backOutput() { System.setOut(this.stdout); } @Test public void whenUserAddItemThenTrackerHasNewItemWithSameName() { Input input = new StubInput(new ArrayList<>(Arrays.asList("0", "test name3", "desc3", "1", "6"))); Tracker trackerTemp = new Tracker(); new StartUI(input, trackerTemp, consumer).init(); assertThat(new String(out.toByteArray()), is(new StringBuffer() .append(this.menu) .append("You selection ADD") .append(ln) .append("Item create") .append(ln) .append(this.menu) .append("You selection SHOW_ALL") .append(ln) .append(trackerTemp.getAll().get(0).toString()) .append(ln) .append(menu) .append("You selection EXIT") .append(ln) .append("Goode bye") .append(ln) .toString() )); } @Test public void whenUserEditItem() { Tracker tracker = new Tracker(); Item item1 = tracker.add(new Item("test name", "desc")); Item item2 = tracker.add(new Item("test2 name2", "desc2")); String id = item1.getId(); Input input = new StubInput(new ArrayList<>(Arrays.asList("2", id, "test3 name3", "desc3", "1", "6"))); new StartUI(input, tracker, consumer).init(); assertThat(new String(out.toByteArray()), is(new StringBuffer() .append(this.menu) .append("You selection EDIT") .append(ln) .append("Item is update") .append(ln) .append(menu) .append("You selection SHOW_ALL") .append(ln) .append(tracker.getAll().get(0).toString()) .append(ln) .append(tracker.getAll().get(1).toString()) .append(ln) .append(menu) .append("You selection EXIT") .append(ln) .append("Goode bye") .append(ln) .toString() )); } @Test public void whenUserRemoveItem() { Tracker tracker = new Tracker(); Item item1 = tracker.add(new Item("test name", "desc")); Item item2 = tracker.add(new Item("test2 name2", "desc2")); String id = item1.getId(); Input input = new StubInput(new ArrayList<>(Arrays.asList("3", id, "1", "6"))); new StartUI(input, tracker, consumer).init(); assertThat(new String(out.toByteArray()), is(new StringBuffer() .append(this.menu) .append("You selection DELETE") .append(ln) .append("Item remove") .append(ln) .append(this.menu) .append("You selection SHOW_ALL") .append(ln) .append(tracker.getAll().get(0).toString()) .append(ln) .append(menu) .append("You selection EXIT") .append(ln) .append("Goode bye") .append(ln) .toString() )); } }
Evidence that the response to applied forces of continuously erupting rat incisors contains more than one component. Forces (2.5-15 mN) applied in a direction that opposed the eruption of rat maxillary incisors made these teeth sink into their sockets at a decreasing rate. Sometimes eruption subsequently restarted from this intruded position while the force was still applied. After the forces were removed, all the incisors extruded, rapidly at first, then slowing to normal rates of eruption. Forces (2.5-20 mN) applied in a direction that aided the eruption made the teeth rise in their sockets, initially rapidly, then slowing to a steady rate. After the forces were removed, the incisors sank back into their sockets at a decreasing rate. These responses could be separated into two components: rapid movements occurring immediately after applying or removing the forces, and slower movements occurring at rates that were maintained as long as the force was applied, which could be regarded as eruption.
Effect of Synthesis Conditions on Capacitive Properties of Porous Carbon Derived from Hemp Bast Fiber A systematic study of the influence of synthesis conditions on the structural, morphological, and electrical properties, as well as the electrochemical performance of hemp fiber-derived carbon materials was performed. An analysis of the capacitive response of carbons obtained under various activation conditions with additional treatment with HNO3 and annealing was completed. The contribution of the formation of an electrical double layer at the outer electrode–electrolyte interface, as well as on surfaces inside micropores, has been studied and analyzed in terms of the effect of the turbostratic carbon properties (average lateral size of graphite crystallites, pore size distribution, BET surface area).
MOGADISHU, Somalia — Nearly 100 members of the Shabab, the Qaeda-linked insurgent group in Somalia, were killed in separate strikes this week, according to Somali and American officials. More than a dozen militants were killed in a United States airstrike 20 miles north of the capital of Mogadishu, according to Samantha Reho, a spokeswoman with the United States Africa Command. The Friday strike was carried out in coordination with the Somali government, she said in an email. It was the 23rd American airstrike in the country this year, and the second in 24 hours. On Thursday, airstrikes killed “several militants” in the Bay region, about 100 miles west of Mogadishu, according to a statement from the Africa Command. Separately, the Somali National Intelligence and Security Agency killed 81 Shabab fighters earlier this week in Jilib, in southern Somalia, government officials said on Saturday.
/* Code decompiled incorrectly, please refer to instructions dump. */ static /* synthetic */ void b(com.yandex.metrica.impl.ob.bq r13) { /* java.lang.String r0 = "type" java.lang.String r1 = "value" java.lang.String r2 = "key" r3 = 0 com.yandex.metrica.impl.ob.bv r4 = r13.f // Catch:{ Exception -> 0x0093, all -> 0x0088 } android.database.sqlite.SQLiteDatabase r4 = r4.a() // Catch:{ Exception -> 0x0093, all -> 0x0088 } java.lang.String r6 = r13.a() // Catch:{ Exception -> 0x0094, all -> 0x0086 } java.lang.String[] r7 = new java.lang.String[]{r2, r1, r0} // Catch:{ Exception -> 0x0094, all -> 0x0086 } r8 = 0 r9 = 0 r10 = 0 r11 = 0 r12 = 0 r5 = r4 android.database.Cursor r5 = r5.query(r6, r7, r8, r9, r10, r11, r12) // Catch:{ Exception -> 0x0094, all -> 0x0086 } L_0x001f: boolean r6 = r5.moveToNext() // Catch:{ Exception -> 0x0084, all -> 0x0081 } if (r6 == 0) goto L_0x0078 int r6 = r5.getColumnIndex(r2) // Catch:{ Exception -> 0x0084, all -> 0x0081 } java.lang.String r6 = r5.getString(r6) // Catch:{ Exception -> 0x0084, all -> 0x0081 } int r7 = r5.getColumnIndex(r1) // Catch:{ Exception -> 0x0084, all -> 0x0081 } java.lang.String r7 = r5.getString(r7) // Catch:{ Exception -> 0x0084, all -> 0x0081 } int r8 = r5.getColumnIndex(r0) // Catch:{ Exception -> 0x0084, all -> 0x0081 } int r8 = r5.getInt(r8) // Catch:{ Exception -> 0x0084, all -> 0x0081 } boolean r9 = android.text.TextUtils.isEmpty(r6) // Catch:{ Exception -> 0x0084, all -> 0x0081 } if (r9 != 0) goto L_0x001f r9 = 1 if (r8 == r9) goto L_0x005b r9 = 2 if (r8 == r9) goto L_0x0056 r9 = 3 if (r8 == r9) goto L_0x0051 r9 = 4 if (r8 == r9) goto L_0x0070 L_0x004f: r7 = r3 goto L_0x0070 L_0x0051: java.lang.Long r7 = com.yandex.metrica.impl.utils.i.a(r7) // Catch:{ Exception -> 0x0084, all -> 0x0081 } goto L_0x0070 L_0x0056: java.lang.Integer r7 = com.yandex.metrica.impl.utils.i.b(r7) // Catch:{ Exception -> 0x0084, all -> 0x0081 } goto L_0x0070 L_0x005b: java.lang.String r8 = "true" boolean r8 = r8.equals(r7) // Catch:{ Exception -> 0x0084, all -> 0x0081 } if (r8 == 0) goto L_0x0066 java.lang.Boolean r7 = java.lang.Boolean.TRUE // Catch:{ Exception -> 0x0084, all -> 0x0081 } goto L_0x0070 L_0x0066: java.lang.String r8 = "false" boolean r7 = r8.equals(r7) // Catch:{ Exception -> 0x0084, all -> 0x0081 } if (r7 == 0) goto L_0x004f java.lang.Boolean r7 = java.lang.Boolean.FALSE // Catch:{ Exception -> 0x0084, all -> 0x0081 } L_0x0070: if (r7 == 0) goto L_0x001f java.util.Map<java.lang.String, java.lang.Object> r8 = r13.f804a // Catch:{ Exception -> 0x0084, all -> 0x0081 } r8.put(r6, r7) // Catch:{ Exception -> 0x0084, all -> 0x0081 } goto L_0x001f L_0x0078: com.yandex.metrica.impl.bk.a((android.database.Cursor) r5) L_0x007b: com.yandex.metrica.impl.ob.bv r13 = r13.f r13.a(r4) return L_0x0081: r0 = move-exception r3 = r5 goto L_0x008a L_0x0084: r3 = r5 goto L_0x0094 L_0x0086: r0 = move-exception goto L_0x008a L_0x0088: r0 = move-exception r4 = r3 L_0x008a: com.yandex.metrica.impl.bk.a((android.database.Cursor) r3) com.yandex.metrica.impl.ob.bv r13 = r13.f r13.a(r4) throw r0 L_0x0093: r4 = r3 L_0x0094: com.yandex.metrica.impl.bk.a((android.database.Cursor) r3) goto L_0x007b */ throw new UnsupportedOperationException("Method not decompiled: com.yandex.metrica.impl.ob.bq.b(com.yandex.metrica.impl.ob.bq):void"); }
export default ((code: number) => {
    switch (code) {
        case 400:
            return "400 Bad Request, make sure you made the request right, because something was malformed.";
        case 401:
            return "401 Unauthorized, if authorization was required, make sure you provided it.";
        case 403:
            return "403 Forbidden, you do not have permission to access whatever you were trying to access.";
        case 404:
            return "404 Not Found, make sure you are trying the right endpoint. Check the documentation, it may have changed.";
        case 405:
            return "405 Method Not Allowed, make sure you are using the right method, the server does not allow whatever method you tried to use.";
        case 410:
            return "410 Gone, whatever you tried to fetch has been moved or deleted.";
        case 413:
            return "413 Payload Too Large, whatever you tried to send was too large for the server to handle.";
        case 414:
            return "414 Request-URI Too Long, the request url was too long for the server to handle.";
        case 415:
            return "415 Unsupported Media Type, the server does not support the media type you tried to use.";
        case 429:
            return "429 Too Many Requests, please slow down, you are being rate limited.";
        case 500:
            return "500 Internal Server Error, the server had an internal error, please try again later.";
        case 501:
            return "501 Not Implemented, whatever you tried has not been implemented at the server.";
        case 502:
            return "502 Bad Gateway, the server, while trying to contact a local proxy instance, failed.";
        case 503:
            return "503 Service Unavailable, the service is not available, please try again later.";
        case 504:
            return "504 Gateway Timeout, the server, while trying to contact a local proxy instance, timed out.";
        case 505:
            return "505 HTTP Version Not Supported, the http version you are trying to use is not supported.";
    }
});
Spatially extended PAHs in circumstellar disks around T Tauri and Herbig Ae stars To determine the presence and location of the emission from Polycyclic Aromatic Hydrocarbons (PAHs) towards low and intermediate mass young stars with disks using large aperture telescopes. VLT-VISIR N-band spectra and VLT-ISAAC and VLT-NACO L-band spectra of 29 sources are presented, spectrally resolving the 3.3, 8.6, 11.2 and 12.6 μm PAH features. Spatial extent profiles of the features and the continuum emission are derived and used to associate the PAH emission with the disks. The results are discussed in the context of recent PAH emission disk models. The 3.3, 8.6 and 11.2 μm PAH features are detected toward a small fraction of the T Tauri stars, with typical upper limits between 1×10−15 and 5×10−17 W m−2. All 11.2 μm detections from a previous Spitzer survey are confirmed with (tentative) 3.3 μm detections, and in all PAH sources both the 8.6 and the 11.2 μm features are detected. For 6 detections, the spatial extent of the PAH features is confined to scales typically smaller than 0.12–0.34′′, consistent with the radii of 12-60 AU disks at their distances (typically 150 pc). For 3 additional sources, WL 16, HD 100546 and TY CrA, one or more of the PAH features are more extended than the hot dust continuum of the disk, whereas for Oph IRS 48, the size of the resolved PAH emission is confirmed to be smaller than that of the large grains. For HD 100546, the 3.3 μm emission is confined to a small radial extent of 12±3 AU, most likely associated with the outer rim of the gap in this disk. Gaps with radii out to 10–30 AU may also affect the observed PAH extent for other sources. For both Herbig Ae and T Tauri stars, the small measured extents of the 8.6 and 11.2 μm features are consistent with larger (≥ 100 carbon atoms) PAHs.
<gh_stars>0 package net.rushashki.social.shashki64.client.config; import com.google.gwt.inject.client.AbstractGinModule; import com.google.gwt.place.shared.PlaceController; import com.google.inject.Provider; import com.google.web.bindery.event.shared.EventBus; import com.google.web.bindery.event.shared.SimpleEventBus; import net.rushashki.social.shashki64.client.rpc.GameRpcServiceAsync; import net.rushashki.social.shashki64.client.rpc.GameMessageRpcServiceAsync; import net.rushashki.social.shashki64.client.rpc.ProfileRpcServiceAsync; import net.rushashki.social.shashki64.client.util.ShashkiLogger; import net.rushashki.social.shashki64.client.view.*; import net.rushashki.social.shashki64.client.view.ui.*; import net.rushashki.social.shashki64.shared.locale.ShashkiConstants; import javax.inject.Inject; import javax.inject.Singleton; /** * Created with IntelliJ IDEA. * User: alekspo * Date: 23.11.14 * Time: 13:23 */ public class ShashkiGinModule extends AbstractGinModule { @Override protected void configure() { bind(ShashkiLogger.class).in(Singleton.class); bind(EventBus.class).to(SimpleEventBus.class).in(Singleton.class); bind(PlaceController.class).toProvider(PlaceProvider.class).in(Singleton.class); bind(ShashkiConstants.class).in(Singleton.class); bind(HomeView.class).to(HomeViewUi.class).in(Singleton.class); bind(PlayTapeView.class).to(PlayTapeViewUi.class).in(Singleton.class); bind(PlayView.class).to(PlayViewUi.class).in(Singleton.class); bind(SignInView.class).to(SignInViewUi.class).in(Singleton.class); bind(ProfileView.class).to(ProfileViewUi.class).in(Singleton.class); bind(SettingsView.class).to(SettingsViewUi.class).in(Singleton.class); bind(AboutUsView.class).to(AboutUsViewUi.class).in(Singleton.class); bind(NotFoundView.class).to(NotFoundViewUi.class).in(Singleton.class); bind(ProfileRpcServiceAsync.class).in(Singleton.class); bind(GameMessageRpcServiceAsync.class).in(Singleton.class); bind(GameRpcServiceAsync.class).in(Singleton.class); } static class PlaceProvider implements Provider<PlaceController> { @Inject EventBus eventBus; @Override public PlaceController get() { return new PlaceController(eventBus); } } }
Cooper McKim is a rising junior at Tufts University fascinated by the dynamics of environmental work with policy, entrepreneurship and activism. McKim studies environmental policy and helps write a monthly newsletter for the environmental studies program. He has interned with Massachusetts Rivers Alliance, a New Jersey Congressman and the NPR affiliate station WSHU.

"Tufts will divest, whether that's 50 years from now when the environment is so chaotic that fossil fuels are simply not a good investment, or in one, two or five years. It truly is a moral imperative. Every day that we invest in fossil fuels, we continue to say that they are a good investment."

Photo credit: Tufts Divest

In a LiveLeak video, an iPhone screen rises above the back of a chair, showing a grainy image of a room full of prospective Tufts students and an admissions person. She is explaining the differences between engineering psychology and management when a female Tufts student next to the camera raises her hand: "I was wondering if you could talk a little bit about Tufts investment in the fossil fuel industry." The flustered admissions person tries to shrug it off, but the student, Eliza Slocum, persists: "Could you just make a general statement..." The two students next to Slocum then intervene, hoping to get a statement. One of them, Pearl, tries one last time, saying, "We don't want to take up any more of your time..." Then a father marches down the aisle and leans over Pearl: "Enough, enough, no. That's too much time. She's done with you, okay? She's done with you. Stop wasting our time. I'm gonna get security if you don't shut the hell up, okay?" The packed room of prospective families claps.

This small catastrophe was Tufts Divest's first direct action aimed at admissions. At a school where admissions is sacred, the public reaction was not good. Membership in Tufts Divest dropped by 30 percent, including leaders like Kit Collins, who couldn't align with those tactics. Collins, a percussionist and rising senior, said, "This environmental justice community is being co-opted by a desire to be seen as activists and to do righteous activist-y things for people's egos as opposed to doing activist-y things because they're good for our goal."

Evan Bell, a soft-spoken remaining leader of Tufts Divest, admits that there was a certain sense of urgency regarding this action: "I think it was dealt with in somewhat of a state of panic, a measured panic. It wasn't highly strategic and as it went poorly, it just went more poorly." The tactics were not well defined, and most of the organizing happened among a small group on Facebook in the day or two before. The leaked video fueled the controversy, reinforcing the stigma already growing around the Tufts Divest movement.

Despite all of these negative reactions, Bell insists the action was still important: "We needed a Board of Trustees meeting, that's part of the story we were trying to tell. We worked within the bureaucracy and it didn't work, we had to push it." In the end, this action did get the attention of the administration and opened the door to a Board of Trustees meeting.

Like this action, divestment is more complicated than its pithy title suggests. Bell says, "I don't do divestment because I care about Tufts endowment that much. It's about setting cultural precedents, stigmatizing the fossil fuel industry... and offering an on ramp, where college students can plug-in and join an international movement."
With more than 150 other campus divestment movements, it really does offer an international network at a local scale. Events like Powershift and Climate Summer, organized by 350.org and Better Future Project, allow divesters around the world to coordinate, build an informed local movement and present a united front. Bell, having participated in many events like these, says, "I've been able to see so many people go from knowing and caring, to acting and making real change."

The larger divest movement provides direction to the local movements, and individual secondary targets provide momentum. For example, Tufts Divest banded together with other northeastern schools to perform a direct action in the TransCanada offices in Massachusetts in 2013, making young people's opposition to the Keystone XL pipeline very clear. In 2014, Tufts was one of many schools across the country to form a blockade in front of the White House, also opposing Keystone XL. The efficiency, power and unity represented by young people at the blockade were all thanks to divestment. Each secondary target at a local level contributes to movement building and overall momentum.

Tufts Divest does still have a problem, though. The Board of Trustees responded with a stern no regarding divestment. After that meeting and the admissions faux pas, the group must rebrand and commit to a firm direction for next year. Bell says, "It's not just about bringing on more people and asking the same questions. Tufts Divest has yet to find an intermediate ask, and intermediate way of building that power."

Bell has confidence that Tufts will eventually make the right choice: "Tufts will divest, whether that's 50 years from now when the environment is so chaotic that fossil fuels are simply not a good investment, or in one, two or five years. It truly is a moral imperative. Every day that we invest in fossil fuels, we continue to say that they are a good investment."
#include <stdlib.h> typedef struct { int re,im; } icplx_t; int main(int argc, char *argv[]) { icplx_t i ; i = (icplx_t){0,1}; int one = ((icplx_t){0,1}).im; return 0; }
/**
 * This package contains annotations needed to specify meta/completed POJO definitions.
 */
package wasys.lib.pojo_http_data.api.annotations;
def require_upload(self):
    """Read an uploaded private key (PEM) from the form and stash the get/create args."""
    formStash = self.formStash
    getcreate_args = {}
    if formStash.results["private_key_file_pem"] is not None:
        # A PEM file was uploaded; read its contents and record the upload type.
        self.upload_type = "pem"
        self.private_key_pem = getcreate_args["key_pem"] = formhandling.slurp_file_field(
            formStash, "private_key_file_pem"
        )
    self.getcreate_args = decode_args(getcreate_args)
/// Returns new particles after a collision. /// Assumes that the particles are tangent to each other. /// The first particle returned corresponds to self. /// /// # Panics /// - if the two particles are not tangent (or within 1e-5 units) pub fn bounce(&self, other: &Particle) -> (Particle, Particle) { let r_t = self.r + other.r; let dx = &self.x - &other.x; // only works for particles in contact assert!( (dx.norm() - r_t).abs() < 1e-5, "bounce was given non-tangent particles:\n\ distance: {:?}\n\ self: {:?}\n\ other: {:?}\n", (&self.x - &other.x).norm(), self, other ); let dv = &self.v - &other.v; let m_r = self.m * other.m / (self.m + other.m); // dp = 2 m1 m2 / (m1 + m2) (dv . \hat{dx}) \hat{dx} let dp = dx.scale(2. * m_r * (&dv * &dx) / dx.norm2()); let v1 = &self.v - &dp.scale(1. / self.m); let v2 = &other.v + &dp.scale(1. / other.m); let p1 = Particle { v: v1, .. self.clone() }; let p2 = Particle { v: v2, .. other.clone() }; (p1, p2) }
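For reference, here is a brief derivation sketch of the impulse that bounce applies; this is the standard elastic-collision result implied by the code, written out for clarity rather than taken from the crate's documentation. With unit normal \(\hat n = (\vec x_1 - \vec x_2)/\lVert \vec x_1 - \vec x_2 \rVert\) and masses \(m_1, m_2\), conservation of momentum and kinetic energy for a perfectly elastic collision give an impulse along \(\hat n\):

\[
\Delta\vec p = \frac{2\, m_1 m_2}{m_1 + m_2}\,\big((\vec v_1 - \vec v_2)\cdot\hat n\big)\,\hat n,
\qquad
\vec v_1' = \vec v_1 - \frac{\Delta\vec p}{m_1},
\qquad
\vec v_2' = \vec v_2 + \frac{\Delta\vec p}{m_2}.
\]

This matches dp = dx.scale(2. * m_r * (&dv * &dx) / dx.norm2()) and the two scaled velocity updates, since \(\big((\vec v_1 - \vec v_2)\cdot\hat n\big)\hat n = \frac{(\vec v_1 - \vec v_2)\cdot(\vec x_1 - \vec x_2)}{\lVert \vec x_1 - \vec x_2 \rVert^{2}}\,(\vec x_1 - \vec x_2)\).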
Over the last few weeks we've seen DDoS attacks hitting our systems that show that attackers have switched to new, large methods of bringing down web applications. They appear to come from an IoT botnet (like Mirai and relations) which were responsible for the large attacks against Brian Krebs. Our automatic DDoS mitigation systems have been handling these attacks, but we thought it would be interesting to publish some of the details of what we are seeing. In this article we'll share data on two attacks, which are perfect examples of the new trends in DDoS. CC BY 2.0 image by E Magnuson In the past we've written extensively about volumetric DDoS attacks and how to mitigate them. The attacks are distinguished by their heavy use of L7 (i.e. HTTP) attacks as opposed to the more familiar SYN floods, ACK floods, and NTP and DNS reflection attacks. Many DDoS mitigation systems are tuned to handle volumetric L3/4 attacks; in this instance attackers have switched to L7 attacks in an attempt to knock web applications offline. Seeing the move towards L7 DDoS attacks we put in place a new system that recognizes and blocks these attacks as they happen. The L7 mitigator recognizes attacks against a single host and distributes a fingerprint that protects all 4 million Cloudflare customers. We'll write more about it in the future. HTTP Requests per second Often when DDoS attacks are reported the size of the attack is reported in Gbps (or even Tbps), but there are many ways to measure the size of an attack. For L7 HTTP-based attacks it also makes sense to measure requests per second. That's because, unlike volumetric L3/4 attacks, HTTP-based attacks eat up resources by making actual HTTP requests to the attacked server. Recently we were hit by a couple of unusually large L7 attacks, crossing 1 million HTTP requests per second (1 Mrps). Here is one of them: This attack continued for 15 minutes. Multiple recent attacks had >1 Mrps and lasted for minutes. This particular attack peaked at 1.75 Mrps. It was composed of short HTTP requests (around 121 bytes per request), without anything unusual in the HTTP headers. The requests had a fixed Cookie header. We counted 52,467 unique IP addresses taking part in this attack. Due to the Anycast nature of the Cloudflare network, the malicious traffic was spread across multiple Cloudflare cities and with 100 cities we are able to get a good picture of where the bots are located. Here are the top affected datacenters: This attack went largely to our Hong Kong and Prague datacenters. This is another common characteristic; most of the recent attacks looked similar. Since the attack looks concentrated, we wondered if only a small number of AS numbers (networks) were the source of the attack. Unfortunately no, the IP addresses participating in the flood are evenly distributed. Out of 10,000 random requests we analyzed, we saw source IP addresses from over 300 AS numbers. These are the biggest sources: 48 AS24086 ; Vietnam 101 AS4134 ; China 128 AS7552 ; Vietnam 329 AS45899 ; Vietnam 2366 AS15895 ; Ukraine These attacks are a new trend, so it's not fair to blame the AS operators for not cleaning up devices participating in them. Having said that, the Ukrainian ISP and Vietnamese AS45899 seem to stand out. We'll get back to those in a moment. Bandwidth Although requests per second is a common metric for measuring these attacks, it's not the only one. We can also measure the bandwidth used in the attack. 
By this count the attack mentioned above was pretty small (since we've got used to DDoS attacks being reported in 100s of Gbps). It peaked at roughly 2Gbps. But recall that these L7 attacks end up hitting a web server and are not simply volumetric: they use server resources. However, we saw another attack that was unusual in that it was an L7 attack with bandwidth consumption similar to traditional L3/4 volumetric attacks. First, here's the requests per second graph: This attack generated "only" 220k requests per second at peak. However, it generated significant inbound bandwidth: This attack topped out at 360Gbps of inbound HTTP traffic. It's pretty unusual for an HTTP attack to generate a substantial amount of network traffic. This attack was special, and was composed of HTTP requests like this:

GET /en HTTP/1.1
User-Agent: <some string>
Cookie: <some cookie>
Host: example.com
Connection: close
Content-Length: 800000

a[]=&b[]=&a[]=&b[]=&a[]=&b[]=&a[]=&b[]=&a[]=&b[]=&a[]=&b[]=...

It's the long payload sent after the request headers that allowed the attackers to generate substantial traffic (a rough back-of-the-envelope check of these request-rate and bandwidth figures is sketched after this article). Since this attack, we've seen similar events with varying parameters in the request body. Sometimes these attacks came as GET requests, sometimes as POST. Additionally, this particular attack lasted roughly one hour, with 128,833 unique IP addresses. The datacenter distribution was different, with most of the attack concentrated on Frankfurt: As the attack was composed of a very large number of bots, we expected the AS distribution to be fairly even. Indeed, in the 10,000 request sample we recorded a whopping 737 unique AS numbers. Here are the top sources:

286 AS45899 ; Vietnam
314 AS7552 ; Vietnam
316 AS3462 ; Taiwan
323 AS18403 ; Vietnam
1510 AS15895 ; Ukraine

Once again, the Ukrainian ISP and a couple of Vietnamese networks are the top hitters.

More on the sources

We wondered why AS15895 was so special. First, we investigated our traffic charts. Here is the inbound traffic we received from them over the last 30 days: The first significant attack was clearly seen as a spike on September 29 and reached 30Gbps. A very similar chart is visible for AS45899: We can see some smaller attacks attempted around September 26. A couple of days later the attackers turned the throttle up, hitting 7.5Gbps non-stop from this ASN. Other AS numbers we investigated reveal a similar story.

Devices

While it's not possible for us to investigate all the attacking devices, it is fair to say that these attacks came from the Internet-of-Things (IoT) category of devices. There are multiple hints confirming this theory. First, all of the attacking devices have port 23 (telnet) open (closing the connection immediately) or closed. Never filtered. This is a strong hint that the malware disabled the telnet port just after it installed itself. Most of the hosts from the Vietnamese networks look like connected CCTV cameras. Multiple have port 80 open, presenting a "NETSurveillance WEB" page. The Ukrainian devices are a bit different though. Most have port 80 closed, making them harder to identify. We had noticed one device with port 443 open serving a valid TLS cert issued by Western Digital, handling the domain device-xxxx.wd2go.com, suggesting it was a hard drive (Network Attached Storage to be precise).

未来: the future of DDoS

We plan to continue our investigation and collaborate with external researchers to find a permanent solution to this rising threat.
Although the most recent attacks have mostly involved Internet-connected cameras, there's no reason to think that they are likely the only source of future DDoS attacks. As more and more devices (fridges, fitness trackers, sleep monitors, ...) are added to the Internet they'll likely be unwilling participants in future attacks. CC BY 2.0 image by CODE_n We're hiring multiple roles, including our DDoS mitigation team. Help us save the Internet from DDoS attacks. Originally this article attributed the Mirai botnet for the shown attacks. We now believe that, for technical reasons, the large-bandwidth attack might not have come from a botnet running the leaked Mirai code.
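As a rough cross-check of the request-rate and bandwidth figures in the article above (a back-of-the-envelope sketch of my own, not part of the original post; it ignores TCP/IP and TLS overhead):

# Convert an HTTP request rate and an average request size into an implied bandwidth.

def implied_bandwidth_gbps(requests_per_second: float, bytes_per_request: float) -> float:
    """Implied inbound bandwidth in Gbps for a given request rate and request size."""
    return requests_per_second * bytes_per_request * 8 / 1e9

# First attack: ~1.75 Mrps of ~121-byte requests works out to roughly 2 Gbps,
# matching the "pretty small" bandwidth described above.
print(implied_bandwidth_gbps(1.75e6, 121))  # ~1.7 Gbps

# Second attack: ~360 Gbps at ~220 krps implies roughly 205 kB of inbound traffic
# per request on average, far larger than a typical HTTP request, which is what
# makes this L7 attack behave like a volumetric one.
print(360e9 / 8 / 220e3)  # ~204545 bytes per request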
import gc


def queryset_iterator(queryset, chunksize=1000):
    '''
    Iterate over a Django Queryset ordered by the primary key.

    This method loads a maximum of chunksize (default: 1000) rows in its
    memory at the same time, while Django normally would load all rows in
    its memory. Using the iterator() method only causes it to not preload
    all the classes.

    Note that the implementation of the iterator does not support ordered
    query sets.
    '''
    pk = 0
    last_pk = queryset.order_by('-pk')[0].pk
    queryset = queryset.order_by('pk')
    while pk < last_pk:
        for row in queryset.filter(pk__gt=pk)[:chunksize]:
            pk = row.pk
            yield row
        gc.collect()
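A minimal usage sketch of queryset_iterator follows; the Article model and its module are hypothetical stand-ins, not part of the snippet above.

# Hypothetical usage of queryset_iterator; Article is a placeholder Django model.
from myapp.models import Article  # assumed model, for illustration only

for article in queryset_iterator(Article.objects.all(), chunksize=500):
    print(article.pk)  # placeholder for the real per-row work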
package main

import "log"

type Cvs interface {
	Checkout() error
	Pull() (string, error)
	Push() error
	Revision() (string, error)
	Status() error
}

func NewCvs(url string, info *RepoInfo) Cvs {
	var workdir string
	for src, dsts := range info.Targets {
		if src == "./" {
			workdir = dsts[0]
		}
	}
	if info.Cvs == "" {
		// FIXME: parse cvs from url
		info.Cvs = "git"
	}
	switch info.Cvs {
	case "git":
		return NewGitCvs(workdir, url, info)
	default:
		log.Fatal("unsupported cvs type:", info.Cvs)
	}
	return nil
}
// dealExecuteSnapshotHisChange handles a change to an execute snapshot history entry.
func (c *PinkCollector) dealExecuteSnapshotHisChange(kc *etcd.KeyChange) {
	if strings.TrimSpace(kc.Value) == "" {
		_ = c.cli.Delete(context.Background(), kc.Key)
		return
	}
	es := new(protocol.ExecuteSnapshot).Decode(kc.Value)
	if es.State == protocol.ExecuteSnapshotFail || es.State == protocol.ExecuteSnapshotSuccess {
		// The snapshot reached a terminal state: persist it, then remove it from etcd.
		c.saveExecuteSnapshotHisToDB(es)
		ctx, cancel := context.WithTimeout(context.Background(), time.Second*3)
		defer cancel()
		err := c.cli.Delete(ctx, kc.Key)
		if err != nil {
			log.Infof("the pink collector failed to transfer the execute snapshot %+v", kc)
			return
		}
		log.Infof("the pink collector deleted the execute snapshot history %+v from etcd", kc)
	}
}
// SHOW CREATE TABLE statement. public class ShowCreateTableStmt extends ShowStmt { public enum CreateTableType { TABLE("TABLE"), VIEW("VIEW"), MATERIALIZED_VIEW("MATERIALIZED VIEW"); private final String value; CreateTableType(String value) { this.value = value; } public String getValue() { return value; } } private static final ShowResultSetMetaData META_DATA = ShowResultSetMetaData.builder() .addColumn(new Column("Table", ScalarType.createVarchar(20))) .addColumn(new Column("Create Table", ScalarType.createVarchar(30))) .build(); private static final ShowResultSetMetaData VIEW_META_DATA = ShowResultSetMetaData.builder() .addColumn(new Column("View", ScalarType.createVarchar(20))) .addColumn(new Column("Create View", ScalarType.createVarchar(30))) .addColumn(new Column("character_set_client", ScalarType.createVarchar(30))) .addColumn(new Column("collation_connection", ScalarType.createVarchar(30))) .build(); private static final ShowResultSetMetaData MATERIALIZED_VIEW_META_DATA = ShowResultSetMetaData.builder() .addColumn(new Column("Materialized View", ScalarType.createVarchar(20))) .addColumn(new Column("Create Materialized View", ScalarType.createVarchar(30))) .build(); private TableName tbl; private CreateTableType type; public ShowCreateTableStmt(TableName tbl, CreateTableType type) { this.tbl = tbl; this.type = type; } public TableName getTbl() { return tbl; } public String getDb() { return tbl.getDb(); } public String getTable() { return tbl.getTbl(); } public CreateTableType getType() { return type; } public static ShowResultSetMetaData getViewMetaData() { return VIEW_META_DATA; } public static ShowResultSetMetaData getMaterializedViewMetaData() { return MATERIALIZED_VIEW_META_DATA; } @Override public void analyze(Analyzer analyzer) throws AnalysisException { } @Override public String toSql() { return "SHOW CREATE " + type.getValue() + " " + tbl; } @Override public String toString() { return toSql(); } @Override public ShowResultSetMetaData getMetaData() { return META_DATA; } @Override public <R, C> R accept(AstVisitor<R, C> visitor, C context) { return visitor.visitShowCreateTableStmt(this, context); } }
<reponame>pietroluongo/opengl-2d-shooter #include "../include/glutCallbacks.h" #include "../include/camera.h" #include "../include/constants.h" #include "../include/debug.h" #include "../libs/imgui/glut/imgui_impl_glut.h" #include "../libs/imgui/imgui.h" #include "../libs/imgui/imgui_impl_opengl2.h" #include <GL/gl.h> #include <GL/glu.h> #if defined(_WIN32) || defined(WIN32) #include <windows.h> #endif #include <GL/glut.h> void display() { glClear(GL_COLOR_BUFFER_BIT); context->getGameRef()->draw(); debug::drawUI(); glutPostRedisplay(); glutSwapBuffers(); } void keyDown(unsigned char key, int x, int y) { context->updateKeyStatus(key, KEY_DOWN_STATUS); glutPostRedisplay(); } void mouse(int button, int state, int x, int y) { if (context->imguiHasMouseFocus) { ImGui_ImplGLUT_MouseFunc(button, state, x, y); ImGui_ImplGLUT_MotionFunc(x, y); return; } if (state == GLUT_DOWN) { if (button == GLUT_LEFT_BUTTON) { context->setMouseButtons(MOUSE_BUTTON_LEFT, true); } else if (button == GLUT_RIGHT_BUTTON) { context->setMouseButtons(MOUSE_BUTTON_RIGHT, true); } } else if (state == GLUT_UP) { if (button == GLUT_LEFT_BUTTON) { context->setMouseButtons(MOUSE_BUTTON_LEFT, false); } else if (button == GLUT_RIGHT_BUTTON) { context->setMouseButtons(MOUSE_BUTTON_RIGHT, false); } } passiveMotion(x, y); glutPostRedisplay(); }; void specialDown(int key, int x, int y) { context->updateKeyStatus(key, KEY_DOWN_STATUS); if (key == GLUT_KEY_F1) { context->toggleDebugInfo(); } if (key == GLUT_KEY_F2) { context->toggleCameraInfo(); } if (key == GLUT_KEY_F3) { context->togglePhysicsInfo(); } if (key == GLUT_KEY_F12) { context->togglePlayerInfo(); } if (key == GLUT_KEY_F4) { context->toggleEnemyInfo(); } if(key == GLUT_KEY_F5) { context->toggleMemoryInfo(); } glutPostRedisplay(); } void specialUp(int key, int x, int y) { context->updateKeyStatus(key, KEY_UP_STATUS); glutPostRedisplay(); } void keyUp(unsigned char key, int x, int y) { context->updateKeyStatus(key, KEY_UP_STATUS); if (key == 'r') { context->resetGame(); } if (key == 'p') { context->getGameRef()->togglePause(); } if (key == 'm') { context->imguiHasMouseFocus = !context->imguiHasMouseFocus; } glutPostRedisplay(); } void setupGlut(int argc, char** argv) { glutInit(&argc, argv); glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB); glutInitWindowSize(WINDOW_WIDTH, WINDOW_HEIGHT); glutCreateWindow(WINDOW_TITLE); debug::imgui_init(); glClearColor(0.0, 0.0, 0.0, 0.0); glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); glEnable(GL_BLEND); glutDisplayFunc(display); glutPassiveMotionFunc(passiveMotion); glutMotionFunc(passiveMotion); glutIdleFunc(idle); glutKeyboardFunc(keyDown); glutKeyboardUpFunc(keyUp); glutSpecialFunc(specialDown); glutSpecialUpFunc(specialUp); glutMouseFunc(mouse); } void passiveMotion(int x, int y) { if (context->imguiHasMouseFocus) { ImGui_ImplGLUT_MotionFunc(x, y); return; } context->updateMousePos(glm::ivec2(x, y)); } void idle() { static GLdouble prevTime = glutGet(GLUT_ELAPSED_TIME); GLdouble curTime, deltaTime; curTime = glutGet(GLUT_ELAPSED_TIME); deltaTime = curTime - prevTime; prevTime = curTime; double framerate = 1.0 / deltaTime * 1000; context->updateTiming(framerate, deltaTime / 1000); context->idle(); glutPostRedisplay(); }
/// <reference path="./xss.d.ts" /> import xss from "xss"; console.log(xss("<script>alert('xss');</script>"));
/** * Provides information about an event provided by the apple file events api. All of the public * methods are simple methods that check if the particular event flag is set in the flags variable * returned by the apple file event. * * @see <a * href="https://developer.apple.com/documentation/coreservices/1455361-fseventstreameventflags" * target="_blank">FSEventStreamEventFlags</a> */ abstract class Event { // https://developer.apple.com/documentation/coreservices/1455361-fseventstreameventflags abstract int flags(); public boolean isNone() { return flags() == 0; } public boolean mustScanSubDirs() { return (flags() & Flags.Event.MustScanSubDirs) != 0; } public boolean userDropped() { return (flags() & Flags.Event.UserDropped) != 0; } public boolean kernelDropped() { return (flags() & Flags.Event.KernelDropped) != 0; } public boolean eventIdsWrapped() { return (flags() & Flags.Event.EventIdsWrapped) != 0; } public boolean historyDone() { return (flags() & Flags.Event.HistoryDone) != 0; } public boolean rootChanged() { return (flags() & Flags.Event.RootChanged) != 0; } public boolean mount() { return (flags() & Flags.Event.Mount) != 0; } public boolean unmount() { return (flags() & Flags.Event.Unmount) != 0; } public boolean itemChangeOwner() { return (flags() & Flags.Event.ItemChangeOwner) != 0; } public boolean itemCreated() { return (flags() & Flags.Event.ItemCreated) != 0; } public boolean itemFinderInfoMod() { return (flags() & Flags.Event.ItemFinderInfoMod) != 0; } public boolean itemInodeMetaMod() { return (flags() & Flags.Event.ItemInodeMetaMod) != 0; } public boolean itemIsDir() { return (flags() & Flags.Event.ItemIsDir) != 0; } public boolean itemIsFile() { return (flags() & Flags.Event.ItemIsFile) != 0; } public boolean itemIsHardlink() { return (flags() & Flags.Event.ItemIsHardlink) != 0; } public boolean itemIsLastHardlink() { return (flags() & Flags.Event.ItemIsLastHardlink) != 0; } public boolean itemIsSymlink() { return (flags() & Flags.Event.ItemIsSymlink) != 0; } public boolean itemModified() { return (flags() & Flags.Event.ItemModified) != 0; } public boolean itemRemoved() { return (flags() & Flags.Event.ItemRemoved) != 0; } public boolean itemRenamed() { return (flags() & Flags.Event.ItemRenamed) != 0; } public boolean itemXattrMod() { return (flags() & Flags.Event.ItemXattrMod) != 0; } public boolean ownEvent() { return (flags() & Flags.Event.OwnEvent) != 0; } public boolean itemCloned() { return (flags() & Flags.Event.ItemCloned) != 0; } }
/**
 * Registers <code>mMessageReceiver</code> for {@code ServiceMotionSense.INTENT_DATA}
 * broadcasts when this activity is resumed.
 */
@Override
public void onResume() {
    LocalBroadcastManager.getInstance(this).registerReceiver(mMessageReceiver,
            new IntentFilter(ServiceMotionSense.INTENT_DATA));
    super.onResume();
}
package adventofcode2017 import ( "bufio" "fmt" "math" "os" "strconv" "strings" "testing" ) // Nested anonymous structs cannot be initialized in a nice way type DistanceTestdata struct { pos1 [2]int pos2 [2]int dist int } func ExampleDay03() { fmt.Println(Day03(361527)) // Output: 326 } func TestDay03(t *testing.T) { // values taken from spec testdata := [][]int{ {1, 0}, {12, 3}, {23, 2}, {1024, 31}, } for _, pair := range testdata { square := pair[0] want := pair[1] got := Day03(square) if want != got { t.Fatalf("square %d: want %d but got %d\n", square, want, got) } } } func TestDay03A174344(t *testing.T) { f, err := os.Open("testdata/b174344.txt") if err != nil { t.Fatal(err) } defer f.Close() sc := bufio.NewScanner(f) for sc.Scan() { line := sc.Text() if strings.HasPrefix(line, "#") { continue } // split into index and result parts := strings.Fields(line) idx, err := strconv.Atoi(parts[0]) if err != nil { t.Fatal(err) } want, err := strconv.Atoi(parts[1]) if err != nil { t.Fatal(err) } got := Transform(idx, math.Sin, false) if want != got { t.Fatalf("n=%d: want %d but got %d\n", idx, want, got) } } if err := sc.Err(); err != nil { t.Fatal(err) } } func BenchmarkDay03(b *testing.B) { for i := 0; i < b.N; i++ { Day03(361527) } }
/* SPDX-License-Identifier: Apache-2.0 */
/* Copyright Contributors to the ODPi Egeria project. */
package org.odpi.openmetadata.accessservices.informationview.ffdc.exceptions;

public class PublishEventException extends InformationViewCheckedExceptionBase {

    public PublishEventException(String reportingClassName, String reportingActionDescription,
                                 String reportedErrorMessage, String reportedSystemAction,
                                 String reportedUserAction) {
        super(reportingClassName, reportingActionDescription, reportedErrorMessage,
                reportedSystemAction, reportedUserAction);
    }
}
<gh_stars>1-10 package yaml import ( "encoding/json" "io/ioutil" "os" "path/filepath" "reflect" "strings" "testing" ) func compareToJSON(t *testing.T, yamlPath string) { yaml, err := ioutil.ReadFile(yamlPath) if err != nil { t.Fatal(err) } vy, err := Load(yaml) if err != nil { t.Fatal(err) } jsonPath := strings.TrimSuffix(yamlPath, ".yaml") + ".json" jsonFile, err := os.Open(jsonPath) if err != nil { t.Fatal(err) } defer jsonFile.Close() decoder := json.NewDecoder(jsonFile) var vj interface{} decoder.Decode(&vj) if !reflect.DeepEqual(vj, vy) { t.Fatalf("%s:\n\texpect %#v\n\tgot %#v", yamlPath, vj, vy) } } func TestLoad(t *testing.T) { paths, err := filepath.Glob("tests/*.yaml") if err != nil { t.Fatal(err) } if len(paths) < 5 { t.Fatal("wrong tests/ directory?") } for _, path := range paths { compareToJSON(t, path) } } func TestLoadSimple(t *testing.T) { o := func(b []byte, expected interface{}) { actual, err := Load(b) if err != nil { t.Fatalf("Load(%q): %v", b, err) } if !reflect.DeepEqual(actual, expected) { t.Fatalf("Load(%q):\n\texpect %#v\n\tgot %#v", b, expected, actual) } } o(nil, nil) o([]byte("a"), "a") o([]byte("[a,b,c]"), []interface{}{"a", "b", "c"}) o([]byte("a: x\nb: y\n"), map[string]interface{}{"a": "x", "b": "y"}) o([]byte("a: [x, b: y]"), map[string]interface{}{"a": []interface{}{"x", map[string]interface{}{"b": "y"}}}) o([]byte("hello: 世界\n😂: smile"), map[string]interface{}{"hello": "世界", "😂": "smile"}) }
from typing import List

class Solution:
    def solution(self, board, word, x, y, cur):
        if(x < 0 or x >= len(board) or y < 0 or y >= len(board[x]) or board[x][y] == ' '):
            return False
        cur += board[x][y]
        if(len(cur) > len(word)):
            return False
        if(board[x][y] != word[len(cur)-1]):
            return False
        if(cur == word):
            return True
        temp = board[x][y]
        board[x][y] = ' '
        if(self.solution(board, word, x, y+1, cur)):
            return True
        if(self.solution(board, word, x, y-1, cur)):
            return True
        if(self.solution(board, word, x+1, y, cur)):
            return True
        if(self.solution(board, word, x-1, y, cur)):
            return True
        board[x][y] = temp
        return False

    def exist(self, board: List[List[str]], word: str) -> bool:
        if(len(word) == 0):
            return True
        n = len(board)
        for i in range(n):
            m = len(board[i])
            for j in range(m):
                if(word[0] == board[i][j] and self.solution(board, word, i, j, "")):
                    return True
        return False

temp =\
[
    ['A','B','C','X'],
    ['S','Z','C','S'],
    ['P','D','E','E']
]
sol=Solution()
#board=temp.copy();print(sol.exist(board,"ASFD"))
board=temp[:];print(sol.exist(board,"ABCCED"))
class ManagedMcrun: """ A class for performing a mcstas simulation and organizing the data into python objects ManagedMcrun is usually called by the instrument class of McStasScript but can be used independently. It runs the mcrun command using the system command, and if this is not in the path, the absolute path can be given in a keyword argument executable_path. Attributes ---------- name_of_instrumentfile : str Name of instrument file to be executed data_folder_name : str Name of datafolder mcrun writes to disk ncount : int Number of rays to simulate mpi : int Number of mpi threads to run parameters : dict Dictionary of parameter names and values for this simulation custom_flags : string Custom flags that are passed to the mcrun command executable_path : string Path to the mcrun command (can be empty if already in path) Methods ------- run_simulation() Runs simulation, returns list of McStasData instances """ def __init__(self, instr_name, **kwargs): """ Parameters ---------- instr_name : str Name of instrument file to be simulated kwargs : keyword arguments foldername : str, required Sets data_folder_name ncount : int, default 1E6 Sets ncount mpi : int, default None Sets thread count, None to disable mpi parameters : dict Sets parameters custom_flags : str, default "" Sets custom_flags passed to mcrun executable_path : str Path to mcrun command, "" if already in path increment_folder_name : bool, default False If True, automatically appends foldername to make it unique force_compile : bool, default True If True, forces compile. If False no new instrument is written run_folder : str Path to folder in which to run McStas """ self.name_of_instrumentfile = instr_name self.data_folder_name = "" self.ncount = int(1E6) self.mpi = None self.parameters = {} self.custom_flags = "" self.executable_path = "" self.executable = "" self.increment_folder_name = False self.compile = True self.run_path = "." 
# executable_path always in kwargs if "executable_path" in kwargs: self.executable_path = kwargs["executable_path"] if "executable" in kwargs: self.executable = kwargs["executable"] if "foldername" in kwargs: self.data_folder_name = kwargs["foldername"] else: raise NameError( "ManagedMcrun needs foldername to load data, add " + "with keyword argument.") if "ncount" in kwargs: self.ncount = int(kwargs["ncount"]) if self.ncount < 1: raise ValueError("ncount should be a positive integer, was " + str(self.ncount)) if "mpi" in kwargs: self.mpi = kwargs["mpi"] try: self.mpi = int(self.mpi) except ValueError: if self.mpi is not None: raise RuntimeError("MPI should be an integer, was " + str(self.mpi)) if self.mpi is not None: if self.mpi < 1: raise ValueError("MPI should be an integer larger than" + " 0, was " + str(self.mpi)) if "parameters" in kwargs: self.parameters = kwargs["parameters"] if not isinstance(self.parameters, dict): raise RuntimeError("Parameters should be given as dict.") if "custom_flags" in kwargs: self.custom_flags = kwargs["custom_flags"] if not isinstance(self.custom_flags, str): raise RuntimeError("ManagedMcrun detected given customf_flags" + " was not a string.") if "increment_folder_name" in kwargs: self.increment_folder_name = kwargs["increment_folder_name"] if "force_compile" in kwargs: self.compile = kwargs["force_compile"] if "run_path" in kwargs: self.run_path = kwargs["run_path"] # get relevant paths and check their validity current_directory = os.getcwd() if not os.path.isabs(self.data_folder_name): self.data_folder_name = os.path.join(current_directory, self.data_folder_name) else: split_data_path = os.path.split(self.data_folder_name) if not os.path.isdir(split_data_path[0]): raise RuntimeError("Parent folder for datafolder invalid: " + str(split_data_path[0])) if not os.path.isabs(self.run_path): self.run_path = os.path.join(current_directory, self.run_path) else: split_run_path = os.path.split(self.run_path) if not os.path.isdir(split_run_path[0]): raise RuntimeError("Parent folder for run_path invalid: " + str(split_run_path[0])) if not os.path.isdir(self.run_path): raise RuntimeError("ManagedMcrun found run_path to " + "be invalid: " + str(self.run_path)) if not os.path.isdir(self.executable_path): raise RuntimeError("ManagedMcrun found executable_path to " + "be invalid: " + str(self.executable_path)) def run_simulation(self, **kwargs): """ Runs McStas simulation described by initializing the object """ # construct command to run option_string = "" if self.compile: option_string = "-c " if self.mpi is not None: mpi_string = " --mpi=" + str(self.mpi) + " " # Set mpi else: mpi_string = " " option_string = (option_string + "-n " + str(self.ncount) # Set ncount + mpi_string) if self.increment_folder_name and os.path.isdir(self.data_folder_name): counter = 0 new_name = self.data_folder_name + "_" + str(counter) while os.path.isdir(new_name): counter = counter + 1 new_name = self.data_folder_name + "_" + str(counter) self.data_folder_name = new_name if len(self.data_folder_name) > 0: option_string = (option_string + "-d " + self.data_folder_name) # add parameters to command parameter_string = "" for key, val in self.parameters.items(): parameter_string = (parameter_string + " " + str(key) # parameter name + "=" + str(val)) # parameter value mcrun_full_path = os.path.join(self.executable_path, self.executable) if len(self.executable_path) > 1: if not (self.executable_path[-1] == "\\" or self.executable_path[-1] == "/"): mcrun_full_path = 
os.path.join(self.executable_path, self.executable) # Run the mcrun command on the system full_command = (mcrun_full_path + " " + option_string + " " + self.custom_flags + " " + self.name_of_instrumentfile + parameter_string) process = subprocess.run(full_command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, cwd=self.run_path) if "suppress_output" in kwargs: if kwargs["suppress_output"] is False: print(process.stderr) print(process.stdout) else: print(process.stderr) print(process.stdout) def load_results(self, *args): """ Method for loading data from a mcstas simulation Loads data on all monitors in a McStas data folder, and returns these as a list of McStasData objects. Parameters ---------- optional first argument : str path to folder from which data should be loaded """ if len(args) == 0: data_folder_name = self.data_folder_name elif len(args) == 1: data_folder_name = args[0] else: raise RuntimeError("load_results can be called " + "with 0 or 1 arguments") return load_results(data_folder_name)
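A minimal usage sketch of ManagedMcrun follows; the instrument file name, parameter names and executable location below are hypothetical placeholders chosen for illustration, not values taken from the class above.

# Hypothetical usage of ManagedMcrun; file names, parameter names and the
# executable location are illustrative assumptions only.
run = ManagedMcrun(
    "my_instrument.instr",               # instrument file to simulate (assumed name)
    foldername="my_run_output",          # required: data folder mcrun writes to
    ncount=int(1e7),                     # number of rays to simulate
    mpi=4,                               # optional MPI thread count
    parameters={"wavelength": 2.5},      # instrument parameters (assumed name/value)
    increment_folder_name=True,          # append a counter instead of clobbering
    executable="mcrun",                  # command to run
    executable_path="/usr/local/bin",    # assumed location of the mcrun command
)
run.run_simulation()
data = run.load_results()                # list of McStasData objects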
Retrospective cohort study of the South Tyneside Exercise Referral Scheme 2009‐14: predictors of dropout and barriers to adherence Background Exercise Referral Schemes (ERS) are a prevalent method of increasing physical activity levels. However, they suffer from participant dropout and research predicting dropout or barriers to adherence are limited. This study aimed to focus upon the effect of referral characteristics on dropout, dropout predictors and whether self‐reported barriers to exercise predict dropout. Methods ERS data from 2009 to 2014 were retrieved for analysis. Chi‐squared and t‐tests were used to investigate differences between referral characteristics, and logistic regression used to investigate dropout predictors. Results Of 6894 participants, 37.8% (n = 2608) dropped out within 6 weeks and 50.03% (n = 3449) by the final 12th week. More males adhered (P < 0.001) with dropouts being significantly younger (P < 0.001). Dropout predictors were smoking (OR = 1.58, 95% CI: 1.29‐1.93) or being a Tier 3 referral (OR = 1.47, 95% CI: 1.25‐1.73). Increasing age (OR = 0.98, 95% CI: 0.98‐0.99), drinking alcohol (OR = 0.82, 95% CI: 0.71‐0.95), secondary care referrals (OR = 0.68, 95% CI: 0.52‐0.90), having a lack of motivation (OR = 0.81, 95% CI: 0.69‐0.95) or a lack of childcare (OR = 0.69, 95% CI: 0.50‐0.95) decreased the likelihood of dropout. Conclusion ERS dropout continues to be problematic. Smoking and having moderate‐high comorbidities predicted dropout. Increasing age and patient‐reported barriers of a lack of time or childcare decreased dropout risk. The reasons for dropout require further investigation.
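To illustrate how odds ratios and 95% confidence intervals of the kind reported above can be derived, here is a minimal logistic-regression sketch; the file and column names are hypothetical, and the study's actual dataset and variable coding are not reproduced.

# Illustrative only: logistic regression of dropout on referral characteristics.
import numpy as np
import pandas as pd
import statsmodels.api as sm

df = pd.read_csv("ers_referrals.csv")          # assumed file of referral records
predictors = ["age", "smoker", "drinks_alcohol", "tier3_referral",
              "secondary_care_referral", "lack_of_motivation", "lack_of_childcare"]
X = sm.add_constant(df[predictors])            # add intercept term
y = df["dropped_out"]                          # 1 = dropped out by week 12

model = sm.Logit(y, X).fit()
odds_ratios = pd.DataFrame({
    "OR": np.exp(model.params),                # exponentiated coefficients
    "CI 2.5%": np.exp(model.conf_int()[0]),
    "CI 97.5%": np.exp(model.conf_int()[1]),
})
print(odds_ratios)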
import asyncio from codewatchman.Watchman import Watchman from codewatchman.WatchmanLog import WatchmanLog import logging logging.basicConfig(level=logging.DEBUG) tokenData = { "tokenId": "<KEY>", "accessToken": "<KEY>" } instance = Watchman(tokenData["tokenId"], tokenData["accessToken"]) first_log = WatchmanLog(log_code="TESTLOG", message="IT IS WORKING!!!!!", payload={ "test": True }) instance.send_log(first_log) print("This is to let you know that the script is complete.")
<filename>sugoi-api-ldap-config-provider/src/test/java/fr/insee/sugoi/config/LdapRealmProviderWriteTest.java /* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package fr.insee.sugoi.config; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.is; import fr.insee.sugoi.core.configuration.UiMappingService; import fr.insee.sugoi.ldap.utils.config.LdapConfigKeys; import fr.insee.sugoi.model.Realm; import fr.insee.sugoi.model.UserStorage; import fr.insee.sugoi.model.fixtures.StoreMappingFixture; import fr.insee.sugoi.model.technics.ModelType; import fr.insee.sugoi.model.technics.StoreMapping; import fr.insee.sugoi.model.technics.UiField; import java.util.ArrayList; import java.util.List; import org.junit.jupiter.api.Test; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.boot.autoconfigure.ldap.embedded.EmbeddedLdapAutoConfiguration; import org.springframework.boot.test.context.SpringBootTest; import org.springframework.boot.test.mock.mockito.MockBean; import org.springframework.test.context.TestPropertySource; @SpringBootTest(classes = {EmbeddedLdapAutoConfiguration.class, LdapRealmProviderDAOImpl.class}) @TestPropertySource(locations = "classpath:/application-write.properties") public class LdapRealmProviderWriteTest { @Autowired LdapRealmProviderDAOImpl ldapRealmProviderDAOImpl; @MockBean private UiMappingService uiMappingService; @Test public void addNewRealmWithOneUserStorageTest() { Realm realmToAdd = new Realm(); realmToAdd.setName("toadd"); realmToAdd.setUrl("localhost"); realmToAdd.addProperty(LdapConfigKeys.GROUP_FILTER_PATTERN, "toto"); UserStorage uniqueUserStorage = new UserStorage(); uniqueUserStorage.setUserSource("ou=SSM,o=insee,c=fr"); uniqueUserStorage.setOrganizationSource("ou=organisations,ou=clients_domaine2,o=insee,c=fr"); realmToAdd.setUserStorages(List.of(uniqueUserStorage)); assertThat("Realm should not exist", ldapRealmProviderDAOImpl.load("toadd").isEmpty()); ldapRealmProviderDAOImpl.createRealm(realmToAdd, null); Realm retrievedRealm = ldapRealmProviderDAOImpl.load("toadd").get(); assertThat("Realm should be present", retrievedRealm.getName(), is("toadd")); assertThat("Realm should have an url", retrievedRealm.getUrl(), is("localhost")); assertThat( "Realm should have a usersource", retrievedRealm.getUserStorages().get(0).getUserSource(), is("ou=SSM,o=insee,c=fr")); assertThat( "Realm should have an organizationsource", retrievedRealm.getUserStorages().get(0).getOrganizationSource(), is("ou=organisations,ou=clients_domaine2,o=insee,c=fr")); assertThat( "Realm should have a groupfilterpattern", retrievedRealm.getProperties().get(LdapConfigKeys.GROUP_FILTER_PATTERN), is("toto")); } @Test public void addNewRealmWithTwoUserStoragesTest() { Realm realmToAdd = new Realm(); realmToAdd.setName("multistorage"); realmToAdd.setUrl("localhost"); realmToAdd.addProperty(LdapConfigKeys.GROUP_FILTER_PATTERN, "toto"); UserStorage userStorage1 = new UserStorage(); userStorage1.setName("first"); 
userStorage1.setUserSource("ou=SSM,o=insee,c=fr"); userStorage1.setOrganizationSource("ou=organisations,ou=clients_domaine2,o=insee,c=fr"); UserStorage userStorage2 = new UserStorage(); userStorage2.setName("second"); userStorage2.setUserSource("ou=SSM,o=insee,c=fr"); userStorage2.setOrganizationSource("ou=organisations,ou=clients_domaine2,o=insee,c=fr"); List<UserStorage> userStorages = new ArrayList<>(); userStorages.add(userStorage1); userStorages.add(userStorage2); realmToAdd.setUserStorages(userStorages); assertThat("Realm should not exist", ldapRealmProviderDAOImpl.load("multistorage").isEmpty()); ldapRealmProviderDAOImpl.createRealm(realmToAdd, null); Realm retrievedRealm = ldapRealmProviderDAOImpl.load("multistorage").get(); assertThat("Realm should be present", retrievedRealm.getName(), is("multistorage")); assertThat( "Realm should have two userstorages", retrievedRealm.getUserStorages().size(), is(2)); assertThat( "First realm has usersource", retrievedRealm.getUserStorages().get(0).getUserSource(), is("ou=SSM,o=insee,c=fr")); assertThat( "Realm should have a groupfilterpattern", retrievedRealm.getProperties().get(LdapConfigKeys.GROUP_FILTER_PATTERN), is("toto")); } @Test public void deleteRealmWithOneStorageTest() { assertThat("Realm should be present", ldapRealmProviderDAOImpl.load("todelete").isPresent()); ldapRealmProviderDAOImpl.deleteRealm("todelete", null); assertThat("todelete should be deleted", ldapRealmProviderDAOImpl.load("todelete").isEmpty()); } @Test public void deleteRealmWithMultipleStoragesTest() { assertThat( "Realm should be present", ldapRealmProviderDAOImpl.load("todeletemulti").isPresent()); ldapRealmProviderDAOImpl.deleteRealm("todeletemulti", null); assertThat( "todeletemulti should be deleted", ldapRealmProviderDAOImpl.load("todeletemulti").isEmpty()); } @Test public void changeRealmUserSourceTest() { Realm realmToModify = ldapRealmProviderDAOImpl.load("tomodify").get(); UserStorage userStorage = realmToModify.getUserStorages().get(0); userStorage.setUserSource("ou=contacts,ou=clients_domaine2,o=insee,c=fr"); ldapRealmProviderDAOImpl.updateRealm(realmToModify, null); assertThat( "User source should change to domaine2", ldapRealmProviderDAOImpl.load("tomodify").get().getUserStorages().get(0).getUserSource(), is("ou=contacts,ou=clients_domaine2,o=insee,c=fr")); } @Test public void changeRealmUrlTest() { Realm realmToModify = ldapRealmProviderDAOImpl.load("tomodify").get(); realmToModify.setUrl("new_url"); ldapRealmProviderDAOImpl.updateRealm(realmToModify, null); assertThat( "Url should have changed", ldapRealmProviderDAOImpl.load("tomodify").get().getUrl(), is("new_url")); } @Test public void addUiUserMapping() { Realm realmToModify = ldapRealmProviderDAOImpl.load("tomodify").get(); UiField uiField = new UiField(); uiField.setName("Identifiant"); uiField.setTag("main"); realmToModify.getUiMapping().get(Realm.UIMappingType.UI_ORGANIZATION_MAPPING).add(uiField); ldapRealmProviderDAOImpl.updateRealm(realmToModify, null); assertThat( "Should have a uiUserMapping Identifiant", ldapRealmProviderDAOImpl .load("tomodify") .get() .getUiMapping() .get(Realm.UIMappingType.UI_ORGANIZATION_MAPPING) .stream() .anyMatch( uf -> uf.getName().equalsIgnoreCase("Identifiant") && uf.getTag().equalsIgnoreCase("main"))); } @Test public void addUiOrganizationMapping() { Realm realmToModify = ldapRealmProviderDAOImpl.load("tomodify").get(); UiField uiField = new UiField(); uiField.setName("Identifiant"); uiField.setTag("main"); 
realmToModify.getUiMapping().get(Realm.UIMappingType.UI_USER_MAPPING).add(uiField); ldapRealmProviderDAOImpl.updateRealm(realmToModify, null); assertThat( "Should have a uiUserMapping Identifiant", ldapRealmProviderDAOImpl .load("tomodify") .get() .getUiMapping() .get(Realm.UIMappingType.UI_USER_MAPPING) .stream() .anyMatch( uf -> uf.getName().equalsIgnoreCase("Identifiant") && uf.getTag().equalsIgnoreCase("main"))); } @Test public void addApplicationMappingTest() { Realm realmToModify = ldapRealmProviderDAOImpl.load("tomodify").get(); realmToModify.setApplicationMappings(StoreMappingFixture.getApplicationStoreMappings()); ldapRealmProviderDAOImpl.updateRealm(realmToModify, null); assertThat( "Application mapping should have a name", ldapRealmProviderDAOImpl.load("tomodify").get().getApplicationMappings().stream() .anyMatch(v -> v.equals(new StoreMapping("name", "ou", ModelType.STRING, true)))); } @Test public void addOrganizationMappingTest() { Realm realmToModify = ldapRealmProviderDAOImpl.load("tomodify").get(); realmToModify .getUserStorages() .get(0) .setOrganizationMappings(StoreMappingFixture.getOrganizationStoreMappings()); ldapRealmProviderDAOImpl.updateRealm(realmToModify, null); assertThat( "Organization mapping should have an address", ldapRealmProviderDAOImpl .load("tomodify") .get() .getUserStorages() .get(0) .getOrganizationMappings() .stream() .anyMatch( v -> v.equals( new StoreMapping( "address", "inseeAdressePostaleDN", ModelType.ADDRESS, true)))); } }
<gh_stars>1-10 package com.drumonii.loltrollbuild.model.builder; import com.drumonii.loltrollbuild.model.Champion; import com.drumonii.loltrollbuild.model.ChampionInfo; import com.drumonii.loltrollbuild.model.ChampionPassive; import com.drumonii.loltrollbuild.model.ChampionSpell; import com.drumonii.loltrollbuild.model.image.ChampionImage; import java.util.*; import java.util.stream.Collectors; /** * Builder for {@link Champion}s. */ public final class ChampionBuilder { private int id; private String key; private String name; private String title; private String partype; private ChampionInfo info; private List<ChampionSpell> spells = new ArrayList<>(); private ChampionPassive passive; private ChampionImage image; private SortedSet<String> tags = new TreeSet<>(); public ChampionBuilder withId(int id) { this.id = id; return this; } public ChampionBuilder withKey(String key) { this.key = key; return this; } public ChampionBuilder withName(String name) { this.name = name; return this; } public ChampionBuilder withTitle(String title) { this.title = title; return this; } public ChampionBuilder withPartype(String partype) { this.partype = partype; return this; } public ChampionBuilder withInfo(ChampionInfo info) { this.info = info; return this; } public ChampionBuilder withSpells(List<ChampionSpell> spells) { this.spells = spells; return this; } public ChampionBuilder withSpells(ChampionSpell... spells) { withSpells(Arrays.stream(spells).collect(Collectors.toCollection(ArrayList::new))); return this; } public ChampionBuilder withPassive(ChampionPassive passive) { this.passive = passive; return this; } public ChampionBuilder withImage(ChampionImage image) { this.image = image; return this; } public ChampionBuilder withTags(SortedSet<String> tags) { this.tags = tags; return this; } public ChampionBuilder withTags(String... tags) { withTags(new TreeSet<>(Arrays.asList(tags))); return this; } public Champion build() { Champion champion = new Champion(); champion.setId(id); champion.setKey(key); champion.setName(name); champion.setTitle(title); champion.setPartype(partype); champion.setInfo(info); if (info != null) { info.setId(id); info.setChampion(champion); } champion.setSpells(spells); for (ChampionSpell spell : spells) { spell.getImage().setKey(spell.getKey()); spell.getImage().setSpell(spell); spell.setChampion(champion); } champion.setPassive(passive); if (passive != null) { passive.getImage().setId(id); passive.setId(id); passive.setChampion(champion); } champion.setImage(image); if (image != null) { image.setId(id); image.setChampion(champion); } champion.setTags(tags); return champion; } }
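As a quick illustration of the fluent API above, here is a minimal usage sketch. The id, names and tags are made-up example values (not data from any particular patch), and only setters shown in the builder are used.

// Hypothetical usage of ChampionBuilder; assumes the Champion and
// ChampionBuilder classes above are on the classpath. The values are illustrative.
Champion champion = new ChampionBuilder()
        .withId(1)
        .withKey("Annie")
        .withName("Annie")
        .withTitle("the Dark Child")
        .withPartype("Mana")
        .withTags("Mage", "Support") // stored in a TreeSet, so order is normalized
        .build();
// build() also wires the back-references: info, spells, passive and image
// (when set) each receive a reference to the built champion.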
Role of apoptosis, bcl-2 and bax protein expression in premature rupture of fetal membranes. OBJECTIVE To examine the degree of apoptosis in human fetal membranes associated with premature rupture of membranes (PROM) as compared with normal pregnancies, and to evaluate the expression of the proapoptotic bax and antiapoptotic bcl-2 gene products. STUDY DESIGN Fetal membranes from 50 pregnancies were included in the study. Thirty of the 50 pregnancies had PROM; twenty pregnancies with intact membranes served as controls. Chorioamniotic membrane biopsies were taken from the rupture site of the membrane and from the periphery of the rupture site. In the control group, membrane biopsies were taken from the artificial rupture site, the cervical pole of the membranes and the membranes close to the edge of the placenta. To identify apoptotic figures, routinely processed samples were stained with hematoxylin and eosin for light microscopic evaluation. Quantification of apoptotic cells was performed in high-power fields and expressed as the number per 100 cells. The membranes of both groups were then stained with bcl-2 and bax antibodies using the standard streptavidin-biotin-immunoperoxidase method. Staining with the two antibodies was compared between the groups. RESULTS Apoptotic cells were detected in the amniotic epithelium, the chorionic cells and the fibroblastic layer of the fetal membranes, and were found mostly in the chorionic cells. There was a statistically significant difference in the apoptotic index between the PROM and control groups at both the rupture and peripheral sites (P < .05), although within each group the peripheral and rupture sites showed no difference in apoptotic cell counts. Bax and bcl-2 expression was observed in 40% of control cases and in 57% and 50% of cases with PROM, respectively, mostly in the chorionic trophoblastic cells. The PROM and control groups showed no statistically significant difference in bcl-2 or bax protein expression. CONCLUSION Apoptosis may play a role in the pathogenesis of PROM, but the changes in apoptosis do not seem to be mediated by the bcl-2 and bax genes in the amniotic membrane. Other regulatory mechanisms must be investigated.
<reponame>e11en/ReactUiCompare import Code from "components/CodeComponent"; import ElementCard from "components/ElementCardComponent"; import FlexWrapper from "components/FlexWrapperComponent"; const NormalButtons = () => { return ( <ElementCard title="Normal buttons"> <div></div> <Code> {` `} </Code> </ElementCard> ); }; const ButtonVariations = () => { return ( <ElementCard title="Other button variations"> <div></div> <Code> {` `} </Code> </ElementCard> ); }; const ButtonSizes = () => { return ( <ElementCard title="Button sizes"> <div></div> <Code> {` `} </Code> </ElementCard> ); }; const ButtonGroups = () => { return ( <ElementCard title="Button groups"> <div></div> <Code> {` `} </Code> </ElementCard> ); }; export const Button = () => { return ( <FlexWrapper> <NormalButtons /> <ButtonVariations /> <ButtonSizes /> <ButtonGroups /> </FlexWrapper> ); };
Some footballers just don't want to give the game up. It has become commonplace for stars of yesteryear to see out their careers in warmer climates like America and India, and sometimes they want to cash in on their name and enjoy one last payday too (ahem, China).

At 36 years old, Dimitar Berbatov still has plenty to offer given his style of play, but it appears as though he is all about winding down. After a season with PAOK in Greece during the 2015-16 campaign, the Bulgarian forward has been out of the game for a year and has still been linked with moves to the Premier League. Most notably, Newcastle.

Now the former Tottenham and Fulham man has decided to get back into the game, but he's not heading for England's top flight - far from it. He's set to follow in former Manchester United teammate Wes Brown's footsteps with a move to Indian side Kerala Blasters. Jim White of Sky Sports tweeted out the information on Monday, and Berbatov looks set to wind down his career in style.

Berbatov won the Premier League Golden Boot back in 2011 as part of one of the last ultra-successful Manchester United sides. Although his style didn't resonate with every United fan, Sir Alex Ferguson defended his stint at Old Trafford.

"I don't think Dimitar was a failure here," said Ferguson. "Some people like to see players run through brick walls all the time. Dimitar is not that type of player, but he is a very talented player and he had a decent goalscoring record here."

He also rejected claims that Berbatov had to be the star to thrive - like he was at Fulham - rather than a part of an all-star cast.

"That was not a problem for him," said Ferguson. "The problem for him here was the way we wanted to play and the selections I had to make. When you have choices you hope to make the right one. Javier Hernández had a fantastic first season with us and as time went on he became a more regular player than Dimitar."
/**
 * Decrypts a String that was previously encrypted with the Columnar
 * Transposition Cipher.
 *
 * @return the decrypted word recovered from the transposition table
 */
public static String decrypter() {
    StringBuilder wordDecrypted = new StringBuilder();
    // Start at row 1: the first row of the table is the key/header row, so only
    // the rows below it hold the characters of the message.
    for (int i = 1; i < table.length; i++) {
        for (Object item : table[i]) {
            wordDecrypted.append(item);
        }
    }
    // Strip the padding marker (ENCRYPTION_FIELD) that was added during encryption.
    return wordDecrypted.toString().replaceAll(ENCRYPTION_FIELD, "");
}
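To make the row-reading step concrete, here is a tiny standalone sketch of the same logic on a hand-built table. The table contents and the "#" padding marker are illustrative stand-ins; in the real class, table and ENCRYPTION_FIELD are fields populated during encryption.

// Standalone illustration of decrypter()'s row concatenation, with "#"
// standing in for the class's ENCRYPTION_FIELD padding marker.
Object[][] table = {
        {'k', 'e', 'y'},   // row 0: key/header row, skipped below
        {'h', 'i', '#'},   // rows 1..n: message characters plus padding
};
StringBuilder out = new StringBuilder();
for (int i = 1; i < table.length; i++) {
    for (Object item : table[i]) {
        out.append(item);
    }
}
System.out.println(out.toString().replaceAll("#", "")); // prints "hi"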
package orc import ( "bytes" "fmt" "strings" "unicode" "unicode/utf8" "github.com/crphang/orc/proto" ) type Category struct { name string isPrimitive bool typeKind *proto.Type_Kind } func (c Category) String() string { return c.name } var ( CategoryBoolean = Category{"boolean", true, proto.Type_BOOLEAN.Enum()} CategoryByte = Category{"tinyint", true, proto.Type_BYTE.Enum()} CategoryShort = Category{"smallint", true, proto.Type_SHORT.Enum()} CategoryInt = Category{"int", true, proto.Type_INT.Enum()} CategoryLong = Category{"bigint", true, proto.Type_LONG.Enum()} CategoryFloat = Category{"float", true, proto.Type_FLOAT.Enum()} CategoryDouble = Category{"double", true, proto.Type_DOUBLE.Enum()} CategoryString = Category{"string", true, proto.Type_STRING.Enum()} CategoryDate = Category{"date", true, proto.Type_DATE.Enum()} CategoryTimestamp = Category{"timestamp", true, proto.Type_TIMESTAMP.Enum()} CategoryBinary = Category{"binary", true, proto.Type_BINARY.Enum()} CategoryDecimal = Category{"decimal", true, proto.Type_DECIMAL.Enum()} CategoryVarchar = Category{"varchar", true, proto.Type_VARCHAR.Enum()} CategoryChar = Category{"char", true, proto.Type_CHAR.Enum()} CategoryList = Category{"array", false, proto.Type_LIST.Enum()} CategoryMap = Category{"map", false, proto.Type_MAP.Enum()} CategoryStruct = Category{"struct", false, proto.Type_STRUCT.Enum()} CategoryUnion = Category{"uniontype", false, proto.Type_UNION.Enum()} Categories = []Category{ CategoryBoolean, CategoryByte, CategoryShort, CategoryInt, CategoryLong, CategoryFloat, CategoryDouble, CategoryString, CategoryDate, CategoryTimestamp, CategoryBinary, CategoryDecimal, CategoryVarchar, CategoryChar, CategoryList, CategoryMap, CategoryStruct, CategoryUnion, } ) type stringPosition struct { value string position int length int } func NewStringPosition(value string) *stringPosition { value = strings.ToLower(value) value = strings.NewReplacer("\n", "", " ", "", "\t", "").Replace(value) return &stringPosition{ value, 0, utf8.RuneCountInString(value), } } func (s stringPosition) String() string { var buf bytes.Buffer buf.WriteString(`\'`) buf.WriteString(string([]rune(s.value)[0:s.position])) buf.WriteString(`^`) buf.WriteString(string([]rune(s.value)[s.position])) buf.WriteString(`\'`) return buf.String() } func (s *stringPosition) parseCategory() (Category, error) { start := s.position for s.position < s.length { ch := []rune(s.value)[s.position] if !unicode.IsLetter(rune(ch)) { break } s.position++ } if s.position != start { word := strings.ToLower(string([]rune(s.value)[start:s.position])) for _, cat := range Categories { if cat.name == word { return cat, nil } } } return Category{}, fmt.Errorf("can't parse category at %v", s.value) } func (s *stringPosition) parseInt() (int, error) { start := s.position var result int for s.position < s.length { ch := []rune(s.value)[s.position] if !unicode.IsDigit(ch) { break } result = result*10 + int(ch-'0') s.position++ } if s.position == start { return 0, fmt.Errorf("Missing integer at %v", s) } return result, nil } func (s *stringPosition) parseName() (string, error) { start := s.position for s.position < s.length { ch := []rune(s.value)[s.position] if (!unicode.IsLetter(ch) && !unicode.IsDigit(ch)) && ch != ',' && ch != '_' { break } s.position++ } if s.position == start { return "", fmt.Errorf("Missing name at %v", s) } return string([]rune(s.value)[start:s.position]), nil } func (s *stringPosition) requireChar(required rune) error { if s.position >= s.length || []rune(s.value)[s.position] != 
required { return fmt.Errorf("Missing required char '%s' at position %v", string(required), s.position) } s.position += 1 return nil } func (s *stringPosition) consumeChar(ch rune) bool { result := s.position < s.length && []rune(s.value)[s.position] == ch if result { s.position += 1 } return result } func (s *stringPosition) parseUnion(ty *TypeDescription) error { err := s.requireChar('<') if err != nil { return err } consume := true for consume { t, err := s.parseType() if err != nil { return err } err = ty.addUnionChild(t) if err != nil { return err } consume = s.consumeChar(',') } err = s.requireChar('>') if err != nil { return err } return nil } func (s *stringPosition) parseStruct(ty *TypeDescription) error { err := s.requireChar('<') if err != nil { return err } consume := true for consume { fieldName, err := s.parseName() if err != nil { return err } err = s.requireChar(':') if err != nil { return err } t, err := s.parseType() if err != nil { return err } err = ty.addField(fieldName, t) if err != nil { return err } consume = s.consumeChar(',') } err = s.requireChar('>') if err != nil { return err } return nil } func (s *stringPosition) parseType() (*TypeDescription, error) { var err error cat, err := s.parseCategory() if err != nil { return nil, err } result, err := NewTypeDescription(SetCategory(cat)) if err != nil { return nil, err } switch result.category.name { case CategoryBinary.name, CategoryBoolean.name, CategoryByte.name, CategoryDate.name, CategoryDouble.name, CategoryFloat.name, CategoryInt.name, CategoryLong.name, CategoryShort.name, CategoryString.name, CategoryTimestamp.name: case CategoryChar.name, CategoryVarchar.name: err = s.requireChar('(') if err != nil { return nil, err } length, err := s.parseInt() if err != nil { return nil, err } err = result.withMaxLength(length) if err != nil { return nil, err } err = s.requireChar(')') if err != nil { return nil, err } case CategoryDecimal.name: err = s.requireChar('(') if err != nil { return nil, err } precision, err := s.parseInt() if err != nil { return nil, err } err = s.requireChar(',') if err != nil { return nil, err } scale, err := s.parseInt() if err != nil { return nil, err } err = result.withScale(scale) if err != nil { return nil, err } err = result.withPrecision(precision) if err != nil { return nil, err } err = s.requireChar(')') if err != nil { return nil, err } case CategoryList.name: err = s.requireChar('<') if err != nil { return nil, err } k, err := s.parseType() if err != nil { return nil, err } result.children = append(result.children, k) err = s.requireChar('>') if err != nil { return nil, err } case CategoryMap.name: err = s.requireChar('<') if err != nil { return nil, err } t, err := s.parseType() if err != nil { return nil, err } result.children = append(result.children, t) err = s.requireChar(',') if err != nil { return nil, err } t, err = s.parseType() if err != nil { return nil, err } result.children = append(result.children, t) err = s.requireChar('>') if err != nil { return nil, err } case CategoryUnion.name: err = s.parseUnion(result) if err != nil { return nil, err } case CategoryStruct.name: err = s.parseStruct(result) if err != nil { return nil, err } default: return nil, fmt.Errorf("Unknown type %s at %s", result.category, s) } return result, nil } const ( maxPrecision = 38 maxScale = 38 defaultPrecision = 38 defaultScale = 10 defaultLength = 256 ) type TypeDescription struct { category Category parent *TypeDescription children []*TypeDescription fieldNames []string maxLength int 
precision int scale int id int maxId int } type TypeDescriptionTransformFunc func(t *TypeDescription) error func NewTypeDescription(fns ...TypeDescriptionTransformFunc) (*TypeDescription, error) { t := &TypeDescription{ id: -1, maxId: -1, maxLength: defaultLength, precision: defaultPrecision, scale: defaultScale, } for _, fn := range fns { err := fn(t) if err != nil { return nil, err } } return t, nil } func SetCategory(category Category) TypeDescriptionTransformFunc { return func(t *TypeDescription) error { t.category = category return nil } } func AddField(field string, fns ...TypeDescriptionTransformFunc) TypeDescriptionTransformFunc { return func(t *TypeDescription) error { ft, err := NewTypeDescription(fns...) if err != nil { return err } return t.addField(field, ft) } } func AddUnionChild(fns ...TypeDescriptionTransformFunc) TypeDescriptionTransformFunc { return func(t *TypeDescription) error { ut, err := NewTypeDescription(fns...) if err != nil { return err } return t.addUnionChild(ut) } } func AddChild(fns ...TypeDescriptionTransformFunc) TypeDescriptionTransformFunc { return func(t *TypeDescription) error { ut, err := NewTypeDescription(fns...) if err != nil { return err } return t.addChild(ut) } } func (t *TypeDescription) addChild(child *TypeDescription) error { if t.category != CategoryList && t.category != CategoryMap { return fmt.Errorf("Can only add child to map or list type and not %s", t.category.name) } t.children = append(t.children, child) child.parent = t return nil } func (t *TypeDescription) addUnionChild(child *TypeDescription) error { if t.category.name != CategoryUnion.name { return fmt.Errorf("Can only add types to union type and not %s", t.category.name) } t.children = append(t.children, child) child.parent = t return nil } func (t *TypeDescription) addField(field string, fieldType *TypeDescription) error { if t.category.name != CategoryStruct.name { return fmt.Errorf("Can only add fields to struct type and not %s", t.category.name) } t.fieldNames = append(t.fieldNames, field) t.children = append(t.children, fieldType) fieldType.parent = t return nil } func (t *TypeDescription) withMaxLength(maxLength int) error { if t.category.name != CategoryVarchar.name && t.category.name != CategoryChar.name { return fmt.Errorf("maxLength is only allowed on char and varchar and not %s", t.category.name) } t.maxLength = maxLength return nil } func (t *TypeDescription) withScale(scale int) error { if t.category.name != CategoryDecimal.name { return fmt.Errorf("scale is only allowed on decimal and not %s", t.category.name) } else if scale < 0 || scale > maxScale || scale > t.precision { return fmt.Errorf("scale is out of range at %v", scale) } t.scale = scale return nil } func (t *TypeDescription) withPrecision(precision int) error { if t.category.name != CategoryDecimal.name { return fmt.Errorf("precision is only allowed on decimal and not %s", t.category.name) } else if precision < 1 || precision > maxPrecision || t.scale > precision { return fmt.Errorf("precision %v is out of range of 1 .. 
%v", precision, t.scale) } t.precision = precision return nil } func (t *TypeDescription) Columns() []string { return t.fieldNames } func (t *TypeDescription) getID() int { if t.id == -1 { root := t for root.parent != nil { root = root.parent } root.assignIDs(0) } return t.id } func (t *TypeDescription) getChildrenIDs() []int { min := t.getID() max := t.maxId ids := make([]int, max-min, max-min) for i := range ids { ids[i] = min + i + 1 } return ids } func (t *TypeDescription) getSubtypes() []int { var ids []int for _, child := range t.children { ids = append(ids, child.getID()) } return ids } func (t *TypeDescription) getCategory() Category { return t.category } func (t *TypeDescription) assignIDs(startID int) int { t.id = startID startID++ if t.children != nil { for _, child := range t.children { startID = child.assignIDs(startID) } } t.maxId = startID - 1 return startID } func (t *TypeDescription) printToBuffer(buf *bytes.Buffer) { buf.WriteString(t.category.name) switch t.category.name { case CategoryDecimal.name: buf.WriteString(`(`) buf.WriteString(fmt.Sprint(t.precision)) buf.WriteString(`,`) buf.WriteString(fmt.Sprint(t.scale)) buf.WriteString(`)`) case CategoryChar.name, CategoryVarchar.name: buf.WriteString(`(`) buf.WriteString(fmt.Sprint(t.maxLength)) buf.WriteString(`)`) case CategoryList.name, CategoryMap.name, CategoryUnion.name: buf.WriteString(`<`) for i, child := range t.children { if i != 0 { buf.WriteString(`,`) } child.printToBuffer(buf) } buf.WriteString(`>`) case CategoryStruct.name: buf.WriteString(`<`) for i, child := range t.children { if i != 0 { buf.WriteString(`,`) } buf.WriteString(t.fieldNames[i]) buf.WriteString(`:`) child.printToBuffer(buf) } buf.WriteString(`>`) } } func (t *TypeDescription) String() string { var buf bytes.Buffer t.printToBuffer(&buf) return buf.String() } func (t *TypeDescription) printJSONToBuffer(prefix string, buf *bytes.Buffer, indent int) { for i := 0; i < indent; i++ { buf.WriteString("\t") } buf.WriteString(prefix) buf.WriteString("{\"category\": \"") buf.WriteString(t.category.name) buf.WriteString("\", \"id\": ") buf.WriteString(fmt.Sprint(t.getID())) buf.WriteString(", \"max\": ") buf.WriteString(fmt.Sprint(t.maxId)) switch t.category.name { case CategoryDecimal.name: buf.WriteString(", \"precision\": ") buf.WriteString(fmt.Sprint(t.precision)) buf.WriteString(", \"scale\": ") buf.WriteString(fmt.Sprint(t.scale)) case CategoryChar.name, CategoryVarchar.name: buf.WriteString(", \"length\": ") buf.WriteString(fmt.Sprint(t.maxLength)) case CategoryList.name, CategoryMap.name, CategoryUnion.name: buf.WriteString(", \"children\": [") for i, child := range t.children { child.printJSONToBuffer("", buf, indent) if i != len(t.children)-1 { buf.WriteString(`,`) } } buf.WriteString("]") case CategoryStruct.name: buf.WriteString(", \"fields\": {") for i, child := range t.children { child.printJSONToBuffer("\""+t.fieldNames[i]+"\": ", buf, indent) if i != len(t.children)-1 { buf.WriteString(`,`) } } buf.WriteString(`}`) break default: break } buf.WriteString(`}`) } // ToJSON returns a json encoded string of t. func (t *TypeDescription) ToJSON() string { var buf bytes.Buffer t.printJSONToBuffer("", &buf, 0) return buf.String() } // MarshalJSON returns a json encoded byte slice of t. 
func (t *TypeDescription) MarshalJSON() ([]byte, error) { return []byte(t.ToJSON()), nil } func (t *TypeDescription) GetField(fieldName string) (*TypeDescription, error) { fieldNames := strings.Split(fieldName, ".") root := fieldNames[0] if len(fieldNames) == 1 { if root == "" || root == "*" { return t, nil } if len(t.fieldNames) != len(t.children) { return nil, fmt.Errorf("no field with name: %s", fieldName) } for i, child := range t.children { if t.fieldNames[i] == root { return child, nil } } } for i, child := range t.children { if t.fieldNames[i] == root { return child.GetField(strings.Join(fieldNames[1:], ".")) } } return nil, fmt.Errorf("no field with name: %s", fieldName) } func (t *TypeDescription) Type() *proto.Type { ids := t.getSubtypes() children := make([]uint32, len(ids)) precision := uint32(t.precision) scale := uint32(t.scale) maxLength := uint32(t.maxLength) for i := range ids { children[i] = uint32(ids[i]) } return &proto.Type{ Kind: t.category.typeKind, FieldNames: t.fieldNames, Subtypes: children, Precision: &precision, Scale: &scale, MaximumLength: &maxLength, } } func (t *TypeDescription) Types() []*proto.Type { var types []*proto.Type types = append(types, t.Type()) for _, child := range t.children { types = append(types, child.Types()...) } return types } func createMap(key, value *TypeDescription) (*TypeDescription, error) { td, err := NewTypeDescription( SetCategory(CategoryMap), ) if err != nil { return nil, err } err = td.addChild(key) if err != nil { return nil, err } err = td.addChild(value) if err != nil { return nil, err } return td, nil } func createList(child *TypeDescription) (*TypeDescription, error) { td, err := NewTypeDescription( SetCategory(CategoryList), ) if err != nil { fmt.Println(err) return nil, err } err = td.addChild(child) if err != nil { return nil, err } return td, nil } func ParseSchema(schema string) (*TypeDescription, error) { return NewStringPosition(schema).parseType() }
package com.dynamicpdf.api; import com.fasterxml.jackson.annotation.JsonInclude; import com.fasterxml.jackson.annotation.JsonProperty; import com.fasterxml.jackson.annotation.JsonInclude.Include; /** * Represents AES 128 bit PDF document security. * AES 128 bit PDF security is compatible with PDF version 1.5 and higher and, * Adobe Acrobat Reader version 7 or higher is needed to open these documents. * Older readers will not be able to read documents encrypted with this security. */ @JsonInclude(Include.NON_NULL) public class Aes128Security extends Security { private EncryptDocumentComponents documentComponents; /** * Initializes a new instance of the <code>Aes128Security</code> class by * taking the owner and user passwords as parameters to create PDF. * @param ownerPassword The owner password to open the document. * @param userPassword The user password to open the document. */ public Aes128Security(String userPassword, String ownerPassword) { super(userPassword, ownerPassword); } @JsonProperty("type") SecurityType getType() { return SecurityType.AES128; } /** * Gets the <code>EncryptDocumentComponents</code>, components of the document to be encrypted. * We can encrypt all the PDF content or the content, excluding the metadata. * @return The documents components to be encrypted. */ public EncryptDocumentComponents getDocumentComponents() { return documentComponents; } /** * Sets the <code>EncryptDocumentComponents</code>, components of the document to be encrypted. * We can encrypt all the PDF content or the content, excluding the metadata. * @param value The documents components to be encrypted. */ public void setDocumentComponents(EncryptDocumentComponents value) { documentComponents = value; } }
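A minimal usage sketch of the class above. The passwords are placeholders, and the EncryptDocumentComponents.ALL constant is an assumption, since that enum's values are not shown in this file.

// Hypothetical usage of Aes128Security; the passwords and the
// EncryptDocumentComponents.ALL constant are illustrative assumptions.
Aes128Security security = new Aes128Security("myUserPassword", "myOwnerPassword");
security.setDocumentComponents(EncryptDocumentComponents.ALL);
// The configured security object would then be attached to the PDF creation
// request alongside the rest of the document settings (not shown here).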
<gh_stars>0 {-# LANGUAGE ScopedTypeVariables #-} -- | Helpers for testing module Tests.Helpers ( -- * helpers T(..) , typeName , Double01(..) -- * IEEE 754 , isDenorm -- * Generic QC tests , monotonicallyIncreases , monotonicallyIncreasesIEEE -- * HUnit helpers , testAssertion , testEquality -- * QC helpers , small , unsquare , shrinkFixedList ) where import Data.Typeable import Numeric.MathFunctions.Constants (m_tiny) import Test.Framework import Test.Framework.Providers.HUnit import Test.QuickCheck import qualified Numeric.IEEE as IEEE import qualified Test.HUnit as HU -- | Phantom typed value used to select right instance in QC tests data T a = T -- | String representation of type name typeName :: Typeable a => T a -> String typeName = show . typeOf . typeParam where typeParam :: T a -> a typeParam _ = undefined -- | Check if Double denormalized isDenorm :: Double -> Bool isDenorm x = let ax = abs x in ax > 0 && ax < m_tiny -- | Generates Doubles in range [0,1] newtype Double01 = Double01 Double deriving (Show) instance Arbitrary Double01 where arbitrary = do (_::Int, x) <- fmap properFraction arbitrary return $ Double01 x ---------------------------------------------------------------- -- Generic QC ---------------------------------------------------------------- -- Check that function is nondecreasing monotonicallyIncreases :: (Ord a, Ord b) => (a -> b) -> a -> a -> Bool monotonicallyIncreases f x1 x2 = f (min x1 x2) <= f (max x1 x2) -- Check that function is nondecreasing taking rounding errors into -- account. -- -- In fact funstion is allowed to decrease less than one ulp in order -- to guard againist problems with excess precision. On x86 FPU works -- with 80-bit numbers but doubles are 64-bit so rounding happens -- whenever values are moved from registers to memory monotonicallyIncreasesIEEE :: (Ord a, IEEE.IEEE b) => (a -> b) -> a -> a -> Bool monotonicallyIncreasesIEEE f x1 x2 = y1 <= y2 || (y1 - y2) < y2 * IEEE.epsilon where y1 = f (min x1 x2) y2 = f (max x1 x2) ---------------------------------------------------------------- -- HUnit helpers ---------------------------------------------------------------- testAssertion :: String -> Bool -> Test testAssertion str cont = testCase str $ HU.assertBool str cont testEquality :: (Show a, Eq a) => String -> a -> a -> Test testEquality msg a b = testCase msg $ HU.assertEqual msg a b unsquare :: (Arbitrary a, Show a, Testable b) => (a -> b) -> Property unsquare = forAll (small arbitrary) small :: Gen a -> Gen a small act = sized $ \n -> resize (smallish n) act where smallish = round . (sqrt :: Double -> Double) . fromIntegral . abs shrinkFixedList :: (a -> [a]) -> [a] -> [[a]] shrinkFixedList shr (x:xs) = map (:xs) (shr x) ++ map (x:) (shrinkFixedList shr xs) shrinkFixedList _ [] = []
<reponame>Narflex/sagetv /* FAAC - codec plugin for Cooledit Copyright (C) 2002-2004 <NAME> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. The author can be contacted at: <EMAIL> */ #include <windows.h> //#include "resource.h" #include "filters.h" // CoolEdit #include "DecDialog.h" #include "Cfaad.h" // ********************************************************************************************* BOOL FAR PASCAL FilterUnderstandsFormat(LPSTR filename) { WORD len; if((len=lstrlen(filename))>4 && (!strcmpi(filename+len-4,".aac") || !strcmpi(filename+len-4,".mp4") || !strcmpi(filename+len-4,".m4a"))) return TRUE; return FALSE; } // ********************************************************************************************* long FAR PASCAL FilterGetFileSize(HANDLE hInput) { if(!hInput) return 0; DWORD dst_size; MYINPUT *mi; GLOBALLOCK(mi,hInput,MYINPUT,return 0); dst_size=mi->dst_size; GlobalUnlock(hInput); return dst_size; } // ********************************************************************************************* DWORD FAR PASCAL FilterOptionsString(HANDLE hInput, LPSTR szString) { if(!hInput) { lstrcpy(szString,""); return 0; } MYINPUT *mi; GLOBALLOCK(mi,hInput,MYINPUT,return 0); sprintf(szString,"MPEG%d - %lu bps\n", mi->file_info.version ? 4 : 2, mi->file_info.bitrate); if(mi->IsMP4) // MP4 file -------------------------------------------------------------------- lstrcat(szString,mpeg4AudioNames[mi->type]); else // AAC file ----------------------------------------------------------------------------- { switch(mi->file_info.headertype) { case RAW: sprintf(szString,"MPEG%d\nRaw\n", mi->file_info.version ? 
4 : 2); lstrcat(szString,mpeg4AudioNames[mi->file_info.object_type]); GlobalUnlock(hInput); return 1;//0; // call FilterGetOptions() case ADIF: lstrcat(szString,"ADIF\n"); break; case ADTS: lstrcat(szString,"ADTS\n"); break; } lstrcat(szString,mpeg4AudioNames[mi->file_info.object_type]); /* switch(mi->file_info.object_type) { case MAIN: lstrcat(szString,"Main"); break; case LC: lstrcat(szString,"LC (Low Complexity)"); break; case SSR: lstrcat(szString,"SSR (unsupported)"); break; case LTP: lstrcat(szString,"LTP (Long Term Prediction)"); break; case HE_AAC: lstrcat(szString,"HE (High Efficiency)"); break; }*/ } GlobalUnlock(hInput); return 1; // don't call FilterGetOptions() } // ********************************************************************************************* /* DWORD FAR PASCAL FilterOptions(HANDLE hInput) { // FilterGetOptions() is called if this function and FilterSetOptions() are exported and FilterOptionsString() returns 0 // FilterSetOptions() is called only if this function is exported and and it returns 0 return 1; } // --------------------------------------------------------------------------------------------- DWORD FAR PASCAL FilterSetOptions(HANDLE hInput, DWORD dwOptions, LONG lSamprate, WORD wChannels, WORD wBPS) { return dwOptions; }*/ // ********************************************************************************************* void FAR PASCAL CloseFilterInput(HANDLE hInput) { if(!hInput) return; /* if(mi->file_info.headertype==RAW) { CRegistry reg; if(reg.openCreateReg(HKEY_CURRENT_USER,REGISTRY_PROGRAM_NAME "\\FAAD")) reg.setRegBool("OpenDialog",FALSE); else MessageBox(0,"Can't open registry!",0,MB_OK|MB_ICONSTOP); }*/ Cfaad tmp(hInput); } // ********************************************************************************************* #define ERROR_OFI(msg) \ { \ if(msg) \ MessageBox(0, msg, APP_NAME " plugin", MB_OK|MB_ICONSTOP); \ if(hInput) \ { \ GlobalUnlock(hInput); \ CloseFilterInput(hInput); \ } \ return 0; \ } // ----------------------------------------------------------------------------------------------- // return handle that will be passed in to close, and write routines HANDLE FAR PASCAL OpenFilterInput(LPSTR lpstrFilename, long far *lSamprate, WORD far *wBitsPerSample, WORD far *wChannels, HWND hWnd, long far *lChunkSize) { HANDLE hInput=NULL; Cfaad tmp; CMyDecCfg cfg(false); if(!*lSamprate && !tmp.IsMP4(lpstrFilename)) { /* aac_buffer b; float fLength; int bitrate; DWORD headertype; tmp.GetAACInfos(lpstrFilename,&b,&headertype,&fLength,&bitrate); if(headertype==RAW) tmp.ShowDlg4RawAAC=ShowDlg4RawAAC;*/ DWORD *seek_table; int seek_table_length; faadAACInfo file_info; if(!get_AAC_format(lpstrFilename, &file_info, &seek_table, &seek_table_length, 0)) if(file_info.headertype==RAW) tmp.ShowDlg4RawAAC=ShowDlg4RawAAC; } if(hInput=tmp.getInfos(lpstrFilename)) { MYINPUT *mi; GLOBALLOCK(mi,hInput,MYINPUT,return NULL); *wChannels=(WORD)mi->Channels; *lSamprate=mi->Samprate; *wBitsPerSample=mi->BitsPerSample; *lChunkSize=(*wBitsPerSample/8)*1024**wChannels*2; GlobalUnlock(hInput); tmp.hInput=NULL; } return hInput; } // ********************************************************************************************* DWORD FAR PASCAL ReadFilterInput(HANDLE hInput, unsigned char far *bufout, long lBytes) { if(!hInput) return 0; Cfaad tmp; return tmp.processData(hInput,bufout,lBytes); }
/**
 * Adds a DynamoDB write request to the batch currently being assembled. Requests
 * accumulate until batchSize is reached (or a duplicate key is seen), at which
 * point the batch is promoted to the queue.
 *
 * @param dynamoDBWriteRequest a single write request to DynamoDB
 * @return a future that completes with the response of the batch this request ends up in
 */
public ListenableFuture<BatchResponse> add(final DynamoDBWriteRequest dynamoDBWriteRequest) {
    if (keySelector != null) {
        try {
            String key = keySelector.getKey(dynamoDBWriteRequest.getWriteRequest());
            // If this key was already seen in the current batch, promote the batch
            // first so that two writes for the same key are not batched together.
            if (seenKeys.contains(key)) {
                promoteBatch();
            }
            seenKeys.add(key);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }
    numberOfRecords++;
    String tableName = dynamoDBWriteRequest.getTableName();
    final List<WriteRequest> writeRequests =
            batchUnderProcess.computeIfAbsent(tableName, k -> new ArrayList<>(batchSize));
    writeRequests.add(dynamoDBWriteRequest.getWriteRequest());
    ListenableFuture<BatchResponse> listenableFuture =
            futures.computeIfAbsent(batchId, k -> SettableFuture.create());
    if (numberOfRecords >= batchSize) {
        promoteBatch();
    }
    return listenableFuture;
}
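A sketch of how a caller might drive this method. The batcher variable, the requests collection and their construction are hypothetical, since the enclosing class and the way DynamoDBWriteRequest instances are built are not shown here.

// Hypothetical caller; 'batcher' is an instance of the class declaring add(),
// and 'requests' is some iterable of DynamoDBWriteRequest built elsewhere.
List<ListenableFuture<BatchResponse>> pending = new ArrayList<>();
for (DynamoDBWriteRequest request : requests) {
    // Each call either joins the batch in progress or, once batchSize records
    // (or a repeated key) accumulate, triggers a promoteBatch() internally.
    pending.add(batcher.add(request));
}
// Each future completes once the batch containing its request has been processed.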
/**
 * An exception class which signals nonfatal errors.
 *
 * <p>A nonfatal error terminates processing for the original source concerned
 * but does not abort the whole {@link Chionographis} task.</p>
 *
 * <p>An instance of this exception class carries a flag indicating whether the
 * cause of the exception has already been reported through the logger.
 * The flag defaults to {@code false}.</p>
 */
class NonfatalBuildException extends BuildException {

    private static final long serialVersionUID = -3814186559367393576L;

    public NonfatalBuildException() {
    }

    public NonfatalBuildException(String message) {
        super(message);
    }

    public NonfatalBuildException(String message, Throwable cause) {
        super(message, cause);
    }

    public NonfatalBuildException(Throwable cause) {
        super(cause);
    }

    public NonfatalBuildException(String message, Location location) {
        super(message, location);
    }

    public NonfatalBuildException(String msg, Throwable cause, Location location) {
        super(msg, cause, location);
    }

    public NonfatalBuildException(Throwable cause, Location location) {
        super(cause, location);
    }

    @Override
    public String toString() {
        String bySuper = super.toString();
        return bySuper.isEmpty() ? getClass().getName() : bySuper;
    }
}
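A brief sketch of the intended catch-and-continue pattern, assuming a per-source loop in the surrounding task; the Source type, the process method and the logger are placeholders, not part of this class.

// Hypothetical per-source loop: a NonfatalBuildException skips the offending
// source but lets the task carry on, unlike an ordinary fatal BuildException.
for (Source source : sources) {
    try {
        process(source);
    } catch (NonfatalBuildException e) {
        logger.warn("Skipping " + source + ": " + e);
        // continue with the remaining sources
    }
}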
#include <ATen/core/interned_strings.h> namespace torch { namespace jit { namespace prim { using namespace ::c10::prim; } namespace attr { using namespace ::c10::attr; } namespace aten { using namespace ::c10::aten; } namespace onnx { using namespace ::c10::onnx; } using ::c10::Symbol; } // namespace jit } // namespace torch
/* * Copyright 2014 <NAME> * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ #include <iostream> #include <stdexcept> #include <string> #include <thread> #include <vector> #include <sys/stat.h> #include <sys/types.h> #include <dlfcn.h> #include <pthread.h> #include <unistd.h> #include <giomm.h> #include <glibmm.h> #include <libconfig.h++> #include <jni.h> #include "JarReader.h" #include "jvm.h" #include "minecraftd-dbus.h" #include "pipe.h" namespace { const std::string DEFAULT_CONFIG_FILE_NAME{MINECRAFTDCONFDIR "/minecraftd.conf"}; const std::string DEFAULT_LOG_CONFIG_FILENAME{MINECRAFTDCONFDIR "/log4j2.xml"}; class EnsureConditionBroadcast { public: EnsureConditionBroadcast(pthread_cond_t *condition) : condition_(condition), signalled_(false) { } ~EnsureConditionBroadcast() { signal(); } void signal() { if(!signalled_) { pthread_cond_broadcast(condition_); signalled_ = true; } } private: pthread_cond_t *condition_; bool signalled_; }; void jvmMain(minecraftd::JvmMainArguments *arguments) { EnsureConditionBroadcast ensureConditionBroadcast{&arguments->jvmCompleteCondition}; void *libjvm = dlopen(arguments->libjvmPath.c_str(), RTLD_LAZY); if(libjvm == nullptr) { throw std::runtime_error{"Failed to load JVM dynamic library: " + std::string{dlerror()}}; } minecraftd::JNI_CreateJavaVM JNI_CreateJavaVM = reinterpret_cast<minecraftd::JNI_CreateJavaVM>(dlsym(libjvm, "JNI_CreateJavaVM")); if(JNI_CreateJavaVM == nullptr) { throw std::runtime_error{"Failed to load JNI_CreateJavaVM function: " + std::string{dlerror()}}; } JavaVMInitArgs jvmArguments; jvmArguments.version = JNI_VERSION_1_6; std::vector<JavaVMOption> jvmOptions; std::string classPath{"-Djava.class.path=" + arguments->jarPath}; jvmOptions.push_back(JavaVMOption{const_cast<char*>(classPath.c_str()), nullptr}); // For Oracle-based JVMs, give the process a name for tools such as jcmd and jconsole jvmOptions.push_back(JavaVMOption{const_cast<char*>("-Dsun.java.command=minecraftd"), nullptr}); std::string log4jOption{"-Dlog4j.configurationFile="}; if(!arguments->customLogConfiguration.empty()) { log4jOption += arguments->customLogConfiguration; jvmOptions.push_back(JavaVMOption{const_cast<char*>(log4jOption.c_str()), nullptr}); } for(auto argument: arguments->additionalArguments) { jvmOptions.push_back(JavaVMOption{const_cast<char*>(argument.c_str()), nullptr}); } jvmArguments.options = jvmOptions.data(); jvmArguments.nOptions = jvmOptions.size(); jvmArguments.ignoreUnrecognized = false; JavaVM *jvm; JNIEnv *jni; jint jrc = JNI_CreateJavaVM(&jvm, reinterpret_cast<void**>(&jni), &jvmArguments); if(jrc != JNI_OK) { throw std::runtime_error{"Failed to create Java virtual machine"}; } std::string mainClassSpec{arguments->mainClassName}; for(size_t i = mainClassSpec.find('.'); i != std::string::npos; i = mainClassSpec.find('.', i)) { mainClassSpec[i] = '/'; } jclass mainClass = jni->FindClass(mainClassSpec.c_str()); if(mainClass == nullptr) { minecraftd::JavaException e{jni}; if(e.type() == "java.lang.NoClassDefFoundError") { throw std::runtime_error{"Could not find 
main class: " + arguments->mainClassName}; } else { throw e; } } jmethodID mainMethod = jni->GetStaticMethodID(mainClass, "main", "([Ljava/lang/String;)V"); if(mainMethod == nullptr) { minecraftd::JavaException e{jni}; if(e.type() == "java.lang.NoSuchMethodError") { throw std::runtime_error{"Could not find `void main(String[])' method in " + arguments->mainClassName}; } else { throw e; } } jclass stringClass = jni->FindClass("java/lang/String"); if(stringClass == nullptr) { throw minecraftd::JavaException{jni}; } jobjectArray mainArguments = jni->NewObjectArray(1, stringClass, nullptr); if(mainArguments == nullptr) { throw minecraftd::JavaException{jni}; } jstring noguiString = jni->NewStringUTF("nogui"); if(noguiString == nullptr) { throw minecraftd::JavaException{jni}; } jni->SetObjectArrayElement(mainArguments, 0, noguiString); if(jni->ExceptionCheck()) { throw minecraftd::JavaException{jni}; } std::cout << "Signalling completion of JVM startup" << std::endl; ensureConditionBroadcast.signal(); jni->CallStaticVoidMethod(mainClass, mainMethod, mainArguments); if(jni->ExceptionCheck()) { throw minecraftd::JavaException{jni}; } } pthread_t mainThread; Glib::RefPtr<Glib::MainLoop> mainLoop; void onExit() { mainLoop->quit(); std::cout << "Waiting for main thread to exit" << std::endl; pthread_join(mainThread, nullptr); } } int main(int argc, char **argv) { std::vector<std::string> arguments{argv + 1, argv + argc}; std::string configFileName{DEFAULT_CONFIG_FILE_NAME}; for(auto it = arguments.cbegin(); it != arguments.cend(); ++it) { if((*it == "--help") || (*it == "-h") || (*it == "-?")) { std::cout << "Usage: " << argv[0] << " [options]" << std::endl << std::endl << " --help/-h/-?\t\tPrints this message" << std::endl << " --config/-c <path>\tSpecifies an alternate configuration file (default:" << std::endl << " \t" << DEFAULT_CONFIG_FILE_NAME << ')' << std::endl; return 0; } else if((*it == "--config") || (*it == "-c")) { if(++it == arguments.cend()) { std::cerr << "Error: --config requires a path argument" << std::endl; return 1; } configFileName = *it; } } libconfig::Config configFile; try { configFile.readFile(configFileName.c_str()); } catch(const libconfig::FileIOException &e) { std::cerr << "Failed to read from " << configFileName << ": " << e.what() << std::endl; return 1; } catch(const libconfig::ParseException &e) { std::cerr << "Failed to parse " << configFileName << ": " << e.what() << std::endl; return 1; } std::string serverDirectory{MINECRAFTSERVERDIR}; configFile.lookupValue("serverDirectory", serverDirectory); if(chdir(serverDirectory.c_str()) != 0) { if(errno == ENOENT) { // TODO: Create directory recursively if(mkdir(serverDirectory.c_str(), 0777) != 0) { throw std::system_error(errno, std::system_category()); } } else { throw std::system_error(errno, std::system_category()); } } std::string jarPath{MINECRAFTJARDIR "/minecraft_server.jar"}; configFile.lookupValue("jar", jarPath); std::string jvmLibPath{JVMLIBPATH}; configFile.lookupValue("jvm.jvmLibrary", jvmLibPath); std::string logConfigFileName{DEFAULT_LOG_CONFIG_FILENAME}; try { libconfig::Setting &customLogConfiguration = configFile.lookup("customLogConfiguration"); if(customLogConfiguration.getType() == libconfig::Setting::TypeBoolean) { if(!static_cast<bool>(customLogConfiguration)) { logConfigFileName.clear(); logConfigFileName.shrink_to_fit(); } } else { logConfigFileName = static_cast<const char*>(customLogConfiguration); } } catch(libconfig::SettingNotFoundException&) { // No custom logging configuration } 
minecraftd::JarReader jarReader{jarPath}; std::string mainClassName = jarReader.getMainClassName(); minecraftd::PosixPipe pipe; if(close(STDIN_FILENO) != 0) { std::cerr << "Failed to close current standard input file descriptor" << std::endl; return 1; } if(dup2(pipe.readEnd(), STDIN_FILENO) != 0) { std::cerr << "Failed to duplicate read end of console pipe as standard input:" << errno << std::endl; return 1; } pthread_mutex_t mutex; if(pthread_mutex_init(&mutex, nullptr) != 0) { std::cerr << "Failed to create mutex object" << std::endl; return 1; } minecraftd::JvmMainArguments jvmMainArguments{jvmLibPath, jarPath, mainClassName, logConfigFileName}; if(pthread_cond_init(&jvmMainArguments.jvmCompleteCondition, nullptr) != 0) { std::cerr << "Failed to create JVM completion condition" << std::endl; return 1; } try { libconfig::Setting &additionalArguments = configFile.lookup("jvm.arguments"); const int count = additionalArguments.getLength(); for(int i = 0; i < count; ++i) { jvmMainArguments.additionalArguments.push_back(additionalArguments[i]); } } catch(libconfig::SettingNotFoundException&) { // No additional arguments to pass } mainThread = pthread_self(); std::atexit(onExit); std::thread jvmMainThread(jvmMain, &jvmMainArguments); pthread_mutex_lock(&mutex); std::cout << "Waiting for JVM to complete startup" << std::endl; pthread_cond_wait(&jvmMainArguments.jvmCompleteCondition, &mutex); pthread_mutex_unlock(&mutex); Gio::init(); minecraftd::Minecraftd1 dbusObject{"/net/za/slyfox/Minecraftd1", pipe}; std::cout << "Starting main loop" << std::endl; mainLoop = Glib::MainLoop::create(); mainLoop->run(); jvmMainThread.join(); std::cout << "And we're done!" << std::endl; return 0; }
<reponame>jyotisingh/gocd /* * Copyright 2018 ThoughtWorks, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.thoughtworks.go.server.service; import com.thoughtworks.go.config.*; import com.thoughtworks.go.config.materials.MaterialConfigs; import com.thoughtworks.go.config.materials.SubprocessExecutionContext; import com.thoughtworks.go.domain.*; import com.thoughtworks.go.domain.buildcause.BuildCause; import com.thoughtworks.go.domain.exception.StageAlreadyBuildingException; import com.thoughtworks.go.domain.materials.Material; import com.thoughtworks.go.domain.materials.TestingMaterial; import com.thoughtworks.go.domain.materials.svn.Subversion; import com.thoughtworks.go.domain.materials.svn.SvnCommand; import com.thoughtworks.go.fixture.PipelineWithTwoStages; import com.thoughtworks.go.helper.*; import com.thoughtworks.go.server.cache.GoCache; import com.thoughtworks.go.server.dao.DatabaseAccessHelper; import com.thoughtworks.go.server.dao.PipelineDao; import com.thoughtworks.go.server.dao.StageSqlMapDao; import com.thoughtworks.go.server.domain.Username; import com.thoughtworks.go.server.persistence.MaterialRepository; import com.thoughtworks.go.server.scheduling.ScheduleHelper; import com.thoughtworks.go.server.transaction.TransactionTemplate; import com.thoughtworks.go.util.GoConfigFileHelper; import com.thoughtworks.go.util.TimeProvider; import com.thoughtworks.go.utils.Assertions; import com.thoughtworks.go.utils.Timeout; import org.apache.commons.io.FileUtils; import org.junit.After; import org.junit.Before; import org.junit.Rule; import org.junit.Test; import org.junit.rules.TemporaryFolder; import org.junit.runner.RunWith; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.test.context.ContextConfiguration; import org.springframework.test.context.junit4.SpringJUnit4ClassRunner; import java.io.File; import static com.thoughtworks.go.helper.MaterialConfigsMother.svnMaterialConfig; import static com.thoughtworks.go.helper.ModificationsMother.modifyOneFile; import static com.thoughtworks.go.util.GoConfigFileHelper.env; import static com.thoughtworks.go.util.GoConstants.DEFAULT_APPROVED_BY; import static org.hamcrest.Matchers.*; import static org.junit.Assert.assertThat; import static org.junit.Assert.fail; @RunWith(SpringJUnit4ClassRunner.class) @ContextConfiguration(locations = { "classpath:WEB-INF/applicationContext-global.xml", "classpath:WEB-INF/applicationContext-dataLocalAccess.xml", "classpath:testPropertyConfigurer.xml" }) public class PipelineScheduleServiceTest { @Autowired private ScheduleService scheduleService; @Autowired private GoConfigDao goConfigDao; @Autowired private GoConfigService goConfigService; @Autowired private PipelineDao pipelineDao; @Autowired private StageSqlMapDao stageDao; @Autowired PipelineScheduleQueue pipelineScheduleQueue; @Autowired PipelineService pipelineService; @Autowired private ScheduleHelper scheduleHelper; @Autowired private DatabaseAccessHelper dbHelper; @Autowired private PipelineLockService 
pipelineLockService; @Autowired private GoCache goCache; @Autowired private EnvironmentConfigService environmentConfigService; @Autowired private MaterialRepository materialRepository; @Autowired private TransactionTemplate transactionTemplate; @Autowired private SubprocessExecutionContext subprocessExecutionContext; @Autowired private InstanceFactory instanceFactory; @Rule public final TemporaryFolder temporaryFolder = new TemporaryFolder(); private PipelineWithTwoStages pipelineWithTwoStages; private PipelineConfig mingleConfig; private PipelineConfig evolveConfig; private String md5 = "md5-test"; private static final String STAGE_NAME = "dev"; private GoConfigFileHelper configHelper; public Subversion repository; public static TestRepo testRepo; private PipelineConfig goConfig; @Before public void setup() throws Exception { configHelper = new GoConfigFileHelper(); configHelper.usingCruiseConfigDao(goConfigDao); configHelper.onSetUp(); testRepo = new SvnTestRepo(temporaryFolder); dbHelper.onSetUp(); repository = new SvnCommand(null, testRepo.projectRepositoryUrl()); mingleConfig = configHelper.addPipeline("mingle", STAGE_NAME, repository, "unit", "functional"); goConfig = configHelper.addPipeline("go", STAGE_NAME, repository, "unit"); StageConfig ftStageConfig = StageConfigMother.custom("ft", "twist"); ftStageConfig.jobConfigByConfigName(new CaseInsensitiveString("twist")).addVariable("JOB_LVL", "job value"); ftStageConfig.setVariables(env("STAGE_LVL", "stage value")); configHelper.addStageToPipeline("go", ftStageConfig); configHelper.addEnvironmentVariableToPipeline("go", env("PIPELINE_LVL", "pipeline value")); configHelper.addEnvironments("uat"); EnvironmentConfig uatEnv = configHelper.currentConfig().getEnvironments().named(new CaseInsensitiveString("uat")); uatEnv.addPipeline(new CaseInsensitiveString("go")); uatEnv.addEnvironmentVariable("ENV_LVL", "env value"); evolveConfig = configHelper.addPipeline("evolve", STAGE_NAME, repository, "unit"); goCache.clear(); } @After public void teardown() throws Exception { if (pipelineWithTwoStages != null) { pipelineWithTwoStages.onTearDown(); } dbHelper.onTearDown(); pipelineScheduleQueue.clear(); testRepo.tearDown(); FileUtils.deleteQuietly(new File("pipelines")); configHelper.onTearDown(); } @Test public void shouldScheduleStageAfterModifications() throws Exception { scheduleAndCompleteInitialPipelines(); Material stubMaterial = new TestingMaterial(); mingleConfig.setMaterialConfigs(new MaterialConfigs(stubMaterial.config())); MaterialRevisions revisions = new MaterialRevisions(); revisions.addRevision(stubMaterial, ((TestingMaterial)stubMaterial).modificationsSince(null, null, subprocessExecutionContext)); BuildCause buildCause = BuildCause.createWithModifications(revisions, ""); dbHelper.saveMaterials(buildCause.getMaterialRevisions()); Pipeline pipeline = instanceFactory.createPipelineInstance(mingleConfig, buildCause, new DefaultSchedulingContext(DEFAULT_APPROVED_BY), md5, new TimeProvider()); pipelineService.save(pipeline); verifyMingleScheduledWithModifications(); } @Test public void shouldLockPipelineWhenSchedulingIt() throws Exception { scheduleAndCompleteInitialPipelines(); configHelper.lockPipeline("mingle"); Material stubMaterial = new TestingMaterial(); mingleConfig.setMaterialConfigs(new MaterialConfigs(stubMaterial.config())); assertThat(pipelineLockService.isLocked("mingle"), is(false)); MaterialRevisions revisions = new MaterialRevisions(); revisions.addRevision(stubMaterial, ((TestingMaterial) 
stubMaterial).modificationsSince(null, null, subprocessExecutionContext)); BuildCause buildCause = BuildCause.createWithModifications(revisions, ""); dbHelper.saveMaterials(buildCause.getMaterialRevisions()); Pipeline pipeline = instanceFactory.createPipelineInstance(mingleConfig, buildCause, new DefaultSchedulingContext(DEFAULT_APPROVED_BY), md5, new TimeProvider()); pipelineService.save(pipeline); assertThat(pipelineLockService.isLocked("mingle"), is(true)); } @Test public void shouldScheduleJobForAllAgentsWhenToBeRunOnAllAgents() throws Exception { configHelper.addAgent("localhost", "uuid1"); configHelper.addAgent("localhost", "uuid2"); configHelper.addAgent("localhost", "uuid3"); configHelper.addAgentToEnvironment("dev", "uuid1"); configHelper.setRunOnAllAgents(CaseInsensitiveString.str(evolveConfig.name()), STAGE_NAME, "unit", true); Material stubMaterial = new TestingMaterial(); evolveConfig.setMaterialConfigs(new MaterialConfigs(stubMaterial.config())); MaterialRevisions revisions = new MaterialRevisions(); revisions.addRevision(stubMaterial, ((TestingMaterial) stubMaterial).modificationsSince(null, null, subprocessExecutionContext)); BuildCause buildCause = BuildCause.createWithModifications(revisions, ""); dbHelper.saveMaterials(buildCause.getMaterialRevisions()); Pipeline pipeline = instanceFactory.createPipelineInstance(evolveConfig, buildCause, new DefaultSchedulingContext(DEFAULT_APPROVED_BY, environmentConfigService.agentsForPipeline(evolveConfig.name())), md5, new TimeProvider()); pipelineService.save(pipeline); Stage instance = scheduleService.scheduleStage(pipeline, STAGE_NAME, "anyone", new ScheduleService.NewStageInstanceCreator(goConfigService), new ScheduleService.ExceptioningErrorHandler()); JobInstances scheduledJobs = instance.getJobInstances(); assertThat(scheduledJobs.toArray(), hasItemInArray(hasProperty("name", is(RunOnAllAgents.CounterBasedJobNameGenerator.appendMarker("unit", 1))))); assertThat(scheduledJobs.toArray(), hasItemInArray(hasProperty("agentUuid", is("uuid2")))); assertThat(scheduledJobs.toArray(), hasItemInArray(hasProperty("name", is(RunOnAllAgents.CounterBasedJobNameGenerator.appendMarker("unit", 2))))); assertThat(scheduledJobs.toArray(), hasItemInArray(hasProperty("agentUuid", is("uuid3")))); assertThat(scheduledJobs.size(), is(2)); } @Test public void shouldScheduleMultipleJobsWhenToBeRunMultipleInstance() throws Exception { configHelper.setRunMultipleInstance(CaseInsensitiveString.str(evolveConfig.name()), STAGE_NAME, "unit", 2); Material stubMaterial = new TestingMaterial(); evolveConfig.setMaterialConfigs(new MaterialConfigs(stubMaterial.config())); MaterialRevisions revisions = new MaterialRevisions(); revisions.addRevision(stubMaterial, ((TestingMaterial) stubMaterial).modificationsSince(null, null, subprocessExecutionContext)); BuildCause buildCause = BuildCause.createWithModifications(revisions, ""); dbHelper.saveMaterials(buildCause.getMaterialRevisions()); Pipeline pipeline = instanceFactory.createPipelineInstance(evolveConfig, buildCause, new DefaultSchedulingContext(DEFAULT_APPROVED_BY, environmentConfigService.agentsForPipeline(evolveConfig.name())), md5, new TimeProvider()); pipelineService.save(pipeline); Stage instance = scheduleService.scheduleStage(pipeline, STAGE_NAME, "anyone", new ScheduleService.NewStageInstanceCreator(goConfigService), new ScheduleService.ExceptioningErrorHandler()); JobInstances scheduledJobs = instance.getJobInstances(); assertThat(scheduledJobs.size(), is(2)); assertThat(scheduledJobs.toArray(), 
hasItemInArray(hasProperty("name", is(RunMultipleInstance.CounterBasedJobNameGenerator.appendMarker("unit", 1))))); assertThat(scheduledJobs.toArray(), hasItemInArray(hasProperty("name", is(RunMultipleInstance.CounterBasedJobNameGenerator.appendMarker("unit", 2))))); } @Test public void shouldPassEnvironmentLevelEnvironmentVariablesToJobsForNewlyScheduledStage() throws Exception { scheduleAndCompleteInitialPipelines(); Pipeline pipeline = pipelineDao.mostRecentPipeline("go"); Stage stage = scheduleService.scheduleStage(pipeline, "ft", "anonymous", new ScheduleService.NewStageInstanceCreator(goConfigService), new ScheduleService.ExceptioningErrorHandler()); EnvironmentVariables jobVariables = stage.getJobInstances().first().getPlan().getVariables(); assertThat(jobVariables.size(), is(3)); //pipeline, stage, job, env is applied while creating work assertThat(jobVariables, hasItem(new EnvironmentVariable("PIPELINE_LVL", "pipeline value"))); assertThat(jobVariables, hasItem(new EnvironmentVariable("STAGE_LVL", "stage value"))); assertThat(jobVariables, hasItem(new EnvironmentVariable("JOB_LVL", "job value"))); } @Test public void shouldLockPipelineWhenSchedulingStage() throws Exception { scheduleAndCompleteInitialPipelines(); Pipeline pipeline = pipelineDao.mostRecentPipeline("mingle"); configHelper.lockPipeline("mingle"); assertThat(pipelineLockService.isLocked("mingle"), is(false)); scheduleService.scheduleStage(pipeline, STAGE_NAME, "anonymous", new ScheduleService.NewStageInstanceCreator(goConfigService), new ScheduleService.ExceptioningErrorHandler()); assertThat(pipelineLockService.isLocked("mingle"), is(true)); } @Test public void shouldForceFirstStagePlan() throws Exception { pipelineWithTwoStages = new PipelineWithTwoStages(materialRepository, transactionTemplate, temporaryFolder); pipelineWithTwoStages.usingDbHelper(dbHelper).usingConfigHelper(configHelper).onSetUp(); pipelineWithTwoStages.createPipelineWithFirstStagePassedAndSecondStageRunning(); Pipeline pipeline = manualSchedule(pipelineWithTwoStages.pipelineName); assertThat(pipeline.getFirstStage().stageState(), is(StageState.Building)); } @Test public void shouldForceFirstStagePlanWhenOtherStageIsRunning() throws Exception { pipelineWithTwoStages = new PipelineWithTwoStages(materialRepository, transactionTemplate, temporaryFolder); pipelineWithTwoStages.usingDbHelper(dbHelper).usingConfigHelper(configHelper).onSetUp(); pipelineWithTwoStages.createPipelineWithFirstStagePassedAndSecondStageRunning(); Pipeline pipeline = manualSchedule(pipelineWithTwoStages.pipelineName); assertThat(pipeline.getFirstStage().isActive(), is(true)); } @Test public void shouldForceStagePlanWithModificationsSinceLast() throws Exception { Pipeline completedMingle = scheduleAndCompleteInitialPipelines(); pipelineDao.loadPipeline(completedMingle.getId()); TestingMaterial testingMaterial = new TestingMaterial(); mingleConfig.setMaterialConfigs(new MaterialConfigs(testingMaterial.config())); MaterialRevisions revisions = new MaterialRevisions(); revisions.addRevision(testingMaterial, testingMaterial.modificationsSince(null, null, subprocessExecutionContext)); BuildCause buildCause = BuildCause.createManualForced(revisions, Username.ANONYMOUS); dbHelper.saveMaterials(buildCause.getMaterialRevisions()); Pipeline forcedPipeline = instanceFactory.createPipelineInstance(mingleConfig, buildCause, new DefaultSchedulingContext( DEFAULT_APPROVED_BY), md5, new TimeProvider()); pipelineService.save(forcedPipeline); verifyMingleScheduledWithModifications(); } @Test 
public void shouldNotScheduleAnyNewPipelineWhenErrorHappens() throws Exception { String stageName = "invalidStageName"; PipelineConfig invalidPipeline = configHelper.addPipelineWithInvalidMaterial("invalidPipeline", stageName); int beforeScheduling = pipelineDao.count(CaseInsensitiveString.str(invalidPipeline.name())); autoSchedulePipelines(); int afterScheduling = pipelineDao.count(CaseInsensitiveString.str(invalidPipeline.name())); assertThat(beforeScheduling, is(afterScheduling)); } @Test public void shouldNotScheduleActivePipeline() throws Exception { Pipeline pipeline = PipelineMother.building(mingleConfig); pipeline = dbHelper.savePipelineWithStagesAndMaterials(pipeline); Pipeline newPipeline = manualSchedule(CaseInsensitiveString.str(mingleConfig.name())); assertThat(newPipeline.getId(), is(pipeline.getId())); } @Test public void shouldNotScheduleBuildIfNoModification() throws Exception { autoSchedulePipelines("mingle", "evolve"); // Get the scheduled evolve stage and complete it. Stage evolveInstance = stageDao.mostRecentWithBuilds(CaseInsensitiveString.str(evolveConfig.name()), evolveConfig.findBy(new CaseInsensitiveString("dev"))); dbHelper.passStage(evolveInstance); stageDao.stageStatusChanged(evolveInstance); autoSchedulePipelines(); Stage mostRecent = stageDao.mostRecentWithBuilds(CaseInsensitiveString.str(evolveConfig.name()), evolveConfig.findBy(new CaseInsensitiveString("dev"))); assertThat(mostRecent.getId(), is(evolveInstance.getId())); assertThat(mostRecent.getJobInstances().first().getState(), is(JobState.Completed)); } @Test public void shouldSaveBuildStateCorrectly() throws Exception { PipelineConfig cruisePlan = configHelper.addPipeline("cruise", "dev", repository); goConfigService.forceNotifyListeners(); autoSchedulePipelines("mingle", "evolve", "cruise"); Stage cruise = stageDao.mostRecentWithBuilds(CaseInsensitiveString.str(cruisePlan.name()), cruisePlan.findBy(new CaseInsensitiveString("dev"))); JobInstance instance = cruise.getJobInstances().first(); assertThat(instance.getState(), is(JobState.Scheduled)); } @Test public void shouldRemoveBuildCauseIfPipelineNotExist() throws Exception { configHelper.addPipeline("cruise", "dev", repository); goConfigService.forceNotifyListeners(); scheduleHelper.autoSchedulePipelinesWithRealMaterials("mingle", "evolve", "cruise"); Assertions.assertWillHappen(2, PipelineScheduleQueueMatcher.numberOfScheduledPipelinesIsAtLeast(pipelineScheduleQueue), Timeout.FIVE_SECONDS); int originalSize = pipelineScheduleQueue.toBeScheduled().size(); assertThat(originalSize, greaterThan(1)); configHelper.initializeConfigFile(); goConfigService.forceNotifyListeners(); scheduleService.autoSchedulePipelinesFromRequestBuffer(); assertThat(pipelineScheduleQueue.toBeScheduled().size(), is(0)); } @Test public void shouldRemoveBuildCauseIfAnyExceptionIsThrown() throws Exception { configHelper.addPipeline("cruise", "dev", repository); goConfigService.forceNotifyListeners(); goConfigService.getCurrentConfig().pipelineConfigByName(new CaseInsensitiveString("cruise")).get(0).jobConfigByConfigName(new CaseInsensitiveString("unit")).setRunOnAllAgents(true); scheduleHelper.autoSchedulePipelinesWithRealMaterials("cruise"); goConfigService.forceNotifyListeners(); scheduleService.autoSchedulePipelinesFromRequestBuffer(); assertThat(pipelineScheduleQueue.toBeScheduled().size(), is(0)); } @Test public void shouldNotThrowErrorWhenMaterialsChange() throws Exception { configHelper.addPipeline("cruise", "dev", repository); goConfigService.forceNotifyListeners(); 
scheduleHelper.autoSchedulePipelinesWithRealMaterials("mingle", "evolve", "cruise"); configHelper.replaceMaterialForPipeline("cruise", svnMaterialConfig("http://new-material", null)); goConfigService.forceNotifyListeners(); try { scheduleService.autoSchedulePipelinesFromRequestBuffer(); } catch (Exception e) { fail("#2520 - should not cause an error if materials have changed"); } } @Test public void shouldConsumeAllBuildCausesInServerHealth() throws Exception { pipelineScheduleQueue.schedule(new CaseInsensitiveString("mingle"), BuildCause.createManualForced(modifyOneFile(mingleConfig), Username.ANONYMOUS)); pipelineScheduleQueue.schedule(new CaseInsensitiveString("evolve"), BuildCause.createManualForced(modifyOneFile(evolveConfig), Username.ANONYMOUS)); scheduleService.autoSchedulePipelinesFromRequestBuffer(); assertThat(pipelineScheduleQueue.toBeScheduled().size(), is(0)); } private void autoSchedulePipelines(String... pipelineNames) throws Exception { scheduleHelper.autoSchedulePipelinesWithRealMaterials(pipelineNames); scheduleService.autoSchedulePipelinesFromRequestBuffer(); } private Pipeline manualSchedule(String pipelineName) throws Exception, StageAlreadyBuildingException { scheduleHelper.manuallySchedulePipelineWithRealMaterials(pipelineName, new Username(new CaseInsensitiveString("some user name"))); scheduleService.autoSchedulePipelinesFromRequestBuffer(); return pipelineService.mostRecentFullPipelineByName(pipelineName); } private void assertPipelinesScheduled() { Pipeline minglePipeline = pipelineDao.mostRecentPipeline(CaseInsensitiveString.str(mingleConfig.name())); Stage mingleStage = minglePipeline.getFirstStage(); assertThat(mingleStage.getName(), is(STAGE_NAME)); assertThat(mingleStage.getJobInstances().size(), is(2)); JobInstance mingleJob = mingleStage.getJobInstances().first(); assertThat(mingleJob.getState(), is(JobState.Scheduled)); assertPipelineScheduled(evolveConfig); assertPipelineScheduled(goConfig); } private void assertPipelineScheduled(PipelineConfig config) { Stage evolveStage = stageDao.mostRecentWithBuilds(CaseInsensitiveString.str(config.name()), config.findBy(new CaseInsensitiveString("dev"))); assertThat(evolveStage.getName(), is("dev")); assertThat(evolveStage.getJobInstances().size(), is(1)); assertThat(evolveStage.getJobInstances().first().getState(), is(JobState.Scheduled)); } private void verifyMingleScheduledWithModifications() { Pipeline scheduledPipeline = pipelineDao.mostRecentPipeline(CaseInsensitiveString.str(mingleConfig.name())); BuildCause buildCause = scheduledPipeline.getBuildCause(); assertThat(buildCause.getMaterialRevisions().totalNumberOfModifications(), is(3)); JobInstance instance = scheduledPipeline.getFirstStage().getJobInstances().first(); assertThat(instance.getState(), is(JobState.Scheduled)); } private Pipeline scheduleAndCompleteInitialPipelines() throws Exception { autoSchedulePipelines("mingle", "evolve", "go"); assertPipelinesScheduled(); passFirstStage(goConfig); return passFirstStage(mingleConfig); } private Pipeline passFirstStage(PipelineConfig pipelineConfig) { Stage completedMingleStage = stageDao.mostRecentWithBuilds(CaseInsensitiveString.str(pipelineConfig.name()), pipelineConfig.findBy(new CaseInsensitiveString("dev"))); dbHelper.passStage(completedMingleStage); dbHelper.passStage(completedMingleStage); assertThat(completedMingleStage.getJobInstances().first().getState(), is(JobState.Completed)); Pipeline pipeline = pipelineDao.mostRecentPipeline(CaseInsensitiveString.str(pipelineConfig.name())); return 
dbHelper.passPipeline(pipeline); } }
def _allocate_from_s_variant(group_y: np.ndarray,
                             group_vaccines: np.ndarray,
                             new_e_from_s_variant: float,
                             v_total: float,
                             n_unvaccinated: float,
                             vaccines_out: np.ndarray) -> np.ndarray:
    # COMPARTMENTS, VACCINE_TYPES and DEBUG are module-level constants defined elsewhere
    # in this model; group_y holds compartment sizes and group_vaccines doses by type.
    # Doses expected to go to the variant-susceptible compartment, proportional to its
    # share of the unvaccinated population.
    expected_total_vaccines_s_variant = group_y[COMPARTMENTS.S_variant] / n_unvaccinated * v_total
    # Never allocate more doses than the people remaining in S_variant after new infections.
    total_vaccines_s_variant = min(
        group_y[COMPARTMENTS.S_variant] - new_e_from_s_variant,
        expected_total_vaccines_s_variant,
    )
    if expected_total_vaccines_s_variant:
        # Doses of types u, p and m are pooled and booked against the u slot for S_variant.
        total_ineffective = (group_vaccines[VACCINE_TYPES.u]
                             + group_vaccines[VACCINE_TYPES.p]
                             + group_vaccines[VACCINE_TYPES.m])
        expected_u_vaccines = group_y[COMPARTMENTS.S_variant] / n_unvaccinated * total_ineffective
        vaccine_ratio = expected_u_vaccines / expected_total_vaccines_s_variant
        vaccines_out[COMPARTMENTS.S_variant, VACCINE_TYPES.u] = vaccine_ratio * total_vaccines_s_variant
        # The pa and ma dose types keep their expected proportions of the allocation.
        for vaccine_type in [VACCINE_TYPES.pa, VACCINE_TYPES.ma]:
            expected_vaccines = group_y[COMPARTMENTS.S_variant] / n_unvaccinated * group_vaccines[vaccine_type]
            vaccine_ratio = expected_vaccines / expected_total_vaccines_s_variant
            vaccines_out[COMPARTMENTS.S_variant, vaccine_type] = vaccine_ratio * total_vaccines_s_variant
    if DEBUG:
        assert np.all(np.isfinite(vaccines_out))
        assert np.all(vaccines_out >= 0)
    return vaccines_out
package arcmop.blog.springbootest.configuracion.model; import javax.persistence.Entity; import javax.persistence.GeneratedValue; import javax.persistence.Id; @Entity public class Suma { @Id @GeneratedValue private Integer id; private Integer sum01; private Integer sum02; private Integer resultado; public Integer getSum01() { return sum01; } public void setSum01(Integer sum01) { this.sum01 = sum01; } public Integer getSum02() { return sum02; } public void setSum02(Integer sum02) { this.sum02 = sum02; } public Integer getResultado() { return resultado; } public void setResultado(Integer resultado) { this.resultado = resultado; } }
package cn.ztuo.bitrade.entity; import static com.querydsl.core.types.PathMetadataFactory.*; import com.querydsl.core.types.dsl.*; import com.querydsl.core.types.PathMetadata; import javax.annotation.Generated; import com.querydsl.core.types.Path; import com.querydsl.core.types.dsl.PathInits; /** * QTransferRecord is a Querydsl query type for TransferRecord */ @Generated("com.querydsl.codegen.EntitySerializer") public class QTransferRecord extends EntityPathBase<TransferRecord> { private static final long serialVersionUID = -1132223681L; private static final PathInits INITS = PathInits.DIRECT2; public static final QTransferRecord transferRecord = new QTransferRecord("transferRecord"); public final StringPath address = createString("address"); public final NumberPath<java.math.BigDecimal> amount = createNumber("amount", java.math.BigDecimal.class); public final QCoin coin; public final DateTimePath<java.util.Date> createTime = createDateTime("createTime", java.util.Date.class); public final NumberPath<java.math.BigDecimal> fee = createNumber("fee", java.math.BigDecimal.class); public final NumberPath<Long> id = createNumber("id", Long.class); public final NumberPath<Long> memberId = createNumber("memberId", Long.class); public final StringPath orderSn = createString("orderSn"); public final StringPath remark = createString("remark"); public QTransferRecord(String variable) { this(TransferRecord.class, forVariable(variable), INITS); } public QTransferRecord(Path<? extends TransferRecord> path) { this(path.getType(), path.getMetadata(), PathInits.getFor(path.getMetadata(), INITS)); } public QTransferRecord(PathMetadata metadata) { this(metadata, PathInits.getFor(metadata, INITS)); } public QTransferRecord(PathMetadata metadata, PathInits inits) { this(TransferRecord.class, metadata, inits); } public QTransferRecord(Class<? extends TransferRecord> type, PathMetadata metadata, PathInits inits) { super(type, metadata, inits); this.coin = inits.isInitialized("coin") ? new QCoin(forProperty("coin")) : null; } }
// Parse returns the access ID and signature present in the
// given string, presumably taken from a request's Authorization
// header. If the header does not match the expected `APIAuth access_id:signature`
// format, an error is returned.
func Parse(header string) (id, sig string, err error) {
	var tokens []string
	if !strings.HasPrefix(header, "APIAuth ") {
		goto malformed
	}
	tokens = strings.Split(header[8:], ":")
	if len(tokens) != 2 || tokens[0] == "" || tokens[1] == "" {
		goto malformed
	}
	return tokens[0], tokens[1], nil

malformed:
	return "", "", fmt.Errorf("Malformed header: %s", header)
}
package buffer import ( "bytes" "github.com/kisielk/vigo/utils" ) type ActionType int const ( ActionInsert ActionType = 1 ActionDelete ActionType = -1 ) // An action is a single entity of undo/redo history. All changes to contents of a buffer // must be initiated by an action. type Action struct { What ActionType Data []byte Cursor Cursor Lines []*Line } // NewInsertAction creates a new action inserting data bytes at c. func NewInsertAction(c Cursor, data []byte) *Action { a := Action{ What: ActionInsert, Data: data, Cursor: c, Lines: make([]*Line, bytes.Count(data, []byte{'\n'})), } for i := range a.Lines { a.Lines[i] = new(Line) } return &a } // NewDeleteAction creates a new action deleting numBytes bytes at c. func NewDeleteAction(c Cursor, numBytes int) *Action { d := c.ExtractBytes(numBytes) a := Action{ What: ActionDelete, Data: d, Cursor: c, Lines: make([]*Line, bytes.Count(d, []byte{'\n'})), } for i := range a.Lines { a.Lines[i] = c.Line.Next c.Line = c.Line.Next } return &a } func (a *Action) Apply(buf *Buffer) { a.do(buf, a.What) } func (a *Action) Revert(buf *Buffer) { a.do(buf, -a.What) } func (a *Action) insert(buf *Buffer) { var data_chunk []byte nline := 0 offset := a.Cursor.Boffset line := a.Cursor.Line utils.IterLines(a.Data, func(data []byte) { if data[0] == '\n' { buf.numBytes++ if offset < line.Len() { // a case where we insert at the middle of the // line, need to save that chunk for later // insertion at the end of the operation data_chunk = line.Data[offset:] line.Data = line.Data[:offset] } // insert a line buf.InsertLine(a.Lines[nline], line) line = a.Lines[nline] nline++ offset = 0 } else { buf.numBytes += len(data) // insert a chunk of data line.Data = utils.InsertBytes(line.Data, offset, data) offset += len(data) } }) if data_chunk != nil { line.Data = append(line.Data, data_chunk...) } buf.Emit(BufferEvent{Type: BufferEventInsert, Action: a}) } func (a *Action) delete(buf *Buffer) { nline := 0 offset := a.Cursor.Boffset line := a.Cursor.Line utils.IterLines(a.Data, func(data []byte) { if data[0] == '\n' { buf.numBytes-- // append the contents of the deleted line the current line line.Data = append(line.Data, a.Lines[nline].Data...) // delete a line buf.DeleteLine(a.Lines[nline]) nline++ } else { buf.numBytes -= len(data) // delete a chunk of data copy(line.Data[offset:], line.Data[offset+len(data):]) line.Data = line.Data[:line.Len()-len(data)] } }) buf.Emit(BufferEvent{Type: BufferEventDelete, Action: a}) } func (a *Action) do(buf *Buffer, what ActionType) { switch what { case ActionInsert: a.insert(buf) case ActionDelete: a.delete(buf) } } func (a *Action) LastLine() *Line { return a.Lines[len(a.Lines)-1] } func (a *Action) lastLineAffectionLen() int { i := bytes.LastIndex(a.Data, []byte{'\n'}) if i == -1 { return len(a.Data) } return len(a.Data) - i - 1 } func (a *Action) firstLineAffectionLen() int { i := bytes.Index(a.Data, []byte{'\n'}) if i == -1 { return len(a.Data) } return i } // returns the range of deleted lines, the first and the last one func (a *Action) DeletedLines() (int, int) { first := a.Cursor.LineNum + 1 last := first + len(a.Lines) - 1 return first, last } func (a *Action) tryMerge(b *Action) bool { if a.What != b.What { // can only merge actions of the same type return false } if a.Cursor.LineNum != b.Cursor.LineNum { return false } if a.Cursor.Boffset == b.Cursor.Boffset { pa, pb := a, b if a.What == ActionInsert { // on insertion merge as 'ba', on deletion as 'ab' pa, pb = pb, pa } pa.Data = append(pa.Data, pb.Data...) 
pa.Lines = append(pa.Lines, pb.Lines...) *a = *pa return true } // different boffsets, try to restore the sequence pa, pb := a, b if pb.Cursor.Boffset < pa.Cursor.Boffset { pa, pb = pb, pa } if pa.Cursor.Boffset+len(pa.Data) == pb.Cursor.Boffset { pa.Data = append(pa.Data, pb.Data...) pa.Lines = append(pa.Lines, pb.Lines...) *a = *pa return true } return false } type ActionGroup struct { Actions []Action Next *ActionGroup Prev *ActionGroup } func (ag *ActionGroup) Append(a *Action) { if len(ag.Actions) != 0 { // Oh, we have something in the group already, let's try to // merge this action with the last one. last := &ag.Actions[len(ag.Actions)-1] if last.tryMerge(a) { return } } ag.Actions = append(ag.Actions, *a) } // Valid only as long as no new actions were added to the action group. func (ag *ActionGroup) LastAction() *Action { if len(ag.Actions) == 0 { return nil } return &ag.Actions[len(ag.Actions)-1] } // CursorBefore returns cursor position before actions in the group are applied. func (ag *ActionGroup) CursorBefore() Cursor { // FIXME for now, return the cursor of the first action. // This is not accurate for cases like merged deletions, where // we need to return cursor + deletion length. if len(ag.Actions) == 0 { // TODO return some sentinel cursor value instead? panic("action group is empty") } return ag.Actions[0].Cursor } // CursorBefore returns cursor position after actions in the group are applied. func (ag *ActionGroup) CursorAfter() Cursor { // FIXME this is inaccurate for same reasons as CursorBefore() return ag.Actions[len(ag.Actions)-1].Cursor }
With everyone in crypto captivated by the Monero excitement, you might have thought nothing else was taking place. But as we all know, in crypto land nothing ever sleeps. Today, we look at Litecoin, GameCredits & Bitcrystals.

1. Litecoin
Big news today: Litecoin has finally been added to GDAX (Coinbase). Rumors of the impending addition have been thrown around ever since Charlie Lee visited Coinbase HQ. Today, the rumors were confirmed. The news was very positive for LTC, which has climbed almost 9% at the time of writing. Trading volumes are also very strong, with the coin crossing $12 million, almost twice the volume of Ethereum & Ethereum Classic. Being added by GDAX is a strong vote of confidence for LTC and should open the door to more institutional players.

2. GameCredits
GAME is strongly in the green today, up over 7% in the last 24 hours. The coin has much potential given its target market of online in-game currency.

3. Bitcrystals
BCY has surpassed its previous market-cap high of $7.6 million today and now looks to reach new heights. With volumes crossing $100,000, BCY definitely has the attention of traders. Visit the Bitcrystals website to learn more about this interesting coin.
package xml import ( "net/http" "strings" "github.com/gorilla/rpc/v2" ) const ( // gorilla error strings methodNotFound = "rpc: can't find method" serviceNotFound = "rpc: can't find service" ) // ServerCodec codec compatible with gorilla/rpc to process each request. type ServerCodec struct { aliases map[string]string } // serverRequest handles reading request and writing response type serverRequest struct { header http.Header call methodCall err error } // NewServerCodec return a new XML-RPC severCodec compatible with "gorilla/rpc". func NewServerCodec() *ServerCodec { return &ServerCodec{aliases: make(map[string]string)} } // RegisterAlias register a method alias. func (c *ServerCodec) RegisterAlias(alias, method string) { c.aliases[alias] = method } // NewRequest returns a new codec request. func (c *ServerCodec) NewRequest(r *http.Request) rpc.CodecRequest { s := &serverRequest{header: r.Header} s.err = withCodec(func(c *Codec) error { return c.readRPC(r.Body, &s.call) }) // resolve aliases parts := strings.Split(s.call.Method, ".") if len(parts) == 2 { if method, ok := c.aliases[parts[1]]; ok { parts[1] = method s.call.Method = strings.Join(parts, ".") } } return s } // Method reads the XML-RPC request and returns the method name. func (s *serverRequest) Method() (string, error) { return s.call.Method, s.err } // ReadRequest reads the XML-RPC request and writes the arguments to the receiver. func (s *serverRequest) ReadRequest(args interface{}) error { return s.call.rpcParams.writeTo(args) } // WriteResponse write an XML-RPC response to reply receiver. func (s *serverRequest) WriteResponse(w http.ResponseWriter, reply interface{}) { withCodec(func(c *Codec) error { w.Header().Set("Content-Type", "text/xml; charset=utf-8") zw := newCompressor(w, s.header) c.writeResponse(zw, reply) if closer, _ := zw.(*compressWriter); closer != nil { closer.Close() } return nil }) } // WriteError write an XML-RPC Fault. func (s *serverRequest) WriteError(w http.ResponseWriter, status int, err error) { // XML-RPC always send 200 OK responses switch v := err.(type) { case Fault: s.WriteResponse(w, v) default: if strings.HasPrefix(err.Error(), methodNotFound) || strings.HasPrefix(v.Error(), serviceNotFound) { s.WriteResponse(w, MethodNotFound.New("")) } else { // service functions should return appropriate XML-RPC faults // wrap any other error as internal s.WriteResponse(w, InternalError.New(err.Error())) } } }
BRUSSELS (AP) – European leaders agreed Thursday morning on a crucial plan to reduce Greece's debts and provide it with more rescue loans so that the faltering country can eventually dig out from under its debt burden. After a marathon summit, EU President Herman Van Rompuy said that the deal will reduce Greece's debt to 120% of its GDP in 2020. Under current conditions, it would have grown to 180%. That will require banks to take on 50% losses on their Greek bond holdings — a hard-fought deal that negotiators will now have to sell to individual bondholders. Van Rompuy also said the eurozone and International Monetary Fund — which have both been propping the country up with loans since May of 2010 — will give the country another €100 billion ($140 billion). That's slightly less than the amount agreed in July, presumably because the banks will now pick up more of the slack. "These are exceptional measures for exceptional times. Europe must never find itself in this situation again," European Commission President Jose Manuel Barroso said after the meetings. The question of how to reduce Greece's debt load had proven the sticking point in European leaders' efforts to come up with a grand plan to solve its debt crisis. But it was just one of three prongs necessary to restore confidence in Europe's ability to pay its debts and prevent the 2-year-old crisis from pushing the continent and much of the developed world back into recession. The first details of such a plan emerged hours earlier, when European Union leaders announced they would force the continent's biggest banks to raise €106 billion ($148 billion) by June — partially to ensure they could weather the expected losses on Greek debt. Van Rompuy also announced that the eurozone would boost the firepower of their bailout fund to about €1 trillion ($1.4 trillion) in order to protect larger economies like Italy and Spain from the market turmoil that has already pushed three countries to need bailouts. "We have reached an agreement which I believe lets us give a credible and ambitious and overall response to the Greek crisis," French President Nicolas Sarkozy told reporters as the meeting broke Thursday morning. "Because of the complexity of the issues at stake, it took us a full night. But the results will be a source of huge relief worldwide."
    /**
     * The postal address name suffix.
     *
     * @param suffix the postal address name suffix.
     * @return this {@code AccountPostalAddress} instance, allowing calls to be chained.
     **/
    public AccountPostalAddress suffix( String suffix )
    {
        this.suffix = suffix;
        return this;
    }
<reponame>MrManiak/Squad-SDK // Name: S, Version: b #include "../SDK.h" #ifdef _MSC_VER #pragma pack(push, 0x01) #endif /*!!HELPER_DEF!!*/ /*!!DEFINE!!*/ namespace UFT { //--------------------------------------------------------------------------- // Functions //--------------------------------------------------------------------------- // Function GraphTicketComponent.GraphTicketComponent_C.ReceiveBeginPlay // (Event, Public, BlueprintEvent) void UGraphTicketComponent_C::ReceiveBeginPlay() { static auto fn = UObject::FindObject<UFunction>("Function GraphTicketComponent.GraphTicketComponent_C.ReceiveBeginPlay"); UGraphTicketComponent_C_ReceiveBeginPlay_Params params; auto flags = fn->FunctionFlags; UObject::ProcessEvent(fn, &params); fn->FunctionFlags = flags; } // Function GraphTicketComponent.GraphTicketComponent_C.OnLatticeUpdated_Event_1 // (BlueprintCallable, BlueprintEvent) void UGraphTicketComponent_C::OnLatticeUpdated_Event_1() { static auto fn = UObject::FindObject<UFunction>("Function GraphTicketComponent.GraphTicketComponent_C.OnLatticeUpdated_Event_1"); UGraphTicketComponent_C_OnLatticeUpdated_Event_1_Params params; auto flags = fn->FunctionFlags; UObject::ProcessEvent(fn, &params); fn->FunctionFlags = flags; } // Function GraphTicketComponent.GraphTicketComponent_C.CaptureStateChangeEvent_Event_1 // (BlueprintCallable, BlueprintEvent) // Parameters: // class USQCaptureZoneComponent* CaptureZone (BlueprintVisible, BlueprintReadOnly, Parm, ZeroConstructor, InstancedReference, IsPlainOldData, NoDestructor, HasGetValueTypeHash) // unsigned char Team (BlueprintVisible, BlueprintReadOnly, Parm, ZeroConstructor, IsPlainOldData, NoDestructor, HasGetValueTypeHash) void UGraphTicketComponent_C::CaptureStateChangeEvent_Event_1(class USQCaptureZoneComponent* CaptureZone, unsigned char Team) { static auto fn = UObject::FindObject<UFunction>("Function GraphTicketComponent.GraphTicketComponent_C.CaptureStateChangeEvent_Event_1"); UGraphTicketComponent_C_CaptureStateChangeEvent_Event_1_Params params; params.CaptureZone = CaptureZone; params.Team = Team; auto flags = fn->FunctionFlags; UObject::ProcessEvent(fn, &params); fn->FunctionFlags = flags; } // Function GraphTicketComponent.GraphTicketComponent_C.ExecuteUbergraph_GraphTicketComponent // (Final) // Parameters: // int EntryPoint (BlueprintVisible, BlueprintReadOnly, Parm, ZeroConstructor, IsPlainOldData, NoDestructor, HasGetValueTypeHash) void UGraphTicketComponent_C::ExecuteUbergraph_GraphTicketComponent(int EntryPoint) { static auto fn = UObject::FindObject<UFunction>("Function GraphTicketComponent.GraphTicketComponent_C.ExecuteUbergraph_GraphTicketComponent"); UGraphTicketComponent_C_ExecuteUbergraph_GraphTicketComponent_Params params; params.EntryPoint = EntryPoint; auto flags = fn->FunctionFlags; UObject::ProcessEvent(fn, &params); fn->FunctionFlags = flags; } } #ifdef _MSC_VER #pragma pack(pop) #endif
Download the Pattern and Assembly Guide of the Lofty Paper Model from Bob the Builder

Pictures of Bob the Builder's Lofty Papercraft

Lofty the crane from Bob the Builder: paper model/papercraft pattern and build instructions. "Can we build it? Yes we can!" That's right. After quite a long time since the release of my previous model, we can continue the series by building the blue mobile crane. In the series, Lofty is a rather timid and shy character. Oftentimes, he acts pessimistic in facing the tasks and challenges in building with Bob. He is more or less the opposite of Scoop. In designing and making this model, I never thought I would need that much time to finish it. It took me more than a month, from making the 3D design, unfolding the model, and test-building the parts, until I was finally ready to release it. Nevertheless, it's here now and I hope you enjoy this model as much as I do. Some photos are included below as a preview of the completed paper model of Lofty from Bob the Builder. Here is the video of my build process. Too bad I forgot to capture some of the stages, but most of them are here.

Details of Lofty Paper Model
Specification details of the Lofty from Bob the Builder paper model/papercraft. Model Dimension: 9.8 cm (width) x 11.2 cm (height) x 19.5 cm (depth) (crane not extended). Number of Parts: 161 parts (model); 5 parts (display box). Number of Pages: 7 pages (model); 4 pages (display box).

Document License
This paper model is available for free. Distribution/sharing of this paper model at any sites, blogs, or social media is allowed on condition that the link points to this page. Please do not use a direct link to the file hosting site. Please do not re-upload the file(s) to other hosting site(s).

Lofty Paper Model Pattern and Assembly Instruction Download
Use the link below to download the pattern/template and assembly guide/build instructions to build your own Bob the Builder: Lofty paper model. The model pattern, display box pattern, and the assembly guide are available as PDF files. The files are archived/compressed in a zip file. Use archive software (such as WinZip or 7zip) to extract the ZIP file and a PDF reader (such as Adobe Acrobat Reader) to open the PDF files.
Deep sampling and pooled amplicon sequencing reveals hidden genic variation in heterogeneous rye accessions

Background
Loss of genetic variation negatively impacts breeding efforts and food security. Genebanks house over 7 million accessions representing vast allelic diversity that is a resource for sustainable breeding. Discovery of DNA variations is an important step in the efficient use of these resources. While technologies have improved and costs dropped, it remains impractical to consider resequencing millions of accessions. Candidate genes are known for most agronomic traits, providing a list of high priority targets. Heterogeneity in seed stocks means that multiple samples from an accession need to be evaluated to recover available alleles. To address this we developed a pooled amplicon sequencing approach and applied it to the out-crossing cereal rye (Secale cereale L.).

Results
Using the amplicon sequencing approach, 95 rye accessions of different improvement status and worldwide origin, each represented by a pooled sample comprising DNA of 96 individual plants, were evaluated for sequence variation in six candidate genes with significant functions in biotic and abiotic stress resistance and seed quality. Seventy-four predicted deleterious variants were identified using multiple algorithms. Rare variants were recovered, including those found only in a low percentage of seed.

Conclusions
We conclude that this approach provides a rapid and flexible method for evaluating stock heterogeneity, probing allele diversity, and recovering previously hidden variation. A large extent of within-population heterogeneity revealed in the study provides an important point for consideration during rye germplasm conservation and utilization efforts.

Supplementary Information
The online version contains supplementary material available at 10.1186/s12864-020-07240-3.

Background
Plants can be made more resilient, yields stabilized, and nutritional components enhanced through selection and combination of gene variants that control these traits. Crop improvement is therefore dependent on the existence of genetic variability for the trait in question. For the past 10,000 years humans have been selecting and combining genetic variants to improve crops. However, most of the history of crop development was carried out without a knowledge of genetics or DNA, and thus modern cultivars have a relatively narrow genetic base, resulting from bottleneck-like effects of domestication and breeding practices. Therefore, the allelic variability existing within contemporary cultivars or breeding programs may be insufficient for successful identification of gene variants for satisfactory productivity and resilience of the crop. Useful alleles conferring important traits that have been lost in modern cultivars may still exist in nature. Plant genetic resources (PGR), such as landraces and wild relatives of crop plants, possess a much higher genetic diversity. While not high yielding and often having undesirable agronomic characteristics, they were shown to contain gene variants that can improve performance of successful modern cultivars. Luckily, the value of PGR as a reservoir of gene variants was recognised over a hundred years ago and nowadays there are over 1700 ex situ germplasm collections worldwide, maintaining about 7.4 million accessions. Approximately 62% of these accessions are landraces and wild species.
Unfortunately, in most cases little is known about the extent and structure of genetic diversity within a given collection. The available data is often limited to passport information, and some phenotypic measurements or DNA marker-based genetic diversity assessment for a subset of accessions. Such information is not sufficient to make an informed choice of PGR for inclusion into a breeding program. Therefore the utilisation of primitive, exotic germplasm in crop improvement is limited . To fully profit from the allelic variation of PGR, methods for efficient and reliable screening of hundreds of accessions to discover useful gene variants are needed. Rapid development of next generation sequencing (NGS) technologies resulted in the establishment of various approaches, which can be used for high-throughput assessment of genic variation within gene sequences such as whole genome resequencing (WGS) and exome capture . Unfortunately, these approaches are not yet applied in many species owing to factors including genome size, polyploidy, and associated costs of sequencing and capture probe development. While a future can be envisioned where comprehensive genomic data is available for every accession of every important crop, the current state of technology and funding means that material is prioritized, and compromises made. Insofar as evaluation of WGS data provides information useful for understanding population genetics and evolution, it is expected that only a small fraction of base pairs of a genome are controlling key agronomic traits . Targeting candidate genes and their regulatory elements provides a tremendous reduction in data collected. Indeed, many studies have revealed quantitative trait loci and associated candidate genes that can be used to identify orthologous sequences in other plants . An alternative to whole genome or exome capture sequencing is amplicon sequencing. In this approach, selected genomic regions are first amplified by PCR and then subjected to massively parallel sequencing. Compared to WGS or exome capture, amplicon approaches allow acquisition of a much higher coverage of the selected target bases pairs at a lower sequencing cost. This is because the total yield of the sequencing reaction, in terms of raw bases, is distributed to fewer unique bases of each sample in the pool (e.g. ). One application of amplicon sequencing is the simultaneous genotyping of hundreds of unique samples independently by employing strategies to barcode, or index, each sample uniquely . In addition to this approach, the high sensitivity of current sequencing technologies enables "ultra deep" methods whereby nucleotide variants can be identified in samples containing pools of mixed genotypes. One example is the detection of rare somatic mutations in human samples . Another example is the use of amplicon sequencing to measure intrahost virus diversity. Researchers showed that a rare Zika virus variant could be detected if present at > 3% in a mixed sample when sequencing coverage was at least 400x . In plants, experiments can be designed to discover rare nucleotide variants present at very low frequencies by screening large populations where genomic DNA has been pooled prior to PCR amplification and sequencing. Screening throughputs are increased and assay costs are reduced, making screening thousands of samples practical. This has been used for recovery of induced point mutations in TILLING by Sequencing assays . 
Here, genomic DNAs from different lines harboring induced mutations are pooled, subjected to target-specific PCR and the PCR products are then pooled and sequenced. The method has been used to recover rare mutations in genomic DNA samples pooled from 64 to 256 fold. These studies suggest that variant calling accuracy is improved when using multiple variant calling algorithms . The approach has been adapted for recovery of natural variation in Populus nigra, Manihot esculenta Crantz (cassava), and Oryza sativa L., whereby DNAs from different accessions were pooled together prior to PCR and variant discovery. In P. nigra, PCR products were prepared from pooled genomic DNA from 64 accessions to identify variants in lignin biosynthesis genes in 768 accessions . In cassava, DNA from up to 281 accessions were pooled prior to sequencing for variants in starch biosynthesis pathway-related genes and herbicide tolerance genes in 1667 accessions . In rice, pooling of DNAs prepared from 233 breeding lines was followed by sequencing for variants in starch synthesis genes . Pooling of multiple samples from the same species has also been used in studies where WGS has been applied. There are many variations to this methodology that has been termed Pool-seq . This includes cases where, contrary to TILLING assays, multiple individuals with similar genotypes are pooled together to estimate population allele frequencies. In such applications, sequencing coverages can be reduced to save costs, but are insufficient to find rare alleles in one or few individuals in the pool. Sequencing intra-species pools has also been described such as in metagenomics studies . Rye (Secale cereale L.) is an outcrossing cereal, popular in Europe and North America, and an important source of variation for wheat breeding due to its high tolerance to biotic and abiotic stresses . Genetically rye is a diploid (n = 7), with a large (ca. 8 Gbp) and complex genome . There are over 21 thousand rye accessions in genebanks worldwide, approximately 35% of them are landraces and wild species . Several studies on genome-wide diversity in rye were published to date . It was shown that accessions from genebanks are genetically distinct from modern varieties, which highlighted the potential of PGR in extending the variability in current rye breeding programs . To date neither NGS-based targeted amplicon sequencing, nor any other method of gene variant discovery was applied to rye genetic resources. Abiotic and biotic stress resistance, and yield constitute the key targets in rye breeding . Although the number of well characterized rye genes is very limited , there are important candidate genes related to abiotic and biotic stress resistance and grain quality to consider. MATE1 (multidrug and toxic compound extrusion, also known as AACT1 -aluminum activated citrate transporter), is a gene involved in aluminum (Al) tolerance of rye. Al-toxicity is one of the main constraints to agricultural production on acidic soils, which constitute ca. 50% of the arable land on Earth . Rye is one of the most Al-tolerant cereals, with the degree of tolerance depending on the allelic variant of MATE1 . TLPs (taumatin-like proteins) are a family of pathogenesisrelated (PR) proteins, involved in fungal pathogen response in many plant species . FBA (fructose-biphosphate aldolase) is one of the key metabolic enzymes involved in CO 2 fixation and sucrose metabolism. 
FBA genes were found to have an important role in regulation of growth and development, and responses to biotic and abiotic stresses, such as chilling, drought and heat . GSP-1 (grain softness protein) genes, belonging to the prolamine superfamily of seed storage proteins, encode precursor proteins, which after post-translational processing give rise to arabinogalactan peptide AGP and the grain softness protein GSP-1 . Secaloindolines, products of genes Sina (not analyzed in this study) and Sinb, are main components of friabilin -a starchassociated protein fraction of cereal grains . The wheat orthologues of Sina and Sinb, called Pina and Pinb, are key determinants of grain texture, an important breeding trait directly influencing the end-use . PBF (prolamin-box binding factor) is an endosperm specific transcription factor involved in the regulation of protein and starch synthesis . It binds to the prolamin-box motif occurring in promoter regions of multiple cereal seed storage proteins. In barley, SNPs located in PBF were associated with crude protein and starch content , while in wheat, mutating the homeologous PBFs using TILLING resulted in a markedly decreased gluten content and high content of lysine . Exploration of genic variation in outcrossing, generatively propagated crops, such as rye, maize (Zea mays), sugar beet (Beta vulgaris), broccoli (Brassica oleracea var. italica), or carrot (Daucus carrota), is a particularly demanding task. Natural, random-mating populations of such species are heterozygous and heterogeneous, with multiple alleles of a locus being present . Such population structure has important implications for the design of NGS-allele mining experiments. Firstly, due to high levels of heterozygosity, a higher sequencing coverage is needed even when sequencing non-pooled samples to ensure reliable nucleotide variant calling. Secondly, due to the heterogeneity of accessions, a large enough number of individuals of a given accession needs to be included in the screen to obtain a faithful representation of within-accession variability and to successfully recover rare variants. Many potentially useful and interesting alleles may go undiscovered with current experimental designs. To address this, a low-cost, high-throughput, and reliable amplicon sequencing approach suitable for assessment of genic variation in heterozygous and heterogeneous rye accessions was developed. Rather than pool DNA from different accessions, ultra deep amplicon sequencing was used to evaluate intra-accession heterogeneity while also providing information on novel genetic variation. DNA pools were created that contain 96 plants per accession. These were subjected to pooled amplicon sequencing in six target genes implicated in biotic and abiotic stress resistance and seed quality: MATE1, TLP, FBA, PBF, Sinb, and GSP-1. Three variant calling algorithms (GATK HaplotypeCaller , SNVer and CRISP ) were used to identify putative variants at frequencies as low as one heterozygous event per 96 plants assayed in each pool. A subset of variants was independently validated and the functional effect of each variant was evaluated in silico. Common and rare variants were recovered, including variants predicted to affect protein function that are present in only a small fraction of seed representing an accession. This data provides preliminary knowledge on the levels of variant allele frequencies in accessions representing different germplasm groups: wild species, landraces, historical and modern cultivars. 
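As an illustrative aside (not part of the original study), the arithmetic behind this sampling design can be sketched in a few lines of Python: the expected frequency of an allele carried by a single heterozygous plant in a pool of 96 diploid individuals, the number of plants that would need to be sampled to capture such an allele at all, and the probability that it is supported by a given number of reads under a simple binomial model that ignores sequencing error and unequal template representation. The coverage value and read threshold in the example are assumptions chosen only to match the order of magnitude reported in the Results.

from math import comb

def rarest_pool_frequency(n_plants: int, ploidy: int = 2) -> float:
    """Expected frequency of an allele carried by one heterozygous plant in the pool."""
    return 1.0 / (n_plants * ploidy)

def plants_needed(allele_freq: float, target: float = 0.95) -> int:
    """Smallest number of plants to sample so that an allele segregating at
    `allele_freq` in the accession is drawn at least once with probability >= target,
    assuming alleles are distributed independently among plants."""
    n = 1
    while 1.0 - (1.0 - allele_freq) ** (2 * n) < target:
        n += 1
    return n

def detection_probability(coverage: int, allele_freq: float, min_alt_reads: int) -> float:
    """Probability that at least `min_alt_reads` reads support an allele present at
    `allele_freq` in the template pool, under binomial sampling of reads."""
    p_fewer = sum(
        comb(coverage, k) * allele_freq ** k * (1.0 - allele_freq) ** (coverage - k)
        for k in range(min_alt_reads)
    )
    return 1.0 - p_fewer

f = rarest_pool_frequency(96)  # 1/192, roughly 0.005
print(f, plants_needed(0.01), detection_probability(14000, f, 20))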
Results

DNA sequencing, mapping and coverage
Pooled amplicon sequencing using Illumina sequencing by synthesis 2 × 300 paired end reads on 95 accessions and six genes produced a mean coverage of 13,948x and a mean mapping quality of 58.65. Mean coverage per accession pool varied approximately 10 fold, between 2924x and 30,275x. Analysis of sequencing coverage at each nucleotide revealed that 94.2% of the experiment produced 20 or more reads to support a rare variant present at 5% in the DNA pool (Additional file 1: Table S1).

Evaluation of variant calling algorithms and predicted effects of nucleotide changes
Variant calling was first performed on each pool using HaplotypeCaller in GATK (v.4.0) with ploidy set to 192 in order to recover rare alleles. This resulted in 4115 called variants, of which 3682 were single nucleotide polymorphisms, 192 insertions, and 241 deletions. Evaluation of the Variant Call Format (VCF) file allowed calculation of the frequency of a specific allele within the DNA pool created from the 96 seeds that were sampled to represent an accession. This is referred to as VAF (Variant Allele Frequency), to distinguish the measurement from AF (Allele Frequency), the frequency of the allele within the set of 95 accessions analyzed in the present study. Data was plotted to evaluate the distribution of the mean VAF for each variant and the number of accessions harboring each discovered allele (Fig. 1). Private variants occurring in only one accession were identified at both low and high VAFs (Fig. 1). The percentage was highest, however, at the lowest VAFs: 75% of private alleles have a VAF of 0.026 (represented at 2.6% in the accession pool) or lower (Additional file 2: Figure S1). Variant calling was next carried out using SNVer and CRISP, producing 1570 and 3261 variant calls, respectively. Similar to the data produced with GATK, the highest percentage of variants is represented at the lowest VAFs (75% at 0.034 or lower for CRISP and 0.088 or lower for SNVer, Additional file 3: Figure S2). Private variants were also enriched at lower VAFs (Additional file 2: Figure S1). In total, 895 variants were common between the three methods (Fig. 2). Within these common variants, the mean VAF and the number of accessions carrying the variant differed between the three algorithms used. The effect on gene function of putative variants was evaluated with SNPeff and SIFT. This resulted in 695 putative deleterious variants from GATK, 171 from SNVer and 578 from CRISP, with 74 putative deleterious variants common to all three algorithms (Table 1, Additional file 4: Table S2, Additional file 5: Figure S3). Deleterious alleles with a high maximum VAF (the highest VAF reported in an accession) that were present in only one accession were recovered, along with alleles with a high maximum VAF that were present in 90 or more accessions (Additional file 4: Table S2). Alleles with a maximum VAF less than 0.4 were also identified, suggesting the presence of rare alleles segregating within an accession. In the GATK data set, for example, 29 of the 74 predicted deleterious common variants have a maximum VAF between 0.047 and 0.391 and are found in 1 to 21 accessions (Additional file 4: Table S2, Additional file 6: Figure S4). Within target genes, 18 to 443 polymorphic positions were detected consistently by the three algorithms, corresponding to one SNP or InDel every 8-10 bp of sequence for five of the analyzed genes (Additional file 7: Table S3).
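How a per-pool VAF can be read out of a caller's VCF is sketched below. This is an illustration only, not the pipeline used in the study: it assumes the caller writes per-sample allelic depths in the AD FORMAT field (as GATK HaplotypeCaller does) and it skips multi-allelic and missing-data records.

def pool_vafs(vcf_path: str, sample_index: int = 0):
    """Yield (chrom, pos, ref, alt, vaf) for biallelic records of a VCF,
    with VAF computed as alt allelic depth / (ref + alt allelic depth)."""
    with open(vcf_path) as handle:
        for line in handle:
            if line.startswith("#"):
                continue
            fields = line.rstrip("\n").split("\t")
            chrom, pos, ref, alt = fields[0], int(fields[1]), fields[3], fields[4]
            if "," in alt:
                continue  # skip multi-allelic sites in this simple sketch
            fmt_keys = fields[8].split(":")
            sample_fields = fields[9 + sample_index].split(":")
            if "AD" not in fmt_keys:
                continue
            ad_value = sample_fields[fmt_keys.index("AD")]
            if "." in ad_value:
                continue  # missing allelic depths
            ref_depth, alt_depth = (int(x) for x in ad_value.split(","))
            if ref_depth + alt_depth > 0:
                yield chrom, pos, ref, alt, alt_depth / (ref_depth + alt_depth)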
For the sixth gene, Sinb, this frequency was markedly lower, with one SNP per 25 bp. The number of putatively deleterious variants per gene ranged from 11 (GSP-1) to 21 (MATE1), corresponding to one deleterious variant every 40 to 80 bp, with the exception of Sinb, where only three deleterious variants were identified in 447 bp of coding sequence.

The presence of predicted variants was first assayed using Sanger sequencing of Sinb amplicons in a single individual plant from each of eight accessions, with the aim of evaluating variant prediction while keeping Sanger sequencing costs low. Twelve variants were predicted in this set. Only variants reported by all three algorithms for the tested accession, and where the lowest VAF was greater than 0.295, were validated (Additional file 8: Table S4). Because allele frequencies were calculated from a pooled DNA sample, it was concluded that lower frequency alleles likely represent alleles that are not present in every seed of an accession. Subsequent validation assays were carried out whereby multiple plants from each accession were assayed independently. In CAPS and Sanger sequencing assays on MATE1, PBF, and Sinb amplicons, 13 out of 16 tested variants were recovered when sampling between six and 27 plants (Table 2). Observed allele frequencies calculated from the number of plants harboring the tested sequence difference varied from the frequencies predicted from the amplicon sequencing data. Seven variants had observed VAF closest to GATK predictions, two variants were closest with SNVer and four with CRISP. The three variants not recovered by CAPS or Sanger assays had frequencies reported by GATK below 0.15. Failure to recover low frequency alleles may have resulted from testing an insufficient number of individuals.

Phylogenetic relationships between populations and comparison of VAF distributions
The relationship between accessions was evaluated by creating a Neighbor Joining (NJ) tree based on Nei's genetic distance (Fig. 3). This resulted in accessions divided into six clusters (I-VI), with cluster I containing mostly cultivars, including the majority of modern cultivars analyzed. Nevertheless, a coincidence of the clustering with improvement status could not be observed. The accessions S. sylvestre (abbreviation B6 in Fig. 3), S. strictum subsp. kuprijanovii (F8) and S. strictum subsp. africanum (B7) were indicated as the most divergent of the analyzed set, which is in agreement with results of previous genome-wide analyses of rye germplasm. Conversion of VAF values to genotyping scores was used to evaluate clustering. Different ranges of VAF were used to define heterozygous variants. This resulted in a changed clustering of the populations at each range tested (Additional file 9: Figure S5). Results of principal coordinates analysis, based on the VAF-derived Nei's genetic distance matrix (Fig. 4), are in agreement with the outcome of NJ clustering. The accessions S. sylvestre (B6), S. strictum subsp. kuprijanovii (F8), S. strictum subsp. africanum (B7), and the sample of historical variety Imperial (B4) are very distant from the rest, while within the group of the remaining accessions several subgroups can be observed, which correspond to clusters indicated in the NJ tree (Fig. 3). Private variants occurred in all germplasm groups included in the study: modern cultivars, historic cultivars, landraces and wild accessions.
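The conversion of pooled VAF values into discrete genotype scores, used above for distance-based clustering, can be sketched as follows. The two thresholds are arbitrary placeholders (the study evaluated several ranges), so this is a minimal illustration rather than the parameterization behind Fig. 3; a distance matrix and NJ tree would then be computed from the resulting scores with standard phylogenetics software.

def vaf_to_score(vaf: float, het_min: float = 0.2, het_max: float = 0.8) -> int:
    """Convert a pooled variant allele frequency into a crude genotype score:
    0 = reference-like, 1 = heterozygous/segregating, 2 = alternative-allele-like."""
    if vaf < het_min:
        return 0
    if vaf <= het_max:
        return 1
    return 2

def score_table(vaf_table: dict[str, list[float]]) -> dict[str, list[int]]:
    """Apply the conversion to an {accession: [VAF at each variant site]} table."""
    return {acc: [vaf_to_score(v) for v in vafs] for acc, vafs in vaf_table.items()}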
In the group of commonly identified variants, the number of private variants per accession coincided with the domestication status: private variants were most frequent in wild accessions (five to seven per accession), followed by landraces with approximately two variants per accession, and historic and modern cultivars with less than one private variant per accession. Private variants in wild accessions also had the highest VAF values (mean 0.4-0.45, median 0.22-0.28). Ten of the private variants detected in wild accessions were putatively deleterious: five in the PBF gene, four in MATE1 and one in GSP-1. In the remaining germplasm groups mean VAF and median VAF did not exceed 0.12 and 0.007, respectively. The number of private variants identified in each accession is shown in Fig. 3. Among the cultivated rye (S. cereale subsp. cereale) accessions, the highest number of private variants (7) was observed in a landrace from Bosnia and Herzegovina (C2) and also in a S. cereale subsp. cereale accession of unknown improvement status from Israel (E4). VAF values were used to prepare combined strip/violin plots in order to qualitatively compare accessions. Several distinct patterns of allele frequency distribution were observed (representative examples shown in Fig. 5). Based on the results of a two-part Wilcoxon test of pairwise comparisons of VAF distributions, rye accessions were grouped into 20 clusters ranging in size from 1 to 16 (Additional file 10: Figure S6). Five accessions, characterized by a high proportion of variants with high VAF values, were consistently recognized as markedly different from the rest: S. sylvestre (B6), S. strictum subsp. kuprijanovii (F8), historic cultivars Imperial (B4) and Otello (G12), and landrace R1040 (F6) (Additional file 11: Table S5).

(Fig. 3 caption: numbers in brackets indicate private alleles identified in the respective accession; colors indicate improvement status: light blue, modern cultivar; dark blue, historic cultivar; dark green, landrace; light green, wild accession.)

Discussion
To evaluate the distribution of frequencies of alleles within a landrace or cultivar, we chose to sample 96 plants from each accession of rye selected for our study. This allows the recovery of i) sequence differences compared to the reference sequence used that are fully homozygous (those with an allele frequency of 1), ii) heterozygous variants present in all pooled plants, and iii) variants of lower frequencies that are not present in every seed in the seed stock used to represent an accession. To streamline the approach, tissue from each plant was collected and pooled prior to DNA extraction. The experiment was designed such that an allele found in a single plant could be identified. High coverage values were found in all DNA pools, suggesting that each pool was suitable for PCR amplification and sequencing. Deviations in coverage values, therefore, likely resulted from differences associated with the quantification, normalization and pooling of PCR products. Such variations were recently reported in a study comparing tomato, cassava and barley amplicon sequencing data sets. The study revealed that minor coverage improvements could be achieved through the addition of extra quantification methods. Alternative approaches to increase read coverage at all nucleotide positions include increasing sequencing yields by adjusting the number of samples in an experiment and/or the number of target genes (amplicons) used in a single sequencing run.
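Such deviations are usually reduced by pooling amplicons on an equimolar rather than equal-volume basis. The helper below shows the underlying arithmetic; the average mass of ~650 g/mol per base pair of double-stranded DNA and the 10 fmol target are generic assumptions for the illustration, not parameters taken from the study.

def equimolar_pool_volumes(products: dict[str, tuple[float, int]], take_fmol: float = 10.0) -> dict[str, float]:
    """Volume (in microlitres) of each PCR product to pool so that every amplicon
    contributes `take_fmol` femtomoles. `products` maps a sample name to a tuple of
    (concentration in ng/ul, amplicon length in bp)."""
    volumes = {}
    for name, (ng_per_ul, length_bp) in products.items():
        fmol_per_ul = ng_per_ul * 1e6 / (length_bp * 650.0)  # ng/ul converted to fmol/ul
        volumes[name] = take_fmol / fmol_per_ul
    return volumes

# Example with two hypothetical amplicons of different lengths and concentrations.
print(equimolar_pool_volumes({"MATE1_poolA": (24.0, 420), "PBF_poolA": (11.5, 610)}))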
As sequencing costs drop, it may prove more cost effective and faster to simply produce more base pairs of data per experiment than to fine-tune the other experimental parameters. Many applications employing next generation sequencing of genomic DNA involve the evaluation of sequence variations in diploid samples. Even in optimized diploid conditions, a balance is struck between maximizing allele calling sensitivity to reduce false negative errors and reducing the sensitivity in order to lower false positive errors. For example, when using GATK HaplotypeCaller with settings for diploid samples, Li et al. reported that more than 80% of false positive errors in diploid rice were at an allele frequency below 40% . When sequencing non-pooled samples, setting an allele frequency threshold of > 40% for heterozygous variants therefore reduced false positive errors. In non-pooled samples, the choice of mapping software and variant calling software can also affect predicted SNPs. Yao and colleagues used whole exome capture wheat data sets and seven variant calling tools to define putative true variants that were identified by all tools . Using this set, the authors concluded that mapping with BWA mem outperformed Bowtie2. Variant callers showed variable performance with GATK Haplotype Caller outperforming SNVer and Samtools/mpileup performing best. Independent validation of SNPs by Sanger sequencing was not carried out. Optimizing variant calling may be more challenging in highly pooled samples. Algorithms such as GATK HaplotypeCaller, SNVer and CRISP provide parameter settings to call low frequency variants. Yet, optimal parameters still need to be determined. For example, evaluation of six SNP calling algorithms in tomato TILLING samples pooled either 64 or 96 fold revealed that accuracy ranged between 89.33 and 25.33% when comparing to Sanger validated SNP mutations . That work described technical differences between different algorithms and concluded that accuracy is improved when a variant call is predicted by at least two algorithms. In cassava, up to 281 different accessions were pooled together prior to sequencing in an approach designed to quickly identify putative deleterious alleles . In that study 24% (79/325) of called variants were predicted by four algorithms tested. The experimental design for rye differed from previous studies in order to allow the discovery and analysis of intra-accession allele variation. Similar to previous studies with pooled samples and wheat exome capture data, multiple variant callers were used to find concordant SNPs. The rye assay was designed to recover two types of what can be considered "rare" variation. The first type of rare variants are alleles that are found in only one accession (known as private alleles) or very few accessions in the tested set, and occur with a high frequency within the respective accessions. This type of rare variation is easily recovered using conventional genotyping and resequencing as alleles can be recovered through assay of a single seed . The second type of rare variants are more difficult to discover. These variants segregate at a low frequency within an accession and are never found at high frequency in any tested accession. To recover this type of rare variant requires the sampling of multiple individual seed per accession. As such, these alleles are hidden from discovery when using traditional methods that sample one or few seed per accession. 
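A crude way to separate these two classes of rare variation directly from the per-accession VAF table is sketched below; the thresholds (a "high" VAF of 0.4 and at most two carrier accessions) are illustrative assumptions, not cut-offs defined in the study.

def classify_variant(vafs_per_accession: list[float], high_vaf: float = 0.4, few: int = 2) -> str:
    """Classify one variant from its VAF in every accession pool (0.0 = not detected):
    'private-high'    - reaches a high VAF, but only in one or very few accessions;
    'segregating-low' - detected somewhere, yet never reaching a high VAF;
    'common'          - everything else."""
    present = [v for v in vafs_per_accession if v > 0.0]
    if not present:
        return "absent"
    if max(present) >= high_vaf and len(present) <= few:
        return "private-high"
    if max(present) < high_vaf:
        return "segregating-low"
    return "common"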
Using pooled amplicon sequencing we have recovered both types of rare alleles in the tested rye accessions. Importantly, the presence of variants that segregate at a low frequency within an accession and are never found at high frequency in any tested accession suggests that a broader genetic diversity can exist in germplasm collections than previously known. We expect this to be most common in outcrossing species like rye, where admixtures of alleles are frequent. Variants with a mean VAF between 0.7 and 1 represented between 1.87 and 3.06% of all predicted alleles, depending on the algorithm used. In this set of variants, between 41 and 49% were private alleles found in only one accession (Fig. 1, Additional file 2: Figure S1, Additional file 3: Figure S2). The highest number of variants was found at the lowest VAFs. It is expected that false positive errors will increase as the number and percentage of reads supporting the alternative allele decreases. Studies have been carried out on errors associated with MiSeq paired-end sequencing, but a thorough investigation into errors in pooled samples has not been reported. False positive errors are expected to be random and therefore to be only infrequently predicted independently when applying multiple variant calling algorithms. Indeed, of the 895 variants common to GATK, SNVer and CRISP, only 20% had a predicted mean VAF of 0.038 or lower, a reduction of more than 50% compared to the data from any single algorithm. Further experiments are required to determine what percentage, if any, of the sub-0.038 VAF variants predicted by all three algorithms are false positive errors. This requires extensive genotyping, as many individual seeds need to be tested to ensure true variants are recovered. In the present study, genotyping assays using approximately 10 seeds per accession were sufficient to validate alleles with a VAF of 0.15 or higher that were predicted in the same accession by all three algorithms. We expect it is necessary to test more than 100 seeds per accession to validate the lowest frequency alleles in the data. Some very low frequency false positive errors are expected and may result from biological contamination, for example from pollen contamination on the collected leaf tissue. This can be ruled out in the present study because seedlings were grown and tissue was collected under growth-room conditions where no rye plants were flowering. Sample-to-sample cross-contamination of DNA or PCR product may also be a source of low-VAF false positive errors. Sixty-five percent of sub-0.038 VAF variants commonly predicted by all algorithms were found in more than one accession. However, 96% had a maximum predicted VAF of less than 10%, and the highest maximum VAF was 24.5%. This means that a large volume of accidental liquid transfer between samples would be needed to create a detectable false positive. With the caveat of possible very low frequency false positive errors, we conclude that selecting variants commonly called by multiple algorithms may reduce errors and serves as a useful method to prioritize alleles for further study. We found qualitative evaluation of VAF values using strip and violin plots to be useful for estimating the influence of a taxon's reproductive biology, preservation history and breeding on the genetic composition of an accession. For example, one of the outlier accessions identified in this study is S. sylvestre (B6).
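The sample sizes quoted above can be rationalized with a simple probability argument. The sketch below is our own illustration under an idealized random-mating assumption (not an analysis from the study): the chance that at least one of n genotyped diploid plants carries an allele of within-accession frequency p is 1 - (1 - p)^(2n), which is already high for p = 0.15 with about 10 plants but requires far larger samples for very rare alleles.

def detection_probability(p, n_plants):
    """Probability that at least one of n diploid plants carries an allele of frequency p."""
    return 1 - (1 - p) ** (2 * n_plants)

for p in (0.15, 0.04, 0.005):
    for n in (10, 27, 100):
        print(f"allele frequency {p:>5}: {n:>3} plants -> P(detect) = {detection_probability(p, n):.2f}")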
Molecular marker-based analyses of genetic diversity have indicated this self-pollinating taxon to be the most divergent in the genus Secale. Its large proportion of high-VAF variants (Fig. 5) likely corresponds to homozygosity for alternative alleles, since the reference sequences used during variant calling originated from cultivated rye accessions. Another outlier, S. strictum subsp. kuprijanovii (F8), is a perennial outbreeder, also genetically divergent from S. cereale. However, its violin plot differs markedly from the plots obtained for the other two S. strictum samples included in the study, S. strictum subsp. africanum (B7) and S. strictum subsp. strictum (B9), which might indicate a sample tracking mistake during genebank preservation or laboratory handling, or a bottleneck during preservation. A sample of the cultivar Imperial (B4), widely used in cytological studies and originating from the collection of A. J. Lukaszewski (University of California, Riverside), showed an approximately equal abundance of variants across all possible VAF values and differed clearly from another sample of Imperial (C1), obtained from the IPK Gatersleben genebank. Less pronounced (although also statistically significant) differences were also observed between the two samples of cultivar Dankowskie Zlote (C6 and F3), obtained from different sources. Samples of hybrid cultivars from KWS, such as Ballistic (D11) and KWS Florano (G10), exhibited a higher percentage of VAF values in the range 0.3-0.5, with a median of ca. 0.3, and also a higher percentage of VAF values close to 1.0, in comparison to the population cultivars included in the study, such as Dankowskie Zlote (F3 and C6), Petkus (G8), or Carstens Kortstra (E8), which is consistent with the use of the three-line system in the development of hybrid rye cultivars. Statistical analysis also showed that wild accessions differed from modern varieties in terms of VAF value distributions, with wild accessions (accession codes F8, B7, B6, B10, B9, A9, F4, F7, E12; full names in Additional file 11: Table S5) always located in different clusters than modern varieties (accession codes F12, G11, F2, F1, G10, G9, D11, B12) in the dendrogram in Additional file 10: Figure S6. However, a trend in median values differentiating wild accessions from modern varieties could not be observed. In this study we analyzed six genes linked to biotic and abiotic stress resistance and seed quality. Using deep sampling and pooled amplicon sequencing, numerous new variants (including putatively deleterious ones) were identified in each of the analyzed genes, providing potential targets for future functional studies and, eventually, inclusion in breeding schemes in rye and related species (wheat, triticale). Consistent with the high diversity of the germplasm set used (with respect to domestication status and origin), we obtained a several-fold higher estimate of SNP frequency in rye (on average one SNP or InDel every 12 bp) than those reported in the past: 1 SNP/52 bp, 1 SNP/58 bp, or 1 SNP or InDel/31 bp.
In agreement with the results of previous genome-wide, DArT marker-based characterization of genetic diversity in rye, the data obtained in the present study on the distribution of private alleles among germplasm groups indicate that the genetic diversity in modern rye cultivars is relatively narrow, with less than one private allele identified per modern cultivar tested. They also provide further evidence for the value of rye PGR in genetic research and crop improvement, with more than five private alleles identified per wild accession, stressing the importance of conservation and characterization efforts. On the other hand, the clustering of the accessions in the NJ tree generated from the VAF of the 895 variants detected in common did not agree with the improvement status of the accessions, suggesting that selective pressures other than breeding practices have influenced the diversity of the genes analyzed. This study also shows that, in the case of open-pollinated populations, the sampling of a single individual or a small number of individuals from an accession most likely results in an inaccurate and perhaps even misleading representation of genetic relationships between the accessions, due to the high within-accession variability. This can be seen in the NJ trees produced after converting VAF values into genotyping-like scores, where a different clustering of accessions was observed for each range of VAF used to define heterozygous variants (Additional file 9: Figure S5). The approach of deep sampling and pooled amplicon sequencing allows the discovery of variants in candidate genes and also an evaluation of the effect of variants on gene function. This provides an additional filter to prioritize variants. The SIFT program was used to identify 73 putatively deleterious alleles commonly identified by the three variant calling algorithms. This data set contained different classes of alleles, for example homozygous variants found in one or a few individual accessions (private deleterious alleles, the first category of rare variants described above). Homozygous variants present in more than 90 accessions were also recovered. Interestingly, putatively deleterious variants were also identified where the maximum VAF was between 0.15 and 0.3 and the variant was found in only one or two individual accessions (the second category of rare variants). This suggests that alleles segregating within rye accessions at low fractions may affect gene function and potentially plant phenotype. Such variants would go undiscovered in conventional GBS or WGS assays where only one or two seeds per accession are sampled, and may be useful for functional genomic characterizations and breeding. Further studies are being designed to evaluate the different classes of putatively deleterious alleles. For example, homozygous private alleles may represent alleles where a fitness penalty has resulted in the allele being purged from most populations. Homozygous putatively deleterious alleles present in most tested accessions may represent alleles with no fitness penalty, or alleles that have no negative effect on fitness under their natural growing conditions (e.g. low aluminum in the soil). Possible mechanisms for the maintenance of rare low-frequency alleles in populations, including meiotic effects, can also be investigated. The rye amplicons used in this study were generated before the release of the rye genome.
It is expected that the recent release of the rye reference genome will enable improvements in gene target selection and primer design. The broadening of the genetic basis has been identified as one of the most important goals in rye hybrid breeding; however, the introduction of PGR into a breeding program is often challenging. The experimental protocol validated in this study provides a means to rapidly and effectively screen numerous accession samples for genes of interest and identify desired variants. Therefore, it has the potential to advance the use of exotic and primitive germplasm for targeted broadening of variation in breeding schemes. Reference genomes have been produced for only a few of the hundreds of thousands of plant species existing on the planet. Because pooled amplicon sequencing does not require a complete genome sequence, we expect that the approach described for rye can be adapted to many plant species and can facilitate better characterization of existing rich germplasm collections. We predict that flexible and low-cost methods for the recovery of rare genetic variation will support future efforts to promote sustainable food security.

Plant material

Ninety-five accessions of rye, each represented by a pooled sample comprising DNA of 96 individual plants, were analyzed in the study. This set included 90 accessions of S. cereale (among them 8 modern cultivars, 34 historic cultivars and 35 landraces) and 5 accessions of other Secale taxa, representing various geographic regions. In total, 10 accessions from this set were described as wild/weedy. Seeds were obtained from several sources, including genebanks and breeding companies (Additional file 11: Table S5).

Genomic DNA extraction, quantification and pooling

Seeds were placed in a growth room in containers lined with moist paper towels. Ten days after germination, a 20-mm-long leaf segment was harvested from each plant. For each accession, 96 plants were sampled. Leaf segments from 16 plants of the same accession were collected into one 2-mL centrifuge tube, giving six tubes of 16 individual plants for each accession. Collected leaves were freeze-dried in an Alpha 2-4 LDplus lyophilizer (Christ) for 18 h at −60°C and 0.011 mbar, followed by 1 h at −64°C and 0.006 mbar, and ground to a fine powder using an MM 301 laboratory mill (Retsch) for 2.5-5 min at a frequency of 30.0 1/s. Genomic DNA was extracted using the Mag-Bind Plant DNA DS Kit (OMEGA Bio-Tek) following the manufacturer's protocol. Quality and quantity of DNA were assessed using spectrophotometry (NanoDrop 2000, Thermo) and electrophoresis in 1% agarose gels stained with ethidium bromide. The DNA concentration of each sample tube was adjusted to 100 ng, and equal volumes of all samples from an accession were pooled together.

Primer design and PCR amplification of target genes

Sequences of the six target genes, multidrug and toxic compound extrusion (MATE1, also known as AACT1, aluminium-activated citrate transporter), thaumatin-like protein (TLP), fructose-bisphosphate aldolase (FBA), prolamin-box binding factor (PBF), secaloindoline-b (Sinb) and grain softness protein (GSP-1), were retrieved from GenBank (Additional file 12: Table S6, Additional file 13: Figure S7). The entire sequences of the Sinb and GSP-1 genes (456 and 506 bp, respectively) were amplified using the primers described by Simeone and Lafiandra and by Massa et al., respectively. For the genes FBA, MATE1, PBF and TLP, primer pairs for the generation of overlapping, ca.
600-bp-long amplicons covering the entire gene sequence were designed using Primer-BLAST. Primer pairs were tested using the DNA of the rye inbred line L318, and those producing a single product of the expected length were used for amplification of gene fragments from the pooled DNAs. Primer design and all other assays described in this work were carried out before the public release of the rye genome. The PCR setup was as follows: 200 ng of template DNA, 2.5 mM MgCl2, 0.2 μM of each primer, 0.2 mM of each dNTP, 1x DreamTaq Green buffer, and 0.5 U DreamTaq DNA polymerase (Thermo Scientific). The reactions were carried out in 25 μL in Mastercycler ep gradient S (Eppendorf) thermal cyclers. For all primer pairs, the thermal profile was: initial denaturation for 60 s at 95°C; 30 cycles of 30 s at 95°C, 30 s at 56°C and 60 s at 72°C; followed by a final extension for 5 min at 72°C. A volume of 5 μL from each reaction was used to check amplification success by electrophoretic separation in 1.5% agarose gels stained with ethidium bromide. PCR products were shipped to the Plant Breeding and Genetics Laboratory, Joint FAO/IAEA Division, International Atomic Energy Agency (Seibersdorf, Austria) for further processing.

PCR product quantification and pooling

PCR products were quantified using E-Gel 96 gels (Thermo Fisher Scientific) and quantitative lambda DNA standards, as previously described (Huynh et al., 2016). PCR product concentration was adjusted to 10 ng/μL in TE. All PCR products from a single gDNA pool were then pooled together. Pooled PCR products from each of the 95 accessions were then quantified using the Advanced Analytical Fragment Analyzer with the low-sensitivity 1 kb separation matrix and 30 cm capillaries (Advanced Analytical #DNF935). All sample pools were normalized to a 30 nM concentration in TE prior to library preparation.

Library preparation and sequencing

An indexed DNA library for NGS was prepared using the TruSeq Nano DNA HT Library Preparation Kit (Illumina, cat. no. 20015965) according to the manufacturer's recommendations. Indexed libraries were then quantified using a Qubit fluorometer (Thermo Fisher Scientific) and pooled together at equal concentrations. The pooled library was diluted to a concentration of 18 pM. Sequencing was performed on an Illumina MiSeq using 2 × 300 bp paired-end chemistry according to the manufacturer's protocol. The reads were de-multiplexed with the MiSeq Reporter software and stored as FASTQ files for downstream analysis (Additional file 14: Table S7).

Sequence evaluation

FASTQ files were aligned to the target amplicons using BWA mem (version 0.7.17-r1188) with the options -M -t 16. Amplicon fragment sequences were derived from public databases prior to the release of the rye reference genome. These were given target names that were used throughout the NGS analysis, and the sequences are referred to as the homozygous reference sequence throughout the manuscript (Additional file 12: Table S6). Samtools view (version 1.7) was used to convert from SAM to BAM format. BAM files are available in NCBI BioProject PRJNA593253. Coverage statistics were prepared using Qualimap (v.2.2.1-dev). Variant calling was performed using three algorithms: CRISP (version 0.1), GATK (version 4.0.1.2) and SNVer (version 0.5.3). Parameters used for CRISP were -OPE 0, --poolsize 192 and -qvoffset 33. The GUI of SNVer was used with the following parameters: -bq 20, -mq 17, -s 0, -f 0, -p bonferroni=0.1, -a 0, -u 30, -n 192, -t 0.
HaplotypeCaller (GATK) was used following best practices with default settings, with the exception that the ploidy was set to 192. For each method, VCF files from individual pools were merged using bcftools. Following this, read group information was unified between the three files using the Picard tools AddOrReplaceReadGroups function (http://broadinstitute.github.io/picard/index.html). Data for the calculation of allele frequency (called VAF in this manuscript) were extracted from the VCF files for each variant and each accession using the R libraries vcfR and VariantAnnotation, and used to produce VAF tables. The potential effect of nucleotide variation on gene function was evaluated with SnpEff. For this, a genome database was prepared using the 'build -genbank' function. The effect of reported nucleotide variation was also evaluated with SIFT4G, using a self-prepared genomic database built from the FASTA file of amplicon sequences used for mapping with BWA mem, a self-prepared GTF file and the UniRef90 protein database. Venn diagrams were produced using the R package eulerr (https://github.com/jolars/eulerr).

Evaluation of VAF distributions

For the variants detected in common by the three algorithms, the distributions of VAF values reported by GATK were compared pairwise using the two-part Wilcoxon test, resulting in a pairwise matrix of 0s and 1s, with 1 indicating that, for the given pair of populations, the distributions of VAF values differ at α = 0.05. This matrix was then used for hierarchical clustering analysis with the hclust function of the R package stats. Combined strip/violin plots were drawn using the R libraries ggplot2, ggbeeswarm and ggdendro.

Evaluation of phylogenetic relationships between accessions

To illustrate the relationships between the rye populations analyzed, a Nei's genetic distance matrix was calculated with POPTREEW from the VAF values reported by GATK for the variants detected in common by the three algorithms, and imported into MEGA 5.2 to produce a Neighbor-Joining dendrogram. The Nei's genetic distance matrix was also used as input to perform a principal coordinates analysis with NTSYSpc ver. 2.2. To simulate the effect on the clustering of treating the accessions as individuals, the VAF value tables were converted to genotyping scores (with "0" meaning a reference allele homozygote, "1" meaning a variant allele homozygote, and "2" meaning a heterozygote). Three settings using different VAF ranges to define heterozygous variants were applied: i) VAF < 0.3 = 0, VAF ≥ 0.7 = 1, and values in between (≥ 0.3 and < 0.7) = 2; ii) VAF < 0.4 = 0, VAF ≥ 0.6 = 1, and values in between = 2; and iii) VAF < 0.2 = 0, VAF ≥ 0.8 = 1, and values in between = 2. The obtained genotype scores were used as input to GenAlEx 6.5 for the calculation of Euclidean distances. Neighbor-Joining trees were produced from the resulting distance matrices using MEGA 5.2.

Validation of nucleotide variants

For the validation of nucleotide variants, CAPS assays were developed based on the output of PARSESNP, which provides a list of restriction endonuclease sites that are gained or lost due to the predicted SNV or indel. Serial Cloner 2.6.1 (http://serialbasics.free.fr/Serial_Cloner.html) software was used to digest the gene fragment of interest in silico and predict restriction patterns for the reference and mutant alleles. New batches of seeds were sown for several accessions where the predicted variant resulted in the gain or loss of a restriction enzyme recognition site in amplicons of the genes MATE1, PBF and Sinb.
Tissue harvest, DNA isolation and PCR were done separately for each plant, using the procedures described above. The number of individual plants ranged from 10 to 27, depending on the availability of seeds after the initial tissue collection for the NGS amplicon sequencing experiments. Restriction digestion was carried out for 20 min using 10 μL of the PCR reaction as template and 1 μL of the restriction enzyme in a total volume of 20 μL. FastDigest restriction enzymes (Thermo Fisher) with dedicated buffers were used. The digestion products were separated in 6% denaturing polyacrylamide gels (if the predicted products were shorter than 200 bp or differed in length by less than 50 bp) and visualized by silver staining as described by Targońska et al., or in 1.5% agarose gels containing ethidium bromide. For Sanger sequencing-based validation of variants, amplicons of the PBF gene from six to 27 plants per accession, obtained as described above, were sent to an external service provider. Sequencing was done on an automated sequencer using fluorescent dye terminator chemistry. The analyzed plants were classified, based on electrophoretic separation patterns/chromatograms, as homozygous reference (RefRef), heterozygous (RefAlt), or homozygous variant (AltAlt). The variant frequency was calculated using the formula (RefAlt × 1 + AltAlt × 2)/(n × 2), where n is the total number of individuals analyzed.
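To make the calculation explicit, here is a small worked example of that variant-frequency formula; the genotype counts are invented for illustration and are not taken from the study.

def variant_frequency(ref_ref, ref_alt, alt_alt):
    """f = (RefAlt * 1 + AltAlt * 2) / (2 * n): each heterozygote contributes one
    variant chromosome and each homozygous variant plant contributes two."""
    n = ref_ref + ref_alt + alt_alt      # total plants genotyped
    return (ref_alt * 1 + alt_alt * 2) / (2 * n)

# e.g. 20 plants: 14 RefRef, 5 RefAlt, 1 AltAlt -> 7 variant chromosomes out of 40
print(variant_frequency(14, 5, 1))       # 0.175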
package com.example.group_21_project.maps; import android.content.Context; import android.content.Intent; import android.view.View; import android.widget.TextView; import com.example.group_21_project.R; import com.example.group_21_project.UI.restaurant_detail; import com.example.group_21_project.model.RestaurantManager; import com.google.android.gms.maps.GoogleMap; import com.google.android.gms.maps.model.Marker; import com.google.maps.android.clustering.ClusterManager; public class ClusterMarkerInfoWindowAdapter implements GoogleMap.InfoWindowAdapter, ClusterManager.OnClusterItemClickListener<ClusterMarker>, ClusterManager.OnClusterItemInfoWindowClickListener<ClusterMarker> { private Context context; private ClusterMarker clusterMarker; private RestaurantManager restaurantManager; public ClusterMarkerInfoWindowAdapter(Context context) { this.context = context; this.restaurantManager = RestaurantManager.getInstance(); } @Override public View getInfoWindow(Marker marker) { View view = View.inflate(context, R.layout.cluster_marker_item, null); TextView title = view.findViewById(R.id.cluster_marker_item_title); title.setText(clusterMarker.getTitle()); TextView address = view.findViewById(R.id.cluster_marker_item_address); address.setText(clusterMarker.getRestaurant().getAddress().getPhysicalAddress()); TextView hazardRating = view.findViewById(R.id.cluster_marker_item_hazard); if(clusterMarker.getRestaurant().hasInspection()) { switch (clusterMarker.getRestaurant().getInspections().get(0).getHazardRating()) { case "Moderate": hazardRating.setText(R.string.hazard_rating_moderate); break; case "Low": hazardRating.setText(R.string.hazard_rating_low); break; default: hazardRating.setText(R.string.hazard_rating_high); } } else { hazardRating.setText(R.string.hazard_rating_high); // setting to High since default hazard level/icon is High in ClusterManagerRenderer } return view; } @Override public View getInfoContents(Marker marker) { return null; } @Override public boolean onClusterItemClick(ClusterMarker item) { this.clusterMarker = item; return false; } @Override public void onClusterItemInfoWindowClick(ClusterMarker item) { int selected_rest = 0; for(int i = 0; i < restaurantManager.getRestaurants().size(); i++) { if(item.getRestaurant() == restaurantManager.getRes(i)) { selected_rest = i; break; } } Intent i = restaurant_detail.makeLaunchRestDetail(context); i.putExtra("selected_rest", selected_rest); context.startActivity(i); } }
<filename>voyager/model_wrappers.py import json import os import shutil import time import keras.backend as K import numpy as np import tensorflow as tf from voyager.callbacks import NBatchLogger, ReduceLROnPlateauWithConfig, ResumeCheckpoint from voyager.data_loader import read_benchmark_trace from voyager.losses import HierarchicalSequenceLoss, HierarchicalCrossEntropyWithLogitsLoss from voyager.models import get_model from voyager.utils import load_config, create_prefetch_file, timefunction class ModelWrapper: VERBOSITY_QUIET = 0 VERBOSITY_PROGBAR = 1 VERBOSITY_EPOCH = 2 def __init__(self, config, benchmark, model_name, verbosity=1): self.config = config self.benchmark = benchmark self.verbosity = verbosity self._callbacks = [] self.callbacks = None self.step = 0 self.epoch = 0 self.phase = 0 self.backups = {} self.batch_logger = None self.lr_decay = None self.tensorboard_path = None self.num_offsets = (1 << config.offset_bits) self.model_path = None self.monitor = None self.recreate_chkpt = False self.chkpt = None # Create a model print('DEBUG : Creating a model with...') print(' pc vocab size :', benchmark.num_pcs()) print(' page vocab size :', benchmark.num_pages()) self.model = get_model(model_name).compile_model(config, benchmark.num_pcs(), benchmark.num_pages()) self._compile_metrics() self.backups['optim'] = self.model.optimizer def _compile_metrics(self): # The following is necessary to "compile" the metrics. if self.config.sequence_loss: y_true = tf.zeros((2, 1, 16, 1), dtype=tf.int32) y_pred = tf.zeros((1, 16, self.benchmark.num_pages() + self.num_offsets)) else: y_true = tf.zeros((2, 1, 1, 1), dtype=tf.int32) y_pred = tf.zeros((1, self.benchmark.num_pages() + self.num_offsets)) self.model.compiled_metrics.update_state(y_true, y_pred) # Set up list of things to backup for metric in self.model.metrics: self.backups[metric.name] = metric # This is necessary if we also wanted to compile the loss #self.model.compiled_loss(y_true, y_pred) def _init_callbacks(self, callbacks): if self.tensorboard_path: # Create a new one everytime # Need to do this because the write objects get destroyed at the end # of every train phase, which we have multiple of for online training tensorboard = tf.keras.callbacks.TensorBoard( log_dir=self.tensorboard_path, histogram_freq=1 ) # Do this to not affect the callbacks list which .append(..) would callbacks = callbacks + [tensorboard] if self.config.learning_rate_decay > 1: self.lr_decay = ReduceLROnPlateauWithConfig( monitor='val_acc', factor=1 / self.config.learning_rate_decay, patience=5, mode='max', verbose=1, min_lr=self.config.min_learning_rate, min_delta=0.005, ) callbacks = callbacks + [self.lr_decay] self.backups['lr_decay'] = self.lr_decay if self.model_path is not None: if self.recreate_chkpt or self.chkpt is None: self.chkpt = tf.keras.callbacks.ModelCheckpoint( filepath=self.model_path, save_weights_only=True, monitor='val_acc' if self.monitor is None else self.monitor, mode='max', save_best_only=True, verbose=1, ) callbacks = callbacks + [self.chkpt] # Create callback list self.callbacks = tf.keras.callbacks.CallbackList( callbacks=self._callbacks + callbacks, add_progbar=self.verbosity == ModelWrapper.VERBOSITY_PROGBAR, verbose=self.verbosity, epochs=self.config.num_epochs, steps=self.config.steps_per_epoch, model=self.model, ) def setup_callbacks(self, args): # Set-up batch logger callback. 
if args.print_every is not None: self.batch_logger = NBatchLogger(args.print_every, start_epoch=self.epoch, start_step=self.step) self._callbacks.append(self.batch_logger) # Set-up model checkpoint callback. if args.model_path: self.model_path = args.model_path else: print('Notice: Not checkpointing the model. To do so, please provide a path to --model-path.') # Set-up Tensorboard callback. if args.tb_dir: self.tensorboard_path = args.tb_dir else: print('Notice: Not logging to Tensorboard. To do so, please provide a directory to --tb-dir.') # Set-up ResumeCheckpoint callback. # Set-up learning rate decay callback. if self.config.learning_rate_decay <= 1: print('Notice: Not decaying learning rate. To do so, please provide learning_rate_decay > 1.0 in the config file.') # Create and add in ResumeCheckpoint if args.checkpoint_every is not None: self._callbacks.append(ResumeCheckpoint( self, args.checkpoint_every, args.model_path, self.step, ) ) def load(self, model_path): if not self.model.built: self.model(tf.zeros((1, self.model.sequence_length * 3,))) self.model.load(model_path) @tf.function def train_step(self, x, y): # Single train step with tf.GradientTape() as tape: logits = self.model(x, training=True) loss_value = self.model.loss(y, logits) # regularization_loss = tf.add_n(self.model.losses) # loss_value += 0.001 * regularization_loss # Update gradient grads = tape.gradient(loss_value, self.model.trainable_weights) self.model.optimizer.apply_gradients(zip(grads, self.model.trainable_weights)) # Update logs of loss and metrics logs = {'loss': loss_value} for metric in self.model.metrics: metric.update_state(y, logits) logs[metric.name] = metric.result() return logs def train(self, train_ds=None, valid_ds=None, callbacks=None): # Create default datasets if there are None if train_ds is None: train_ds, valid_ds, test_ds = self.benchmark.split(self.epoch, self.step) # Create callbacks list anew self._init_callbacks(callbacks if callbacks is not None else []) # Reset metrics and start callbacks self.reset_metrics() self.callbacks.on_train_begin() self.callbacks.on_epoch_begin(self.epoch) epoch_ended = False logs = {} # Main training loop for _, _, x, y_page, y_offset in train_ds: epoch_ended = False self.step += 1 # Needed for resume, without this, there is an off-by-one error if self.step <= self.config.steps_per_epoch: # Do one train step self.callbacks.on_train_batch_begin(self.step) logs = self.train_step(x, (y_page, y_offset)) self.callbacks.on_train_batch_end(self.step, logs) # Advance an epoch if self.step >= self.config.steps_per_epoch: self.step = 0 self.epoch += 1 # Evaluate on validation dataset if it was passed in if valid_ds is not None: val_logs = self.evaluate([valid_ds], training=True) logs.update(val_logs) self.callbacks.on_epoch_end(self.epoch - 1, logs) epoch_ended = True if self.epoch >= self.config.num_epochs: break self.callbacks.on_epoch_begin(self.epoch) self.reset_metrics() # Make sure epochs are ended properly when we run out of data prematurely if not epoch_ended: self.epoch += 1 self.callbacks.on_epoch_end(self.epoch) self.callbacks.on_train_end(logs) def train_online(self, prefetch_file=None, callbacks=None): # Create datasets train_datasets, eval_datasets = self.benchmark.split(self.epoch % self.config.num_epochs_online, self.step, online=True, start_phase=self.phase) # Change # of epochs to # of online epochs orig_num_epochs = self.config.num_epochs self.config.num_epochs = (self.phase + 1) * self.config.num_epochs_online self.monitor = 'acc' 
self.recreate_chkpt = True # Enables online training printing if self.batch_logger: self.batch_logger.set_online(True, self.phase) # Online training phase for train_ds, eval_ds in zip(train_datasets, eval_datasets): # Train while showing eval performance self.train(train_ds, eval_ds) # Reload the best model self.load(self.model_path) # Generate on eval dataset inst_ids, addresses, _ = self.generate([eval_ds]) # TODO: Resume functionality doesn't work when the file writing fails create_prefetch_file(prefetch_file, inst_ids, addresses, append=True) # Reset for reproducible dropout self.step = 0 self.model.step = 0 self.phase += 1 # Manually advance state if self.batch_logger: self.batch_logger.advance_phase() # New LR Decay every phase if self.lr_decay: self.lr_decay._reset() # Need to advance number of epochs so that we have num_epochs_online # for every phase so that the tensorboard callback works self.config.num_epochs += self.config.num_epochs_online # Restore original # of epochs self.config.num_epochs = orig_num_epochs @tf.function def evaluate_step(self, x, y): # One evaluate step logits = self.model(x, training=False) loss_value = self.model.loss(y, logits) # Save logs of loss and metrics logs = {'val_loss': loss_value} for metric in self.model.metrics: metric.update_state(y, logits) logs['val_' + metric.name] = metric.result() return logs def evaluate(self, datasets=None, callbacks=None, training=False): # Setup default datasets if there are None if datasets is None: train_ds, valid_ds, test_ds = self.benchmark.split(self.epoch, self.step) datasets = [test_ds] # Create callbacks list anew if not training: self._init_callbacks(callbacks if callbacks is not None else []) # Validation over dataset self.reset_metrics() self.callbacks.on_test_begin() # Validation loop for ds in datasets: for step, (_, _, x, y_page, y_offset) in enumerate(ds): self.callbacks.on_test_batch_begin(step) logs = self.evaluate_step(x, (y_page, y_offset)) self.callbacks.on_test_batch_end(step, logs) self.callbacks.on_test_end(logs) return logs @tf.function def generate_step(self, x, y): # Generate step logits = self.model(x, training=False) loss_value = self.model.loss(y, logits) # Save logs, but they may not end up getting used here logs = {'val_loss': loss_value} for metric in self.model.metrics: metric.update_state(y, logits) logs['val_' + metric.name] = metric.result() return logits, logs def generate(self, datasets=None, prefetch_file=None, callbacks=None): # Create default datasets if there are none if datasets is None: train_ds, valid_ds, test_ds = self.benchmark.split(self.epoch, self.step) datasets = [test_ds] # Create callbacks list anew self._init_callbacks(callbacks if callbacks is not None else []) addresses = [] inst_ids = [] self.reset_metrics() self.callbacks.on_test_begin() def sigmoid(x): return 1 / (1 + np.exp(-x)) for ds in datasets: correct = 0 total = 0 for step, (idx, batch_inst_ids, x, y_page, y_offset) in enumerate(ds): self.callbacks.on_test_batch_begin(step) logits, logs = self.generate_step(x, (y_page, y_offset)) # Grab the final (only) timestep logits if self.config.sequence_loss: page_logits = logits[:, -1, :-self.num_offsets] offset_logits = logits[:, -1, -self.num_offsets:] else: page_logits = logits[:, :-self.num_offsets] offset_logits = logits[:, -self.num_offsets:] # Argmax for prediction pred_pages = tf.argmax(page_logits, -1).numpy().tolist() pred_offsets = tf.argmax(offset_logits, -1).numpy().tolist() page_logits = page_logits.numpy() offset_logits = offset_logits.numpy() # 
Unmap addresses for i, (idxi, xi, inst_id, pred_page, pred_offset, yp, yo) in enumerate(zip(idx.numpy().tolist(), x.numpy().tolist(), batch_inst_ids.numpy().tolist(), pred_pages, pred_offsets, y_page.numpy().tolist(), y_offset.numpy().tolist())): ''' # TODO: Possibly threshold here if sigmoid(page_logits[i, pred_page]) < 0.99999 or sigmoid(offset_logits[i, pred_offset]) < 0.99999: continue ''' total += 1 # OOV if pred_page == 0: continue if pred_page == yp[-1] and pred_offset == yo[-1]: correct += 1 addresses.append(self.benchmark.unmap(idxi, xi, pred_page, pred_offset, self.config.sequence_length)) inst_ids.append(inst_id) self.callbacks.on_test_batch_end(step, logs) print(correct / total * 100, total) self.callbacks.on_test_end(logs) # Create a prefetch file if path is given if prefetch_file is not None: create_prefetch_file(prefetch_file, inst_ids, addresses) else: # Return if no prefetch file return inst_ids, addresses, logs def reset_metrics(self): # Reset all the metrics with one convenient call for metric in self.model.metrics: metric.reset_state() def get_datasets(self, train, valid, test): datasets = [] train_ds, valid_ds, test_ds = self.benchmark.split(self.epoch, self.step) if train: datasets.append(train_ds) if valid: datasets.append(valid_ds) if test: datasets.append(test_ds) return datasets # Need the newline if run using the progress bar instead of NBatchLogger @timefunction('\nCreating checkpoint') def create_checkpoint(self, model_path): # Paths to main resume and backup resume checkpoint_path = os.path.join(model_path, 'resume') backup_path = os.path.join(model_path, 'resume_backup') # If checkpoint already exists, copy to backup path if os.path.exists(os.path.join(checkpoint_path, 'done')): shutil.copytree(checkpoint_path, backup_path) # Remove the done file from the current checkpoint path os.remove(os.path.join(checkpoint_path, 'done')) # Backup model self.model.save_weights(os.path.join(checkpoint_path, 'model')) # Backup callbacks, metrics, optimizer backup_data = { 'epoch': self.epoch, 'step': self.step, 'phase': self.phase, } for name, item in self.backups.items(): backup_data[name] = config_to_python(item.get_config()) # Optimizer weights aren't saved in get_config() unfortunately np.save(os.path.join(checkpoint_path, 'optim_weights.npy'), self.model.optimizer.get_weights(), allow_pickle=True) # Dump to json with open(os.path.join(checkpoint_path, 'data.json'), 'w') as f: json.dump(backup_data, f, indent=4) # Create empty done file to signify that we're done with open(os.path.join(checkpoint_path, 'done'), 'w') as _: pass # Safe to remove backup now if os.path.exists(backup_path): shutil.rmtree(backup_path) @timefunction('Restoring checkpoint') def restore_checkpoint(self, model_path): # Paths to main resume and backup resume checkpoint_path = os.path.join(model_path, 'resume') backup_path = os.path.join(model_path, 'resume_backup') # Check main path and then backup if os.path.exists(os.path.join(checkpoint_path, 'done')): load_path = checkpoint_path elif os.path.exists(os.path.join(backup_path, 'done')): load_path = backup_path else: print('No valid checkpoints', end='') return # Restore callbacks, metrics, optimizer state with open(os.path.join(load_path, 'data.json')) as f: backup_data = json.load(f) for name, config in backup_data.items(): if name == 'epoch': self.epoch = config self.model.epoch = config elif name == 'step': self.step = config self.model.step = config elif name == 'phase': self.phase = config elif name == 'optim': weights = 
np.load(os.path.join(load_path, 'optim_weights.npy'), allow_pickle=True) for k, v in config.items(): setattr(self.model.optimizer, k, v) # Need to do a single forward pass so that model is initialized self.model(tf.zeros((1, self.model.sequence_length * 3,))) # Need to do a single backwards pass so that optimizer is initialized zero_grads = [tf.zeros_like(w) for w in self.model.trainable_weights] self.model.optimizer.apply_gradients(zip(zero_grads, self.model.trainable_weights)) # Now we can finally set the optimizer weights self.model.optimizer.set_weights(python_to_optim_weights(weights)) elif name in self.backups: self.backups[name].load_config(config) # Reload model state self.model.load(os.path.join(load_path, 'model')) @staticmethod def setup_from_args(args): print(args) # Parse config file config = load_config(args.config, args.debug) print(config) # Load and process benchmark benchmark = read_benchmark_trace(args.benchmark, config) # Create and compile the model model_wrapper = ModelWrapper(config, benchmark, args.model_name, verbosity=1 if args.print_every is None else 2) if args.auto_resume: model_wrapper.restore_checkpoint(args.model_path) model_wrapper.setup_callbacks(args) return model_wrapper # Utility functions for backup and restore def cast_numerical(v): if isinstance(v, (np.int8, np.int16, np.int32, np.int64, np.int)): return int(v) elif isinstance(v, (np.float16, np.float32, np.float64, np.float)): return float(v) return v def config_to_python(config): return {k: cast_numerical(v) for k, v in config.items()} def optim_weights_to_python(weights): ret = [] for weight in weights: if not isinstance(weight, np.ndarray): ret.append(cast_numerical(weight)) else: ret.append(weight.tolist()) return ret def python_to_optim_weights(weights): ret = [] for weight in weights: if not isinstance(weight, list): if isinstance(weight, int): ret.append(np.int64(weight)) else: ret.append(np.float32(weight)) else: ret.append(np.array(weight)) return ret
def project_info(self, project_name):
    project = None
    url = "%sprojects" % (self.server_url)
    response, content = self._get(url)
    if response.status != 200:
        raise RecordStoreAccessError("Error in accessing %s\n%s: %s" % (url, response.status, content))
    else:
        code = json.loads(content)['code']
        if code != 200:
            raise RecordStoreAccessError("%d\n%s" % (response.status, content))
    for p in serialization.decode_project_list(content)['content']['projects']:
        if p['name'] == project_name:
            project = p
            break
    if project:
        return {'name': project['name'], 'description': project['description']}
    else:
        return {'name': project_name, 'description': ''}
package utility

import (
	"errors"
	"fmt"
	"io"
	"strings"
)

const inputLimit = 50

type Interaction struct {
	Reader io.Reader
}

// AskUserInput prints the query and reads a single response from the configured
// Reader, trimming surrounding whitespace. Input longer than inputLimit
// characters is rejected.
func (i *Interaction) AskUserInput(query string) (string, error) {
	fmt.Println(query + ": ")
	// Read one extra byte so that inputs exceeding the limit can actually be
	// detected; with a buffer of exactly inputLimit bytes the check below could
	// never fire.
	buf := make([]byte, inputLimit+1)
	l, err := i.Reader.Read(buf)
	if err != nil {
		fmt.Println(err.Error())
		return "", err
	}
	if l > inputLimit {
		fmt.Println("Error: character limit(50) exceeded")
		return "", errors.New("Error: character limit(50) exceeded")
	}
	return strings.TrimSpace(string(buf[:l])), nil
}
def segmentate_grayscale( image: Union[np.ndarray, str], threshold: float, explain: bool = False ) -> np.ndarray: if isinstance(image, str): image = cv2.imread(image) image = cast(np.ndarray, image) _hue, _sat, _value = cv2.split(cv2.cvtColor(image, cv2.COLOR_BGR2HSV)) _value[:] = 200 hsv_img = cv2.merge((_hue, _sat, _value)) bgr_img = cv2.cvtColor(hsv_img, cv2.COLOR_HSV2BGR) if explain: imgutils.show_img(bgr_img, title="Reduced Shadows") gray_img = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2GRAY) gray_img = cv2.normalize(gray_img, gray_img, 0, 255, cv2.NORM_MINMAX) if explain: imgutils.show_img(gray_img, title="Grayscaled") blurred_img = cv2.GaussianBlur(gray_img, (0, 0), 3, 3) kernel = np.ones((3, 3), np.uint8) blurred_img = cv2.morphologyEx(blurred_img, cv2.MORPH_OPEN, kernel) denoised = cv2.morphologyEx(blurred_img, cv2.MORPH_CLOSE, kernel) mask = cv2.threshold( denoised, thresh=threshold, maxval=255, type=cv2.THRESH_BINARY_INV )[1] if explain: imgutils.show_img(mask, title="Mask") largest_cntr = np.zeros(image.shape, dtype=image.dtype) contours, hierarchy = cv2.findContours( mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE ) largest_cntr_indx = max(enumerate(contours), key=lambda x: cv2.contourArea(x[1]))[0] bounding_rect = cv2.boundingRect(contours[largest_cntr_indx]) largest_cntr = cv2.drawContours( image=largest_cntr, contours=contours, contourIdx=largest_cntr_indx, color=255, thickness=cv2.FILLED, hierarchy=hierarchy, ) largest_cntr = cv2.cvtColor(largest_cntr, cv2.COLOR_BGR2GRAY) if explain: imgutils.show_img(largest_cntr, title="Largest Contour") image = cv2.bitwise_and(image, image, mask=largest_cntr) x, y, w, h = bounding_rect return image[y : y + h, x : x + w]
<gh_stars>0 #include <cstdio> #include <limits> #include <sstream> #include <iomanip> #include <boost/lexical_cast.hpp> #include "asserts.hpp" #include "as_function_params.hpp" #include "as_object.hpp" #include "as_value.hpp" #include "swf_environment.hpp" #include "swf_player.hpp" namespace swf { as_value_ptr as_value::from_bool(bool value) { return std::make_shared<as_value>(value); } as_value::as_value(as_value_ptr get, as_value_ptr set) : type_(ValueType::PROPERTY), b_(false), d_(0), o_(nullptr), p_(std::make_shared<as_property>(get, set)), flags_(PropertyFlags::NONE) { } as_value::as_value(as_native_function_type fn) : type_(ValueType::OBJECT), b_(false), d_(0), flags_(PropertyFlags::NONE) { o_ = as_native_function::create(weak_player_ptr(), fn); } const char* as_value::to_string() const { return to_std_string().c_str(); } std::string as_value::to_std_string() const { switch(type_) { case ValueType::UNDEFINED: return "undefined"; case ValueType::BOOLEAN: return b_ ? "true" : "false"; case ValueType::NUMERIC: { if(d_ == std::numeric_limits<double>::quiet_NaN() || d_ == std::numeric_limits<double>::signaling_NaN()) { return "NaN"; } else { std::stringstream ss_fixed; ss_fixed << std::setprecision(14) << std::ios::fixed << d_; std::string s_fix = ss_fixed.str(); std::stringstream ss_sci; ss_fixed << std::setprecision(14) << std::ios::scientific << d_; std::string s_sci = ss_sci.str(); if(s_sci.length() < s_fix.length()) { return s_sci; } return s_fix; } } case ValueType::STRING: return s_; case ValueType::NULL_VALUE: return "null"; case ValueType::OBJECT: return o_->to_string(); case ValueType::PROPERTY: /* convert property p_ to a string. */ default: ASSERT_LOG(false, "to_string() bad type: " << static_cast<int>(type_)); } return "undefined"; } as_value operator+(const as_value& v1, const as_value& v2) { auto p1 = v1.to_primitive(); auto p2 = v2.to_primitive(); if(p1.type_ == ValueType::STRING || p2.type_ == ValueType::STRING) { return as_value(p1.to_string() + p2.to_std_string()); } return as_value(p1.to_number() + p2.to_number()); } as_value as_value::to_primitive(HintType hint) const { if(type_ == ValueType::OBJECT && o_ != nullptr) { return o_->default_value(hint); } else if(type_ == ValueType::PROPERTY) { ASSERT_LOG(false, "XXX todo PROPERTY::to_primitive"); } return *this; } int32_t as_value::to_int32() { double num = to_number(); if(num == std::numeric_limits<double>::quiet_NaN() || num == std::numeric_limits<double>::signaling_NaN() || abs(num) == 0 || abs(num) == std::numeric_limits<double>::infinity()) { return 0; } double pos_int = (num > 0 ? 1 : -1) * floor(abs(num)); return static_cast<int32_t>(fmod(pos_int, 4294967296)); } int as_value::to_integer() { /// Need to double check this against the standard. double num = to_number(); if(num == std::numeric_limits<double>::quiet_NaN() || num == std::numeric_limits<double>::signaling_NaN() || abs(num) == 0 || abs(num) == std::numeric_limits<double>::infinity()) { return 0; } return static_cast<int>((num > 0 ? 1 : -1) * floor(abs(num))); } double as_value::to_number() { switch(type_) { case ValueType::UNDEFINED: return std::numeric_limits<double>::infinity(); case ValueType::BOOLEAN: return b_ ? 
1 : 0; case ValueType::NUMERIC: return d_; case ValueType::NULL_VALUE: return 0; case ValueType::OBJECT: return o_->to_number(); case ValueType::PROPERTY: { ASSERT_LOG(false, "XXX todo PROPERTY::to_number"); } } ASSERT_LOG(type_ != ValueType::STRING, "FATAL: unknown type_ value: " << static_cast<int>(type_)); // String case -- we do thing the lazy way, not the compliant way. double num = std::numeric_limits<double>::infinity(); try { num = boost::lexical_cast<double>(s_); } catch(boost::bad_lexical_cast&) { std::cerr << "Caught a bad floating point cast from " << s_ << " assuming infinity" << std::endl; } return num; } bool as_value::to_boolean() { // XXX check for correctness. return to_integer() != 0; } as_object_ptr as_value::to_object() { switch(type_) { case ValueType::UNDEFINED: case ValueType::BOOLEAN: case ValueType::NUMERIC: case ValueType::NULL_VALUE: case ValueType::STRING: break; case ValueType::OBJECT: return o_; case ValueType::PROPERTY: { ASSERT_LOG(false, "XXX todo PROPERTY::to_object"); } } return nullptr; } as_value_ptr as_value::clone() { return std::make_shared<as_value>(*this); } void as_value::set_property(const as_value_ptr& value) { ASSERT_LOG(is_property(), "Attempt to set property value on non-property."); p_->set(o_, value); } as_value_ptr as_value::get_property() const { ASSERT_LOG(is_property(), "Attempt to get property value on non-property."); return p_->get(o_); } as_value_ptr as_value::get_property(const as_value_ptr& primitive) const { ASSERT_LOG(is_property(), "Attempt to get property value on non-property."); return p_->get(primitive); } void as_value::set_property_target(const as_object_ptr& target) { ASSERT_LOG(is_property(), "Attempt to set property target on non-property."); o_ = target; } as_property::as_property(const as_value_ptr& get, const as_value_ptr& set) { get_ = std::dynamic_pointer_cast<as_function>(get->to_object()); set_ = std::dynamic_pointer_cast<as_function>(set->to_object()); } void as_property::set(const as_object_ptr& target, const as_value_ptr& value) { if(target) { if(set_) { auto env = environment::create(target->get_player()); env->push(value); (*set_)(function_params(as_value::create(target), env, 1, env->get_top_index())); } } else { LOG_WARN("Tried to set property on null target."); } } as_value_ptr as_property::get(const as_object_ptr& target) { as_value_ptr value = as_value::create(); if(target) { if(get_) { auto env = environment::create(target->get_player()); value = (*get_)(function_params(as_value::create(target), env, 0, 0)); } } else { LOG_WARN("Tried to set property on null target."); } return value; } as_value_ptr as_property::get(const as_value_ptr& primitive) { if(get_) { return (*get_)(function_params(primitive, nullptr, 0, 0)); } return nullptr; } as_function_ptr as_value::to_function() { if(type_ == ValueType::OBJECT && o_ != nullptr) { // XXX This is a little icky -- maybe as_object should have a virtual operator(). 
return std::dynamic_pointer_cast<as_function>(o_); } LOG_WARN("couldn't convert as_value to function."); return nullptr; } as_value_ptr as_value::find_property(const std::string& name) { switch(type_) { case ValueType::UNDEFINED: case ValueType::NULL_VALUE: case ValueType::PROPERTY: break; case ValueType::STRING: return player::get_builtin_string_method(name); case ValueType::BOOLEAN: return player::get_builtin_boolean_method(name); case ValueType::NUMERIC: return player::get_builtin_numeric_method(name); case ValueType::OBJECT: { if(o_) { return o_->get_member(name); } break; } } return nullptr; } }
package main import ( "artlovecode/travel/apiclients" "artlovecode/travel/formatters" "encoding/json" "fmt" "io/ioutil" "math/rand" "os" "strings" "time" ) type Country struct { ISOShortCode string `json:"iso"` Name string `json:"name"` } /* Select a country at random, and display some fun travel facts about it! */ func main() { // Contains { iso: "XX", name: "XXXX" } mappings of country names and country codes countriesFile, fileIoErr := os.Open("mappings.json") if fileIoErr != nil { fmt.Print(fileIoErr) os.Exit(1) } defer countriesFile.Close() countryBytes, fileIoErr := ioutil.ReadAll(countriesFile) var countries [238]Country json.Unmarshal(countryBytes, &countries) rand.Seed(time.Now().Unix()) country := countries[rand.Intn(238)] metadataClient := apiclients.ComposeApiClient("https://www.distance24.org/") advisoryClient := apiclients.ComposeApiClient("https://www.travel-advisory.info/api") advisoryResponse, advisoryErr := advisoryClient("/?countrycode=" + country.ISOShortCode) metadataResponse, metadataErr := metadataClient("/route.json?stops=Norway|" + country.Name) if advisoryErr != nil { fmt.Print(advisoryErr) os.Exit(1) } if metadataErr != nil { fmt.Print(metadataErr) os.Exit(1) } fmt.Println("Your next destination is...", strings.ToUpper(country.Name)+"\n\n") fmt.Println(formatters.FormatAdvisory(advisoryResponse, country.ISOShortCode) + "\n\n") fmt.Println(formatters.FormatMetaData(metadataResponse) + "\n\n") }
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Split a tractogram into multiple files, 2 options available : Split into X files, or split into files of Y streamlines. By default, streamlines to add to each chunk will be chosen randomly. Optionally, you can split streamlines... - sequentially (the first n/nb_chunks streamlines in the first chunk and so on). - randomly, but per Quickbundles clusters. """ import argparse import logging import os from dipy.io.stateful_tractogram import set_sft_logger_level from dipy.io.streamline import save_tractogram import numpy as np from scilpy.io.streamlines import load_tractogram_with_reference from scilpy.io.utils import (add_overwrite_arg, add_reference_arg, assert_inputs_exist, assert_outputs_exist, assert_output_dirs_exist_and_empty, add_verbose_arg) from scilpy.tractograms.tractogram_operations import ( split_sft_sequentially, split_sft_randomly, split_sft_randomly_per_cluster) def _build_arg_parser(): p = argparse.ArgumentParser( formatter_class=argparse.RawTextHelpFormatter, description=__doc__) p.add_argument('in_tractogram', help='Tractogram input file name.') p.add_argument('out_prefix', help='Prefix for the output tractogram, index will be ' 'appended \nautomatically (ex, _0.trk), based on ' 'input type.') p.add_argument('--out_dir', default='', help='Put all output tractogram in a specific directory.') group = p.add_mutually_exclusive_group(required=True) group.add_argument('--chunk_size', type=int, help='The maximum number of streamlines per file.') group.add_argument('--nb_chunks', type=int, help='Divide the file in equal parts.') group2 = p.add_mutually_exclusive_group() group2.add_argument( '--split_per_cluster', action='store_true', help='If set, splitting will be done per cluster (computed with \n' 'Quickbundles) to ensure that at least some streamlines are \n' 'kept from each bundle in each chunk. Else, random splitting is\n' 'performed (default).') group2.add_argument( '--do_not_randomize', action='store_true', help="If set, splitting is done sequentially through the original \n" "sft instead of using random indices.") p.add_argument('--qbx_thresholds', nargs='+', type=float, default=[40, 30, 20], metavar='t', help="If you chose option '--split_per_cluster', you may " "set the \nQBx threshold value(s) here. Default: " "%(default)s") p.add_argument('--seed', default=None, type=int, help='Use a specific random seed for the subsampling.') add_reference_arg(p) add_overwrite_arg(p) add_verbose_arg(p) return p def main(): parser = _build_arg_parser() args = parser.parse_args() assert_inputs_exist(parser, args.in_tractogram) _, out_extension = os.path.splitext(args.in_tractogram) assert_output_dirs_exist_and_empty(parser, args, [], optional=args.out_dir) # Check only the first potential output filename, we don't know how many # there are yet. 
assert_outputs_exist(parser, args, os.path.join( args.out_dir, '{}_0{}'.format(args.out_prefix, out_extension))) log_level = logging.WARNING if args.verbose: log_level = logging.DEBUG set_sft_logger_level('INFO') logging.getLogger().setLevel(log_level) logging.debug("Loading sft.") sft = load_tractogram_with_reference(parser, args, args.in_tractogram) streamlines_count = len(sft.streamlines) if args.nb_chunks: chunk_size = int(streamlines_count/args.nb_chunks) nb_chunks = args.nb_chunks else: chunk_size = args.chunk_size nb_chunks = int(streamlines_count/chunk_size)+1 # Check other outputs out_names = ['{0}_{1}{2}'.format(args.out_prefix, i, out_extension) for i in range(nb_chunks)] assert_outputs_exist(parser, args, [os.path.join(args.out_dir, out_names[i]) for i in range(1, nb_chunks)]) # All chunks will be equal except the last one chunk_sizes = np.ones((nb_chunks,), dtype=np.int16) * chunk_size chunk_sizes[-1] += (streamlines_count - chunk_size * nb_chunks) if args.do_not_randomize: sfts = split_sft_sequentially(sft, chunk_sizes) elif args.split_per_cluster: # With this version, will contain an additional sft with non-included # streamlines. Should be of size close to 0. Not using it. sfts = split_sft_randomly_per_cluster( sft, chunk_sizes, args.seed, args.qbx_thresholds) else: sfts = split_sft_randomly(sft, chunk_sizes, args.seed) for i in range(nb_chunks): out_name = os.path.join(args.out_dir, out_names[i]) save_tractogram(sfts[i], out_name) if __name__ == "__main__": main()
import java.lang.reflect.Array; import java.util.ArrayList; import java.util.HashSet; import java.util.Scanner; import java.util.Set; /** * Created by yuu on 12/4/17. */ public class Problem546C { public static void main(String[] args) { Scanner sc = new Scanner(System.in); Set<String> state = new HashSet<>(); int n = sc.nextInt(); int a = sc.nextInt(); ArrayList<Integer> playerA = new ArrayList<>(); for (int i = 0; i < a; i++) { playerA.add(sc.nextInt()); } int b = sc.nextInt(); ArrayList<Integer> playerB = new ArrayList<>(); for (int i = 0; i < b; i++) { playerB.add(sc.nextInt()); } int gameCounter = 0; boolean loop = false; int winner = 1; ArrayList<String> states = buildState(playerA, playerB); while (true) { for (String s: states) { if (state.contains(s)) { loop = true; break; } else { state.add(s); } } // play the game int first = playerA.remove(0); int second = playerB.remove(0); if (first < second) { playerB.add(first); playerB.add(second); } else { playerA.add(second); playerA.add(first); } gameCounter++; if (playerA.size() == 0) { winner = 2; break; } if (playerB.size() == 0) { winner = 1; break; } states = buildState(playerA, playerB); if (loop) break; } if (loop) { System.out.println(-1); } else { System.out.println(gameCounter + " " + winner); } } public static ArrayList<String> buildState(ArrayList<Integer> a, ArrayList<Integer> b) { // Add first states StringBuilder stateA = new StringBuilder(); for (int i = 0; i < a.size(); i++) { stateA.append(a.get(i)); stateA.append(" "); } StringBuilder stateB = new StringBuilder(); for (int i = 0; i < b.size(); i++) { stateB.append(b.get(i)); stateB.append(" "); } ArrayList<String> results = new ArrayList<>(); results.add(stateA + "0 " + stateB.substring(0, stateB.length()-1)); results.add(stateB + "0 " + stateA.substring(0, stateA.length()-1)); return results; } }
// Has returns true if the version string is in the set.
//
//	vs.Has("apps/v1")
func (v VersionSet) Has(apiVersion string) bool {
	for _, x := range v {
		if x == apiVersion {
			return true
		}
	}
	return false
}
import { render } from '@testing-library/react';

import { Footer } from './Footer';

describe('<Footer />', () => {
  it('should match snapshot', () => {
    const component = render(<Footer />);
    expect(component.asFragment()).toMatchSnapshot();
  });
});
/**
 * Director to build StampTree XML data.
 *
 * @author Kazushi Minagawa, Digital Globe, Inc.
 */
public class StampTreeXmlDirector {

    private DefaultStampTreeXmlBuilder builder;

    /**
     * Creates new StampTreeXmlDirector
     * @param builder
     */
    public StampTreeXmlDirector(DefaultStampTreeXmlBuilder builder) {
        super();
        this.builder = builder;
    }

    /**
     * Encodes the whole stamp tree into XML.
     * @param allTrees list of StampTrees
     * @return XML
     */
    public String build(List<StampTree> allTrees) {

        try {
            builder.buildStart();
            for (StampTree tree : allTrees) {
                lbuild(tree);
            }
            builder.buildEnd();
            return builder.getProduct();

        } catch (Exception e) {
            LogWriter.error(getClass(), "", e);
        }
        return null;
    }

    /**
     * Encodes a single tree into XML.
     * @param tree StampTree
     * @throws IOException
     */
    private void lbuild(StampTree tree) throws IOException {

        // Get the root node and obtain a preorder Enumeration of its children
        DefaultMutableTreeNode rootNode = (DefaultMutableTreeNode) tree.getModel().getRoot();
        Enumeration e = rootNode.preorderEnumeration();
        StampTreeNode node = (StampTreeNode) e.nextElement();

        // Write out the root node
        builder.buildRoot(node);

        // Write out the children
        while (e.hasMoreElements()) {
            builder.buildNode((StampTreeNode) e.nextElement());
        }

        builder.buildRootEnd();
    }
}
The Adapted Fresno Test of competence in evidence‐based practice

Introduction: Health educators who teach professionals about evidence‐based practice (EBP) need instruments to measure change in skills and knowledge. This study aimed to develop and evaluate the interrater reliability, internal consistency, and responsiveness of the Adapted Fresno Test (AFT) of competence in EBP.

Methods: Reliability testing involved 2 raters. The AFT was completed by 114 occupational therapists before, and 106 therapists after, a 2‐day workshop on EBP. A random sample of 20 completed tests (10 pre‐ and 10 postworkshop = Versions 1 and 2) was scored by 2 raters. Interrater reliability was calculated with the use of intraclass correlation coefficients (ICC [1,2]) for 7 summed subtest scores and the total AFT score. Internal consistency was calculated with the use of Cronbach's α. Responsiveness was calculated by comparing mean pre–post change scores, and by comparing change in low scorers with change in high scorers.

Results: Interrater reliability ranged from good to excellent for individual AFT subtests (Version 1: ICC, 0.80–0.96; Version 2: ICC, 0.68–0.94), and was excellent for total AFT scores (Version 1: ICC, 0.96; Version 2: ICC, 0.91). Internal consistency was acceptable (Cronbach's α, 0.74). Overall mean change on the 156‐point test was 20.6 points (95% confidence interval [CI], 15.6–25.5), 26.8 points (95% CI, 21.6–1.9) for low scorers and −1.8 points (95% CI, −6.4 to 10.1) for high scorers.

Discussion: The AFT has acceptable psychometric properties, and measures change in knowledge and skills of rehabilitation professionals following EBP training. The test is most useful for evaluating change in novice learners.
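For readers unfamiliar with the internal-consistency statistic reported above, Cronbach's α for k items is k/(k − 1) · (1 − sum of item variances / variance of total scores), computed over a respondents-by-items score matrix. The following is a minimal illustrative sketch with made-up numbers (cronbach_alpha is a helper written here for illustration; the scores are not the study data):

import numpy as np

def cronbach_alpha(scores):
    """Cronbach's alpha for a 2-D array: rows = respondents, columns = items."""
    scores = np.asarray(scores, dtype=float)
    k = scores.shape[1]
    item_variances = scores.var(axis=0, ddof=1).sum()
    total_variance = scores.sum(axis=1).var(ddof=1)
    return (k / (k - 1)) * (1 - item_variances / total_variance)

# Made-up subtest scores for five respondents on seven subtests (illustration only).
demo = np.array([[20, 15, 10, 12, 18,  9, 14],
                 [22, 14, 11, 13, 19, 10, 15],
                 [10,  8,  5,  6,  9,  4,  7],
                 [18, 13,  9, 11, 16,  8, 12],
                 [25, 18, 13, 15, 21, 12, 17]])
print(round(cronbach_alpha(demo), 3))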
import {cloneDeep} from 'lodash';
import {fileMapSync} from '../utils';

interface Visited {
  // key: pc address we've been to; value: true
  [address: string]: boolean;
}

type FuncName = 'nop' | 'acc' | 'jmp';

interface Instruction {
  func: FuncName;
  arg: number;
}

function parseInstruction(line: string): Instruction {
  const arr = line.split(' ');
  return {
    func: arr[0] as FuncName,
    arg: Number(arr[1]),
  };
}

interface ProgramOutput {
  accumulator: number;
  error?: 'loop';
}

function runProgram(program: Instruction[]): ProgramOutput {
  let accumulator = 0;
  let pc = 0;
  const visited: Visited = {};
  while (pc < program.length) {
    if (visited[pc]) {
      // loop!
      return {
        accumulator,
        error: 'loop',
      };
    } else {
      visited[pc] = true;
    }

    const instruction = program[pc];
    switch (instruction.func) {
      case 'nop':
        // nop
        break;
      case 'acc':
        accumulator += instruction.arg;
        break;
      case 'jmp':
        pc += instruction.arg;
        continue;
    }
    pc++;
  }
  return {
    accumulator,
  };
}

function part1() {
  const program = fileMapSync('src/day08/input.txt', line =>
    parseInstruction(line)
  );

  return runProgram(program).accumulator;
}

function part2() {
  const program = fileMapSync('src/day08/input.txt', line =>
    parseInstruction(line)
  );

  let indexToSwap = 0;
  let output = runProgram(program);
  do {
    if (program[indexToSwap].func === 'acc') {
      // only swapping jmp and nop
      indexToSwap++;
      continue;
    }

    const clone = cloneDeep(program);
    // swap the nth instruction between jmp and nop
    if (clone[indexToSwap].func === 'jmp') {
      clone[indexToSwap].func = 'nop';
    } else if (clone[indexToSwap].func === 'nop') {
      clone[indexToSwap].func = 'jmp';
    }

    output = runProgram(clone);
    indexToSwap++;
  } while (output?.error === 'loop');

  return output.accumulator;
}

function printSolution() {
  console.log('Part 1:');
  console.log(part1());
  console.log('Part 2:');
  console.log(part2());
}

printSolution();
package details // Auto-generated code. Do not edit. //////////////////////////////////////////////////////////////////////// // Authority var authorityLdrRecordStatus = map[string]string{ "a": "Increase in encoding level", "c": "Corrected or revised", "d": "Deleted", "n": "New", "o": "Obsolete", "s": "Deleted; heading split into two or more headings", "x": "Deleted; heading replaced by another heading", } var authorityLdrTypeOfRecord = map[string]string{ "z": "Authority data", } var authorityLdrCharacterCodingScheme = map[string]string{ " ": "MARC-8", "a": "UCS/Unicode", } var authorityLdrEncodingLevel = map[string]string{ "n": "Complete authority record", "o": "Incomplete authority record", } var authorityLdrPunctuationPolicy = map[string]string{ " ": "No information provided", "c": "Punctuation omitted", "i": "Punctuation included", "u": "Unknown", } // parseAuthorityLdr parses leader data for Authority records data func parseAuthorityLdr(s string) (ldr LdrDesc) { ldr = make(LdrDesc) var c string var l string ldr["(00/05) Record length"] = CodeValue{Code: pluckBytes(s, 0, 5), Label: "", Offset: 0, Width: 5} c, l = codeLookup(authorityLdrRecordStatus, s, 5, 1) ldr["(05/01) Record status"] = CodeValue{Code: c, Label: l, Offset: 5, Width: 1} c, l = codeLookup(authorityLdrTypeOfRecord, s, 6, 1) ldr["(06/01) Type of record"] = CodeValue{Code: c, Label: l, Offset: 6, Width: 1} ldr["(07/02) Undefined character positions"] = CodeValue{Code: pluckBytes(s, 7, 2), Label: "", Offset: 7, Width: 2} c, l = codeLookup(authorityLdrCharacterCodingScheme, s, 9, 1) ldr["(09/01) Character coding scheme"] = CodeValue{Code: c, Label: l, Offset: 9, Width: 1} ldr["(10/01) Indicator count"] = CodeValue{Code: pluckBytes(s, 10, 1), Label: "", Offset: 10, Width: 1} ldr["(11/01) Subfield code length"] = CodeValue{Code: pluckBytes(s, 11, 1), Label: "", Offset: 11, Width: 1} ldr["(12/05) Base address of data"] = CodeValue{Code: pluckBytes(s, 12, 5), Label: "", Offset: 12, Width: 5} c, l = codeLookup(authorityLdrEncodingLevel, s, 17, 1) ldr["(17/01) Encoding level"] = CodeValue{Code: c, Label: l, Offset: 17, Width: 1} c, l = codeLookup(authorityLdrPunctuationPolicy, s, 18, 1) ldr["(18/01) Punctuation policy"] = CodeValue{Code: c, Label: l, Offset: 18, Width: 1} ldr["(19/01) Undefined"] = CodeValue{Code: pluckBytes(s, 19, 1), Label: "", Offset: 19, Width: 1} // (20/04) Entry map ldr["(20/01) Length of the length-of-field portion"] = CodeValue{Code: pluckBytes(s, 20, 1), Label: "", Offset: 20, Width: 1} ldr["(21/01) Length of the starting-character-position portion"] = CodeValue{Code: pluckBytes(s, 21, 1), Label: "", Offset: 21, Width: 1} ldr["(22/01) Length of the implementation-defined portion"] = CodeValue{Code: pluckBytes(s, 22, 1), Label: "", Offset: 22, Width: 1} ldr["(23/01) Undefined"] = CodeValue{Code: pluckBytes(s, 23, 1), Label: "", Offset: 23, Width: 1} return ldr } //////////////////////////////////////////////////////////////////////// // Bibliography var bibliographyLdrRecordStatus = map[string]string{ "a": "Increase in encoding level", "c": "Corrected or revised", "d": "Deleted", "n": "New", "p": "Increase in encoding level from prepublication", } var bibliographyLdrTypeOfRecord = map[string]string{ "a": "Language material", "c": "Notated music", "d": "Manuscript notated music", "e": "Cartographic material", "f": "Manuscript cartographic material", "g": "Projected medium", "i": "Nonmusical sound recording", "j": "Musical sound recording", "k": "Two-dimensional nonprojectable graphic", "m": "Computer 
file", "o": "Kit", "p": "Mixed material", "r": "Three-dimensional artifact or naturally occurring object", "t": "Manuscript language material", } var bibliographyLdrBibliographicLevel = map[string]string{ "a": "Monographic component part", "b": "Serial component part", "c": "Collection", "d": "Subunit", "i": "Integrating resource", "m": "Monograph/item", "s": "Serial", } var bibliographyLdrTypeOfControl = map[string]string{ " ": "No specific type", "a": "Archival", } var bibliographyLdrCharacterCodingScheme = map[string]string{ " ": "MARC-8", "a": "UCS/Unicode", } var bibliographyLdrEncodingLevel = map[string]string{ " ": "Full level", "1": "Full level, material not examined", "2": "Less-than-full level, material not examined", "3": "Abbreviated level", "4": "Core level", "5": "Partial (preliminary) level", "7": "Minimal level", "8": "Prepublication level", "u": "Unknown", "z": "Not applicable", } var bibliographyLdrDescriptiveCatalogingForm = map[string]string{ " ": "Non-ISBD", "a": "AACR 2", "c": "ISBD punctuation omitted", "i": "ISBD punctuation included", "n": "Non-ISBD punctuation omitted", "u": "Unknown", } var bibliographyLdrMultipartResourceRecordLevel = map[string]string{ " ": "Not specified or not applicable", "a": "Set", "b": "Part with independent title", "c": "Part with dependent title", } // parseBibliographyLdr parses leader data for Bibliography records data func parseBibliographyLdr(s string) (ldr LdrDesc) { ldr = make(LdrDesc) var c string var l string ldr["(00/05) Logical record length"] = CodeValue{Code: pluckBytes(s, 0, 5), Label: "", Offset: 0, Width: 5} c, l = codeLookup(bibliographyLdrRecordStatus, s, 5, 1) ldr["(05/01) Record status"] = CodeValue{Code: c, Label: l, Offset: 5, Width: 1} c, l = codeLookup(bibliographyLdrTypeOfRecord, s, 6, 1) ldr["(06/01) Type of record"] = CodeValue{Code: c, Label: l, Offset: 6, Width: 1} c, l = codeLookup(bibliographyLdrBibliographicLevel, s, 7, 1) ldr["(07/01) Bibliographic level"] = CodeValue{Code: c, Label: l, Offset: 7, Width: 1} c, l = codeLookup(bibliographyLdrTypeOfControl, s, 8, 1) ldr["(08/01) Type of control"] = CodeValue{Code: c, Label: l, Offset: 8, Width: 1} c, l = codeLookup(bibliographyLdrCharacterCodingScheme, s, 9, 1) ldr["(09/01) Character coding scheme"] = CodeValue{Code: c, Label: l, Offset: 9, Width: 1} ldr["(10/01) Indicator count"] = CodeValue{Code: pluckBytes(s, 10, 1), Label: "", Offset: 10, Width: 1} ldr["(11/01) Subfield code count"] = CodeValue{Code: pluckBytes(s, 11, 1), Label: "", Offset: 11, Width: 1} ldr["(12/05) Base address of data"] = CodeValue{Code: pluckBytes(s, 12, 5), Label: "", Offset: 12, Width: 5} c, l = codeLookup(bibliographyLdrEncodingLevel, s, 17, 1) ldr["(17/01) Encoding level"] = CodeValue{Code: c, Label: l, Offset: 17, Width: 1} c, l = codeLookup(bibliographyLdrDescriptiveCatalogingForm, s, 18, 1) ldr["(18/01) Descriptive cataloging form"] = CodeValue{Code: c, Label: l, Offset: 18, Width: 1} c, l = codeLookup(bibliographyLdrMultipartResourceRecordLevel, s, 19, 1) ldr["(19/01) Multipart resource record level"] = CodeValue{Code: c, Label: l, Offset: 19, Width: 1} // (20/04) Entry map ldr["(20/01) Length of the length-of-field portion"] = CodeValue{Code: pluckBytes(s, 20, 1), Label: "", Offset: 20, Width: 1} ldr["(21/01) Length of the starting-character-position portion"] = CodeValue{Code: pluckBytes(s, 21, 1), Label: "", Offset: 21, Width: 1} ldr["(22/01) Length of the implementation-defined portion"] = CodeValue{Code: pluckBytes(s, 22, 1), Label: "", Offset: 22, Width: 1} ldr["(23/01) 
Undefined Entry map character position"] = CodeValue{Code: pluckBytes(s, 23, 1), Label: "", Offset: 23, Width: 1} return ldr } //////////////////////////////////////////////////////////////////////// // Classification var classificationLdrRecordStatus = map[string]string{ "a": "Increase in encoding level", "c": "Corrected or revised", "d": "Deleted", "n": "New", } var classificationLdrTypeOfRecord = map[string]string{ "w": "Classification data", } var classificationLdrCharacterCodingScheme = map[string]string{ " ": "MARC 8", "a": "UCS/Unicode", } var classificationLdrEncodingLevel = map[string]string{ "n": "Complete classification record", "o": "Incomplete classification record", } // parseClassificationLdr parses leader data for Classification records data func parseClassificationLdr(s string) (ldr LdrDesc) { ldr = make(LdrDesc) var c string var l string ldr["(00/05) Record length"] = CodeValue{Code: pluckBytes(s, 0, 5), Label: "", Offset: 0, Width: 5} c, l = codeLookup(classificationLdrRecordStatus, s, 5, 1) ldr["(05/01) Record status"] = CodeValue{Code: c, Label: l, Offset: 5, Width: 1} c, l = codeLookup(classificationLdrTypeOfRecord, s, 6, 1) ldr["(06/01) Type of record"] = CodeValue{Code: c, Label: l, Offset: 6, Width: 1} ldr["(07/02) Undefined character positions"] = CodeValue{Code: pluckBytes(s, 7, 2), Label: "", Offset: 7, Width: 2} c, l = codeLookup(classificationLdrCharacterCodingScheme, s, 9, 1) ldr["(09/01) Character coding scheme"] = CodeValue{Code: c, Label: l, Offset: 9, Width: 1} ldr["(10/01) Indicator count"] = CodeValue{Code: pluckBytes(s, 10, 1), Label: "", Offset: 10, Width: 1} ldr["(11/01) Subfield code length"] = CodeValue{Code: pluckBytes(s, 11, 1), Label: "", Offset: 11, Width: 1} ldr["(12/05) Base address of data"] = CodeValue{Code: pluckBytes(s, 12, 5), Label: "", Offset: 12, Width: 5} c, l = codeLookup(classificationLdrEncodingLevel, s, 17, 1) ldr["(17/01) Encoding level"] = CodeValue{Code: c, Label: l, Offset: 17, Width: 1} ldr["(18/02) Undefined character positions"] = CodeValue{Code: pluckBytes(s, 18, 2), Label: "", Offset: 18, Width: 2} // (20/04) Entry map ldr["(20/01) Length of the length-of-field portion"] = CodeValue{Code: pluckBytes(s, 20, 1), Label: "", Offset: 20, Width: 1} ldr["(21/01) Length of the starting-character-position portion"] = CodeValue{Code: pluckBytes(s, 21, 1), Label: "", Offset: 21, Width: 1} ldr["(22/01) Length of the implementation-defined portion"] = CodeValue{Code: pluckBytes(s, 22, 1), Label: "", Offset: 22, Width: 1} ldr["(23/01) Undefined"] = CodeValue{Code: pluckBytes(s, 23, 1), Label: "", Offset: 23, Width: 1} return ldr } //////////////////////////////////////////////////////////////////////// // Community var communityLdrRecordStatus = map[string]string{ "c": "Corrected or revised", "d": "Deleted", "n": "New", } var communityLdrTypeOfRecord = map[string]string{ "q": "Community information", } var communityLdrKindOfData = map[string]string{ "n": "Individual", "o": "Organization", "p": "Program or service", "q": "Event", "z": "Other", } var communityLdrCharacterCodingScheme = map[string]string{ " ": "MARC-8", "a": "UCS/Unicode", } // parseCommunityLdr parses leader data for Community records data func parseCommunityLdr(s string) (ldr LdrDesc) { ldr = make(LdrDesc) var c string var l string ldr["(00/05) Record length"] = CodeValue{Code: pluckBytes(s, 0, 5), Label: "", Offset: 0, Width: 5} c, l = codeLookup(communityLdrRecordStatus, s, 5, 1) ldr["(05/01) Record status"] = CodeValue{Code: c, Label: l, Offset: 5, Width: 1} c, l = 
codeLookup(communityLdrTypeOfRecord, s, 6, 1) ldr["(06/01) Type of record"] = CodeValue{Code: c, Label: l, Offset: 6, Width: 1} c, l = codeLookup(communityLdrKindOfData, s, 7, 1) ldr["(07/01) Kind of data"] = CodeValue{Code: c, Label: l, Offset: 7, Width: 1} ldr["(08/01) Undefined character position"] = CodeValue{Code: pluckBytes(s, 8, 1), Label: "", Offset: 8, Width: 1} c, l = codeLookup(communityLdrCharacterCodingScheme, s, 9, 1) ldr["(09/01) Character coding scheme"] = CodeValue{Code: c, Label: l, Offset: 9, Width: 1} ldr["(10/01) Indicator count"] = CodeValue{Code: pluckBytes(s, 10, 1), Label: "", Offset: 10, Width: 1} ldr["(11/01) Subfield code length"] = CodeValue{Code: pluckBytes(s, 11, 1), Label: "", Offset: 11, Width: 1} ldr["(12/05) Base address of data"] = CodeValue{Code: pluckBytes(s, 12, 5), Label: "", Offset: 12, Width: 5} ldr["(17/03) Undefined character positions"] = CodeValue{Code: pluckBytes(s, 17, 3), Label: "", Offset: 17, Width: 3} // (20/04) Entry map ldr["(20/01) Length of the length-of-field portion"] = CodeValue{Code: pluckBytes(s, 20, 1), Label: "", Offset: 20, Width: 1} ldr["(21/01) Length of the starting-character-position portion"] = CodeValue{Code: pluckBytes(s, 21, 1), Label: "", Offset: 21, Width: 1} ldr["(22/01) Length of the implementation-defined portion"] = CodeValue{Code: pluckBytes(s, 22, 1), Label: "", Offset: 22, Width: 1} ldr["(23/01) Undefined"] = CodeValue{Code: pluckBytes(s, 23, 1), Label: "", Offset: 23, Width: 1} return ldr } //////////////////////////////////////////////////////////////////////// // Holdings var holdingsLdrRecordStatus = map[string]string{ "c": "Corrected or revised", "d": "Deleted", "n": "New", } var holdingsLdrTypeOfRecord = map[string]string{ "u": "Unknown", "v": "Multipart item holdings", "x": "Single-part item holdings", "y": "Serial item holdings", } var holdingsLdrCharacterCodingScheme = map[string]string{ " ": "MARC-8", "a": "UCS/Unicode", } var holdingsLdrEncodingLevel = map[string]string{ "1": "Holdings level 1", "2": "Holdings level 2", "3": "Holdings level 3", "4": "Holdings level 4", "5": "Holdings level 4 with piece designation", "m": "Mixed level", "u": "Unknown", "z": "Other level", } var holdingsLdrItemInformationInRecord = map[string]string{ "i": "Item information", "n": "No item information", } // parseHoldingsLdr parses leader data for Holdings records data func parseHoldingsLdr(s string) (ldr LdrDesc) { ldr = make(LdrDesc) var c string var l string ldr["(00/05) Record length"] = CodeValue{Code: pluckBytes(s, 0, 5), Label: "", Offset: 0, Width: 5} c, l = codeLookup(holdingsLdrRecordStatus, s, 5, 1) ldr["(05/01) Record status"] = CodeValue{Code: c, Label: l, Offset: 5, Width: 1} c, l = codeLookup(holdingsLdrTypeOfRecord, s, 6, 1) ldr["(06/01) Type of record"] = CodeValue{Code: c, Label: l, Offset: 6, Width: 1} ldr["(07/02) Undefined character positions"] = CodeValue{Code: pluckBytes(s, 7, 2), Label: "", Offset: 7, Width: 2} c, l = codeLookup(holdingsLdrCharacterCodingScheme, s, 9, 1) ldr["(09/01) Character coding scheme"] = CodeValue{Code: c, Label: l, Offset: 9, Width: 1} ldr["(10/01) Indicator count"] = CodeValue{Code: pluckBytes(s, 10, 1), Label: "", Offset: 10, Width: 1} ldr["(11/01) Subfield code length"] = CodeValue{Code: pluckBytes(s, 11, 1), Label: "", Offset: 11, Width: 1} ldr["(12/05) Base address of data"] = CodeValue{Code: pluckBytes(s, 12, 5), Label: "", Offset: 12, Width: 5} c, l = codeLookup(holdingsLdrEncodingLevel, s, 17, 1) ldr["(17/01) Encoding level"] = CodeValue{Code: c, Label: l, 
Offset: 17, Width: 1} c, l = codeLookup(holdingsLdrItemInformationInRecord, s, 18, 1) ldr["(18/01) Item information in record"] = CodeValue{Code: c, Label: l, Offset: 18, Width: 1} ldr["(19/01) Undefined character position"] = CodeValue{Code: pluckBytes(s, 19, 1), Label: "", Offset: 19, Width: 1} // (20/04) Entry map ldr["(20/01) Length of the length-of-field portion"] = CodeValue{Code: pluckBytes(s, 20, 1), Label: "", Offset: 20, Width: 1} ldr["(21/01) Length of the starting-character-position portion"] = CodeValue{Code: pluckBytes(s, 21, 1), Label: "", Offset: 21, Width: 1} ldr["(22/01) Length of the implementation-defined portion"] = CodeValue{Code: pluckBytes(s, 22, 1), Label: "", Offset: 22, Width: 1} ldr["(23/01) Undefined"] = CodeValue{Code: pluckBytes(s, 23, 1), Label: "", Offset: 23, Width: 1} return ldr }
There are new questions about just what a grand jury is investigating after prosecutors were thrown out of the grand jury room. In an incredibly rare move this week, the foreperson of a Harris County Grand Jury asked a bailiff to remove prosecutors so jurors could hear from a witness on their own about potentially faulty DWI tests. 13 Undercover first raised questions about the accuracy of the Houston Police Department's so-called BAT vans, but now it's the investigation into how those problems were handled that's causing controversy. For months, some of the people closest to HPD's breath testing vans have told you and us that the vans are unreliable -- meaning the roadside tests they do on alleged drunk drivers may not be accurate. Now the controversy has spilled over into a grand jury investigation, and it's become so heated that a prosecutor working for Harris Co. District Attorney Pat Lykos was thrown out of the grand jury room earlier this week under the threat of arrest. Amanda Culbertson worked for HPD for four and a half years as one of the supervisors overseeing mobile breathalyzer machines known as the BAT vans. She quit when she says HPD's poor maintenance was leading to unreliable test results. But her real trouble started when she spoke out about it. After Culbertson told a judge about her concerns, the DA questioned her credibility and pushed the county to cancel a contract with her new employer, in essence firing her. Culbertson told her lawyer Chip Lewis that the DA is targeting her. "She has not only thought it herself, but been told by people very close to the fire that the District Attorney's Office is after you," said Lewis. This week a Harris County Grand Jury wanted to hear from Culbertson as well as Brent Mayr, a former prosecutor who previously alleged the DA is bullying Culbertson to force her silence. "Clearly retaliation for these individuals expressing opinions that the DA's office didn't like," Mayr said on October 4. When Mayr walked in to testify before the grand jury on Tuesday, the foreperson told prosecutors to get out. They wanted to hear from Mayr and Culbertson without a DA in the room. "They obviously believe that the DA's Office played a role in this case and that they can't be independent," said KTRK Legal Analyst Joel Androphy. While it is rare -- and legal -- the DA's Office threw a fit. Court records show top assistants to the elected DA refused to leave the room until a bailiff threatened to arrest them. The DA tried to force a judge to let them back in, but it was denied. An appeals court said the same thing. "The grand jury is a function of independent people from the community. It's not supposed to be the vote of the DA's office," said . "This is rare and it would happen one out of a hundred times that a grand jury would have the courage enough to basically say to the DA's Office get out of here or you're going to get arrested." We tried to ask the DA about it Friday, but they refused repeated interview requests. We'd love to ask what they knew about the BAT van problems, when, and what they did about it. It may be the same thing the grand jury is looking at on its own; and it may be the reason the DA was so angry about being thrown out of a supposed independent investigation. "It's clear to me that the grand jury has questions about how this was handled from a law enforcement standpoint. 
Now, I don't know if that's specifically confined to how HPD treated her or if they have some beef with what the District Attorney's Office did as well," said Lewis. Androphy told us in cases like this it would be best for the DA to ask for an independent prosecutor, but the DA's Office said they haven't. It would've been one of the questions we asked if anyone at the DA's Office would've been willing to join us for an interview, but no one was. 13 Undercover and Wayne Dolcefino exposed the potential maintenance problems with the BAT vans back in March. We found documents detailing electrical problems that kept some of the very expensive mobile breath vans from ever being used. We showed you emails showing some cops were worried it might affect criminal cases. HPD didn't tell the DA's Office. "It's not that we are trying to hide it, but I don't see a need in telling them that; if they want to know that, we'd be glad to share that information with them," said Captain Carl Driskell with HPD. When we asked if the DA's Office ever asked for the information, Capt. Driskell replied, "No." Stay tuned to Eyewitness News for continuing coverage of the BAT van investigation.
/** * Created by Administrator on 2017/3/30. */ public final class GankDay { /** * category : ["休息视频","iOS","福利","前端","Android"] * error : false * results : {"Android":[{"_id":"58dc6b6e421aa969fd8a3ded","createdAt":"2017-03-30T10:20:30.597Z","desc":"Android 按钮进度条效果","images":["http://img.gank.io/f95959ff-5e9a-4d32-a7d6-09b4c2997376"],"publishedAt":"2017-03-30T11:46:55.192Z","source":"chrome","type":"Android","url":"https://github.com/ishaan1995/ButtonProgressBar","used":true,"who":"Allen"},{"_id":"58dc7e5b421aa969fb0fbede","createdAt":"2017-03-30T11:41:15.386Z","desc":"Android 动画管理库,辅助你管理动画效果。","images":["http://img.gank.io/703b7fe0-dda6-45d9-8e73-89ad824b969c"],"publishedAt":"2017-03-30T11:46:55.192Z","source":"chrome","type":"Android","url":"https://github.com/willowtreeapps/spruce-android","used":true,"who":"带马甲"}],"iOS":[{"_id":"58dbc186421aa969f75cee03","createdAt":"2017-03-29T22:15:34.750Z","desc":"Let's play Pac-Man.","images":["http://img.gank.io/0cf25e74-308d-4379-9d9b-e7f63c9cd3ba"],"publishedAt":"2017-03-30T11:46:55.192Z","source":"web","type":"iOS","url":"https://github.com/atuooo/PacmanPageControl","used":true,"who":"oOatuo"},{"_id":"58dc6a00421aa969fd8a3deb","createdAt":"2017-03-30T10:14:24.710Z","desc":"iOS Material Design 风格的组件库","images":["http://img.gank.io/a9781165-f6fd-4245-bee3-73fdeea6c5bf"],"publishedAt":"2017-03-30T11:46:55.192Z","source":"chrome","type":"iOS","url":"https://github.com/material-components/material-components-ios","used":true,"who":"代码家"},{"_id":"58dc6a34421aa969fd8a3dec","createdAt":"2017-03-30T10:15:16.537Z","desc":"iOS Material Design 风格的动画库,做的好细腻,我给满分。","images":["http://img.gank.io/627220ba-4e59-4c7e-849a-4b897a094588"],"publishedAt":"2017-03-30T11:46:55.192Z","source":"chrome","type":"iOS","url":"https://github.com/material-motion/material-motion-swift","used":true,"who":"代码家"},{"_id":"58dc6ab1421aa969f75cee08","createdAt":"2017-03-30T10:17:21.958Z","desc":"Apple TV 图像视差效果高清重置","images":["http://img.gank.io/bfd6293c-51c7-4514-a139-61d1e5729fb7"],"publishedAt":"2017-03-30T11:46:55.192Z","source":"chrome","type":"iOS","url":"https://github.com/asynchrony/Re-Lax","used":true,"who":"malaboom"}],"休息视频":[{"_id":"58d0a884421aa90f033451ae","createdAt":"2017-03-21T12:13:56.54Z","desc":"史上最厉害的台球玩家,全程跪着看完","publishedAt":"2017-03-30T11:46:55.192Z","source":"chrome","type":"休息视频","url":"http://www.miaopai.com/show/1prlWR4mDKZNP~mfatfEuw__.htm","used":true,"who":"lxxself"}],"前端":[{"_id":"58dc6b4e421aa969f75cee09","createdAt":"2017-03-30T10:19:58.544Z","desc":"Vue - ECharts","publishedAt":"2017-03-30T11:46:55.192Z","source":"chrome","type":"前端","url":"https://justineo.github.io/vue-echarts/demo/","used":true,"who":"daimajia "}],"福利":[{"_id":"58dc5645421aa969fd8a3dea","createdAt":"2017-03-30T08:50:13.178Z","desc":"3-30","publishedAt":"2017-03-30T11:46:55.192Z","source":"chrome","type":"福利","url":"http://7xi8d6.com1.z0.glb.clouddn.com/2017-03-30-17265582_1877445642507654_3057988544061505536_n.jpg","used":true,"who":"dmj"}]} */ @Expose private boolean error; @Expose private Results results; @Expose private List<String> category; public final static class Results { @Expose @SerializedName("Android") public List<GankAPI> androidList; @Expose @SerializedName("iOS") public List<GankAPI> iOSList; @Expose @SerializedName("App") public List<GankAPI> AppList; @Expose @SerializedName("前端") public List<GankAPI> webList; @Expose @SerializedName("拓展资源") public List<GankAPI> expandList; @Expose @SerializedName("瞎推荐") public List<GankAPI> xiatuijianList; @Expose 
@SerializedName("休息视频") public List<GankAPI> videoList; @Expose @SerializedName("福利") public List<GankAPI> girlList; @Override public String toString() { return "Results{" + "androidList=" + androidList + ", iOSList=" + iOSList + ", AppList=" + AppList + ", webList=" + webList + ", expandList=" + expandList + ", xiatuijianList=" + xiatuijianList + ", videoList=" + videoList + ", girlList=" + girlList + '}'; } } public List<String> getCategory() { return category; } // public static ArrayList<MySection> getSctionData(Results gankDayData){ // // ArrayList<MySection> list = new ArrayList<>(); // // if(gankDayData.getAndroidList() != null){ // list.add(new MySection(true,"Android")); // for(GankAPI gankAPI:gankDayData.getAndroidList()){ // list.add(new MySection(gankAPI)); // } // } // if(gankDayData.getiOSList() != null){ // list.add(new MySection(true,"iOS")); // for(GankAPI gankAPI:gankDayData.getiOSList()){ // list.add(new MySection(gankAPI)); // } // } // if(gankDayData.getAppList() != null){ // list.add(new MySection(true,"App")); // for(GankAPI gankAPI:gankDayData.getAppList()){ // list.add(new MySection(gankAPI)); // } // } // if(gankDayData.getWebList() != null){ // list.add(new MySection(true,"前端")); // for(GankAPI gankAPI:gankDayData.getWebList()){ // list.add(new MySection(gankAPI)); // } // } // if(gankDayData.getExpandList() != null){ // list.add(new MySection(true,"扩展资源")); // for(GankAPI gankAPI:gankDayData.getExpandList()){ // list.add(new MySection(gankAPI)); // } // } // if(gankDayData.getXiatuijianList() != null){ // list.add(new MySection(true,"瞎推荐")); // for(GankAPI gankAPI:gankDayData.getXiatuijianList()){ // list.add(new MySection(gankAPI)); // } // } // return list; // } public ArrayList<GankDayBean> getMultiGankData(){ ArrayList<GankDayBean> list = new ArrayList<>(); if(results == null){ return null; } if(results.androidList != null){ String date = results.androidList.get(0).publishedAt; list.add(new GankDayBean(GankDayBean.DATE,date)); }else if (results.iOSList != null){ String date = results.iOSList.get(0).publishedAt; list.add(new GankDayBean(GankDayBean.DATE,date)); }else if (results.AppList != null){ String date = results.AppList.get(0).publishedAt; list.add(new GankDayBean(GankDayBean.DATE,date)); }else if (results.webList != null){ String date = results.webList.get(0).publishedAt; list.add(new GankDayBean(GankDayBean.DATE,date)); }else if (results.expandList != null){ String date = results.expandList.get(0).publishedAt; list.add(new GankDayBean(GankDayBean.DATE,date)); }else if (results.xiatuijianList != null){ String date = results.xiatuijianList.get(0).publishedAt; list.add(new GankDayBean(GankDayBean.DATE,date)); } if(category.contains("Android")){ list.add(new GankDayBean(GankDayBean.CATEGORY,"Android")); for (GankAPI gankAPI:results.androidList){ list.add(new GankDayBean(GankDayBean.ITEM,gankAPI)); } } if(category.contains("iOS")){ list.add(new GankDayBean(GankDayBean.CATEGORY,"iOS")); for (GankAPI gankAPI:results.iOSList){ list.add(new GankDayBean(GankDayBean.ITEM,gankAPI)); } } if(category.contains("App")){ list.add(new GankDayBean(GankDayBean.CATEGORY,"App")); for (GankAPI gankAPI:results.AppList){ list.add(new GankDayBean(GankDayBean.ITEM,gankAPI)); } } if(category.contains("前端")){ list.add(new GankDayBean(GankDayBean.CATEGORY,"前端")); for (GankAPI gankAPI:results.webList){ list.add(new GankDayBean(GankDayBean.ITEM,gankAPI)); } } if(category.contains("拓展资源") || category.contains("扩展资源")){ list.add(new GankDayBean(GankDayBean.CATEGORY,"扩展资源")); for 
(GankAPI gankAPI:results.expandList){ list.add(new GankDayBean(GankDayBean.ITEM,gankAPI)); } } if(category.contains("瞎推荐")){ list.add(new GankDayBean(GankDayBean.CATEGORY,"瞎推荐")); for (GankAPI gankAPI:results.xiatuijianList){ list.add(new GankDayBean(GankDayBean.ITEM,gankAPI)); } } return list; } public boolean isError() { return error; } public Results getResults() { return results; } @Override public String toString() { return "GankDay{" + "results=" + results + '}'+ "Categoty:"+category; } }