content
stringlengths
10
4.9M
/* <gh_stars>0  (dataset residue, not part of the program) */
#include <stdio.h>
#include <stdlib.h>

/*
 * Demonstrates C argument passing: an array parameter decays to a pointer
 * (writes in the callee are visible to the caller), while a plain int is
 * passed by value (writes in the callee are NOT visible to the caller).
 */
void teste(int v[], int p)
{
    printf("%d %d\n", v[0], p);
    v[0] = 7; /* visible in main: v aliases valores */
    p = 9;    /* local copy only: x in main is unchanged */
    printf("%d %d\n", v[0], p);
}

int main(int argc, char *argv[])
{
    int x = 10;
    int valores[10];
    valores[0] = 99;
    printf("%d %d\n", valores[0], x); /* prints 99 10 */
    teste(valores, x);
    printf("%d %d\n", valores[0], x); /* prints 7 10 — array changed, int did not */
    system("PAUSE"); /* Windows-only console pause; fails silently elsewhere — TODO confirm target OS */
    return 0;
}
/**
 * Userinfo API implementation class.
 */
public class UserinfoApiServiceImpl extends UserinfoApiService {

    // Delegate that performs the actual user-info retrieval.
    private UserinfoRequestHandler userinfoRequestHandler;
    private static final Logger log = LoggerFactory.getLogger(UserinfoApiServiceImpl.class);

    public UserinfoApiServiceImpl(UserinfoRequestHandler userinfoRequestHandler) {
        this.userinfoRequestHandler = userinfoRequestHandler;
    }

    /**
     * Handles GET /userinfo: resolves user information for the given
     * Authorization header value.
     *
     * On failure, the exception's error handler supplies both the HTTP
     * status code and the ErrorDTO payload returned to the client; on
     * success the raw user-info payload is returned with 200 OK.
     */
    @Override
    public Response userinfoGet(String authorization, Request request) throws NotFoundException {
        String userInfo;
        try {
            userInfo = userinfoRequestHandler.retrieveUserInfo(authorization);
        } catch (UserInfoException e) {
            String errorMessage = "Error while retrieving user information";
            ErrorDTO errorDTO = RestApiUtil.getErrorDTO(e.getErrorHandler());
            log.error(errorMessage, e);
            return Response.status(e.getErrorHandler().getHttpStatusCode()).entity(errorDTO).build();
        }
        return Response.ok().entity(userInfo).build();
    }
}
package com.hwq.fundament.Thread.PCModel;

/**
 * @author hwq
 * @date 2019/04/13
 * <p>
 * Product store.
 * Producer/consumer model implemented with wait/notify.
 * </p>
 */
public class MallDemo {

    // Current stock level; guarded by this object's monitor.
    private int count;
    private static final int MAX_COUNT = 10;

    /**
     * Producer side: blocks while stock is at capacity, then adds one item
     * and wakes every waiting thread.
     */
    public synchronized void push() {
        // while (not if) guards against spurious wakeups: the condition is
        // re-checked every time the monitor is re-acquired.
        while (count >= MAX_COUNT) {
            try {
                System.out.println(Thread.currentThread().getName() + "生产者库存达到上限,停止生产");
                wait();
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
        }
        count++;
        System.out.println(Thread.currentThread().getName() + "生产者生产,当前库存为:" + count);
        // notifyAll (not notify) so both producers and consumers get a
        // chance to re-evaluate their conditions.
        notifyAll();
    }

    /**
     * Consumer side: blocks while stock is empty, then removes one item
     * and wakes every waiting thread.
     */
    public synchronized void customer() {
        while (count <= 0) {
            try {
                System.out.println(Thread.currentThread().getName() + "库存为零,消费者等待....");
                wait();
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
        }
        count--;
        System.out.println(Thread.currentThread().getName() + "消费者消费,当前库存为:" + count);
        notifyAll();
    }
}
/**
 * Gets the current session of the event.
 *
 * @return the Session at index {@code currentSession} within {@code route},
 *         or {@code null} when the index is out of bounds
 */
public final Session currentSession() {
    // NOTE(review): relies on fields 'currentSession' (int index) and
    // 'route' (Session[]) declared elsewhere in the enclosing class.
    if ((currentSession >= 0) && (currentSession < route.length))
        return route[currentSession];
    else
        return null;
}
package bombs import "embed" //go:embed 1G.gzip //go:embed 10G.gzip //go:embed 1T.gzip var Bombs embed.FS var BombFileNameList = []string{`1G`, `10G`, `1T`} func Exists(filename string) bool { for _, v := range BombFileNameList { if v == filename { return true } } return false }
import { EntityChangeEvent, EntityEvictEvent } from "..";
import { EntityKey } from "../entities/EntityEvent";
import { StateValue } from "./impl/StateValue";

/** Broadcasts a "state manager changed" notification to the monitor window. */
export function postStateManagerMessage(stateManagerId?: string) {
    const message: StateManagerMessage = {
        messageDomain: "graphQLStateMonitor",
        messageType: "stateManagerChange",
        stateManagerId
    };
    postMessage(message, "*");
}

/**
 * Posts a simple-state change to the monitor, but only when the
 * simple-state monitor is enabled via the window-level flag.
 */
export function postSimpleStateMessage(
    stateValue: StateValue,
    changeType: ChangeType,
    data?: any
) {
    if ((window as any).__GRAPHQL_STATE_MONITORS__?.simpleState === true) {
        const message: SimpleStateMessage = {
            messageDomain: "graphQLStateMonitor",
            messageType: "simpleStateChange",
            stateManagerId: stateValue.stateInstance.scopedStateManager.stateManager.id,
            changeType,
            scopePath: stateValue.stateInstance.scopedStateManager.path,
            name: stateValue.stateInstance.state[" $name"],
            // "" marks a parameterized state with the default key; undefined
            // marks a non-parameterized state.
            parameter: stateValue.args?.key ?? (stateValue.stateInstance.state[" $parameterized"] ? "" : undefined),
            data: changeType === "update" ? data : undefined
        };
        postMessage(message, "*");
    }
}

/**
 * Posts a graph-state change (evict or insert/update/delete) to the monitor,
 * but only when the graph-state monitor is enabled via the window-level flag.
 */
export function postGraphStateMessage(
    stateManagerId: string,
    event: EntityEvictEvent | EntityChangeEvent
) {
    if ((window as any).__GRAPHQL_STATE_MONITORS__?.graphState === true) {
        const fields: GraphEventField[] = [];
        if (event.eventType === "evict") {
            // Evictions only carry the value being removed.
            for (const key of event.evictedKeys) {
                const fieldKey = fieldKeyOf(key);
                const field: GraphEventField = {
                    fieldKey,
                    oldValue: event.evictedValue(key)
                };
                fields.push(field);
            }
        } else {
            // Changes carry old/new values except at the insert/delete edges.
            for (const key of event.changedKeys) {
                const fieldKey = fieldKeyOf(key);
                const field: GraphEventField = {
                    fieldKey,
                    oldValue: event.changedType === 'insert' ? undefined : event.oldValue(key),
                    newValue: event.changedType === 'delete' ? undefined : event.newValue(key)
                };
                fields.push(field);
            }
        }
        const message: GraphStateMessage = {
            messageDomain: "graphQLStateMonitor",
            messageType: "graphStateChange",
            stateManagerId,
            changeType: event.eventType === 'evict' ?
                (event.evictedType === 'row' ? 'evict-row' : 'evict-fields') :
                event.changedType,
            typeName: event.typeName,
            id: event.id,
            fields
        }
        postMessage(message, "*");
    }
}

/** True when the eviction-log monitor flag is set on the window. */
export function isEvictLogEnabled() {
    return (window as any).__GRAPHQL_STATE_MONITORS__?.evictLog === true;
}

/**
 * Canonical display key for an entity field: plain name for simple keys,
 * "name:{...variables}" for parameterized keys with non-empty variables.
 */
function fieldKeyOf(key: EntityKey): string {
    if (typeof key === 'string') {
        return key;
    }
    if (key.variables === undefined || key.variables === null) {
        return key.name;
    }
    const parameter = JSON.stringify(key.variables);
    if (parameter === '{}') {
        return key.name;
    }
    return `${key.name}:${parameter}`;
}

/** Union of every message shape the monitor can receive. */
export type Message = StateManagerMessage | SimpleStateMessage | GraphStateMessage | EvictLogMessage;

interface AbstractMessage {
    readonly messageDomain: "graphQLStateMonitor";
}

export interface StateManagerMessage extends AbstractMessage {
    readonly messageType: "stateManagerChange";
    readonly stateManagerId?: string;
}

export interface SimpleStateMessage extends AbstractMessage {
    readonly messageType: "simpleStateChange";
    readonly stateManagerId: string;
    readonly changeType: ChangeType;
    readonly scopePath: string;
    readonly name: string;
    readonly parameter?: string;
    readonly data: any;
}

export interface GraphStateMessage extends AbstractMessage {
    readonly messageType: "graphStateChange";
    readonly stateManagerId: string;
    readonly changeType: "evict-row" | "evict-fields" | ChangeType;
    readonly typeName: string;
    readonly id: any;
    readonly fields: readonly GraphEventField[];
}

export interface EvictLogMessage extends AbstractMessage {
    readonly messageType: "evictLogCreate";
    readonly stateManagerId: string;
    readonly typeName: string;
    readonly id: string;
    readonly field: string;
    readonly parameter: string;
    readonly targetTypeName?: string;
    readonly reason: EvictReasonType;
}

export interface SimpleStateScope {
    readonly name: string;
    readonly states: SimpleState[];
    readonly scopes: readonly SimpleStateScope[];
}

export interface SimpleState {
    readonly name: string;
    readonly value?: any;
    readonly parameterizedValues?: readonly ParameterizedValue[];
}

export interface GraphSnapshot {
    readonly typeMetadataMap: { readonly [key: string]: GraphTypeMetadata };
    readonly query?: GraphObject;
    readonly types: readonly GraphType[];
}

export interface GraphTypeMetadata {
    readonly name: string;
    readonly superTypeName?: string;
    readonly idFieldName?: string;
    readonly declaredFieldMap: { readonly [key: string]: GraphFieldMetadata };
}

export interface GraphFieldMetadata {
    // NOTE(review): "isParamerized" is a pre-existing spelling; kept because
    // external consumers may depend on the property name.
    readonly name: string;
    readonly isParamerized: boolean;
    readonly isConnection: boolean;
    readonly targetTypeName?: string;
}

export interface GraphType {
    readonly name: string;
    readonly objects: readonly GraphObject[];
}

export interface GraphObject {
    readonly id: string;
    readonly runtimeTypeName: string;
    readonly fields: readonly GraphField[];
}

export interface GraphField {
    readonly name: string;
    readonly value?: any;
    readonly parameterizedValues?: readonly ParameterizedValue[];
}

export type GraphValue = string | Readonly<string> | {
    readonly edges: ReadonlyArray<{ readonly node: string, readonly [key: string]: any }>,
    readonly [key:string]: any
};

export interface ParameterizedValue {
    readonly parameter: string;
    readonly value?: any;
}

export interface GraphEventField {
    readonly fieldKey: string;
    readonly oldValue?: any;
    readonly newValue?: any;
}

export type ChangeType = "insert" | "delete" | "update";

export type EvictReasonType =
    "unknown-owner" |
    "no-contains" |
    "no-range" |
    "contains-returns-undefined" |
    "position-returns-undefined" |
    "page-style-pagination" |
    "forward-tail" |
    "backward-head"
;
/**
 * Demo fragment wiring a banner carousel to an indicator view.
 * Created by keayuan on 2020/4/8.
 *
 * @author keayuan
 */
public class BannerFragment extends IFragment {

    @Override
    protected int getLayoutId() {
        return R.layout.fr_banner;
    }

    @Override
    protected void onInitView(View rootView, Bundle savedInstanceState) {
        // Banner backed by 6 simple demo entries.
        BannerView bannerView = getView(R.id.banner);
        bannerView.setAdapter(new SimpleAdapter<>(Utils.getSimpleList(6)));
        // Indicator count must match the adapter's item count.
        BannerIndicatorView indicatorView = getView(R.id.indicator);
        indicatorView.setCount(6);
        // The indicator tracks the banner's scroll position.
        bannerView.setOnScrollListener(indicatorView);
        bannerView.setOnItemClickListener((parent, view, position) -> showToast("click " + position));
    }
}
/**
 * Curried helper: builds a mapper that strips `attribute` from an element.
 * The element is mutated in place and returned unchanged otherwise, so the
 * result can be chained through `Array.prototype.map`.
 */
export const removeAttr = (attribute: string) => {
    return (item: Element): Element => {
        item.removeAttribute(attribute);
        return item;
    };
};
// ScaleDeployment scales a deployment and waits until it is scaled.
// It returns the replica count observed *before* scaling (useful for a later
// restore), or nil when there was nothing to do (nil desired count, missing
// deployment, or the deployment is already at the desired count in which
// case the current count is returned).
func ScaleDeployment(ctx context.Context, client client.Client, desiredReplicas *int32, name, namespace string) (*int32, error) {
	// Nil means "no desired count requested": nothing to do.
	if desiredReplicas == nil {
		return nil, nil
	}
	replicas, err := GetDeploymentReplicas(ctx, client, namespace, name)
	// A missing deployment is treated as a no-op rather than an error.
	if apierrors.IsNotFound(err) {
		return nil, nil
	}
	if err != nil {
		return nil, fmt.Errorf("failed to retrieve the replica count of deployment %q: '%w'", name, err)
	}
	// Already at the desired count — return the current value unchanged.
	if replicas == nil || *replicas == *desiredReplicas {
		return replicas, nil
	}
	if err := kubernetes.ScaleDeployment(ctx, client, kutil.Key(namespace, name), *desiredReplicas); err != nil {
		return nil, fmt.Errorf("failed to scale the replica count of deployment %q: '%w'", name, err)
	}
	// Block until the scale operation has actually taken effect.
	if err := WaitUntilDeploymentScaled(ctx, client, namespace, name, *desiredReplicas); err != nil {
		return nil, fmt.Errorf("failed to wait until deployment %q is scaled: '%w'", name, err)
	}
	return replicas, nil
}
<filename>containerdefs/loader.go package containerdefs import ( "fmt" ) type DefinitionLoader interface { LoadContainerDefinitions() ([]*ContainerDefinition, error) ValidateURI() error } // LoadContainerDefinitions scans a local directory (might have been passed from the command line) // for container definitions, reads them into memory and unmarshalls them into ContainerDefinition // structs. func LoadContainerDefinitions(loader DefinitionLoader) ([]*ContainerDefinition, error) { // validate the uri that's been passed to the definition, this might be ensuring that a given // directory exists or that a url returns a 200 status code. if err := loader.ValidateURI(); err != nil { return []*ContainerDefinition{}, err } // load container definitions. By default this is from disk, but could be from a remote // location if a loader for that source exists. definitions, err := loader.LoadContainerDefinitions() if err != nil { return definitions, err } // validate all container definitions individually, dropping any that // don't pass validation var definitionsValidated []*ContainerDefinition for _, definition := range definitions { if definition.Validate() { definitionsValidated = append(definitionsValidated, definition) } } // validate container definitions as a group, if this doesn't pass then we // bail out since it's impossible to know what the user meant to do. 
for _, definition := range definitionsValidated { if !definitionIsUnique(definition, definitionsValidated) { return []*ContainerDefinition{}, fmt.Errorf("Container definitions clash (name or ports): %s", definition.ContainerName) } } return definitionsValidated, nil } func definitionIsUnique(cd *ContainerDefinition, cds []*ContainerDefinition) bool { // check for clashing container names var containerNameCount int for _, ocd := range cds { if ocd.ContainerName == cd.ContainerName { containerNameCount = containerNameCount + 1 } } // return false if any containers with same name found if containerNameCount > 1 { return false } // check for clashing port numbers var portCount int for port, _ := range cd.PortMapping { port = 0 for _, ocd := range cds { for oport, _ := range ocd.PortMapping { if oport == port { portCount = portCount + 1 } } } // return false if any ports with same number found if portCount > 1 { return false } } return true }
<gh_stars>10-100 import { CacheProvider, CacheStorageKey } from '@discordoo/providers' export async function cacheProviderCountsPolyfill<K = string, V = any, P extends CacheProvider = CacheProvider>( provider: P, keyspace: string, storage: CacheStorageKey, predicates: ((value: V, key: K, provider: P) => boolean | Promise<boolean>)[] ): Promise<number[]> { const results = Array.from({ length: predicates.length }, () => 0) if (results.length === 0) return [] await provider.forEach<K, V, P>(keyspace, storage, async (value, key, prov) => { let i = 0 for await (const predicate of predicates) { if (await predicate(value, key, prov)) { results[i]++ } i++ } }) return results }
/**
 * Copyright (C) 2015 DataTorrent, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.datatorrent.lib.algo;

import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.junit.Assert;
import org.junit.Test;

import com.datatorrent.lib.testbench.CollectorTestSink;

/**
 *
 * Functional tests for {@link com.datatorrent.lib.algo.FilterKeyVals}<p>
 *
 */
public class FilterKeyValsTest {

  /** Sums every numeric value across all emitted key/value maps. */
  @SuppressWarnings("unchecked")
  int getTotal(List<Object> list) {
    int ret = 0;
    for (Object map: list) {
      for (Map.Entry<String, Number> e: ((HashMap<String, Number>)map).entrySet()) {
        ret += e.getValue().intValue();
      }
    }
    return ret;
  }

  /**
   * Test node logic emits correct results
   */
  @SuppressWarnings({ "rawtypes", "unchecked" })
  @Test
  public void testNodeProcessing() throws Exception {
    FilterKeyVals<String,Number> oper = new FilterKeyVals<String,Number>();
    CollectorTestSink sortSink = new CollectorTestSink();
    oper.filter.setSink(sortSink);

    // First filter set is cleared immediately (exercises clearKeys), then
    // the effective filter becomes {e=200, f=2, blah=2} plus {a=2}.
    HashMap<String,Number> filter = new HashMap<String,Number>();
    filter.put("b",2);
    oper.setKeyVals(filter);
    oper.clearKeys();
    filter.clear();
    filter.put("e", 200);
    filter.put("f", 2);
    filter.put("blah", 2);
    oper.setKeyVals(filter);
    filter.clear();
    filter.put("a", 2);
    oper.setKeyVals(filter);

    oper.beginWindow(0);
    HashMap<String, Number> input = new HashMap<String, Number>();
    input.put("a", 2);
    input.put("b", 5);
    input.put("c", 7);
    input.put("d", 42);
    input.put("e", 202);
    // NOTE(review): this overwrites the "e"->202 entry above (HashMap keys
    // are unique); only e=200 reaches the operator.
    input.put("e", 200);
    input.put("f", 2);
    oper.data.process(input);
    // Matches: a=2, e=200, f=2 -> 3 tuples totalling 204.
    Assert.assertEquals("number emitted tuples", 3, sortSink.collectedTuples.size());
    Assert.assertEquals("Total filtered value is ", 204, getTotal(sortSink.collectedTuples));
    sortSink.clear();

    input.clear();
    input.put("a", 5);
    oper.data.process(input);
    // a=5 does not match the a=2 key/value pair -> nothing emitted.
    Assert.assertEquals("number emitted tuples", 0, sortSink.collectedTuples.size());
    sortSink.clear();

    input.clear();
    input.put("a", 2);
    input.put("b", 33);
    input.put("f", 2);
    oper.data.process(input);
    // Matches: a=2, f=2.
    Assert.assertEquals("number emitted tuples", 2, sortSink.collectedTuples.size());
    Assert.assertEquals("Total filtered value is ", 4, getTotal(sortSink.collectedTuples));
    sortSink.clear();

    input.clear();
    input.put("b", 6);
    input.put("a", 2);
    input.put("j", 6);
    input.put("e", 2);
    input.put("dd", 6);
    input.put("blah", 2);
    input.put("another", 6);
    input.put("notmakingit", 2);
    oper.data.process(input);
    // Matches: a=2, blah=2 (e=2 does not match e=200).
    Assert.assertEquals("number emitted tuples", 2, sortSink.collectedTuples.size());
    Assert.assertEquals("Total filtered value is ", 4, getTotal(sortSink.collectedTuples));
    sortSink.clear();

    input.clear();
    input.put("c", 9);
    // Inverse mode: non-matching pairs are emitted instead.
    oper.setInverse(true);
    oper.data.process(input);
    Assert.assertEquals("number emitted tuples", 1, sortSink.collectedTuples.size());
    Assert.assertEquals("Total filtered value is ", 9, getTotal(sortSink.collectedTuples));
    oper.endWindow();
  }
}
Ultrastructural aspects of the antimesometral implantation in the rabbit. A scanning and transmission electron microscope study of blastocysts immediately prior to ovo implantation and of antimesometrial implantation sites was conducted. External membranes of the eggs showed at 6 days post coitum the imprint of the endometrial surface, pointing to the early establishment of contact between egg and uterus. At 7 days the antimesometrial region showed flattening with continued evidence of gland openings that appear to be the elective sites of attraction of the trophoblastic knobs. Attachment of the trophoblastic knobs followed on days 8 and 9.
# <reponame>thomcom/dask-labextension
"""
A Dashboard handler for the Dask labextension.

This proxies the bokeh server http and ws requests through the notebook
server, preventing CORS issues.
"""
from urllib import parse

from tornado import web

from notebook.utils import url_path_join
from jupyter_server_proxy.handlers import ProxyHandler

from .manager import manager


class DaskDashboardHandler(ProxyHandler):
    """
    A handler that proxies the dask dashboard to the notebook server.

    Currently the dashboard is assumed to be running on `localhost`.

    The functions `http_get`, `open`, `post`, `put`, `delete`,
    `head`, `patch`, `options`, and `proxy` are all overriding
    the base class with our own request handler parameters of
    `cluster_id` and `proxied_path`.

    The `proxy` function uses the cluster ID to get the port
    for the bokeh server from the Dask cluster manager.
    This port is then used to call the proxy method on the base class.
    """

    async def http_get(self, cluster_id, proxied_path):
        # HTTP GETs are funnelled through the shared proxy implementation.
        return await self.proxy(cluster_id, proxied_path)

    async def open(self, cluster_id, proxied_path):
        # Websocket upgrade: resolve the target host/port, then delegate.
        host, port = self._get_parsed(cluster_id)
        return await super().proxy_open(host, port, proxied_path)

    # We have to duplicate all these for now, I've no idea why!
    # Figure out a way to not do that?
    def post(self, cluster_id, proxied_path):
        return self.proxy(cluster_id, proxied_path)

    def put(self, cluster_id, proxied_path):
        return self.proxy(cluster_id, proxied_path)

    def delete(self, cluster_id, proxied_path):
        return self.proxy(cluster_id, proxied_path)

    def head(self, cluster_id, proxied_path):
        return self.proxy(cluster_id, proxied_path)

    def patch(self, cluster_id, proxied_path):
        return self.proxy(cluster_id, proxied_path)

    def options(self, cluster_id, proxied_path):
        return self.proxy(cluster_id, proxied_path)

    def proxy(self, cluster_id, proxied_path):
        # Resolve the dashboard host/port for the cluster and let the
        # jupyter-server-proxy base class do the actual proxying.
        host, port = self._get_parsed(cluster_id)
        return super().proxy(host, port, proxied_path)

    def _get_parsed(self, cluster_id):
        """
        Given a cluster ID, get the hostname and port of its bokeh server.
        """
        # Get the cluster by ID. If it is not found,
        # raise an error.
        cluster_model = manager.get_cluster(cluster_id)
        if not cluster_model:
            raise web.HTTPError(404, f"Dask cluster {cluster_id} not found")

        # Construct the proper websocket proxy link from the cluster dashboard
        dashboard_link = cluster_model["dashboard_link"]
        dashboard_link = _normalize_dashboard_link(dashboard_link, self.request)

        # Parse the url and return
        parsed = parse.urlparse(dashboard_link)
        port = parsed.port
        if not port:
            # Fall back to the scheme's default port when none is explicit.
            port = 443 if parsed.scheme == 'https' else 80
        if not parsed.hostname:
            raise web.HTTPError(500, "Dask dashboard URI malformed")
        return parsed.hostname, port


def _normalize_dashboard_link(link, request):
    """
    Given a dashboard link, make sure it conforms to what we expect.
    """
    if not link.startswith("http"):
        # If a local url is given, assume it is using the same host
        # as the application, and prepend that.
        link = url_path_join(f"{request.protocol}://{request.host}", link)
    if link.endswith("/status"):
        # If the default "status" dashboard is given, strip it.
        link = link[:-len("/status")]
    return link
def try_get_model_flavor(model) -> Optional[str]:
    """Best-effort detection of which ML framework a model object belongs to.

    Each candidate framework is imported lazily so none of them becomes a
    hard dependency; a framework that is not installed is simply skipped.

    Args:
        model: An arbitrary object that may be a Spark, scikit-learn or
            H2O model.

    Returns:
        ``'spark'``, ``'sklearn'`` or ``'h2o'`` when the object matches a
        known base class, otherwise ``None``.
    """
    # BUG FIX: the previous bare `except:` clauses swallowed *every*
    # exception (including KeyboardInterrupt/SystemExit); only a missing
    # framework should be ignored, so catch ImportError specifically.
    try:
        from pyspark.ml.base import Model as SparkModel
        if isinstance(model, SparkModel):
            return 'spark'
    except ImportError:
        pass
    try:
        from sklearn.base import BaseEstimator as ScikitModel
        if isinstance(model, ScikitModel):
            return 'sklearn'
    except ImportError:
        pass
    try:
        from h2o.h2o import ModelBase as H2OModel
        if isinstance(model, H2OModel):
            return 'h2o'
    except ImportError:
        pass
    # No framework matched (or none was installed).
    return None
import { publishEvent } from "../events"
import {
  Event,
  UserGroup,
  GroupCreatedEvent,
  GroupDeletedEvent,
  GroupUpdatedEvent,
  GroupUsersAddedEvent,
  GroupUsersDeletedEvent,
  GroupAddedOnboardingEvent,
  GroupPermissionsEditedEvent,
} from "@budibase/types"
import { isScim } from "../../context"

/** Publishes a USER_GROUP_CREATED audit event for the given group. */
async function created(group: UserGroup, timestamp?: number) {
  const properties: GroupCreatedEvent = {
    groupId: group._id as string,
    // viaScim flags changes that originated from a SCIM integration.
    viaScim: isScim(),
    audited: {
      name: group.name,
    },
  }
  await publishEvent(Event.USER_GROUP_CREATED, properties, timestamp)
}

/** Publishes a USER_GROUP_UPDATED audit event for the given group. */
async function updated(group: UserGroup) {
  const properties: GroupUpdatedEvent = {
    groupId: group._id as string,
    viaScim: isScim(),
    audited: {
      name: group.name,
    },
  }
  await publishEvent(Event.USER_GROUP_UPDATED, properties)
}

/** Publishes a USER_GROUP_DELETED audit event for the given group. */
async function deleted(group: UserGroup) {
  const properties: GroupDeletedEvent = {
    groupId: group._id as string,
    viaScim: isScim(),
    audited: {
      name: group.name,
    },
  }
  await publishEvent(Event.USER_GROUP_DELETED, properties)
}

/** Publishes an event recording that `count` users were added to the group. */
async function usersAdded(count: number, group: UserGroup) {
  const properties: GroupUsersAddedEvent = {
    count,
    groupId: group._id as string,
    viaScim: isScim(),
    audited: {
      name: group.name,
    },
  }
  await publishEvent(Event.USER_GROUP_USERS_ADDED, properties)
}

/** Publishes an event recording that `count` users were removed from the group. */
async function usersDeleted(count: number, group: UserGroup) {
  const properties: GroupUsersDeletedEvent = {
    count,
    groupId: group._id as string,
    viaScim: isScim(),
    audited: {
      name: group.name,
    },
  }
  await publishEvent(Event.USER_GROUP_USERS_REMOVED, properties)
}

/** Publishes an onboarding event for a group created during onboarding. */
async function createdOnboarding(groupId: string) {
  const properties: GroupAddedOnboardingEvent = {
    groupId: groupId,
    onboarding: true,
  }
  await publishEvent(Event.USER_GROUP_ONBOARDING, properties)
}

/** Publishes an event recording a change to the group's role permissions. */
async function permissionsEdited(group: UserGroup) {
  const properties: GroupPermissionsEditedEvent = {
    // NOTE(review): non-null assertion assumes roles are always present
    // when permissions are edited — confirm against callers.
    permissions: group.roles!,
    groupId: group._id as string,
    audited: {
      name: group.name,
    },
  }
  await publishEvent(Event.USER_GROUP_PERMISSIONS_EDITED, properties)
}

export default {
  created,
  updated,
  deleted,
  usersAdded,
  usersDeleted,
  createdOnboarding,
  permissionsEdited,
}
The staff at Notre Dame did not stand in the way as more than 100 students chose to peacefully leave their own graduation to protest commencement speaker selection. As the probe into the alleged Russian meddling in 2016 presidential elections deepens, Vice President Mike Pence has hired his own lawyer to represent him in the special counsel investigation, his office confirmed Thursday. The lawyer, Richard Cullen, a former attorney general, has previously worked with Pence and he will now help him respond to the federal inquiries. “I can confirm that the vice president has retained Richard Cullen of McGuire Woods to assist him in responding to inquiries by the special counsel. The vice president is focused entirely on his duties and promoting the President’s agenda and looks forward to a swift conclusion of this matter,” Pence’s communications director Jarrod Agen said in a statement, according to multiple reports. His office said the decision to retain Cullen underscores his desire to fully cooperate with any inquiries related to the Russia probe. By hiring Cullen, Pence has followed in the footsteps of President Donald Trump who hired Marc Kasowitz, his long-time legal advisor, in May as his counsel for the Russia election probe. Kasowitz, who has represented Trump for more than 15 years, will now represent him for the federal investigation of the alleged collusion between Trump’s campaign and Russia in the presidential elections. Read: House Dems Blocked Witness Interviews By Intelligence Committee According to CNN, Pence’s office had been in the process of hiring Cullen for the Russia probe for the last few weeks. The decision came a day after the Washington Post reported Justice Department special counsel Robert Mueller was investigating Trump for obstruction of justice. Trump lashed out at the Post in a Thursday tweet, calling the report “phony.” Who is Richard Cullen? 
Cullen, the chairman of McGuireWoods and a senior litigation partner, has vast experience in defending multinational corporations in investigations conducted by the Department of Justice, according to McGuireWoods’ official website. Former FBI Director James Comey, who was recently fired by Trump, and Cullen worked together at McGuireWoods. A former Virginia attorney general, Cullen served as the Attorney for the Eastern District of Virginia under the administration of former President George W. Bush where he directed investigations of financial institutions and defense contractors. He also worked with M. Caldwell Butler, a five-term Republican congressman from Virginia, as his press aide, reports said. Read: Is Jared Kushner Becoming The Center Of Russia Investigation? Cullen was also the special counsel to former senator Paul S. Trible Jr. (R-Virginia), who served on the Senate select committee investigating the Iran-contra scandal. According to the Washington Post, the Iran contra affair was a grand scheme that violated American law and policy. The charges brought by a special prosecutor in 1990 resulted in the conviction of several members of the Reagan administration, including Reagan's national security adviser, Adm. John Poindexter. He was also the counsel for Congressman Tom DeLay in the Abramoff bribery scandal. DeLay was cleared in the corruption case, involving White House staff, lobbyists and Bush administration officials, after a six-year investigation in 2010. During the 2000 presidential election, Cullen worked with George W. Bush’s legal team during the Florida recount. Tiger Woods' ex-wife Elin Nordegren was also represented by Cullen in the high-profile divorce case with the American golfer. Related Articles
/**
 * Chat recording component.
 * Created by gtq on 2016/11/27.
 */
public class RecordView extends LinearLayout {

    private String Tag = "RecordView";
    /** Constantly timing little red dot */
    private ImageView redDotView;
    /** Timing text */
    private TextView timerTxt;
    /** The tape on the left shows */
    private LinearLayout recordLayout;
    /** Cancel the left shows */
    private LinearLayout releaseLayout;
    /** The tape movement view */
    private DiffuseView recordImg;
    /** The recording utility class */
    private AudioUtil audioUtil;

    public RecordView(Context context) {
        super(context);
        initView();
    }

    public RecordView(Context context, AttributeSet attrs) {
        super(context, attrs);
        initView();
    }

    public RecordView(Context context, AttributeSet attrs, int defStyleAttr) {
        super(context, attrs, defStyleAttr);
        initView();
    }

    // Inflates the layout and registers the audio-recorder callbacks.
    protected void initView() {
        audioUtil = AudioUtil.getInstance();
        audioUtil.setOnAudioRecordListener(new AudioUtil.AudioRecordListener() {
            // Wall-clock time (ms) when the recorder became ready; used to
            // compute the clip duration in recordFinish.
            private long duration;

            @Override
            public void startError() {
                // Recorder failed to start — presumably a missing microphone
                // permission; point the user at the settings page.
                permissionDialog();
            }

            @Override
            public void wellPrepared() {
                duration = TimeUtil.getCurrentTimeInLong();
            }

            @Override
            public void recording(long recordtime, int decibel) {
                // Clamp the level into the 1..5 range expected by DiffuseView.
                if (decibel > 5) {
                    decibel = 5;
                } else if (decibel <= 0) {
                    decibel = 1;
                }
                recordImg.startDiffuse(decibel);
                timerTxt.setText(TimeUtil.getTime(recordtime,TimeUtil.DATE_FORMAT_SECOND));
                // Toggle the red dot every tick so it blinks while recording.
                if (redDotView.getVisibility() == VISIBLE) {
                    redDotView.setVisibility(INVISIBLE);
                } else {
                    redDotView.setVisibility(VISIBLE);
                }
            }

            @Override
            public void recordFinish(String path) {
                stopRecord();
                int dur = (int) (TimeUtil.getCurrentTimeInLong() - duration) / 1000;
                // Discard the clip when the touch ended on the cancel half of
                // the screen, or when it is shorter than 2 seconds; otherwise
                // send it as a voice message.
                if ((Math.abs(recordX) < SystemDataUtil.getScreenWidth() / 2) || dur < 2) {
                    FileUtil.deleteFile(path);
                } else {
                    MsgSend.sendOuterMsg(MsgType.Voice, path, dur);
                }
            }
        });
        View view = View.inflate(getContext(), R.layout.view_record, this);
        redDotView = (ImageView) view.findViewById(R.id.img1);
        timerTxt = (TextView) view.findViewById(R.id.txt1);
        recordLayout = (LinearLayout) view.findViewById(R.id.relativelayout_1);
        releaseLayout = (LinearLayout) view.findViewById(R.id.relativelayout_2);
        recordImg = (DiffuseView) view.findViewById(R.id.record);
        RelativeLayout.LayoutParams params = new RelativeLayout.LayoutParams(ViewGroup.LayoutParams.MATCH_PARENT, ViewGroup.LayoutParams.MATCH_PARENT);
        recordImg.setLayoutParams(params);
    }

    // Shows a dialog sending the user to this app's system settings page so
    // the recording permission can be granted.
    protected void permissionDialog() {
        DialogUtil.showAlertTextView(getContext(), getContext().getString(R.string.Set_tip_title), getContext().getString(R.string.Link_Unable_to_get_the_voice_data), "", getContext().getString(R.string.Set_Setting), false, new DialogUtil.OnItemClickListener() {
            @Override
            public void confirm(String value) {
                Intent intent = new Intent(Settings.ACTION_APPLICATION_DETAILS_SETTINGS);
                intent.setData(Uri.parse("package:" + getContext().getPackageName()));
                getContext().startActivity(intent);
            }

            @Override
            public void cancel() {
            }
        }, false);
    }

    // Switches the UI into recording mode and starts audio capture.
    protected void startRecord() {
        recordLayout.setVisibility(VISIBLE);
        releaseLayout.setVisibility(GONE);
        audioUtil.prepareAudio();
        recordImg.setTranslationX(0);//This must be added, or in the recording for the first time Huawei mobile phones will not show the recording button.
    }

    // Resets the UI after recording ends (hide view, reset timer/animation).
    protected void stopRecord() {
        setVisibility(GONE);
        recordImg.setTranslationX(SystemDataUtil.getScreenWidth() - recordImg.getWidth());
        recordImg.stopDiffuse();
        recordLayout.setVisibility(VISIBLE);
        timerTxt.setText(TimeUtil.getTime(0,TimeUtil.DATE_FORMAT_SECOND));
        releaseLayout.setVisibility(GONE);
    }

    // Last horizontal touch position; recordFinish uses it to decide whether
    // to send the clip or discard it.
    private float recordX;

    /**
     * Moving components
     */
    public void slideVRecord(MotionEvent event, int[] location) {
        float transX = event.getX() + location[0];
        int transY = location[1] - SystemUtil.dipToPx(60)/2;
        recordX = transX;
        switch (event.getAction()) {
            case MotionEvent.ACTION_DOWN:
                LogManager.getLogger().d(Tag, "ACTION_DOWN");
                startRecord();
                recordImg.setLocationY(location[1] + SystemUtil.dipToPx(40) / 2);
                recordImg.startDiffuse(1);
                moveLcoation((int) transX);
                leftLocationY(transY);
                break;
            case MotionEvent.ACTION_MOVE:
                LogManager.getLogger().d(Tag, "ACTION_MOVE" + event.getX() + "location:" + transX);
                moveLcoation((int) transX);
                break;
            case MotionEvent.ACTION_UP:
            case MotionEvent.ACTION_CANCEL:
                LogManager.getLogger().d(Tag, "ACTION_UP");
                audioUtil.finishRecorder();
                leftLocationY(SystemDataUtil.getScreenHeight() - releaseLayout.getHeight());
                break;
        }
    }

    // NOTE(review): method name contains a pre-existing typo ("Lcoation");
    // kept as-is because it is protected and subclasses may override it.
    // Left half of the screen = cancel zone (red), right half = send zone (green).
    protected void moveLcoation(int transX) {
        if (Math.abs(transX) < SystemDataUtil.getScreenWidth() / 2) {
            releaseLayout.setVisibility(VISIBLE);
            recordLayout.setVisibility(GONE);
            recordImg.setDiffuseState(transX, getContext().getResources().getColor(R.color.color_red));
        } else {
            releaseLayout.setVisibility(GONE);
            recordLayout.setVisibility(VISIBLE);
            recordImg.setDiffuseState(transX, getContext().getResources().getColor(R.color.color_green));
        }
    }

    // Vertically aligns both status layouts with the touch position.
    public void leftLocationY(int transY) {
        recordLayout.setTranslationY(transY);
        releaseLayout.setTranslationY(transY);
    }
}
/* * Copyright 2021 MeshDynamics. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0 * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import React, { Component } from "react"; import { Modal, Button } from "react-bootstrap"; import { connect } from "react-redux"; import { CubeButton } from "../../components/common/CubeButton"; import { cubeConstants } from "../../constants"; import { IStoreState, ITemplateSetNameLabel, } from "../../reducers/state.types"; import { cubeService } from "../../services"; import "./TemplateSetBrowse.css"; import classNames from "classnames"; export interface ITemplateSetBrowseProps { templateSetName: string; templateSetLabel: string; handleTemplateSetNameLabelSelect: ( templateSetName: string, templateSetLabel: string ) => void; selectedApp: string; customerId: string; } export interface ITemplateSetBrowseState { selectedTemplateSetName: string; selectedTemplateSetLabel: string; showBrowseModal: boolean; loadingList: boolean; templateSetNameLabelsList: ITemplateSetNameLabel[]; selectedRowName: string; selectedRowLabel: string; start: number; totalNumResults: number; nameFilter: string; labelFilter: string; } const numResults = 20; class TemplateSetBrowse extends Component< ITemplateSetBrowseProps, ITemplateSetBrowseState > { constructor(props: ITemplateSetBrowseProps) { super(props); this.state = { selectedTemplateSetName: props.templateSetName, selectedTemplateSetLabel: props.templateSetLabel, showBrowseModal: false, loadingList: false, templateSetNameLabelsList: [], selectedRowName: 
props.templateSetName, selectedRowLabel: props.templateSetLabel, start: 0, totalNumResults: 0, nameFilter: "", labelFilter: "", }; } static getDerivedStateFromProps(props: ITemplateSetBrowseProps) { const { templateSetName, templateSetLabel } = props; return { selectedTemplateSetName: templateSetName, selectedTemplateSetLabel: templateSetLabel, }; } loadList = async () => { const { selectedApp, customerId, } = this.props; const {start, nameFilter, labelFilter} = this.state; this.setState({ loadingList: true }); const {templateSetNameLabelsList, totalNumResults} = await cubeService.getTemplateSetNameLabels( customerId, selectedApp, start, numResults, nameFilter, labelFilter ); this.setState({ loadingList: false, templateSetNameLabelsList, totalNumResults }); }; onBrowseClick = () => { this.setState({ showBrowseModal: true }); this.loadList(); }; onHideBrowseModal = () => { this.setState({ showBrowseModal: false, start: 0 }); }; onSelectBtnClick = () => { const { templateSetNameLabelsList, selectedRowName, selectedRowLabel } = this.state; this.props.handleTemplateSetNameLabelSelect(selectedRowName, selectedRowLabel); this.setState({ showBrowseModal: false }); }; onRowSelectClick = (name: string, label: string) => { this.setState({ selectedRowName: name, selectedRowLabel: label}); }; onRowSelectDblClick = (name: string, label: string) => { this.setState({ selectedRowName: name, selectedRowLabel: label}, this.onSelectBtnClick); }; goToNextPage = () => { let {start} = this.state; start += numResults this.setState({start}, this.loadList) } goToPrevPage = () => { let {start} = this.state; start -= numResults this.setState({start}, this.loadList) } setNameFilter = (name: string) => { this.setState({nameFilter: name}) } setLabelFilter = (label: string) => { this.setState({labelFilter: label}) } onSearchClick = () => { this.setState({start: 0}, this.loadList) } renderModals = () => { const { templateSetNameLabelsList, selectedRowName, selectedRowLabel, loadingList, start, 
nameFilter, labelFilter, totalNumResults } = this.state; return ( <> <Modal show={this.state.showBrowseModal} onHide={this.onHideBrowseModal} > <Modal.Header closeButton>Browse Comparison Rules</Modal.Header> <Modal.Body> <div className="margin-bottom-10 gcBrowse-modal-body-container"> <div className="row margin-bottom-10"> <div className="col-md-4"> <div className="label-n">NAME</div> <div className="value-n"> <input value={nameFilter} onChange={(event) => this.setNameFilter(event.target.value)} className="width-100 h-20px" type="text" /> </div> </div> {/* <div className="col-md-2"></div> */} <div className="col-md-4"> <div className="label-n">LABEL</div> <div className="value-n"> <input value={labelFilter} onChange={(event) => this.setLabelFilter(event.target.value)} className="width-100 h-20px" type="text" /> </div> </div> <div className="col-md-4"><CubeButton size="sm" label="Search" onClick={this.onSearchClick} className="margin-top-10"/></div> </div> </div> { loadingList ? <div className="tsBrowse-spinner-root"> <div className="tsBrowse-spinner-inner"> <i className="fa fa-spinner fa-spin"></i> </div> </div> : <div className="tsBrowse-modal-body-table-container"> <table className="table table-condensed table-hover table-striped"> <thead> <th>Name</th> <th>Label</th> <th>Created on</th> </thead> <tbody> {templateSetNameLabelsList.map( ({ name, label, timestamp }) => ( <tr onClick={() => this.onRowSelectClick(name, label)} onDoubleClick={() => this.onRowSelectDblClick(name, label)} className={ (name === selectedRowName && label === selectedRowLabel) ? 
"selected-row" : "" } > <td>{name}</td> <td>{label}</td> <td>{new Date(timestamp).toLocaleString()}</td> </tr> ) )} </tbody> </table> </div> } </Modal.Body> <Modal.Footer> <div className="pull-left"> <CubeButton faIcon="fa-caret-left" onClick={this.goToPrevPage} className={classNames({"disabled": start <= 0})} style={{marginRight: 0}} /> <CubeButton faIcon="fa-caret-right" onClick={this.goToNextPage} className={classNames({"disabled": start + templateSetNameLabelsList.length >= totalNumResults})} style={{marginLeft: 0}} /> <span>{loadingList ? "Loading..." : <>Displaying <strong>{start} - {start + templateSetNameLabelsList.length}</strong> of {totalNumResults}</>}</span> </div> <CubeButton label="Select" onClick={this.onSelectBtnClick} /> </Modal.Footer> </Modal> </> ); }; render() { const { selectedTemplateSetName, selectedTemplateSetLabel } = this.state; return ( <div style={{display: 'flex', justifyContent: "space-between"}}> <div style={{display: 'flex', alignItems: 'center'}}> {selectedTemplateSetName ? <span>{selectedTemplateSetName} {selectedTemplateSetLabel}</span> : <span>No Comparison Rules selected</span>} </div> <CubeButton label="" onClick={this.onBrowseClick} faIcon="fa-folder-open" title="Browse Comparison Rules"></CubeButton> {this.renderModals()} </div> ); } } const mapStateToProps = (state: IStoreState) => { const { cube: { selectedApp }, authentication: { user: { customer_name: customerId }, }, } = state; return { selectedApp, customerId, }; }; const mapDispatchToProps = (dispatch: any) => ({ }); export default connect(mapStateToProps, mapDispatchToProps)(TemplateSetBrowse);
<reponame>santosh653/marathon-client package mesosphere.marathon.client.utils; import feign.Param; import java.util.Objects; /** * Removes "/" prefix that is commonly used in Marathon app id. */ public class AppIdNormalizer implements Param.Expander { @Override public String expand(Object o) { String id = Objects.requireNonNull(o).toString().trim(); if (id.startsWith("/")) return id.substring(1); return id; } }
/**
 * Checks whether an input line is empty or null, recording an error
 * message for the offending line when it is.
 *
 * @param strLine a line from the input file
 * @param line    the line number from the input file
 * @return true if the line is empty or null; false otherwise
 */
protected boolean isLineEmpty(String strLine, long line) {
    final boolean empty = (strLine == null) || strLine.equals("");
    if (empty) {
        errorMessages.add("["+line+"] Empty line -- "+strLine);
    }
    return empty;
}
We like to think of our laws as a logical system, and at their best, they are. On the other hand, some are like strange old houses that have been added to, tinkered with, repainted and adjusted over the years, according to the theories of past decades. In the house there are musty, long-closed rooms. We’re pretty sure you’d fall through the floor if you went in there, so nobody does. Over the years, it gets bigger and bigger. And if you stand back and look at it, the overall effect is pretty strange. Here is a tour of nine of the oddest Canadian laws. Selling (or buying) home brew? Get ready to break some rocks. You can make your own beer, drink the beer you made, or give it to other people. You can’t sell it though, not legally. But what happens if you do? On this point, the federal Excise Act takes a hard right turn into the 19th century. For the first offence, the unlucky culprit can be sentenced to three months with hard labour and for a second offence (after the offender gets out with a tan and an upper-body workout courtesy of Her Majesty) six months of hard labour. Hard labour referred not to any work that could be done by inmates, but punitive, high-intensity work designed to be a punishment in itself. Sometimes it was useful work, like building roads, and sometimes it was meaningless, like filling and emptying wells, walking on a treadmill, or turning a heavy crank thousands of times a day. “I question whether that form of punishment is ever going to be imposed,” says Ottawa lawyer Michael Spratt. “There are obviously Charter implications that come into play. I think if anyone was sentenced to that, or if the Crown was seeking that, we would quickly see a challenge to those sections.” It’s not clear when hard labour died out in Canada’s jails and prisons (convicts laboured in quarries in Kingston, Ont. until 1963.) And it certainly isn’t clear what a modern provincial jail would do with a prisoner who arrived to serve a sentence with hard labour. 
“Practically speaking, I don’t know how you would possibly do that,” says Toronto criminal lawyer Sean Robichaud. “I’m sure that anyone who was sentenced to anything like that, their lawyer would be obligated to challenge that on constitutional grounds. It wouldn’t pass, but that puts an unnecessary strain, and ridicule, on the justice system.” (The Foreign Enlistment Act, passed hastily in the 1930s to discourage Canadians from fighting in the Spanish Civil War, still provides for sentences of up to two years with hard labour.) Thou shalt not market Viagra Before sidenafil was discovered in the 1990s, there was no effective drug for male erectile dysfunction. For centuries, an endless line of frauds and quacks claimed otherwise, and made money selling men an astonishing variety of cures, from snakes to strychnine. (One 19th-century doctor recommended cannabis.) In that context, Parliament seemed to be protecting the gullible or desperate when it banned “advertis(ing) or publish(ing) an advertisement of any means, instructions, medicine, drug or article intended or represented as a method for restoring sexual virility” in a section of the Criminal Code titled “Offences Tending to Corrupt Morals“. Real erectile dysfunction drugs, prescribed by real doctors and sold by real pharmacies, are here — but so, to this day, is the provision banning advertising them. It’s a defence to argue that “the public good was served” by advertising an erectile dysfunction drug but, oddly, not a defence to argue that it actually works. (A few paragraphs above, the Criminal Code bans ‘crime comics,’ the legacy of a moral panic in the 1950s.) We are not alarmed Queen Victoria endured a number of assassination attempts. All were more or less ineffectual (ineffective, but also inept), but she must have found them alarming. Her subjects, loyally, created a new offence of “alarming Her Majesty,” which made its way into Canadian law, where it has remained ever since. 
It has never, ever been used. The law resulted from an 1840 incident in which someone fired pistols loaded only with gunpowder at Queen Victoria. But if something similar were to happen during a royal visit in modern Canada, mainstream criminal charges would easily cover the situation, Robichaud says. “In being convicted of those regular types of charges — assault, threats, attempted murder, even — the penalties themselves are not going to be any less.” Don’t pretend to practice witchcraft. (Practicing real witchcraft is fine.) Who commits this offence? Anyone who fraudulently (our emphasis) “pretends to exercise or to use any kind of witchcraft, sorcery, enchantment or conjuration,” or “pretends from his skill in or knowledge of an occult or crafty science to discover where or in what manner anything that is supposed to have been stolen or lost may be found.” People are actually charged under this section more often than one might think (which is to say, ever.) A case comes up every few years, and one is winding through the Toronto courts now. Looking closely at the problem, pretending to practice witchcraft is really just a form of fraud that targets superstitious people. Also, fraud carries more serious penalties, so it’s not clear why a Crown would ever proceed with a witchcraft charge. The Criminal Code is cluttered with variations on offences that have been added over the years, when one simple, clear provision would do, Spratt argues. “We have theft of cattle, theft from clam beds and a general theft. Why do we need to have those other provisions, and not just rely on the general provision?” What is blasphemous libel? No one seems to know We should all be sure not to commit blasphemous libel, since it’s against the law. But what is blasphemous libel? The Criminal Code doesn’t say. Is it libel, with characteristics of blasphemy? Is it blasphemy, with characteristics of libel? “I can’t tell you what it is,” Spratt says. 
“No lawyer alive today has had to deal with it.” Nobody has been convicted of blasphemous libel since the 1920s; a charge against a Sault Ste. Marie movie theatre in 1980 for showing Monty Python’s Life of Brian was quickly stayed. “These obscure statutes can be abused,” Robichaud warns. “We look at these and laugh, and say ‘What is blasphemous libel?’ and say that nobody has been prosecuted for the last hundred years on it, and sort of chuckle at it. But with something like that you may have a particular political movement get into power, and then they start prosecuting on these sorts of things. Then it’s no longer a joke, because that otherwise unused law can be used.” Conscription if necessary Outside big cities, Canadians in the 19th century didn’t have much in the way of municipal governments. They did have some civic problems to solve, though. One was organizing schools, and another was figuring out how to maintain roads. One common way of keeping roads in order, in Ontario and the Maritimes, was a form of conscription called statute labour. Property owners were expected to work on the roads for a number of days that related to the value of their farms (the idea seems to have been that more successful farmers could afford to show up with a hired man or two.) For nearly 50 years in Prince Edward Island, from 1853 to 1901, the right to vote was based on performing statute labour. It was widely accepted as a fair system. When a provincial government tried to abolish it in 1877, furious voters immediately threw them out. Statute labour fell into disuse in the 20th century (apart from anything else, contractors were more efficient), and mostly laws changed to reflect that. PEI’s statute labour law was abolished in 1948, for example. In Ontario, though the practice died out long ago, the law never went anywhere, and the Statute Labour Act is still on the books ready for use. 
On paper, property owners in rural townships are one council decision away from being sent out to fix the roads, shovels in hand. Ontario’s Statute Labour Act is an interesting case study of what a really neglected law looks like. Some effort has gone into it in our lifetimes — measures are all in the metric system, for example. On the other hand, dollar values haven’t been updated in many, many decades: Owners of property worth less than $300 are liable to work for one day $3-500: Two days $5-700: Three days $7-900: Four days For every $300 over $900, one additional day We’ve calculated this for you: owners of property worth anything over $108,900 are liable to work for free for the township for 365 days a year. (On the other hand, you could pay the township a fee instead of working. That amount hasn’t been adjusted for inflation either — $3 a day.) Jingle all the way In Ontario, if you operate a sleigh “drawn by a horse or other animal” you need to attach at least two bells. Failing that, you can be fined, but not more than $5. But who would ever have a sleigh without sleigh bells? The Senate, bulwark of the propertied classes, sort of When the Senate was conceived in 1867, its creators saw it as more or less a Canadian equivalent of the House of Lords. The upper house was supposed to stand for the interests of property and balance the “will of the mob” in the House of Commons. The Constitution still requires senators to have at least $4,000 in real property, over and above debts, to be appointed. At the time it was a substantial amount of money, but it’s never been adjusted for inflation. Nobody who is likely to be appointed to the Senate today would have trouble showing that they owned at least $4,000 of property, since a tiny down payment on a tiny condo would qualify. You would think. In 1997, Peggy Butts, a nun who had taken a lifetime vow of poverty, was named to the Senate. (She gave her whole salary to charity.) 
To satisfy the property requirement, her religious order transferred a small piece of land to her name. And last year, Nunavut senator Dennis Patterson pointed out that the property requirement excludes 83 per cent of people in that territory from being named to the Senate. (A senator whose real property falls under the $4,000 mark — for example, a senator who decided to sell his house and rent an apartment instead — is automatically expelled. It’s not clear, however, whether a senator can be expelled for actual misconduct, or who has the authority to do it. Don Meredith resigned before the question could be settled.) READ: Expelling Don Meredith from the Senate will be no easy task Unlawful drilling This has nothing to do with oil and gas, and everything to do with 19th-century authorities’ fear of rebellion. Section 70 of the Criminal Code gives Cabinet the power to prohibit groups of people from being “trained or drilled to the use of arms.” (Being trained or drilled to the use of arms is fine until Cabinet tells you to stop, interestingly.) The provision, like many others, was copied straight from British law. It came out of the aftermath of the Napoleonic Wars, when depression and hunger plagued Britain. There were widespread calls for parliamentary reform, and the thousands of people who had marched for years in Wellington’s armies seemed to a nervous Parliament like they might be a basis for a British version of the French Revolution. Canada has a modest history of armed revolt, of course. The rebels who drilled in 1837 with pikes and shotguns to follow William Lyon Mackenzie were breaking the Unlawful Drilling Act of 1819. But might it be useful, maybe, to have a tool for dealing with groups who are preparing a violent rebellion? “There’s no problem with having criminal prohibitions on situations that arise rarely,” Spratt says. “Many of these provisions are used rarely, and restraint should be used, but that doesn’t disqualify them from being in the Criminal Code.”
// Pretend this is an interview code challenge: we are a paint company
// that needs to draw lines between pairs of numbers. Each order gives a
// range (From, To). Ranges may overlap, and contiguous ranges
// (e.g. 1-10 and 11-20) count as one line, so CombineRanges must merge
// the input until the minimum number of lines remains.
func TestCombineRanges(t *testing.T) {
	input := []coverage.Range{
		{1, 10},
		{11, 20},
		{3, 5},
	}
	// {3,5} is inside {1,10}, and {11,20} is contiguous with it,
	// so everything collapses to a single range.
	want := []coverage.Range{{1, 20}}
	got := CombineRanges(input)
	if !reflect.DeepEqual(want, got) {
		t.Errorf("Want\n%v\ngot %v", want, got)
	}
}
An Analysis of the Nature, Causes and Marketing Implications of Seasonality in the Occupancy Performance of English Hotels Time series factor analysis separates two principal components of seasonality from the monthly occupancy time series of 279 English hotels over the period January 1992 to December 1994. The regional patterns of these two components, measuring the nature and intensity of seasonality and length of season, are presented, and their involvement in the occupancy profiles of individual hotels is explained using multiple regression models. The marketing implications of the results are discussed, incorporating the results of a structured interview survey of participating hoteliers.
/** * Created by CodeGenerator on 2020/10/05. */ @RestController @RequestMapping("/user") public class UserController { @Autowired private GlobalVar globalVar; @Resource private UserService userService; private List<String> okPassword = Arrays.asList("0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ,.?*!@#$%^&=+-".split("")); @PostMapping("/register") public Result register(User user) throws NoSuchAlgorithmException { user.setUsername(user.getUsername().trim()); user.setPassword(user.getPassword().trim()); if (user.getUsername().length() < 4) { return ResultGenerator.genFailResult("用户名长度小于4"); } if (user.getPassword().length() < 4) { return ResultGenerator.genFailResult("密码长度小于4"); } boolean ok = true; Set<String> noOkChar = new HashSet<>(); for (String s : user.getPassword().split("")) { if (!okPassword.contains(s)) { ok = false; noOkChar.add(s); } } if (!ok) { StringBuilder sb = new StringBuilder(); for (int i = 0;i < noOkChar.size();i++) { sb.append(noOkChar.toArray()[i]); } return ResultGenerator.genFailResult("密码中包含了不合法的字符 [" + sb.toString() + "]"); } user.setId(UUID.randomUUID().toString()); User u = userService.findBy("username",user.getUsername()); if (u == null) { user.setPassword(globalVar.md5(user.getPassword())); userService.save(user); return ResultGenerator.genSuccessResult(user); } else { return ResultGenerator.genFailResult("用户名已存在"); } } @PostMapping("/login") public Result login(User user) throws NoSuchAlgorithmException { Condition condition = new Condition(User.class); user.setPassword(globalVar.md5(user.getPassword())); condition.createCriteria().andEqualTo("username",user.getUsername()).andEqualTo("password",user.getPassword()); List<User> us = userService.findByCondition(condition); if (us.size() > 0) { String token = UUID.randomUUID().toString(); user = us.get(0); globalVar.addLoginUser(token,user.getId()); user.setId(token); // if (user.getUsername().equals(globalVar.getAdminUserName())) { // globalVar.setAdminToken(token); // 
} return ResultGenerator.genSuccessResult(user); } else { return ResultGenerator.genFailResult("用户名或密码错误"); } } @PostMapping("/delete") public Result delete(@RequestParam String id,String token) { if (!globalVar.checkAdmin(token)) { return ResultGenerator.genFailResult("无权限"); } userService.deleteById(id); return ResultGenerator.genSuccessResult(); } @PostMapping("/update") public Result update(User user,String token) { if (globalVar.checkSelfOrAdmin(token,user.getId())) { return ResultGenerator.genFailResult("无权限"); } userService.update(user); return ResultGenerator.genSuccessResult(); } @PostMapping("/detail") public Result detail(@RequestParam String id,String token) { if (!globalVar.checkAdmin(token)) { return ResultGenerator.genFailResult("无权限"); } User user = userService.findById(id); return ResultGenerator.genSuccessResult(user); } @PostMapping("/info") public Result info(String token) { String id = globalVar.getUserId(token); if (id != null) { Condition condition = new Condition(User.class); condition.createCriteria().andEqualTo("id",id); List<User> us = userService.findByCondition(condition); if (us.size() > 0) { User u = us.get(0); u.setId(token); return ResultGenerator.genSuccessResult(u).setMessage(globalVar.checkAdmin(token) ? 
"admin" : "noadmin"); } else { return ResultGenerator.genFailResult("未找到个人信息或登录超时"); } } else { return ResultGenerator.genFailResult("未找到个人信息或登录超时"); } } @PostMapping("/list") public Result list(@RequestParam(defaultValue = "0") Integer page, @RequestParam(defaultValue = "0") Integer size,String token) { if (!globalVar.checkAdmin(token)) { return ResultGenerator.genFailResult("无权限"); } PageHelper.startPage(page, size); List<User> list = userService.findAll(); PageInfo pageInfo = new PageInfo(list); return ResultGenerator.genSuccessResult(pageInfo); } //根据条件进行查找 ex => data : { id : 12 } @PostMapping("/listBy") public Result listBy(@RequestParam Map<String,String> cond, @RequestParam(defaultValue = "0") Integer page, @RequestParam(defaultValue = "0") Integer size,String token) { if (!globalVar.checkAdmin(token)) { return ResultGenerator.genFailResult("无权限"); } Condition condition = new Condition(User.class); Iterator<String> keys = cond.keySet().iterator(); while (keys.hasNext()) { String k = keys.next(); if (k.equals("page") || k.equals("size")) { continue; } condition.createCriteria().andEqualTo(k,cond.get(k)); } PageHelper.startPage(page, size); List<User> list = userService.findByCondition(condition); PageInfo pageInfo = new PageInfo(list); return ResultGenerator.genSuccessResult(pageInfo); } }
// Placeholder for the Annotation type; the real fields and getters
// (getMessage/getLine/getOffset/getLength) are defined elsewhere.
class Annotation {
    //...
}

/**
 * Prints the message, line, offset and length of an annotation to stdout,
 * one labeled field per line.
 *
 * @param annotation the annotation to print; assumed non-null — TODO confirm
 */
void printAnnotation(Annotation annotation) {
    System.out.println("Message: " + annotation.getMessage());
    System.out.println("Line: " + annotation.getLine());
    System.out.println("Offset: " + annotation.getOffset());
    System.out.println("Length: " + annotation.getLength());
}
/* nanobind/stl/detail/nb_list.h: base class of list casters Copyright (c) 2022 Wenzel Jakob All rights reserved. Use of this source code is governed by a BSD-style license that can be found in the LICENSE file. */ #pragma once #include <nanobind/nanobind.h> NAMESPACE_BEGIN(NB_NAMESPACE) NAMESPACE_BEGIN(detail) template <typename Value_, typename Entry> struct list_caster { NB_TYPE_CASTER(Value_, const_name(NB_TYPING_LIST "[") + make_caster<Entry>::Name + const_name("]")); using Caster = make_caster<Entry>; template <typename T> using has_reserve = decltype(std::declval<T>().reserve(0)); bool from_python(handle src, uint8_t flags, cleanup_list *cleanup) noexcept { size_t size; PyObject *temp; /* Will initialize 'size' and 'temp'. All return values and return parameters are zero/NULL in the case of a failure. */ PyObject **o = seq_get(src.ptr(), &size, &temp); value.clear(); if constexpr (is_detected_v<has_reserve, Value_>) value.reserve(size); Caster caster; bool success = o != nullptr; for (size_t i = 0; i < size; ++i) { if (!caster.from_python(o[i], flags, cleanup)) { success = false; break; } value.push_back(((Caster &&) caster).operator cast_t<Entry &&>()); } Py_XDECREF(temp); return success; } template <typename T> static handle from_cpp(T &&src, rv_policy policy, cleanup_list *cleanup) { object ret = steal(PyList_New(src.size())); if (ret.is_valid()) { Py_ssize_t index = 0; for (auto &value : src) { handle h = Caster::from_cpp(forward_like<T>(value), policy, cleanup); if (!h.is_valid()) { ret.reset(); break; } NB_LIST_SET_ITEM(ret.ptr(), index++, h.ptr()); } } return ret.release(); } }; NAMESPACE_END(detail) NAMESPACE_END(NB_NAMESPACE)
/// <summary> /// Clear all waypoints from the app map. /// </summary> void TheMap::ClearWaypoints() { m_mapControl->MapElements->Clear(); m_itineraryLine = nullptr; }
The NBA is becoming increasingly international, with an influx of foreign players. Let’s see who are the best foreign Magic players of all time. The NBA is becoming increasingly international, with an influx of players from all over the world. The Orlando Magic have certainly embraced this trend. They currently have six foreign-born players on the roster. The NBA was once a league reluctant to draft and sign foreign players. But with the emergence of quality foreign players such as Dirk Nowitzki, Drazen Petrovic, Toni Kukoc, Vlade Divac and other 1990s players, franchises began to trust newcomers from overseas. The NBA announced that, in the 2017 season, the league will feature 113 international players from 41 countries. This is a new record and a sure indicator of tendencies. International scouting is definitely on the rise and teams are tracking the best foreign leagues in search of talent to give them an edge over other teams. This means that each team has almost four foreign players on average. The Magic are not the exception. On the contrary, the Magic are one of the leaders in the whole league with six international players. There are two Croatians (Damjan Rudez and Mario Hezonja), Serge Ibaka from Republic of the Congo (but playing for Spain), Nikola Vucevic from Montenegro (born in Switzerland) and Bismack Biyombo from Democratic Republic of the Congo. The Magic had a number of foreign players throughout the history of the franchise. Only a few of those made a lasting impression. But their presence was integral to Magic history. Let’s see which foreign players have contributed the most to the Magic in franchise history.
import { useContext } from "react";
import RouterContext from "./routerContext";

export interface RedirectProps {
  to: string;
}

/**
 * Hook exposing an imperative `push` that updates the browser URL via the
 * History API and forces the router context to re-render.
 */
export function useRouter(): { push: (path: string) => void } {
  const { forceUpdate } = useContext(RouterContext);

  const push = (path: string): void => {
    history.pushState(null, "", path);
    forceUpdate();
  };

  return { push };
}
/* File fetch structure
 * Accepts: MAIL stream
 *	    message # to fetch
 *	    pointer to return body
 *	    option flags
 * Returns: envelope of this message, body returned in body value
 *
 * Fetches the "fast" information as well
 */

ENVELOPE *phile_structure (MAILSTREAM *stream,unsigned long msgno,BODY **body,
			   long flags)
{
				/* this driver exposes a single cached
				 * envelope/body pair from LOCAL; msgno and
				 * flags are ignored here — presumably the
				 * stream holds exactly one message (verify
				 * against the driver's open routine) */
  if (body) *body = LOCAL->body;/* body pointer is optional */
  return LOCAL->env;
}
<reponame>nilanjanpal/ExpenseTracker import { Component, OnInit, ViewChild, AfterViewInit } from '@angular/core'; import { Store } from '@ngrx/store'; import { ExpenseService } from 'src/app/services/expense.service'; import { ExpenseHistory, ExpenseState } from 'src/app/store/expense.reducer'; import * as appReducer from './../../store/app.reducer'; import { Observable, Subscription } from 'rxjs'; import { MatTableDataSource, MatTable } from '@angular/material/table'; import { MatDialog, MatDialogRef } from '@angular/material/dialog'; import { DialogComponent } from 'src/app/shared/dialog/dialog.component'; import * as expenseActions from './../../store/expense.action'; import { NgForm } from '@angular/forms'; @Component({ selector: 'app-expense-history', templateUrl: './expense-history.component.html', styleUrls: ['./expense-history.component.css'], }) export class ExpenseHistoryComponent implements AfterViewInit, OnInit { @ViewChild('searchForm') searchForm: NgForm; @ViewChild('expenseTable') expenseTable: MatTable<any>; isLoading$: Observable<boolean>; displayedColumn$: Observable<string[]>; startDate: Date; endDate: Date; datasource = new MatTableDataSource<ExpenseHistory>(); subscription: Subscription; dialogRef: MatDialogRef<DialogComponent>; filterStartDate: Date; filterEndDate: Date; constructor(private expenseService: ExpenseService, private store: Store<ExpenseState>, private matDialog: MatDialog) { } ngOnInit(): void { this.startDate = new Date((new Date()).getFullYear(), (new Date()).getMonth(), 1); this.endDate = new Date((new Date()).getFullYear(), (new Date()).getMonth() + 1, 0); this.isLoading$ = this.store.select(appReducer.getExpenseLoading); } ngAfterViewInit(): void { setTimeout(() => this.filterData(), 0); this.searchForm.valueChanges.subscribe( result => { this.filterStartDate = this.searchForm.value.endDate; this.filterEndDate = this.searchForm.value.startDate; } ); } filterData() { let startDate: Date; let endDate: Date; 
this.store.dispatch(new expenseActions.SetLoadingOn); if (this.searchForm !== undefined) { startDate = this.searchForm.value.startDate === undefined ? this.startDate : this.searchForm.value.startDate; endDate = this.searchForm.value.endDate === undefined ? this.endDate : this.searchForm.value.endDate; } else { startDate = this.startDate; endDate = this.endDate; } this.expenseService.getExpenses(startDate, endDate).then( () => { this.displayedColumn$ = this.store.select(appReducer.getDisplayedColumns); if (this.subscription !== undefined) { this.subscription.unsubscribe(); } this.subscription = this.store.select(appReducer.getExpenses).subscribe( expenses => { this.datasource.data = [...expenses]; this.store.dispatch(new expenseActions.SetLoadingOff); } ); }); } }
/**
 * Removes the given node from the open list.
 *
 * @param toRemove the node to be removed from open
 */
private void _removeNode(Node toRemove) {
    this.open.remove(toRemove);
    // Dead code kept from a previous revision: re-ordering the priority
    // queue when fmin changes was considered and disabled.
    /* if(prevFmin < open.getFmin()){//fmin changed, need to reorder priority Queue
        _reorder();
    }*/
}
import requests


def delete_gitlab_project(project_id, token):
    """
    Delete the given project from GitLab.

    Parameters
    ----------
    project_id: str
        The ID of the project to delete.
    token: str
        The user's GitLab token
    """
    auth_headers = {"Private-Token": "{}".format(token)}
    delete_url = "https://gitlab.com/api/v4/projects/{}".format(project_id)
    requests.delete(delete_url, headers=auth_headers)
I heard a little about inequality at Netroots Nation, but there was a depressing sameness to it. Everyone wants to talk about income inequality. No one wants to talk about our horrifying wealth inequality and the enormous damage it does to our society. The halls were full of people working to increase the minimum wage, many of them quite inspiring, including Saru Jayaraman, author of Behind the Kitchen Door and a leader in fight for fair pay to restaurant workers. I heard a few remarks about Capital in the Twenty-First Century by Thomas Piketty, but always in the context of income inequality. I heard about campaign finance, worker’s rights, and the whole gamut of liberal programs for making capitalism work better. In one invitation-only meeting, the speaker, a brilliant guy, used Piketty’s language to explain that rising income inequality is not the automatic result of the workings of our capitalist market system, but is caused by specific policy decisions made to benefit the rich. But the next step was out of reach. These policy choices are driven by the filthy rich, to maintain their mountains of money, and to make those mountains higher. The refusal to face that fact means that we have to fight every issue like it was Armageddon. If you want to raise the minimum wage, you have to fight off every crackpot billionaire and all the millionaires who own or manage a business, including relatively small businesses. If you want to change laws related to worker rights, you have to fight off every crackpot billionaire and all the millionaires who own or manage a business. If you want restaurant workers to be paid the minimum wage, you have to fight off the National Restaurant Association, the lobbying arm of crackpot billionaires and millionaires who own or operate restaurants and restaurant chains. 
If you want to change the campaign finance laws, good luck, because you not only have to fight off the Supreme Court, you have to fight off every crackpot billionaire and all the millionaires who own or manage a business. The plain fact is that these crackpot billionaires and all the millionaires who own or manage businesses are ruining the country, using their fabulous wealth to trample every decent impulse of decent people into the muck. There are probably 60,000 households in this country who can afford to buy a congressman or a senator or a crucial bureaucrat to support their pet issue by paying $300,000 into their campaigns or hiring their family or contributing to their favorite charity or putting a little something into their pockets on their way in or out of the revolving door into government service or paying them $200,000 for a meaningless speech and a meet and greet. It takes hundreds of thousands of regular people giving $50-$1,000 at a time to fund the election of Elizabeth Warren, probably all the political money these families can afford. And, while getting Warren a bully pulpit has worked, in most other cases we get someone who is not always on our side, or we fail even to get them elected. We know that the Oligarchs have three principal goals: 1. Protecting and preserving wealth 2. Insuring the unrestricted use of wealth 3. Acquiring more wealth. They accomplish those goals with the support of the legislators, bureaucrats, and judges they buy. They use their spare change to create institutions and groups and pay their favorite writers and academics to justify whatever they want. None of this is a significant cost to crackpot billionaires or to multi-millionaires. And it works brilliantly. Jayaraman explains it this way: Congress won’t vote against the demands of the National Restaurant Association. 
The reason food servers, who are mostly women, are poorly paid is that the NRA dominates the minimum wage discussion in Congress and in the cities where the issue is raised. You are poor precisely because they are rich. They take your work, and your productivity, give you as little as the law will permit or less, and keep the rest for themselves. Nothing you want or need happens unless it meets their three goals. This happens because they have created circumstances in which they do not have to deal with goals 1 and 2, and are free to focus their money on goal 3: increasing their piles of money, inevitably at your expense. Liberals have a set of policies to change things, worker rights, minimum wage, campaign finance reform, and on and on, and each one of them has to be fought out in full financial and social battle array. The energy expenditure is enormous for us on each issue, but it’s just swatting flies for the oligarchy. The solution is obvious: we have to force these cheats and thugs to move back to goal 1, defending their wealth. Piketty has laid out the plan. We tax wealth world-wide. We raise marginal tax rates on the highest incomes. We increase the estate tax on huge estates. We enforce those rules through aggressive use of the records the financial sector maintains. He suggests imposing a small tax at first on wealth, so that we can know who actually owns it. I’d add that then we say that undisclosed and untaxed wealth escheats to the nation that finds it, with a reward to any individual who is directly or indirectly responsible for disclosure. As a side benefit, we get to learn about drug lords, arms runners and Ponzi Schemers. This idea isn’t about some supposed need to raise revenues to fund the budget, because we know there is no need to do that. Taxes For Revenue Are Obsolete, as we learned from Beardsley Ruml, Chairman of the Federal Reserve Bank of New York, in 1946. 
Taxes serve a number of purposes, including limiting political power of the rich. Raising revenue isn’t one of them, as Ruml demonstrates. The point is to engage directly on the issue of the undeserved and unwarranted political influence of the rich, not to suggest that we lack the resources to live in a decent society. We focus our energy on one fight, going straight for the beating heart of the filthy rich. Does this sound like class war? Or just fighting back? Photo by Kevin Dooley under Creative Commons license
/** * Metric Store in RDF Store. */ public class RDFStoreMetricRegistry { private final MetricRegistry metricRegistry; private HealthCheckRegistry healthCheckRegistry; private JmxReporter jmxReporter; public RDFStoreMetricRegistry() { this.metricRegistry = new MetricRegistry(); } public RDFStoreMetricRegistry(MetricRegistry metricRegistry) { this.metricRegistry = metricRegistry; } // --------------------------------------------------------------------------- // MetricRegistry: Meter, Guage, Counter, Histogram, Timer // --------------------------------------------------------------------------- public MetricRegistry getMetricRegistry() { return metricRegistry; } public <T extends Metric> T register(String name, T metric) { return metricRegistry.register(name, metric); } public Meter meter(String name) { return metricRegistry.meter(name); } @SuppressWarnings({ "rawtypes", "unchecked" }) public <T> Gauge<T> gauge(String name, final MetricSupplier<Gauge> supplier) { return metricRegistry.gauge(name, supplier); } @SuppressWarnings({ "rawtypes", "unchecked" }) public Gauge<Long> gauge(String name, final long value) { return metricRegistry.gauge(name, new MetricSupplier<Gauge>() { @Override public Gauge<Long> newMetric() { return new Gauge<Long>() { @Override public Long getValue() { return value; } }; } }); } public Counter counter(String name) { return metricRegistry.counter(name); } public Histogram histogram(String name) { return metricRegistry.histogram(name); } public Timer timer(String name) { return metricRegistry.timer(name); } // --------------------------------------------------------------------------- // HealthCheckRegistry // --------------------------------------------------------------------------- public HealthCheckRegistry getHealthCheckRegistry() { return healthCheckRegistry; } public void setHealthCheckRegistry(HealthCheckRegistry healthCheckRegistry) { this.healthCheckRegistry = healthCheckRegistry; } public void healthCheck(String name, HealthCheck 
healthCheck) { healthCheckRegistry.register(name, healthCheck); } // TODO(zhoujiagen) connection health checker // REF: https://metrics.dropwizard.io/4.0.0/getting-started.html#health-checks // --------------------------------------------------------------------------- // listener // --------------------------------------------------------------------------- public void addListener(MetricRegistryListener listener) { metricRegistry.addListener(listener); } public void removeListener(MetricRegistryListener listener) { metricRegistry.removeListener(listener); } public void addListener(HealthCheckRegistryListener listener) { healthCheckRegistry.addListener(listener); } public void removeListener(HealthCheckRegistryListener listener) { healthCheckRegistry.removeListener(listener); } // --------------------------------------------------------------------------- // reporter // --------------------------------------------------------------------------- public JmxReporter getJmxReporter() { return jmxReporter; } public void initializeJmxReporter() { this.jmxReporter = JmxReporter.forRegistry(this.metricRegistry).build(); } public void startJmxReporter() { if (this.jmxReporter == null) { this.initializeJmxReporter(); } this.jmxReporter.start(); } }
/**
 * Created by trioangle on 8/9/18.
 *
 * Gson-mapped response model for trip queries: a status code/message plus the
 * lists of trip details and scheduled rides returned by the API. Fields are
 * null until deserialization assigns them.
 */
public class TripResult {

    @SerializedName("status_message")
    @Expose
    private String statusMessage;

    @SerializedName("status_code")
    @Expose
    private String statusCode;

    @SerializedName("trip_details")
    @Expose
    private ArrayList<TripDetailModel> tripDetail;

    @SerializedName("schedule_ride")
    @Expose
    private ArrayList<ScheduleDetail> scheduleTrip;

    /** @return the status message string from the response */
    public String getStatusMessage() {
        return statusMessage;
    }

    public void setStatusMessage(String value) {
        statusMessage = value;
    }

    /** @return the status code string from the response */
    public String getStatusCode() {
        return statusCode;
    }

    public void setStatusCode(String value) {
        statusCode = value;
    }

    /** @return the list of trip detail entries */
    public ArrayList<TripDetailModel> getTripDetail() {
        return tripDetail;
    }

    public void setTripDetail(ArrayList<TripDetailModel> value) {
        tripDetail = value;
    }

    /** @return the list of scheduled rides */
    public ArrayList<ScheduleDetail> getScheduleTrip() {
        return scheduleTrip;
    }

    public void setScheduleTrip(ArrayList<ScheduleDetail> value) {
        scheduleTrip = value;
    }
}
def adam_args(parser, dbeta1=0.99, dbeta2=0.99, depsilon='1e-8',
              dbeta1_fb=0.99, dbeta2_fb=0.99, depsilon_fb='1e-8'):
    """Add the Adam optimizer command line options to ``parser``.

    Args:
        parser: The argparse parser to extend.
        dbeta1: Default for ``--beta1`` (forward optimizer).
        dbeta2: Default for ``--beta2`` (forward optimizer).
        depsilon: Default for ``--epsilon`` (forward optimizer, kept as a
            string, presumably parsed later by the caller).
        dbeta1_fb: Default for ``--beta1_fb`` (feedback optimizer).
        dbeta2_fb: Default for ``--beta2_fb`` (feedback optimizer).
        depsilon_fb: Default for ``--epsilon_fb`` (feedback optimizer).

    Returns:
        The argument group holding the new options.
    """
    agroup = parser.add_argument_group('Training options for the '
                                       'Adam optimizer')
    # (flag, value type, default, optimizer label used in the help text).
    option_specs = [
        ('beta1', float, dbeta1, 'adam optimizer'),
        ('beta2', float, dbeta2, 'adam optimizer'),
        ('epsilon', str, depsilon, 'adam optimizer'),
        ('beta1_fb', float, dbeta1_fb, 'adam feedback optimizer'),
        ('beta2_fb', float, dbeta2_fb, 'adam feedback optimizer'),
        ('epsilon_fb', str, depsilon_fb, 'adam feedback optimizer'),
    ]
    for flag, value_type, default, label in option_specs:
        # The help text names the bare hyperparameter (without the _fb suffix).
        hyper_name = flag[:-3] if flag.endswith('_fb') else flag
        agroup.add_argument(
            '--' + flag, type=value_type, default=default,
            help='%s training hyperparameter for the %s. '
                 'Default: %%(default)s' % (hyper_name, label))
    return agroup
class MultithreadingTaskGeneral:
    """A parent class of others that governs what calculations are run on
    each thread.

    Subclasses override :meth:`value_func` to perform the real work, and may
    append to ``self.results`` (or set it to ``'ERROR'`` to signal failure).
    """

    # Kept as a class attribute for backward compatibility with subclasses
    # whose __init__ does not call super().__init__(). Instances created that
    # way fall back to sharing this single list, as before.
    results = []

    def __init__(self):
        # Give each instance its own accumulator. Previously every instance
        # mutated the one shared class-level list above, so results leaked
        # between tasks and across runs.
        self.results = []

    def runit(self, running, mutex, results_queue, items):
        """Launches the calculations on this thread

        Arguments:
        running -- A multiprocessing.Value object counting live workers
        mutex -- A multiprocessing.Lock object guarding ``running``
        results_queue -- A multiprocessing.Queue() object for storing the
            calculation output
        items -- A list, the input data required for the calculation
        """

        for item in items:
            self.value_func(item, results_queue)

        # Decrement the live-worker count under the lock, and release the
        # lock even if the update raises.
        mutex.acquire()
        try:
            running.value -= 1
            if self.results == 'ERROR':
                # Signal the error through the shared counter. The original
                # code rebound the local name (``running = -1``), which had
                # no effect outside this function.
                running.value = -1
        finally:
            mutex.release()

        results_queue.put(self.results)

    def value_func(self, item, results_queue):  # so overwriting this function
        """The definition that actually does the work.

        Arguments:
        item -- A list or tuple, the input data required for the calculation
        results_queue -- A multiprocessing.Queue() object for storing the
            calculation output
        """

        # input1 = item[0]
        # input2 = item[1]
        # input3 = item[2]
        # input4 = item[3]
        # input5 = item[4]
        # input6 = item[5]

        # use inputs to come up with a result, some_result

        #self.results.append(some_result)

        pass
An Amplicon-Based Approach for the Whole-Genome Sequencing of Human Metapneumovirus Human metapneumovirus (HMPV) is an important cause of upper and lower respiratory tract disease in individuals of all ages. It is estimated that most individuals will be infected by HMPV by the age of five years old. Despite this burden of disease, there remain caveats in our knowledge of global genetic diversity due to a lack of HMPV sequencing, particularly at the whole-genome scale. The purpose of this study was to create a simple and robust approach for HMPV whole-genome sequencing to be used for genomic epidemiological studies. To design our assay, all available HMPV full-length genome sequences were downloaded from the National Center for Biotechnology Information (NCBI) GenBank database and used to design four primer sets to amplify long, overlapping amplicons spanning the viral genome and, importantly, specific to all known HMPV subtypes. These amplicons were then pooled and sequenced on an Illumina iSeq 100 (Illumina, San Diego, CA, USA); however, the approach is suitable to other common sequencing platforms. We demonstrate the utility of this method using a representative subset of clinical samples and examine these sequences using a phylogenetic approach. Here we present an amplicon-based method for the whole-genome sequencing of HMPV from clinical extracts that can be used to better inform genomic studies of HMPV epidemiology and evolution. Introduction Since its discovery in 2001, human metapneumovirus (HMPV) has become increasingly recognised as a major cause of acute respiratory illness (ARI), globally . Serological studies estimate that almost all individuals will be exposed to HMPV by the age of five . Clinically, HMPV is indistinguishable from ARI caused by other respiratory pathogens, including respiratory syncytial virus (RSV) . 
While most infections are mild and selflimiting, HMPV has increased morbidity and mortality in high-risk populations, including immunosuppressed individuals, young children and the elderly . Globally, HMPV is associated with 3.9-7% of children hospitalised with lower respiratory tract infections (LRTI), with outpatient detection rates ranging from 6.2 to 12%, highlighting its clinical significance as a cause of ARI in this age group . HMPV is also a known cause of ARI in hospitalised adults, with one study showing that the hospitalisation rates of adults >50 years of age were statistically similar to those with influenza infections in the same region . HMPV is a member of the Pneumoviridae family, with a negative-sense, single-stranded RNA genome of approximately 13.3 kb in length . HMPV is genetically similar to RSV; however, it lacks two nonstructural genes-NS1 and NS2. Phylogenetic analysis of the HMPV fusion (F) and glycoprotein (G) genes has led to the identification and classification of viruses into two major subtypes, HMPV A and HMPV B, which can further be subdivided into A1, A2a, A2b, B1 and B2 sublineages . Epidemiological studies have revealed the cocirculation of HMPV subtypes globally, with the predominant subtype fluctuating throughout the year . Historically, HMPV molecular epidemiological studies have relied on the subgenomic sequencing of partial F or G protein genes to perform genomic and evolutionary studies . Indeed, only 2.3% (n = 226/9795) of available sequences on the National Center for Biotechnology Information (NCBI) GenBank database are near-complete or complete genomes (as of November 2020). Therefore, our understanding of the genomic epidemiology, genetic diversity and evolution of HPMV remains limited. Whole-genome sequencing (WGS) is a powerful tool for public health infectious disease surveillance; it can also inform the treatment and control of viruses in the population . 
WGS offers increased resolution at multiple epidemiological scales, from investigating global virus traffic networks to elucidating individual transmission events within outbreaks . The recent SARS-CoV-2 epidemic has highlighted the utility of amplicon-based WGS methods as a cost-effective, rapid method to sequence the wholegenome approach . The purpose of this study was to develop a simple and robust amplicon-based method for sequencing the HMPV full-length genome with the aim to inform a better understanding of its molecular epidemiology. Primer Design Our approach was based on an existing WGS workflow designed to amplify and sequence the RSV genome using four amplicons between 3,528 and 4,375 nt in length, each with an overlapping region of at least 100 nt . Given the similar genome lengths between HPMV and RSV, we focused on designing HMPV-specific primer sets that would also generate four overlapping amplicons of~3.5 kb each that span the viral genome. To include historical and current circulating HMPV subtypes, all available full-or near-fulllength (>13,000 nt) HMPV genome sequences were obtained from the NCBI GenBank database (downloaded on 10 December 2019), and all sequence analysis was conducted using Geneious Prime 2019.2.3. Sequences were excluded from the initial analysis if they contained >20 continuous ambiguous bases. This result was a final set of 153 full-or near-full-length HMPV sequences, which were then aligned using MAFFT version 7.45 . Phylogenetic analysis using a neighbour-joining approach was performed to show that all known HMPV subtypes were represented in the subset of sequences ( Figure 1). A sliding window approach was then used to plot sequence identity along the viral genome alignment and identify conserved regions for targeted primer design. 
These primers were designed to be~25 bp in length with degeneracies where necessary to capture position variation between HMPV subtypes, as well as to have melting temperatures within 5 • C of each other, and avoiding potential dimer formations . We also tested an existing published primer set designed to amplify the HPMV genome using a similar overlapping amplicon scheme (~3.5 kb each) from a study examining the local virus traffic and genetic diversity of HMPV in Peru . The final set of primer pairs was designed to be run as four separate polymerase chain reactions (PCRs) performed in parallel (Table 1) and was tested on a selection of known viral extracts from HMPV-positive clinical specimens. Clinical Specimens and HMPV Isolation Residual total nucleic acid from respiratory specimens submitted to NSW Health Pathology, Institute of Clinical Pathology and Microbiology Research (ICPMR) for diagnostic testing were utilised in this study as per protocols approved by the Westmead Hospital and Institute ethics and governance committees (LNR/17/WMEAD/128 and SSA/17/WMEAD/129, 16 November 2017). Total nucleic acid was extracted from each submitted respiratory specimen using a high-throughput, magnetic-bead-based extraction platform and screened against a panel of known respiratory viruses using an accredited multiplex quantitative RT-PCR (qRT-PCR). Viruses included on the respiratory panel are influenza A, influenza A subtype H3N2, influenza A H1N1 2009 pandemic, influenza B, adenovirus, parainfluenza 1, 2 and 3, RSV, rhinovirus, enterovirus and HPMV. Archived clinical extracts that were reported as positive for HMPV were deidentified before inclusion in this study. No specific subtyping information was available on the archived samples; therefore, we selected ten random HMPV-positive nucleic acid specimens collected over seven years between 2013 and 2020 to attempt to capture historic HMPV genetic diversity in NSW, Australia. 
To estimate the levels of HMPV, an in-house singleplex qRT-PCR specific for HMPV was performed, with CT values ranging between 19.9 and 34.2 in our study samples. Reverse Transcription Complementary DNA (cDNA) synthesis was performed in a 20 µL reaction containing 4 µL of 5× SuperScript IV VILO MasterMix (Invitrogen, Carlsbad, CA, USA), 12 µL of nuclease-free water and 4 µL of the viral RNA template, as per the manufacturer's instructions. The thermal cycling protocol used was as follows: random priming was performed at 25 • C for 10 min, followed by extension at 50 • C for 20 min and then enzyme denaturation at 85 • C for 5 min before holding at 4 • C. All incubation steps and reaction components were performed to the manufacturer's specifications on a SimpliAmp thermocycler (Applied Biosystems, Foster City, CA, USA). Viral cDNA was used immediately or stored at −80 • C until required. HMPV Genome Amplification The viral cDNA was then split across four separate PCR reactions, each amplifying one part of the HMPV genome (Table 1). Each PCR was performed in a 25 µL reaction containing 12.5 µL of 2× Platinum SuperFi MasterMix (Invitrogen), 1.25 µL of 10 µM forward primer, 1.25 µL of 10 µM reverse primer, 7 µL of nuclease-free water and 3 µL of cDNA template. The reactions were then incubated at 98 • C for 30 s, followed by 44 cycles of denaturation at 98 • C for 10 s, annealing at 60 • C for 20 s and extension at 72 • C for 2:10 min, with a final extension for 5 min at 72 • C, followed by holding at 4 • C. Amplicon size and yield were assessed by gel electrophoresis of 5 µL of PCR reactions using a 1% E-Gel-48 Agarose Gel (Invitrogen) with 500 ng of 1 Kb Plus DNA Ladder. The gels were prestained with ethidium bromide for amplicon visualisation under UV light for approximate quantification and sizing. Amplicon approximate quantity was estimated using the target PCR product band intensity. 
To ensure even coverage across the HPMV genome, the 4 amplicons of each clinical sample were pooled equally based on target amplicon intensity into a final pooled volume of 40 µL, adjusted with Qiagen EB buffer (Qiagen, Düsseldorf, Germany) if necessary. When nonspecific amplification was present, the band intensity of only the target amplicon was taken into consideration, and pooling was adjusted accordingly. The HMPV genome amplicon pools were purified using AMPure XP (Beckman Coulter, Pasadena, CA, USA) at a bead-to-sample ratio of 1× according to the manufacturer's protocol. The purified DNA was then quantified using the 1× double-stranded DNA high sensitivity (1× dsDNA HS) Qubit assay (Invitrogen) and measured on the Qubit 4 fluorometer. The pooled amplicons were then volumetrically diluted to 0.2 ng/µL, the required input concentration for library preparation. Library Preparation and Sequencing Amplicons were prepared for sequencing using the Nextera XT DNA Library Preparation Kit with the v2 Set B indexing kit (Illumina, Massachusetts, MA, USA), although any compatible indexing set of choice could be used in the reproduction of this approach. The manufacturer's protocol was followed for genomic DNA tagmentation, library amplification and clean-up, except that all volumes were halved for reagent conservation. The purified DNA libraries were quantified using the 1× dsDNA HS Qubit assay and Qubit 4 fluorometer before normalisation using Qubit DNA concentrations. The final library pool molarity and fragment length distribution were determined using the 4200 TapeStation System with a high sensitivity D5000 tape (Agilent, Santa Clara, CA, USA) before dilution to 0.1 nM with Qiagen EB buffer (Qiagen, Düsseldorf, Germany) for loading and sequencing on an Illumina iSeq 100 platform (Illumina, San Diego, CA, USA) with a v1 300 cycle kit. 
Viral Assembly Raw sequences were initially quality scored using fastqc following this, the reads were quality trimmed to a QC threshold of phred score 20 using bbduk . The trimmed reads were then de novo assembled using Megahit with default parameters . The trimmed reads were then remapped onto the draft genome using bbmap before the overall mapping alignment quality was assessed using the Geneious Prime 2019.2.3 and majority consensus genome extracted. The final genome was trimmed of terminal primer sequences and annotated using NCBI GenBank reference sequences. Phylogenetic Analysis Phylogenetic analysis was performed by aligning all sequences generated in this study against a reference set obtained from NCBI GenBank using MAFFT and PhyML , with node support estimated by 100 bootstrap replicates. Sequences obtained in this study were published to NCBI GenBank with the following accession IDs: MW221986-MW221995. Designing Primers to Amplify the HMPV Genome The aim of this study was to develop a simple and robust amplicon-based approach for amplifying and sequencing the HMPV genome. To do this, we adapted a previous approach used for RSV to design four primer sets generating~3.5 kb amplicons that overlap and span the viral genome. Our primers were based on all available HMPV genomes from the NCBI GenBank database and targeted conserved regions at suitably spaced positions in the genome (Table 1 and Figure 2). The final primer sets were located in the terminal regions of the genome, as well as in the fusion and large protein genes, and avoided divergent regions of the genome such as in the viral glycoprotein (G protein) (Figure 2). Other amplicon-based methods for viral genome sequencing often employ shorter amplicon lengths (1000 bp or less) to improve performance for low-viral-load or low-quality samples, such as with the ARTIC protocol for SARS-CoV-2 genome sequencing , or even for enteric virus including human norovirus . 
Here we chose targeted amplicons to be between 3000 and 4000 bp based on previous performance against RSV , where genomes would reliably amplify from 80 to 90% of clinical samples. Furthermore, there is greater diversity present in HMPV compared to SARS-CoV-2 such that there are less suitable target positions across the genome to readily amplify all subtypes. Indeed, to capture this diversity a number of degenerate nucleotides were included in our primers, and based on our current understanding of HMPV diversity, it would be expected our primers cover the vast majority of variants present in circulation (Figure 3). HMPV Genome RT-PCR Performance To examine the performance of our newly designed primers, we tested them against a set of HMPV-positive extracts from clinical respiratory specimens. Since the subtype and sublineage classifications from our samples were unknown, we instead obtained samples across a wide time period (2013 to 2020) to attempt to capture a breadth of diversity. Initial end-point PCRs showed good levels of amplification across the four targets (data not shown); however, we then attempted further optimisation of the assay using a temperature gradient (59-61.5 • C) to establish the optimal annealing temperature was 60 • C to ensure efficient target amplification and minimise nonspecific amplification ( Figure 4A). We also compared our primers to those previously published from the Peru WGS study ( Figure 4B) and showed improved performance, particularly for the specific amplification of the targeted HMPV region; however, this may be partly due to our initial optimisation of annealing and cycling conditions favouring our newly designed set. An additional RT-PCR was performed to ensure the assay was specific for HMPV and did not amplify the closely related virus, RSV ( Figure S1). 
Genome Sequencing, Assembly and Analysis Following the successful amplification of all ten HMPV samples, the four amplicons from each were pooled, purified and sequenced using the Nextera XT library prep kit and an Illumina iSeq 100. In this study, we sequenced the 10 clinical HMPV samples along with libraries from other projects. However, we targeted 100,000 paired reads per HMPV library to achieve an expected genome coverage depth between 800 and 1000×, which is sufficient for calling a consensus genome. Given an Illumina iSeq 100 run would yield a total of 5,000,000 paired reads, it would be possible to reliably multiplex up to 48 HMPV genomes per run. In this study, the samples included had a mean read depth of 1125× across all samples (Figure S2), and the coverage was found to be even across the genome, except where amplicon pooling was not equal. We then used a de novo assembly approach to generate the final consensus genomes for the ten HMPV samples. Reference mapping would also be an appropriate method for genome assembly; however, similar to RSV, there are notable structural variants (insertions) in the HMPV G protein that may be misassembled when using an inappropriate reference strain for mapping such as a historical prototype. Therefore, a de novo approach would be recommended for both RSV and HMPV WGS, and users should examine coverage profiles for depth variability as an indication of structurally misassembled genomes. It is also important to note that the final genome sequences generated using our approach will be incomplete in the terminal regions and missing an expected 43 and 28 base pairs in the 5′ and 3′ ends, respectively. Ideally, our assay would have had primers designed from the very terminal regions; however, the limited sequence availability meant that such primers using existing GenBank sequences may not capture all circulating diversity. 
Importantly, the slightly inward placement of primers was not found to have any impact on the phylogeny of the virus, and the topology of phylogenetic trees using complete and "near-complete" genomes (i.e., just using the region our assay amplifies) were found to be identical (data not shown). Furthermore, it is also common practice in phylogenetic analysis to ensure sequences are trimmed to coding regions only, which our assay captures. This genome sequencing approach is also useful in investigating minor variants present in individual patient samples. Previous studies of other respiratory viruses have shown the merit of using amplicon-based methods to identify mixed infections containing multiple viral subtypes . However, here in our study samples, no variants were observed at a frequency above 1%. We then analysed our ten clinical HMPV genome sequences alongside four reference sequences and a selected subset of published hMPV sequences using a phylogenetic approach to determine their subtype. Of the samples presented in this study, four were identified as A2b strains, two as B1 and four B2 ( Figure 5). There are limited data on the molecular epidemiology of HMPV in Australia and nothing published previously for the state of NSW, where these samples were collected. The finding of no A1 or A2a strains may be due to the undersampling in this current study; however, one study from Queensland, Australia, showed declining levels of A1 over the period 2001-2004 , and since 2006, these subtypes have been rarely identified with A2b and B strains most commonly identified . Despite this, based on our alignments, we would expect the primers and amplification to capture all subtypes, including A1 and A2, as these viruses were represented in our genome alignments (Figures 1 and 3), and this approach would be useful for ongoing genomic studies here in Australia and globally. Figure 5. 
A maximum-likelihood tree constructed using near-full-length human metapneumovirus sequences generated in this study (those colored red). Node supports are indicated, and branch lengths are scaled according to sequence divergence (substitutions per site). Conclusions Using publicly available genome sequences representing the full known diversity of HMPV, we designed a simple and reliable assay for amplifying and sequencing HMPV genomes from clinical samples. Ten HMPV genomes were generated from residualdiagnostic specimens using this approach to demonstrate multiple subtypes circulating in NSW, Australia, since 2013. This work highlights the utility of amplicon-based sequencing for genomic epidemiological studies of respiratory viruses to inform public health investigations and understand the patterns of evolution and spread. Supplementary Materials: The following are available online at https://www.mdpi.com/1999-491 5/13/3/499/s1, Figure S1: Gel electrophoresis result showing RT-PCR results for cross-reactivity between HMPV and RSV genome amplifcaiton primers, Figure S2: Coverage plots for the ten HMPV whole genome sequences generated in this study. Informed Consent Statement: Patient consent was waived for this study as it used de-identified, residual diagnostic specimens. Data Availability Statement: The sequences generated in this study have been deposited to the NCBI GenBank database under the accession numbers: MW221986-MW221995.
<reponame>SpiralP/rust-pb #![deny(rust_2018_idioms)] //! # Terminal progress bar for Rust //! //! Console progress bar for Rust Inspired from [pb](http://github.com/cheggaaa/pb), support and //! tested on MacOS, Linux and Windows //! //! ![Screenshot](https://raw.githubusercontent.com/a8m/pb/master/gif/rec_v3.gif) //! //! [Documentation](http://a8m.github.io/pb/doc/pbr/index.html) //! //! ### Examples //! 1. simple example //! //! ```ignore //! extern crate pbr; //! //! use pbr::ProgressBar; //! use std::thread; //! //! fn main() { //! let count = 1000; //! let mut pb = ProgressBar::new(count); //! pb.format("╢▌▌░╟"); //! for _ in 0..count { //! pb.inc(); //! thread::sleep_ms(200); //! } //! pb.finish_print("done"); //! } //! ``` //! //! 2. MultiBar example. see full example [here](https://github.com/a8m/pb/blob/master/examples/multi.rs) //! //! ```ignore //! extern crate pbr; //! //! use std::thread; //! use pbr::MultiBar; //! use std::time::Duration; //! //! fn main() { //! let mut mb = MultiBar::new(); //! let count = 100; //! mb.println("Application header:"); //! //! let mut p1 = mb.create_bar(count); //! let _ = thread::spawn(move || { //! for _ in 0..count { //! p1.inc(); //! thread::sleep(Duration::from_millis(100)); //! } //! // notify the multibar that this bar finished. //! p1.finish(); //! }); //! //! mb.println("add a separator between the two bars"); //! //! let mut p2 = mb.create_bar(count * 2); //! let _ = thread::spawn(move || { //! for _ in 0..count * 2 { //! p2.inc(); //! thread::sleep(Duration::from_millis(100)); //! } //! // notify the multibar that this bar finished. //! p2.finish(); //! }); //! //! // start listen to all bars changes. //! // this is a blocking operation, until all bars will finish. //! // to ignore blocking, you can run it in a different thread. //! mb.listen(); //! } //! ``` //! //! 3. Broadcast writing(simple file copying) //! //! ```ignore //! #![feature(io)] //! extern crate pbr; //! //! use std::io::copy; //! 
use std::io::prelude::*; //! use std::fs::File; //! use pbr::{ProgressBar, Units}; //! //! fn main() { //! let mut file = File::open("/usr/share/dict/words").unwrap(); //! let n_bytes = file.metadata().unwrap().len() as usize; //! let mut pb = ProgressBar::new(n_bytes); //! pb.set_units(Units::Bytes); //! let mut handle = File::create("copy-words").unwrap().broadcast(&mut pb); //! copy(&mut file, &mut handle).unwrap(); //! pb.finish_print("done"); //! } //! ``` // Macro for writing to the giving writer. // Used in both pb.rs and multi.rs modules. // // # Examples // // ``` // let w = io::stdout(); // printfl!(w, ""); // printfl!(w, "\r{}", out); // // ``` macro_rules! printfl { ($w:expr, $($tt:tt)*) => {{ $w.write(&format!($($tt)*).as_bytes()).ok().expect("write() fail"); $w.flush().ok().expect("flush() fail"); }} } mod multi; mod pb; mod tty; pub use crate::multi::{MultiBar, MultiBarSender, Pipe}; pub use crate::pb::{ProgressBar, Units}; use std::io::{stdout, Stdout, Write}; pub struct PbIter<T, I> where I: Iterator, T: Write, { iter: I, progress_bar: ProgressBar<T>, } impl<I> PbIter<Stdout, I> where I: Iterator, { pub fn new(iter: I) -> Self { Self::on(stdout(), iter) } } impl<T, I> PbIter<T, I> where I: Iterator, T: Write, { pub fn on(handle: T, iter: I) -> Self { let size = iter.size_hint().0; PbIter { iter, progress_bar: ProgressBar::on(handle, size as u64), } } } impl<T, I> Iterator for PbIter<T, I> where I: Iterator, T: Write, { type Item = I::Item; fn next(&mut self) -> Option<I::Item> { match self.iter.next() { Some(i) => { self.progress_bar.inc(); Some(i) } None => None, } } fn size_hint(&self) -> (usize, Option<usize>) { self.iter.size_hint() } }
# -*- coding: utf-8 -*-
"""CLI for training and querying slot-filling intent taggers.

Tasks:
    train   -- build word/char vocabularies from an intent schema, train a
               BiLSTM-CRF tagger, evaluate it, and persist weights, vocabs
               and a sample of the training data.
    predict -- load a trained model by name and label a single command.
"""
import argparse
from os import path
import json

from nltk.tokenize import word_tokenize as tokenize

from trainer import Trainer
from datautils import Dataset, Vocabulary
from preprocessing import Preprocessor
from model import BiLSTMCRF

parser = argparse.ArgumentParser(description='Train a new intent')
parser.add_argument('task', nargs='?', type=str, default='train',
                    help='The task we want to run (train or predict)')
# NOTE: the help strings below were all copy-pasted as "The path to the
# intent schema" in the original; corrected to describe each option.
parser.add_argument('-n', '--name', type=str,
                    help='The name of the intent/model')

# Train arguments
parser.add_argument('-f', '--schema_file', type=str,
                    help='The path to the intent schema')
parser.add_argument('-s', '--sample_size', type=int, default=300,
                    help='Number of training commands saved with the schema')
parser.add_argument('-b', '--batch_size', type=int, default=64,
                    help='Training batch size')
parser.add_argument('-e', '--epochs', type=int, default=10,
                    help='Number of training epochs')

# Predict arguments
parser.add_argument('--command', type=str, default='',
                    help='A command for our intent')


def run(schema_path, name, sample_size, batch_size, epochs):
    """Train a BiLSTM-CRF tagger for the intent described by ``schema_path``.

    Persists the model weights (under ``name``), a sample of the training
    commands plus the label set, and the word/char vocabularies as JSON.
    """
    dataset = Dataset(schema_path, name)
    labels, data = dataset.get_data()

    X = [x['words'] for x in data]
    y = [x['labels'] for x in data]

    word_vocab = Vocabulary()
    word_vocab.build_vocab([w for command in X for w in command])

    # Char-level vocabulary (for char embeddings) built from the word vocab.
    char_vocab = Vocabulary()
    char_vocab.build_vocab([ch for w in word_vocab for ch in w])

    labels2idx = dict((label, idx) for idx, label in enumerate(labels))
    idx2label = dict((idx, label) for idx, label in enumerate(labels))

    preprocessor = Preprocessor(word_vocab, labels2idx, char_vocab)
    model = BiLSTMCRF(labels, len(word_vocab), len(char_vocab))

    # Split points at 75% and 95% of the data (presumably train/val/test
    # boundaries -- confirm Trainer's `split` semantics).
    trainer = Trainer(model, X, y, preprocessor.transform, split=[0.75, 0.95])
    trainer.train(batch_size, epochs)
    trainer.evaluate(idx2label)

    model.save_weights(name)
    dataset.save(X[:sample_size], labels)
    word_vocab.save("%s_word_vocab.json" % name)
    char_vocab.save("%s_char_vocab.json" % name)


def predict(name, command):
    """Label ``command`` with the model trained under ``name`` and print one
    ``word: label`` line per token."""
    command = command.lower()

    label_path = path.join(
        path.dirname(path.realpath(__file__)),
        "intents", "config", "labels",
        "%s_labels.json" % name
    )
    with open(label_path, encoding="utf8") as f:
        labels = json.load(f)

    word_vocab = Vocabulary()
    word_vocab.load("%s_word_vocab.json" % name)

    # Char-embedding vocabulary persisted at training time.
    char_vocab = Vocabulary()
    char_vocab.load("%s_char_vocab.json" % name)

    idx2label = dict((idx, label) for idx, label in enumerate(labels))

    preprocessor = Preprocessor(word_vocab, None, char_vocab)
    model = BiLSTMCRF(labels, len(word_vocab), len(char_vocab))
    model.load_weights('intents/config/weights/%s.hdf5' % name)

    sentence = tokenize(command)
    features = preprocessor.transform([sentence])
    p = model.predict(features)

    predicted_labels = []
    for pred in p:
        predicted_labels.append(idx2label[pred])

    for word, label in zip(sentence, predicted_labels):
        print('%s: %s' % (word, label))


if __name__ == '__main__':
    args = parser.parse_args()
    if args.task == 'train':
        run(
            args.schema_file,
            args.name,
            args.sample_size,
            args.batch_size,
            args.epochs
        )
    elif args.task == 'predict':
        predict(
            args.name,
            args.command
        )
    else:
        raise RuntimeError('Task not supported')
/** * Request and response header-based {@link WebSessionIdResolver}. * * @author Greg Turnquist * @author Rob Winch * @since 5.0 */ public class HeaderWebSessionIdResolver implements WebSessionIdResolver { /** Default value for {@link #setHeaderName(String)}. */ public static final String DEFAULT_HEADER_NAME = "SESSION"; private String headerName = DEFAULT_HEADER_NAME; /** * Set the name of the session header to use for the session id. The name is used to extract the * session id from the request headers as well to set the session id on the response headers. * * <p>By default set to {@code DEFAULT_HEADER_NAME} * * @param headerName the header name */ public void setHeaderName(String headerName) { Assert.hasText(headerName, "'headerName' must not be empty"); this.headerName = headerName; } /** * Return the configured header name. * * @return the configured header name */ public String getHeaderName() { return this.headerName; } @Override public List<String> resolveSessionIds(ServerWebExchange exchange) { HttpHeaders headers = exchange.getRequest().getHeaders(); return headers.getOrDefault(getHeaderName(), Collections.emptyList()); } @Override public void setSessionId(ServerWebExchange exchange, String id) { Assert.notNull(id, "'id' is required."); exchange.getResponse().getHeaders().set(getHeaderName(), id); } @Override public void expireSession(ServerWebExchange exchange) { this.setSessionId(exchange, ""); } }
Fourteen gangs involved in the smuggling of frozen meat products that pose huge health risks have been hunted down and the products seized in a recent crackdown, the General Administration of Customs said. Customs officials sealed more than 100,000 metric tons of smuggled frozen meat worth up to 3 billion yuan ($483 million), including frozen chicken wings, beef and pork, the administration said. The crackdown took place in June in 14 provinces and regions. The Changsha Administration of Customs in Hunan province broke up two gangs with 20 members suspected of smuggling frozen meat products on June 1 and seized 800 tons of products worth more than 10 million yuan. It is the largest case involving smuggled frozen meat products in the province's history. "The products fully filled an entire compartment. It was smelly, and I nearly threw up when I opened the door," said Zhang Tao, an administration official. According to Yang Bo, deputy director of the Anti-Smuggling Bureau of the Changsha Administration of Customs, smuggled meat products are not inspected, and this is unsafe because people may get life-threatening illnesses if they eat products that have bacteria and viruses such as bird flu or foot-and-mouth disease. Yang said another problem with smuggled meat is that much of it has expired. Officers from the Guangxi Zhuang autonomous region's Administration of Customs found that some of the confiscated smuggled frozen meat is far older than the given expiration dates suggest, with some being more than 40 years old. Experts say that as long as frozen meat shows no signs of thawing, customers can't tell fresh meat from bad meat that is decades old. When smuggled meat is moved under poor conditions or repeatedly thawed-or even refrozen after the meat has already gone bad-the threat it poses will be especially serious. Changsha customs said that high profits have already spurred the creation of an extensive supply chain. 
Smugglers generally purchase meat for very low prices from foreign countries, and then have it delivered to Hong Kong in refrigerated containers. The products will then be moved to the mainland via Vietnam, with smugglers hiring residents of border areas to move the products to Chinese border cities and then on to Changsha before the products are transported to sites around the country. "To save costs, smugglers often hire ordinary vehicles instead of refrigerated ones. So the meat has often thawed out several times before reaching customers," said Yang, deputy director of the anti-smuggling bureau. The boom in sales of agricultural products on Internet platforms also assists smugglers. Experts called for the establishment of a nationwide supervision network and enhanced controls in border regions to prevent the products from entering China.
PARAMS = { "ppileup" : { "--map-qual": 15, "--sr-mindist" : 10000, "--id-up-quant": 0.01 }, "subsampleppileup" : { "run" : False, "--target-coverage": 100, "--with-replace": False }, "identifySignatures" : { "--min-count": 2.0, "--signature-window": "median", "--min-valley": "median", "--chunk-distance": 5 }, "updateStrand" : { "--map-qual": 15, "--max-disagreement": 0.1, "--sr-mindist": 10000, "--id-up-quant": 0.01 }, "pairupSignatures" : { "--min-distance": -100, "--max-distance": 500, "--max-freq-diff": 1.0 } } #ppileup # create a physical pileup file from one or multiple bam files # == Main parameters == # --map-qual minimum mapping quality; default=15 # == Parameters for fine tuning == # --sr-mindist minimum inner distance for structural rearrangements; # default=10000 # --id-up-quant paired end fragments with an insert size in the upper # quantile will be ignored [fraction]; default=0.01 # subsampleppileup # NOTE: subsampling of ppileup does not run by default, change "run" : True to turn on this step # subsample a ppileup file to uniform coverage # == Main parameters == # --target-coverage the target coverage of the output file [int]; mandatory # == Parameters for fine tuning == # --with-replace use sampling with replacement instead of without replacement # identifySignatures # identify signatures of TE insertions # == Main parameters == # --min-count the minimum count of a TE insertion; default=2.0 # == Parameters for fine tuning == # --signature-window the window size of the signatures of TE insertions; # [median|fixNNNN|minimumSampleMedian|maximumSampleMedian] # ; default=median # --min-valley the minimum size of the valley between two consecutive # signatures of the same family ; [median|fixNNNN|minimumSampleMedian|maximumSampleMedian] # ; default=median # --chunk-distance minimum distance between chromosomal chunks in multiples # of --min-valley [int]; default=5 # updateStrand # estimate the strand of TEs for signatures of TE insertions # == Main 
parameters == # --map-qual minimum mapping quality; default=15 # --max-disagreement the maximum disagreement for the strand of the TE insertion # in fraction of reads; mandatory # == Parameters for fine tuning == # --sr-mindist minimum inner distance for structural rearrangements; # default=10000 # --id-up-quant paired end fragments with an insert size in the upper # quantile will be ignored [fraction]; default=0.01 # pairupSignatures # pairs up signatures of TE insertions and yields TE insertions # == Main parameters == # --min-distance the minimum distance between signatures; default=-100 # --max-distance the maximum distance between signatures; default=500 # == Parameters for fine tuning == # --max-freq-diff the maximum frequency difference between signatures; # default=1.0
<gh_stars>1-10 import {Command, CommandExecutionError} from '../commands/Command'; import {Message, VoiceChannel} from 'discord.js'; import {GuildContext} from '../guild/Context'; import {Logger} from '../Logger'; export default abstract class VoiceCommand extends Command { abstract botMustBeInTheSameVoiceChannel(): boolean; abstract botMustAlreadyBeInVoiceChannel(): boolean; abstract userMustBeInVoiceChannel(): boolean; protected botShouldNotJoinVoiceChannelIfNotReady(): boolean { return false; } protected preExecute(context: GuildContext, message?: Message): Promise<void> { const status = this.checkBotVoiceChannelStatus(context, message); if (status === Status.READY) { return Promise.resolve(); } if (status === Status.NEEDS_JOIN) { if (this.botShouldNotJoinVoiceChannelIfNotReady()) { return Promise.reject( new CommandExecutionError('Bot needs to be in the voice channel before this command is called') ); } else { return this.joinVoiceChannel(context, message); } } return Promise.reject(new CommandExecutionError('Bot is not ready to perform this command')); } protected checkBotVoiceChannelStatus(context: GuildContext, message?: Message): Status { if (!message) { return Status.READY; // Implies already in a voice channel } const guildID = message.guild?.id; if (!guildID) { return Status.INVALID; } const userVoiceChannel = message.member?.voice.channel || undefined; const botVoiceChannel = message.client?.voice?.connections.get(guildID)?.channel; if (this.userMustBeInVoiceChannel() && !userVoiceChannel) { Logger.w(VoiceCommand.name, `${message.member?.user.tag} was not in voice channel`, context); return Status.INVALID; } if (this.botMustAlreadyBeInVoiceChannel() && !botVoiceChannel) { Logger.w(VoiceCommand.name, 'Bot was not already in voice channel', context); return Status.INVALID; } if (this.botMustBeInTheSameVoiceChannel()) { if (!userVoiceChannel || !botVoiceChannel || userVoiceChannel.id !== botVoiceChannel.id) { Logger.w( VoiceCommand.name, `Bot 
[${botVoiceChannel?.name}] was not in same voice channel as User [${userVoiceChannel?.name}]`, context ); return Status.INVALID; } } if (isAlreadyInVoiceChannel(context, userVoiceChannel)) { return Status.READY; } return Status.NEEDS_JOIN; } protected async joinVoiceChannel(context: GuildContext, message?: Message): Promise<void> { try { await context.getProvider().getVoiceConnectionHandler().joinVoiceChannel(message?.member?.voice?.channel); } catch (e) { throw new CommandExecutionError(`Error joining voice channel: ${e}`); } } } export enum Status { READY, NEEDS_JOIN, INVALID, } function isAlreadyInVoiceChannel(context: GuildContext, voiceChannel: VoiceChannel | undefined): boolean { return voiceChannel !== undefined && voiceChannel?.id === context.getVoiceConnection()?.channel?.id; }
/**
 * \brief BitBanged SPI implementation
 *
 * Has full support for operating modes.
 */
class bus_bitbang : public spi_base_bus {
protected:
    hwlib::pin_direct_from_out_t sclk;  ///< Serial clock line (driven by this bus)
    hwlib::pin_direct_from_out_t mosi;  ///< Master-out / slave-in data line
    hwlib::pin_direct_from_in_t miso;   ///< Master-in / slave-out data line

public:
    /// Construct from the three SPI pins and the desired SPI mode.
    bus_bitbang(hwlib::pin_out &_sclk, hwlib::pin_out &_mosi,
                hwlib::pin_in &_miso, const spi::spi_mode &mode);

protected:
    /// Busy-wait for half a clock period (sets the bus frequency).
    void wait_half_period();

    /// Clock one byte out on MOSI while sampling MISO into the same byte.
    void write_read_byte(uint8_t &d);

    void write_read(size_t n, const uint8_t *data_out,
                    uint8_t *data_in) override;
    void write_read_reverse(size_t n, const uint8_t *data_out,
                            uint8_t *data_in) override;
};  // NOTE: terminating ';' added -- the class definition was missing it
    // (possibly lost in extraction); without it this header does not compile.
It’s nice to be able to track the changes while you’re “On the go” to what’s happening with DigiByte, both in terms of your wallet balance and the value of that balance in fiat currencies. I’d personally been using the Blockfolio app (Android or iOS), and while it’s a great app, it requires you manually entering in any trades that you may have done, in order to maintain your balance. As a miner, this was frustrating, as I could be mining 10,000-12,000 DigiByte a day and so if I didn’t update my balance for a day or two, it would be quite a bit off. A few miner friends had also been doing similar, and it wasn’t until one of them asked “Surely there’s got to be a way for this to be automatic?” that I stopped to think about automating the whole process specifically for DigiByte. So, I’ve created a page that you can bookmark, with your wallet address in there, and it will automatically pull the most up-to-date balance of that wallet, along with showing you the conversion rates to USD, NZD (Because I’m from New Zealand) and also in Bitcoin. To use, simply pop your address in and hit “Submit”. DigiByte Wallet However if you want to be able to bookmark the page including your wallet address, simply adjust the URL to look like this: http://digibyteguide.com/dgb.php?d=DLkhnxFKgSZL2cwh9Gnt6FofmUH3kmxZQS You can replace after the “=” and put your wallet address there. Once you’ve done that, it’s easy enough to add to your Home Screen on your Cellphone for easy access! A few things to note This page doesn’t track your balance historically, so any deposits or withdrawals won’t be noted in the 24h / 7-day changes. The changes only track your current balance vs the price of DigiByte 24h / 7 days ago. If you’re not seeing your correct balance, this is because you have sent some DigiByte and the change hasn’t gone to your primary wallet. This is nothing to fear, it’s part of how the Blockchain works. You can easily correct this by following the instructions on the website. 
This was not created to be a “Panic button” for use with selling to avoid any loss in value, but rather for “at a glance” information on what’s happening with the price of DigiByte. Thanks goes out to DigiExplorer and Coinmarketcap for the APIs that were used to make this happen!
""" ### cache.py ### author : <NAME> ### created : 2019-05-01 """ """ Leave all the records in the text file. If the administrator uses the student number to search the log file, he can see the time and number of times he escaped. """ #!/usr/bin/python #---------------------------------------------------------------------- # import std lib #---------------------------------------------------------------------- from datetime import datetime now = datetime.now() FILEPATH = "/root/log/log.txt" #---------------------------------------------------------------------- # Create log file for the first time # The Method is executed only once. #---------------------------------------------------------------------- def NewLogFile(): f = open(FILEPATH, 'a') # log.txt file open(write mode) curDate = str(now.year) + "-" + str(now.month) + "-" + str(now.day) + " " data = "\n\n\n\n" + curDate + "\n\n" f.write(data) f.close() #---------------------------------------------------------------------- # ex) 2019-01-01 01:23:45 20140000 has gone out. # Written to text file # file name : log.txt #---------------------------------------------------------------------- def Log(STUDENT_NUM): f = open(FILEPATH, 'a') # log.txt file open(append mode) # Get current time curTime = str(now.hour) + ":" + str(now.minute) + ":" + str(now.second) + " " data = curTime + " " + str(STUDENT_NUM) + " has gone out. \n" f.write(data) f.close() print(data + '\n')
import torch.nn as nn from npf.utils.initialization import weights_init from .attention import get_attender from .encoders import SinusoidalEncodings, RelativeSinusoidalEncodings __all__ = ["SelfAttention"] class SelfAttention(nn.Module): """Self Attention Layer. Parameters ---------- x_dim : int Input dimension. out_dim : int Output dimension. If not None will do all the computation with a size of `x_dim` and add a linear layer at the end to reshape. n_attn_layers : int, optional Number of self attention layers. attention : callable or str, optional Type of attention to use. More details in `get_attender`. positional : {"absolute", "relative", None}, optional Type of positional encodings. `"absolute"` adds positional encodings (sinusoidals) to the input before self attention (Transformer). `"relative"` uses relative encodings at every attention layer (Transformer XL). `position_dim` has to be given when not `None`. position_dim : int, optional Dimenion of the position. max_len : int, optional Maximum number of x. Only used if `positional is not None`. kwargs : Additional arguments to `get_attender`. 
""" def __init__( self, x_dim, out_dim=None, n_attn_layers=2, attention="transformer", positional=None, position_dim=None, max_len=2000, **kwargs ): super().__init__() self.positional = positional if self.positional == "absolute": self.pos_encoder = SinusoidalEncodings(position_dim, x_dim) elif self.positional == "relative": self.rel_pos_encoder = RelativeSinusoidalEncodings(position_dim, x_dim) kwargs["is_relative_pos"] = True elif self.positional is None: pass else: raise ValueError("Unknown positional={}.".format(positional)) self.attn_layers = nn.ModuleList( [ get_attender(attention, x_dim, x_dim, x_dim, **kwargs) for _ in range(n_attn_layers) ] ) self.is_resize = out_dim is not None if self.is_resize: self.resize = nn.Linear(x_dim, out_dim) self.reset_parameters() def reset_parameters(self): weights_init(self) def forward(self, X, positions=None): add_to_keys = 0 if self.positional == "absolute": X = X + self.pos_encoder(positions) elif self.positional == "relative": # n^2 for now but could be n(n+1)/2 if sorted add_to_keys = self.rel_pos_encoder(positions, positions) out = X for attn_layer in self.attn_layers: out = attn_layer(out + add_to_keys, out, out) if self.is_resize: out = self.resize(out) return out
Association of a lysine-232/alanine polymorphism in a bovine gene encoding acyl-CoA:diacylglycerol acyltransferase (DGAT1) with variation at a quantitative trait locus for milk fat content DGAT1 encodes diacylglycerol O-acyltransferase (EC 2.3.1.20), a microsomal enzyme that catalyzes the final step of triglyceride synthesis. It became a functional candidate gene for lactation traits after studies indicated that mice lacking both copies of DGAT1 are completely devoid of milk secretion, most likely because of deficient triglyceride synthesis in the mammary gland. Our mapping studies placed DGAT1 close to the region of a quantitative trait locus (QTL) on bovine chromosome 14 for variation in fat content of milk. Sequencing of DGAT1 from pooled DNA revealed significant frequency shifts at several variable positions between groups of animals with high and low breeding values for milk fat content in different breeds (Holstein–Friesian, Fleckvieh, and Braunvieh). Among the variants was a nonconservative substitution of lysine by alanine (K232A), with the lysine-encoding allele being associated with higher milk fat content. Haplotype analysis indicated the lysine variant to be ancestral. Two animals that were typed heterozygous (Qq) at the QTL based on marker-assisted QTL-genotyping were heterozygous for the K232A substitution, whereas 14 animals that are most likely qq at the QTL were homozygous for the alanine-encoding allele. An independent association study in Fleckvieh animals confirmed the positive effect of the lysine variant on milk fat content. We consider the nonconservative K232A substitution to be directly responsible for the QTL variation, although our genetic studies cannot provide formal proof.
<gh_stars>0 import { Engine } from "../Engine"; enum DialogType { YES_NO, OK } export class UI { public static currentScreen = 'loading'; private static engine: Engine; private static forceValue; public static initialize(engine) { document.getElementById('menu__start').addEventListener('click', () => { this.screen('game'); this.engine.run(); }); document.getElementById('menu__options').addEventListener('click', () => { // Not implemented yet UI.dialog('This is not implemented yet.', DialogType.OK, window.close); }); document.getElementById('menu__exit').addEventListener('click', () => { UI.dialog('Are you sure you want to exit?', DialogType.YES_NO, window.close); }); this.forceValue = document.getElementById('force__value'); this.engine = engine; } public static flow(element: string, information: string) { document.getElementById('flow__' + element).innerText = information; } public static screen(element: string) { document.getElementById('screen__' + UI.currentScreen).hidden = true; document.getElementById('screen__' + element).hidden = false; UI.currentScreen = element; } public static dialog(text: string, type: DialogType, yes: Function) { document.getElementById('dialog__yes_no').hidden = true; document.getElementById('dialog__ok').hidden = true; document.getElementById('dialog__text').innerText = text; if (type === DialogType.YES_NO) { document.getElementById('dialog__yes_no').hidden = false; document.getElementById('dialog__no').addEventListener('click', () => { document.getElementById('dialog').hidden = true; }); document.getElementById('dialog__yes').addEventListener('click', () => { yes(); }); } else if (type === DialogType.OK) { document.getElementById('dialog__ok').hidden = false; document.getElementById('dialog__okay').addEventListener('click', () => { document.getElementById('dialog').hidden = true; }); } document.getElementById('dialog').hidden = false; } public static force(ratio: number) { if (this.forceValue) { this.forceValue.style.transform = 
'scaleY(' + ratio + ')'; // this.forceValue.style.webkitTransform = 'scaleY(' + ratio + ')'; } } }
def make_radius_list(max_pix, n, log=False):
    """Return ``n`` monotonically increasing radii ending at ``max_pix``.

    Parameters
    ----------
    max_pix : float
        Largest radius; always the last element of the result.
    n : int
        Number of radii to generate.
    log : bool, optional
        If True, space the radii logarithmically from 1 to ``max_pix``
        (inclusive); otherwise space them linearly from ``max_pix / n``
        to ``max_pix``.

    Returns
    -------
    numpy.ndarray
        Array of ``n`` float radii.
    """
    if log:
        # Redundant ``axis=0`` (start/stop are scalars) dropped.
        return np.logspace(0, np.log10(max_pix), num=n, endpoint=True,
                           base=10.0, dtype=float)
    # Vectorized equivalent of [x * max_pix / n for x in range(1, n + 1)].
    return np.linspace(max_pix / n, max_pix, num=n, dtype=float)
The South African Football Association (SAFA) has signed a kit deal with PUMA that will see Bafana Bafana wear gear from the German sportswear firm until 2018. Here is a first look at the South Africa PUMA 2011/12 home and away kits along with more details on the deal via a PUMA press release. PUMA is delighted to announce that it has entered into a new partnership with the South African Football Association to become the official technical supplier to the SAFA. Starting immediately and lasting beyond the next two FIFA World Cups, PUMA will provide the official playing kits for all South African teams including the National ‘A’, Youth and Women’s teams. The new PUMA South Africa kit was also launched in Johannesburg today, by South African captain Stephen Pienaar along with Bongani Khumalo, Itumeleng Khune and Katlego Mphela. The South African national team will debut the new PUMA kit during their next fixture scheduled for the 10th August (opponents to be confirmed). Featuring the very latest innovations of PUMA technology, the kit incorporates performance enhancing fabric through moisture wicking properties, mesh inserts on the side for improved ventilation and embossed fabric to enhance the aesthetic appearance. The shirt will also feature both the Protea and SAFA badges. In line with PUMA’s CSR commitment to Africa, all proceeds from replica home shirt sales will be donated to SOS and the Children’s Fund. In line with PUMA’s CSR commitment to Africa, all proceeds from replica home shirt sales of the launch kit, will be donated to the SOS Children Villages in South Africa, a cause designated by the South African Football Association. South Africa become the twelfth African international team to be outfitted by PUMA, following in the footsteps of African Cup of Nations champions Egypt, as well as Ghana, Ivory Coast, Cameroon, Algeria, Senegal, Morocco, Togo, Burkina Faso, Malawi and Namibia. Check out our Kits and Jerseys section for more brand new kits.
/**
 * Return the evaluation decision for the resource, subject, action, environment and contexts
 */
private Decision evaluate(
    Map<String, String> resource,
    Subject subject,
    String action,
    Set<Attribute> environment,
    List<AclRule> matchedRules
) {
    // Thin delegation: all policy logic lives in internalEvaluate.
    // NOTE(review): matchedRules appears to be an out-parameter collecting the
    // rules that matched -- confirm in internalEvaluate's implementation.
    return internalEvaluate(resource, subject, action, environment, matchedRules);
}
Genu Recurvatum Congenitum in a day old Nigerian female neonate: A case report and challenges in the management in a resource-poor country Abstract: Genu recurvatum congenitum (GRC) is a Greek phrase that literally translates to 'backward-bending of the knee that is noticed at birth'. It is a rare condition of unknown aetiology resulting in exaggerated knee hyperextension and marked limitation of flexion. GRC may be unilateral or bilateral. It may occur in isolation or associated with other anomalies such as congenital hip dislocation (CDH) or congenital talipes equinovarus deformity (CTEVD). Though rare, it is easy to diagnose and treatment commonly involves gentle manipulation and serial casting, especially if presentation is early. Unfortunately, late presentation or refusal of treatment is the case in resourcepoor countries such as Nigeria, due mainly to financial constraints and traditional beliefs. We present this rare case of a bilateral GRC in a day old Nigerian female neonate and also highlight the challenges in the management in a resource-poor country
package pasa.cbentley.swing.task;

/**
 * {@link Runnable} that terminates the JVM immediately with exit code 0.
 * "Hard" exit: no confirmation or Swing teardown is performed before
 * {@link System#exit(int)} is invoked (registered shutdown hooks still run).
 */
public class TaskExitHard implements Runnable {

   public void run() {
      System.exit(0);
   }
}
#include "printf_console.h" #include <stdio.h> #include <rbdl/Logging.h> void logError(const char* msg, const char* arg0, const char* arg1, const char* arg2) { LOG << msg << " " << arg0 << " " << arg1 << " " << arg2 << std::endl; } void logDebug(const char* msg, float v0, float v1) { LOG << msg << " " << v0 << " " << v1 << std::endl; }; void logDebug(const char* msg, const char* msg1, const char* arg1) { LOG << msg << " " << msg1 << " " << arg1 << std::endl; } void logInform(const char* msg, const char* arg0) { LOG << msg << " " << arg0 << std::endl; } void logWarn(const char* msg,int id, const char* arg0) { LOG << msg << " " << id << " " << arg0 << std::endl; }
def is_nsfw():
    """Command check allowing NSFW commands only in DMs or in guild text
    channels that are both marked NSFW on Discord and enabled in the
    ``NSFWSingle`` table for the guild.

    Returns a ``commands.check`` decorator for discord.py commands.
    """
    def pred(ctx):
        # DMs are always permitted.
        is_dm_channel = bool(isinstance(ctx.channel, discord.DMChannel))
        # Guild channels must carry Discord's own NSFW flag.
        is_nsfw_guild_channel = bool(isinstance(ctx.channel, discord.TextChannel) and ctx.channel.is_nsfw())
        if is_nsfw_guild_channel:
            with db_session:
                # NOTE(review): SQL built via str.format -- guild.id comes from
                # the library as an int so injection is unlikely here, but a
                # parameterized query would be safer; confirm the db API.
                return bool(db.get("SELECT `enabled` FROM `NSFWSingle` WHERE `guild_id` = '{}'".format(ctx.guild.id)))
        else:
            return is_dm_channel
    return commands.check(pred)
#include "mesinkata.h" //definisi state mesin kata boolean EndKata; Kata CKata; /* Primitif-primitif mesin kata */ void Ignore_Blank() /* Mengabaikan satu atau beberapa BLANK I.S. : CC sembarang F.S. : CC != BLANK atau CC == MARK */ { while (((CC==blank)||(CC=='\n'))&&(CC!=mark)) { ADV(); } } void STARTKATA() /* I.S. : CC sembarang F.S : Salah satu dari dua kondisi dibawah. 1. EndKata = true dan CC == Mark 2. EndKata = false, CKata adalah kata yang sudah diakuisisi, dan CC adalah satu karakter setelah karakter terakhir kata Keterangan: CC mengacu pada yang disebut pada mesinkarakter */ { START(); Ignore_Blank(); if (CC==mark) { EndKata = true; } else { EndKata = false; SalinKata(); } } void ADVKATA() /* I.S. : EndKata = false; CC adalah karakter sesudah karakter terakhir dari kata yg sudah diakuisisi F.S. : Jika CC == MARK, maka EndKata == true atau EndKata = false, CKata adalah kata terakhir yang sudah diakuisisi; CC karakter pertama sesudah karakter terakhir kata */ { Ignore_Blank(); if (CC==mark) { EndKata=true; } else { SalinKata(); } } void SalinKata() /* Mengakuisisi kata, menyimpan dalam CKata I.S. : CC adalah karakter pertama dari kata F.S. : CKata berisi kata yang sudah diakuisisi, jika karakternya melebihi NMax, sisa "kata" dibuang; CC == BLANK atau CC == MARK; CC adalah karakter sesudah karakter terakhir yang diakuisisi */ { //Kamus Lokal int i=0; //Algoritma for (;;) { CKata.TabKata[i]=CC; ADV(); if ((CC==mark)||(CC==blank)||(CC=='\n')) break; else i++; } CKata.Length=i+1; } /* Operasi Lain */ boolean IsKataSama(Kata K1, Kata K2) /* Mengembalikan true jika K1 = K2; dua kata dikatakan sama jika panjangnya sama dan urutan karakter yang menyusun kata juga sama. 
Sensitif terhadap uppercase dan lowercase */ { if (K1.Length!=K2.Length) return false; else { boolean found; found = true; int i=0; while ((i<K1.Length)&&(found)) { if (K1.TabKata[i]!=K2.TabKata[i]) found = false; else i++; } if (found) return true; else return false; } } int ConvertToBilangan(Kata CKata) { /* Mengembalikan nilai bilangan dari CKata */ int i; int total = 0; if (CKata.TabKata[0]=='-') { for (i=1; i<CKata.Length; i++) { if (i>1) total *= 10; total = total + (CKata.TabKata[i]-'0'); } total = total * (-1); } else { for (i=0; i<CKata.Length; i++) { if (i>0) total *= 10; total = total + (CKata.TabKata[i]-'0'); } } return total; } Kata CopyKata(Kata CKata) /* Copy CKata ke suatu variabel */ { Kata Temp; int i; for (i=0;i<CKata.Length;i++) { if(CKata.TabKata[i]=='_') { Temp.TabKata[i]=' '; } else { Temp.TabKata[i]=CKata.TabKata[i]; } } Temp.Length=CKata.Length; return Temp; } void PrintKata(Kata CKata) //Mencetak Kata { int i; for (i=0;i<CKata.Length;i++) { printf("%c",CKata.TabKata[i]); } }
-- | Test-suite entry point: runs the gear-tracker hspec specs.
-- (A stray "<reponame>" scaffolding line preceding the module header was
-- removed -- it was dataset/template residue, not valid Haskell.)
module Main where

import Spec.GT.Prelude

import qualified Spec.GT.DB

main :: IO ()
main = hspec $ do
  describe "Spec.GT.DB" $ do
    Spec.GT.DB.spec
import {BaseIfc} from "./BaseIfc" import {IfcPresentationLayerAssignment} from "./IfcPresentationLayerAssignment.g" import {IfcStyledItem} from "./IfcStyledItem.g" import {IfcDimensionCount} from "./IfcDimensionCount.g" import {IfcInteger} from "./IfcInteger.g" import {IfcCartesianPoint} from "./IfcCartesianPoint.g" import {IfcBSplineSurfaceForm} from "./IfcBSplineSurfaceForm.g" import {IfcLogical} from "./IfcLogical.g" import {IfcParameterValue} from "./IfcParameterValue.g" import {IfcKnotType} from "./IfcKnotType.g" import {IfcReal} from "./IfcReal.g" import {IfcBSplineSurfaceWithKnots} from "./IfcBSplineSurfaceWithKnots.g" /** * http://www.buildingsmart-tech.org/ifc/IFC4/final/html/link/ifcrationalbsplinesurfacewithknots.htm */ export class IfcRationalBSplineSurfaceWithKnots extends IfcBSplineSurfaceWithKnots { WeightsData : Array<Array<IfcReal>> get Weights() : Array<Array<IfcReal>>{throw "Derived property logic has not been implemented for Weights."} // derived set Weights(value : Array<Array<IfcReal>>){super.Weights = value} constructor(uDegree : IfcInteger, vDegree : IfcInteger, controlPointsList : Array<Array<IfcCartesianPoint>>, surfaceForm : IfcBSplineSurfaceForm, uClosed : IfcLogical, vClosed : IfcLogical, selfIntersect : IfcLogical, uMultiplicities : Array<IfcInteger>, vMultiplicities : Array<IfcInteger>, uKnots : Array<IfcParameterValue>, vKnots : Array<IfcParameterValue>, knotSpec : IfcKnotType, weightsData : Array<Array<IfcReal>>) { super(uDegree,vDegree,controlPointsList,surfaceForm,uClosed,vClosed,selfIntersect,uMultiplicities,vMultiplicities,uKnots,vKnots,knotSpec) this.WeightsData = weightsData } getStepParameters() : string { var parameters = new Array<string>(); parameters.push(BaseIfc.toStepValue(this.UDegree)) parameters.push(BaseIfc.toStepValue(this.VDegree)) parameters.push(BaseIfc.toStepValue(this.ControlPointsList)) parameters.push(BaseIfc.toStepValue(this.SurfaceForm)) parameters.push(BaseIfc.toStepValue(this.UClosed)) 
parameters.push(BaseIfc.toStepValue(this.VClosed)) parameters.push(BaseIfc.toStepValue(this.SelfIntersect)) parameters.push(BaseIfc.toStepValue(this.UMultiplicities)) parameters.push(BaseIfc.toStepValue(this.VMultiplicities)) parameters.push(BaseIfc.toStepValue(this.UKnots)) parameters.push(BaseIfc.toStepValue(this.VKnots)) parameters.push(BaseIfc.toStepValue(this.KnotSpec)) parameters.push(BaseIfc.toStepValue(this.WeightsData)) return parameters.join(); } }
<gh_stars>0
/*
 * Load and run static executables
 *
 * objcopy -I binary -O elf64-x86-64 -B i386 ./lib/linux/libscope.so ./lib/linux/libscope.o
 * gcc -Wall -g src/scope.c -ldl -lrt -o scope ./lib/linux/libscope.o
 */

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <string.h>
#include <sys/mman.h>
#include <elf.h>
#include <stddef.h>
#include <sys/wait.h>
#include <dlfcn.h>
#include <sys/utsname.h>
#include <limits.h>
#include <errno.h>

#include "fn.h"
#include "dbg.h"
#include "scopeelf.h"
#include "scopetypes.h"
#include "os.h"

#define DEVMODE 0
#define __NR_memfd_create 319
#define _MFD_CLOEXEC 0x0001U
#define SHM_NAME "libscope"
#define PARENT_PROC_NAME "start_scope"
#define GO_ENV_VAR "GODEBUG"
#define GO_ENV_SERVER_VALUE "http2server"
#define GO_ENV_CLIENT_VALUE "http2client"

/* Start/end of the embedded libscope.so image, provided by objcopy (see the
 * build command in the header comment). */
extern unsigned char _binary___lib_linux_libscope_so_start;
extern unsigned char _binary___lib_linux_libscope_so_end;

/* Bookkeeping for the extracted libscope.so image. */
typedef struct {
    char *path;       // filesystem path the library is visible at
    char *shm_name;   // POSIX shm object name (only when memfd is unavailable)
    int fd;           // fd holding the extracted library image
    int use_memfd;    // nonzero when memfd_create(2) is usable (kernel >= 3.17)
} libscope_info_t;

// Wrapper to call memfd_create syscall
static inline int _memfd_create(const char *name, unsigned int flags)
{
    return syscall(__NR_memfd_create, name, flags);
}

/* Prints usage, then delegates to libscope's __scope_main() for extended help.
 * Tries the already-loaded library first, falling back to dlopen of the
 * extracted image. Exits on any dl failure. */
static void
print_usage(char *prog, libscope_info_t *info, int argc, char **argv)
{
    void (*__scope_main)(void);
    void *handle = NULL;

    __scope_main = dlsym(RTLD_NEXT, "__scope_main");
    if (!__scope_main) {
        if ((handle = dlopen(info->path, RTLD_LAZY)) == NULL) {
            fprintf(stderr, "handle error: %s\n", dlerror());
            exit(EXIT_FAILURE);
        }
        __scope_main = dlsym(handle, "__scope_main");
        if (!__scope_main) {
            fprintf(stderr, "symbol error: %s from %s\n", dlerror(), info->path);
            exit(EXIT_FAILURE);
        }
    }
    printf("usage: %s command [args]\n", prog);
    if (argc == 2) {
        // Overwrite the single argument in place so __scope_main shows full help
        strncpy(argv[1], "all", strlen(argv[1]));
    }
    __scope_main();
}

/**
 * Checks if kernel version is >= 3.17
 * (the first kernel that supports memfd_create). Returns 1 if so, else 0.
 */
static int
check_kernel_version(void)
{
    struct utsname buffer;
    char *token;
    char *separator = ".";
    int val;

    if (uname(&buffer)) {
        return 0;
    }
    token = strtok(buffer.release, separator);
    val = atoi(token);
    if (val < 3) {
        return 0;
    } else if (val > 3){
        return 1;
    }

    token = strtok(NULL, separator);
    val = atoi(token);
    return (val < 17) ? 0 : 1;
}

/* Frees all resources tracked by *info_ptr (fd, shm object, strings, the
 * struct itself) and nulls the caller's pointer. Safe to call with NULL. */
static void
release_libscope(libscope_info_t **info_ptr) {
    if (!info_ptr || !*info_ptr) return;

    libscope_info_t *info = *info_ptr;

    if (info->fd != -1) close(info->fd);
    if (info->shm_name) {
        if (info->fd != -1) shm_unlink(info->shm_name);
        free(info->shm_name);
    }
    if (info->path) free(info->path);
    free(info);
    *info_ptr = NULL;
}

/* Extracts the embedded libscope.so into an anonymous memfd (preferred) or a
 * POSIX shm object (older kernels) and records a filesystem path it can be
 * dlopen'd / LD_PRELOAD'ed from. Returns NULL on failure. */
static libscope_info_t *
setup_libscope()
{
    libscope_info_t *info = NULL;
    int everything_successful = FALSE;

    if (!(info = calloc(1, sizeof(libscope_info_t)))) {
        perror("setup_libscope:calloc");
        goto err;
    }
    info->fd = -1;

    info->use_memfd = check_kernel_version();

    if (info->use_memfd) {
        info->fd = _memfd_create(SHM_NAME, _MFD_CLOEXEC);
    } else {
        if (asprintf(&info->shm_name, "%s%i", SHM_NAME, getpid()) == -1) {
            perror("setup_libscope:shm_name");
            info->shm_name = NULL; // failure leaves info->shm_name undefined
            goto err;
        }
        info->fd = shm_open(info->shm_name, O_RDWR | O_CREAT, S_IRWXU);
    }
    if (info->fd == -1) {
        perror(info->use_memfd ? "setup_libscope:memfd_create" : "setup_libscope:shm_open");
        goto err;
    }

    // Copy the embedded library image into the backing fd
    size_t libsize = (size_t) (&_binary___lib_linux_libscope_so_end - &_binary___lib_linux_libscope_so_start);
    if (write(info->fd, &_binary___lib_linux_libscope_so_start, libsize) != libsize) {
        perror("setup_libscope:write");
        goto err;
    }

    int rv;
    if (info->use_memfd) {
        rv = asprintf(&info->path, "/proc/%i/fd/%i", getpid(), info->fd);
    } else {
        rv = asprintf(&info->path, "/dev/shm/%s", info->shm_name);
    }
    if (rv == -1) {
        perror("setup_libscope:path");
        info->path = NULL; // failure leaves info->path undefined
        goto err;
    }

    /*
     * DEVMODE is here only to help with gdb. The debugger has
     * a problem reading symbols from a /proc pathname.
     * This is expected to be enabled only by developers and
     * only when using the debugger.
     */
#if DEVMODE == 1
    asprintf(&info->path, "./lib/linux/libscope.so");
    printf("LD_PRELOAD=%s\n", info->path);
#endif

    everything_successful = TRUE;

err:
    if (!everything_successful) release_libscope(&info);
    return info;
}

// If possible, we want to set GODEBUG=http2server=0,http2client=0
// This tells go not to upgrade to http2, which allows
// our http1 protocol capture stuff to do its thing.
// We consider this temporary, because when we support http2
// it will not be necessary.
static void
setGoHttpEnvVariable(void)
{
    char *cur_val = getenv(GO_ENV_VAR);

    // If GODEBUG isn't set, try to set it to http2server=0,http2client=0
    if (!cur_val) {
        if (setenv(GO_ENV_VAR, GO_ENV_SERVER_VALUE "=0," GO_ENV_CLIENT_VALUE "=0", 1)) {
            perror("setGoHttpEnvVariable:setenv");
        }
        return;
    }

    // GODEBUG is set.
    // If http2server wasn't specified, let's append ",http2server=0"
    if (!strstr(cur_val, GO_ENV_SERVER_VALUE)) {
        char *new_val = NULL;
        if ((asprintf(&new_val, "%s,%s=0", cur_val, GO_ENV_SERVER_VALUE) == -1)) {
            perror("setGoHttpEnvVariable:asprintf");
            return;
        }
        if (setenv(GO_ENV_VAR, new_val, 1)) {
            perror("setGoHttpEnvVariable:setenv");
        }
        if (new_val) free(new_val);
    }

    // Re-read: the server append above may have replaced the value
    cur_val = getenv(GO_ENV_VAR);

    // If http2client wasn't specified, let's append ",http2client=0"
    if (!strstr(cur_val, GO_ENV_CLIENT_VALUE)) {
        char *new_val = NULL;
        if ((asprintf(&new_val, "%s,%s=0", cur_val, GO_ENV_CLIENT_VALUE) == -1)) {
            perror("setGoHttpEnvVariable:asprintf");
            return;
        }
        if (setenv(GO_ENV_VAR, new_val, 1)) {
            perror("setGoHttpEnvVariable:setenv");
        }
        if (new_val) free(new_val);
    }
}

/* Entry point: resolves the target command, then either
 *   - dynamic target: forks and execs it with LD_PRELOAD=libscope.so, or
 *   - static target: dlopens libscope and hands control to its sys_exec(). */
int
main(int argc, char **argv, char **env)
{
    elf_buf_t *ebuf;
    int (*sys_exec)(elf_buf_t *, const char *, int, char **, char **);
    pid_t pid;
    void *handle = NULL;
    libscope_info_t *info;

    // Use dlsym to get addresses for everything in g_fn
    initFn();

    info = setup_libscope();
    if (!info) {
        fprintf(stderr, "%s:%d ERROR: unable to set up libscope\n", __FUNCTION__, __LINE__);
        exit(EXIT_FAILURE);
    }

    //check command line arguments
    char *scope_cmd = argv[0];
    if ((argc < 2) || ((argc == 2) && (strncmp(argv[1], "--help", 6) == 0))) {
        print_usage(scope_cmd, info, argc, argv);
        exit(EXIT_FAILURE);
    }

    char *inferior_command = getpath(argv[1]);
    if (!inferior_command) {
        fprintf(stderr,"%s could not find or execute command `%s`. Exiting.\n", scope_cmd, argv[1]);
        exit(EXIT_FAILURE);
    }
    argv[1] = inferior_command; // update args with resolved inferior_command

    // before processing, try to set SCOPE_EXEC_PATH for execve
    char *sep;
    if (osGetExePath(&sep) == 0) {
        // doesn't overwrite an existing env var if already set
        setenv("SCOPE_EXEC_PATH", sep, 0);
        free(sep);
    }

    ebuf = getElf(inferior_command);

    // Tell libscope what kind of app it's attaching to
    if (ebuf && (is_go(ebuf->buf) == TRUE)) {
        if (setenv("SCOPE_APP_TYPE", "go", 1) == -1) {
            perror("setenv");
            goto err;
        }
        setGoHttpEnvVariable();
    } else {
        if (setenv("SCOPE_APP_TYPE", "native", 1) == -1) {
            perror("setenv");
            goto err;
        }
    }

    if ((ebuf == NULL) || (!is_static(ebuf->buf))) {
        // Dynamic executable path
        if (ebuf) freeElf(ebuf->buf, ebuf->len);

        if (setenv("LD_PRELOAD", info->path, 0) == -1) {
            perror("setenv");
            goto err;
        }
        if (setenv("SCOPE_EXEC_TYPE", "dynamic", 1) == -1) {
            perror("setenv");
            goto err;
        }

        pid = fork();
        if (pid == -1) {
            perror("fork");
            goto err;
        } else if (pid > 0) {
            int status;
            int ret;
            // Parent: wait for the child, retrying if interrupted by a signal
            do {
                ret = waitpid(pid, &status, 0);
            } while (ret == -1 && errno == EINTR);

            release_libscope(&info);

            // Propagate the child's exit status
            if (WIFEXITED(status)) exit(WEXITSTATUS(status));
            exit(EXIT_FAILURE);
        } else {
            // Child: exec the target with LD_PRELOAD in effect
            execve(inferior_command, &argv[1], environ);
            perror("execve");
            goto err;
        }
    }

    if (setenv("SCOPE_EXEC_TYPE", "static", 1) == -1) {
        perror("setenv");
        goto err;
    }

    // Static executable path
    // LD_PRELOAD is useless for a static target; re-exec ourselves without it
    if (getenv("LD_PRELOAD") != NULL) {
        unsetenv("LD_PRELOAD");
        execve(argv[0], argv, environ);
    }

    program_invocation_short_name = basename(argv[1]);

    if ((handle = dlopen(info->path, RTLD_LAZY)) == NULL) {
        fprintf(stderr, "%s\n", dlerror());
        goto err;
    }

    sys_exec = dlsym(handle, "sys_exec");
    if (!sys_exec) {
        fprintf(stderr, "%s\n", dlerror());
        goto err;
    }

    release_libscope(&info);

    // Hand control to libscope; it loads and runs the static target
    sys_exec(ebuf, inferior_command, argc, argv, env);

    return 0;
err:
    release_libscope(&info);
    // NOTE(review): success paths release ebuf via freeElf(ebuf->buf, ebuf->len);
    // confirm a plain free() is sufficient here and doesn't leak the mapping.
    if (ebuf) free(ebuf);
    exit(EXIT_FAILURE);
}
/**
 * A command that runs a Runnable continuously. Has no end condition as-is;
 * either subclass it or use {@link Command#withTimeout(double)} or
 * {@link Command#withInterrupt(BooleanSupplier)} to give it one. If you only
 * wish to execute a Runnable once, use {@link InstantCommand}.
 */
public class RunCommand extends CommandBase {
    // The Runnable invoked once per scheduler iteration while this command runs.
    protected final Runnable m_toRun;

    /**
     * Creates a new RunCommand. The Runnable will be run continuously until the
     * command ends. Does not run when disabled.
     *
     * @param toRun
     *            the Runnable to run; must be non-null
     * @param requirements
     *            the subsystems to require
     */
    public RunCommand(Runnable toRun, Subsystem... requirements) {
        m_toRun = requireNonNullParam(toRun, "toRun", "RunCommand");
        addRequirements(requirements);
    }

    @Override
    public void execute() {
        // Called repeatedly by the scheduler; simply delegates to the Runnable.
        m_toRun.run();
    }
}
<reponame>tato123/adobexd-serializer<filename>src/node/Text.ts import GraphicsNode from './GraphicsNode' import { Text } from '../@types/scenegraph' import { SerializedNode, JsonSerializer } from './SerializedNode' export default class XDTextWrapper implements JsonSerializer { private xdNode: Text private parentNodeWrapper: GraphicsNode constructor(xdNode: Text) { this.xdNode = xdNode this.parentNodeWrapper = new GraphicsNode(this.xdNode) } toJSON(): SerializedNode { let result = {} if (this.parentNodeWrapper) { result = this.parentNodeWrapper.toJSON() } const node = this.xdNode return { type: node.constructor.name, text: node.text, styleRanges: node.styleRanges, flipY: node.flipY, textAlign: node.textAlign, lineSpacing: node.lineSpacing, areaBox: node.areaBox, clippedByArea: node.clippedByArea, ...result } } }
def kin_slices(w, es, ef, e, trace, norm):
    """Cut a 2-D trace into fixed-width energy slices.

    Parameters
    ----------
    w : float
        Width of each energy slice (also the stride between slice centers).
    es, ef : float
        Start and end of the sliced energy range.
    e : numpy.ndarray
        1-D array of energy values, one per row of ``trace``.
    trace : numpy.ndarray
        2-D data array whose rows correspond to entries of ``e``.
        The input is NOT modified (the original mutated it in place).
    norm : bool
        If True, each slice is scaled so its maximum equals 1.

    Returns
    -------
    collections.OrderedDict
        Maps each slice start ``c`` in ``arange(es, ef, w)`` to the
        baseline-subtracted rows with ``0 < e - c < w``, summed over rows.
    """
    # Baseline-subtract the median WITHOUT mutating the caller's array.
    trace = trace - np.percentile(trace, 50)

    slices = OrderedDict()
    for c in np.arange(es, ef, w):
        mask = (e - c < w) & (e - c > 0)
        # NOTE: the original also built de = e[1:] - e[-1:] (likely meant
        # e[:-1]) and an unused `sl` via np.compress(mask, de); besides being
        # dead code, the length mismatch could raise when the mask selected
        # the last row. Both removed.
        slices[c] = np.compress(mask, trace, axis=0).sum(axis=0)
        if norm:
            slices[c] = slices[c] / np.max(slices[c])
    return slices
/**
 * Convenience method to return the next resource.
 *
 * @return the next File wrapped as a FileResource, configured with this
 *         iterator's project.
 * @throws NoSuchElementException if no resources remain.
 */
public FileResource nextResource() {
    if (!hasNext()) {
        throw new NoSuchElementException();
    }
    final FileResource resource = new FileResource(basedir, files[pos]);
    pos++;
    resource.setProject(project);
    return resource;
}
<gh_stars>0
package fastly

import "testing"

// TestClient_Dictionaries exercises the full CRUD lifecycle of a service
// dictionary (create, list, get, update/rename, delete) against recorded
// API fixtures under dictionaries/.
func TestClient_Dictionaries(t *testing.T) {
	t.Parallel()

	fixtureBase := "dictionaries/"

	testVersion := createTestVersion(t, fixtureBase+"version", testServiceID)

	// Create
	var err error
	var d *Dictionary
	record(t, fixtureBase+"create", func(c *Client) {
		d, err = c.CreateDictionary(&CreateDictionaryInput{
			Service: testServiceID,
			Version: testVersion.Number,
			Name:    "test_dictionary",
		})
	})
	if err != nil {
		t.Fatal(err)
	}

	// Ensure deleted
	// (both names are removed because the test renames the dictionary below)
	defer func() {
		record(t, fixtureBase+"cleanup", func(c *Client) {
			c.DeleteDictionary(&DeleteDictionaryInput{
				Service: testServiceID,
				Version: testVersion.Number,
				Name:    "test_dictionary",
			})

			c.DeleteDictionary(&DeleteDictionaryInput{
				Service: testServiceID,
				Version: testVersion.Number,
				Name:    "new_test_dictionary",
			})
		})
	}()

	if d.Name != "test_dictionary" {
		t.Errorf("bad name: %q", d.Name)
	}

	// List
	var ds []*Dictionary
	record(t, fixtureBase+"list", func(c *Client) {
		ds, err = c.ListDictionaries(&ListDictionariesInput{
			Service: testServiceID,
			Version: testVersion.Number,
		})
	})
	if err != nil {
		t.Fatal(err)
	}
	if len(ds) < 1 {
		t.Errorf("bad dictionaries: %v", ds)
	}

	// Get
	var nd *Dictionary
	record(t, fixtureBase+"get", func(c *Client) {
		nd, err = c.GetDictionary(&GetDictionaryInput{
			Service: testServiceID,
			Version: testVersion.Number,
			Name:    "test_dictionary",
		})
	})
	if err != nil {
		t.Fatal(err)
	}
	if d.Name != nd.Name {
		t.Errorf("bad name: %q (%q)", d.Name, nd.Name)
	}

	// Update
	var ud *Dictionary
	record(t, fixtureBase+"update", func(c *Client) {
		ud, err = c.UpdateDictionary(&UpdateDictionaryInput{
			Service: testServiceID,
			Version: testVersion.Number,
			Name:    "test_dictionary",
			NewName: "new_test_dictionary",
		})
	})
	if err != nil {
		t.Fatal(err)
	}
	if ud.Name != "new_test_dictionary" {
		t.Errorf("bad name: %q", ud.Name)
	}

	// Delete
	record(t, fixtureBase+"delete", func(c *Client) {
		err = c.DeleteDictionary(&DeleteDictionaryInput{
			Service: testServiceID,
			Version: testVersion.Number,
			Name:    "new_test_dictionary",
		})
	})
	if err != nil {
		t.Fatal(err)
	}
}

// TestClient_ListDictionaries_validation checks input validation: both the
// service ID and a nonzero version are required.
func TestClient_ListDictionaries_validation(t *testing.T) {
	var err error
	_, err = testClient.ListDictionaries(&ListDictionariesInput{
		Service: "",
	})
	if err != ErrMissingService {
		t.Errorf("bad error: %s", err)
	}

	_, err = testClient.ListDictionaries(&ListDictionariesInput{
		Service: "foo",
		Version: 0,
	})
	if err != ErrMissingVersion {
		t.Errorf("bad error: %s", err)
	}
}

// TestClient_CreateDictionary_validation checks that service and version are
// required to create a dictionary.
func TestClient_CreateDictionary_validation(t *testing.T) {
	var err error
	_, err = testClient.CreateDictionary(&CreateDictionaryInput{
		Service: "",
	})
	if err != ErrMissingService {
		t.Errorf("bad error: %s", err)
	}

	_, err = testClient.CreateDictionary(&CreateDictionaryInput{
		Service: "foo",
		Version: 0,
	})
	if err != ErrMissingVersion {
		t.Errorf("bad error: %s", err)
	}
}

// TestClient_GetDictionary_validation checks that service, version, and name
// are all required to fetch a dictionary.
func TestClient_GetDictionary_validation(t *testing.T) {
	var err error
	_, err = testClient.GetDictionary(&GetDictionaryInput{
		Service: "",
	})
	if err != ErrMissingService {
		t.Errorf("bad error: %s", err)
	}

	_, err = testClient.GetDictionary(&GetDictionaryInput{
		Service: "foo",
		Version: 0,
	})
	if err != ErrMissingVersion {
		t.Errorf("bad error: %s", err)
	}

	_, err = testClient.GetDictionary(&GetDictionaryInput{
		Service: "foo",
		Version: 1,
		Name:    "",
	})
	if err != ErrMissingName {
		t.Errorf("bad error: %s", err)
	}
}

// TestClient_UpdateDictionary_validation checks that service, version, and
// name are all required to update a dictionary.
func TestClient_UpdateDictionary_validation(t *testing.T) {
	var err error
	_, err = testClient.UpdateDictionary(&UpdateDictionaryInput{
		Service: "",
	})
	if err != ErrMissingService {
		t.Errorf("bad error: %s", err)
	}

	_, err = testClient.UpdateDictionary(&UpdateDictionaryInput{
		Service: "foo",
		Version: 0,
	})
	if err != ErrMissingVersion {
		t.Errorf("bad error: %s", err)
	}

	_, err = testClient.UpdateDictionary(&UpdateDictionaryInput{
		Service: "foo",
		Version: 1,
		Name:    "",
	})
	if err != ErrMissingName {
		t.Errorf("bad error: %s", err)
	}
}

// TestClient_DeleteDictionary_validation checks that service, version, and
// name are all required to delete a dictionary.
func TestClient_DeleteDictionary_validation(t *testing.T) {
	var err error
	err = testClient.DeleteDictionary(&DeleteDictionaryInput{
		Service: "",
	})
	if err != ErrMissingService {
		t.Errorf("bad error: %s", err)
	}

	err = testClient.DeleteDictionary(&DeleteDictionaryInput{
		Service: "foo",
		Version: 0,
	})
	if err != ErrMissingVersion {
		t.Errorf("bad error: %s", err)
	}

	err = testClient.DeleteDictionary(&DeleteDictionaryInput{
		Service: "foo",
		Version: 1,
		Name:    "",
	})
	if err != ErrMissingName {
		t.Errorf("bad error: %s", err)
	}
}
package mobilesecurityservice import ( "fmt" mobilesecurityservicev1alpha1 "github.com/aerogear/mobile-security-service-operator/pkg/apis/mobilesecurityservice/v1alpha1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // Returns the Service with the properties used to setup/config the Mobile Security Service Project func (r *ReconcileMobileSecurityService) buildServiceAccount(mss *mobilesecurityservicev1alpha1.MobileSecurityService) *corev1.ServiceAccount { return &corev1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{ Name: mss.Name, Namespace: mss.Namespace, Annotations: buildOauthAnnotationWithRoute(mss.Spec.RouteName), }, } } // buildOauthAnnotationWithRoute return required annotations for the Oauth setup func buildOauthAnnotationWithRoute(route string) map[string]string { annotation := fmt.Sprintf("{\"kind\":\"OAuthRedirectReference\",\"apiVersion\":\"v1\",\"reference\":{\"kind\":\"Route\",\"name\":\"%s\"}}", route) return map[string]string{ "serviceaccounts.openshift.io/oauth-redirectreference.mobile-security-service-app": annotation, } }
Jeremy Corbyn’s leading allies have given up on him and are frantically searching for a young new standard-bearer to see off the centre-left candidates. The man himself is desperate to go. And a leadership election this year seems a racing certainty. Those, it would seem, are the obvious conclusions from the Sunday Times‘ gripping splash, today, which declares “Secret Labour search for Corbyn heir”. It is dramatic stuff, fuelled by a torrid week over Brexit – and wildly off-key. Whatever one’s view of Corbyn – and this animates our readers like almost nothing else – it is clear he is not going anywhere. Sometimes the reality of politics is not quite as dramatic as the headline. Yes, the core of the story – that the Labour Party has carried out polling on some of its most senior MPs, including Angela Rayner and Rebecca Long-Bailey – is accurate but that adds up to rather less than the sum of its parts. So pause for a moment and look at what has been reported. The paper says that Labour asked BMG Research, its regular pollster, to carry out a survey on Corbyn and members of the shadow cabinet and report back on public perception. Senior sources I spoke to this morning are not disputing that. What is also clear, however, is that political parties frequently conduct polling on their top people. Having worked for a Labour MP I know this is not so much an open secret as simply common sense. If you want to win a general election – which requires the public to back a whole team, rather than just a would-be prime minister or president – then it seems reasonable to find out what the public thinks about the people in the top 10 or 15 jobs.
Rayner, the shadow education secretary, and Long-Bailey, newly promoted from John McDonnell’s deputy to shadow business secretary, would easily fall into this category while Clive Lewis – another one touted as a potential leadership candidate, even if he resists the label – was also part of the top team until he resigned this week over the three-line whip on Brexit. Cast your mind back seven or eight years. The Tories carried out focus groups on their senior shadow cabinet members in the run-up to the 2010 general election. They were presumably told that key allies of David Cameron – like George Osborne – were seen as competent and realistic about the health of the public finances, even if not terribly nice. You and I would, of course, feel differently about the Tories’ former top two, but there is no need to rehearse that debate here. Focus groups and polling are inevitable part of politics, as Tom Watson acknowledged today on the Andrew Marr Show. The mere fact of them having been used does not further weaken Corbyn. The Labour leader does have his own problems but he shows no sign of wanting to step down this year. It was fewer than six months ago that he won a second leadership election with an overwhelming mandate and pledged to take Labour into government. The other part of the Sunday Times‘ story is also correct, even if the conclusions to most Labour people seem rather different. With the party at a severely weakened level in the national opinion polls, there is a huge challenge awaiting Corbyn if he wants to deliver in Downing Street the policies which he has held so dear for 30 years. But that does not mean he is about to walk away from the job. No leader wants to go out on a low and with Labour seats on the line in Copeland and Stoke-on-Trent Central – despite two strong local candidates – that could be the risk of any departure in the spring. So there is no reason to expect one. 
Win or lose in the by-elections, and it is still possible we can win both, Corbyn is set to be leader throughout 2017. So are Labour “road-testing” future leadership candidates? No – anyone internal who seriously wanted to change the person at the top would do things a bit more subtly. The view of Corbyn in the parliamentary party – amongst both friends and foes – is little changed in recent months. The chance of another contest this year is close to zero. Sometimes the true drama comes from what doesn’t happen.
def create_agent_done(self):
    """Mark agent creation as finished and try to kick off the first step.

    Must be called at most once; a second call trips the assertion.
    """
    assert not self.flag_create_agent_done
    self.flag_create_agent_done = True
    # Stepping may depend on other setup flags as well; _try_start_step
    # decides whether everything is ready.
    self._try_start_step()
import math


def judge(Y):
    """Classify a 4-digit string as a date format.

    The first two digits are read as ``former`` and the last two as
    ``latter``. A pair is a valid month when it is in 1..12.

    Returns one of:
      'AMBIGUOUS' - both pairs are valid months (MM/YY and YY/MM both fit)
      'MMYY'      - only the first pair is a valid month
      'YYMM'      - only the last pair is a valid month
      'NA'        - neither pair is a valid month
    """
    year = int(Y)
    former = math.floor(year / 100)
    latter = year % 100
    former_is_month = 1 <= former <= 12
    latter_is_month = 1 <= latter <= 12
    if former_is_month and latter_is_month:
        return "AMBIGUOUS"
    if former_is_month:
        return "MMYY"
    if latter_is_month:
        return "YYMM"
    return "NA"


if __name__ == "__main__":
    # Guard the I/O so importing this module does not block on stdin
    # (the original read input at module level).
    S = input()
    print(judge(S))
<gh_stars>0
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

// Implements a (safe) marker trait for each listed type.
macro_rules! impl_marker_for {
    ($traitname:ident, $($ty:ty)*) => {
        $( impl $traitname for $ty { } )*
    }
}

// Implements a (safe) marker trait for arrays [T; N] of each listed length,
// for any element type that already implements the trait.
// (Pre-const-generics pattern: each length needs its own impl.)
macro_rules! impl_marker_for_array {
    ($traitname:ident, $($N:expr)+) => {
        $( impl<T: $traitname> $traitname for [T; $N] { } )+
    }
}

// Same as impl_marker_for, but for unsafe marker traits.
macro_rules! impl_unsafe_marker_for {
    ($traitname:ident, $($ty:ty)*) => {
        $( unsafe impl $traitname for $ty { } )*
    }
}

// Same as impl_marker_for_array, but for unsafe marker traits.
macro_rules! impl_unsafe_marker_for_array {
    ($traitname:ident, $($N:expr)+) => {
        $( unsafe impl<T: $traitname> $traitname for [T; $N] { } )+
    }
}

/// Marker trait for types whose equality can be decided by comparing their
/// bytewise representations.
///
/// A type may implement `BytewiseEquality` only if all of its components
/// implement `BytewiseEquality`.
pub trait BytewiseEquality { }

impl_marker_for!(BytewiseEquality,
                 u8 i8 u16 i16 u32 i32 u64 i64 usize isize char bool);

impl<T: BytewiseEquality> BytewiseEquality for [T] { }

impl_marker_for_array! {BytewiseEquality,
    0 1 2 3 4 5 6 7 8 9
    10 11 12 13 14 15 16 17 18 19
    20 21 22 23 24 25 26 27 28 29
    30 31 32 33 34 35 36 37 38 39
    40 41 42 43 44 45 46 47 48 49
    50 51 52 53 54 55 56 57 58 59
    60 61 62 63 64
}

/// Marker trait declaring that a structure occupies contiguous memory.
///
/// This is required for SGX-related operations, e.g. crypto-related
/// computations. Many of these APIs require the input data to reside in
/// a contiguous area of memory inside the enclave. Implement this trait
/// as a marker on any data structure that will be fed into SGX APIs.
pub unsafe trait ContiguousMemory { }

impl_unsafe_marker_for!(ContiguousMemory,
                        u8 i8 u16 i16 u32 i32 u64 i64 usize isize char bool);

unsafe impl<T: ContiguousMemory> ContiguousMemory for [T] { }

impl_unsafe_marker_for_array! {ContiguousMemory,
    0 1 2 3 4 5 6 7 8 9
    10 11 12 13 14 15 16 17 18 19
    20 21 22 23 24 25 26 27 28 29
    30 31 32 33 34 35 36 37 38 39
    40 41 42 43 44 45 46 47 48 49
    50 51 52 53 54 55 56 57 58 59
    60 61 62 63 64
}

/* Negative impls kept for reference: pointers and references are explicitly
   NOT ContiguousMemory (negative impls are not stable).
impl<T: ?Sized> !ContiguousMemory for * const T {}
impl<T: ?Sized> !ContiguousMemory for * mut T {}
impl<'a, T: 'a + ?Sized> !ContiguousMemory for &'a T {}
impl<'a, T: 'a + ?Sized> !ContiguousMemory for &'a mut T {}
*/
# This program gets three names from the user
# and writes them to a file.

def main():
    """Prompt for three friends' names and save them to friends.txt."""
    print('Enter the names of three friends.')

    # Collect the three names with the same numbered prompts.
    names = []
    for number in range(1, 4):
        names.append(input('Friend #{}: '.format(number)))

    # Write one name per line; the with-block closes the file automatically.
    with open('friends.txt', 'w') as myfile:
        for name in names:
            myfile.write(name + '\n')

    print('The names were written to friends.txt.')

# Call the main function.
main()
<gh_stars>10-100
import { RcTextField, RcTextFieldProps } from '@ringcentral/juno';
import classnames from 'classnames';
import React, { Component } from 'react';

import { bindDebounce } from '../../../../lib/bindDebounce';
import { bindNextPropsUpdate } from '../../../../lib/bindNextPropsUpdate';

import styles from '../styles.scss';

// Props: an onChange callback plus all RcTextField props except `ref`.
type LogFieldsInputProps = {
  onChange: (...args: any[]) => any;
} & Omit<RcTextFieldProps, 'ref'>;

// Local state mirrors the input value so typing stays responsive while
// onChange notifications are debounced.
type LogFieldsInputState = {
  value: any;
};

/**
 * A controlled-ish text field for log entry forms: keeps the typed value in
 * local state for immediate feedback and debounces onChange by 500ms.
 * Numeric fields (`type === 'number'`) report numbers instead of strings.
 */
export class LogFieldsInput extends Component<
  LogFieldsInputProps,
  LogFieldsInputState
> {
  static defaultProps: Partial<LogFieldsInputProps> = {
    type: 'text',
    required: false,
    placeholder: 'no label',
    value: undefined,
    multiline: false,
  };

  // Syncs selected props into state when new props arrive (see lifecycle below).
  checkPropsUpdate = bindNextPropsUpdate(this);

  // Debounced dispatcher used to throttle onChange notifications (500ms).
  debounce = bindDebounce(this, 500);

  constructor(props: LogFieldsInputProps) {
    super(props);
    const { value } = props;
    this.state = {
      value: value || '',
    };
  }

  // Deprecated lifecycle kept deliberately; keeps state.value in sync with
  // the incoming `value` prop.
  // eslint-disable-next-line react/no-deprecated
  componentWillReceiveProps(nextProps) {
    this.checkPropsUpdate(nextProps, 'value');
  }

  // Updates local state immediately, then notifies the parent after the
  // debounce window so rapid typing produces a single onChange.
  updateValue(
    value: string | number,
    onChange: LogFieldsInputProps['onChange'],
  ) {
    this.setState({ value });
    this.debounce(() => onChange(value));
  }

  render() {
    const { onChange, required, error, type, ...rest } = this.props;
    const { value } = this.state;
    const styleRequired = required ? styles.isRequired : null;
    return (
      <div className={classnames(styleRequired, styles.commonStyle)}>
        <RcTextField
          {...rest}
          type={type}
          required={required}
          error={error}
          value={value}
          gutterBottom
          onChange={(e) =>
            this.updateValue(
              // Convert non-empty numeric input to a number; an empty string
              // stays a string so the field can be cleared.
              type === 'number' && e.target.value !== ''
                ? Number(e.target.value)
                : e.target.value,
              onChange,
            )
          }
          fullWidth
          clearBtn={false}
        />
      </div>
    );
  }
}
/**
 * This class is used to test browser capabilities builder classes
 */
public class CapabilitiesBuildersTest {

    @Test(groups = "unit")
    public void testGeckoDriverCaps() {
        DesiredCapabilities caps = new FireFoxCapabilitiesBuilder().createCapabilities();
        // Compare by value, not reference: '==' on a boxed Boolean only passes
        // when the capability happens to hold the canonical Boolean.TRUE
        // instance, so a semantically-true Boolean could fail the old check.
        assertTrue(Boolean.TRUE.equals(caps.getCapability(FirefoxDriver.MARIONETTE)));
    }
}
// Custom lower trunc store for v4i8 vectors, since it is promoted to v4i16. static SDValue LowerTruncateVectorStore(SDLoc DL, StoreSDNode *ST, EVT VT, EVT MemVT, SelectionDAG &DAG) { assert(VT.isVector() && "VT should be a vector type"); assert(MemVT == MVT::v4i8 && VT == MVT::v4i16); SDValue Value = ST->getValue(); xtn v0.8b, v0.8h str s0, [x0] SDValue Undef = DAG.getUNDEF(MVT::i16); SDValue UndefVec = DAG.getBuildVector(MVT::v4i16, DL, {Undef, Undef, Undef, Undef}); SDValue TruncExt = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v8i16, Value, UndefVec); SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::v8i8, TruncExt); Trunc = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Trunc); SDValue ExtractTrunc = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, Trunc, DAG.getConstant(0, DL, MVT::i64)); return DAG.getStore(ST->getChain(), DL, ExtractTrunc, ST->getBasePtr(), ST->getMemOperand()); }
64 SHARES Facebook Twitter Linkedin Reddit VorpX, the highly anticipated utility that adds Oculus Rift support to many existing games, is finally available to the public. The program makes it easy to enjoy your existing game library in virtual reality. VorpX is a stereoscopic 3D driver for DirectX 9, 10, and 11 games that adapts many existing games for virtual reality gaming with the Oculus Rift. This includes two modes of 3D for many titles, head tracking, and a number of smart VR-specific enhancements to make it easy to enjoy games in VR even though they weren’t designed with the Rift in mind. Over the last few months, Paul and I have been providing feedback to vorpX lead developer Ralf Ostertag. Today we’re happy to share with you that the vorpX Beta is ready for the public. “We’re pleased to announce that the first public beta of vorpX, our 3D driver for head mounted displays, is finally available. Many thanks to those who were patiently waiting for quite some time now, we’re glad you stayed with us. Much work has been done to provide you with what we believe already is the best option to play some of your favorite games on your Oculus Rift development kit. Although this is still a beta release, we’re confident that vorpX can provide you many hours of fun. We’re looking forward to your feedback and are eager to improve vorpX further based on your thoughts and experiences,” said Ostertag. Buy vorpX Now Z-buffer 3D and Geometry 3D Modes VorpX is capable of rendering many games in two different 3D modes. The first of which is Z-buffer which is the highest performing of the two modes as it doesn’t require rending of two distinct views. This provides a 3D effect without a heavy impact on your system — great for when you want to keep framerates high and latency low for smooth headtracking! Geometry 3D is ‘true’ 3D which uses two independent views to create an accurate 3D scene which has the most pronounced effect of depth and parallax. 
However, Geometry 3D is approximately twice as demanding of PC performance compared to the Z-buffer mode. Note: Not all vorpX compatible games support both 3D modes Edge Peek and VR Keys Edge Peek is a much needed and well implemented feature for virtual reality gaming on the Oculus Rift. Because vorpX is adapting existing games not made for VR for the Oculus Rift, you’ll find that some games have important HUD elements on the edges of the screen. These can be difficult to see given that the game’s view is warped to provide a wide field of view in the Oculus Rift HMD. Edge Peek makes it easy to look at any edge of the screen before snapping back into a proper forward view. Just click in the mouse-wheel and use your head to look toward an edge. Click again to snap back to the normal view. Works great for games like Halo where shields and radar are off in the corners of the screen! VR Keys is a universal in-game menu with easy access buttons which can be bound to key presses. This makes it easy to access complicated keyboard shortcuts without having to fumble around blind with the keyboard. For instance, in Skyrim it can be used to quickly pull up the Map or Rest menu. Image Zooming (AKA Big Screen Mode) Image Zoom is another great feature which lets you zoom away from the rendered view. This is useful for zooming out to view cinematics which weren’t designed for the Rift and would otherwise have you feeling uncomfortably close. I like to call it Big Screen Mode because you can opt to zoom out and play games as though you are in front of a huge virtual screen instead of pressed directly up against it. This makes sense for many non-FPS games like Bastion* and League of Legends* where you want to feel like you are floating above the game rather than trying to emulate a wide FoV FPS scene. You can use Image Zoom from the in-game menu (delete key) or adjust it on the fly with Shift+Mousewheel (quick adjustment may not work in all games). 
*not officially supported by vorpX but have been shown to work Tons of Settings VorpX is incredibly feature rich, with lots of settings to let you configure each game to perfection. You’ll definitely want to hit the Delete key on your keyboard once you launch a game with vorpX to have a look at the myriad of options. Playing with the settings to get the experience right is going to be part of using vorpX, but thankfully there’s lots of options which means you can toggle things to your precise liking. Settings include the 3D reconstruction (eye separation, depth weighting, and focal distance), various aspect ratio adjustments, FoV augmentation, chromatic aberration adjustment, Image Zoom, head tracking sensitivity, and more. Support for 90 Games, Partial Support for Hundreds More VorpX supports a wide range of modern games. Some highlights: Alan Wake Batman Arkham City Battlefield 3 BioShock Infinite Borderlands 2 Call of Duty: Black Ops 2 Dirt 3 Far Cry 3 GTA IV Mass Effect, 2, 3 Skyrim World of Warcraft See a list of compatible games here. Game testing is done manually; there may be some changes and additions made to the list in the future. There are hundreds of additional titles that may not support 3D modes, but vorpX will do its best to make them compatible anyway by adding head tracking and adapting the view for use in the Oculus Rift. Buy vorpX Now After using the vorpX alpha at various points in development for the last few months, we’re really impressed with what developer Ralf Ostertag has come up with, and excited for the larger community to get their hands on the vorpX beta. We believe that vorpX is the best way to play your favorite games in virtual reality on the Oculus Rift! Full disclosure: Road to VR participated in testing the vorpX alpha and has an affiliate agreement with vorpX on sales through our site.
Calculations for plane-parallel ion chambers in 60Co beams using the EGSnrc Monte Carlo code. The EGSnrc Monte Carlo simulation system is used to obtain, for 10 plane-parallel ionization chambers in 60Co beams, the correction factors Kcomp and Pwall that account for the nonequivalence of the chamber wall material to the buildup cap and the phantom material, respectively. A more robust calculation method has been used compared to that used in previous works. A minor conceptual error related to the axial nonuniformity correction factor, Kan, has been identified and shown to have an effect of about 0.2%. The assumption that Pwall in-phantom is numerically equal to Kcomp calculated for a water buildup cap is shown to be accurate to better than 0.06%, thereby justifying the use of Kcomp calculations which are much more efficient. The effect on the calculated dose to the air in the cavity of the particle production threshold and transport energies used in the simulations is studied. Uncertainties in the calculated correction factors due to uncertainties in the photon and electron cross-section data are studied. They are 0.14% and 0.24%, respectively (1 standard deviation), for Kcomp factors. The uncertainties on Kwall factors are 0.03% from photon cross-section uncertainties and negligible from electron cross-section uncertainties. A comparison with previous EGS4/PRESTA calculations shows that present results are systematically higher by an average of 0.8%, ranging from 0.4% up to 1.4%. The present results are in better agreement with reported experimental values.
/**
 * Regression test for forward memberOf deletion: verifies that effective
 * (transitive) memberships are recalculated correctly when immediate
 * members are deleted from, and re-added to, a group that feeds into
 * aggregate groups.
 *
 * @author blair christensen.
 * @version $Id: TestMemberOf1.java,v 1.2 2009-08-12 12:44:45 shilen Exp $
 * @since   1.0
 */
public class TestMemberOf1 extends GrouperTest {

  private static final Log LOG = GrouperUtil.getLog(TestMemberOf1.class);

  public TestMemberOf1(String name) {
    super(name);
  }

  public void testForwardMemberOfDeletion() {
    LOG.info("testForwardMemberOfDeletion");
    try {
      R r = R.populateRegistry(0, 0, 10);
      Stem ns = r.ns;                                  // qsuob
      Stem nsA = ns.addChildStem("a", "a");            // qsuob:faculties
      Stem nsA_A = nsA.addChildStem("artf", "artf");   // qsuob:faculties:artf
      nsA.addChildStem("mvsf", "mvsf");                // qsuob:faculties:mvsf
      nsA.addChildStem("scif", "scif");                // qsuob:faculties:scif
      nsA.addChildStem("engf", "engf");                // qsuob:faculties:engf
      // One staff group per faculty plus the two aggregate groups.
      Group gA_A = nsA_A.addChildGroup("artf", "artf");
      Group gA_B = nsA_A.addChildGroup("mvsf", "mvsf");
      Group gA_C = nsA_A.addChildGroup("scif", "scif");
      Group gA_D = nsA_A.addChildGroup("engf", "engf");
      Group gALL = ns.addChildGroup("all", "all");     // qsuob:all
      Group gAAS = ns.addChildGroup("aas", "aas");     // qsuob:all_academic_staff
      // Ten test subjects.
      Subject iawi = r.getSubject("a");
      Subject iata = r.getSubject("b");
      Subject kewi = r.getSubject("c");
      Subject keta = r.getSubject("d");
      Subject mawi = r.getSubject("e");
      Subject mata = r.getSubject("f");
      Subject fiwi = r.getSubject("g");
      Subject fita = r.getSubject("h");
      Subject jowi = r.getSubject("i");
      Subject jota = r.getSubject("j");
      // Immediate memberships: two subjects in gA_A, four in gA_B,
      // two each in gA_C and gA_D.
      gA_A.addMember(iawi);
      gA_A.addMember(iata);
      gA_B.addMember(kewi);
      gA_B.addMember(keta);
      gA_B.addMember(mawi);
      gA_B.addMember(mata);
      gA_C.addMember(fiwi);
      gA_C.addMember(fita);
      gA_D.addMember(jowi);
      gA_D.addMember(jota);
      // gAAS -> gALL, then each faculty group -> gAAS; every subject above
      // therefore also gains effective memberships in gAAS and in gALL.
      gALL.addMember( gAAS.toSubject() );
      gAAS.addMember( gA_A.toSubject() );
      gAAS.addMember( gA_D.toSubject() );
      gAAS.addMember( gA_C.toSubject() );
      gAAS.addMember( gA_B.toSubject() );
      // Setup now complete
      T.getMemberships(gA_A, 2);
      T.getMemberships(gA_B, 4);
      T.getMemberships(gA_C, 2);
      T.getMemberships(gA_D, 2);
      T.getMemberships(gALL, 15);   // gAAS + 4 faculty groups + 10 subjects
      T.getMemberships(gAAS, 14);   // 4 faculty groups + 10 subjects
      // Now try to break things: delete every immediate member of gA_A and
      // verify the effective memberships disappear downstream.
      Iterator iter = gA_A.getImmediateMembers().iterator();
      while (iter.hasNext()) {
        Member m = (Member) iter.next();
        gA_A.deleteMember( m.getSubject() );
      }
      T.getMemberships(gA_A, 0);
      T.getMemberships(gA_B, 4);
      T.getMemberships(gA_C, 2);
      T.getMemberships(gA_D, 2);
      T.getMemberships(gALL, 13);   // lost iawi and iata
      T.getMemberships(gAAS, 12);   // lost iawi and iata
      // Re-add the two members one at a time; counts should recover.
      gA_A.addMember(iawi);
      T.getMemberships(gA_A, 1);
      T.getMemberships(gA_B, 4);
      T.getMemberships(gA_C, 2);
      T.getMemberships(gA_D, 2);
      T.getMemberships(gALL, 14);
      T.getMemberships(gAAS, 13);
      gA_A.addMember(iata);
      T.getMemberships(gA_A, 2);
      T.getMemberships(gA_B, 4);
      T.getMemberships(gA_C, 2);
      T.getMemberships(gA_D, 2);
      T.getMemberships(gALL, 15);
      T.getMemberships(gAAS, 14);
      // Finally delete the gAAS -> gALL membership; only gALL should lose
      // memberships, and the deletion must not raise.
      try {
        Membership ms = MembershipFinder.findImmediateMembership(
          r.rs, gALL, gAAS.toSubject(), Group.getDefaultList(), true
        );
        Assert.assertNotNull(ms);
        gALL.deleteMember( gAAS.toSubject() );
        Assert.assertTrue("finally, a hibernate exception wasn't thrown", true);
        T.getMemberships(gA_A, 2);
        T.getMemberships(gA_B, 4);
        T.getMemberships(gA_C, 2);
        T.getMemberships(gA_D, 2);
        T.getMemberships(gALL, 0);
        T.getMemberships(gAAS, 14);
      } catch (GrouperException eGRT) {
        Assert.fail("GrouperRuntimeException thrown: " + eGRT.getMessage());
      }
      r.rs.stop();
    } catch (Exception e) {
      T.e(e);
    }
  } // public void testForwardMemberOfDeletion()
}
package main import ( "context" "fmt" "io" "os" "strings" platform "github.com/influxdata/influxdb" "github.com/influxdata/influxdb/http" "github.com/influxdata/influxdb/kit/signals" "github.com/influxdata/influxdb/models" "github.com/influxdata/influxdb/write" "github.com/spf13/cobra" ) var writeFlags struct { OrgID string Org string BucketID string Bucket string Precision string } func cmdWrite(f *globalFlags, opt genericCLIOpts) *cobra.Command { cmd := opt.newCmd("write line protocol or @/path/to/points.txt", fluxWriteF) cmd.Args = cobra.ExactArgs(1) cmd.Short = "Write points to InfluxDB" cmd.Long = `Write a single line of line protocol to InfluxDB, or add an entire file specified with an @ prefix.` opts := flagOpts{ { DestP: &writeFlags.OrgID, Flag: "org-id", Desc: "The ID of the organization that owns the bucket", Persistent: true, }, { DestP: &writeFlags.Org, Flag: "org", Short: 'o', Desc: "The name of the organization that owns the bucket", Persistent: true, }, { DestP: &writeFlags.BucketID, Flag: "bucket-id", Desc: "The ID of destination bucket", Persistent: true, }, { DestP: &writeFlags.Bucket, Flag: "bucket", Short: 'b', EnvVar: "BUCKET_NAME", Desc: "The name of destination bucket", Persistent: true, }, { DestP: &writeFlags.Precision, Flag: "precision", Short: 'p', Default: "ns", Desc: "Precision of the timestamps of the lines", Persistent: true, }, } opts.mustRegister(cmd) return cmd } func fluxWriteF(cmd *cobra.Command, args []string) error { ctx := context.Background() if writeFlags.Org != "" && writeFlags.OrgID != "" { return fmt.Errorf("please specify one of org or org-id") } if writeFlags.Bucket != "" && writeFlags.BucketID != "" { return fmt.Errorf("please specify one of bucket or bucket-id") } if !models.ValidPrecision(writeFlags.Precision) { return fmt.Errorf("invalid precision") } bs, err := newBucketService() if err != nil { return err } var filter platform.BucketFilter if writeFlags.BucketID != "" { filter.ID, err = 
platform.IDFromString(writeFlags.BucketID) if err != nil { return fmt.Errorf("failed to decode bucket-id: %v", err) } } if writeFlags.Bucket != "" { filter.Name = &writeFlags.Bucket } if writeFlags.OrgID != "" { filter.OrganizationID, err = platform.IDFromString(writeFlags.OrgID) if err != nil { return fmt.Errorf("failed to decode org-id id: %v", err) } } if writeFlags.Org != "" { filter.Org = &writeFlags.Org } buckets, n, err := bs.FindBuckets(ctx, filter) if err != nil { return fmt.Errorf("failed to retrieve buckets: %v", err) } if n == 0 { if writeFlags.Bucket != "" { return fmt.Errorf("bucket %q was not found", writeFlags.Bucket) } if writeFlags.BucketID != "" { return fmt.Errorf("bucket with id %q does not exist", writeFlags.BucketID) } } bucketID, orgID := buckets[0].ID, buckets[0].OrgID var r io.Reader if args[0] == "-" { r = os.Stdin } else if len(args[0]) > 0 && args[0][0] == '@' { f, err := os.Open(args[0][1:]) if err != nil { return fmt.Errorf("failed to open %q: %v", args[0][1:], err) } defer f.Close() r = f } else { r = strings.NewReader(args[0]) } s := write.Batcher{ Service: &http.WriteService{ Addr: flags.host, Token: flags.token, Precision: writeFlags.Precision, InsecureSkipVerify: flags.skipVerify, }, } ctx = signals.WithStandardSignals(ctx) if err := s.Write(ctx, orgID, bucketID, r); err != nil && err != context.Canceled { return fmt.Errorf("failed to write data: %v", err) } return nil }
class TestBinanceAggTrades:
    """Test suite for the `aggTrades` Binance API endpoint."""

    def test_aggTrades_with_noParametrs(self, binance: Binance):
        """
        Test function without mandatory parameters.

        Expect `TypeError`.
        """
        # Import third-party modules
        from pytest import raises

        with raises(TypeError) as typeError:
            binance.public.aggTrades()

        # The callable-name prefix is omitted because Python >= 3.10
        # reports the qualified name ("Public.aggTrades() missing ...").
        err = "missing 1 required positional argument: 'symbol'"
        # BUG FIX: this check was a bare expression (`err in str(...)`)
        # whose result was silently discarded — it never asserted anything.
        assert err in str(typeError.value)

    def test_aggTrades_with_symbol(
        self, binance: Binance, aggTrades_keys: List[str], symbol: str
    ):
        """Test function with valid mandatory parameters."""
        aggTrades = binance.public.aggTrades(symbol)

        assert isinstance(aggTrades, list) \
            and aggTrades_keys == sorted(next(iter(aggTrades)).keys())

    def test_aggTrades_with_badSymbol(
        self, binance: Binance, badSymbol: str
    ):
        """
        Test function with invalid `symbol` and default optional parameters.

        Expect `RequestException`.
        """
        # Import standard modules
        from json import dumps

        # Import third-party modules
        from pytest import raises
        from requests.exceptions import RequestException

        with raises(RequestException) as badRequest:
            binance.public.aggTrades(badSymbol)

        err = dumps(
            {
                "url": (
                    "https://testnet.binance.vision/api/v3/"
                    f"aggTrades?symbol={badSymbol.upper()}"
                    "&limit=500"
                ),
                "status_code": 400,
                "reason": "Bad Request",
                "message": '{"code":-1121,"msg":"Invalid symbol."}'
            }
        )
        # BUG FIX: previously a bare, discarded expression.
        # NOTE(review): substring match assumes the client embeds this JSON
        # dump in the exception text — confirm against the wrapper code.
        assert err in str(badRequest.value)

    def test_aggTrades_with_validDataTimeTimestampParameters(
        self, binance: Binance, aggTrades_keys: List[str], symbol: str,
        limit: int
    ):
        """Test with valid `startTime` and `endTime` (datetime) parameters."""
        # Import standard modules
        from datetime import datetime, timedelta

        # Binance rejects windows longer than one hour; stay inside it.
        startTime = datetime.now() - timedelta(days=1)
        endTime = startTime + timedelta(hours=1)

        aggTrades = binance.public.aggTrades(
            symbol, startTime, endTime, limit=limit
        )

        assert isinstance(aggTrades, list) and (
            aggTrades == []
            or aggTrades_keys == sorted(next(iter(aggTrades)).keys())
        )

    def test_aggTrades_with_validStringTimestampParameters(
        self, binance: Binance, aggTrades_keys: List[str], symbol: str,
        limit: int
    ):
        """Test with valid `startTime` and `endTime` (str) parameters."""
        # Import standard modules
        from datetime import datetime, timedelta

        startTime = datetime.now() - timedelta(days=1)
        endTime = startTime + timedelta(hours=1)
        _startTime = str(startTime)
        _endTime = str(endTime)

        aggTrades = binance.public.aggTrades(
            symbol, _startTime, _endTime, limit=limit
        )

        assert isinstance(aggTrades, list) and (
            aggTrades == []
            or aggTrades_keys == sorted(next(iter(aggTrades)).keys())
        )

    def test_aggTrades_with_invalidTimestampParameters(
        self, binance: Binance, symbol: str, limit: int
    ):
        """
        Test with invalid `startTime` and `endTime` parameters.

        Expect `RequestException` due to `endTime` - `startTime` > 1h.
        """
        # Import standard modules
        from datetime import datetime, timedelta
        from json import dumps

        # Import third-party modules
        from pytest import raises
        from requests.exceptions import RequestException

        startTime = datetime.now() - timedelta(days=1)
        endTime = startTime + timedelta(hours=2)

        with raises(RequestException) as badRequest:
            binance.public.aggTrades(symbol, startTime, endTime, limit=limit)

        err = dumps(
            {
                "url": (
                    "https://testnet.binance.vision/api/v3/"
                    # BUG FIX: the expected URL previously named the
                    # `historicalTrades` endpoint although `aggTrades`
                    # is what is being called.
                    f"aggTrades?symbol={symbol.upper()}"
                    f"&limit={limit}"
                    f"&startTime={datetime.timestamp(startTime)}"
                    f"&endTime={datetime.timestamp(endTime)}"
                ),
                "status_code": 400,
                "reason": "Bad Request",
                "message": (
                    '{"code":-1127,"msg":"More than 1 hours between '
                    'startTime and endTime."}'
                )
            }
        )
        # BUG FIX: previously a bare, discarded expression.
        assert err in str(badRequest.value)

    def test_aggTrades_with_validFromIdParameter(
        self, binance: Binance, aggTrades_keys: List[str], symbol: str,
        limit: int
    ):
        """Test function with valid parameters."""
        aggTrades = binance.public.aggTrades(symbol, fromId=1, limit=limit)

        assert isinstance(aggTrades, list) \
            and aggTrades_keys == sorted(next(iter(aggTrades)).keys())

    def test_aggTrades_with_invalidFromIdParameter(
        self, binance: Binance, symbol: str, limit: int
    ):
        """
        Test with invalid `fromId` parameter.

        Expect `RequestException`.
        """
        # Import standard modules
        from json import dumps

        # Import third-party modules
        from pytest import raises
        from requests.exceptions import RequestException

        with raises(RequestException) as badRequest:
            # BUG FIX: the call used a hard-coded `limit=10` while the
            # expected URL below interpolates the `limit` fixture.
            binance.public.aggTrades(symbol, fromId=-1, limit=limit)

        err = dumps(
            {
                "url": (
                    "https://testnet.binance.vision/api/v3/"
                    # BUG FIX: endpoint name corrected (was historicalTrades).
                    f"aggTrades?symbol={symbol.upper()}"
                    f"&limit={limit}&fromId=-1"
                ),
                "status_code": 400,
                "reason": "Bad Request",
                "message": (
                    '{"code":-1100,"msg":"Illegal characters found in '
                    "parameter 'fromId'; legal range is '^[0-9]{1,20}"
                    '$\'."}'
                )
            }
        )
        # BUG FIX: previously a bare, discarded expression.
        assert err in str(badRequest.value)
def to_n(x, n, cnt):
    """Return the base-`n` digits of `x`, most significant first,
    as exactly `cnt` digits (zero-padded / truncated to that width)."""
    digits = []
    for _ in range(cnt):
        x, remainder = divmod(x, n)
        digits.append(remainder)
    digits.reverse()
    return digits


from sys import stdin


def main():
    """Convert a 1-based index into its bijective base-26 lowercase name
    (1 -> 'a', 26 -> 'z', 27 -> 'aa', ...), like spreadsheet columns."""
    readline = stdin.readline
    n = int(readline())
    letters = [chr(code) for code in range(97, 97 + 26)]
    # Find the name length: subtract the count of all shorter names.
    block, width = 26, 1
    while n > block:
        n -= block
        block *= 26
        width += 1
    # Remaining rank is 0-based within the names of this length.
    n -= 1
    print("".join(letters[d] for d in to_n(n, 26, width)))


if __name__ == "__main__":
    main()
def calc_Ct(nhvecs):
    """Second-order (P2) autocorrelation of bond vectors, per residue.

    nhvecs: array shaped (replicates, frames, residues, 3); the last axis
    is dotted, so vectors are assumed unit-length (TODO confirm with caller).

    Returns (Ct, dCt), each shaped (frames//2, residues): the replicate-mean
    P2 autocorrelation at each lag, and its spread across replicates.
    """
    shape = nhvecs.shape
    n_reps = shape[0]
    n_lags = int(shape[1] / 2)
    n_res = shape[2]

    Ct = np.zeros((n_lags, n_res), dtype=nhvecs.dtype)
    dCt = np.zeros((n_lags, n_res), dtype=nhvecs.dtype)

    for lag in range(1, 1 + n_lags):
        n_pairs = shape[1] - lag
        # Dot product of each vector with itself `lag` frames later,
        # pushed through the P2 Legendre polynomial (3x^2 - 1)/2.
        dots = np.einsum(
            'ijkl,ijkl->ijk', nhvecs[:, :-lag, ...], nhvecs[:, lag:, ...]
        )
        p2 = -0.5 + 1.5 * np.square(dots)
        # Average over the time axis within each replicate.
        per_rep = np.einsum('ijk->ik', p2) / n_pairs
        Ct[lag - 1] = np.mean(per_rep, axis=0)
        # NOTE(review): original normalisation is std / (sqrt(N) - 1);
        # confirm this was intended rather than std / sqrt(N - 1).
        dCt[lag - 1] = np.std(per_rep, axis=0) / (np.sqrt(n_reps) - 1.0)

    return Ct, dCt
11 December 2015 WASHINGTON DC (11 December 2015) – At the end of a 10-day mission to the United States, in which the expert group’s delegation, comprised of Eleonora Zielinska, Frances Raday and Alda Facio held meetings in Washington DC and visited the states of Alabama, Oregon and Texas, Frances Raday delivered the following statement: We want to express our sincere appreciation to the Government of the United States for having invited us to conduct this country visit. We are grateful to all our interlocutors, officials at the Federal and state levels and members of civil society, including women’s organisations, practitioners and individual women who shared their experiences with us. US Women in Global Context In its greatly appreciated invitation to our expert group, the United States opened the door to a frank interchange regarding both good practices and gaps in US women’s enjoyment of international human rights. We acknowledge the United States’ commitment to liberty, so well represented by the Statue of Liberty which symbolizes both womanhood and freedom. Nevertheless, in global context, US women do not take their rightful place as citizens of the world’s leading economy, which has one of the highest rates of per capita income. In the US, women fall behind international standards as regards their public and political representation, their economic and social rights and their health and safety protections. In 2010 and 2015, in the framework of its Universal Periodic Review, the US government committed to ratify the Convention on the Elimination of All Forms of Discrimination Against Women (CEDAW) but this commitment has not yet been implemented. Resistance to ratification of CEDAW reflects the opposition of a powerful sector of society to the Convention’s formulation of women’s international human right to equality. 
This political resistance has also consistently blocked efforts to pass an Equal Rights Amendment, which would entrench women’s right to equality in the US Constitution. We strongly urge ratification of CEDAW and adoption in the Constitution of women’s right to equality and non-discrimination as defined in the Convention. The US is one of only seven countries which have not ratified CEDAW. Even in the absence of ratification of CEDAW, many of its standards are entrenched in the Universal Declaration of Human Rights, the International Covenant on Civil and Political Rights(1) and in customary international law, and are hence binding on the US. Nevertheless, we are of the unreserved opinion that ratification of CEDAW is crucial, on both the domestic and the global levels, in order to confirm the US commitment to substantive equality for women in all spheres of life. At the domestic level, ratification is essential in order to provide all US women with rights and protections guaranteed under CEDAW. There is a myth that women already enjoy all these rights and protections under US law. However, there are missing rights and protections such as universal paid maternity leave, accessible reproductive health care and equal opportunity in standing for political election. We welcome the Cities for CEDAW initiatives which have started a process of incorporating CEDAW principles at the local level. This not only has intrinsic value for the women and men in those cities but it also serves to demonstrate the sustainability of women’s international human rights standards in the US context and can act as a lever for their further expansion. 
As regards resistance to CEDAW, our visit is particularly timely at a moment when the political rhetoric of some of the candidates for the Presidency in the upcoming elections has included unprecedented hostile stereotyping of women; when there are increasingly restrictive legislative measures in some states and violent attacks to prevent women’s access to exercise of their reproductive rights; and when there is an increase in the rate of women living in poverty, a persistent wage gap and increasingly precarious employment. Public and political life Four out of 15 members of cabinet are women. Women hold 19.4% of Congressional seats and their representation in state legislatures varies widely between 12.9% and 46.2%, with an average of 24.9%. This represents the highest level of legislative representation ever achieved by women in the United States. However, it still places the country at only 72 in global ranking. According to several interlocutors, the low level of representation for women in elected political posts is partly due to the greater difficulties women face in fundraising for campaigns. The role of money in political campaigns has grown significantly in the last decades and has drastically altered the landscape for elections and political participation. Women’s difficulty in fundraising is considered to result from complex causes. In particular, it is a result of exclusion from the predominantly male political networks that promote funding. It also results from underlying factors, such as negative stereotypes and biased presentation of women in the media, which adversely affect both women’s fundraising ability and their political candidacy. It is our view that the claim that women have lesser political ambition should not be regarded as unrelated to all the other factors since these must act as a rational deterrent to women’s political involvement. 
Our group regards the objective difficulties women face in raising campaign funding as a serious limitation on women’s opportunities for political representation and is deeply concerned that the removal of limits on campaign funding by the Supreme Court threatens to exacerbate this situation. A small number of states and cities have started to use programs for public financing of campaigns. One method, which its supporters call “Clean Money, Clean Elections”, gives each candidate who chooses to participate a fixed amount of money. Some interlocutors have pointed out that, in order to effectively give women an equal chance, competing private funding would have to be restricted. The Group encourages the efforts deployed by some voluntary organisations, such as Emily’s List, which promote women candidates. We would like to recall that, in accordance with international human rights law requirements, temporary special measures have been adopted in many democratic countries to ensure more adequate representation of women in politics. It is essential to ensure women’s continued access to the voting booth. At present women vote in higher percentages than men. Our group is concerned that changes in voter identification laws, such as those in Alabama, which increase bureaucratic requirements for voter identification, in particular problematic for women who change their name in marriage and reduce the number of voting centers, can make registration and voting less accessible for the poor, of whom a majority are women. A counter example and good practice is the state of Oregon which has facilitated voter registration and voting by mail. The courts play a central role in determining women’s ability to enjoy and exercise the rights accorded to them by law. There has been an increase in global awareness of the need for gender diversity and gender sensitive adjudication in judiciaries. 
In the US, the number of women justices has increased significantly, with women justices constituting three out of the nine Supreme Court justices(2) and women constituting over a third of the judges in federal and in state courts. While the presence of female judges does not guarantee judicial decision-making which is in substance gender-sensitive, the importance of diversity remains and the increase in the number of women judges is a positive trend. A severe problem for women litigants is in access to justice: free legal counsel and aid is not systematic for women living in poverty and when legal aid is partially provided to the most destitute, it is allegedly of very poor quality. Furthermore, the institution of the class action which has allowed large numbers of women to access compensation for discrimination or injury caused by powerful corporations, is being eroded. Our group recommends that the issue of substantive equality for women in court proceedings be revisited and reinvigorated and that access to justice for all, with adequate legal representation, be regarded as a civil right which, where necessary, should be publically funded. Economic and social life The global economic crisis created a serious challenge for the realization of economic and social rights in the United States and had a significantly adverse impact on women. As noted previously by other UN independent experts, the subprime mortgage market had disparately targeted the poor and, in particular, poor women. Subsequent government policies to boost the economy resulted in decreased expenditures on critical social protection programs, many of which are essential for women. These cuts had a disproportionately negative impact on minority women and single mothers. Women constitute nearly half of the US labour force, at a participation rate of 57.0%, and have been an important factor in driving the last decades of US economic growth. 
Furthermore, working mothers account for two thirds of household earnings. Our expert group is concerned that this crucial labour force participation by women is not accompanied by equal economic opportunity and we are shocked by the lack of mandatory standards for workplace accommodation for pregnant women, post-natal mothers and persons with care responsibilities, which are required in international human rights law. The gender wage gap is 21%, affecting women’s income throughout their lives, increasing women’s pension poverty. During the last decade little improvement has been made in closing it. Education increases women’s earnings but does not eliminate the gap, which is in fact largest for those with the highest levels of educational attainment. Women’s earnings differ considerably by ethnicity: Afro-American, Native American and Hispanic women have the lowest earnings. Despite the existence of the 1963 Equal Pay Act and Title VII, federal law does not require equal pay for work of equal value. However, California has now set a precedent with its 2015 California Fair Pay Act thus applying for the first time in US legislation the right to equal pay for work of equal value, which is required by international human rights law. Minimum wages have lost value as a living wage and the majority of minimum wage earners are women. Many are working full time and are the sole breadwinners for their families. Interlocutors regard the raising of the minimum wage to the level of a living wage as one of the most appropriate ways both to reduce the wage gap and reduce poverty amongst working women. The estimated 2.5 million domestic workers in the US are overwhelmingly women, frequently immigrant women many of whom are undocumented. We learned that many of these workers are vulnerable to verbal and physical abuse and to wage theft. We welcome the initiatives taken by the CSOs to improve conditions for domestic workers through a domestic workers’ bill of rights. 
The Group calls for the US to ratify the ILO Domestic Workers Convention and apply its provisions to ensure that domestic work is decent work. This does not capture the situation of other informal economy spaces, such as tip employees and seasonal jobs, where minimum conditions of employment should also be regulated. An additional severe problem is lack of enforcement. Wage theft, particularly in manufacturing, construction, and some service jobs impacts low-income and migrant workers, in particular undocumented women. Our group welcomes the recent increase in the budget of the Wage and Hour Division (U.S. Department of Labor) to support investigations and urges the government to increase supervision and to hold employers who violate the rights of these particularly vulnerable women workers to account. The 1993 Family and Medical Leave Act provides employees with the right to take unpaid, job-protected leave of twelve workweeks in a 12-month period, including for the birth of a child and to care for the newborn child within one year of birth; A significant number of employees are not covered by the Act because it is restricted, amongst other things, to employers who have more than 50 employees. However, even for those employees whom it covers, this provision falls far beneath international human rights standards, which require that maternity leave must be paid leave, with best practice being the provision of additional paid leave for fathers too. The US is one of only two countries in the world without a mandatory paid maternity leave for all women workers. As of 2014, paid maternity leave is provided by legislation in 3 states and in Federal government employment but it is only for six weeks, which is beneath the international minimum of 14 weeks. 
The Group regards it as vital that 14 weeks paid maternity leave for pregnancy birth and post natal related needs be guaranteed for all women workers in public and private employment and advises that best practice is payment from a social security fund which does not impose the direct burden on employers. Caring responsibilities fall primarily on women and women are reported to be far more likely than men to work only part time for family care reasons. Our expert group considers that the public budget should provide childcare, after-school and also elder and disabled facilities, which are affordable and accessible, to allow adults with care responsibilities, women and men, to work in full time employment. The percentage of women in poverty has increased over the past decade, from 12.1% to 14.5%, with a higher rate of poverty than men, affecting predominantly ethnic minorities, single parent families and older women. We suggest that both Federal and state governments address this problem urgently, by promoting employment for women, raising the minimum wage and eliminating the wage gap. Residual poverty should be addressed through the social security system and, given the country’s economic strength, there should be a policy of zero tolerance for the relegation of people to poverty. Poverty may result in homelessness which exposes women to higher levels of violence and vulnerability. Furthermore, interlocutors pointed out that victims of domestic violence are often numbered amongst the homeless, either because they have been evicted as a result of the violence or because they have fled from their violent partner. Solutions should include effective protection orders, increased availability of shelters, housing support, prioritizing eligibility particularly for single mother households and those facing heavy unpaid care burdens. We were informed that women own over one third of US firms, mainly in small and medium size businesses. 
These businesses face greater barriers in obtaining low cost capital from sources such as the Small Business Administration and clearly need support in order to achieve equal economic potential. However, the Small Businesses Administration has a stated goal of awarding only 5% of federal contracts to women-owned businesses. Furthermore, it is reported that this goal has never been reached in practice. International human rights law requires the establishment of social protection floors for core economic and social needs, provision for paid maternity leave, and the taking of all appropriate measures to produce de facto equality between all women and men in the labour market and in women-owned businesses. It is not for our group to suggest how these minimum standards should be achieved but only to point out how the United States, as economic leader of the world, lags behind in providing a safety net and a decent life for those of its women who do not have access to independent wealth, high salaries or economic support from a partner or family. Access to health care The group acknowledges the legislative and institutional(3) efforts deployed towards improving the enjoyment of women’s right to health. In particular, we welcome the steps taken by the current administration to expand access to health care for many uninsured citizens through the Affordable Care Act (ACA) passed in 2010(4), aimed at reducing the cost of health insurance and augmenting access to health care through the expansion of Medicaid, thus reducing the number of uninsured. ACA also established crucial protections against discriminatory practices consisting, inter alia, of charging women more for health insurance than men due to perceived higher costs associated with women’s reproductive health needs. 
Despite this considerable progress, there is still no universal health coverage in the country, and too many women pay the price, sometimes with their lives, of this considerable coverage gap with strong regional and ethnic disparities. According to the information we received, a third of the people living in poverty are still uninsured, affecting primarily women, in particular Afro-American and Hispanic women, preventing them from accessing basic preventive care and treatments. Furthermore, there are restrictions for immigrants, including immigrant women to access Medicaid during a five year waiting period and there is perpetual exclusion of undocumented migrants from any health care with the limited exception of emergency care. According to various stakeholders we met, Texas and Alabama do not allow lawfully residing immigrants to enrol in Medicaid even after completion of the federal waiting period of five years. We heard appalling testimonies of migrant women who were diagnosed with breast cancer but could not afford the appropriate treatment. The Group hopes that the Health Equity and Access under the Law (HEAL) for Immigrant Women and Families Act, currently before Congress, would expand access to health care for immigrants, particularly for women and children. Our Group also regretted to learn about the serious inadequacies of health care facilities to treat women with disabilities and calls for improvement. Reproductive health and rights Our expert Group is concerned at the increase in maternal mortality rates in the United States. According to UN reports, the ratio increased by 136% between 1990 and 2013. These numbers also hide distressing ethnic and socio-economic disparities. Afro-American women are nearly four times more at risk to die in childbirth. States with high poverty rates have a 77% higher maternal mortality rate. 
We strongly encourage concerned authorities to continue their efforts to identify the root causes and elaborate adequate policies to address this issue. Our group welcomes the ACA’s requirement that new private health plans cover contraceptive counselling, without out-of-pocket costs. However, we are concerned that the Supreme Court’s recognition, in Hobby Lobby, of an exemption on grounds of freedom of religion to opt out of contraceptive insurance for employees, will deprive some women of the possibility of accessing contraceptives. The Group would like to recall that, under international human rights law, states must take all appropriate measures to ensure women’s equal right to decide freely and responsibly on the number and spacing of their children which includes women’s right to access contraceptives. Our Group was informed that, being a prerogative of each state, adequate and quality sex education in schools was lacking in many curricula. We learned that in many schools, only abstinence was taught instead of providing objective and scientifically based sex education which is a key element of health policy. However, we were pleased to learn that in Oregon for instance, sex education is included in the school curriculum. Women’s reproductive rights include the constitutional guarantee under Roe v. Wade for a woman to be able to choose to terminate a pregnancy in the first trimester prior to viability. Although women have a legal right to terminate a pregnancy under federal law, ever increasing barriers are being created to prevent their access to abortion procedures. In 1976 the Hyde Amendment prevented federal Medicaid and Medicare coverage for the termination of a pregnancy except in cases where the life of the woman is in danger, in cases of rape and incest. Women’s access to reproductive health services has been truncated in some states by imposition of severe barriers. 
These take the form of unjustified medical procedures, such as compelling women to undergo ultrasounds or to endure groundless waiting periods, withholding of early pregnancy abortion medications, imposing burdensome conditions for the licensing of clinics, which have resulted in the closing of clinics across the country leaving women without geographical access to sexual and reproductive health services. These restrictions have a disproportionate and discriminatory impact on poor women. As we observed during our visit in the Rio Grande Valley, one of the poorest regions in the country, immigrant women face severe barriers in accessing sexual and reproductive health services. Furthermore, the marketplace insurance coverage for a safe and legal termination of pregnancy is far from universal. Thus, insurance will frequently not be available for women who wish to exercise their right to terminate their pregnancy in the first trimester. In addition, many of the clinics work in conditions of constant threats, harassment and vandalising, too often without any kind of protection measures by law enforcement officials, as we observed during our visits to Texas and Alabama. Alabama has a history of severe violence against abortion providers including the killing of Dr. David Gunn, in 1993, the first doctor to be murdered for performing abortions in the United States. The recent massacre in the Colorado family planning centre, which occurred just before the start of our visit, once again demonstrated the extreme hostility and danger faced by family planning providers and patients. 
We encourage the adoption of the Woman’s Health Protection Act, which would prohibit states from enacting restrictions on reproductive health care providers that interfere with women’s personal decision making and block access to safe and legal abortion services; and to require all hospitals to provide these services and insurance schemes to provide coverage for abortions to which women have a right under US law. We also encourage increased funding of clinics under the Title X Family Planning Program(5) in order to expand coverage for low-income women who lack insurance in order for them to access preventive care, including sexual and reproductive health services, and in order to reduce maternal mortality. We urge the authorities to combat the stigma attached to reproductive and sexual health care, which leads to violence, harassment and intimidation against those seeking or providing reproductive health care, and to investigate and prosecute violence or threats of violence. We wish to recall, as independent United Nations human rights experts have consistently stressed, that freedom of religion cannot be used to justify discrimination against women, and therefore should not be regarded as a justification for denying women’s right to enjoyment of the highest attainable standard of health. We encourage steps to reconcile U.S. laws on religious or conscience-based refusals to provide reproductive health care with international human rights law and to prohibit refusal to provide sexual and reproductive health services on grounds of religious freedom, where such refusal will effectively deny women immediate access to the health care to which they are entitled under both international human rights law and US law. Women’s safety Our group acknowledges the significant efforts deployed at the legislative(6) and institutional(7) levels to lower the prevalence of violence against women. 
We share the concerns expressed by the Special Rapporteur on violence against women in her report on her visit to the United States in 2011(8), regarding, inter alia, women in detention (over-incarceration, sexual violence, shackling of pregnant women, solitary confinement, lack of alternatives to custodial sentences for women with dependent children, inappropriate access to health care and inadequate re-entry programmes) as well as the alarming high rates of violence against Native-American women. We also share the concerns of the Special Rapporteur regarding the fatal consequences for women of lack of gun control, in particular in cases of domestic violence. Our group also deplores police brutality and the increased number of homicides of Afro-American women by the police. Our attention was also drawn to numerous cases of violence against LBTQ women, including homicides. We are extremely concerned at the situation of migrant women in detention centers, in particular women with minor children who are in prolonged detention. According to the information received, detention facilities are not complying with federal mandates and agency policies. We received allegations of women being subjected to an “expedited removal process” which in spite of the credible fear of return exception, results in the denial of many legitimate asylum claims. We also received allegations of sexual abuse and assault of women detainees, as well as mistreatment from CBP officials. Migrant women are often victims of trafficking and violence, including sexual violence during their journey to the United States. We regretted to learn that appropriate health care services are not systematically provided to these women in a timely manner despite the horrifying physical and emotional ordeals endured. We also received complaints of transgender women being mistreated in detention often wrongfully placed with males. 
The group encourages the establishment of accountability mechanisms and adequate gender sensitive training as well as the release of women and children from detention. The criminalization of women in prostitution in most of the country places them in a situation of injustice, vulnerability and stigma and is contrary to international human rights law. As the CEDAW Committee has systematically reiterated, women should not be criminalized for being in a situation of prostitution. Furthermore, as stipulated in the Palermo Protocol, efforts should be deployed to discourage the demand that fosters all forms of exploitation of women. Conclusions We want to reiterate our gratitude to the Government for inviting our expert group to conduct this visit and for engaging in frank and open dialogue. The current administration has demonstrated its will to cooperate with the international human rights mechanisms: the UPR, the Committee on the Elimination of Racial Discrimination, the Human Rights Committee and the Committee against Torture and has invited numerous special procedures. Nonetheless, we note that there is no proper institutional framework to follow-up on recommendations received. In this regard, we would like to insist on the importance of establishing an independent human rights institution in compliance with the Paris Principles (which should include a woman's rights commission). While the current administration has consistently expressed its unconditional support for the cause of women's equality, we regret to observe a gap between rhetoric and reality. As many stakeholders have underscored, the extreme polarisation of politics is profoundly affecting the ability of the Government to ratify CEDAW and to guarantee women’s human rights. We understand the complexity of federalism but this cannot be regarded as a justification for failure to secure these rights. Under the Vienna Declaration, these rights are universal, indivisible and inalienable. 
The United States, which is a leading state in formulating international human rights standards, is allowing its women to lag behind international human rights standards. Although there is a wide diversity in state law and practice, which makes it impossible to give a comprehensive report, we could discern an overall picture of women’s missing rights. While all women are the victims of these missing rights, women who are poor, belong to Native American, Afro-American and Hispanic ethnic minorities, migrant women, LBTQ women, women with disabilities and older women are disparately vulnerable. These preliminary findings and conclusions will be developed and presented in a more comprehensive report to the Human Rights Council in June 2016. ENDS The UN Working Group on the issue of discrimination against women in law and in practice was created by the Human Rights Council in 2011 to identify, promote and exchange views, in consultation with States and other actors, on good practices related to the elimination of laws that discriminate against women. The Group is also tasked with developing a dialogue with States and other actors on laws that have a discriminatory impact where women are concerned. The Working Group is composed of five independent experts: the Current Chair-Rapporteur Eleonora Zielinska (Poland), the Vice-Chair Alda Facio (Costa Rica) and the other members Emna Aouij (Tunisia), Kamala Chandrakirana (Indonesia), and Frances Raday (Israel/United Kingdom) Learn more, log on to: http://www.ohchr.org/EN/Issues/Women/WGWomen/Pages/WGWomenIndex.aspx Notes 1. Ratified in 1992 2. Highest rate of women in the Supreme Court in the history 3. Including the work undertaken by the Office of Women’s Health conducting gender mainstreamed research, the Family Violence Prevention and Services Division and the Office of Minority Health within the Department of Health and Human Services 4. 
Offers regulated insurance marketplaces, tax credits to purchase private insurance, and expanded access to government health insurance for the poorest 5. In the past 40 years, Title X family planning clinics have played a critical role in ensuring access to a broad range of family planning and related preventive health services for millions of low-income or uninsured individuals and others. 6. 1994 Violence Against Women Act and its subsequent reauthorizations, most recently in 2013 7. White House Office on Violence Against Women, Department of Justice Office on Violence Against Women, Department of Health Office on Violence Against Women 8. See her report A/HRC/17/26/Add.5
#include <stdio.h> #include <setjmp.h> #include <stdlib.h> #include <string.h> #include <stdarg.h> #include <fcntl.h> #include <unistd.h> #include <sys/stat.h> #include <time.h> #include "testcase.h" /** @cond */ struct TestCaseInfo { const char *name; T_TestCaseFunc func; T_DataDrivenTestCaseFunc dataFunc; }; #define MAX_TEST_CASES 1000 static struct TestCaseInfo testCases[MAX_TEST_CASES]; static int numTestCases = 0; static struct TestCaseInfo *currentTestCase = 0; static int testsRun = 0; static int failures = 0; static int successes = 0; static int skipped = 0; #define LONGJMP_FAIL 1 #define LONGJMP_SKIP 2 #define LONGJMP_SKIP_ALL 3 #define LONGJMP_EXPECT_FAIL 4 #define EXPECT_FAIL_NONE 0 #define EXPECT_FAIL_ABORT 1 #define EXPECT_FAIL_CONTINUE 2 static int expectFailMode = EXPECT_FAIL_NONE; static const char *expectFailMessage = 0; static const char *expectFailFile = 0; static long expectFailLine = 0; static jmp_buf jumpBack; FILE *test_output; static FILE *xml_output = NULL; static const char *suiteName = NULL; static char line_buffer[4096]; static int in_failure = 0; static struct timespec startTime; static struct timespec endTime; void T_RegisterTestCase(const char *name, T_TestCaseFunc func) { if (numTestCases < MAX_TEST_CASES) { testCases[numTestCases].name = name; testCases[numTestCases].func = func; testCases[numTestCases].dataFunc = 0; ++numTestCases; } } void T_RegisterDataDrivenTestCase(const char *name, T_DataDrivenTestCaseFunc func, T_TestCaseFunc populateFunc) { if (numTestCases < MAX_TEST_CASES) { testCases[numTestCases].name = name; testCases[numTestCases].func = populateFunc; testCases[numTestCases].dataFunc = func; ++numTestCases; } } void T_Fail(const char *msg, const char *file, long line) { T_printf_fail("failed: %s\n", msg); T_printf(" File: %s\n Line: %ld\n", file, line); T_printf_fail_end(); longjmp(jumpBack, LONGJMP_FAIL); } void T_ImmediateFail(void) { longjmp(jumpBack, LONGJMP_FAIL); } void T_Verify(int ok, const char *msg, const char 
*file, long line) { if (ok) { if (expectFailMode != EXPECT_FAIL_NONE) { T_printf_fail("verify succeeded: %s\n", msg); T_printf(" File: %s\n Line: %ld\n", file, line); T_printf("did not see expected failure: %s\n", expectFailMessage); T_printf(" File: %s\n Line: %ld\n", expectFailFile, expectFailLine); T_printf_fail_end(); longjmp(jumpBack, LONGJMP_FAIL); } } else { T_printf_fail("failed: %s\n", msg); T_printf(" File: %s\n Line: %ld\n", file, line); if (expectFailMode == EXPECT_FAIL_NONE) { T_printf_fail_end(); longjmp(jumpBack, LONGJMP_FAIL); } T_printf("expected failure: %s\n", expectFailMessage); T_printf(" File: %s\n Line: %ld\n", expectFailFile, expectFailLine); T_printf_fail_end(); if (expectFailMode == EXPECT_FAIL_ABORT) longjmp(jumpBack, LONGJMP_EXPECT_FAIL); } expectFailMode = EXPECT_FAIL_NONE; } void T_ComparePtr(int ok, void * actualValue, void * expectedValue, const char *actualString, const char *expectedString, const char *file, long line) { if (ok) { if (expectFailMode != EXPECT_FAIL_NONE) { T_printf_fail("compare succeeded: %s == %s\n", actualString, expectedString); T_printf(" File: %s\n Line: %ld\n", file, line); T_printf("did not see expected failure: %s\n", expectFailMessage); T_printf(" File: %s\n Line: %ld\n", expectFailFile, expectFailLine); T_printf_fail_end(); longjmp(jumpBack, LONGJMP_FAIL); } } else { T_printf_fail("failed: %p != %p\n", actualString, expectedString); T_printf(" Actual: ptr %p\n", actualValue); T_printf(" Expected: ptr %p\n", expectedValue); T_printf(" File: %s\n Line: %ld\n", file, line); if (expectFailMode == EXPECT_FAIL_NONE) { T_printf_fail_end(); longjmp(jumpBack, LONGJMP_FAIL); } T_printf("expected failure: %s\n", expectFailMessage); T_printf(" File: %s\n Line: %ld\n", expectFailFile, expectFailLine); T_printf_fail_end(); if (expectFailMode == EXPECT_FAIL_ABORT) longjmp(jumpBack, LONGJMP_EXPECT_FAIL); } expectFailMode = EXPECT_FAIL_NONE; } void T_CompareLong(int ok, long actualValue, long expectedValue, const char 
*actualString, const char *expectedString, const char *file, long line) { if (ok) { if (expectFailMode != EXPECT_FAIL_NONE) { T_printf_fail("compare succeeded: %s == %s\n", actualString, expectedString); T_printf(" File: %s\n Line: %ld\n", file, line); T_printf("did not see expected failure: %s\n", expectFailMessage); T_printf(" File: %s\n Line: %ld\n", expectFailFile, expectFailLine); T_printf_fail_end(); longjmp(jumpBack, LONGJMP_FAIL); } } else { T_printf_fail("failed: %s != %s\n", actualString, expectedString); T_printf(" Actual: %ld (0x%lX)\n", actualValue, actualValue); T_printf(" Expected: %ld (0x%lX)\n", expectedValue, expectedValue); T_printf(" File: %s\n Line: %ld\n", file, line); if (expectFailMode == EXPECT_FAIL_NONE) { T_printf_fail_end(); longjmp(jumpBack, LONGJMP_FAIL); } T_printf("expected failure: %s\n", expectFailMessage); T_printf(" File: %s\n Line: %ld\n", expectFailFile, expectFailLine); T_printf_fail_end(); if (expectFailMode == EXPECT_FAIL_ABORT) longjmp(jumpBack, LONGJMP_EXPECT_FAIL); } expectFailMode = EXPECT_FAIL_NONE; } void T_CompareDouble(int ok, double actualValue, double expectedValue, double epsilonValue, const char *actualString, const char *expectedString, const char *epsilonString, const char *file, long line) { if (ok) { if (expectFailMode != EXPECT_FAIL_NONE) { T_printf_fail("compare succeeded: %s == %s\n", actualString, expectedString); T_printf(" File: %s\n Line: %ld\n", file, line); T_printf("did not see expected failure: %s\n", expectFailMessage); T_printf(" File: %s\n Line: %ld\n", expectFailFile, expectFailLine); T_printf_fail_end(); longjmp(jumpBack, LONGJMP_FAIL); } } else { T_printf_fail("failed: %s != %s\n", actualString, expectedString); T_printf(" Actual: %g\n", actualValue); T_printf(" Expected: %g\n", expectedValue); T_printf(" Epsilon: %g\n", epsilonValue); T_printf(" File: %s\n Line: %ld\n", file, line); if (expectFailMode == EXPECT_FAIL_NONE) { T_printf_fail_end(); longjmp(jumpBack, LONGJMP_FAIL); } 
T_printf("expected failure: %s\n", expectFailMessage); T_printf(" File: %s\n Line: %ld\n", expectFailFile, expectFailLine); T_printf_fail_end(); if (expectFailMode == EXPECT_FAIL_ABORT) longjmp(jumpBack, LONGJMP_EXPECT_FAIL); } expectFailMode = EXPECT_FAIL_NONE; } void T_Skip(const char *message, const char *file, long line, int skipAll) { T_printf("skipped: %s\n", message); T_printf(" File: %s\n Line: %ld\n", file, line); longjmp(jumpBack, skipAll ? LONGJMP_SKIP_ALL : LONGJMP_SKIP); } void T_ExpectFail(const char *message, const char *file, long line, int mode) { expectFailMessage = message; expectFailFile = file; expectFailLine = line; expectFailMode = mode; } // Sets the name of the test suite based on the program name. static void T_SetSuiteName(const char *argv0) { size_t len = strlen(argv0); while (len > 0 && argv0[len - 1] != '/') --len; if (len >= 9 && !strncmp(argv0 + len, "unittest_", 9)) len += 9; suiteName = argv0 + len; } // Print to the XML output without quoting. static void T_printf_xml(const char *format, ...) { va_list va; if (!xml_output) return; va_start(va, format); vfprintf(xml_output, format, va); va_end(va); } // Print to the XML output with quoting. static void T_printf_xml_quoted(const char *value) { if (!value || !xml_output) return; while (*value != '\0') { int ch = *value++; if (ch == '"') fprintf(xml_output, "&quot;"); else if (ch == '&') fprintf(xml_output, "&amp;"); else if (ch == '<') fprintf(xml_output, "&lt;"); else if (ch == '>') fprintf(xml_output, "&gt;"); else putc(ch & 0xFF, xml_output); } } // Print to the XML output with quoting but ignore EOL's. 
static void T_printf_xml_quoted_no_eol(const char *value) { if (!value || !xml_output) return; while (*value != '\0') { int ch = *value++; if (ch == '"') fprintf(xml_output, "&quot;"); else if (ch == '&') fprintf(xml_output, "&amp;"); else if (ch == '<') fprintf(xml_output, "&lt;"); else if (ch == '>') fprintf(xml_output, "&gt;"); else if (ch != '\n' && ch != '\r') putc(ch & 0xFF, xml_output); } } // Prints the XML header for the test suite output. This is written twice, // once to reserve space for the overall results and again for the results. static void T_PrintXmlHeader(unsigned long ms) { T_printf_xml("<?xml version=\"1.0\" encoding=\"utf-8\"?>\n"); T_printf_xml("<testsuites failures=\"%010d\" tests=\"%010d\" time=\"%07lu.%03lu\">\n", failures, testsRun, ms / 1000, ms % 1000); T_printf_xml("<testsuite failures=\"%010d\" tests=\"%010d\" time=\"%07lu.%03lu\" name=\"", failures, testsRun, ms / 1000, ms % 1000); T_printf_xml_quoted(suiteName); T_printf_xml("\">\n"); } // Opens the XML log if necessary. static void T_OpenXmlOutput(void) { char *dir; // Look for the expected environment variable. xml_output = NULL; dir = getenv("XML_UNIT_TEST_OUTPUT_DIR"); if (!dir || *dir == '\0') dir=(char *)"."; else // Create the directory. We don't care if this fails because that // usually means that the directory already exists. We'll get a // better error from fopen() below if the directory isn't created. mkdir(dir, 0777); // Try to open the XML log file. Give up and log to stdout if we cannot open. snprintf(line_buffer, sizeof(line_buffer), "%s/%s.xml", dir, suiteName); printf("Test artifacts: %s\n", line_buffer); xml_output = fopen(line_buffer, "w"); if (!xml_output) { perror(line_buffer); return; } // Print the XML header the first time. T_PrintXmlHeader(0); // Start timing the test suite. clock_gettime(CLOCK_MONOTONIC, &startTime); } // Close the XML output log. static void T_CloseXmlOutput(void) { unsigned long ms; // Bail out if XML logging is not in use. 
if (!xml_output) return; // How long did the test suite take to execute? clock_gettime(CLOCK_MONOTONIC, &endTime); if (endTime.tv_sec == startTime.tv_sec) { ms = (endTime.tv_nsec - startTime.tv_nsec) / 1000000; } else { ms = (endTime.tv_sec - startTime.tv_sec - 1) * 1000; ms += (1000000000 - endTime.tv_nsec) / 1000000; ms += startTime.tv_nsec / 1000000; } // Rewind and print the XML header again. fseek(xml_output, 0, SEEK_SET); T_PrintXmlHeader(ms); fclose(xml_output); xml_output = NULL; } static int cmpTestCase(const void *e1, const void *e2) { return strcmp(((const struct TestCaseInfo *)e1)->name, ((const struct TestCaseInfo *)e2)->name); } int T_RunTestCases(int argc, char *argv[]) { int index, result; const char *prefix = 0; test_output = stdout; xml_output = NULL; T_SetSuiteName(argv[0]); for (index = 1; index < argc; ++index) { if (argv[index][0] != '-') prefix = argv[index]; } qsort(testCases, numTestCases, sizeof(struct TestCaseInfo), cmpTestCase); T_OpenXmlOutput(); for (index = 0; index < numTestCases; ++index) { struct TestCaseInfo *info = &(testCases[index]); if (prefix && strncmp(info->name, prefix, strlen(prefix)) != 0) continue; currentTestCase = info; if (info->dataFunc) { /* Data-driven test case */ (*(info->func))(); } else { /* Ordinary test case */ fprintf(test_output, "%s ... 
", info->name); fflush(test_output); T_printf_xml("<testcase classname=\""); T_printf_xml_quoted(suiteName); T_printf_xml("\" name=\""); T_printf_xml_quoted(info->name); T_printf_xml("\">\n"); expectFailMode = EXPECT_FAIL_NONE; if ((result = setjmp(jumpBack)) != 0) { if (result == LONGJMP_FAIL) ++failures; else if (result == LONGJMP_SKIP || result == LONGJMP_SKIP_ALL) ++skipped; else ++successes; // expect fail } else { (*(info->func))(); ++successes; fprintf(test_output, "ok\n"); } T_printf_xml("</testcase>\n"); ++testsRun; } } fprintf(test_output, "%d tests run, %d succeeded, %d skipped, %d failed\n", testsRun, successes, skipped, failures); T_printf_xml("</testsuite>\n"); T_printf_xml("</testsuites>\n"); T_CloseXmlOutput(); return failures ? 1 : 0; } int T_RunDataDrivenTestCase(const char *rowName, const void *data) { int result; int skipAll = 0; fprintf(test_output, "%s[%s] ... ", currentTestCase->name, (rowName ? rowName : "(null)")); fflush(test_output); T_printf_xml("<testcase classname=\""); T_printf_xml_quoted(suiteName); T_printf_xml("\" name=\""); T_printf_xml_quoted(currentTestCase->name); T_printf_xml("["); T_printf_xml_quoted(rowName ? 
rowName : "(null)"); T_printf_xml("]\">\n"); expectFailMode = EXPECT_FAIL_NONE; if ((result = setjmp(jumpBack)) != 0) { if (result == LONGJMP_FAIL) ++failures; else if (result == LONGJMP_SKIP || result == LONGJMP_SKIP_ALL) ++skipped; else ++successes; // expect fail skipAll = (result == LONGJMP_SKIP_ALL); } else { (*(currentTestCase->dataFunc))(data); ++successes; fprintf(test_output, "ok\n"); } T_printf_xml("</testcase>\n"); ++testsRun; return skipAll; } #define MAX_IO_PINS 256 struct TestPin { const char *name; int value; }; static struct TestPin pins[MAX_IO_PINS]; static int numPins = 0; int T_GetPin(const char *name) { int index; for (index = 0; index < numPins; ++index) { if (!strcmp(pins[index].name, name)) { /* Shift the pin to the front of the list to perform a simple "Most Recently Used" ordering for requests */ int value = pins[index].value; if (index > 0) { struct TestPin temp = pins[index]; pins[index] = pins[0]; pins[0] = temp; } return value; } } return 0; } void T_SetPin(const char *name, int value) { int index; for (index = 0; index < numPins; ++index) { if (!strcmp(pins[index].name, name)) { pins[index].value = value; return; } } if (numPins >= MAX_IO_PINS) T_FAIL("Too many I/O pins have been defined; increase MAX_IO_PINS in the test harness"); /* Shift the first pin to the end of the list and add this value at the front because it is highly likely to be requested by a call to T_GetPin in the near future */ pins[numPins++] = pins[0]; pins[0].name = name; pins[0].value = value; } static int cmpTestPin(const void *e1, const void *e2) { return strcmp(((const struct TestPin *)e1)->name, ((const struct TestPin *)e2)->name); } void T_DumpPins(void) { int index; /* Sort the list to make it easier to find a specific pin */ qsort(pins, numPins, sizeof(struct TestPin), cmpTestPin); /* Dump the list */ fprintf(test_output, "\n"); for (index = 0; index < numPins; ++index) { fprintf(test_output, "%-30s: %d", pins[index].name, pins[index].value); if 
(pins[index].value < 0 || pins[index].value >= 10) fprintf(test_output, " (0x%04X)\n", pins[index].value); else fprintf(test_output, "\n"); } } /** @endcond */ void T_printf(const char *format, ...) { va_list va; va_start(va, format); vsnprintf(line_buffer, sizeof(line_buffer), format, va); va_end(va); fputs(line_buffer, test_output); if (!in_failure) T_printf_xml("<system-out>\n"); T_printf_xml_quoted(line_buffer); if (!in_failure) T_printf_xml("</system-out>\n"); } void T_printf_fail(const char *format, ...) { va_list va; va_start(va, format); vsnprintf(line_buffer, sizeof(line_buffer), format, va); va_end(va); fputs(line_buffer, test_output); T_printf_xml("<failure message=\""); T_printf_xml_quoted_no_eol(line_buffer); T_printf_xml("\" type=\"failure\">"); T_printf_xml_quoted(line_buffer); in_failure = 1; } void T_printf_fail_end(void) { if (in_failure) { T_printf_xml("</failure>\n"); in_failure = 0; } }
package nepic.image;

import java.util.TreeMap;

/**
 * A simple string-keyed map of named constraints, kept sorted by key.
 * Values are untyped ({@code Object}); callers are responsible for casting
 * retrieved constraints to the expected type.
 *
 * @author <NAME>
 * @since AutoCBFinder_ALpha_v0-9_122212
 * @version AutoCBFinder_Alpha_v0-9-2013-01-29
 */
public class ConstraintMap {
    // Sorted so that toString() lists constraints in deterministic key order.
    TreeMap<String, Object> map;

    /** Creates an empty constraint map. */
    public ConstraintMap() {
        map = new TreeMap<String, Object>();
    }

    /**
     * Adds (or replaces) the constraint stored under the given key.
     *
     * @param key the constraint name
     * @param constraint the constraint value (may be null)
     * @return this map, to allow chained calls
     */
    public ConstraintMap addConstraint(String key, Object constraint) {
        map.put(key, constraint);
        return this;
    }

    /**
     * Returns the constraint stored under the given key, or {@code null}
     * if no such constraint exists.
     *
     * @param key the constraint name
     * @return the stored value, or null
     */
    public Object getConstraint(String key) {
        return map.get(key);
    }

    @Override
    public String toString() {
        // Use StringBuilder instead of repeated String concatenation in the
        // loop; the produced text is identical to the previous version.
        StringBuilder toReturn = new StringBuilder();
        for (String key : map.keySet()) {
            toReturn.append("[").append(key).append("\t\t").append(map.get(key)).append("]\n");
        }
        return toReturn.toString();
    }
}
<reponame>acorin64/COSI11_Trac
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.URL;
import java.net.URLConnection;
import java.nio.charset.Charset;
import java.util.Scanner;

import org.json.JSONObject;

// This holds all of the methods used in each game: weather guessing,
// two-player tic-tac-toe, a math quiz, a trivia quiz, a "love test",
// rock-paper-scissors, hangman, and a typing test.  Game state lives in
// the Static class; console input goes through TextIO/Scanner.
public class GameMethod {

    /**
     * This is a method which will connect to a website and return the content as a string.
     * You can get information about time in different timezones by connecting
     * to the URL http://worldtimeapi.org/api/, e.g.
     * myURL="http://worldtimeapi.org/api/timezones/America/New_York.txt"
     * gives info about the current time in New York
     */
    public static String getStringFromURL(String myURL) {
        StringBuilder sb = new StringBuilder();
        URLConnection urlConn = null;
        InputStreamReader in = null;
        try {
            URL url = new URL(myURL);
            urlConn = url.openConnection();
            if (urlConn != null)
                // 60-second read timeout so a hung server does not block forever.
                urlConn.setReadTimeout(60 * 1000);
            if (urlConn != null && urlConn.getInputStream() != null) {
                in = new InputStreamReader(urlConn.getInputStream(), Charset.defaultCharset());
                BufferedReader bufferedReader = new BufferedReader(in);
                if (bufferedReader != null) {
                    int cp;
                    // Read character by character until EOF.
                    while ((cp = bufferedReader.read()) != -1) {
                        sb.append((char) cp);
                    }
                    bufferedReader.close();
                }
            }
            // NOTE(review): if getInputStream() returned null above, 'in' is
            // still null here and this close() would NPE (caught below and
            // rewrapped as RuntimeException).
            in.close();
        } catch (Exception e) {
            throw new RuntimeException("Exception while calling URL:"+ myURL, e);
        }
        return sb.toString();
    }

    // Converts Kelvin to Fahrenheit using integer arithmetic.
    public static int k2f(int k){
        return (((k - 273) * 9/5) + 32);
    }

    /**
     * This gets the weather for a given zipcode...
     * Fetches the current temperature from OpenWeatherMap and lets the user
     * guess it.
     * NOTE(review): the 'zipcode' parameter is never used — the method
     * prompts the user for a ZIP code instead; confirm whether callers
     * expect the parameter to be honored.
     * NOTE(review): the API key is hard-coded here; consider moving it to
     * configuration.
     */
    public static void getWeather(String zipcode){
        System.out.print("Enter a ZIP code: ");
        Scanner scanner = new Scanner(System.in);
        String zip = scanner.next();
        String apiKey = "06d70799a9fcdfb5cffd48536349e502";
        String url = "https://api.openweathermap.org/data/2.5/weather?zip="+zip+",us"+"&appid="+apiKey;
        String json = getStringFromURL(url);
        JSONObject obj = new JSONObject(json);
        System.out.print("Guess the current temperature (°F): ");
        // OpenWeatherMap returns Kelvin by default; convert for the user.
        int tempK = obj.getJSONObject("main").getInt("temp");
        int tempF = k2f(tempK);
        int guess = scanner.nextInt();
        int difference = Math.abs(guess - tempF);
        if (difference == 0) {
            System.out.println("Correct!");
        } else {
            System.out.println("So close! you were " + Math.round(difference) + " degrees F away!");
            System.out.println("Current temp in " + zip + ": " + tempF + " degrees F");
        }
    }

    // Sets Static.tie when all nine board cells are filled and nobody won.
    public static void checkForTie() { //checks for a tie
        int count = 0;
        for (int i=0; i<3; i++) {
            for (int j=0; j<3; j++) {
                if (Static.board[i][j] != ' ') {
                    count++;
                }
            } //end of inner FOR loop
        } //end of outer FOR loop
        if (count == 9 && Static.gameWon == false) {
            Static.tie = true;
        }
    } //end of checkForTie

    // A move is valid if it is 1..9 and that cell has not been used yet.
    public static boolean moveCheck(int move) { //checks to see whether the move is valid
        if (move >= 1 && move <= 9 && Static.usedMoves[move-1] == 0) {
            return true;
        } else {
            return false;
        }
    } //end of moveCheck

    public static void replayTTT() { //determines if player will play again and relaunches/ends game accordingly
        System.out.println("Would you like to play again? (Type yes or no)");
        boolean playAgain = TextIO.getlnBoolean();
        if (playAgain) {
            GameMethod.resetTTT(); //resets the game so it can be played again
            MainGame.playTTT(); //calls the initial method, restarting the game
        } else {
            return;
        }
    } //end of replay

    public static void welcomePlayer() { //welcomes player and explains rules
        //Welcomes the player to the game
        System.out.println();
        System.out.println();
        System.out.println("Welcome to Text-based Two-player Tic-Tac-Toe!");
        System.out.println("Would you like to see the instructions?");
        boolean instructions = TextIO.getlnBoolean();
        //explains the instructions to the player if needed
        if (instructions) {
            System.out.println("Player X (1) will go first, followed by Player O (2).");
            System.out.println("To select a move, type a number between 1 and 9,");
            System.out.println("corresponding to the spots in the board below.");
            System.out.println("[1][2][3]");
            System.out.println("[4][5][6]");
            System.out.println("[7][8][9]");
            System.out.println("The new status of the board will then be printed out,");
            System.out.println("and the next player may then enter their move.");
        } //end of instructions if statement
        System.out.println("Let's get Started!");
    } //end of welcomePlayer

    // Announces the winner.  Note: the winner is the OPPOSITE of
    // Static.playerSymbol because the symbol was already switched after the
    // winning move.
    public static void announceWinner() { //announces the winner
        if (Static.tie) {
            System.out.println("The game is a Tie!");
        } else {
            if (Static.playerSymbol == 'X') {
                System.out.println("The winner is Player 2 (O)!");
            } else {
                System.out.println("The winner is Player 1 (X)!");
            }
        }
    } //end of announceWinner

    public static void printBoard() { //prints the board in its current state
        for (int i=0; i<3; i++) {
            for (int j=0; j<3; j++) {
                System.out.printf("[%1s]", Static.board[i][j]);
            }
            System.out.println();
        }
        System.out.println();
    } //end of printBoard

    public static void emptyBoard() { //empties the board, fills elements with a space
        for (int i=0; i<3; i++) {
            for (int j=0; j<3; j++) {
                Static.board[i][j] = ' ';
            } //end of inner FOR loop
        } //end of outer FOR loop
    } //end of emptyBoard

    // Maps the 1..9 move number to its (row, col) cell and stamps the
    // current player's symbol there.
    public static void updateBoard(int a) { //updates the board
        switch (a) {
            case 1: Static.board[0][0] = Static.playerSymbol; break;
            case 2: Static.board[0][1] = Static.playerSymbol; break;
            case 3: Static.board[0][2] = Static.playerSymbol; break;
            case 4: Static.board[1][0] = Static.playerSymbol; break;
            case 5: Static.board[1][1] = Static.playerSymbol; break;
            case 6: Static.board[1][2] = Static.playerSymbol; break;
            case 7: Static.board[2][0] = Static.playerSymbol; break;
            case 8: Static.board[2][1] = Static.playerSymbol; break;
            case 9: Static.board[2][2] = Static.playerSymbol; break;
        }
    } //end of updateBoard

    public static void switchPlayer() { //switches current player/symbol used
        if (Static.playerSymbol == 'X') {
            Static.playerSymbol = 'O';
        } else if (Static.playerSymbol == 'O') {
            Static.playerSymbol = 'X';
        }
    } //end of switchPlayer

    // Checks all 8 winning lines (3 rows, 3 columns, 2 diagonals).
    public static boolean checkForWin() { //checks to see if a player has won
        if (Static.board[0][0] == Static.board[0][1] && Static.board[0][1] == Static.board[0][2] && Static.board[0][0] != ' ') { //horizontal top
            return true;
        } else if (Static.board[1][0] == Static.board[1][1] && Static.board[1][1] == Static.board[1][2] && Static.board[1][0] != ' ') { //horizontal mid
            return true;
        } else if (Static.board[2][0] == Static.board[2][1] && Static.board[2][1] == Static.board[2][2] && Static.board[2][0] != ' ') { //horizontal bot
            return true;
        } else if (Static.board[0][0] == Static.board[1][0] && Static.board[1][0] == Static.board[2][0] && Static.board[0][0] != ' ') { //vertical left
            return true;
        } else if (Static.board[0][1] == Static.board[1][1] && Static.board[1][1] == Static.board[2][1] && Static.board[0][1] != ' ') { //vertical mid
            return true;
        } else if (Static.board[0][2] == Static.board[1][2] && Static.board[1][2] == Static.board[2][2] && Static.board[0][2] != ' ') { //vertical right
            return true;
        } else if (Static.board[0][0] == Static.board[1][1] && Static.board[1][1] == Static.board[2][2] && Static.board[0][0] != ' ') { //top left corner diagonal
            return true;
        } else if (Static.board[0][2] == Static.board[1][1] && Static.board[1][1] == Static.board[2][0] && Static.board[0][2] != ' ') { //top right corner diagonal
            return true;
        } else {
            return false;
        }
    } //end of checkForWin

    public static void resetTTT() { //resets the game so it can be played again
        //reset starting player to player 1 (X), and turn off tie
        Static.playerSymbol = 'X';
        Static.tie = false;
        Static.gameWon = false;
        //empty out the list of moves already used
        for (int i=0; i<Static.usedMoves.length; i++) {
            Static.usedMoves[i] = 0;
        }
        GameMethod.emptyBoard();
    } //end of resetTTT

    // Random integer in 1..20, used as an operand for the math quiz.
    public static int randomNum(){
        return (int)(20 * Math.random()) + 1;
    } //mathQuiz

    public static void addProb(){
        int randomNumber1 = randomNum();
        int randomNumber2 = randomNum();
        System.out.println("What is "+randomNumber1+" + "+randomNumber2+"?");
        int guess = TextIO.getlnInt();
        int answer = randomNumber1 + randomNumber2;
        if (guess == answer) {
            System.out.println("Correct!");
            Static.mathCorrect++;
        } else {
            System.out.println("Incorrect!");
            System.out.println("The correct answer is " + answer);
        }
    }//mathQuiz

    public static void subProb(){
        int randomNumber1 = randomNum();
        int randomNumber2 = randomNum();
        System.out.println("What is "+randomNumber1+" - "+randomNumber2+"?");
        int guess = TextIO.getlnInt();
        int answer = randomNumber1 - randomNumber2;
        if (guess == answer) {
            System.out.println("Correct!");
            Static.mathCorrect++;
        } else {
            System.out.println("Incorrect!");
            System.out.println("The correct answer is " + answer);
        }
    }//mathQuiz

    public static void multProb(){
        int randomNumber1 = randomNum();
        int randomNumber2 = randomNum();
        System.out.println("What is "+randomNumber1+" * "+randomNumber2+"?");
        int guess = TextIO.getlnInt();
        int answer = randomNumber1 * randomNumber2;
        if (guess == answer) {
            System.out.println("Correct!");
            Static.mathCorrect++;
        } else {
            System.out.println("Incorrect!");
            System.out.println("The correct answer is " + answer);
        }
    }//mathQuiz

    // NOTE(review): this uses integer division (e.g. 7 / 2 expects 3);
    // confirm that is the intended quiz behavior.
    public static void divProb(){
        int randomNumber1 = randomNum();
        int randomNumber2 = randomNum();
        System.out.println("What is "+randomNumber1+" / "+randomNumber2+"?");
        int guess = TextIO.getlnInt();
        int answer = randomNumber1 / randomNumber2;
        if (guess == answer) {
            System.out.println("Correct!");
            Static.mathCorrect++;
        } else {
            System.out.println("Incorrect!");
            System.out.println("The correct answer is " + answer);
        }
    }//mathQuiz

    public static void modProb(){
        int randomNumber1 = randomNum();
        int randomNumber2 = randomNum();
        System.out.println("What is "+randomNumber1+" % "+randomNumber2+"?");
        int guess = TextIO.getlnInt();
        int answer = randomNumber1 % randomNumber2;
        if (guess == answer) {
            System.out.println("Correct!");
            Static.mathCorrect++;
        } else {
            System.out.println("Incorrect!");
            System.out.println("The correct answer is " + answer);
        }
    }//mathQuiz

    // Reports the math quiz score; assumes 5 questions (20% each).
    public static void scoreCalc(){
        double percentageCorrect = Static.mathCorrect * 20;
        System.out.println("You got " + Static.mathCorrect + " correct answers.");
        System.out.println("That's a score of " + percentageCorrect + "%");
    }//mathQuiz

    // Maps a 0..5 "love" score to a percentage string in Static.love.
    // NOTE(review): the final 'else' is unreachable unless sum is negative;
    // confirm whether that branch was meant to handle another case.
    public static void loveTest(int sum) {
        if (sum == 0){
            Static.love = "0%";
        } else if (sum == 1){
            Static.love = "20%";
        } else if (sum == 2){
            Static.love = "40%";
        } else if (sum == 3){
            Static.love = "60%";
        } else if (sum == 4){
            Static.love = "80%";
        } else if (sum >= 5){
            Static.love = "100%";
        } else {
            System.out.println("You might wanna find a new boo!");
        }
    }

    // Dispatches to the trivia question with the given number (1..10).
    public static void selectQ(int q) {
        switch (q) {
            case 1: question1(); break;
            case 2: question2(); break;
            case 3: question3(); break;
            case 4: question4(); break;
            case 5: question5(); break;
            case 6: question6(); break;
            case 7: question7(); break;
            case 8: question8(); break;
            case 9: question9(); break;
            case 10: question10(); break;
        }
    }

    // Random integer in 1..10, used to pick trivia questions.
    public static int randomNum10(){
        return (int)(10 * Math.random()) + 1;
    }

    // Trivia questions: each asks, reads a line, and compares the exact
    // (case-sensitive) answer, bumping Static.wordCorrect on a match.
    public static void question1(){
        System.out.println("What is the capital of Austrailia?");
        String answer = TextIO.getlnString();
        if (answer.equals("Canberra")) {
            System.out.println("Correct");
            Static.wordCorrect++;
        } else{
            System.out.println("Incorrect, the capital is Canberra.");
        }
    }

    public static void question2(){
        System.out.println("Which country is the world's biggest producer of coffee?");
        String answer = TextIO.getlnString();
        if (answer.equals("Brazil")) {
            System.out.println("Correct");
            Static.wordCorrect++;
        } else{
            System.out.println("Incorrect, the answer is Brazil.");
        }
    }

    public static void question3(){
        System.out.println("In what city is the Dome of the Rock found?");
        String answer = TextIO.getlnString();
        if (answer.equals("Jerusalem")) {
            System.out.println("Correct");
            Static.wordCorrect++;
        } else{
            System.out.println("Incorrect, the answer is Jerusalem.");
        }
    }

    public static void question4(){
        System.out.println("Who is younger, Serena or <NAME>?");
        String answer = TextIO.getlnString();
        if (answer.equals("Serena")) {
            System.out.println("Correct");
            Static.wordCorrect++;
        } else{
            System.out.println("Incorrect, the answer is Serena.");
        }
    }

    public static void question5(){
        System.out.println("Which movement was founded by <NAME>?");
        String answer = TextIO.getlnString();
        if (answer.equals("Scientology")) {
            System.out.println("Correct");
            Static.wordCorrect++;
        } else{
            System.out.println("Incorrect, the answer is Scientology.");
        }
    }

    public static void question6(){
        System.out.println("Do the Yankees play in the American or National League?");
        String answer = TextIO.getlnString();
        if (answer.equals("American")) {
            System.out.println("Correct");
            Static.wordCorrect++;
        } else{
            System.out.println("Incorrect, the answer is American.");
        }
    }

    public static void question7(){
        System.out.println("What is the world's largest ocean?");
        String answer = TextIO.getlnString();
        if (answer.equals("Pacific")) {
            System.out.println("Correct");
            Static.wordCorrect++;
        } else{
            System.out.println("Incorrect, the answer is Pacific.");
        }
    }

    public static void question8(){
        System.out.println("What year did the Cold War end?");
        String answer = TextIO.getlnString();
        if (answer.equals("1989")) {
            System.out.println("Correct");
            Static.wordCorrect++;
        } else{
            System.out.println("Incorrect, the answer is 1989.");
        }
    }

    public static void question9(){
        System.out.println("What is the largest freshwater lake in the world?");
        String answer = TextIO.getlnString();
        if (answer.equals("Superior")) {
            System.out.println("Correct");
            Static.wordCorrect++;
        } else{
            System.out.println("Incorrect, the answer is Superior.");
        }
    }

    public static void question10(){
        System.out.println("What is the world's longest river?");
        String answer = TextIO.getlnString();
        if (answer.equals("Amazon")) {
            System.out.println("Correct");
            Static.wordCorrect++;
        } else{
            System.out.println("Incorrect, the answer is Amazon.");
        }
    }

    // Reports the trivia score; assumes 3 questions were asked.
    public static void quizScoreCalc(){
        double percentageCorrect = (double) Static.wordCorrect/3*100;
        System.out.println("You got " + Static.wordCorrect + " correct answers.");
        System.out.println("That's a score of " + percentageCorrect + "%");
    }

    // Picks a random hand for the computer (1=Rock, 2=Paper, 3=Scissors)
    // and stores both the numeric and string forms in Static.
    public static void chooseCH() {
        double cHand = Math.random()*3;
        if (cHand <= 1) {
            Static.computerHand = 1;
            Static.compHand = "Rock";
        } else if (cHand <= 2 && cHand > 1) {
            Static.computerHand = 2;
            Static.compHand = "Paper";
        } else if (cHand <= 3 && cHand > 2) {
            Static.computerHand = 3;
            Static.compHand = "Scissors";
        }
        System.out.println("The computer chooses "+Static.compHand);
    }

    // Compares Static.yourHand vs Static.computerHand and prints the result.
    public static void findWinnerRPS() {
        if (Static.yourHand == 1) { //you are rock
            if (Static.computerHand == 1) { //comp is rock
                System.out.println("It is a tie!");
            } else if (Static.computerHand == 2) { //comp is paper
                System.out.println("You Lose!");
            } else if (Static.computerHand == 3) { //comp is scissors
                System.out.println("You Win!");
            }
        } else if (Static.yourHand == 2) { //you are paper
            if (Static.computerHand == 1) { //comp is rock
                System.out.println("You Win!");
            } else if (Static.computerHand == 2) { //comp is paper
                System.out.println("It is a tie!");
            } else if (Static.computerHand == 3) { //comp is scissors
                System.out.println("You Lose!");
            }
        } else if (Static.yourHand == 3) { //you are scissors
            if (Static.computerHand == 1) { //comp is rock
                System.out.println("You Lose!");
            } else if (Static.computerHand == 2) { //comp is paper
                System.out.println("You Win!");
            } else if (Static.computerHand == 3) { //comp is scissors
                System.out.println("It is a tie!");
            }
        }
    }

    public static void playRPSAgain() {
        System.out.println("Would you like to play again? (Type yes or no)");
        boolean playAgain = TextIO.getlnBoolean();
        if (playAgain) {
            MainGame.playRPS();
        } else {
            return;
        }
    }

    // Applies a one-letter guess to the hangman word: reveals matching
    // positions in Static.Underscore, or advances the gallows drawing when
    // the guess reveals nothing new.
    public static void hang(String guess) {
        //set up to manipulate
        String newUnderscore = "";
        /*loop with ifs so that if a character is right then it changes it into the word that is right */
        for (int i = 0; i < Static.word.length(); i++) {
            if (Static.word.charAt(i) == guess.charAt(0)) {
                newUnderscore += guess.charAt(0);
            } else if (Static.Underscore.charAt(i) != '_') {
                // keep letters that were already revealed
                newUnderscore += Static.word.charAt(i);
            } else {
                newUnderscore += "_";
            }
        }
        // if statement that changes the image if wrong (no new letters revealed)
        if (Static.Underscore.equals(newUnderscore)) {
            Static.count++;
            hangmanImageSwitch();
            //if right, change asteriks
        } else {
            Static.Underscore = newUnderscore;
        }
        //if win, then stop
        if (Static.Underscore.equals(Static.word)) {
            System.out.println("Correct! You won! The word was " + Static.word);
        }
    }//new method - for guessing

    // Draws the gallows stage matching the current wrong-guess count.
    public static void hangmanImageSwitch() {
        //change so that it starts with a full thing, make sure to camelcase too
        switch(Static.count) { // switch
            case 1: hang1(); break;
            case 2: hang2(); break;
            case 3: hang3(); break;
            case 4: hang4(); break;
            case 5: hang5(); break;
            case 6: hang6(); break;
            case 7: hang7(); break;
        }
    }//method for changing picture

    public static void hang1() {
        System.out.println("Wrong guess, try again");
        System.out.println(" ____________");
        System.out.println(" | |");
        System.out.println(" |");
        System.out.println(" |");
        System.out.println(" |");
        System.out.println(" |");
        System.out.println(" |");
        System.out.println(" | ");
        System.out.println("___|___");
    }

    public static void hang2() {
        System.out.println("Wrong guess, try again");
        System.out.println(" ____________");
        System.out.println(" | _|_");
        System.out.println(" | / \\");
        System.out.println(" | \\ __/");
        System.out.println(" |");
        System.out.println(" |");
        System.out.println(" |");
        System.out.println(" |");
        System.out.println("___|___");
    }

    public static void hang3() {
        System.out.println("Wrong guess, try again");
        System.out.println(" ____________");
        System.out.println(" | _|_");
        System.out.println(" | / \\");
        System.out.println(" | \\ __/");
        System.out.println(" | |");
        System.out.println(" | |");
        System.out.println(" |");
        System.out.println(" |");
        System.out.println("___|___");
    }

    public static void hang4() {
        System.out.println("Wrong guess, try again");
        System.out.println(" ____________");
        System.out.println(" | _|_");
        System.out.println(" | / \\");
        System.out.println(" | \\__ /");
        System.out.println(" | |");
        System.out.println(" | |");
        System.out.println(" | \\");
        System.out.println(" | \\ ");
        System.out.println("___|___");
    }

    public static void hang5() {
        System.out.println("Wrong guess, try again!");
        System.out.println(" ____________");
        System.out.println(" | _|_");
        System.out.println(" | / \\");
        System.out.println(" | \\__ /");
        System.out.println(" | |");
        System.out.println(" | | ");
        System.out.println(" | / \\ ");
        System.out.println(" | / \\");
        System.out.println("___|___");
    }

    public static void hang6() {
        System.out.println("Wrong guess, try again");
        System.out.println(" ____________");
        System.out.println(" | _|_");
        System.out.println(" | / \\");
        System.out.println(" | \\__ /");
        System.out.println(" | _|");
        System.out.println(" | / |");
        System.out.println(" | / \\");
        System.out.println(" | / \\ ");
        System.out.println("___|___");
    }

    // Final stage: the game is lost and the word is revealed.
    public static void hang7() {
        System.out.println("GAME OVER!");
        System.out.println(" ____________");
        System.out.println(" | _|_");
        System.out.println(" | / \\");
        System.out.println(" | \\__ /");
        System.out.println(" | _|_");
        System.out.println(" | / | \\");
        System.out.println(" | / \\ ");
        System.out.println(" | / \\");
        System.out.println("___|___");
        System.out.println("Game over! The word was " + Static.word);
    }

    // Builds a space-separated string of 'size' random words for the
    // typing test (includes a trailing space).
    public static String generateTestWords(int size) {
        String testString = "";
        String[] words = new String[] {"cat", "dog", "animal", "basketball", "pizza", "house", "desk", "elephant", "student", "mushroom", "banana", "spongebob", "zealous", "sheep", "aloof", "pneumonia"};
        for (int i = 0; i < size; i++) {
            int randomIndex = (int) (Math.random() * words.length);
            testString += words[randomIndex] + " ";
        }
        return testString;
    }

    // Method from https://stackoverflow.com/a/22186845
    // Rounds 'value' to 'precision' decimal places.
    public static double round (double value, int precision) {
        int scale = (int) Math.pow(10, precision);
        return (double) Math.round(value * scale) / scale;
    }

    // Prints typing-test results: elapsed time and words per minute.
    public static void printStats(double totalSeconds, int testStringSize) {
        double wordsPerMinute = testStringSize / (totalSeconds / 60);
        double roundedTime = round(totalSeconds, 1);
        double roundedWpm = round(wordsPerMinute, 1);
        System.out.println("\nCORRECT! Here are your stats:\n---------");
        System.out.println("Total time: " + roundedTime + " seconds");
        System.out.println("Words Per Minute: " + roundedWpm);
    }
} //end of gameMethod
package main import ( "errors" "fmt" "io/ioutil" "log" "os" "regexp" "strings" ) func setupEnvironment(pathScript string) error { f, err := ioutil.ReadFile(pathScript) if err != nil { log.Printf("Error opening file %v. Error: %v ", pathScript, err) return err } ex := `(?m)^export (.*)="(.*)"$` r := regexp.MustCompile(ex) vars := r.FindAllStringSubmatch(string(f), -1) if vars == nil { message := fmt.Sprintf("No env vars found with pattern %v", ex) log.Printf(message) return errors.New(message) } for _, v := range vars { value := strings.Replace(v[2], `\`, "", -1) // Remove escape chars log.Printf("Defining %v=%v", v[1], value) os.Setenv(v[1], value) } return nil }
/* Selects an (Open)SSL crypto engine */ CURLcode Curl_ssl_set_engine(struct SessionHandle *data, const char *engine) { #ifdef USE_SSLEAY return Curl_ossl_set_engine(data, engine); #else #ifdef USE_GNUTLS (void)data; (void)engine; return CURLE_FAILED_INIT; #else (void)data; (void)engine; return CURLE_FAILED_INIT; #endif #endif }
<gh_stars>0 #pragma once #include "Polygon3D.h" #include "Vertex.h" #include "Matrix.h" #include "UVCoords.h" #include "DirectionalLight.h" #include "AmbientLight.h" #include "PointLight.h" #include "SpotLight.h" #include "Texture.h" #include <vector> #include <array> class Model { public: Model(); ~Model(); // Accessors const std::vector<Polygon3D>& GetPolygons(); const std::vector<Vertex>& GetLocalVertices(); const std::vector<Vertex>& GetTransformedVertices(); const std::vector<UVCoords>& GetUVCoords(); size_t GetPolygonCount() const; size_t GetVertexCount() const; Texture& GetTexture(); void AddVertex(float x, float y, float z); void AddPolygon(int i0, int i1, int i2, int uvIndex0, int uvIndex1, int uvIndex2); void AddTextureUV(float u, float v); void DehomogenizeVertices(); void ApplyTransformToLocalVertices(const Matrix& transform); void ApplyTransformToTransformedVertices(const Matrix& transform); void CalculateBackfaces(const Vertex& cameraPos); void CalculateVertexNormals(); void Sort(); void CalculateAmbientLighting(const AmbientLight& light); void CalculateDirectionalLighting(std::vector<DirectionalLight> lights); void CalculatePointLighting(std::vector<PointLight> lights); void CalculateVertexDirectionalLighting(std::vector<DirectionalLight> lights, Vertex CameraPos); void CalculateVertexPointLighting(std::vector<PointLight> lights, Vertex cameraPos); void CalcualteVertexSpotLighting(std::vector<SpotLight> lights, Vertex cameraPos); void SetBackfaces(bool culled); private: std::vector<Polygon3D> _polygons; std::vector<Vertex> _localVertices; std::vector<Vertex> _transformedVertices; std::vector<UVCoords> _uvCoords; Texture _texture; std::array<float, 3> _ambient{ 0.05f, 0.05f, 0.05f }; std::array<float, 3> _diffuse{ 0.5f, 0.5f, 0.5f }; std::array<float, 3> _specular{ 1.0f, 1.0f, 1.0f }; float _shininess{ 8.0f }; };
<reponame>Xander-21RUS/JavaFX-Chia-Plotter-Helper<filename>src/main/java/ru/xander/JavaFxChiaPlotterHelper/Controllers/ListView/PlotSettingsData.java<gh_stars>0 package ru.xander.JavaFxChiaPlotterHelper.Controllers.ListView; import javafx.scene.control.Alert; import java.io.File; public class PlotSettingsData { public static final int minPlotSize=32; public static final int maxPlotSize=35; public static final int minimumBuckets=16; public static final int maximumBuckets=256; public static final int minimumRamUsage=512; public static final int maximumRamUsage=Integer.MAX_VALUE; public static final int minimumThreads=1; public static final int maximumThreads=128; private int plotSize=-1; private int buckets=-1; private int maxRamUsage=-1; private int threads=-1; private String temporaryPath=null; private boolean useSecondTemporary=false; private String secondTemporaryPath=null; private String finalPath=null; public PlotSettingsData(int plotSize,int buckets,int maxRamUsage,int threads,String temporaryPath,boolean useSecondTemporary,String secondTemporaryPath,String finalPath){ checkPlotSize(plotSize); checkBucketsNumber(buckets); checkMaxRamUsage(maxRamUsage); checkThreadsNumber(threads); checkTemporaryPath(temporaryPath); checkSecondTemporaryPath(useSecondTemporary,secondTemporaryPath); checkFinalPath(finalPath); this.plotSize=plotSize; this.buckets=buckets; this.maxRamUsage=maxRamUsage; this.threads=threads; this.temporaryPath=temporaryPath; this.useSecondTemporary=useSecondTemporary; if(!useSecondTemporary){ this.secondTemporaryPath=""; }else { this.secondTemporaryPath=secondTemporaryPath; } this.finalPath=finalPath; } public int getPlotSize(){ return this.plotSize; } public void setPlotSize(int plotSize){ checkPlotSize(plotSize); this.plotSize=plotSize; } public int getBuckets(){ return this.buckets; } public void setBuckets(int buckets){ checkBucketsNumber(buckets); this.buckets=buckets; } public int getMaxRamUsage() { return this.maxRamUsage; } public void 
setMaxRamUsage(int maxRamUsage) { checkMaxRamUsage(maxRamUsage); this.maxRamUsage = maxRamUsage; } public int getThreads(){ return this.threads; } public void setThreads(int threads){ checkThreadsNumber(threads); this.threads=threads; } public String getTemporaryPath(){ return this.temporaryPath; } public void setTemporaryPath(String temporaryPath){ checkTemporaryPath(temporaryPath); } public boolean getUseSecondTemporary(){ return this.useSecondTemporary; } public void setUseSecondTemporary(boolean useSecondTemporary){ this.useSecondTemporary=useSecondTemporary; this.secondTemporaryPath=""; } public String getSecondTemporaryPath(){ return this.secondTemporaryPath; } public void setSecondTemporaryPath(String secondTemporaryPath){ checkSecondTemporaryPath(true,secondTemporaryPath); } public String getFinalPath(){ return this.finalPath; } public void setFinalPath(String finalPath){ checkFinalPath(finalPath); this.finalPath=finalPath; } public static void checkPlotSize(int plotSize){ if(plotSize<minPlotSize || maxPlotSize<plotSize){ throw new IllegalArgumentException(); } } public static void checkBucketsNumber(int buckets){ if(buckets <minimumBuckets || maximumBuckets <buckets){ throw new IllegalArgumentException(); } double log2= Math.log(buckets)/Math.log(2); if (log2 % 1 == 0) { //целое }else { //не целое throw new IllegalArgumentException(); } } public static void checkMaxRamUsage(int maxRamUsage){ if(maxRamUsage<minimumRamUsage || maximumRamUsage<maxRamUsage){ throw new IllegalArgumentException(); } } public static void checkThreadsNumber(int threads){ if (threads < minimumThreads || maximumThreads < threads) { throw new IllegalArgumentException(); } } private static void checkDir(String path,String dirName){ if(path==null){ throw new IllegalArgumentException(dirName + " path is NULL"); } File file=new File(path); if(!file.exists()){ throw new IllegalArgumentException( dirName + " path does not exist"); } if(!file.isDirectory()){ throw new 
IllegalArgumentException(dirName + " path is not directory"); } if(!file.canWrite()){ throw new IllegalArgumentException(dirName+" directory can't be written"); } } public static void checkTemporaryPath(String temporaryPath){ checkDir(temporaryPath,"Temporary"); } public static void checkSecondTemporaryPath(boolean useSecondTemporary,String secondTemporaryPath){ if(!useSecondTemporary) return; checkDir(secondTemporaryPath,"Second temporary"); } public static void checkFinalPath(String finalPath){ checkDir(finalPath,"Final"); } }
<gh_stars>1-10 #include "../../../catamorph/interpreters/create_evmdd.h" #include "../../../evmdd/abstract_factory.h" #include "../../../polynomial.h" #include "../../Catch/include/catch.hpp" #include <iostream> using std::endl; /******************************************************************** * * Testing construction for expressions with logical propositions * ********************************************************************/ SCENARIO("Testing numeric EVMDDs for construction on iverson based input", "[evmdd][construction][iverson]") { GIVEN("Term [a == b] with domain 0,1,2") { Polynomial p = Polynomial("[a == b]"); WHEN("We create the evmdd") { Domains d = {{"a", 3}, {"b", 3}}; Ordering o = {"b", "a"}; auto evmdd = p.create_evmdd<int>(d, o); THEN("evmdd has the expected structure") { std::stringstream result; evmdd.print(result); std::stringstream expected; expected << "input: 0" << endl; expected << "a 0 0 0" << endl; expected << "b 1 0 0" << endl; expected << "b 0 1 0" << endl; expected << "b 0 0 1" << endl; REQUIRE(result.str() == expected.str()); } } } GIVEN("Term [![a&&b]] with domain 0,1") { Polynomial p = Polynomial("[![a&&b]]"); WHEN("We create the evmdd") { Domains d = {{"a", 2}, {"b", 2}}; Ordering o = {"b", "a"}; auto evmdd = p.create_evmdd<int>(d, o); THEN("reduced evmdd has the expected structure") { std::stringstream result; evmdd.print(result); std::stringstream expected; expected << "input: 0" << endl; expected << "a 1 0" << endl; expected << "b 1 0" << endl; REQUIRE(result.str() == expected.str()); } THEN("quasi-reduced evmdd has the expected structure") { auto &factory = AbstractFactory<int>::get_factory(d, o); evmdd = factory.quasi_reduce(evmdd); std::stringstream result; evmdd.print(result); std::stringstream expected; expected << "input: 0" << endl; expected << "a 1 0" << endl; expected << "b 0 0" << endl; expected << "b 1 0" << endl; REQUIRE(result.str() == expected.str()); } } } GIVEN("Term [!a] with domain 0,1") { Polynomial p = 
Polynomial("[!a]"); WHEN("We create the evmdd") { Domains d = {{"a", 2}}; Ordering o = {"a"}; auto evmdd = p.create_evmdd<int>(d, o); THEN("evmdd has the expected structure") { std::stringstream result; evmdd.print(result); std::stringstream expected; expected << "input: 0" << endl; expected << "a 1 0" << endl; REQUIRE(result.str() == expected.str()); } } } GIVEN("Term [!!a] with domain 0,1") { Polynomial p = Polynomial("[!!a]"); WHEN("We create the evmdd") { Domains d = {{"a", 2}}; Ordering o = {"a"}; auto evmdd = p.create_evmdd<int>(d, o); THEN("evmdd has the expected structure") { std::stringstream result; evmdd.print(result); std::stringstream expected; expected << "input: 0" << endl; expected << "a 0 1" << endl; REQUIRE(result.str() == expected.str()); } } } // If shannon reduction feature added, update accordingly. // (will have less nodes). Similar cases: [a||1] // @etavas GIVEN("Term [a && b] with domains Da = Db = {0,1}") { Polynomial p = Polynomial("[a&&b]"); WHEN("We create the evmdd") { Domains d = {{"a", 2}, {"b", 2}}; Ordering o = {"b", "a"}; auto evmdd = p.create_evmdd<int>(d, o); THEN("reduced evmdd has the expected structure") { std::stringstream result; evmdd.print(result); std::stringstream expected; expected << "input: 0" << endl; expected << "a 0 0" << endl; expected << "b 0 1" << endl; REQUIRE(result.str() == expected.str()); } THEN("quasi-reduced evmdd has the expected structure") { auto &factory = AbstractFactory<int>::get_factory(d, o); evmdd = factory.quasi_reduce(evmdd); std::stringstream result; evmdd.print(result); std::stringstream expected; expected << "input: 0" << endl; expected << "a 0 0" << endl; expected << "b 0 0" << endl; expected << "b 0 1" << endl; REQUIRE(result.str() == expected.str()); } } } GIVEN("Term [a || b] with domains Da = Db = {0,1}") { Polynomial p = Polynomial("[a || b]"); WHEN("We create the evmdd") { Domains d = {{"a", 2}, {"b", 2}}; Ordering o = {"b", "a"}; auto evmdd = p.create_evmdd<int>(d, o); THEN("reduced 
evmdd has the expected structure") { std::stringstream result; evmdd.print(result); std::stringstream expected; expected << "input: 0" << endl; expected << "a 0 1" << endl; expected << "b 0 1" << endl; REQUIRE(result.str() == expected.str()); } THEN("quasi-reduced evmdd has the expected structure") { auto &factory = AbstractFactory<int>::get_factory(d, o); evmdd = factory.quasi_reduce(evmdd); std::stringstream result; evmdd.print(result); std::stringstream expected; expected << "input: 0" << endl; expected << "a 0 1" << endl; expected << "b 0 1" << endl; expected << "b 0 0" << endl; REQUIRE(result.str() == expected.str()); } } } // If shannon reduction feature added, // update below accordingly. (will have less nodes) // @etavas GIVEN("Term [[!a]&&b] with domain 0,1") { Polynomial p = Polynomial("[[!a]&&b]"); WHEN("We create the evmdd") { Domains d = {{"a", 2}, {"b", 2}}; Ordering o = {"b", "a"}; auto evmdd = p.create_evmdd<int>(d, o); THEN("reduced evmdd has the expected structure") { std::stringstream result; evmdd.print(result); std::stringstream expected; expected << "input: 0" << endl; expected << "a 0 0" << endl; expected << "b 0 1" << endl; REQUIRE(result.str() == expected.str()); } THEN("quasi-reduced evmdd has the expected structure") { auto &factory = AbstractFactory<int>::get_factory(d, o); evmdd = factory.quasi_reduce(evmdd); std::stringstream result; evmdd.print(result); std::stringstream expected; expected << "input: 0" << endl; expected << "a 0 0" << endl; expected << "b 0 1" << endl; expected << "b 0 0" << endl; REQUIRE(result.str() == expected.str()); } } } /*************************************************************** * * Tests for combinations of arithmetic and iversion expressions * ****************************************************************/ GIVEN("Term [!a]+b with domain 0,1") { Polynomial p = Polynomial("[!a]+b"); WHEN("We create the evmdd") { Domains d = {{"a", 2}, {"b", 2}}; Ordering o = {"b", "a"}; auto evmdd = 
p.create_evmdd<int>(d, o); THEN("evmdd has the expected structure") { std::stringstream result; evmdd.print(result); std::stringstream expected; expected << "input: 0" << endl; expected << "a 1 0" << endl; expected << "b 0 1" << endl; REQUIRE(result.str() == expected.str()); } } } GIVEN("Term [a&&[!b]]*c+5 with domain 0,1") { Polynomial p = Polynomial("[a&&[!b]]*c+5"); WHEN("We create the evmdd") { Domains d = {{"a", 2}, {"b", 2}, {"c", 2}}; Ordering o = {"c", "b", "a"}; auto evmdd = p.create_evmdd<int>(d, o); THEN("reduced evmdd has the expected structure") { std::stringstream result; evmdd.print(result); std::stringstream expected; expected << "input: 5" << endl; expected << "a 0 0" << endl; expected << "b 0 0" << endl; expected << "c 0 1" << endl; // [a&&[!b]] = 1 REQUIRE(result.str() == expected.str()); } THEN("quasi-reduced evmdd has the expected structure") { auto &factory = AbstractFactory<int>::get_factory(d, o); evmdd = factory.quasi_reduce(evmdd); std::stringstream result; evmdd.print(result); std::stringstream expected; expected << "input: 5" << endl; expected << "a 0 0" << endl; expected << "b 0 0" << endl; expected << "b 0 0" << endl; expected << "c 0 0" << endl; // [a&&[!b]] = 0 expected << "c 0 1" << endl; // [a&&[!b]] = 1 REQUIRE(result.str() == expected.str()); } } } /**************************************************************** * * Tests for logical expressions on non-bool. 
variable valuations * *****************************************************************/ GIVEN("Term [a==b] with domain Da=Db={0,1,2}") { Polynomial p = Polynomial("[a==b]"); WHEN("We create the evmdd") { Domains d = {{"a", 3}, {"b", 3}}; Ordering o = {"b", "a"}; auto evmdd = p.create_evmdd<int>(d, o); THEN("evmdd has the expected structure") { std::stringstream result; evmdd.print(result); std::stringstream expected; expected << "input: 0" << endl; expected << "a 0 0 0" << endl; expected << "b 1 0 0" << endl; expected << "b 0 1 0" << endl; expected << "b 0 0 1" << endl; REQUIRE(result.str() == expected.str()); } } } GIVEN("Term [!a] with domain 0,1,2") { Polynomial p = Polynomial("[!a]"); Domains d = {{"a", 3}}; Ordering o = {"a"}; WHEN("We create the evmdd") { THEN("We get an error") { REQUIRE_THROWS_AS(p.create_evmdd<int>(d, o), std::logic_error); } } } GIVEN("Term [a && b] with domains Da ={0,1} Db = {0,1,2}") { Polynomial p = Polynomial("[a&&b]"); Domains d = {{"a", 2}, {"b", 3}}; Ordering o = {"b", "a"}; WHEN("We create the evmdd") { THEN("We get an error") { REQUIRE_THROWS_AS(p.create_evmdd<int>(d, o), std::logic_error); } } } GIVEN("Term [a || b] with domains Da = Db = {0,1,2}") { Polynomial p = Polynomial("[a || b]"); Domains d = {{"a", 3}, {"b", 3}}; Ordering o = {"b", "a"}; WHEN("We create the evmdd") { THEN("We get an error") { REQUIRE_THROWS_AS(p.create_evmdd<int>(d, o), std::logic_error); } } } // TODO : If shannon reduction is implemented, update below accordingly. 
// (will have less nodes) // (@etavas) GIVEN("Term [[!a]&&b] with domain 0,1,2") { Polynomial p = Polynomial("[[!a]&&b]"); Domains d = {{"a", 3}, {"b", 3}}; Ordering o = {"b", "a"}; WHEN("We create the evmdd") { THEN("We get an error") { REQUIRE_THROWS_AS(p.create_evmdd<int>(d, o), std::logic_error); } } } GIVEN("Term [!a]+b with domain Da=Db={0,1,2}") { Polynomial p = Polynomial("[!a]+b"); Domains d = {{"a", 3}, {"b", 3}}; Ordering o = {"b", "a"}; WHEN("We create the evmdd") { THEN("We get an error") { REQUIRE_THROWS_AS(p.create_evmdd<int>(d, o), std::logic_error); } } } // Valid example GIVEN("Term [!a]+b with domain Da={0,1} Db={0,1,2}") { Polynomial p = Polynomial("[!a]+b"); WHEN("We create the evmdd") { Domains d = {{"a", 2}, {"b", 3}}; Ordering o = {"b", "a"}; auto evmdd = p.create_evmdd<int>(d, o); THEN("evmdd has the expected structure") { std::stringstream result; evmdd.print(result); std::stringstream expected; expected << "input: 0" << endl; expected << "a 1 0" << endl; expected << "b 0 1 2" << endl; REQUIRE(result.str() == expected.str()); } } } } SCENARIO( "Testing numeric EVMDDs for construction with partial ordering on iverson " "based input", "[evmdd][construction][iversion][partial]") { GIVEN("Term [a&&b]+c with domain 0,1") { Domains d = {{"a", 2}, {"b", 2}, {"c", 2}}; Polynomial p = Polynomial("[a&&b]+c"); WHEN("We create the evmdd with ordering a over b") { Ordering o = {"b", "a"}; auto evmdd = p.create_evmdd<int>(d, o); THEN("reduced evmdd has the expected structure") { std::stringstream result; evmdd.print(result); std::stringstream expected; expected << "input: 0" << endl; expected << "c 0 1" << endl; expected << "a 0 0" << endl; expected << "b 0 1" << endl; REQUIRE(result.str() == expected.str()); } THEN("quasi-reduced evmdd has the expected structure") { auto &factory = AbstractFactory<int>::get_factory(d, o); evmdd = factory.quasi_reduce(evmdd); std::stringstream result; evmdd.print(result); std::stringstream expected; expected << "input: 
0" << endl; expected << "c 0 1" << endl; expected << "a 0 0" << endl; expected << "b 0 0" << endl; expected << "b 0 1" << endl; REQUIRE(result.str() == expected.str()); } } WHEN("We create the evmdd with ordering b over a") { Ordering o = {"a", "b"}; auto evmdd = p.create_evmdd<int>(d, o); THEN("reduced evmdd has the expected structure") { std::stringstream result; evmdd.print(result); std::stringstream expected; expected << "input: 0" << endl; expected << "c 0 1" << endl; expected << "b 0 0" << endl; expected << "a 0 1" << endl; REQUIRE(result.str() == expected.str()); } THEN("quasi-reduced evmdd has the expected structure") { auto &factory = AbstractFactory<int>::get_factory(d, o); evmdd = factory.quasi_reduce(evmdd); std::stringstream result; evmdd.print(result); std::stringstream expected; expected << "input: 0" << endl; expected << "c 0 1" << endl; expected << "b 0 0" << endl; expected << "a 0 0" << endl; expected << "a 0 1" << endl; REQUIRE(result.str() == expected.str()); } } WHEN("We create the evmdd with ordering c over a") { Ordering o = {"a", "c"}; auto evmdd = p.create_evmdd<int>(d, o); THEN("reduced evmdd has the expected structure") { std::stringstream result; evmdd.print(result); std::stringstream expected; expected << "input: 0" << endl; expected << "b 0 0" << endl; expected << "c 0 1" << endl; expected << "c 0 1" << endl; expected << "a 0 1" << endl; REQUIRE(result.str() == expected.str()); } THEN("quasi-reduced evmdd has the expected structure") { auto &factory = AbstractFactory<int>::get_factory(d, o); evmdd = factory.quasi_reduce(evmdd); std::stringstream result; evmdd.print(result); std::stringstream expected; expected << "input: 0" << endl; expected << "b 0 0" << endl; expected << "c 0 1" << endl; expected << "c 0 1" << endl; expected << "a 0 0" << endl; expected << "a 0 1" << endl; REQUIRE(result.str() == expected.str()); } } } }
<gh_stars>1-10 import { Relation, relationTypes, Table } from 'nestjs-objection'; import { ObjectionModel } from './objection.model'; import { OmCategory } from './om.category'; // noinspection JSUnusedGlobalSymbols @Table({ tableName: 'products', softDelete: true }) export class OmProduct extends ObjectionModel { id: number; name: string; description: string; image: string; categoryId: number; published: boolean; createdAt: Date; updatedAt: Date; deletedAt: Date; @Relation({ modelClass: OmCategory, relation: relationTypes.HasOneRelation, join: { from: 'products.categoryId', to: 'categories.id' }, }) category: OmCategory; }
package com.springboot.controller; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.web.bind.annotation.RequestMapping; import org.springframework.web.bind.annotation.RestController; import com.springboot.bean.BlogProperties; import com.springboot.bean.ConfigBean; import com.springboot.bean.TestConfigBean; @RestController public class IndexController { @Autowired private BlogProperties blogProperties; @Autowired private ConfigBean configBean; @Autowired private TestConfigBean testConfigBean; @RequestMapping("/") String index() { return testConfigBean.getName(); } }
The automatic computer detection of subtle calcifications in radiographically dense breasts. A preliminary study has been conducted into the automatic detection of extremely small subtle calcifications occurring in radiographically dense breasts. Improvements were made to an algorithm described by the authors in a previous publication (ibid., vol.35, p.1111-18, 1990) in an attempt to improve that algorithm's detection rates for this type of calcification. The new enhanced algorithm has improved true positive detection rates but still has an unacceptable false negative rate. In order to improve the true positive detection rates for this type of calcification the false positive detection rates of all types of calcification would rise to an ineffective level. The preliminary results reported indicate how difficult the detection of extremely subtle calcifications is. Further work using a larger database of high resolution, high quality digital mammograms (currently not available) is clearly required for the detection of these calcifications.
A Delphi consultation to assess indicators of readiness to provide quality health facility-based lymphoedema management services Background The World Health Organization (WHO) in collaboration with partners is developing a toolkit of resources to guide lymphatic filariasis (LF) morbidity management and disability prevention (MMDP) implementation and evaluation. Direct health facility inspection is the preferred method for documenting the readiness of a country programme to provide quality lymphoedema management services, one of the three MMDP criteria used to demonstrate the elimination of LF as a public health problem. Methodology/Principal findings As component of tool development, a Delphi consultation was implemented to gain consensus on six proposed domains and fourteen proposed tracer indicators to measure national programme readiness to provide quality health facility-based lymphoedema management services. A seven-point Likert-type scale was used to rank the importance of proposed domains and tracer indicators. Consensus for inclusion of the indicator was defined a priori as 70% or more of respondents ranking the proposed indicator in the top three tiers (5–7). Purposive sampling was used to select 43 representative experts including country representatives, programme implementers, and technical experts. A 55.8% response rate (n = 24) was achieved for the survey. Analysis of the responses demonstrated that consensus for inclusion had been reached for all proposed domains including trained staff (mean = 6.9, standard deviation (SD) = 0.34), case management and education materials (mean = 6.1, SD = 0.65), water infrastructure (mean = 6.3, SD = 0.81), medicines and commodities (mean = 6.3, SD = 0.69), patient tracking system (mean = 6.3, SD = 0.85), and staff knowledge (mean = 6.5, SD = 0.66). 
Significance The Delphi consultation provided an efficient and structured method for gaining consensus among lymphatic filariasis experts around key lymphoedema management quality indicators. The results from this analysis were used to refine the indicators included within the direct inspection protocol tool to ensure its ability to assess health facility readiness to provide quality lymphoedema management services. Introduction Lymphatic filariasis (LF) is a parasitic infection caused by filarial nematodes that are transmitted by mosquitoes. Chronic infection with LF can lead to clinical manifestations such as lymphoedema and hydrocele that have significant impacts on the mobility and quality of life of affected individuals. Further, individuals with lymphoedema are prone to painful and debilitating secondary bacterial infections, known as acute attacks, that are associated with diminished quality of life and progression of disease . Approximately 947 million people are at risk for LF in more than 73 countries worldwide . In an effort to reduce suffering, LF has been targeted for elimination as a public health problem by 2020 following the World Health Assembly Resolution 50.29 . The Global Programme to Eliminate LF (GPELF) has established a two-pillar strategy for elimination: (1) interruption of transmission through mass drug administration (MDA) and (2) alleviating the suffering of individuals affected by the chronic manifestations of LF infection through the provision of morbidity management and disability prevention (MMDP) services. To meet the criteria established by the World Health Organization (WHO) for the MMDP pillar for elimination, national LF elimination programmes are asked to provide data on the number of patients with lymphoedema (or elephantiasis) and hydrocele, the number of health facilities designated to provide care, and the readiness and quality of the care provided .
Quality of care assessments can be used to understand what resources are needed to improve services as well as advocate for other sectors or departments within the Ministry of Health to supplement these services. While the implementation and evaluation activities for MDA have been clearly defined, there is a need for clearer guidance on the provision and assessment of MMDP services for national LF elimination programmes. In order to meet this need, WHO is developing a toolkit to guide LF MMDP implementation and evaluation. One component of the toolkit is a direct inspection protocol, a tool designed to measure readiness to provide quality health facilitybased lymphoedema management services in accordance with WHO recommendations. Here, we summarize an expert consultation following the Delphi methodology to reach consensus on domains and indicators that should be used to evaluate health-facility readiness to provide quality lymphoedema management services . The results of the Delphi consultation informed the refinement of the direct inspection protocol tool and will assist national LF elimination programmes demonstrating that they have achieved the requirements for validation of elimination of LF as a public health problem. Delphi methodology Indicator development. To establish consensus on indicators to assess the quality of MMDP services, a Delphi methodology was implemented. The Delphi methodology has been utilized by others in the neglected tropical disease (NTD) field to obtain consensus on programmatic targets as well as indicators for programme monitoring and evaluation . It is a quantitative mechanism to gain consensus on a particular topic among a panel of subject matter experts . The Delphi methodology that we implemented (Fig 1) was based on a framework proposed by Deribe and colleagues in the context of establishing indicators to assess endemicity, elimination, and clinical outcomes of podoconiosis . 
Based on a literature review, an expert panel proposed six key domains of facility readiness and 14 tracer indicators to measure readiness of health facilities to provide quality MMDP services for lymphoedema care. The six domains selected were modeled after the Service Availability and Readiness Assessment (SARA), a WHO tool for evaluating health facilities, and included availability of: trained staff, case management and education materials, water infrastructure, medicines and commodities, patient tracking system, and staff knowledge . Fourteen tracer indicators were proposed to evaluate the domains (Table 1). Participant selection. Forty-three experts in LF and NTDs representing various stakeholders globally were identified to participate in this consultation ( Table 2). This target was determined assuming a 20% loss to follow-up over two steps and aimed to achieve 15 to 25 final participants. The process for selecting these participants was modeled on the recommendations for participant selection outlined in previous literature utilizing the Delphi methodology . Participants were from more than ten countries across all WHO regions with LFendemic countries and represented country programmes, non-governmental organizations, bi-lateral and multi-lateral organizations, donor organizations, and academic institutions. Questionnaire. Participants were invited by email to participate in the consultation via a link where they could access an online questionnaire. Participants were asked to report demographic information including professional role, educational background, area of expertise, and years of experience in their respective area of expertise. Participants were then asked to evaluate the domains and corresponding indicators using a seven-point Likert-type scale to optimize discriminating power . 
Finally, participants were asked to rank the importance of each of the domains in determining quality of MMDP services a health facility could provide, ranging from "1 = not at all important" to "7 = extremely important". Participants were asked to rank how well each of the indicators evaluated the respective domain ranging from "1 = strongly disagree" to "7 = strongly agree". Participants were also invited to provide open-ended feedback on the domains and indicators. Seven indicators measured general health facility readiness and quality (e.g. water infrastructure, provision of medications and commodities) and seven measured lymphoedema-specific readiness and quality of services provided by health facilities (e.g. staff training and knowledge). In an effort to harmonize and integrate with ongoing WHO initiatives, it was determined that if the water infrastructure domain was deemed relevant, the tracer indicators for water infrastructure outlined by WHO's water, sanitation, and hygiene (WASH) in healthcare facilities initiative also would be used in the direct inspection protocol for consistency. Therefore, the water infrastructure tracer indicator was not evaluated, thus only thirteen indicators were evaluated in the Delphi methodology versus the total 14 indicators that would be assessed as a component of the direct inspection protocol. Data analysis. Data were analyzed using Microsoft Excel and SAS 9.3 (Cary, NC). Consensus criteria for each domain and tracer indicator were defined a priori as follows: consensus for inclusion was achieved if ≥70% of participants ranked the item in the top three categories (5-7); consensus for exclusion was achieved if ≥70% of participants ranked the item in the bottom three categories (1-3); and no consensus was achieved if neither of the above conditions were met. Counts of respondents' selections were used to calculate the frequency of selected answers to inform whether consensus had been reached.
In addition, the sample mean, median, and range were calculated to assess central tendency for each tracer indicator and domain and to characterize the responses. Central tendency was included in the assessment as a secondary measure of consensus among the respondents. If consensus was not achieved by the above criteria, further refinement and evaluation of the domains and indicators in subsequent rounds of questionnaires was planned until consensus was reached. As a sensitivity analysis, we evaluated the impact of using more stringent consensus criteria as follows: consensus for inclusion was achieved if ≥70% of participants ranked the item in the top two categories (6-7); consensus for exclusion was achieved if ≥70% of participants ranked the item in the bottom two categories (1-2); and no consensus was achieved if neither of the above conditions were met. Results The response rate for the online survey component of the Delphi consultation was 55.8% (n = 24). The individuals who participated in the survey represented a range of professions ( Table 2). A third of participants (n = 8, 33.3%) had more than 25 years of experience in their respective field. Participants' responses to the domains and tracer indicators are presented in Tables 3 and 4. In the first round, there was consensus that all six domains of readiness and quality of MMDP services in health care facilities were important. None of the respondents ranked the domains in the bottom three categories. The strongest agreement was observed for trained staff and staff knowledge with 87.5% and 62.5% of respondents respectively indicating they felt that these domains were extremely important. Furthermore, consensus was reached for all thirteen evaluated tracer indicators, though a wider range of ranking was observed.
The strongest consensus was observed for tracer indicators related to medicines and commodities-primarily the availability of medicines-as well as the tracer indicators for case management and education materials. Based on the sensitivity analysis using stricter criteria for consensus, all domains met consensus criteria under stricter conditions with between 79.2% and 100% of respondents ranking the domains in the top two categories. All of the tracer indicators met the stricter consensus criteria except for staff training in the last two years (66.7%) and at least one patient with lymphoedema recorded in the reporting system in the last 12 months (50.0%). Common themes from the qualitative feedback included: the need for a more robust definition of training and refresher training, the importance of clinic staff being able to identify more than one sign, symptom, and management strategy, and a need for a more clearly stated definition for a patient tracking system. Discussion The World Health Assembly resolution to eliminate LF was built on a desire to mitigate the harm caused by LF, both by preventing future infection as well as by alleviating the suffering experienced by individuals who present with clinical manifestations as a result of infection. Since the clinical sequelae of LF develop many years after infection and are chronic, national LF programmes must work closely within the health care system to ensure that MMDP services are well integrated, available, and sustainable. While the components of a minimum package of care for lymphoedema and hydrocele patients have been clearly defined, there is a need for standardization in evaluating and reporting the availability and quality of MMDP services in the provision of the minimum package of care at healthcare facilities.
This Delphi consultation allowed input from multiple stakeholders and improved the practicality and acceptability of a standard survey for direct inspection of health facilities to assess readiness and quality of MMDP. Based on previous literature, framing questions using a Likert-type scale for Delphi consultations facilitates straightforward statistical analysis to assess for consensus across respondents. A strength of the Delphi methodology is that it allows stakeholders from a variety of perspectives to offer their expert opinion on the key elements that need to be included in an evaluation of quality services. Using a Delphi consultation, we were able to gather input for indicator development from a range of stakeholders. Our hope is that this approach will lead to broader stakeholder support and acceptability. Based on the diversity of participants, we feel that the consensus achieved reflects the priorities of global partners working towards elimination of LF as a public health problem. However, due to limitations in accessibility we were unable to include the perspectives of two important stakeholders: health facility level staff and affected patients. Steps were taken to include feedback from staff and patients during the pilot testing of the direct inspection protocol as discussed later. Though consensus was reached for the domains and indicators, through open-ended feedback experts proposed more stringent criteria to strengthen the indicators to measure the readiness of health facilities to provide quality lymphoedema management care. Citing the critical need for appropriate identification of lymphoedema in patients, experts suggested that a greater emphasis should be placed on the evaluation of staff knowledge. To address this, questions assessing staff knowledge were modified to require two correct responses instead of one for each tracer indicator. No significant changes were made to the components of the remaining domains and indicators.
The fourteen tracer indicators, refined as part of the Delphi consultation, are intended to comprise the questionnaire component of a health facility inspection tool, allowing LF programmes to evaluate the readiness of health facilities to provide quality lymphoedema management services as a component of MMDP programmes. The inspection comprises a facility walkthrough and interview with key health personnel at randomly selected health facilities providing lymphoedema management services. The surveyor evaluates if the facility meets the criteria for each indicator, through direct observation where relevant (e.g. the presence of medicines and commodities). The results of the questionnaire generate a health facility score, by which the programme can evaluate highly performing and poorly performing health facilities. Programmes can also compare indicator scores across facilities to identify systematic strengths and weaknesses, in order to implement informed process-improvement steps to strengthen the quality of lymphoedema management services. In addition, the standardization offered through these tracer indicators provides programmes with the ability to compare lymphoedema management services across settings. We recognize that while the primary focus of this Delphi consultation was assessing lymphoedema management services, hydrocele care is also important in LF endemic countries. We feel confident that we could replicate similar procedures to develop indicators to assess health facility readiness to provide quality hydrocele care. Due to the unique components of care required for hydrocele patients, expert consultation with urologists with expertise in hydrocele management will be conducted to determine the domains and tracer indicators for evaluating readiness and quality of health-facility based hydrocele care.
A standardized protocol, the WHO Surgical Assessment Tool (SAT), provides information on general surgical capacity including the availability of hydrocelectomy and is under revision. Efforts are under consideration to include a module specifically evaluating the readiness and quality of hydrocele care. By demonstrating that the global community is in agreement about the components of lymphoedema management that healthcare facilities must be prepared to provide their patients, there is evidence to support the inclusion of the direct inspection protocol tool in the MMDP toolkit for countries implementing the MMDP component of the GPELF strategy. In an effort to assess the measurability of these indicators in healthcare facilities, the quality domains and tracer indicators identified were included in a pilot direct inspection tool to assess the readiness of healthcare facilities to provide quality lymphoedema management services in Mali, Vietnam, and Haiti . The pilot demonstrated that these indicators were feasible to implement and yielded useful information about the quality of services; however minor changes were incorporated to the survey based on the results of the pilot. The direct inspection protocol is intended to supplement SARA assessments, provide more detailed information on lymphoedema management services, and to ensure programme managers have the information needed to plan for services to meet the needs of individuals with LF.
<gh_stars>0 import puppeteer = require('puppeteer'); const jobElementSelector = '.job-card-container__link.job-card-list__title'; // const jobElementSelector = 'a.job-card-container__link'; // const jobListSelector = '.jobs-search-results__list.list-style-none'; /** * @param page * @returns Devuelve los enlaces de las ofertas de trabajo encontradas. */ export async function scrapJobLinks(page: puppeteer.Page): Promise<string[]> { console.debug('Init scrapping job links...'); const links = await page.evaluate((elementSelector) => { const elements = document.querySelectorAll(elementSelector); return Array.from(elements).map((element: HTMLLinkElement) => element.href); }, jobElementSelector); console.debug(`Job links scrapped successfully (count: ${links.length})`); console.debug('Job links scrapped', links); return links; }
<gh_stars>0 import { parseDate, serializeDate } from "../serialization/date"; export function toBrowserLocalTime(value: KnockoutObservable<string | null>) : KnockoutComputed<string | null> { const convert = () => { const unwrappedValue = ko.unwrap(value); return serializeDate(parseDate(unwrappedValue, true), false); }; const convertBack = (newVal: string | null) => { const result = serializeDate(parseDate(newVal, false), true); value(result); } if (ko.isWriteableObservable(value)) { return ko.pureComputed({ read: convert, write: convertBack }); } else { return ko.pureComputed(convert); } }
/**
 * Registers the listeners that drive the game's actions.
 * <p>
 * The action listener is attached to the {@code Starter} component, while the
 * key listener is attached both to {@code Starter} and to the content pane so
 * key events are captured regardless of which component has focus.
 * NOTE(review): assumes {@code Starter} is a button-like field of the
 * enclosing frame — confirm in the class declaration.
 *
 * @param actionHandler
 *            listener invoked when the starter component fires an action
 * @param keyHandler
 *            listener invoked for key events on {@code Starter} and the content pane
 */
public void addAction(ActionListener actionHandler, KeyListener keyHandler) {
    Starter.addActionListener(actionHandler);
    Starter.addKeyListener(keyHandler);
    getContentPane().addKeyListener(keyHandler);
}